summaryrefslogtreecommitdiffstats
path: root/contrib/gcc/config
diff options
context:
space:
mode:
authorkan <kan@FreeBSD.org>2005-06-03 03:28:44 +0000
committerkan <kan@FreeBSD.org>2005-06-03 03:28:44 +0000
commit2156e40a831a8e0ab68e4bc091c2940bf46ca6df (patch)
treef0dc8ad34f9fcaf27052e24e893a4284b5fee6e9 /contrib/gcc/config
parent0a20abcc95340c9d2bb59421bac84eca4fb43b0c (diff)
downloadFreeBSD-src-2156e40a831a8e0ab68e4bc091c2940bf46ca6df.zip
FreeBSD-src-2156e40a831a8e0ab68e4bc091c2940bf46ca6df.tar.gz
Gcc 3.4.4 release.
Diffstat (limited to 'contrib/gcc/config')
-rw-r--r--contrib/gcc/config/alpha/alpha.c139
-rw-r--r--contrib/gcc/config/alpha/alpha.h2
-rw-r--r--contrib/gcc/config/alpha/alpha.md124
-rw-r--r--contrib/gcc/config/alpha/qrnnd.asm4
-rw-r--r--contrib/gcc/config/alpha/t-osf46
-rw-r--r--contrib/gcc/config/arm/arm-protos.h4
-rw-r--r--contrib/gcc/config/arm/arm.c40
-rw-r--r--contrib/gcc/config/arm/arm.h2
-rw-r--r--contrib/gcc/config/arm/arm.md20
-rw-r--r--contrib/gcc/config/arm/t-netbsd6
-rw-r--r--contrib/gcc/config/arm/t-rtems10
-rw-r--r--contrib/gcc/config/darwin-protos.h2
-rw-r--r--contrib/gcc/config/darwin.c35
-rw-r--r--contrib/gcc/config/darwin.h14
-rw-r--r--contrib/gcc/config/freebsd-spec.h26
-rw-r--r--contrib/gcc/config/i386/cygwin1.c8
-rw-r--r--contrib/gcc/config/i386/darwin.h4
-rw-r--r--contrib/gcc/config/i386/emmintrin.h2
-rw-r--r--contrib/gcc/config/i386/freebsd.h11
-rw-r--r--contrib/gcc/config/i386/gthr-win32.c32
-rw-r--r--contrib/gcc/config/i386/i386-modes.def2
-rw-r--r--contrib/gcc/config/i386/i386-protos.h1
-rw-r--r--contrib/gcc/config/i386/i386.c132
-rw-r--r--contrib/gcc/config/i386/i386.h14
-rw-r--r--contrib/gcc/config/i386/i386.md1068
-rw-r--r--contrib/gcc/config/i386/t-rtems-i38612
-rw-r--r--contrib/gcc/config/i386/xmmintrin.h4
-rw-r--r--contrib/gcc/config/ia64/ia64.c98
-rw-r--r--contrib/gcc/config/ia64/t-glibc4
-rw-r--r--contrib/gcc/config/ia64/t-glibc-libunwind4
-rw-r--r--contrib/gcc/config/ia64/t-hpux2
-rw-r--r--contrib/gcc/config/ia64/unwind-ia64.c23
-rw-r--r--contrib/gcc/config/ia64/unwind-ia64.h3
-rw-r--r--contrib/gcc/config/rs6000/aix.h18
-rw-r--r--contrib/gcc/config/rs6000/aix41.h4
-rw-r--r--contrib/gcc/config/rs6000/aix43.h5
-rw-r--r--contrib/gcc/config/rs6000/aix52.h4
-rw-r--r--contrib/gcc/config/rs6000/altivec.h11868
-rw-r--r--contrib/gcc/config/rs6000/altivec.md4
-rw-r--r--contrib/gcc/config/rs6000/beos.h12
-rw-r--r--contrib/gcc/config/rs6000/darwin-ldouble-shared.c2
-rw-r--r--contrib/gcc/config/rs6000/darwin-ldouble.c39
-rw-r--r--contrib/gcc/config/rs6000/darwin.h7
-rw-r--r--contrib/gcc/config/rs6000/eabi.asm2
-rw-r--r--contrib/gcc/config/rs6000/libgcc-ppc64.ver10
-rw-r--r--contrib/gcc/config/rs6000/linux-unwind.h322
-rw-r--r--contrib/gcc/config/rs6000/linux.h98
-rw-r--r--contrib/gcc/config/rs6000/linux64.h195
-rw-r--r--contrib/gcc/config/rs6000/rs6000-c.c14
-rw-r--r--contrib/gcc/config/rs6000/rs6000-protos.h2
-rw-r--r--contrib/gcc/config/rs6000/rs6000.c781
-rw-r--r--contrib/gcc/config/rs6000/rs6000.h16
-rw-r--r--contrib/gcc/config/rs6000/rs6000.md212
-rw-r--r--contrib/gcc/config/rs6000/rtems.h21
-rw-r--r--contrib/gcc/config/rs6000/spe.h19
-rw-r--r--contrib/gcc/config/rs6000/spe.md4
-rw-r--r--contrib/gcc/config/rs6000/sysv4.h9
-rw-r--r--contrib/gcc/config/rs6000/t-aix435
-rw-r--r--contrib/gcc/config/rs6000/t-aix525
-rw-r--r--contrib/gcc/config/rs6000/t-linux645
-rw-r--r--contrib/gcc/config/rs6000/t-newas3
-rw-r--r--contrib/gcc/config/rs6000/t-rtems1
-rw-r--r--contrib/gcc/config/s390/s390.md14
-rw-r--r--contrib/gcc/config/s390/tpf.h3
-rw-r--r--contrib/gcc/config/sparc/sparc.c156
-rw-r--r--contrib/gcc/config/sparc/sparc.md1
-rw-r--r--contrib/gcc/config/sparc/t-elf4
-rw-r--r--contrib/gcc/config/t-libunwind9
-rw-r--r--contrib/gcc/config/t-libunwind-elf30
-rw-r--r--contrib/gcc/config/t-slibgcc-darwin6
-rw-r--r--contrib/gcc/config/t-slibgcc-elf-ver6
-rw-r--r--contrib/gcc/config/t-slibgcc-sld6
72 files changed, 9787 insertions, 5993 deletions
diff --git a/contrib/gcc/config/alpha/alpha.c b/contrib/gcc/config/alpha/alpha.c
index 0086968..fbaeabe 100644
--- a/contrib/gcc/config/alpha/alpha.c
+++ b/contrib/gcc/config/alpha/alpha.c
@@ -1947,6 +1947,17 @@ alpha_legitimize_address (rtx x, rtx scratch,
}
}
+/* Primarily this is required for TLS symbols, but given that our move
+ patterns *ought* to be able to handle any symbol at any time, we
+ should never be spilling symbolic operands to the constant pool, ever. */
+
+static bool
+alpha_cannot_force_const_mem (rtx x)
+{
+ enum rtx_code code = GET_CODE (x);
+ return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
+}
+
/* We do not allow indirect calls to be optimized into sibling calls, nor
can we allow a call to a function with a different GP to be optimized
into a sibcall. */
@@ -3186,7 +3197,13 @@ alpha_emit_conditional_branch (enum rtx_code code)
/* If the constants doesn't fit into an immediate, but can
be generated by lda/ldah, we adjust the argument and
compare against zero, so we can use beq/bne directly. */
- else if (GET_CODE (op1) == CONST_INT && (code == EQ || code == NE))
+ /* ??? Don't do this when comparing against symbols, otherwise
+ we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
+ be declared false out of hand (at least for non-weak). */
+ else if (GET_CODE (op1) == CONST_INT
+ && (code == EQ || code == NE)
+ && !(symbolic_operand (op0, VOIDmode)
+ || (GET_CODE (op0) == REG && REG_POINTER (op0))))
{
HOST_WIDE_INT v = INTVAL (op1), n = -v;
@@ -6786,11 +6803,6 @@ alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
break;
imask |= 1UL << regno;
}
-
- /* Glibc likes to use $31 as an unwind stopper for crt0. To
- avoid hackery in unwind-dw2.c, we need to actively store a
- zero in the prologue of _Unwind_RaiseException et al. */
- imask |= 1UL << 31;
}
/* If any register spilled, then spill the return address also. */
@@ -7046,6 +7058,48 @@ set_frame_related_p (void)
#define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
+/* Generates a store with the proper unwind info attached. VALUE is
+ stored at BASE_REG+BASE_OFS. If FRAME_BIAS is non-zero, then BASE_REG
+ contains SP+FRAME_BIAS, and that is the unwind info that should be
+ generated. If FRAME_REG != VALUE, then VALUE is being stored on
+ behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
+
+static void
+emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
+ HOST_WIDE_INT base_ofs, rtx frame_reg)
+{
+ rtx addr, mem, insn;
+
+ addr = plus_constant (base_reg, base_ofs);
+ mem = gen_rtx_MEM (DImode, addr);
+ set_mem_alias_set (mem, alpha_sr_alias_set);
+
+ insn = emit_move_insn (mem, value);
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ if (frame_bias || value != frame_reg)
+ {
+ if (frame_bias)
+ {
+ addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
+ mem = gen_rtx_MEM (DImode, addr);
+ }
+
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, mem, frame_reg),
+ REG_NOTES (insn));
+ }
+}
+
+static void
+emit_frame_store (unsigned int regno, rtx base_reg,
+ HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
+{
+ rtx reg = gen_rtx_REG (DImode, regno);
+ emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
+}
+
/* Write function prologue. */
/* On vms we have two kinds of functions:
@@ -7075,7 +7129,7 @@ alpha_expand_prologue (void)
HOST_WIDE_INT frame_size;
/* Offset from base reg to register save area. */
HOST_WIDE_INT reg_offset;
- rtx sa_reg, mem;
+ rtx sa_reg;
int i;
sa_size = alpha_sa_size ();
@@ -7225,37 +7279,40 @@ alpha_expand_prologue (void)
if (!TARGET_ABI_UNICOSMK)
{
+ HOST_WIDE_INT sa_bias = 0;
+
/* Cope with very large offsets to the register save area. */
sa_reg = stack_pointer_rtx;
if (reg_offset + sa_size > 0x8000)
{
int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
- HOST_WIDE_INT bias;
+ rtx sa_bias_rtx;
if (low + sa_size <= 0x8000)
- bias = reg_offset - low, reg_offset = low;
+ sa_bias = reg_offset - low, reg_offset = low;
else
- bias = reg_offset, reg_offset = 0;
+ sa_bias = reg_offset, reg_offset = 0;
sa_reg = gen_rtx_REG (DImode, 24);
- FRP (emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx,
- GEN_INT (bias))));
+ sa_bias_rtx = GEN_INT (sa_bias);
+
+ if (add_operand (sa_bias_rtx, DImode))
+ emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
+ else
+ {
+ emit_move_insn (sa_reg, sa_bias_rtx);
+ emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
+ }
}
/* Save regs in stack order. Beginning with VMS PV. */
if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
- {
- mem = gen_rtx_MEM (DImode, stack_pointer_rtx);
- set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_PV)));
- }
+ emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
/* Save register RA next. */
if (imask & (1UL << REG_RA))
{
- mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
- set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
+ emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
imask &= ~(1UL << REG_RA);
reg_offset += 8;
}
@@ -7264,36 +7321,14 @@ alpha_expand_prologue (void)
for (i = 0; i < 31; i++)
if (imask & (1UL << i))
{
- mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
- set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (mem, gen_rtx_REG (DImode, i)));
+ emit_frame_store (i, sa_reg, sa_bias, reg_offset);
reg_offset += 8;
}
- /* Store a zero if requested for unwinding. */
- if (imask & (1UL << 31))
- {
- rtx insn, t;
-
- mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
- set_mem_alias_set (mem, alpha_sr_alias_set);
- insn = emit_move_insn (mem, const0_rtx);
-
- RTX_FRAME_RELATED_P (insn) = 1;
- t = gen_rtx_REG (Pmode, 31);
- t = gen_rtx_SET (VOIDmode, mem, t);
- t = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, t, REG_NOTES (insn));
- REG_NOTES (insn) = t;
-
- reg_offset += 8;
- }
-
for (i = 0; i < 31; i++)
if (fmask & (1UL << i))
{
- mem = gen_rtx_MEM (DFmode, plus_constant (sa_reg, reg_offset));
- set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (mem, gen_rtx_REG (DFmode, i+32)));
+ emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
reg_offset += 8;
}
}
@@ -7307,19 +7342,13 @@ alpha_expand_prologue (void)
for (i = 9; i < 15; i++)
if (imask & (1UL << i))
{
- mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
- reg_offset));
- set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (mem, gen_rtx_REG (DImode, i)));
+ emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
reg_offset -= 8;
}
for (i = 2; i < 10; i++)
if (fmask & (1UL << i))
{
- mem = gen_rtx_MEM (DFmode, plus_constant (hard_frame_pointer_rtx,
- reg_offset));
- set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (mem, gen_rtx_REG (DFmode, i+32)));
+ emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
reg_offset -= 8;
}
}
@@ -7713,9 +7742,6 @@ alpha_expand_epilogue (void)
reg_offset += 8;
}
- if (imask & (1UL << 31))
- reg_offset += 8;
-
for (i = 0; i < 31; ++i)
if (fmask & (1UL << i))
{
@@ -10215,6 +10241,8 @@ alpha_init_libfuncs (void)
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
@@ -10257,4 +10285,3 @@ struct gcc_target targetm = TARGET_INITIALIZER;
#include "gt-alpha.h"
-
diff --git a/contrib/gcc/config/alpha/alpha.h b/contrib/gcc/config/alpha/alpha.h
index d59797c..ae3a349 100644
--- a/contrib/gcc/config/alpha/alpha.h
+++ b/contrib/gcc/config/alpha/alpha.h
@@ -641,6 +641,7 @@ extern const char *alpha_tls_size_string; /* For -mtls-size= */
#define HARD_REGNO_MODE_OK(REGNO, MODE) \
((REGNO) >= 32 && (REGNO) <= 62 \
? (MODE) == SFmode || (MODE) == DFmode || (MODE) == DImode \
+ || (MODE) == SCmode || (MODE) == DCmode \
: 1)
/* Value is 1 if MODE is a supported vector mode. */
@@ -1189,6 +1190,7 @@ do { \
#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, 26)
#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (26)
#define DWARF_ALT_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (64)
+#define DWARF_ZERO_REG 31
/* Describe how we implement __builtin_eh_return. */
#define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (N) + 16 : INVALID_REGNUM)
diff --git a/contrib/gcc/config/alpha/alpha.md b/contrib/gcc/config/alpha/alpha.md
index 998e300..c008e1b 100644
--- a/contrib/gcc/config/alpha/alpha.md
+++ b/contrib/gcc/config/alpha/alpha.md
@@ -77,6 +77,7 @@
(UNSPECV_PLDGP2 11) ; prologue ldgp
(UNSPECV_SET_TP 12)
(UNSPECV_RPCC 13)
+ (UNSPECV_SETJMPR_ER 14) ; builtin_setjmp_receiver fragment
])
;; Where necessary, the suffixes _le and _be are used to distinguish between
@@ -438,9 +439,9 @@
;; and if we split before reload, we will require additional instructions.
(define_insn "*adddi_fp_hack"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (plus:DI (match_operand:DI 1 "reg_no_subreg_operand" "r")
- (match_operand:DI 2 "const_int_operand" "n")))]
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r")
+ (plus:DI (match_operand:DI 1 "reg_no_subreg_operand" "r,r,r")
+ (match_operand:DI 2 "const_int_operand" "K,L,n")))]
"NONSTRICT_REG_OK_FP_BASE_P (operands[1])
&& INTVAL (operands[2]) >= 0
/* This is the largest constant an lda+ldah pair can add, minus
@@ -454,7 +455,10 @@
+ max_reg_num () * UNITS_PER_WORD
+ current_function_pretend_args_size)
- current_function_pretend_args_size))"
- "#")
+ "@
+ lda %0,%2(%1)
+ ldah %0,%h2(%1)
+ #")
;; Don't do this if we are adjusting SP since we don't want to do it
;; in two steps. Don't split FP sources for the reason listed above.
@@ -6897,70 +6901,44 @@
"jmp $31,(%0),0"
[(set_attr "type" "ibr")])
-(define_insn "*builtin_setjmp_receiver_er_sl_1"
- [(unspec_volatile [(label_ref (match_operand 0 "" ""))] UNSPECV_SETJMPR)]
- "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF && TARGET_AS_CAN_SUBTRACT_LABELS"
- "lda $27,$LSJ%=-%l0($27)\n$LSJ%=:")
-
-(define_insn "*builtin_setjmp_receiver_er_1"
- [(unspec_volatile [(label_ref (match_operand 0 "" ""))] UNSPECV_SETJMPR)]
- "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
- "br $27,$LSJ%=\n$LSJ%=:"
- [(set_attr "type" "ibr")])
-
-(define_split
- [(unspec_volatile [(label_ref (match_operand 0 "" ""))] UNSPECV_SETJMPR)]
- "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF
- && prev_nonnote_insn (insn) == operands[0]"
- [(const_int 0)]
- "
-{
- emit_note (NOTE_INSN_DELETED);
- DONE;
-}")
-
-(define_insn "*builtin_setjmp_receiver_1"
+(define_expand "builtin_setjmp_receiver"
[(unspec_volatile [(label_ref (match_operand 0 "" ""))] UNSPECV_SETJMPR)]
"TARGET_ABI_OSF"
- "br $27,$LSJ%=\n$LSJ%=:\;ldgp $29,0($27)"
- [(set_attr "length" "12")
- (set_attr "type" "multi")])
+ "")
-(define_expand "builtin_setjmp_receiver_er"
- [(unspec_volatile [(label_ref (match_operand 0 "" ""))] UNSPECV_SETJMPR)
+(define_insn_and_split "*builtin_setjmp_receiver_1"
+ [(unspec_volatile [(match_operand 0 "" "")] UNSPECV_SETJMPR)]
+ "TARGET_ABI_OSF"
+{
+ if (TARGET_EXPLICIT_RELOCS)
+ return "#";
+ else
+ return "br $27,$LSJ%=\n$LSJ%=:\;ldgp $29,0($27)";
+}
+ "&& TARGET_EXPLICIT_RELOCS && reload_completed"
+ [(unspec_volatile [(match_dup 0)] UNSPECV_SETJMPR_ER)
(set (match_dup 1)
(unspec_volatile:DI [(match_dup 2) (match_dup 3)] UNSPECV_LDGP1))
(set (match_dup 1)
(unspec:DI [(match_dup 1) (match_dup 3)] UNSPEC_LDGP2))]
- ""
{
operands[1] = pic_offset_table_rtx;
operands[2] = gen_rtx_REG (Pmode, 27);
operands[3] = GEN_INT (alpha_next_sequence_number++);
-})
+}
+ [(set_attr "length" "12")
+ (set_attr "type" "multi")])
-(define_expand "builtin_setjmp_receiver"
- [(unspec_volatile [(label_ref (match_operand 0 "" ""))] UNSPECV_SETJMPR)]
- "TARGET_ABI_OSF"
-{
- if (TARGET_EXPLICIT_RELOCS)
- {
- emit_insn (gen_builtin_setjmp_receiver_er (operands[0]));
- DONE;
- }
-})
+(define_insn "*builtin_setjmp_receiver_er_sl_1"
+ [(unspec_volatile [(match_operand 0 "" "")] UNSPECV_SETJMPR_ER)]
+ "TARGET_ABI_OSF && TARGET_EXPLICIT_RELOCS && TARGET_AS_CAN_SUBTRACT_LABELS"
+ "lda $27,$LSJ%=-%l0($27)\n$LSJ%=:")
-(define_expand "exception_receiver_er"
- [(set (match_dup 0)
- (unspec_volatile:DI [(match_dup 1) (match_dup 2)] UNSPECV_LDGP1))
- (set (match_dup 0)
- (unspec:DI [(match_dup 0) (match_dup 2)] UNSPEC_LDGP2))]
- ""
-{
- operands[0] = pic_offset_table_rtx;
- operands[1] = gen_rtx_REG (Pmode, 26);
- operands[2] = GEN_INT (alpha_next_sequence_number++);
-})
+(define_insn "*builtin_setjmp_receiver_er_1"
+ [(unspec_volatile [(match_operand 0 "" "")] UNSPECV_SETJMPR_ER)]
+ "TARGET_ABI_OSF && TARGET_EXPLICIT_RELOCS"
+ "br $27,$LSJ%=\n$LSJ%=:"
+ [(set_attr "type" "ibr")])
(define_expand "exception_receiver"
[(unspec_volatile [(match_dup 0)] UNSPECV_EHR)]
@@ -6968,28 +6946,38 @@
{
if (TARGET_LD_BUGGY_LDGP)
operands[0] = alpha_gp_save_rtx ();
- else if (TARGET_EXPLICIT_RELOCS)
- {
- emit_insn (gen_exception_receiver_er ());
- DONE;
- }
else
operands[0] = const0_rtx;
})
-(define_insn "*exception_receiver_1"
- [(unspec_volatile [(const_int 0)] UNSPECV_EHR)]
- "! TARGET_LD_BUGGY_LDGP"
- "ldgp $29,0($26)"
- [(set_attr "length" "8")
- (set_attr "type" "multi")])
-
(define_insn "*exception_receiver_2"
[(unspec_volatile [(match_operand:DI 0 "memory_operand" "m")] UNSPECV_EHR)]
- "TARGET_LD_BUGGY_LDGP"
+ "TARGET_ABI_OSF && TARGET_LD_BUGGY_LDGP"
"ldq $29,%0"
[(set_attr "type" "ild")])
+(define_insn_and_split "*exception_receiver_1"
+ [(unspec_volatile [(const_int 0)] UNSPECV_EHR)]
+ "TARGET_ABI_OSF"
+{
+ if (TARGET_EXPLICIT_RELOCS)
+ return "ldah $29,0($26)\t\t!gpdisp!%*\;lda $29,0($29)\t\t!gpdisp!%*";
+ else
+ return "ldgp $29,0($26)";
+}
+ "&& TARGET_EXPLICIT_RELOCS && reload_completed"
+ [(set (match_dup 0)
+ (unspec_volatile:DI [(match_dup 1) (match_dup 2)] UNSPECV_LDGP1))
+ (set (match_dup 0)
+ (unspec:DI [(match_dup 0) (match_dup 2)] UNSPEC_LDGP2))]
+{
+ operands[0] = pic_offset_table_rtx;
+ operands[1] = gen_rtx_REG (Pmode, 26);
+ operands[2] = GEN_INT (alpha_next_sequence_number++);
+}
+ [(set_attr "length" "8")
+ (set_attr "type" "multi")])
+
(define_expand "nonlocal_goto_receiver"
[(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)
(set (reg:DI 27) (mem:DI (reg:DI 29)))
diff --git a/contrib/gcc/config/alpha/qrnnd.asm b/contrib/gcc/config/alpha/qrnnd.asm
index d6373ec..da9c4bc 100644
--- a/contrib/gcc/config/alpha/qrnnd.asm
+++ b/contrib/gcc/config/alpha/qrnnd.asm
@@ -26,6 +26,10 @@
# Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA.
+#ifdef __ELF__
+.section .note.GNU-stack,""
+#endif
+
.set noreorder
.set noat
diff --git a/contrib/gcc/config/alpha/t-osf4 b/contrib/gcc/config/alpha/t-osf4
index fe747a3..58ce6c2 100644
--- a/contrib/gcc/config/alpha/t-osf4
+++ b/contrib/gcc/config/alpha/t-osf4
@@ -16,8 +16,12 @@ SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
-Wl,-hidden_symbol,pthread\* -Wl,-hidden_symbol,__pthread\* \
-Wl,-hidden_symbol,sched_get_\* -Wl,-hidden_symbol,sched_yield \
-Wl,-msym -Wl,-set_version,gcc.1 -Wl,-soname,$(SHLIB_SONAME) \
- -o $(SHLIB_NAME) @multilib_flags@ $(SHLIB_OBJS) -lc && \
+ -o $(SHLIB_NAME).tmp @multilib_flags@ $(SHLIB_OBJS) -lc && \
rm -f $(SHLIB_SONAME) && \
+ if [ -f $(SHLIB_NAME) ]; then \
+ mv -f $(SHLIB_NAME) $(SHLIB_NAME).backup; \
+ else true; fi && \
+ mv $(SHLIB_NAME).tmp $(SHLIB_NAME) && \
$(LN_S) $(SHLIB_NAME) $(SHLIB_SONAME)
# $(slibdir) double quoted to protect it from expansion while building
# libgcc.mk. We want this delayed until actual install time.
diff --git a/contrib/gcc/config/arm/arm-protos.h b/contrib/gcc/config/arm/arm-protos.h
index 471254e..2da99b8 100644
--- a/contrib/gcc/config/arm/arm-protos.h
+++ b/contrib/gcc/config/arm/arm-protos.h
@@ -1,5 +1,6 @@
/* Prototypes for exported functions defined in arm.c and pe.c
- Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2005
+ Free Software Foundation, Inc.
Contributed by Richard Earnshaw (rearnsha@arm.com)
Minor hacks by Nick Clifton (nickc@cygnus.com)
@@ -138,6 +139,7 @@ extern int arm_debugger_arg_offset (int, rtx);
extern int arm_is_longcall_p (rtx, int, int);
extern int arm_emit_vector_const (FILE *, rtx);
extern const char * arm_output_load_gr (rtx *);
+extern int arm_eliminable_register (rtx);
#if defined TREE_CODE
extern rtx arm_function_arg (CUMULATIVE_ARGS *, enum machine_mode, tree, int);
diff --git a/contrib/gcc/config/arm/arm.c b/contrib/gcc/config/arm/arm.c
index 91e4486..9518894 100644
--- a/contrib/gcc/config/arm/arm.c
+++ b/contrib/gcc/config/arm/arm.c
@@ -1,6 +1,6 @@
/* Output routines for GCC for ARM.
Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
- 2002, 2003, 2004 Free Software Foundation, Inc.
+ 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
and Martin Simmons (@harleqn.co.uk).
More major hacks by Richard Earnshaw (rearnsha@arm.com).
@@ -4056,6 +4056,16 @@ cirrus_shift_const (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
&& INTVAL (op) < 64);
}
+/* Return true if X is a register that will be eliminated later on. */
+int
+arm_eliminable_register (rtx x)
+{
+ return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
+ || REGNO (x) == ARG_POINTER_REGNUM
+ || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
+ && REGNO (x) <= LAST_VIRTUAL_REGISTER));
+}
+
/* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
Use by the Cirrus Maverick code which has to workaround
a hardware bug triggered by such instructions. */
@@ -4569,33 +4579,42 @@ adjacent_mem_locations (rtx a, rtx b)
|| (GET_CODE (XEXP (b, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
{
- int val0 = 0, val1 = 0;
- int reg0, reg1;
-
+ HOST_WIDE_INT val0 = 0, val1 = 0;
+ rtx reg0, reg1;
+ int val_diff;
+
if (GET_CODE (XEXP (a, 0)) == PLUS)
{
- reg0 = REGNO (XEXP (XEXP (a, 0), 0));
+ reg0 = XEXP (XEXP (a, 0), 0);
val0 = INTVAL (XEXP (XEXP (a, 0), 1));
}
else
- reg0 = REGNO (XEXP (a, 0));
+ reg0 = XEXP (a, 0);
if (GET_CODE (XEXP (b, 0)) == PLUS)
{
- reg1 = REGNO (XEXP (XEXP (b, 0), 0));
+ reg1 = XEXP (XEXP (b, 0), 0);
val1 = INTVAL (XEXP (XEXP (b, 0), 1));
}
else
- reg1 = REGNO (XEXP (b, 0));
+ reg1 = XEXP (b, 0);
/* Don't accept any offset that will require multiple
instructions to handle, since this would cause the
arith_adjacentmem pattern to output an overlong sequence. */
if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
return 0;
-
- return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
+
+ /* Don't allow an eliminable register: register elimination can make
+ the offset too large. */
+ if (arm_eliminable_register (reg0))
+ return 0;
+
+ val_diff = val1 - val0;
+ return ((REGNO (reg0) == REGNO (reg1))
+ && (val_diff == 4 || val_diff == -4));
}
+
return 0;
}
@@ -7301,7 +7320,6 @@ output_call_mem (rtx *operands)
return "";
}
-
/* Output a move from arm registers to an fpa registers.
OPERANDS[0] is an fpa register.
OPERANDS[1] is the first registers of an arm register pair. */
diff --git a/contrib/gcc/config/arm/arm.h b/contrib/gcc/config/arm/arm.h
index 3a13d91..94d8b94 100644
--- a/contrib/gcc/config/arm/arm.h
+++ b/contrib/gcc/config/arm/arm.h
@@ -1396,7 +1396,7 @@ enum reg_class
: NO_REGS)
#define THUMB_SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
- ((CLASS) != LO_REGS \
+ ((CLASS) != LO_REGS && (CLASS) != BASE_REGS \
? ((true_regnum (X) == -1 ? LO_REGS \
: (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
: NO_REGS)) \
diff --git a/contrib/gcc/config/arm/arm.md b/contrib/gcc/config/arm/arm.md
index 9f10d10..57926ba 100644
--- a/contrib/gcc/config/arm/arm.md
+++ b/contrib/gcc/config/arm/arm.md
@@ -5960,22 +5960,24 @@
[(set (pc)
(if_then_else
(match_operator 5 "equality_operator"
- [(and:SI (not:SI (match_operand:SI 3 "s_register_operand" "l,l,l,l"))
- (match_operand:SI 2 "s_register_operand" "0,1,1,1"))
+ [(and:SI (not:SI (match_operand:SI 3 "s_register_operand" "l,l,l,l,l"))
+ (match_operand:SI 2 "s_register_operand" "0,1,1,1,1"))
(const_int 0)])
(label_ref (match_operand 4 "" ""))
(pc)))
- (set (match_operand:SI 0 "thumb_cbrch_target_operand" "=l,*?h,*?m,*?m")
+ (set (match_operand:SI 0 "thumb_cbrch_target_operand" "=!l,l,*?h,*?m,*?m")
(and:SI (not:SI (match_dup 3)) (match_dup 2)))
- (clobber (match_scratch:SI 1 "=X,l,&l,&l"))]
+ (clobber (match_scratch:SI 1 "=X,l,l,&l,&l"))]
"TARGET_THUMB"
"*
{
if (which_alternative == 0)
output_asm_insn (\"bic\\t%0, %3\", operands);
- else if (which_alternative == 1)
+ else if (which_alternative <= 2)
{
output_asm_insn (\"bic\\t%1, %3\", operands);
+ /* It's ok if OP0 is a lo-reg, even though the mov will set the
+ conditions again, since we're only testing for equality. */
output_asm_insn (\"mov\\t%0, %1\", operands);
}
else
@@ -6234,10 +6236,10 @@
case 1:
output_asm_insn (\"cmn\t%1, %2\", operands);
break;
- case 3:
+ case 2:
output_asm_insn (\"add\t%0, %1, %2\", operands);
break;
- case 4:
+ case 3:
output_asm_insn (\"add\t%0, %0, %2\", operands);
break;
}
@@ -7128,8 +7130,8 @@
(const_string "no")))
(set (attr "length")
(if_then_else
- (and (ge (minus (match_dup 0) (pc)) (const_int -2048))
- (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2048)))
(const_int 2)
(const_int 4)))]
)
diff --git a/contrib/gcc/config/arm/t-netbsd b/contrib/gcc/config/arm/t-netbsd
index 77e6227..533fab9 100644
--- a/contrib/gcc/config/arm/t-netbsd
+++ b/contrib/gcc/config/arm/t-netbsd
@@ -11,8 +11,12 @@ SHLIB_OBJS = @shlib_objs@
SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
-Wl,-soname,$(SHLIB_SONAME) \
- -o $(SHLIB_NAME) @multilib_flags@ $(SHLIB_OBJS) -lc && \
+ -o $(SHLIB_NAME).tmp @multilib_flags@ $(SHLIB_OBJS) -lc && \
rm -f $(SHLIB_SONAME) && \
+ if [ -f $(SHLIB_NAME) ]; then \
+ mv -f $(SHLIB_NAME) $(SHLIB_NAME).backup; \
+ else true; fi && \
+ mv $(SHLIB_NAME).tmp $(SHLIB_NAME) && \
$(LN_S) $(SHLIB_NAME) $(SHLIB_SONAME)
# $(slibdir) double quoted to protect it from expansion while building
# libgcc.mk. We want this delayed until actual install time.
diff --git a/contrib/gcc/config/arm/t-rtems b/contrib/gcc/config/arm/t-rtems
new file mode 100644
index 0000000..52d14ba
--- /dev/null
+++ b/contrib/gcc/config/arm/t-rtems
@@ -0,0 +1,10 @@
+# Custom rtems multilibs
+
+MULTILIB_OPTIONS = marm/mthumb
+MULTILIB_DIRNAMES = arm thumb
+MULTILIB_EXCEPTIONS =
+MULTILIB_MATCHES = marm=mno-thumb
+
+MULTILIB_OPTIONS += msoft-float/mhard-float
+MULTILIB_DIRNAMES += soft fpu
+MULTILIB_EXCEPTIONS += *mthumb/*mhard-float*
diff --git a/contrib/gcc/config/darwin-protos.h b/contrib/gcc/config/darwin-protos.h
index 41bad64..3305112 100644
--- a/contrib/gcc/config/darwin-protos.h
+++ b/contrib/gcc/config/darwin-protos.h
@@ -65,8 +65,6 @@ extern const char *darwin_strip_name_encoding (const char *);
extern void machopic_finish (FILE *);
-extern void machopic_output_possible_stub_label (FILE *, const char*);
-
extern void darwin_exception_section (void);
extern void darwin_eh_frame_section (void);
extern void machopic_select_section (tree, int, unsigned HOST_WIDE_INT);
diff --git a/contrib/gcc/config/darwin.c b/contrib/gcc/config/darwin.c
index 8005ecd..c946e59 100644
--- a/contrib/gcc/config/darwin.c
+++ b/contrib/gcc/config/darwin.c
@@ -900,10 +900,6 @@ machopic_finish (FILE *asm_out_file)
if (! TREE_USED (temp))
continue;
- /* If the symbol is actually defined, we don't need a stub. */
- if (sym_name[0] == '!' && sym_name[1] == 'T')
- continue;
-
sym_name = darwin_strip_name_encoding (sym_name);
sym = alloca (strlen (sym_name) + 2);
@@ -1096,37 +1092,6 @@ update_non_lazy_ptrs (const char *name)
}
}
-/* Function NAME is being defined, and its label has just been output.
- If there's already a reference to a stub for this function, we can
- just emit the stub label now and we don't bother emitting the stub later. */
-
-void
-machopic_output_possible_stub_label (FILE *file, const char *name)
-{
- tree temp;
-
- /* Ensure we're looking at a section-encoded name. */
- if (name[0] != '!' || (name[1] != 't' && name[1] != 'T'))
- return;
-
- for (temp = machopic_stubs;
- temp != NULL_TREE;
- temp = TREE_CHAIN (temp))
- {
- const char *sym_name;
-
- sym_name = IDENTIFIER_POINTER (TREE_VALUE (temp));
- if (sym_name[0] == '!' && (sym_name[1] == 'T' || sym_name[1] == 't')
- && ! strcmp (name+2, sym_name+2))
- {
- ASM_OUTPUT_LABEL (file, IDENTIFIER_POINTER (TREE_PURPOSE (temp)));
- /* Avoid generating a stub for this. */
- TREE_USED (temp) = 0;
- break;
- }
- }
-}
-
/* Scan the list of stubs and update any recorded names whose
stripped name matches the argument. */
diff --git a/contrib/gcc/config/darwin.h b/contrib/gcc/config/darwin.h
index 045091a..c6ff93f 100644
--- a/contrib/gcc/config/darwin.h
+++ b/contrib/gcc/config/darwin.h
@@ -99,7 +99,13 @@ Boston, MA 02111-1307, USA. */
Note that an option name with a prefix that matches another option
name, that also takes an argument, needs to be modified so the
prefix is different, otherwise a '*' after the shorter option will
- match with the longer one. */
+ match with the longer one.
+
+ The SUBTARGET_OPTION_TRANSLATE_TABLE macro, which _must_ be defined
+ in gcc/config/{i386,rs6000}/darwin.h, should contain any additional
+ command-line option translations specific to the particular target
+ architecture. */
+
#define TARGET_OPTION_TRANSLATE_TABLE \
{ "-all_load", "-Zall_load" }, \
{ "-allowable_client", "-Zallowable_client" }, \
@@ -126,7 +132,8 @@ Boston, MA 02111-1307, USA. */
{ "-multi_module", "-Zmulti_module" }, \
{ "-static", "-static -Wa,-static" }, \
{ "-single_module", "-Zsingle_module" }, \
- { "-unexported_symbols_list", "-Zunexported_symbols_list" }
+ { "-unexported_symbols_list", "-Zunexported_symbols_list" }, \
+ SUBTARGET_OPTION_TRANSLATE_TABLE
/* These compiler options take n arguments. */
@@ -390,9 +397,6 @@ do { text_section (); \
|| DECL_INITIAL (DECL)) \
(* targetm.encode_section_info) (DECL, DECL_RTL (DECL), false); \
ASM_OUTPUT_LABEL (FILE, xname); \
- /* Avoid generating stubs for functions we've just defined by \
- outputting any required stub name label now. */ \
- machopic_output_possible_stub_label (FILE, xname); \
} while (0)
#define ASM_DECLARE_CONSTANT_NAME(FILE, NAME, EXP, SIZE) \
diff --git a/contrib/gcc/config/freebsd-spec.h b/contrib/gcc/config/freebsd-spec.h
index e4459ba..a98b0e5 100644
--- a/contrib/gcc/config/freebsd-spec.h
+++ b/contrib/gcc/config/freebsd-spec.h
@@ -107,12 +107,12 @@ Boston, MA 02111-1307, USA. */
500016, select the appropriate libc, depending on whether we're
doing profiling or need threads support. At __FreeBSD_version
500016 and later, when threads support is requested include both
- -lc and -lc_r instead of only -lc_r. To make matters interesting,
- we can't actually use __FreeBSD_version provided by <osreldate.h>
- directly since it breaks cross-compiling. As a final twist, make
- it a hard error if -pthread is provided on the command line and gcc
- was configured with --disable-threads (this will help avoid bug
- reports from users complaining about threading when they
+ -lc and the threading lib instead of only -lc_r. To make matters
+ interesting, we can't actually use __FreeBSD_version provided by
+ <osreldate.h> directly since it breaks cross-compiling. As a final
+ twist, make it a hard error if -pthread is provided on the command
+ line and gcc was configured with --disable-threads (this will help
+ avoid bug reports from users complaining about threading when they
misconfigured the gcc bootstrap but are later consulting FreeBSD
manual pages that refer to the mythical -pthread option). */
@@ -129,13 +129,7 @@ is built with the --enable-threads configure-time option.} \
%{pg: -lc_p} \
}"
#else
-#if FBSD_MAJOR >= 5
-#define FBSD_LIB_SPEC " \
- %{!shared: \
- %{!pg: %{pthread:-lc_r} -lc} \
- %{pg: %{pthread:-lc_r_p} -lc_p} \
- }"
-#else
+#if FBSD_MAJOR < 5
#define FBSD_LIB_SPEC " \
%{!shared: \
%{!pg: \
@@ -145,6 +139,12 @@ is built with the --enable-threads configure-time option.} \
%{!pthread:-lc_p} \
%{pthread:-lc_r_p}} \
}"
+#else
+#define FBSD_LIB_SPEC " \
+ %{!shared: \
+ %{!pg: %{pthread:-lpthread} -lc} \
+ %{pg: %{pthread:-lpthread_p} -lc_p} \
+ }"
#endif
#endif
diff --git a/contrib/gcc/config/i386/cygwin1.c b/contrib/gcc/config/i386/cygwin1.c
index 2cab96c..88c44fc 100644
--- a/contrib/gcc/config/i386/cygwin1.c
+++ b/contrib/gcc/config/i386/cygwin1.c
@@ -30,13 +30,13 @@ mingw_scan (int argc ATTRIBUTE_UNUSED,
const char *const *argv,
char **spec_machine)
{
- putenv ("GCC_CYGWIN_MINGW=0");
+ putenv (xstrdup ("GCC_CYGWIN_MINGW=0"));
while (*++argv)
if (strcmp (*argv, "-mno-win32") == 0)
- putenv ("GCC_CYGWIN_WIN32=0");
+ putenv (xstrdup ("GCC_CYGWIN_WIN32=0"));
else if (strcmp (*argv, "-mwin32") == 0)
- putenv ("GCC_CYGWIN_WIN32=1");
+ putenv (xstrdup ("GCC_CYGWIN_WIN32=1"));
else if (strcmp (*argv, "-mno-cygwin") == 0)
{
char *p = strstr (*spec_machine, "-cygwin");
@@ -48,7 +48,7 @@ mingw_scan (int argc ATTRIBUTE_UNUSED,
strcpy (s + len, "-mingw32");
*spec_machine = s;
}
- putenv ("GCC_CYGWIN_MINGW=1");
+ putenv (xstrdup ("GCC_CYGWIN_MINGW=1"));
}
return;
}
diff --git a/contrib/gcc/config/i386/darwin.h b/contrib/gcc/config/i386/darwin.h
index fd501bf..8246b9e 100644
--- a/contrib/gcc/config/i386/darwin.h
+++ b/contrib/gcc/config/i386/darwin.h
@@ -41,6 +41,10 @@ Boston, MA 02111-1307, USA. */
#undef CC1_SPEC
#define CC1_SPEC "%{!static:-fPIC}"
+/* Use the following macro for any Darwin/x86-specific command-line option
+ translation. */
+#define SUBTARGET_OPTION_TRANSLATE_TABLE
+
#define ASM_SPEC "-arch i386 \
%{Zforce_cpusubtype_ALL:-force_cpusubtype_ALL} \
%{!Zforce_cpusubtype_ALL:%{mmmx:-force_cpusubtype_ALL}\
diff --git a/contrib/gcc/config/i386/emmintrin.h b/contrib/gcc/config/i386/emmintrin.h
index abe450a..2869063 100644
--- a/contrib/gcc/config/i386/emmintrin.h
+++ b/contrib/gcc/config/i386/emmintrin.h
@@ -34,7 +34,7 @@
#include <xmmintrin.h>
/* SSE2 */
-typedef int __v2df __attribute__ ((mode (V2DF)));
+typedef double __v2df __attribute__ ((mode (V2DF)));
typedef int __v2di __attribute__ ((mode (V2DI)));
typedef int __v4si __attribute__ ((mode (V4SI)));
typedef int __v8hi __attribute__ ((mode (V8HI)));
diff --git a/contrib/gcc/config/i386/freebsd.h b/contrib/gcc/config/i386/freebsd.h
index 9e538e9..4fc7a9a 100644
--- a/contrib/gcc/config/i386/freebsd.h
+++ b/contrib/gcc/config/i386/freebsd.h
@@ -138,12 +138,5 @@ Boston, MA 02111-1307, USA. */
/* FreeBSD sets the rounding precision of the FPU to 53 bits. Let the
compiler get the contents of <float.h> and std::numeric_limits correct. */
-#define SUBTARGET_OVERRIDE_OPTIONS \
- do { \
- if (!TARGET_64BIT) { \
- REAL_MODE_FORMAT (XFmode) \
- = &ieee_extended_intel_96_round_53_format; \
- REAL_MODE_FORMAT (TFmode) \
- = &ieee_extended_intel_96_round_53_format; \
- } \
- } while (0)
+#undef TARGET_96_ROUND_53_LONG_DOUBLE
+#define TARGET_96_ROUND_53_LONG_DOUBLE (!TARGET_64BIT)
diff --git a/contrib/gcc/config/i386/gthr-win32.c b/contrib/gcc/config/i386/gthr-win32.c
index 4e2b282..c53369b 100644
--- a/contrib/gcc/config/i386/gthr-win32.c
+++ b/contrib/gcc/config/i386/gthr-win32.c
@@ -31,11 +31,13 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
the executable file might be covered by the GNU General Public License. */
+#include <windows.h>
#ifndef __GTHREAD_HIDE_WIN32API
# define __GTHREAD_HIDE_WIN32API 1
#endif
+#undef __GTHREAD_I486_INLINE_LOCK_PRIMITIVES
+#define __GTHREAD_I486_INLINE_LOCK_PRIMITIVES
#include <gthr-win32.h>
-#include <windows.h>
/* Windows32 threads specific definitions. The windows32 threading model
does not map well into pthread-inspired gcc's threading model, and so
@@ -61,10 +63,9 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
This may cause incorrect error return due to truncation values on
hw where sizeof (DWORD) > sizeof (int).
- 3. We might consider using Critical Sections instead of Windows32
- mutexes for better performance, but emulating __gthread_mutex_trylock
- interface becomes more complicated (Win9x does not support
- TryEnterCriticalSectioni, while NT does).
+ 3. We are currently using a special mutex instead of the Critical
+ Sections, since Win9x does not support TryEnterCriticalSection
+ (while NT does).
The basic framework should work well enough. In the long term, GCC
needs to use Structured Exception Handling on Windows32. */
@@ -145,23 +146,29 @@ __gthr_win32_setspecific (__gthread_key_t key, const void *ptr)
void
__gthr_win32_mutex_init_function (__gthread_mutex_t *mutex)
{
- /* Create unnamed mutex with default security attr and no initial owner. */
- *mutex = CreateMutex (NULL, 0, NULL);
+ mutex->counter = -1;
+ mutex->sema = CreateSemaphore (NULL, 0, 65535, NULL);
}
int
__gthr_win32_mutex_lock (__gthread_mutex_t *mutex)
{
- if (WaitForSingleObject (*mutex, INFINITE) == WAIT_OBJECT_0)
+ if (InterlockedIncrement (&mutex->counter) == 0 ||
+ WaitForSingleObject (mutex->sema, INFINITE) == WAIT_OBJECT_0)
return 0;
else
- return 1;
+ {
+ /* WaitForSingleObject returns WAIT_FAILED, and we can only do
+ some best-effort cleanup here. */
+ InterlockedDecrement (&mutex->counter);
+ return 1;
+ }
}
int
__gthr_win32_mutex_trylock (__gthread_mutex_t *mutex)
{
- if (WaitForSingleObject (*mutex, 0) == WAIT_OBJECT_0)
+ if (__GTHR_W32_InterlockedCompareExchange (&mutex->counter, 0, -1) < 0)
return 0;
else
return 1;
@@ -170,5 +177,8 @@ __gthr_win32_mutex_trylock (__gthread_mutex_t *mutex)
int
__gthr_win32_mutex_unlock (__gthread_mutex_t *mutex)
{
- return (ReleaseMutex (*mutex) != 0) ? 0 : 1;
+ if (InterlockedDecrement (&mutex->counter) >= 0)
+ return ReleaseSemaphore (mutex->sema, 1, NULL) ? 0 : 1;
+ else
+ return 0;
}
diff --git a/contrib/gcc/config/i386/i386-modes.def b/contrib/gcc/config/i386/i386-modes.def
index 89c83c4..36c6d42 100644
--- a/contrib/gcc/config/i386/i386-modes.def
+++ b/contrib/gcc/config/i386/i386-modes.def
@@ -29,6 +29,8 @@ Boston, MA 02111-1307, USA. */
FLOAT_MODE (XF, 12, ieee_extended_intel_96_format);
ADJUST_FLOAT_FORMAT (XF, (TARGET_128BIT_LONG_DOUBLE
? &ieee_extended_intel_128_format
+ : TARGET_96_ROUND_53_LONG_DOUBLE
+ ? &ieee_extended_intel_96_round_53_format
: &ieee_extended_intel_96_format));
ADJUST_BYTESIZE (XF, TARGET_128BIT_LONG_DOUBLE ? 16 : 12);
ADJUST_ALIGNMENT (XF, TARGET_128BIT_LONG_DOUBLE ? 16 : 4);
diff --git a/contrib/gcc/config/i386/i386-protos.h b/contrib/gcc/config/i386/i386-protos.h
index cc1bb81..ea0e8f6 100644
--- a/contrib/gcc/config/i386/i386-protos.h
+++ b/contrib/gcc/config/i386/i386-protos.h
@@ -93,6 +93,7 @@ extern int memory_displacement_operand (rtx, enum machine_mode);
extern int cmpsi_operand (rtx, enum machine_mode);
extern int long_memory_operand (rtx, enum machine_mode);
extern int aligned_operand (rtx, enum machine_mode);
+extern int compare_operator (rtx, enum machine_mode);
extern enum machine_mode ix86_cc_mode (enum rtx_code, rtx, rtx);
extern int ix86_expand_movstr (rtx, rtx, rtx, rtx);
diff --git a/contrib/gcc/config/i386/i386.c b/contrib/gcc/config/i386/i386.c
index c2f59c9..9504583 100644
--- a/contrib/gcc/config/i386/i386.c
+++ b/contrib/gcc/config/i386/i386.c
@@ -522,7 +522,14 @@ const int x86_sse_typeless_stores = m_ATHLON_K8;
const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4;
const int x86_use_ffreep = m_ATHLON_K8;
const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
-const int x86_inter_unit_moves = ~(m_ATHLON_K8);
+
+/* ??? HACK! The following is a lie. SSE can hold e.g. SImode, and
+ indeed *must* be able to hold SImode so that SSE2 shifts are able
+ to work right. But this can result in some mighty surprising
+ register allocation when building kernels. Turning this off should
+ make us less likely to all-of-the-sudden select an SSE register. */
+const int x86_inter_unit_moves = 0; /* ~(m_ATHLON_K8) */
+
const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_PPRO;
/* In case the average insn count for single function invocation is
@@ -2536,6 +2543,34 @@ function_arg_advance (CUMULATIVE_ARGS *cum, /* current arg information */
return;
}
+/* A subroutine of function_arg. We want to pass a parameter whose nominal
+ type is MODE in REGNO. We try to minimize ABI variation, so MODE may not
+ actually be valid for REGNO with the current ISA. In this case, ALT_MODE
+ is used instead. It must be the same size as MODE, and must be known to
+ be valid for REGNO. Finally, ORIG_MODE is the original mode of the
+ parameter, as seen by the type system. This may be different from MODE
+ when we're mucking with things minimizing ABI variations.
+
+ Returns a REG or a PARALLEL as appropriate. */
+
+static rtx
+gen_reg_or_parallel (enum machine_mode mode, enum machine_mode alt_mode,
+ enum machine_mode orig_mode, unsigned int regno)
+{
+ rtx tmp;
+
+ if (HARD_REGNO_MODE_OK (regno, mode))
+ tmp = gen_rtx_REG (mode, regno);
+ else
+ {
+ tmp = gen_rtx_REG (alt_mode, regno);
+ tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
+ tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
+ }
+
+ return tmp;
+}
+
/* Define where to put the arguments to a function.
Value is zero to push the argument on the stack,
or a hard register in which to store the argument.
@@ -2550,12 +2585,11 @@ function_arg_advance (CUMULATIVE_ARGS *cum, /* current arg information */
(otherwise it is an extra parameter matching an ellipsis). */
rtx
-function_arg (CUMULATIVE_ARGS *cum, /* current arg information */
- enum machine_mode mode, /* current arg mode */
- tree type, /* type of the argument or 0 if lib support */
- int named) /* != 0 for normal args, == 0 for ... args */
+function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
+ tree type, int named)
{
- rtx ret = NULL_RTX;
+ enum machine_mode mode = orig_mode;
+ rtx ret = NULL_RTX;
int bytes =
(mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
@@ -2628,7 +2662,8 @@ function_arg (CUMULATIVE_ARGS *cum, /* current arg information */
"changes the ABI");
}
if (cum->sse_nregs)
- ret = gen_rtx_REG (mode, cum->sse_regno + FIRST_SSE_REG);
+ ret = gen_reg_or_parallel (mode, TImode, orig_mode,
+ cum->sse_regno + FIRST_SSE_REG);
}
break;
case V8QImode:
@@ -2644,7 +2679,8 @@ function_arg (CUMULATIVE_ARGS *cum, /* current arg information */
"changes the ABI");
}
if (cum->mmx_nregs)
- ret = gen_rtx_REG (mode, cum->mmx_regno + FIRST_MMX_REG);
+ ret = gen_reg_or_parallel (mode, DImode, orig_mode,
+ cum->mmx_regno + FIRST_MMX_REG);
}
break;
}
@@ -4319,6 +4355,12 @@ aligned_operand (rtx op, enum machine_mode mode)
/* Didn't find one -- this must be an aligned address. */
return 1;
}
+
+int
+compare_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ return GET_CODE (op) == COMPARE;
+}
/* Initialize the table of extra 80387 mathematical constants. */
@@ -5775,45 +5817,40 @@ ix86_find_base_term (rtx x)
bool
legitimate_constant_p (rtx x)
{
- rtx inner;
-
switch (GET_CODE (x))
{
- case SYMBOL_REF:
- /* TLS symbols are not constant. */
- if (tls_symbolic_operand (x, Pmode))
- return false;
- break;
-
case CONST:
- inner = XEXP (x, 0);
-
- /* Offsets of TLS symbols are never valid.
- Discourage CSE from creating them. */
- if (GET_CODE (inner) == PLUS
- && tls_symbolic_operand (XEXP (inner, 0), Pmode))
- return false;
+ x = XEXP (x, 0);
- if (GET_CODE (inner) == PLUS
- || GET_CODE (inner) == MINUS)
+ if (GET_CODE (x) == PLUS)
{
- if (GET_CODE (XEXP (inner, 1)) != CONST_INT)
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
return false;
- inner = XEXP (inner, 0);
+ x = XEXP (x, 0);
}
/* Only some unspecs are valid as "constants". */
- if (GET_CODE (inner) == UNSPEC)
- switch (XINT (inner, 1))
+ if (GET_CODE (x) == UNSPEC)
+ switch (XINT (x, 1))
{
case UNSPEC_TPOFF:
case UNSPEC_NTPOFF:
- return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
+ return local_exec_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
case UNSPEC_DTPOFF:
- return local_dynamic_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
+ return local_dynamic_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
default:
return false;
}
+
+ /* We must have drilled down to a symbol. */
+ if (!symbolic_operand (x, Pmode))
+ return false;
+ /* FALLTHRU */
+
+ case SYMBOL_REF:
+ /* TLS symbols are never valid. */
+ if (tls_symbolic_operand (x, Pmode))
+ return false;
break;
default:
@@ -10609,10 +10646,11 @@ ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
else if (GET_CODE (operand) == CONST_DOUBLE)
{
REAL_VALUE_TYPE r;
- long l[3];
+ long l[4];
REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
real_to_target (l, &r, mode);
+
/* Do not use shift by 32 to avoid warning on 32bit systems. */
if (HOST_BITS_PER_WIDE_INT >= 64)
parts[0]
@@ -10622,6 +10660,7 @@ ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
DImode);
else
parts[0] = immed_double_const (l[0], l[1], DImode);
+
if (upper_mode == SImode)
parts[1] = gen_int_mode (l[2], SImode);
else if (HOST_BITS_PER_WIDE_INT >= 64)
@@ -14896,10 +14935,29 @@ ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
if (FP_REGNO_P (regno))
return VALID_FP_MODE_P (mode);
if (SSE_REGNO_P (regno))
- return (TARGET_SSE ? VALID_SSE_REG_MODE (mode) : 0);
+ {
+ /* HACK! We didn't change all of the constraints for SSE1 for the
+ scalar modes on the branch. Fortunately, they're not required
+ for ABI compatibility. */
+ if (!TARGET_SSE2 && !VECTOR_MODE_P (mode))
+ return VALID_SSE_REG_MODE (mode);
+
+ /* We implement the move patterns for all vector modes into and
+ out of SSE registers, even when no operation instructions
+ are available. */
+ return (VALID_SSE_REG_MODE (mode)
+ || VALID_SSE2_REG_MODE (mode)
+ || VALID_MMX_REG_MODE (mode)
+ || VALID_MMX_REG_MODE_3DNOW (mode));
+ }
if (MMX_REGNO_P (regno))
- return (TARGET_MMX
- ? VALID_MMX_REG_MODE (mode) || VALID_MMX_REG_MODE_3DNOW (mode) : 0);
+ {
+ /* We implement the move patterns for 3DNOW modes even in MMX mode,
+ so if the register is available at all, then we can move data of
+ the given mode into or out of it. */
+ return (VALID_MMX_REG_MODE (mode)
+ || VALID_MMX_REG_MODE_3DNOW (mode));
+ }
/* We handle both integer and floats in the general purpose registers.
In future we should be able to handle vector modes as well. */
if (!VALID_INT_MODE_P (mode) && !VALID_FP_MODE_P (mode))
@@ -15235,7 +15293,9 @@ ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
return false;
case FLOAT_EXTEND:
- if (!TARGET_SSE_MATH || !VALID_SSE_REG_MODE (mode))
+ if (!TARGET_SSE_MATH
+ || mode == XFmode
+ || (mode == DFmode && !TARGET_SSE2))
*total = 0;
return false;
diff --git a/contrib/gcc/config/i386/i386.h b/contrib/gcc/config/i386/i386.h
index f5be340..8a912d5 100644
--- a/contrib/gcc/config/i386/i386.h
+++ b/contrib/gcc/config/i386/i386.h
@@ -447,6 +447,10 @@ extern int x86_prefetch_sse;
redefines this to 1. */
#define TARGET_MACHO 0
+/* Subtargets may reset this to 1 in order to enable 96-bit long double
+ with the rounding mode forced to 53 bits. */
+#define TARGET_96_ROUND_53_LONG_DOUBLE 0
+
/* This macro is similar to `TARGET_SWITCHES' but defines names of
command options that have values. Its definition is an
initializer with a subgrouping for each command option.
@@ -1059,14 +1063,11 @@ do { \
#define VALID_SSE2_REG_MODE(MODE) \
((MODE) == V16QImode || (MODE) == V8HImode || (MODE) == V2DFmode \
- || (MODE) == V2DImode)
+ || (MODE) == V2DImode || (MODE) == DFmode)
#define VALID_SSE_REG_MODE(MODE) \
((MODE) == TImode || (MODE) == V4SFmode || (MODE) == V4SImode \
- || (MODE) == SFmode || (MODE) == TFmode \
- /* Always accept SSE2 modes so that xmmintrin.h compiles. */ \
- || VALID_SSE2_REG_MODE (MODE) \
- || (TARGET_SSE2 && ((MODE) == DFmode || VALID_MMX_REG_MODE (MODE))))
+ || (MODE) == SFmode || (MODE) == TFmode)
#define VALID_MMX_REG_MODE_3DNOW(MODE) \
((MODE) == V2SFmode || (MODE) == SFmode)
@@ -2990,7 +2991,8 @@ do { \
{"zero_extended_scalar_load_operand", {MEM}}, \
{"vector_move_operand", {CONST_VECTOR, SUBREG, REG, MEM}}, \
{"no_seg_address_operand", {CONST_INT, CONST_DOUBLE, CONST, SYMBOL_REF, \
- LABEL_REF, SUBREG, REG, MEM, PLUS, MULT}},
+ LABEL_REF, SUBREG, REG, MEM, PLUS, MULT}}, \
+ {"compare_operator", {COMPARE}},
/* A list of predicates that do special things with modes, and so
should not elicit warnings for VOIDmode match_operand. */
diff --git a/contrib/gcc/config/i386/i386.md b/contrib/gcc/config/i386/i386.md
index a190d23..93d9dcd 100644
--- a/contrib/gcc/config/i386/i386.md
+++ b/contrib/gcc/config/i386/i386.md
@@ -1261,10 +1261,9 @@
""
"xchg{l}\t%1, %0"
[(set_attr "type" "imov")
+ (set_attr "mode" "SI")
(set_attr "pent_pair" "np")
(set_attr "athlon_decode" "vector")
- (set_attr "mode" "SI")
- (set_attr "modrm" "0")
(set_attr "ppro_uops" "few")])
(define_expand "movhi"
@@ -1377,12 +1376,12 @@
(match_operand:HI 1 "register_operand" "+r"))
(set (match_dup 1)
(match_dup 0))]
- "TARGET_PARTIAL_REG_STALL"
- "xchg{w}\t%1, %0"
+ "!TARGET_PARTIAL_REG_STALL || optimize_size"
+ "xchg{l}\t%k1, %k0"
[(set_attr "type" "imov")
+ (set_attr "mode" "SI")
(set_attr "pent_pair" "np")
- (set_attr "mode" "HI")
- (set_attr "modrm" "0")
+ (set_attr "athlon_decode" "vector")
(set_attr "ppro_uops" "few")])
(define_insn "*swaphi_2"
@@ -1390,12 +1389,12 @@
(match_operand:HI 1 "register_operand" "+r"))
(set (match_dup 1)
(match_dup 0))]
- "! TARGET_PARTIAL_REG_STALL"
- "xchg{l}\t%k1, %k0"
+ "TARGET_PARTIAL_REG_STALL"
+ "xchg{w}\t%1, %0"
[(set_attr "type" "imov")
+ (set_attr "mode" "HI")
(set_attr "pent_pair" "np")
- (set_attr "mode" "SI")
- (set_attr "modrm" "0")
+ (set_attr "athlon_decode" "vector")
(set_attr "ppro_uops" "few")])
(define_expand "movstricthi"
@@ -1543,17 +1542,30 @@
DONE;
})
-(define_insn "*swapqi"
+(define_insn "*swapqi_1"
[(set (match_operand:QI 0 "register_operand" "+r")
(match_operand:QI 1 "register_operand" "+r"))
(set (match_dup 1)
(match_dup 0))]
- ""
- "xchg{b}\t%1, %0"
+ "!TARGET_PARTIAL_REG_STALL || optimize_size"
+ "xchg{l}\t%k1, %k0"
[(set_attr "type" "imov")
+ (set_attr "mode" "SI")
(set_attr "pent_pair" "np")
+ (set_attr "athlon_decode" "vector")
+ (set_attr "ppro_uops" "few")])
+
+(define_insn "*swapqi_2"
+ [(set (match_operand:QI 0 "register_operand" "+q")
+ (match_operand:QI 1 "register_operand" "+q"))
+ (set (match_dup 1)
+ (match_dup 0))]
+ "TARGET_PARTIAL_REG_STALL"
+ "xchg{b}\t%1, %0"
+ [(set_attr "type" "imov")
(set_attr "mode" "QI")
- (set_attr "modrm" "0")
+ (set_attr "pent_pair" "np")
+ (set_attr "athlon_decode" "vector")
(set_attr "ppro_uops" "few")])
(define_expand "movstrictqi"
@@ -2108,13 +2120,11 @@
"TARGET_64BIT"
"xchg{q}\t%1, %0"
[(set_attr "type" "imov")
+ (set_attr "mode" "DI")
(set_attr "pent_pair" "np")
(set_attr "athlon_decode" "vector")
- (set_attr "mode" "DI")
- (set_attr "modrm" "0")
(set_attr "ppro_uops" "few")])
-
(define_expand "movsf"
[(set (match_operand:SF 0 "nonimmediate_operand" "")
(match_operand:SF 1 "general_operand" ""))]
@@ -6314,9 +6324,13 @@
}
}
[(set (attr "type")
- (if_then_else (match_operand:QI 2 "incdec_operand" "")
+ (if_then_else (match_operand:QI 1 "incdec_operand" "")
(const_string "incdec")
(const_string "alu1")))
+ (set (attr "memory")
+ (if_then_else (match_operand 1 "memory_operand" "")
+ (const_string "load")
+ (const_string "none")))
(set_attr "mode" "QI")])
(define_insn "*addqi_2"
@@ -7872,18 +7886,21 @@
""
"")
-(define_insn "*testqi_1"
+(define_insn "*testqi_1_maybe_si"
[(set (reg 17)
- (compare (and:QI (match_operand:QI 0 "nonimmediate_operand" "%!*a,q,qm,r")
- (match_operand:QI 1 "general_operand" "n,n,qn,n"))
- (const_int 0)))]
- "ix86_match_ccmode (insn, CCNOmode)
- && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ (compare
+ (and:QI
+ (match_operand:QI 0 "nonimmediate_operand" "%!*a,q,qm,r")
+ (match_operand:QI 1 "general_operand" "n,n,qn,n"))
+ (const_int 0)))]
+ "(GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ && ix86_match_ccmode (insn,
+ GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) >= 0 ? CCNOmode : CCZmode)"
{
if (which_alternative == 3)
{
- if (GET_CODE (operands[1]) == CONST_INT
- && (INTVAL (operands[1]) & 0xffffff00))
+ if (GET_CODE (operands[1]) == CONST_INT && INTVAL (operands[1]) < 0)
operands[1] = GEN_INT (INTVAL (operands[1]) & 0xff);
return "test{l}\t{%1, %k0|%k0, %1}";
}
@@ -7894,6 +7911,21 @@
(set_attr "mode" "QI,QI,QI,SI")
(set_attr "pent_pair" "uv,np,uv,np")])
+(define_insn "*testqi_1"
+ [(set (reg 17)
+ (compare
+ (and:QI
+ (match_operand:QI 0 "nonimmediate_operand" "%!*a,q,qm")
+ (match_operand:QI 1 "general_operand" "n,n,qn"))
+ (const_int 0)))]
+ "(GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ && ix86_match_ccmode (insn, CCNOmode)"
+ "test{b}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "test")
+ (set_attr "modrm" "0,1,1")
+ (set_attr "mode" "QI")
+ (set_attr "pent_pair" "uv,np,uv")])
+
(define_expand "testqi_ext_ccno_0"
[(set (reg:CCNO 17)
(compare:CCNO
@@ -8012,51 +8044,53 @@
"#")
(define_split
- [(set (reg 17)
- (compare (zero_extract
- (match_operand 0 "nonimmediate_operand" "")
- (match_operand 1 "const_int_operand" "")
- (match_operand 2 "const_int_operand" ""))
- (const_int 0)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(zero_extract
+ (match_operand 2 "nonimmediate_operand" "")
+ (match_operand 3 "const_int_operand" "")
+ (match_operand 4 "const_int_operand" ""))
+ (const_int 0)]))]
"ix86_match_ccmode (insn, CCNOmode)"
- [(set (reg:CCNO 17) (compare:CCNO (match_dup 3) (const_int 0)))]
+ [(set (match_dup 0) (match_op_dup 1 [(match_dup 2) (const_int 0)]))]
{
- HOST_WIDE_INT len = INTVAL (operands[1]);
- HOST_WIDE_INT pos = INTVAL (operands[2]);
+ rtx val = operands[2];
+ HOST_WIDE_INT len = INTVAL (operands[3]);
+ HOST_WIDE_INT pos = INTVAL (operands[4]);
HOST_WIDE_INT mask;
enum machine_mode mode, submode;
- mode = GET_MODE (operands[0]);
- if (GET_CODE (operands[0]) == MEM)
+ mode = GET_MODE (val);
+ if (GET_CODE (val) == MEM)
{
/* ??? Combine likes to put non-volatile mem extractions in QImode
no matter the size of the test. So find a mode that works. */
- if (! MEM_VOLATILE_P (operands[0]))
+ if (! MEM_VOLATILE_P (val))
{
mode = smallest_mode_for_size (pos + len, MODE_INT);
- operands[0] = adjust_address (operands[0], mode, 0);
+ val = adjust_address (val, mode, 0);
}
}
- else if (GET_CODE (operands[0]) == SUBREG
- && (submode = GET_MODE (SUBREG_REG (operands[0])),
+ else if (GET_CODE (val) == SUBREG
+ && (submode = GET_MODE (SUBREG_REG (val)),
GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (submode))
&& pos + len <= GET_MODE_BITSIZE (submode))
{
/* Narrow a paradoxical subreg to prevent partial register stalls. */
mode = submode;
- operands[0] = SUBREG_REG (operands[0]);
+ val = SUBREG_REG (val);
}
else if (mode == HImode && pos + len <= 8)
{
/* Small HImode tests can be converted to QImode. */
mode = QImode;
- operands[0] = gen_lowpart (QImode, operands[0]);
+ val = gen_lowpart (QImode, val);
}
mask = ((HOST_WIDE_INT)1 << (pos + len)) - 1;
mask &= ~(((HOST_WIDE_INT)1 << pos) - 1);
- operands[3] = gen_rtx_AND (mode, operands[0], gen_int_mode (mask, mode));
+ operands[2] = gen_rtx_AND (mode, val, gen_int_mode (mask, mode));
})
;; Convert HImode/SImode test instructions with immediate to QImode ones.
@@ -8065,46 +8099,44 @@
;; Do the conversion only post-reload to avoid limiting of the register class
;; to QI regs.
(define_split
- [(set (reg 17)
- (compare
- (and (match_operand 0 "register_operand" "")
- (match_operand 1 "const_int_operand" ""))
- (const_int 0)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and (match_operand 2 "register_operand" "")
+ (match_operand 3 "const_int_operand" ""))
+ (const_int 0)]))]
"reload_completed
- && QI_REG_P (operands[0])
+ && QI_REG_P (operands[2])
+ && GET_MODE (operands[2]) != QImode
&& ((ix86_match_ccmode (insn, CCZmode)
- && !(INTVAL (operands[1]) & ~(255 << 8)))
+ && !(INTVAL (operands[3]) & ~(255 << 8)))
|| (ix86_match_ccmode (insn, CCNOmode)
- && !(INTVAL (operands[1]) & ~(127 << 8))))
- && GET_MODE (operands[0]) != QImode"
- [(set (reg:CCNO 17)
- (compare:CCNO
- (and:SI (zero_extract:SI (match_dup 0) (const_int 8) (const_int 8))
- (match_dup 1))
- (const_int 0)))]
- "operands[0] = gen_lowpart (SImode, operands[0]);
- operands[1] = gen_int_mode (INTVAL (operands[1]) >> 8, SImode);")
+ && !(INTVAL (operands[3]) & ~(127 << 8))))"
+ [(set (match_dup 0)
+ (match_op_dup 1
+ [(and:SI (zero_extract:SI (match_dup 2) (const_int 8) (const_int 8))
+ (match_dup 3))
+ (const_int 0)]))]
+ "operands[2] = gen_lowpart (SImode, operands[2]);
+ operands[3] = gen_int_mode (INTVAL (operands[3]) >> 8, SImode);")
(define_split
- [(set (reg 17)
- (compare
- (and (match_operand 0 "nonimmediate_operand" "")
- (match_operand 1 "const_int_operand" ""))
- (const_int 0)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and (match_operand 2 "nonimmediate_operand" "")
+ (match_operand 3 "const_int_operand" ""))
+ (const_int 0)]))]
"reload_completed
- && (!REG_P (operands[0]) || ANY_QI_REG_P (operands[0]))
+ && GET_MODE (operands[2]) != QImode
+ && (!REG_P (operands[2]) || ANY_QI_REG_P (operands[2]))
&& ((ix86_match_ccmode (insn, CCZmode)
- && !(INTVAL (operands[1]) & ~255))
+ && !(INTVAL (operands[3]) & ~255))
|| (ix86_match_ccmode (insn, CCNOmode)
- && !(INTVAL (operands[1]) & ~127)))
- && GET_MODE (operands[0]) != QImode"
- [(set (reg:CCNO 17)
- (compare:CCNO
- (and:QI (match_dup 0)
- (match_dup 1))
- (const_int 0)))]
- "operands[0] = gen_lowpart (QImode, operands[0]);
- operands[1] = gen_lowpart (QImode, operands[1]);")
+ && !(INTVAL (operands[3]) & ~127)))"
+ [(set (match_dup 0)
+ (match_op_dup 1 [(and:QI (match_dup 2) (match_dup 3))
+ (const_int 0)]))]
+ "operands[2] = gen_lowpart (QImode, operands[2]);
+ operands[3] = gen_lowpart (QImode, operands[3]);")
;; %%% This used to optimize known byte-wide and operations to memory,
@@ -8381,21 +8413,22 @@
[(set_attr "type" "alu1")
(set_attr "mode" "QI")])
-(define_insn "*andqi_2"
+(define_insn "*andqi_2_maybe_si"
[(set (reg 17)
(compare (and:QI
- (match_operand:QI 1 "nonimmediate_operand" "%0,0,0")
- (match_operand:QI 2 "general_operand" "qim,qi,i"))
+ (match_operand:QI 1 "nonimmediate_operand" "%0,0,0")
+ (match_operand:QI 2 "general_operand" "qim,qi,i"))
(const_int 0)))
(set (match_operand:QI 0 "nonimmediate_operand" "=q,qm,*r")
(and:QI (match_dup 1) (match_dup 2)))]
- "ix86_match_ccmode (insn, CCNOmode)
- && ix86_binary_operator_ok (AND, QImode, operands)"
+ "ix86_binary_operator_ok (AND, QImode, operands)
+ && ix86_match_ccmode (insn,
+ GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) >= 0 ? CCNOmode : CCZmode)"
{
if (which_alternative == 2)
{
- if (GET_CODE (operands[2]) == CONST_INT
- && (INTVAL (operands[2]) & 0xffffff00))
+ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0)
operands[2] = GEN_INT (INTVAL (operands[2]) & 0xff);
return "and{l}\t{%2, %k0|%k0, %2}";
}
@@ -8404,6 +8437,20 @@
[(set_attr "type" "alu")
(set_attr "mode" "QI,QI,SI")])
+(define_insn "*andqi_2"
+ [(set (reg 17)
+ (compare (and:QI
+ (match_operand:QI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:QI 2 "general_operand" "qim,qi"))
+ (const_int 0)))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=q,qm")
+ (and:QI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (AND, QImode, operands)"
+ "and{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
(define_insn "*andqi_2_slp"
[(set (reg 17)
(compare (and:QI
@@ -9567,8 +9614,8 @@
[(parallel [(set (match_operand:SF 0 "nonimmediate_operand" "")
(neg:SF (match_operand:SF 1 "nonimmediate_operand" "")))
(clobber (reg:CC 17))])]
- "TARGET_80387"
- "if (TARGET_SSE)
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "if (TARGET_SSE_MATH)
{
/* In case operand is in memory, we will not use SSE. */
if (memory_operand (operands[0], VOIDmode)
@@ -9641,12 +9688,12 @@
(use (match_operand:V4SF 2 "nonimmediate_operand" ""))
(clobber (reg:CC 17))]
"reload_completed && SSE_REG_P (operands[0])"
- [(set (subreg:TI (match_dup 0) 0)
- (xor:TI (match_dup 1)
- (match_dup 2)))]
+ [(set (match_dup 0)
+ (xor:V4SF (match_dup 1)
+ (match_dup 2)))]
{
- operands[1] = simplify_gen_subreg (TImode, operands[1], SFmode, 0);
- operands[2] = simplify_gen_subreg (TImode, operands[2], V4SFmode, 0);
+ operands[0] = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0);
+ operands[1] = simplify_gen_subreg (V4SFmode, operands[1], SFmode, 0);
if (operands_match_p (operands[0], operands[2]))
{
rtx tmp;
@@ -9664,7 +9711,7 @@
[(set (match_operand:SF 0 "nonimmediate_operand" "=f#r,rm#f")
(neg:SF (match_operand:SF 1 "nonimmediate_operand" "0,0")))
(clobber (reg:CC 17))]
- "TARGET_80387 && !TARGET_SSE
+ "TARGET_80387
&& ix86_unary_operator_ok (NEG, SFmode, operands)"
"#")
@@ -9707,8 +9754,8 @@
[(parallel [(set (match_operand:DF 0 "nonimmediate_operand" "")
(neg:DF (match_operand:DF 1 "nonimmediate_operand" "")))
(clobber (reg:CC 17))])]
- "TARGET_80387"
- "if (TARGET_SSE2)
+ "TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
+ "if (TARGET_SSE2 && TARGET_SSE_MATH)
{
/* In case operand is in memory, we will not use SSE. */
if (memory_operand (operands[0], VOIDmode)
@@ -9809,13 +9856,12 @@
(use (match_operand:V2DF 2 "nonimmediate_operand" ""))
(clobber (reg:CC 17))]
"reload_completed && SSE_REG_P (operands[0])"
- [(set (subreg:TI (match_dup 0) 0)
- (xor:TI (match_dup 1)
- (match_dup 2)))]
+ [(set (match_dup 0)
+ (xor:V2DF (match_dup 1)
+ (match_dup 2)))]
{
operands[0] = simplify_gen_subreg (V2DFmode, operands[0], DFmode, 0);
- operands[1] = simplify_gen_subreg (TImode, operands[1], DFmode, 0);
- operands[2] = simplify_gen_subreg (TImode, operands[2], V2DFmode, 0);
+ operands[1] = simplify_gen_subreg (V2DFmode, operands[1], DFmode, 0);
/* Avoid possible reformatting on the operands. */
if (TARGET_SSE_PARTIAL_REGS && !optimize_size)
emit_insn (gen_sse2_unpcklpd (operands[0], operands[0], operands[0]));
@@ -9974,8 +10020,8 @@
[(parallel [(set (match_operand:SF 0 "nonimmediate_operand" "")
(neg:SF (match_operand:SF 1 "nonimmediate_operand" "")))
(clobber (reg:CC 17))])]
- "TARGET_80387"
- "if (TARGET_SSE)
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "if (TARGET_SSE_MATH)
{
/* In case operand is in memory, we will not use SSE. */
if (memory_operand (operands[0], VOIDmode)
@@ -10049,12 +10095,12 @@
(use (match_operand:V4SF 2 "nonimmediate_operand" ""))
(clobber (reg:CC 17))]
"reload_completed && SSE_REG_P (operands[0])"
- [(set (subreg:TI (match_dup 0) 0)
- (and:TI (match_dup 1)
- (match_dup 2)))]
+ [(set (match_dup 0)
+ (and:V4SF (match_dup 1)
+ (match_dup 2)))]
{
- operands[1] = simplify_gen_subreg (TImode, operands[1], SFmode, 0);
- operands[2] = simplify_gen_subreg (TImode, operands[2], V4SFmode, 0);
+ operands[0] = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0);
+ operands[1] = simplify_gen_subreg (V4SFmode, operands[1], SFmode, 0);
if (operands_match_p (operands[0], operands[2]))
{
rtx tmp;
@@ -10071,7 +10117,7 @@
[(set (match_operand:SF 0 "nonimmediate_operand" "=f#r,rm#f")
(abs:SF (match_operand:SF 1 "nonimmediate_operand" "0,0")))
(clobber (reg:CC 17))]
- "TARGET_80387 && ix86_unary_operator_ok (ABS, SFmode, operands) && !TARGET_SSE"
+ "TARGET_80387 && ix86_unary_operator_ok (ABS, SFmode, operands)"
"#")
(define_split
@@ -10113,8 +10159,8 @@
[(parallel [(set (match_operand:DF 0 "nonimmediate_operand" "")
(neg:DF (match_operand:DF 1 "nonimmediate_operand" "")))
(clobber (reg:CC 17))])]
- "TARGET_80387"
- "if (TARGET_SSE2)
+ "TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
+ "if (TARGET_SSE2 && TARGET_SSE_MATH)
{
/* In case operand is in memory, we will not use SSE. */
if (memory_operand (operands[0], VOIDmode)
@@ -10203,13 +10249,12 @@
(use (match_operand:V2DF 2 "nonimmediate_operand" ""))
(clobber (reg:CC 17))]
"reload_completed && SSE_REG_P (operands[0])"
- [(set (subreg:TI (match_dup 0) 0)
- (and:TI (match_dup 1)
- (match_dup 2)))]
+ [(set (match_dup 0)
+ (and:V2DF (match_dup 1)
+ (match_dup 2)))]
{
operands[0] = simplify_gen_subreg (V2DFmode, operands[0], DFmode, 0);
- operands[1] = simplify_gen_subreg (TImode, operands[1], DFmode, 0);
- operands[2] = simplify_gen_subreg (TImode, operands[2], V2DFmode, 0);
+ operands[1] = simplify_gen_subreg (V2DFmode, operands[1], DFmode, 0);
/* Avoid possible reformatting on the operands. */
if (TARGET_SSE_PARTIAL_REGS && !optimize_size)
emit_insn (gen_sse2_unpcklpd (operands[0], operands[0], operands[0]));
@@ -10383,17 +10428,19 @@
(set_attr "mode" "DI")])
(define_split
- [(set (reg 17)
- (compare (not:DI (match_operand:DI 1 "nonimmediate_operand" ""))
- (const_int 0)))
- (set (match_operand:DI 0 "nonimmediate_operand" "")
- (not:DI (match_dup 1)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(not:DI (match_operand:DI 3 "nonimmediate_operand" ""))
+ (const_int 0)]))
+ (set (match_operand:DI 1 "nonimmediate_operand" "")
+ (not:DI (match_dup 3)))]
"TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)"
- [(parallel [(set (reg:CCNO 17)
- (compare:CCNO (xor:DI (match_dup 1) (const_int -1))
- (const_int 0)))
- (set (match_dup 0)
- (xor:DI (match_dup 1) (const_int -1)))])]
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2
+ [(xor:DI (match_dup 3) (const_int -1))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (xor:DI (match_dup 3) (const_int -1)))])]
"")
(define_expand "one_cmplsi2"
@@ -10432,17 +10479,18 @@
(set_attr "mode" "SI")])
(define_split
- [(set (reg 17)
- (compare (not:SI (match_operand:SI 1 "nonimmediate_operand" ""))
- (const_int 0)))
- (set (match_operand:SI 0 "nonimmediate_operand" "")
- (not:SI (match_dup 1)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(not:SI (match_operand:SI 3 "nonimmediate_operand" ""))
+ (const_int 0)]))
+ (set (match_operand:SI 1 "nonimmediate_operand" "")
+ (not:SI (match_dup 3)))]
"ix86_match_ccmode (insn, CCNOmode)"
- [(parallel [(set (reg:CCNO 17)
- (compare:CCNO (xor:SI (match_dup 1) (const_int -1))
- (const_int 0)))
- (set (match_dup 0)
- (xor:SI (match_dup 1) (const_int -1)))])]
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2 [(xor:SI (match_dup 3) (const_int -1))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (xor:SI (match_dup 3) (const_int -1)))])]
"")
;; ??? Currently never generated - xor is used instead.
@@ -10459,17 +10507,18 @@
(set_attr "mode" "SI")])
(define_split
- [(set (reg 17)
- (compare (not:SI (match_operand:SI 1 "register_operand" ""))
- (const_int 0)))
- (set (match_operand:DI 0 "register_operand" "")
- (zero_extend:DI (not:SI (match_dup 1))))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(not:SI (match_operand:SI 3 "register_operand" ""))
+ (const_int 0)]))
+ (set (match_operand:DI 1 "register_operand" "")
+ (zero_extend:DI (not:SI (match_dup 3))))]
"ix86_match_ccmode (insn, CCNOmode)"
- [(parallel [(set (reg:CCNO 17)
- (compare:CCNO (xor:SI (match_dup 1) (const_int -1))
- (const_int 0)))
- (set (match_dup 0)
- (zero_extend:DI (xor:SI (match_dup 1) (const_int -1))))])]
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2 [(xor:SI (match_dup 3) (const_int -1))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (zero_extend:DI (xor:SI (match_dup 3) (const_int -1))))])]
"")
(define_expand "one_cmplhi2"
@@ -10499,17 +10548,18 @@
(set_attr "mode" "HI")])
(define_split
- [(set (reg 17)
- (compare (not:HI (match_operand:HI 1 "nonimmediate_operand" ""))
- (const_int 0)))
- (set (match_operand:HI 0 "nonimmediate_operand" "")
- (not:HI (match_dup 1)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(not:HI (match_operand:HI 3 "nonimmediate_operand" ""))
+ (const_int 0)]))
+ (set (match_operand:HI 1 "nonimmediate_operand" "")
+ (not:HI (match_dup 3)))]
"ix86_match_ccmode (insn, CCNOmode)"
- [(parallel [(set (reg:CCNO 17)
- (compare:CCNO (xor:HI (match_dup 1) (const_int -1))
- (const_int 0)))
- (set (match_dup 0)
- (xor:HI (match_dup 1) (const_int -1)))])]
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2 [(xor:HI (match_dup 3) (const_int -1))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (xor:HI (match_dup 3) (const_int -1)))])]
"")
;; %%% Potential partial reg stall on alternative 1. What to do?
@@ -10542,17 +10592,18 @@
(set_attr "mode" "QI")])
(define_split
- [(set (reg 17)
- (compare (not:QI (match_operand:QI 1 "nonimmediate_operand" ""))
- (const_int 0)))
- (set (match_operand:QI 0 "nonimmediate_operand" "")
- (not:QI (match_dup 1)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(not:QI (match_operand:QI 3 "nonimmediate_operand" ""))
+ (const_int 0)]))
+ (set (match_operand:QI 1 "nonimmediate_operand" "")
+ (not:QI (match_dup 3)))]
"ix86_match_ccmode (insn, CCNOmode)"
- [(parallel [(set (reg:CCNO 17)
- (compare:CCNO (xor:QI (match_dup 1) (const_int -1))
- (const_int 0)))
- (set (match_dup 0)
- (xor:QI (match_dup 1) (const_int -1)))])]
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2 [(xor:QI (match_dup 3) (const_int -1))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (xor:QI (match_dup 3) (const_int -1)))])]
"")
;; Arithmetic shift instructions
@@ -17003,7 +17054,8 @@
(clobber (match_operand 6 "" ""))
(clobber (reg:CC 17))]
"!SSE_REG_P (operands[0]) && reload_completed
- && VALID_SSE_REG_MODE (GET_MODE (operands[0]))"
+ && (GET_MODE (operands[0]) == SFmode
+ || (TARGET_SSE2 && GET_MODE (operands[0]) == DFmode))"
[(const_int 0)]
{
ix86_compare_op0 = operands[5];
@@ -17020,22 +17072,60 @@
;; nand op0, op3 - load op3 to op0 if comparison was false
;; or op2, op0 - get the nonzero one into the result.
(define_split
- [(set (match_operand 0 "register_operand" "")
- (if_then_else (match_operator 1 "sse_comparison_operator"
- [(match_operand 4 "register_operand" "")
- (match_operand 5 "nonimmediate_operand" "")])
- (match_operand 2 "register_operand" "")
- (match_operand 3 "register_operand" "")))
+ [(set (match_operand:SF 0 "register_operand" "")
+ (if_then_else:SF (match_operator 1 "sse_comparison_operator"
+ [(match_operand:SF 4 "register_operand" "")
+ (match_operand:SF 5 "nonimmediate_operand" "")])
+ (match_operand:SF 2 "register_operand" "")
+ (match_operand:SF 3 "register_operand" "")))
(clobber (match_operand 6 "" ""))
(clobber (reg:CC 17))]
"SSE_REG_P (operands[0]) && reload_completed"
[(set (match_dup 4) (match_op_dup 1 [(match_dup 4) (match_dup 5)]))
- (set (subreg:TI (match_dup 2) 0) (and:TI (subreg:TI (match_dup 2) 0)
- (subreg:TI (match_dup 4) 0)))
- (set (subreg:TI (match_dup 4) 0) (and:TI (not:TI (subreg:TI (match_dup 4) 0))
- (subreg:TI (match_dup 3) 0)))
- (set (subreg:TI (match_dup 0) 0) (ior:TI (subreg:TI (match_dup 6) 0)
- (subreg:TI (match_dup 7) 0)))]
+ (set (match_dup 2) (and:V4SF (match_dup 2)
+ (match_dup 8)))
+ (set (match_dup 8) (and:V4SF (not:V4SF (match_dup 8))
+ (match_dup 3)))
+ (set (match_dup 0) (ior:V4SF (match_dup 6)
+ (match_dup 7)))]
+{
+ /* If op2 == op3, op3 would be clobbered before it is used. */
+ if (operands_match_p (operands[2], operands[3]))
+ {
+ emit_move_insn (operands[0], operands[2]);
+ DONE;
+ }
+
+ PUT_MODE (operands[1], GET_MODE (operands[0]));
+ if (operands_match_p (operands[0], operands[4]))
+ operands[6] = operands[4], operands[7] = operands[2];
+ else
+ operands[6] = operands[2], operands[7] = operands[4];
+ operands[0] = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0);
+ operands[2] = simplify_gen_subreg (V4SFmode, operands[2], SFmode, 0);
+ operands[3] = simplify_gen_subreg (V4SFmode, operands[3], SFmode, 0);
+ operands[8] = simplify_gen_subreg (V4SFmode, operands[4], SFmode, 0);
+ operands[6] = simplify_gen_subreg (V4SFmode, operands[6], SFmode, 0);
+ operands[7] = simplify_gen_subreg (V4SFmode, operands[7], SFmode, 0);
+})
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (if_then_else:DF (match_operator 1 "sse_comparison_operator"
+ [(match_operand:DF 4 "register_operand" "")
+ (match_operand:DF 5 "nonimmediate_operand" "")])
+ (match_operand:DF 2 "register_operand" "")
+ (match_operand:DF 3 "register_operand" "")))
+ (clobber (match_operand 6 "" ""))
+ (clobber (reg:CC 17))]
+ "SSE_REG_P (operands[0]) && reload_completed"
+ [(set (match_dup 4) (match_op_dup 1 [(match_dup 4) (match_dup 5)]))
+ (set (match_dup 2) (and:V2DF (match_dup 2)
+ (match_dup 8)))
+ (set (match_dup 8) (and:V2DF (not:V2DF (match_dup 8))
+ (match_dup 3)))
+ (set (match_dup 0) (ior:V2DF (match_dup 6)
+ (match_dup 7)))]
{
if (GET_MODE (operands[2]) == DFmode
&& TARGET_SSE_PARTIAL_REGS && !optimize_size)
@@ -17058,6 +17148,12 @@
operands[6] = operands[4], operands[7] = operands[2];
else
operands[6] = operands[2], operands[7] = operands[4];
+ operands[0] = simplify_gen_subreg (V2DFmode, operands[0], DFmode, 0);
+ operands[2] = simplify_gen_subreg (V2DFmode, operands[2], DFmode, 0);
+ operands[3] = simplify_gen_subreg (V2DFmode, operands[3], DFmode, 0);
+ operands[8] = simplify_gen_subreg (V2DFmode, operands[4], DFmode, 0);
+ operands[6] = simplify_gen_subreg (V2DFmode, operands[6], DFmode, 0);
+ operands[7] = simplify_gen_subreg (V2DFmode, operands[7], DFmode, 0);
})
;; Special case of conditional move we can handle effectively.
@@ -17144,18 +17240,55 @@
"#")
(define_split
- [(set (match_operand 0 "register_operand" "")
- (if_then_else (match_operator 1 "comparison_operator"
- [(match_operand 4 "nonimmediate_operand" "")
- (match_operand 5 "nonimmediate_operand" "")])
- (match_operand 2 "nonmemory_operand" "")
- (match_operand 3 "nonmemory_operand" "")))]
+ [(set (match_operand:SF 0 "register_operand" "")
+ (if_then_else:SF (match_operator 1 "comparison_operator"
+ [(match_operand:SF 4 "nonimmediate_operand" "")
+ (match_operand:SF 5 "nonimmediate_operand" "")])
+ (match_operand:SF 2 "nonmemory_operand" "")
+ (match_operand:SF 3 "nonmemory_operand" "")))]
"SSE_REG_P (operands[0]) && reload_completed
&& (const0_operand (operands[2], GET_MODE (operands[0]))
|| const0_operand (operands[3], GET_MODE (operands[0])))"
[(set (match_dup 0) (match_op_dup 1 [(match_dup 0) (match_dup 5)]))
- (set (subreg:TI (match_dup 0) 0) (and:TI (match_dup 6)
- (match_dup 7)))]
+ (set (match_dup 8) (and:V4SF (match_dup 6) (match_dup 7)))]
+{
+ PUT_MODE (operands[1], GET_MODE (operands[0]));
+ if (!sse_comparison_operator (operands[1], VOIDmode)
+ || !rtx_equal_p (operands[0], operands[4]))
+ {
+ rtx tmp = operands[5];
+ operands[5] = operands[4];
+ operands[4] = tmp;
+ PUT_CODE (operands[1], swap_condition (GET_CODE (operands[1])));
+ }
+ if (!rtx_equal_p (operands[0], operands[4]))
+ abort ();
+ operands[8] = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0);
+ if (const0_operand (operands[2], GET_MODE (operands[2])))
+ {
+ operands[7] = operands[3];
+ operands[6] = gen_rtx_NOT (V4SFmode, operands[5]);
+ }
+ else
+ {
+ operands[7] = operands[2];
+ operands[6] = operands[8];
+ }
+ operands[7] = simplify_gen_subreg (V4SFmode, operands[7], SFmode, 0);
+})
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (if_then_else:DF (match_operator 1 "comparison_operator"
+ [(match_operand:DF 4 "nonimmediate_operand" "")
+ (match_operand:DF 5 "nonimmediate_operand" "")])
+ (match_operand:DF 2 "nonmemory_operand" "")
+ (match_operand:DF 3 "nonmemory_operand" "")))]
+ "SSE_REG_P (operands[0]) && reload_completed
+ && (const0_operand (operands[2], GET_MODE (operands[0]))
+ || const0_operand (operands[3], GET_MODE (operands[0])))"
+ [(set (match_dup 0) (match_op_dup 1 [(match_dup 0) (match_dup 5)]))
+ (set (match_dup 8) (and:V2DF (match_dup 6) (match_dup 7)))]
{
if (TARGET_SSE_PARTIAL_REGS && !optimize_size
&& GET_MODE (operands[2]) == DFmode)
@@ -17182,19 +17315,18 @@
}
if (!rtx_equal_p (operands[0], operands[4]))
abort ();
- if (const0_operand (operands[2], GET_MODE (operands[0])))
+ operands[8] = simplify_gen_subreg (V2DFmode, operands[0], DFmode, 0);
+ if (const0_operand (operands[2], GET_MODE (operands[2])))
{
operands[7] = operands[3];
- operands[6] = gen_rtx_NOT (TImode, gen_rtx_SUBREG (TImode, operands[0],
- 0));
+ operands[6] = gen_rtx_NOT (V2DFmode, operands[8]);
}
else
{
operands[7] = operands[2];
- operands[6] = gen_rtx_SUBREG (TImode, operands[0], 0);
+ operands[6] = operands[8];
}
- operands[7] = simplify_gen_subreg (TImode, operands[7],
- GET_MODE (operands[7]), 0);
+ operands[7] = simplify_gen_subreg (V2DFmode, operands[7], DFmode, 0);
})
(define_expand "allocate_stack_worker"
@@ -17319,52 +17451,56 @@
; instruction size is unchanged, except in the %eax case for
; which it is increased by one byte, hence the ! optimize_size.
(define_split
- [(set (reg 17)
- (compare (and (match_operand 1 "aligned_operand" "")
- (match_operand 2 "const_int_operand" ""))
- (const_int 0)))
- (set (match_operand 0 "register_operand" "")
- (and (match_dup 1) (match_dup 2)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(and (match_operand 3 "aligned_operand" "")
+ (match_operand 4 "const_int_operand" ""))
+ (const_int 0)]))
+ (set (match_operand 1 "register_operand" "")
+ (and (match_dup 3) (match_dup 4)))]
"! TARGET_PARTIAL_REG_STALL && reload_completed
/* Ensure that the operand will remain sign-extended immediate. */
- && ix86_match_ccmode (insn, INTVAL (operands[2]) >= 0 ? CCNOmode : CCZmode)
+ && ix86_match_ccmode (insn, INTVAL (operands[4]) >= 0 ? CCNOmode : CCZmode)
&& ! optimize_size
- && ((GET_MODE (operands[0]) == HImode && ! TARGET_FAST_PREFIX)
- || (GET_MODE (operands[0]) == QImode && TARGET_PROMOTE_QImode))"
- [(parallel [(set (reg:CCNO 17)
- (compare:CCNO (and:SI (match_dup 1) (match_dup 2))
- (const_int 0)))
- (set (match_dup 0)
- (and:SI (match_dup 1) (match_dup 2)))])]
- "operands[2]
- = gen_int_mode (INTVAL (operands[2])
- & GET_MODE_MASK (GET_MODE (operands[0])),
- SImode);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[1] = gen_lowpart (SImode, operands[1]);")
+ && ((GET_MODE (operands[1]) == HImode && ! TARGET_FAST_PREFIX)
+ || (GET_MODE (operands[1]) == QImode && TARGET_PROMOTE_QImode))"
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2 [(and:SI (match_dup 3) (match_dup 4))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (and:SI (match_dup 3) (match_dup 4)))])]
+{
+ operands[4]
+ = gen_int_mode (INTVAL (operands[4])
+ & GET_MODE_MASK (GET_MODE (operands[1])), SImode);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[3] = gen_lowpart (SImode, operands[3]);
+})
; Don't promote the QImode tests, as i386 doesn't have encoding of
; the TEST instruction with 32-bit sign-extended immediate and thus
; the instruction size would at least double, which is not what we
; want even with ! optimize_size.
(define_split
- [(set (reg 17)
- (compare (and (match_operand:HI 0 "aligned_operand" "")
- (match_operand:HI 1 "const_int_operand" ""))
- (const_int 0)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and (match_operand:HI 2 "aligned_operand" "")
+ (match_operand:HI 3 "const_int_operand" ""))
+ (const_int 0)]))]
"! TARGET_PARTIAL_REG_STALL && reload_completed
/* Ensure that the operand will remain sign-extended immediate. */
- && ix86_match_ccmode (insn, INTVAL (operands[1]) >= 0 ? CCNOmode : CCZmode)
+ && ix86_match_ccmode (insn, INTVAL (operands[3]) >= 0 ? CCNOmode : CCZmode)
&& ! TARGET_FAST_PREFIX
&& ! optimize_size"
- [(set (reg:CCNO 17)
- (compare:CCNO (and:SI (match_dup 0) (match_dup 1))
- (const_int 0)))]
- "operands[1]
- = gen_int_mode (INTVAL (operands[1])
- & GET_MODE_MASK (GET_MODE (operands[0])),
- SImode);
- operands[0] = gen_lowpart (SImode, operands[0]);")
+ [(set (match_dup 0)
+ (match_op_dup 1 [(and:SI (match_dup 2) (match_dup 3))
+ (const_int 0)]))]
+{
+ operands[3]
+ = gen_int_mode (INTVAL (operands[3])
+ & GET_MODE_MASK (GET_MODE (operands[2])), SImode);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+})
(define_split
[(set (match_operand 0 "register_operand" "")
@@ -17537,13 +17673,14 @@
;; Don't compare memory with zero, load and use a test instead.
(define_peephole2
- [(set (reg 17)
- (compare (match_operand:SI 0 "memory_operand" "")
- (const_int 0)))
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(match_operand:SI 2 "memory_operand" "")
+ (const_int 0)]))
(match_scratch:SI 3 "r")]
"ix86_match_ccmode (insn, CCNOmode) && ! optimize_size"
- [(set (match_dup 3) (match_dup 0))
- (set (reg:CCNO 17) (compare:CCNO (match_dup 3) (const_int 0)))]
+ [(set (match_dup 3) (match_dup 2))
+ (set (match_dup 0) (match_op_dup 1 [(match_dup 3) (const_int 0)]))]
"")
;; NOT is not pairable on Pentium, while XOR is, but one byte longer.
@@ -17607,77 +17744,77 @@
;; versions if we're concerned about partial register stalls.
(define_peephole2
- [(set (reg 17)
- (compare (and:SI (match_operand:SI 0 "register_operand" "")
- (match_operand:SI 1 "immediate_operand" ""))
- (const_int 0)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and:SI (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "immediate_operand" ""))
+ (const_int 0)]))]
"ix86_match_ccmode (insn, CCNOmode)
- && (true_regnum (operands[0]) != 0
- || (GET_CODE (operands[1]) == CONST_INT
- && CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'K')))
- && find_regno_note (insn, REG_DEAD, true_regnum (operands[0]))"
+ && (true_regnum (operands[2]) != 0
+ || (GET_CODE (operands[3]) == CONST_INT
+ && CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'K')))
+ && peep2_reg_dead_p (1, operands[2])"
[(parallel
- [(set (reg:CCNO 17)
- (compare:CCNO (and:SI (match_dup 0)
- (match_dup 1))
- (const_int 0)))
- (set (match_dup 0)
- (and:SI (match_dup 0) (match_dup 1)))])]
+ [(set (match_dup 0)
+ (match_op_dup 1 [(and:SI (match_dup 2) (match_dup 3))
+ (const_int 0)]))
+ (set (match_dup 2)
+ (and:SI (match_dup 2) (match_dup 3)))])]
"")
;; We don't need to handle HImode case, because it will be promoted to SImode
;; on ! TARGET_PARTIAL_REG_STALL
(define_peephole2
- [(set (reg 17)
- (compare (and:QI (match_operand:QI 0 "register_operand" "")
- (match_operand:QI 1 "immediate_operand" ""))
- (const_int 0)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and:QI (match_operand:QI 2 "register_operand" "")
+ (match_operand:QI 3 "immediate_operand" ""))
+ (const_int 0)]))]
"! TARGET_PARTIAL_REG_STALL
&& ix86_match_ccmode (insn, CCNOmode)
- && true_regnum (operands[0]) != 0
- && find_regno_note (insn, REG_DEAD, true_regnum (operands[0]))"
+ && true_regnum (operands[2]) != 0
+ && peep2_reg_dead_p (1, operands[2])"
[(parallel
- [(set (reg:CCNO 17)
- (compare:CCNO (and:QI (match_dup 0)
- (match_dup 1))
- (const_int 0)))
- (set (match_dup 0)
- (and:QI (match_dup 0) (match_dup 1)))])]
+ [(set (match_dup 0)
+ (match_op_dup 1 [(and:QI (match_dup 2) (match_dup 3))
+ (const_int 0)]))
+ (set (match_dup 2)
+ (and:QI (match_dup 2) (match_dup 3)))])]
"")
(define_peephole2
- [(set (reg 17)
- (compare
- (and:SI
- (zero_extract:SI
- (match_operand 0 "ext_register_operand" "")
- (const_int 8)
- (const_int 8))
- (match_operand 1 "const_int_operand" ""))
- (const_int 0)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and:SI
+ (zero_extract:SI
+ (match_operand 2 "ext_register_operand" "")
+ (const_int 8)
+ (const_int 8))
+ (match_operand 3 "const_int_operand" ""))
+ (const_int 0)]))]
"! TARGET_PARTIAL_REG_STALL
&& ix86_match_ccmode (insn, CCNOmode)
- && true_regnum (operands[0]) != 0
- && find_regno_note (insn, REG_DEAD, true_regnum (operands[0]))"
- [(parallel [(set (reg:CCNO 17)
- (compare:CCNO
- (and:SI
- (zero_extract:SI
- (match_dup 0)
- (const_int 8)
- (const_int 8))
- (match_dup 1))
- (const_int 0)))
- (set (zero_extract:SI (match_dup 0)
+ && true_regnum (operands[2]) != 0
+ && peep2_reg_dead_p (1, operands[2])"
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 1
+ [(and:SI
+ (zero_extract:SI
+ (match_dup 2)
+ (const_int 8)
+ (const_int 8))
+ (match_dup 3))
+ (const_int 0)]))
+ (set (zero_extract:SI (match_dup 2)
(const_int 8)
(const_int 8))
(and:SI
(zero_extract:SI
- (match_dup 0)
+ (match_dup 2)
(const_int 8)
(const_int 8))
- (match_dup 1)))])]
+ (match_dup 3)))])]
"")
;; Don't do logical operations with memory inputs.
@@ -17979,66 +18116,20 @@
"")
;; Convert compares with 1 to shorter inc/dec operations when CF is not
-;; required and register dies.
-(define_peephole2
- [(set (reg 17)
- (compare (match_operand:SI 0 "register_operand" "")
- (match_operand:SI 1 "incdec_operand" "")))]
- "ix86_match_ccmode (insn, CCGCmode)
- && find_regno_note (insn, REG_DEAD, true_regnum (operands[0]))"
- [(parallel [(set (reg:CCGC 17)
- (compare:CCGC (match_dup 0)
- (match_dup 1)))
- (clobber (match_dup 0))])]
- "")
-
+;; required and register dies. Similarly for 128 to plus -128.
(define_peephole2
- [(set (reg 17)
- (compare (match_operand:HI 0 "register_operand" "")
- (match_operand:HI 1 "incdec_operand" "")))]
- "ix86_match_ccmode (insn, CCGCmode)
- && find_regno_note (insn, REG_DEAD, true_regnum (operands[0]))"
- [(parallel [(set (reg:CCGC 17)
- (compare:CCGC (match_dup 0)
- (match_dup 1)))
- (clobber (match_dup 0))])]
- "")
-
-(define_peephole2
- [(set (reg 17)
- (compare (match_operand:QI 0 "register_operand" "")
- (match_operand:QI 1 "incdec_operand" "")))]
- "ix86_match_ccmode (insn, CCGCmode)
- && find_regno_note (insn, REG_DEAD, true_regnum (operands[0]))"
- [(parallel [(set (reg:CCGC 17)
- (compare:CCGC (match_dup 0)
- (match_dup 1)))
- (clobber (match_dup 0))])]
- "")
-
-;; Convert compares with 128 to shorter add -128
-(define_peephole2
- [(set (reg 17)
- (compare (match_operand:SI 0 "register_operand" "")
- (const_int 128)))]
- "ix86_match_ccmode (insn, CCGCmode)
- && find_regno_note (insn, REG_DEAD, true_regnum (operands[0]))"
- [(parallel [(set (reg:CCGC 17)
- (compare:CCGC (match_dup 0)
- (const_int 128)))
- (clobber (match_dup 0))])]
- "")
-
-(define_peephole2
- [(set (reg 17)
- (compare (match_operand:HI 0 "register_operand" "")
- (const_int 128)))]
- "ix86_match_ccmode (insn, CCGCmode)
- && find_regno_note (insn, REG_DEAD, true_regnum (operands[0]))"
- [(parallel [(set (reg:CCGC 17)
- (compare:CCGC (match_dup 0)
- (const_int 128)))
- (clobber (match_dup 0))])]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(match_operand 2 "register_operand" "")
+ (match_operand 3 "const_int_operand" "")]))]
+ "(INTVAL (operands[3]) == -1
+ || INTVAL (operands[3]) == 1
+ || INTVAL (operands[3]) == 128)
+ && ix86_match_ccmode (insn, CCGCmode)
+ && peep2_reg_dead_p (1, operands[2])"
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 1 [(match_dup 2) (match_dup 3)]))
+ (clobber (match_dup 2))])]
"")
(define_peephole2
@@ -18326,7 +18417,7 @@
{
if (constant_call_address_operand (operands[1], QImode))
return "call\t%P1";
- return "call\t%*%1";
+ return "call\t%A1";
}
[(set_attr "type" "callv")])
@@ -18338,7 +18429,7 @@
{
if (constant_call_address_operand (operands[1], QImode))
return "jmp\t%P1";
- return "jmp\t%*%1";
+ return "jmp\t%A1";
}
[(set_attr "type" "callv")])
@@ -18422,10 +18513,11 @@
;; Moves for SSE/MMX regs.
-(define_insn "movv4sf_internal"
+(define_insn "*movv4sf_internal"
[(set (match_operand:V4SF 0 "nonimmediate_operand" "=x,x,m")
(match_operand:V4SF 1 "vector_move_operand" "C,xm,x"))]
- "TARGET_SSE"
+ "TARGET_SSE
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
"@
xorps\t%0, %0
movaps\t{%1, %0|%0, %1}
@@ -18436,7 +18528,7 @@
(define_split
[(set (match_operand:V4SF 0 "register_operand" "")
(match_operand:V4SF 1 "zero_extended_scalar_load_operand" ""))]
- "TARGET_SSE"
+ "TARGET_SSE && reload_completed"
[(set (match_dup 0)
(vec_merge:V4SF
(vec_duplicate:V4SF (match_dup 1))
@@ -18447,10 +18539,11 @@
operands[2] = CONST0_RTX (V4SFmode);
})
-(define_insn "movv4si_internal"
+(define_insn "*movv4si_internal"
[(set (match_operand:V4SI 0 "nonimmediate_operand" "=x,x,m")
(match_operand:V4SI 1 "vector_move_operand" "C,xm,x"))]
- "TARGET_SSE"
+ "TARGET_SSE
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
{
switch (which_alternative)
{
@@ -18487,10 +18580,11 @@
(const_string "TI"))]
(const_string "TI")))])
-(define_insn "movv2di_internal"
+(define_insn "*movv2di_internal"
[(set (match_operand:V2DI 0 "nonimmediate_operand" "=x,x,m")
(match_operand:V2DI 1 "vector_move_operand" "C,xm,x"))]
- "TARGET_SSE"
+ "TARGET_SSE
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
{
switch (which_alternative)
{
@@ -18530,7 +18624,7 @@
(define_split
[(set (match_operand:V2DF 0 "register_operand" "")
(match_operand:V2DF 1 "zero_extended_scalar_load_operand" ""))]
- "TARGET_SSE2"
+ "TARGET_SSE2 && reload_completed"
[(set (match_dup 0)
(vec_merge:V2DF
(vec_duplicate:V2DF (match_dup 1))
@@ -18541,52 +18635,80 @@
operands[2] = CONST0_RTX (V2DFmode);
})
-(define_insn "movv8qi_internal"
- [(set (match_operand:V8QI 0 "nonimmediate_operand" "=y,y,m")
- (match_operand:V8QI 1 "vector_move_operand" "C,ym,y"))]
+(define_insn "*movv2si_internal"
+ [(set (match_operand:V2SI 0 "nonimmediate_operand"
+ "=y,y ,m,!y,!*Y,*x,?*x,?m")
+ (match_operand:V2SI 1 "vector_move_operand"
+ "C ,ym,y,*Y,y ,C ,*xm,*x"))]
"TARGET_MMX
&& (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
"@
pxor\t%0, %0
movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ movdq2q\t{%1, %0|%0, %1}
+ movq2dq\t{%1, %0|%0, %1}
+ pxor\t%0, %0
+ movq\t{%1, %0|%0, %1}
movq\t{%1, %0|%0, %1}"
- [(set_attr "type" "mmxmov")
+ [(set_attr "type" "mmxmov,mmxmov,mmxmov,ssecvt,ssecvt,ssemov,ssemov,ssemov")
(set_attr "mode" "DI")])
-(define_insn "movv4hi_internal"
- [(set (match_operand:V4HI 0 "nonimmediate_operand" "=y,y,m")
- (match_operand:V4HI 1 "vector_move_operand" "C,ym,y"))]
+(define_insn "*movv4hi_internal"
+ [(set (match_operand:V4HI 0 "nonimmediate_operand"
+ "=y,y ,m,!y,!*Y,*x,?*x,?m")
+ (match_operand:V4HI 1 "vector_move_operand"
+ "C ,ym,y,*Y,y ,C ,*xm,*x"))]
"TARGET_MMX
&& (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
"@
pxor\t%0, %0
movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ movdq2q\t{%1, %0|%0, %1}
+ movq2dq\t{%1, %0|%0, %1}
+ pxor\t%0, %0
+ movq\t{%1, %0|%0, %1}
movq\t{%1, %0|%0, %1}"
- [(set_attr "type" "mmxmov")
+ [(set_attr "type" "mmxmov,mmxmov,mmxmov,ssecvt,ssecvt,ssemov,ssemov,ssemov")
(set_attr "mode" "DI")])
-(define_insn "movv2si_internal"
- [(set (match_operand:V2SI 0 "nonimmediate_operand" "=y,y,m")
- (match_operand:V2SI 1 "vector_move_operand" "C,ym,y"))]
+(define_insn "*movv8qi_internal"
+ [(set (match_operand:V8QI 0 "nonimmediate_operand"
+ "=y,y ,m,!y,!*Y,*x,?*x,?m")
+ (match_operand:V8QI 1 "vector_move_operand"
+ "C ,ym,y,*Y,y ,C ,*xm,*x"))]
"TARGET_MMX
&& (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
"@
pxor\t%0, %0
movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ movdq2q\t{%1, %0|%0, %1}
+ movq2dq\t{%1, %0|%0, %1}
+ pxor\t%0, %0
+ movq\t{%1, %0|%0, %1}
movq\t{%1, %0|%0, %1}"
- [(set_attr "type" "mmxcvt")
+ [(set_attr "type" "mmxmov,mmxmov,mmxmov,ssecvt,ssecvt,ssemov,ssemov,ssemov")
(set_attr "mode" "DI")])
-(define_insn "movv2sf_internal"
- [(set (match_operand:V2SF 0 "nonimmediate_operand" "=y,y,m")
- (match_operand:V2SF 1 "vector_move_operand" "C,ym,y"))]
- "TARGET_3DNOW
+(define_insn "*movv2sf_internal"
+ [(set (match_operand:V2SF 0 "nonimmediate_operand"
+ "=y,y ,m,!y,!*Y,*x,?*x,?m")
+ (match_operand:V2SF 1 "vector_move_operand"
+ "C ,ym,y,*Y,y ,C ,*xm,*x"))]
+ "TARGET_MMX
&& (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
"@
pxor\t%0, %0
movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ movdq2q\t{%1, %0|%0, %1}
+ movq2dq\t{%1, %0|%0, %1}
+ xorps\t%0, %0
+ movq\t{%1, %0|%0, %1}
movq\t{%1, %0|%0, %1}"
- [(set_attr "type" "mmxcvt")
+ [(set_attr "type" "mmxmov,mmxmov,mmxmov,ssecvt,ssecvt,ssemov,ssemov,ssemov")
(set_attr "mode" "DI")])
(define_expand "movti"
@@ -18606,17 +18728,14 @@
(match_operand:TF 1 "nonimmediate_operand" ""))]
"TARGET_64BIT"
{
- if (TARGET_64BIT)
- ix86_expand_move (TFmode, operands);
- else
- ix86_expand_vector_move (TFmode, operands);
+ ix86_expand_move (TFmode, operands);
DONE;
})
-(define_insn "movv2df_internal"
+(define_insn "*movv2df_internal"
[(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,m")
(match_operand:V2DF 1 "vector_move_operand" "C,xm,x"))]
- "TARGET_SSE2
+ "TARGET_SSE
&& (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
{
switch (which_alternative)
@@ -18638,7 +18757,9 @@
}
[(set_attr "type" "ssemov")
(set (attr "mode")
- (cond [(eq_attr "alternative" "0,1")
+ (cond [(eq (symbol_ref "TARGET_SSE2") (const_int 0))
+ (const_string "V4SF")
+ (eq_attr "alternative" "0,1")
(if_then_else
(ne (symbol_ref "optimize_size")
(const_int 0))
@@ -18654,10 +18775,10 @@
(const_string "V2DF"))]
(const_string "V2DF")))])
-(define_insn "movv8hi_internal"
+(define_insn "*movv8hi_internal"
[(set (match_operand:V8HI 0 "nonimmediate_operand" "=x,x,m")
(match_operand:V8HI 1 "vector_move_operand" "C,xm,x"))]
- "TARGET_SSE2
+ "TARGET_SSE
&& (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
{
switch (which_alternative)
@@ -18695,10 +18816,10 @@
(const_string "TI"))]
(const_string "TI")))])
-(define_insn "movv16qi_internal"
+(define_insn "*movv16qi_internal"
[(set (match_operand:V16QI 0 "nonimmediate_operand" "=x,x,m")
- (match_operand:V16QI 1 "nonimmediate_operand" "C,xm,x"))]
- "TARGET_SSE2
+ (match_operand:V16QI 1 "vector_move_operand" "C,xm,x"))]
+ "TARGET_SSE
&& (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
{
switch (which_alternative)
@@ -18739,7 +18860,7 @@
(define_expand "movv2df"
[(set (match_operand:V2DF 0 "nonimmediate_operand" "")
(match_operand:V2DF 1 "nonimmediate_operand" ""))]
- "TARGET_SSE2"
+ "TARGET_SSE"
{
ix86_expand_vector_move (V2DFmode, operands);
DONE;
@@ -18748,7 +18869,7 @@
(define_expand "movv8hi"
[(set (match_operand:V8HI 0 "nonimmediate_operand" "")
(match_operand:V8HI 1 "nonimmediate_operand" ""))]
- "TARGET_SSE2"
+ "TARGET_SSE"
{
ix86_expand_vector_move (V8HImode, operands);
DONE;
@@ -18757,7 +18878,7 @@
(define_expand "movv16qi"
[(set (match_operand:V16QI 0 "nonimmediate_operand" "")
(match_operand:V16QI 1 "nonimmediate_operand" ""))]
- "TARGET_SSE2"
+ "TARGET_SSE"
{
ix86_expand_vector_move (V16QImode, operands);
DONE;
@@ -18820,7 +18941,7 @@
(define_expand "movv2sf"
[(set (match_operand:V2SF 0 "nonimmediate_operand" "")
(match_operand:V2SF 1 "nonimmediate_operand" ""))]
- "TARGET_3DNOW"
+ "TARGET_MMX"
{
ix86_expand_vector_move (V2SFmode, operands);
DONE;
@@ -18841,19 +18962,19 @@
(define_insn "*pushv2di"
[(set (match_operand:V2DI 0 "push_operand" "=<")
(match_operand:V2DI 1 "register_operand" "x"))]
- "TARGET_SSE2"
+ "TARGET_SSE"
"#")
(define_insn "*pushv8hi"
[(set (match_operand:V8HI 0 "push_operand" "=<")
(match_operand:V8HI 1 "register_operand" "x"))]
- "TARGET_SSE2"
+ "TARGET_SSE"
"#")
(define_insn "*pushv16qi"
[(set (match_operand:V16QI 0 "push_operand" "=<")
(match_operand:V16QI 1 "register_operand" "x"))]
- "TARGET_SSE2"
+ "TARGET_SSE"
"#")
(define_insn "*pushv4sf"
@@ -18865,7 +18986,7 @@
(define_insn "*pushv4si"
[(set (match_operand:V4SI 0 "push_operand" "=<")
(match_operand:V4SI 1 "register_operand" "x"))]
- "TARGET_SSE2"
+ "TARGET_SSE"
"#")
(define_insn "*pushv2si"
@@ -18889,7 +19010,7 @@
(define_insn "*pushv2sf"
[(set (match_operand:V2SF 0 "push_operand" "=<")
(match_operand:V2SF 1 "register_operand" "y"))]
- "TARGET_3DNOW"
+ "TARGET_MMX"
"#")
(define_split
@@ -18915,7 +19036,7 @@
operands[3] = GEN_INT (-GET_MODE_SIZE (GET_MODE (operands[0])));")
-(define_insn "movti_internal"
+(define_insn "*movti_internal"
[(set (match_operand:TI 0 "nonimmediate_operand" "=x,x,m")
(match_operand:TI 1 "vector_move_operand" "C,xm,x"))]
"TARGET_SSE && !TARGET_64BIT
@@ -19462,26 +19583,16 @@
;; of DImode subregs again!
;; SSE1 single precision floating point logical operation
(define_expand "sse_andv4sf3"
- [(set (subreg:TI (match_operand:V4SF 0 "register_operand" "") 0)
- (and:TI (subreg:TI (match_operand:V4SF 1 "register_operand" "") 0)
- (subreg:TI (match_operand:V4SF 2 "nonimmediate_operand" "") 0)))]
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (and:V4SF (match_operand:V4SF 1 "register_operand" "")
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
"TARGET_SSE"
"")
(define_insn "*sse_andv4sf3"
- [(set (subreg:TI (match_operand:V4SF 0 "register_operand" "=x") 0)
- (and:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE
- && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
- "andps\t{%2, %0|%0, %2}"
- [(set_attr "type" "sselog")
- (set_attr "mode" "V4SF")])
-
-(define_insn "*sse_andsf3"
- [(set (subreg:TI (match_operand:SF 0 "register_operand" "=x") 0)
- (and:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (and:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
"TARGET_SSE
&& (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
"andps\t{%2, %0|%0, %2}"
@@ -19489,51 +19600,32 @@
(set_attr "mode" "V4SF")])
(define_expand "sse_nandv4sf3"
- [(set (subreg:TI (match_operand:V4SF 0 "register_operand" "") 0)
- (and:TI (not:TI (subreg:TI (match_operand:V4SF 1 "register_operand" "") 0))
- (subreg:TI (match_operand:V4SF 2 "nonimmediate_operand" "") 0)))]
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (and:V4SF (not:V4SF (match_operand:V4SF 1 "register_operand" ""))
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
"TARGET_SSE"
"")
(define_insn "*sse_nandv4sf3"
- [(set (subreg:TI (match_operand:V4SF 0 "register_operand" "=x") 0)
- (and:TI (not:TI (match_operand:TI 1 "register_operand" "0"))
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE"
- "andnps\t{%2, %0|%0, %2}"
- [(set_attr "type" "sselog")
- (set_attr "mode" "V4SF")])
-
-(define_insn "*sse_nandsf3"
- [(set (subreg:TI (match_operand:SF 0 "register_operand" "=x") 0)
- (and:TI (not:TI (match_operand:TI 1 "register_operand" "0"))
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (and:V4SF (not:V4SF (match_operand:V4SF 1 "register_operand" "0"))
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
"TARGET_SSE"
"andnps\t{%2, %0|%0, %2}"
[(set_attr "type" "sselog")
(set_attr "mode" "V4SF")])
(define_expand "sse_iorv4sf3"
- [(set (subreg:TI (match_operand:V4SF 0 "register_operand" "") 0)
- (ior:TI (subreg:TI (match_operand:V4SF 1 "register_operand" "") 0)
- (subreg:TI (match_operand:V4SF 2 "nonimmediate_operand" "") 0)))]
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (ior:V4SF (match_operand:V4SF 1 "register_operand" "")
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
"TARGET_SSE"
"")
(define_insn "*sse_iorv4sf3"
- [(set (subreg:TI (match_operand:V4SF 0 "register_operand" "=x") 0)
- (ior:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE
- && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
- "orps\t{%2, %0|%0, %2}"
- [(set_attr "type" "sselog")
- (set_attr "mode" "V4SF")])
-
-(define_insn "*sse_iorsf3"
- [(set (subreg:TI (match_operand:SF 0 "register_operand" "=x") 0)
- (ior:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (ior:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
"TARGET_SSE
&& (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
"orps\t{%2, %0|%0, %2}"
@@ -19541,27 +19633,16 @@
(set_attr "mode" "V4SF")])
(define_expand "sse_xorv4sf3"
- [(set (subreg:TI (match_operand:V4SF 0 "register_operand" "") 0)
- (xor:TI (subreg:TI (match_operand:V4SF 1 "register_operand" "") 0)
- (subreg:TI (match_operand:V4SF 2 "nonimmediate_operand" "") 0)))]
- "TARGET_SSE
- && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (xor:V4SF (match_operand:V4SF 1 "register_operand" "")
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE"
"")
(define_insn "*sse_xorv4sf3"
- [(set (subreg:TI (match_operand:V4SF 0 "register_operand" "=x") 0)
- (xor:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE
- && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
- "xorps\t{%2, %0|%0, %2}"
- [(set_attr "type" "sselog")
- (set_attr "mode" "V4SF")])
-
-(define_insn "*sse_xorsf3"
- [(set (subreg:TI (match_operand:SF 0 "register_operand" "=x") 0)
- (xor:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (xor:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
"TARGET_SSE
&& (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
"xorps\t{%2, %0|%0, %2}"
@@ -19571,26 +19652,16 @@
;; SSE2 double precision floating point logical operation
(define_expand "sse2_andv2df3"
- [(set (subreg:TI (match_operand:V2DF 0 "register_operand" "") 0)
- (and:TI (subreg:TI (match_operand:V2DF 1 "register_operand" "") 0)
- (subreg:TI (match_operand:V2DF 2 "nonimmediate_operand" "") 0)))]
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (and:V2DF (match_operand:V2DF 1 "register_operand" "")
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
"TARGET_SSE2"
"")
(define_insn "*sse2_andv2df3"
- [(set (subreg:TI (match_operand:V2DF 0 "register_operand" "=x") 0)
- (and:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE2
- && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
- "andpd\t{%2, %0|%0, %2}"
- [(set_attr "type" "sselog")
- (set_attr "mode" "V2DF")])
-
-(define_insn "*sse2_andv2df3"
- [(set (subreg:TI (match_operand:DF 0 "register_operand" "=x") 0)
- (and:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (and:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
"TARGET_SSE2
&& (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
"andpd\t{%2, %0|%0, %2}"
@@ -19598,51 +19669,32 @@
(set_attr "mode" "V2DF")])
(define_expand "sse2_nandv2df3"
- [(set (subreg:TI (match_operand:V2DF 0 "register_operand" "") 0)
- (and:TI (not:TI (subreg:TI (match_operand:V2DF 1 "register_operand" "") 0))
- (subreg:TI (match_operand:V2DF 2 "nonimmediate_operand" "") 0)))]
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (and:V2DF (not:V2DF (match_operand:V2DF 1 "register_operand" ""))
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
"TARGET_SSE2"
"")
(define_insn "*sse2_nandv2df3"
- [(set (subreg:TI (match_operand:V2DF 0 "register_operand" "=x") 0)
- (and:TI (not:TI (match_operand:TI 1 "register_operand" "0"))
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE2"
- "andnpd\t{%2, %0|%0, %2}"
- [(set_attr "type" "sselog")
- (set_attr "mode" "V2DF")])
-
-(define_insn "*sse_nandti3_df"
- [(set (subreg:TI (match_operand:DF 0 "register_operand" "=Y") 0)
- (and:TI (not:TI (match_operand:TI 1 "register_operand" "0"))
- (match_operand:TI 2 "nonimmediate_operand" "Ym")))]
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (and:V2DF (not:V2DF (match_operand:V2DF 1 "register_operand" "0"))
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
"TARGET_SSE2"
"andnpd\t{%2, %0|%0, %2}"
[(set_attr "type" "sselog")
(set_attr "mode" "V2DF")])
(define_expand "sse2_iorv2df3"
- [(set (subreg:TI (match_operand:V2DF 0 "register_operand" "") 0)
- (ior:TI (subreg:TI (match_operand:V2DF 1 "register_operand" "") 0)
- (subreg:TI (match_operand:V2DF 2 "nonimmediate_operand" "") 0)))]
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (ior:V2DF (match_operand:V2DF 1 "register_operand" "")
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
"TARGET_SSE2"
"")
(define_insn "*sse2_iorv2df3"
- [(set (subreg:TI (match_operand:V2DF 0 "register_operand" "=x") 0)
- (ior:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE2
- && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
- "orpd\t{%2, %0|%0, %2}"
- [(set_attr "type" "sselog")
- (set_attr "mode" "V2DF")])
-
-(define_insn "*sse2_iordf3"
- [(set (subreg:TI (match_operand:DF 0 "register_operand" "=x") 0)
- (ior:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (ior:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
"TARGET_SSE2
&& (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
"orpd\t{%2, %0|%0, %2}"
@@ -19650,26 +19702,16 @@
(set_attr "mode" "V2DF")])
(define_expand "sse2_xorv2df3"
- [(set (subreg:TI (match_operand:V2DF 0 "register_operand" "") 0)
- (xor:TI (subreg:TI (match_operand:V2DF 1 "nonimmediate_operand" "") 0)
- (subreg:TI (match_operand:V2DF 2 "nonimmediate_operand" "") 0)))]
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (xor:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "")
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
"TARGET_SSE2"
"")
(define_insn "*sse2_xorv2df3"
- [(set (subreg:TI (match_operand:V2DF 0 "register_operand" "=x") 0)
- (xor:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE2
- && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
- "xorpd\t{%2, %0|%0, %2}"
- [(set_attr "type" "sselog")
- (set_attr "mode" "V2DF")])
-
-(define_insn "*sse2_xordf3"
- [(set (subreg:TI (match_operand:DF 0 "register_operand" "=x") 0)
- (xor:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (xor:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
"TARGET_SSE2
&& (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
"xorpd\t{%2, %0|%0, %2}"
diff --git a/contrib/gcc/config/i386/t-rtems-i386 b/contrib/gcc/config/i386/t-rtems-i386
index b57f4fd..d32928c 100644
--- a/contrib/gcc/config/i386/t-rtems-i386
+++ b/contrib/gcc/config/i386/t-rtems-i386
@@ -36,17 +36,17 @@ xp-bit.c: $(srcdir)/config/fp-bit.c
echo '#define EXTENDED_FLOAT_STUBS' > xp-bit.c
cat $(srcdir)/config/fp-bit.c >> xp-bit.c
-MULTILIB_OPTIONS = mcpu=i486/mcpu=pentium/mcpu=pentiumpro/mcpu=k6/mcpu=athlon \
+MULTILIB_OPTIONS = mtune=i486/mtune=pentium/mtune=pentiumpro/mtune=k6/mtune=athlon \
msoft-float mno-fp-ret-in-387
MULTILIB_DIRNAMES= m486 mpentium mpentiumpro k6 athlon soft-float nofp
MULTILIB_MATCHES = msoft-float=mno-m80387
MULTILIB_EXCEPTIONS = \
mno-fp-ret-in-387 \
-mcpu=i486/*mno-fp-ret-in-387* \
-mcpu=pentium/*msoft-float* mcpu=pentium/*mno-fp-ret-in-387* \
-mcpu=pentiumpro/*msoft-float* mcpu=pentiumpro/*mno-fp-ret-in-387* \
-mcpu=k6/*msoft-float* mcpu=k6/*mno-fp-ret-in-387* \
-mcpu=athlon/*msoft-float* mcpu=athlon/*mno-fp-ret-in-387*
+mtune=i486/*mno-fp-ret-in-387* \
+mtune=pentium/*msoft-float* mtune=pentium/*mno-fp-ret-in-387* \
+mtune=pentiumpro/*msoft-float* mtune=pentiumpro/*mno-fp-ret-in-387* \
+mtune=k6/*msoft-float* mtune=k6/*mno-fp-ret-in-387* \
+mtune=athlon/*msoft-float* mtune=athlon/*mno-fp-ret-in-387*
EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
diff --git a/contrib/gcc/config/i386/xmmintrin.h b/contrib/gcc/config/i386/xmmintrin.h
index 1bc8878..921806f 100644
--- a/contrib/gcc/config/i386/xmmintrin.h
+++ b/contrib/gcc/config/i386/xmmintrin.h
@@ -38,10 +38,10 @@
#include <mmintrin.h>
/* The data type intended for user use. */
-typedef int __m128 __attribute__ ((__mode__(__V4SF__)));
+typedef float __m128 __attribute__ ((__mode__(__V4SF__)));
/* Internal data types for implementing the intrinsics. */
-typedef int __v4sf __attribute__ ((__mode__(__V4SF__)));
+typedef float __v4sf __attribute__ ((__mode__(__V4SF__)));
/* Create a selector for use with the SHUFPS instruction. */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
diff --git a/contrib/gcc/config/ia64/ia64.c b/contrib/gcc/config/ia64/ia64.c
index 19c5e92..c215b19 100644
--- a/contrib/gcc/config/ia64/ia64.c
+++ b/contrib/gcc/config/ia64/ia64.c
@@ -390,20 +390,55 @@ call_operand (rtx op, enum machine_mode mode)
int
sdata_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
+ HOST_WIDE_INT offset = 0, size = 0;
+
switch (GET_CODE (op))
{
case CONST:
- if (GET_CODE (XEXP (op, 0)) != PLUS
- || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF)
+ op = XEXP (op, 0);
+ if (GET_CODE (op) != PLUS
+ || GET_CODE (XEXP (op, 0)) != SYMBOL_REF
+ || GET_CODE (XEXP (op, 1)) != CONST_INT)
break;
- op = XEXP (XEXP (op, 0), 0);
+ offset = INTVAL (XEXP (op, 1));
+ op = XEXP (op, 0);
/* FALLTHRU */
case SYMBOL_REF:
if (CONSTANT_POOL_ADDRESS_P (op))
- return GET_MODE_SIZE (get_pool_mode (op)) <= ia64_section_threshold;
+ {
+ size = GET_MODE_SIZE (get_pool_mode (op));
+ if (size > ia64_section_threshold)
+ return false;
+ }
else
- return SYMBOL_REF_LOCAL_P (op) && SYMBOL_REF_SMALL_P (op);
+ {
+ tree t;
+
+ if (!SYMBOL_REF_LOCAL_P (op) || !SYMBOL_REF_SMALL_P (op))
+ return false;
+
+ /* Note that in addition to DECLs, we can get various forms
+ of constants here. */
+ t = SYMBOL_REF_DECL (op);
+ if (DECL_P (t))
+ t = DECL_SIZE_UNIT (t);
+ else
+ t = TYPE_SIZE_UNIT (TREE_TYPE (t));
+ if (t && host_integerp (t, 0))
+ {
+ size = tree_low_cst (t, 0);
+ if (size < 0)
+ size = 0;
+ }
+ }
+
+ /* Deny the stupid user trick of addressing outside the object. Such
+ things quickly result in GPREL22 relocation overflows. Of course,
+ they're also highly undefined. From a pure pedant's point of view
+ they deserve a slap on the wrist (such as provided by a relocation
+ overflow), but that just leads to bugzilla noise. */
+ return (offset >= 0 && offset <= size);
default:
break;
@@ -3154,10 +3189,13 @@ ia64_expand_epilogue (int sibcall_p)
preserve those input registers used as arguments to the sibling call.
It is unclear how to compute that number here. */
if (current_frame_info.n_input_regs != 0)
- emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
- GEN_INT (0), GEN_INT (0),
- GEN_INT (current_frame_info.n_input_regs),
- GEN_INT (0)));
+ {
+ rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
+ insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
+ const0_rtx, const0_rtx,
+ n_inputs, const0_rtx));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
}
}
@@ -3283,15 +3321,16 @@ static bool
ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
if (size == POINTER_SIZE / BITS_PER_UNIT
- && aligned_p
&& !(TARGET_NO_PIC || TARGET_AUTO_PIC)
&& GET_CODE (x) == SYMBOL_REF
&& SYMBOL_REF_FUNCTION_P (x))
{
- if (POINTER_SIZE == 32)
- fputs ("\tdata4\t@fptr(", asm_out_file);
- else
- fputs ("\tdata8\t@fptr(", asm_out_file);
+ static const char * const directive[2][2] = {
+ /* 64-bit pointer */ /* 32-bit pointer */
+ { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
+ { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
+ };
+ fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
output_addr_const (asm_out_file, x);
fputs (")\n", asm_out_file);
return true;
@@ -3917,6 +3956,12 @@ ia64_function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
static bool
ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
+ /* We can't perform a sibcall if the current function has the syscall_linkage
+ attribute. */
+ if (lookup_attribute ("syscall_linkage",
+ TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
+ return false;
+
/* We must always return with our current GP. This means we can
only sibcall to functions defined in the current module. */
return decl && (*targetm.binds_local_p) (decl);
@@ -7782,13 +7827,24 @@ process_set (FILE *asm_out_file, rtx pat)
{
dest_regno = REGNO (dest);
- /* If this isn't the final destination for ar.pfs, the alloc
- shouldn't have been marked frame related. */
- if (dest_regno != current_frame_info.reg_save_ar_pfs)
- abort ();
-
- fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
- ia64_dbx_register_number (dest_regno));
+ /* If this is the final destination for ar.pfs, then this must
+ be the alloc in the prologue. */
+ if (dest_regno == current_frame_info.reg_save_ar_pfs)
+ fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
+ ia64_dbx_register_number (dest_regno));
+ else
+ {
+ /* This must be an alloc before a sibcall. We must drop the
+ old frame info. The easiest way to drop the old frame
+ info is to ensure we had a ".restore sp" directive
+ followed by a new prologue. If the procedure doesn't
+ have a memory-stack frame, we'll issue a dummy ".restore
+ sp" now. */
+ if (current_frame_info.total_size == 0 && !frame_pointer_needed)
+ /* if haven't done process_epilogue() yet, do it now */
+ process_epilogue ();
+ fprintf (asm_out_file, "\t.prologue\n");
+ }
return 1;
}
diff --git a/contrib/gcc/config/ia64/t-glibc b/contrib/gcc/config/ia64/t-glibc
index a105662..df4fe9c 100644
--- a/contrib/gcc/config/ia64/t-glibc
+++ b/contrib/gcc/config/ia64/t-glibc
@@ -1 +1,3 @@
-LIB2ADDEH += $(srcdir)/config/ia64/fde-glibc.c
+# Use system libunwind library on IA-64 GLIBC based system.
+LIB2ADDEH = $(srcdir)/unwind-sjlj.c $(srcdir)/unwind-c.c \
+ $(srcdir)/unwind-compat.c
diff --git a/contrib/gcc/config/ia64/t-glibc-libunwind b/contrib/gcc/config/ia64/t-glibc-libunwind
new file mode 100644
index 0000000..df78f1d
--- /dev/null
+++ b/contrib/gcc/config/ia64/t-glibc-libunwind
@@ -0,0 +1,4 @@
+# Build libunwind for IA-64 GLIBC based system.
+LIBUNWIND = $(srcdir)/config/ia64/fde-glibc.c \
+ $(srcdir)/config/ia64/unwind-ia64.c
+LIBUNWINDDEP = unwind.inc
diff --git a/contrib/gcc/config/ia64/t-hpux b/contrib/gcc/config/ia64/t-hpux
index 597c2ac..d89f174 100644
--- a/contrib/gcc/config/ia64/t-hpux
+++ b/contrib/gcc/config/ia64/t-hpux
@@ -23,6 +23,8 @@ LIBGCC1_TEST =
# We do not want to include the EH stuff that linux uses, we want to use
# the HP-UX libunwind library.
+T_CFLAGS += -DUSE_LIBUNWIND_EXCEPTIONS
+
LIB2ADDEH =
SHLIB_EXT = .so
diff --git a/contrib/gcc/config/ia64/unwind-ia64.c b/contrib/gcc/config/ia64/unwind-ia64.c
index d981d8c..a49652e 100644
--- a/contrib/gcc/config/ia64/unwind-ia64.c
+++ b/contrib/gcc/config/ia64/unwind-ia64.c
@@ -37,6 +37,7 @@
#include "tm.h"
#include "unwind.h"
#include "unwind-ia64.h"
+#include "unwind-compat.h"
#include "ia64intrin.h"
/* This isn't thread safe, but nice for occasional tests. */
@@ -2274,6 +2275,8 @@ uw_install_context (struct _Unwind_Context *current __attribute__((unused)),
"(p6) ldf.fill f22 = [r28] \n\t"
"cmp.ne p7, p0 = r0, r29 \n\t"
";; \n\t"
+ "ld8 r27 = [r20], 8 \n\t"
+ ";; \n\t"
"ld8 r28 = [r20], 8 \n\t"
"(p7) ldf.fill f23 = [r29] \n\t"
"cmp.ne p6, p0 = r0, r22 \n\t"
@@ -2381,4 +2384,24 @@ uw_identify_context (struct _Unwind_Context *context)
}
#include "unwind.inc"
+
+#if defined (USE_GAS_SYMVER) && defined (SHARED) && defined (USE_LIBUNWIND_EXCEPTIONS)
+alias (_Unwind_Backtrace);
+alias (_Unwind_DeleteException);
+alias (_Unwind_FindEnclosingFunction);
+alias (_Unwind_FindTableEntry);
+alias (_Unwind_ForcedUnwind);
+alias (_Unwind_GetBSP);
+alias (_Unwind_GetCFA);
+alias (_Unwind_GetGR);
+alias (_Unwind_GetIP);
+alias (_Unwind_GetLanguageSpecificData);
+alias (_Unwind_GetRegionStart);
+alias (_Unwind_RaiseException);
+alias (_Unwind_Resume);
+alias (_Unwind_Resume_or_Rethrow);
+alias (_Unwind_SetGR);
+alias (_Unwind_SetIP);
+#endif
+
#endif
diff --git a/contrib/gcc/config/ia64/unwind-ia64.h b/contrib/gcc/config/ia64/unwind-ia64.h
index b56b38c..053829f 100644
--- a/contrib/gcc/config/ia64/unwind-ia64.h
+++ b/contrib/gcc/config/ia64/unwind-ia64.h
@@ -28,4 +28,5 @@ struct unw_table_entry
extern struct unw_table_entry *
_Unwind_FindTableEntry (void *pc, unsigned long *segment_base,
- unsigned long *gp);
+ unsigned long *gp)
+ __attribute__ ((__visibility__ ("hidden")));
diff --git a/contrib/gcc/config/rs6000/aix.h b/contrib/gcc/config/rs6000/aix.h
index f189407..b14107f 100644
--- a/contrib/gcc/config/rs6000/aix.h
+++ b/contrib/gcc/config/rs6000/aix.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler,
for IBM RS/6000 POWER running AIX.
- Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
This file is part of GCC.
@@ -175,15 +175,15 @@
#define JUMP_TABLES_IN_TEXT_SECTION 1
/* Enable AIX XL compiler calling convention breakage compatibility. */
-#undef TARGET_XL_CALL
-#define MASK_XL_CALL 0x40000000
-#define TARGET_XL_CALL (target_flags & MASK_XL_CALL)
+#undef TARGET_XL_COMPAT
+#define MASK_XL_COMPAT 0x40000000
+#define TARGET_XL_COMPAT (target_flags & MASK_XL_COMPAT)
#undef SUBTARGET_SWITCHES
#define SUBTARGET_SWITCHES \
- {"xl-call", MASK_XL_CALL, \
- N_("Always pass floating-point arguments in memory") }, \
- {"no-xl-call", - MASK_XL_CALL, \
- N_("Don't always pass floating-point arguments in memory") }, \
+ {"xl-compat", MASK_XL_COMPAT, \
+ N_("Conform more closely to IBM XLC semantics") }, \
+ {"no-xl-compat", - MASK_XL_COMPAT, \
+ N_("Default GCC semantics that differ from IBM XLC") }, \
SUBSUBTARGET_SWITCHES
#define SUBSUBTARGET_SWITCHES
@@ -209,7 +209,7 @@
code that does the save/restore is generated by the linker, so
we have no good way to determine at compile time what to do. */
-#ifdef __powerpc64__
+#ifdef __64BIT__
#define MD_FROB_UPDATE_CONTEXT(CTX, FS) \
do { \
if ((FS)->regs.reg[2].how == REG_UNSAVED) \
diff --git a/contrib/gcc/config/rs6000/aix41.h b/contrib/gcc/config/rs6000/aix41.h
index 373c10c..542f928 100644
--- a/contrib/gcc/config/rs6000/aix41.h
+++ b/contrib/gcc/config/rs6000/aix41.h
@@ -98,3 +98,7 @@
#undef RS6000_CALL_GLUE
#define RS6000_CALL_GLUE "{cror 31,31,31|nop}"
+/* The IBM AIX 4.x assembler doesn't support forward references in
+ .set directives. We handle this by deferring the output of .set
+ directives to the end of the compilation unit. */
+#define TARGET_DEFERRED_OUTPUT_DEFS(DECL,TARGET) true
diff --git a/contrib/gcc/config/rs6000/aix43.h b/contrib/gcc/config/rs6000/aix43.h
index a76e694..50bd304 100644
--- a/contrib/gcc/config/rs6000/aix43.h
+++ b/contrib/gcc/config/rs6000/aix43.h
@@ -187,3 +187,8 @@ do { \
#undef LD_INIT_SWITCH
#define LD_INIT_SWITCH "-binitfini"
+
+/* The IBM AIX 4.x assembler doesn't support forward references in
+ .set directives. We handle this by deferring the output of .set
+ directives to the end of the compilation unit. */
+#define TARGET_DEFERRED_OUTPUT_DEFS(DECL,TARGET) true
diff --git a/contrib/gcc/config/rs6000/aix52.h b/contrib/gcc/config/rs6000/aix52.h
index c066650..6f12619 100644
--- a/contrib/gcc/config/rs6000/aix52.h
+++ b/contrib/gcc/config/rs6000/aix52.h
@@ -193,3 +193,7 @@ do { \
#undef TARGET_C99_FUNCTIONS
#define TARGET_C99_FUNCTIONS 1
+#ifndef _AIX52
+extern long long int atoll(const char *);
+#endif
+
diff --git a/contrib/gcc/config/rs6000/altivec.h b/contrib/gcc/config/rs6000/altivec.h
index 04d120d..779b428 100644
--- a/contrib/gcc/config/rs6000/altivec.h
+++ b/contrib/gcc/config/rs6000/altivec.h
@@ -32,46 +32,29 @@
#ifndef _ALTIVEC_H
#define _ALTIVEC_H 1
-/* Required by Motorola specs. */
-#define __VEC__ 10206
-
-#ifndef __ALTIVEC__
-#define __ALTIVEC__ 1
+#if !defined(__VEC__) || !defined(__ALTIVEC__)
+#error Use the "-maltivec" flag to enable PowerPC AltiVec support
#endif
-#define __vector __attribute__((vector_size(16)))
+/* If __APPLE_ALTIVEC__ is defined, the compiler supports 'vector',
+ 'pixel' and 'bool' as context-sensitive AltiVec keywords (in
+ non-AltiVec contexts, they revert to their original meanings,
+ if any), so we do not need to define them as macros. */
-/* You are allowed to undef this for C++ compatibility. */
+#if !defined(__APPLE_ALTIVEC__)
+/* You are allowed to undef these for C++ compatibility. */
#define vector __vector
+#define pixel __pixel
+#define bool __bool
+#endif
-#define bool signed
-#define pixel unsigned short
-#define __pixel unsigned short
-
-/* Dummy prototype. */
-extern int __altivec_link_error_invalid_argument ();
-
-/* Helper macros. */
+/* Condition register codes for AltiVec predicates. */
#define __CR6_EQ 0
#define __CR6_EQ_REV 1
#define __CR6_LT 2
#define __CR6_LT_REV 3
-#define __bin_args_eq(xtype, x, ytype, y) \
- (__builtin_types_compatible_p (xtype, typeof (x)) \
- && __builtin_types_compatible_p (ytype, typeof (y)))
-
-#define __un_args_eq(xtype, x) \
- __builtin_types_compatible_p (xtype, typeof (x))
-
-#define __tern_args_eq(xtype, x, ytype, y, ztype, z) \
- (__builtin_types_compatible_p (xtype, typeof (x)) \
- && __builtin_types_compatible_p (ytype, typeof (y)) \
- && __builtin_types_compatible_p (ztype, typeof (z)))
-
-#define __ch(x, y, z) __builtin_choose_expr (x, y, z)
-
/* These are easy... Same exact arguments. */
#define vec_vaddcuw vec_addc
@@ -122,1077 +105,1434 @@ extern "C++" {
/* Prototypes for builtins that take literals and must always be
inlined. */
-inline vector float vec_ctf (vector unsigned int, const char) __attribute__ ((always_inline));
-inline vector float vec_ctf (vector signed int, const char) __attribute__ ((always_inline));
-inline vector float vec_vcfsx (vector signed int a1, const char a2) __attribute__ ((always_inline));
-inline vector float vec_vcfux (vector unsigned int a1, const char a2) __attribute__ ((always_inline));
-inline vector signed int vec_cts (vector float, const char) __attribute__ ((always_inline));
-inline vector unsigned int vec_ctu (vector float, const char) __attribute__ ((always_inline));
-inline void vec_dss (const char) __attribute__ ((always_inline));
-
-inline void vec_dst (vector unsigned char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (vector signed char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (vector unsigned short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (vector signed short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (vector unsigned int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (vector signed int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (vector float *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (unsigned char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (signed char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (unsigned short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (signed short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (unsigned int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (signed int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (unsigned long *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (signed long *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (float *, int, const char) __attribute__ ((always_inline));
-
-inline void vec_dstst (vector unsigned char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (vector signed char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (vector unsigned short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (vector signed short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (vector unsigned int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (vector signed int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (vector float *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (unsigned char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (signed char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (unsigned short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (signed short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (unsigned int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (signed int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (unsigned long *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (signed long *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (float *, int, const char) __attribute__ ((always_inline));
-
-inline void vec_dststt (vector unsigned char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (vector signed char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (vector unsigned short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (vector signed short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (vector unsigned int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (vector signed int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (vector float *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (unsigned char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (signed char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (unsigned short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (signed short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (unsigned int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (signed int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (unsigned long *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (signed long *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (float *, int, const char) __attribute__ ((always_inline));
-
-inline void vec_dstt (vector unsigned char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (vector signed char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (vector unsigned short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (vector signed short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (vector unsigned int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (vector signed int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (vector float *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (unsigned char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (signed char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (unsigned short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (signed short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (unsigned int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (signed int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (unsigned long *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (signed long *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (float *, int, const char) __attribute__ ((always_inline));
-
-inline vector float vec_sld (vector float, vector float, const char) __attribute__ ((always_inline));
-inline vector signed int vec_sld (vector signed int, vector signed int, const char) __attribute__ ((always_inline));
-inline vector unsigned int vec_sld (vector unsigned int, vector unsigned int, const char) __attribute__ ((always_inline));
-inline vector signed short vec_sld (vector signed short, vector signed short, const char) __attribute__ ((always_inline));
-inline vector unsigned short vec_sld (vector unsigned short, vector unsigned short, const char) __attribute__ ((always_inline));
-inline vector signed char vec_sld (vector signed char, vector signed char, const char) __attribute__ ((always_inline));
-inline vector unsigned char vec_sld (vector unsigned char, vector unsigned char, const char) __attribute__ ((always_inline));
-inline vector signed char vec_splat (vector signed char, const char) __attribute__ ((always_inline));
-inline vector unsigned char vec_splat (vector unsigned char, const char) __attribute__ ((always_inline));
-inline vector signed short vec_splat (vector signed short, const char) __attribute__ ((always_inline));
-inline vector unsigned short vec_splat (vector unsigned short, const char) __attribute__ ((always_inline));
-inline vector float vec_splat (vector float, const char) __attribute__ ((always_inline));
-inline vector signed int vec_splat (vector signed int, const char) __attribute__ ((always_inline));
-inline vector unsigned int vec_splat (vector unsigned int, const char) __attribute__ ((always_inline));
-inline vector signed char vec_splat_s8 (const char) __attribute__ ((always_inline));
-inline vector signed short vec_splat_s16 (const char) __attribute__ ((always_inline));
-inline vector signed int vec_splat_s32 (const char) __attribute__ ((always_inline));
-inline vector unsigned char vec_splat_u8 (const char) __attribute__ ((always_inline));
-inline vector unsigned short vec_splat_u16 (const char) __attribute__ ((always_inline));
-inline vector unsigned int vec_splat_u32 (const char) __attribute__ ((always_inline));
-inline vector float vec_vspltw (vector float a1, const char a2) __attribute__ ((always_inline));
-inline vector signed int vec_vspltw (vector signed int a1, const char a2) __attribute__ ((always_inline));
-inline vector unsigned int vec_vspltw (vector unsigned int a1, const char a2) __attribute__ ((always_inline));
-inline vector signed short vec_vsplth (vector signed short a1, const char a2) __attribute__ ((always_inline));
-inline vector unsigned short vec_vsplth (vector unsigned short a1, const char a2) __attribute__ ((always_inline));
-inline vector signed char vec_vspltb (vector signed char a1, const char a2) __attribute__ ((always_inline));
-inline vector unsigned char vec_vspltb (vector unsigned char a1, const char a2) __attribute__ ((always_inline));
+inline __vector float vec_ctf (__vector unsigned int, const int) __attribute__ ((always_inline));
+inline __vector float vec_ctf (__vector signed int, const int) __attribute__ ((always_inline));
+inline __vector float vec_vcfsx (__vector signed int a1, const int a2) __attribute__ ((always_inline));
+inline __vector float vec_vcfux (__vector unsigned int a1, const int a2) __attribute__ ((always_inline));
+inline __vector signed int vec_cts (__vector float, const int) __attribute__ ((always_inline));
+inline __vector unsigned int vec_ctu (__vector float, const int) __attribute__ ((always_inline));
+inline void vec_dss (const int) __attribute__ ((always_inline));
+
+inline void vec_dst (const __vector unsigned char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector signed char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector __bool char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector unsigned short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector signed short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector __bool short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector __pixel *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector unsigned int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector signed int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector __bool int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector float *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const unsigned char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const signed char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const unsigned short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const unsigned int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const unsigned long *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const long *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const float *, int, const int) __attribute__ ((always_inline));
+
+inline void vec_dstst (const __vector unsigned char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector signed char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector __bool char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector unsigned short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector signed short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector __bool short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector __pixel *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector unsigned int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector signed int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector __bool int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector float *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const unsigned char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const signed char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const unsigned short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const unsigned int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const unsigned long *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const long *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const float *, int, const int) __attribute__ ((always_inline));
+
+inline void vec_dststt (const __vector unsigned char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector signed char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector __bool char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector unsigned short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector signed short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector __bool short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector __pixel *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector unsigned int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector signed int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector __bool int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector float *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const unsigned char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const signed char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const unsigned short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const unsigned int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const unsigned long *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const long *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const float *, int, const int) __attribute__ ((always_inline));
+
+inline void vec_dstt (const __vector unsigned char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector signed char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector __bool char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector unsigned short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector signed short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector __bool short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector __pixel *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector unsigned int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector signed int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector __bool int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector float *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const unsigned char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const signed char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const unsigned short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const unsigned int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const unsigned long *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const long *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const float *, int, const int) __attribute__ ((always_inline));
+
+inline __vector float vec_sld (__vector float, __vector float, const int) __attribute__ ((always_inline));
+inline __vector signed int vec_sld (__vector signed int, __vector signed int, const int) __attribute__ ((always_inline));
+inline __vector unsigned int vec_sld (__vector unsigned int, __vector unsigned int, const int) __attribute__ ((always_inline));
+inline __vector __bool int vec_sld (__vector __bool int, __vector __bool int, const int) __attribute__ ((always_inline));
+inline __vector signed short vec_sld (__vector signed short, __vector signed short, const int) __attribute__ ((always_inline));
+inline __vector unsigned short vec_sld (__vector unsigned short, __vector unsigned short, const int) __attribute__ ((always_inline));
+inline __vector __bool short vec_sld (__vector __bool short, __vector __bool short, const int) __attribute__ ((always_inline));
+inline __vector __pixel vec_sld (__vector __pixel, __vector __pixel, const int) __attribute__ ((always_inline));
+inline __vector signed char vec_sld (__vector signed char, __vector signed char, const int) __attribute__ ((always_inline));
+inline __vector unsigned char vec_sld (__vector unsigned char, __vector unsigned char, const int) __attribute__ ((always_inline));
+inline __vector __bool char vec_sld (__vector __bool char, __vector __bool char, const int) __attribute__ ((always_inline));
+inline __vector signed char vec_splat (__vector signed char, const int) __attribute__ ((always_inline));
+inline __vector unsigned char vec_splat (__vector unsigned char, const int) __attribute__ ((always_inline));
+inline __vector __bool char vec_splat (__vector __bool char, const int) __attribute__ ((always_inline));
+inline __vector signed short vec_splat (__vector signed short, const int) __attribute__ ((always_inline));
+inline __vector unsigned short vec_splat (__vector unsigned short, const int) __attribute__ ((always_inline));
+inline __vector __bool short vec_splat (__vector __bool short, const int) __attribute__ ((always_inline));
+inline __vector __pixel vec_splat (__vector __pixel, const int) __attribute__ ((always_inline));
+inline __vector float vec_splat (__vector float, const int) __attribute__ ((always_inline));
+inline __vector signed int vec_splat (__vector signed int, const int) __attribute__ ((always_inline));
+inline __vector unsigned int vec_splat (__vector unsigned int, const int) __attribute__ ((always_inline));
+inline __vector __bool int vec_splat (__vector __bool int, const int) __attribute__ ((always_inline));
+inline __vector signed char vec_splat_s8 (const int) __attribute__ ((always_inline));
+inline __vector signed short vec_splat_s16 (const int) __attribute__ ((always_inline));
+inline __vector signed int vec_splat_s32 (const int) __attribute__ ((always_inline));
+inline __vector unsigned char vec_splat_u8 (const int) __attribute__ ((always_inline));
+inline __vector unsigned short vec_splat_u16 (const int) __attribute__ ((always_inline));
+inline __vector unsigned int vec_splat_u32 (const int) __attribute__ ((always_inline));
+inline __vector float vec_vspltw (__vector float a1, const int a2) __attribute__ ((always_inline));
+inline __vector __bool int vec_vspltw (__vector __bool int a1, const int a2) __attribute__ ((always_inline));
+inline __vector signed int vec_vspltw (__vector signed int a1, const int a2) __attribute__ ((always_inline));
+inline __vector unsigned int vec_vspltw (__vector unsigned int a1, const int a2) __attribute__ ((always_inline));
+inline __vector __bool short vec_vsplth (__vector __bool short a1, const int a2) __attribute__ ((always_inline));
+inline __vector signed short vec_vsplth (__vector signed short a1, const int a2) __attribute__ ((always_inline));
+inline __vector unsigned short vec_vsplth (__vector unsigned short a1, const int a2) __attribute__ ((always_inline));
+inline __vector __pixel vec_vsplth (__vector __pixel a1, const int a2) __attribute__ ((always_inline));
+inline __vector __bool char vec_vspltb (__vector __bool char a1, const int a2) __attribute__ ((always_inline));
+inline __vector signed char vec_vspltb (__vector signed char a1, const int a2) __attribute__ ((always_inline));
+inline __vector unsigned char vec_vspltb (__vector unsigned char a1, const int a2) __attribute__ ((always_inline));
+
+/* vec_step */
+
+template<typename _Tp>
+struct __vec_step_help
+{
+ // All proper __vector types will specialize _S_elem.
+};
+
+template<>
+struct __vec_step_help<__vector signed short>
+{
+ static const int _S_elem = 8;
+};
+
+template<>
+struct __vec_step_help<__vector unsigned short>
+{
+ static const int _S_elem = 8;
+};
+
+template<>
+struct __vec_step_help<__vector __bool short>
+{
+ static const int _S_elem = 8;
+};
+
+template<>
+struct __vec_step_help<__vector __pixel>
+{
+ static const int _S_elem = 8;
+};
+
+template<>
+struct __vec_step_help<__vector signed int>
+{
+ static const int _S_elem = 4;
+};
+
+template<>
+struct __vec_step_help<__vector unsigned int>
+{
+ static const int _S_elem = 4;
+};
+
+template<>
+struct __vec_step_help<__vector __bool int>
+{
+ static const int _S_elem = 4;
+};
+
+template<>
+struct __vec_step_help<__vector unsigned char>
+{
+ static const int _S_elem = 16;
+};
+
+template<>
+struct __vec_step_help<__vector signed char>
+{
+ static const int _S_elem = 16;
+};
+
+template<>
+struct __vec_step_help<__vector __bool char>
+{
+ static const int _S_elem = 16;
+};
+
+template<>
+struct __vec_step_help<__vector float>
+{
+ static const int _S_elem = 4;
+};
+
+#define vec_step(t) __vec_step_help<typeof(t)>::_S_elem
/* vec_abs */
-inline vector signed char
-vec_abs (vector signed char a1)
+inline __vector signed char
+vec_abs (__vector signed char a1)
{
return __builtin_altivec_abs_v16qi (a1);
}
-inline vector signed short
-vec_abs (vector signed short a1)
+inline __vector signed short
+vec_abs (__vector signed short a1)
{
return __builtin_altivec_abs_v8hi (a1);
}
-inline vector signed int
-vec_abs (vector signed int a1)
+inline __vector signed int
+vec_abs (__vector signed int a1)
{
return __builtin_altivec_abs_v4si (a1);
}
-inline vector float
-vec_abs (vector float a1)
+inline __vector float
+vec_abs (__vector float a1)
{
return __builtin_altivec_abs_v4sf (a1);
}
/* vec_abss */
-inline vector signed char
-vec_abss (vector signed char a1)
+inline __vector signed char
+vec_abss (__vector signed char a1)
{
return __builtin_altivec_abss_v16qi (a1);
}
-inline vector signed short
-vec_abss (vector signed short a1)
+inline __vector signed short
+vec_abss (__vector signed short a1)
{
return __builtin_altivec_abss_v8hi (a1);
}
-inline vector signed int
-vec_abss (vector signed int a1)
+inline __vector signed int
+vec_abss (__vector signed int a1)
{
return __builtin_altivec_abss_v4si (a1);
}
/* vec_add */
-inline vector signed char
-vec_add (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_add (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_add (__vector signed char a1, __vector __bool char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_add (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_add (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_add (__vector unsigned char a1, __vector __bool char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_add (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed short
+vec_add (__vector __bool short a1, __vector signed short a2)
{
- return (vector signed char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_add (vector signed char a1, vector unsigned char a2)
+inline __vector signed short
+vec_add (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_add (vector unsigned char a1, vector signed char a2)
+inline __vector signed short
+vec_add (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_add (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_add (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_add (vector signed short a1, vector signed short a2)
+inline __vector unsigned short
+vec_add (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector signed short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_add (vector signed short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_add (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_add (vector unsigned short a1, vector signed short a2)
+inline __vector signed int
+vec_add (__vector __bool int a1, __vector signed int a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_add (vector unsigned short a1, vector unsigned short a2)
+inline __vector signed int
+vec_add (__vector signed int a1, __vector __bool int a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_add (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_add (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_add (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_add (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_add (vector unsigned int a1, vector signed int a2)
+inline __vector unsigned int
+vec_add (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_add (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_add (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_add (vector float a1, vector float a2)
+inline __vector float
+vec_add (__vector float a1, __vector float a2)
{
- return (vector float) __builtin_altivec_vaddfp ((vector float) a1, (vector float) a2);
+ return (__vector float) __builtin_altivec_vaddfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vaddfp */
-inline vector float
-vec_vaddfp (vector float a1, vector float a2)
+inline __vector float
+vec_vaddfp (__vector float a1, __vector float a2)
{
- return (vector float) __builtin_altivec_vaddfp ((vector float) a1, (vector float) a2);
+ return (__vector float) __builtin_altivec_vaddfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vadduwm */
-inline vector signed int
-vec_vadduwm (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_vadduwm (__vector __bool int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vadduwm (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_vadduwm (__vector signed int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vadduwm (vector unsigned int a1, vector signed int a2)
+inline __vector signed int
+vec_vadduwm (__vector signed int a1, __vector signed int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vadduwm (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vadduwm (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_vadduwm (__vector unsigned int a1, __vector __bool int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_vadduwm (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vadduhm */
-inline vector signed short
-vec_vadduhm (vector signed short a1, vector signed short a2)
+inline __vector signed short
+vec_vadduhm (__vector __bool short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vadduhm (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_vadduhm (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vadduhm (vector unsigned short a1, vector signed short a2)
+inline __vector signed short
+vec_vadduhm (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vadduhm (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vadduhm (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector unsigned short
+vec_vadduhm (__vector unsigned short a1, __vector __bool short a2)
+{
+ return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector unsigned short
+vec_vadduhm (__vector unsigned short a1, __vector unsigned short a2)
+{
+ return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vaddubm */
-inline vector signed char
-vec_vaddubm (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_vaddubm (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vaddubm (__vector signed char a1, __vector __bool char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vaddubm (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vaddubm (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vaddubm (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vaddubm (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_vaddubm (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vaddubm (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vaddubm (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_addc */
-inline vector unsigned int
-vec_addc (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_addc (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vaddcuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vaddcuw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_adds */
-inline vector unsigned char
-vec_adds (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_adds (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_adds (__vector unsigned char a1, __vector __bool char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_adds (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_adds (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_adds (__vector signed char a1, __vector __bool char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_adds (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned short
+vec_adds (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_adds (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned short
+vec_adds (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_adds (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_adds (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed char
-vec_adds (vector signed char a1, vector signed char a2)
+inline __vector signed short
+vec_adds (__vector __bool short a1, __vector signed short a2)
{
- return (vector signed char) __builtin_altivec_vaddsbs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_adds (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_adds (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_adds (vector unsigned short a1, vector signed short a2)
+inline __vector signed short
+vec_adds (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_adds (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned int
+vec_adds (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_adds (vector signed short a1, vector signed short a2)
+inline __vector unsigned int
+vec_adds (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector signed short) __builtin_altivec_vaddshs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_adds (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_adds (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_adds (vector unsigned int a1, vector signed int a2)
+inline __vector signed int
+vec_adds (__vector __bool int a1, __vector signed int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_adds (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed int
+vec_adds (__vector signed int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_adds (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_adds (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vaddsws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vaddsws */
-inline vector signed int
-vec_vaddsws (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_vaddsws (__vector __bool int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vaddsws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_vaddsws (__vector signed int a1, __vector __bool int a2)
+{
+ return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_vaddsws (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vadduws */
-inline vector unsigned int
-vec_vadduws (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vadduws (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vadduws (vector unsigned int a1, vector signed int a2)
+inline __vector unsigned int
+vec_vadduws (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vadduws (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vadduws (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vaddshs */
-inline vector signed short
-vec_vaddshs (vector signed short a1, vector signed short a2)
+
+inline __vector signed short
+vec_vaddshs (__vector __bool short a1, __vector signed short a2)
+{
+ return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vaddshs (__vector signed short a1, __vector __bool short a2)
{
- return (vector signed short) __builtin_altivec_vaddshs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vaddshs (__vector signed short a1, __vector signed short a2)
+{
+ return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vadduhs */
-inline vector unsigned short
-vec_vadduhs (vector signed short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vadduhs (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vadduhs (vector unsigned short a1, vector signed short a2)
+inline __vector unsigned short
+vec_vadduhs (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vadduhs (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vadduhs (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vaddsbs */
-inline vector signed char
-vec_vaddsbs (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_vaddsbs (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vaddsbs (__vector signed char a1, __vector __bool char a2)
{
- return (vector signed char) __builtin_altivec_vaddsbs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vaddsbs (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_vaddubs */
-inline vector unsigned char
-vec_vaddubs (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vaddubs (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vaddubs (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_vaddubs (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vaddubs (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vaddubs (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_and */
-inline vector float
-vec_and (vector float a1, vector float a2)
+inline __vector float
+vec_and (__vector float a1, __vector float a2)
{
- return (vector float) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector float) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_and (vector float a1, vector signed int a2)
+inline __vector float
+vec_and (__vector float a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector float) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_and (vector signed int a1, vector float a2)
+inline __vector float
+vec_and (__vector __bool int a1, __vector float a2)
{
- return (vector float) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector float) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_and (vector signed int a1, vector signed int a2)
+inline __vector __bool int
+vec_and (__vector __bool int a1, __vector __bool int a2)
{
- return (vector signed int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_and (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_and (__vector __bool int a1, __vector signed int a2)
{
- return (vector unsigned int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_and (vector unsigned int a1, vector signed int a2)
+inline __vector signed int
+vec_and (__vector signed int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_and (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed int
+vec_and (__vector signed int a1, __vector signed int a2)
{
- return (vector unsigned int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_and (vector signed short a1, vector signed short a2)
+inline __vector unsigned int
+vec_and (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector signed short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_and (vector signed short a1, vector unsigned short a2)
+inline __vector unsigned int
+vec_and (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector unsigned short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_and (vector unsigned short a1, vector signed short a2)
+inline __vector unsigned int
+vec_and (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_and (vector unsigned short a1, vector unsigned short a2)
+inline __vector __bool short
+vec_and (__vector __bool short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_and (vector signed char a1, vector signed char a2)
+inline __vector signed short
+vec_and (__vector __bool short a1, __vector signed short a2)
{
- return (vector signed char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_and (vector signed char a1, vector unsigned char a2)
+inline __vector signed short
+vec_and (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_and (vector unsigned char a1, vector signed char a2)
+inline __vector signed short
+vec_and (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_and (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_and (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned short
+vec_and (__vector unsigned short a1, __vector __bool short a2)
+{
+ return (__vector unsigned short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned short
+vec_and (__vector unsigned short a1, __vector unsigned short a2)
+{
+ return (__vector unsigned short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_and (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool char
+vec_and (__vector __bool char a1, __vector __bool char a2)
+{
+ return (__vector __bool char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_and (__vector signed char a1, __vector __bool char a2)
+{
+ return (__vector signed char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_and (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_and (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_and (__vector unsigned char a1, __vector __bool char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_and (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_andc */
-inline vector float
-vec_andc (vector float a1, vector float a2)
+inline __vector float
+vec_andc (__vector float a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_andc (__vector float a1, __vector __bool int a2)
+{
+ return (__vector float) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_andc (__vector __bool int a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool int
+vec_andc (__vector __bool int a1, __vector __bool int a2)
+{
+ return (__vector __bool int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_andc (__vector __bool int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_andc (__vector signed int a1, __vector __bool int a2)
+{
+ return (__vector signed int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_andc (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_andc (__vector __bool int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_andc (__vector unsigned int a1, __vector __bool int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_andc (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector float) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_andc (vector float a1, vector signed int a2)
+inline __vector __bool short
+vec_andc (__vector __bool short a1, __vector __bool short a2)
{
- return (vector float) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_andc (vector signed int a1, vector float a2)
+inline __vector signed short
+vec_andc (__vector __bool short a1, __vector signed short a2)
{
- return (vector float) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_andc (vector signed int a1, vector signed int a2)
+inline __vector signed short
+vec_andc (__vector signed short a1, __vector __bool short a2)
{
- return (vector signed int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_andc (vector signed int a1, vector unsigned int a2)
+inline __vector signed short
+vec_andc (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_andc (vector unsigned int a1, vector signed int a2)
+inline __vector unsigned short
+vec_andc (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_andc (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_andc (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_andc (vector signed short a1, vector signed short a2)
+inline __vector unsigned short
+vec_andc (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_andc (vector signed short a1, vector unsigned short a2)
+inline __vector signed char
+vec_andc (__vector __bool char a1, __vector signed char a2)
{
- return (vector unsigned short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_andc (vector unsigned short a1, vector signed short a2)
+inline __vector __bool char
+vec_andc (__vector __bool char a1, __vector __bool char a2)
{
- return (vector unsigned short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_andc (vector unsigned short a1, vector unsigned short a2)
+inline __vector signed char
+vec_andc (__vector signed char a1, __vector __bool char a2)
{
- return (vector unsigned short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_andc (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_andc (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_andc (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_andc (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_andc (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_andc (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_andc (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_andc (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_avg */
-inline vector unsigned char
-vec_avg (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_avg (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vavgub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vavgub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed char
-vec_avg (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_avg (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vavgsb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vavgsb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_avg (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_avg (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vavguh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vavguh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_avg (vector signed short a1, vector signed short a2)
+inline __vector signed short
+vec_avg (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vavgsh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vavgsh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_avg (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_avg (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vavguw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vavguw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_avg (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_avg (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vavgsw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vavgsw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vavgsw */
-inline vector signed int
-vec_vavgsw (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_vavgsw (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vavgsw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vavgsw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vavguw */
-inline vector unsigned int
-vec_vavguw (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vavguw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vavguw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vavguw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vavgsh */
-inline vector signed short
-vec_vavgsh (vector signed short a1, vector signed short a2)
+inline __vector signed short
+vec_vavgsh (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vavgsh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vavgsh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vavguh */
-inline vector unsigned short
-vec_vavguh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vavguh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vavguh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vavguh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vavgsb */
-inline vector signed char
-vec_vavgsb (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_vavgsb (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vavgsb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vavgsb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_vavgub */
-inline vector unsigned char
-vec_vavgub (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vavgub (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vavgub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vavgub ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_ceil */
-inline vector float
-vec_ceil (vector float a1)
+inline __vector float
+vec_ceil (__vector float a1)
{
- return (vector float) __builtin_altivec_vrfip ((vector float) a1);
+ return (__vector float) __builtin_altivec_vrfip ((__vector float) a1);
}
/* vec_cmpb */
-inline vector signed int
-vec_cmpb (vector float a1, vector float a2)
+inline __vector signed int
+vec_cmpb (__vector float a1, __vector float a2)
{
- return (vector signed int) __builtin_altivec_vcmpbfp ((vector float) a1, (vector float) a2);
+ return (__vector signed int) __builtin_altivec_vcmpbfp ((__vector float) a1, (__vector float) a2);
}
/* vec_cmpeq */
-inline vector signed char
-vec_cmpeq (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_cmpeq (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vcmpequb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed char
-vec_cmpeq (vector unsigned char a1, vector unsigned char a2)
+inline __vector __bool char
+vec_cmpeq (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vcmpequb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_cmpeq (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_cmpeq (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vcmpequh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_cmpeq (vector unsigned short a1, vector unsigned short a2)
+inline __vector __bool short
+vec_cmpeq (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vcmpequh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_cmpeq (vector signed int a1, vector signed int a2)
+inline __vector __bool int
+vec_cmpeq (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vcmpequw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_cmpeq (vector unsigned int a1, vector unsigned int a2)
+inline __vector __bool int
+vec_cmpeq (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vcmpequw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_cmpeq (vector float a1, vector float a2)
+inline __vector __bool int
+vec_cmpeq (__vector float a1, __vector float a2)
{
- return (vector signed int) __builtin_altivec_vcmpeqfp ((vector float) a1, (vector float) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpeqfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vcmpeqfp */
-inline vector signed int
-vec_vcmpeqfp (vector float a1, vector float a2)
+inline __vector __bool int
+vec_vcmpeqfp (__vector float a1, __vector float a2)
{
- return (vector signed int) __builtin_altivec_vcmpeqfp ((vector float) a1, (vector float) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpeqfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vcmpequw */
-inline vector signed int
-vec_vcmpequw (vector signed int a1, vector signed int a2)
+inline __vector __bool int
+vec_vcmpequw (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vcmpequw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_vcmpequw (vector unsigned int a1, vector unsigned int a2)
+inline __vector __bool int
+vec_vcmpequw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vcmpequw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vcmpequh */
-inline vector signed short
-vec_vcmpequh (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_vcmpequh (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vcmpequh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_vcmpequh (vector unsigned short a1, vector unsigned short a2)
+inline __vector __bool short
+vec_vcmpequh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vcmpequh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vcmpequb */
-inline vector signed char
-vec_vcmpequb (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_vcmpequb (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vcmpequb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed char
-vec_vcmpequb (vector unsigned char a1, vector unsigned char a2)
+inline __vector __bool char
+vec_vcmpequb (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vcmpequb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_cmpge */
-inline vector signed int
-vec_cmpge (vector float a1, vector float a2)
+inline __vector __bool int
+vec_cmpge (__vector float a1, __vector float a2)
{
- return (vector signed int) __builtin_altivec_vcmpgefp ((vector float) a1, (vector float) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpgefp ((__vector float) a1, (__vector float) a2);
}
/* vec_cmpgt */
-inline vector signed char
-vec_cmpgt (vector unsigned char a1, vector unsigned char a2)
+inline __vector __bool char
+vec_cmpgt (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vcmpgtub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed char
-vec_cmpgt (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_cmpgt (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vcmpgtsb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_cmpgt (vector unsigned short a1, vector unsigned short a2)
+inline __vector __bool short
+vec_cmpgt (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __bool short) __builtin_altivec_vcmpgtuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_cmpgt (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_cmpgt (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __bool short) __builtin_altivec_vcmpgtsh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_cmpgt (vector unsigned int a1, vector unsigned int a2)
+inline __vector __bool int
+vec_cmpgt (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpgtuw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_cmpgt (vector signed int a1, vector signed int a2)
+inline __vector __bool int
+vec_cmpgt (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpgtsw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_cmpgt (vector float a1, vector float a2)
+inline __vector __bool int
+vec_cmpgt (__vector float a1, __vector float a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtfp ((vector float) a1, (vector float) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpgtfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vcmpgtfp */
-inline vector signed int
-vec_vcmpgtfp (vector float a1, vector float a2)
+inline __vector __bool int
+vec_vcmpgtfp (__vector float a1, __vector float a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtfp ((vector float) a1, (vector float) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpgtfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vcmpgtsw */
-inline vector signed int
-vec_vcmpgtsw (vector signed int a1, vector signed int a2)
+inline __vector __bool int
+vec_vcmpgtsw (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpgtsw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vcmpgtuw */
-inline vector signed int
-vec_vcmpgtuw (vector unsigned int a1, vector unsigned int a2)
+inline __vector __bool int
+vec_vcmpgtuw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpgtuw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vcmpgtsh */
-inline vector signed short
-vec_cmpgtsh (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_vcmpgtsh (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __bool short) __builtin_altivec_vcmpgtsh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vcmpgtuh */
-inline vector signed short
-vec_vcmpgtuh (vector unsigned short a1, vector unsigned short a2)
+inline __vector __bool short
+vec_vcmpgtuh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __bool short) __builtin_altivec_vcmpgtuh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vcmpgtsb */
-inline vector signed char
-vec_vcmpgtsb (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_vcmpgtsb (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vcmpgtsb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_vcmpgtub */
-inline vector signed char
-vec_vcmpgtub (vector unsigned char a1, vector unsigned char a2)
+inline __vector __bool char
+vec_vcmpgtub (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vcmpgtub ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_cmple */
-inline vector signed int
-vec_cmple (vector float a1, vector float a2)
+inline __vector __bool int
+vec_cmple (__vector float a1, __vector float a2)
{
- return (vector signed int) __builtin_altivec_vcmpgefp ((vector float) a2, (vector float) a1);
+ return (__vector __bool int) __builtin_altivec_vcmpgefp ((__vector float) a2, (__vector float) a1);
}
/* vec_cmplt */
-inline vector signed char
-vec_cmplt (vector unsigned char a1, vector unsigned char a2)
+inline __vector __bool char
+vec_cmplt (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) a2, (vector signed char) a1);
+ return (__vector __bool char) __builtin_altivec_vcmpgtub ((__vector signed char) a2, (__vector signed char) a1);
}
-inline vector signed char
-vec_cmplt (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_cmplt (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) a2, (vector signed char) a1);
+ return (__vector __bool char) __builtin_altivec_vcmpgtsb ((__vector signed char) a2, (__vector signed char) a1);
}
-inline vector signed short
-vec_cmplt (vector unsigned short a1, vector unsigned short a2)
+inline __vector __bool short
+vec_cmplt (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) a2, (vector signed short) a1);
+ return (__vector __bool short) __builtin_altivec_vcmpgtuh ((__vector signed short) a2, (__vector signed short) a1);
}
-inline vector signed short
-vec_cmplt (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_cmplt (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) a2, (vector signed short) a1);
+ return (__vector __bool short) __builtin_altivec_vcmpgtsh ((__vector signed short) a2, (__vector signed short) a1);
}
-inline vector signed int
-vec_cmplt (vector unsigned int a1, vector unsigned int a2)
+inline __vector __bool int
+vec_cmplt (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) a2, (vector signed int) a1);
+ return (__vector __bool int) __builtin_altivec_vcmpgtuw ((__vector signed int) a2, (__vector signed int) a1);
}
-inline vector signed int
-vec_cmplt (vector signed int a1, vector signed int a2)
+inline __vector __bool int
+vec_cmplt (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) a2, (vector signed int) a1);
+ return (__vector __bool int) __builtin_altivec_vcmpgtsw ((__vector signed int) a2, (__vector signed int) a1);
}
-inline vector signed int
-vec_cmplt (vector float a1, vector float a2)
+inline __vector __bool int
+vec_cmplt (__vector float a1, __vector float a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtfp ((vector float) a2, (vector float) a1);
+ return (__vector __bool int) __builtin_altivec_vcmpgtfp ((__vector float) a2, (__vector float) a1);
}
/* vec_ctf */
-inline vector float
-vec_ctf (vector unsigned int a1, const char a2)
+inline __vector float
+vec_ctf (__vector unsigned int a1, const int a2)
{
- return (vector float) __builtin_altivec_vcfux ((vector signed int) a1, a2);
+ return (__vector float) __builtin_altivec_vcfux ((__vector signed int) a1, a2);
}
-inline vector float
-vec_ctf (vector signed int a1, const char a2)
+inline __vector float
+vec_ctf (__vector signed int a1, const int a2)
{
- return (vector float) __builtin_altivec_vcfsx ((vector signed int) a1, a2);
+ return (__vector float) __builtin_altivec_vcfsx ((__vector signed int) a1, a2);
}
/* vec_vcfsx */
-inline vector float
-vec_vcfsx (vector signed int a1, const char a2)
+inline __vector float
+vec_vcfsx (__vector signed int a1, const int a2)
{
- return (vector float) __builtin_altivec_vcfsx ((vector signed int) a1, a2);
+ return (__vector float) __builtin_altivec_vcfsx ((__vector signed int) a1, a2);
}
/* vec_vcfux */
-inline vector float
-vec_vcfux (vector unsigned int a1, const char a2)
+inline __vector float
+vec_vcfux (__vector unsigned int a1, const int a2)
{
- return (vector float) __builtin_altivec_vcfux ((vector signed int) a1, a2);
+ return (__vector float) __builtin_altivec_vcfux ((__vector signed int) a1, a2);
}
/* vec_cts */
-inline vector signed int
-vec_cts (vector float a1, const char a2)
+inline __vector signed int
+vec_cts (__vector float a1, const int a2)
{
- return (vector signed int) __builtin_altivec_vctsxs ((vector float) a1, a2);
+ return (__vector signed int) __builtin_altivec_vctsxs ((__vector float) a1, a2);
}
/* vec_ctu */
-inline vector unsigned int
-vec_ctu (vector float a1, const char a2)
+inline __vector unsigned int
+vec_ctu (__vector float a1, const int a2)
{
- return (vector unsigned int) __builtin_altivec_vctuxs ((vector float) a1, a2);
+ return (__vector unsigned int) __builtin_altivec_vctuxs ((__vector float) a1, a2);
}
/* vec_dss */
inline void
-vec_dss (const char a1)
+vec_dss (const int a1)
{
__builtin_altivec_dss (a1);
}
@@ -1208,97 +1548,121 @@ vec_dssall (void)
/* vec_dst */
inline void
-vec_dst (vector unsigned char *a1, int a2, const char a3)
+vec_dst (const __vector unsigned char *a1, int a2, const int a3)
+{
+ __builtin_altivec_dst ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dst (const __vector signed char *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (vector signed char *a1, int a2, const char a3)
+vec_dst (const __vector __bool char *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (vector unsigned short *a1, int a2, const char a3)
+vec_dst (const __vector unsigned short *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (vector signed short *a1, int a2, const char a3)
+vec_dst (const __vector signed short *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (vector unsigned int *a1, int a2, const char a3)
+vec_dst (const __vector __bool short *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (vector signed int *a1, int a2, const char a3)
+vec_dst (const __vector __pixel *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (vector float *a1, int a2, const char a3)
+vec_dst (const __vector unsigned int *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (unsigned char *a1, int a2, const char a3)
+vec_dst (const __vector signed int *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (signed char *a1, int a2, const char a3)
+vec_dst (const __vector __bool int *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (unsigned short *a1, int a2, const char a3)
+vec_dst (const __vector float *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (signed short *a1, int a2, const char a3)
+vec_dst (const unsigned char *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (unsigned int *a1, int a2, const char a3)
+vec_dst (const signed char *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (signed int *a1, int a2, const char a3)
+vec_dst (const unsigned short *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (unsigned long *a1, int a2, const char a3)
+vec_dst (const short *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (signed long *a1, int a2, const char a3)
+vec_dst (const unsigned int *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (float *a1, int a2, const char a3)
+vec_dst (const int *a1, int a2, const int a3)
+{
+ __builtin_altivec_dst ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dst (const unsigned long *a1, int a2, const int a3)
+{
+ __builtin_altivec_dst ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dst (const long *a1, int a2, const int a3)
+{
+ __builtin_altivec_dst ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dst (const float *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
@@ -1306,97 +1670,121 @@ vec_dst (float *a1, int a2, const char a3)
/* vec_dstst */
inline void
-vec_dstst (vector unsigned char *a1, int a2, const char a3)
+vec_dstst (const __vector unsigned char *a1, int a2, const int a3)
+{
+ __builtin_altivec_dstst ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dstst (const __vector signed char *a1, int a2, const int a3)
+{
+ __builtin_altivec_dstst ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dstst (const __vector __bool char *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (vector signed char *a1, int a2, const char a3)
+vec_dstst (const __vector unsigned short *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (vector unsigned short *a1, int a2, const char a3)
+vec_dstst (const __vector signed short *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (vector signed short *a1, int a2, const char a3)
+vec_dstst (const __vector __bool short *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (vector unsigned int *a1, int a2, const char a3)
+vec_dstst (const __vector __pixel *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (vector signed int *a1, int a2, const char a3)
+vec_dstst (const __vector unsigned int *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (vector float *a1, int a2, const char a3)
+vec_dstst (const __vector signed int *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (unsigned char *a1, int a2, const char a3)
+vec_dstst (const __vector __bool int *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (signed char *a1, int a2, const char a3)
+vec_dstst (const __vector float *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (unsigned short *a1, int a2, const char a3)
+vec_dstst (const unsigned char *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (signed short *a1, int a2, const char a3)
+vec_dstst (const signed char *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (unsigned int *a1, int a2, const char a3)
+vec_dstst (const unsigned short *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (signed int *a1, int a2, const char a3)
+vec_dstst (const short *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (unsigned long *a1, int a2, const char a3)
+vec_dstst (const unsigned int *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (signed long *a1, int a2, const char a3)
+vec_dstst (const int *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (float *a1, int a2, const char a3)
+vec_dstst (const unsigned long *a1, int a2, const int a3)
+{
+ __builtin_altivec_dstst ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dstst (const long *a1, int a2, const int a3)
+{
+ __builtin_altivec_dstst ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dstst (const float *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
@@ -1404,97 +1792,121 @@ vec_dstst (float *a1, int a2, const char a3)
/* vec_dststt */
inline void
-vec_dststt (vector unsigned char *a1, int a2, const char a3)
+vec_dststt (const __vector unsigned char *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (vector signed char *a1, int a2, const char a3)
+vec_dststt (const __vector signed char *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (vector unsigned short *a1, int a2, const char a3)
+vec_dststt (const __vector __bool char *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (vector signed short *a1, int a2, const char a3)
+vec_dststt (const __vector unsigned short *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (vector unsigned int *a1, int a2, const char a3)
+vec_dststt (const __vector signed short *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (vector signed int *a1, int a2, const char a3)
+vec_dststt (const __vector __bool short *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (vector float *a1, int a2, const char a3)
+vec_dststt (const __vector __pixel *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (unsigned char *a1, int a2, const char a3)
+vec_dststt (const __vector unsigned int *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (signed char *a1, int a2, const char a3)
+vec_dststt (const __vector signed int *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (unsigned short *a1, int a2, const char a3)
+vec_dststt (const __vector __bool int *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (signed short *a1, int a2, const char a3)
+vec_dststt (const __vector float *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (unsigned int *a1, int a2, const char a3)
+vec_dststt (const unsigned char *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (signed int *a1, int a2, const char a3)
+vec_dststt (const signed char *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (unsigned long *a1, int a2, const char a3)
+vec_dststt (const unsigned short *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (signed long *a1, int a2, const char a3)
+vec_dststt (const short *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (float *a1, int a2, const char a3)
+vec_dststt (const unsigned int *a1, int a2, const int a3)
+{
+ __builtin_altivec_dststt ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dststt (const int *a1, int a2, const int a3)
+{
+ __builtin_altivec_dststt ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dststt (const unsigned long *a1, int a2, const int a3)
+{
+ __builtin_altivec_dststt ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dststt (const long *a1, int a2, const int a3)
+{
+ __builtin_altivec_dststt ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dststt (const float *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
@@ -1502,3561 +1914,4931 @@ vec_dststt (float *a1, int a2, const char a3)
/* vec_dstt */
inline void
-vec_dstt (vector unsigned char *a1, int a2, const char a3)
+vec_dstt (const __vector unsigned char *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (vector signed char *a1, int a2, const char a3)
+vec_dstt (const __vector signed char *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (vector unsigned short *a1, int a2, const char a3)
+vec_dstt (const __vector __bool char *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (vector signed short *a1, int a2, const char a3)
+vec_dstt (const __vector unsigned short *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (vector unsigned int *a1, int a2, const char a3)
+vec_dstt (const __vector signed short *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (vector signed int *a1, int a2, const char a3)
+vec_dstt (const __vector __bool short *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (vector float *a1, int a2, const char a3)
+vec_dstt (const __vector __pixel *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (unsigned char *a1, int a2, const char a3)
+vec_dstt (const __vector unsigned int *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (signed char *a1, int a2, const char a3)
+vec_dstt (const __vector signed int *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (unsigned short *a1, int a2, const char a3)
+vec_dstt (const __vector __bool int *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (signed short *a1, int a2, const char a3)
+vec_dstt (const __vector float *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (unsigned int *a1, int a2, const char a3)
+vec_dstt (const unsigned char *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (signed int *a1, int a2, const char a3)
+vec_dstt (const signed char *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (unsigned long *a1, int a2, const char a3)
+vec_dstt (const unsigned short *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (signed long *a1, int a2, const char a3)
+vec_dstt (const short *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (float *a1, int a2, const char a3)
+vec_dstt (const unsigned int *a1, int a2, const int a3)
+{
+ __builtin_altivec_dstt ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dstt (const int *a1, int a2, const int a3)
+{
+ __builtin_altivec_dstt ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dstt (const unsigned long *a1, int a2, const int a3)
+{
+ __builtin_altivec_dstt ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dstt (const long *a1, int a2, const int a3)
+{
+ __builtin_altivec_dstt ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dstt (const float *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
/* vec_expte */
-inline vector float
-vec_expte (vector float a1)
+inline __vector float
+vec_expte (__vector float a1)
{
- return (vector float) __builtin_altivec_vexptefp ((vector float) a1);
+ return (__vector float) __builtin_altivec_vexptefp ((__vector float) a1);
}
/* vec_floor */
-inline vector float
-vec_floor (vector float a1)
+inline __vector float
+vec_floor (__vector float a1)
{
- return (vector float) __builtin_altivec_vrfim ((vector float) a1);
+ return (__vector float) __builtin_altivec_vrfim ((__vector float) a1);
}
/* vec_ld */
-inline vector float
-vec_ld (int a1, vector float *a2)
+inline __vector float
+vec_ld (int a1, const __vector float *a2)
+{
+ return (__vector float) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline __vector float
+vec_ld (int a1, const float *a2)
+{
+ return (__vector float) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline __vector __bool int
+vec_ld (int a1, const __vector __bool int *a2)
{
- return (vector float) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector __bool int) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector float
-vec_ld (int a1, float *a2)
+inline __vector signed int
+vec_ld (int a1, const __vector signed int *a2)
{
- return (vector float) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector signed int
-vec_ld (int a1, vector signed int *a2)
+inline __vector signed int
+vec_ld (int a1, const int *a2)
{
- return (vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector signed int
-vec_ld (int a1, signed int *a2)
+inline __vector signed int
+vec_ld (int a1, const long *a2)
{
- return (vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector signed int
-vec_ld (int a1, signed long *a2)
+inline __vector unsigned int
+vec_ld (int a1, const __vector unsigned int *a2)
{
- return (vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector unsigned int
-vec_ld (int a1, vector unsigned int *a2)
+inline __vector unsigned int
+vec_ld (int a1, const unsigned int *a2)
{
- return (vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector unsigned int
-vec_ld (int a1, unsigned int *a2)
+inline __vector unsigned int
+vec_ld (int a1, const unsigned long *a2)
{
- return (vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector unsigned int
-vec_ld (int a1, unsigned long *a2)
+inline __vector __bool short
+vec_ld (int a1, const __vector __bool short *a2)
{
- return (vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector __bool short) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector signed short
-vec_ld (int a1, vector signed short *a2)
+inline __vector __pixel
+vec_ld (int a1, const __vector __pixel *a2)
{
- return (vector signed short) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector __pixel) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector signed short
-vec_ld (int a1, signed short *a2)
+inline __vector signed short
+vec_ld (int a1, const __vector signed short *a2)
{
- return (vector signed short) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector signed short) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector unsigned short
-vec_ld (int a1, vector unsigned short *a2)
+inline __vector signed short
+vec_ld (int a1, const short *a2)
{
- return (vector unsigned short) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector signed short) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector unsigned short
-vec_ld (int a1, unsigned short *a2)
+inline __vector unsigned short
+vec_ld (int a1, const __vector unsigned short *a2)
{
- return (vector unsigned short) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector unsigned short) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector signed char
-vec_ld (int a1, vector signed char *a2)
+inline __vector unsigned short
+vec_ld (int a1, const unsigned short *a2)
{
- return (vector signed char) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector unsigned short) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector signed char
-vec_ld (int a1, signed char *a2)
+inline __vector __bool char
+vec_ld (int a1, const __vector __bool char *a2)
{
- return (vector signed char) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector __bool char) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector unsigned char
-vec_ld (int a1, vector unsigned char *a2)
+inline __vector signed char
+vec_ld (int a1, const __vector signed char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector signed char) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector unsigned char
-vec_ld (int a1, unsigned char *a2)
+inline __vector signed char
+vec_ld (int a1, const signed char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector signed char) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline __vector unsigned char
+vec_ld (int a1, const __vector unsigned char *a2)
+{
+ return (__vector unsigned char) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline __vector unsigned char
+vec_ld (int a1, const unsigned char *a2)
+{
+ return (__vector unsigned char) __builtin_altivec_lvx (a1, (void *) a2);
}
/* vec_lde */
-inline vector signed char
-vec_lde (int a1, signed char *a2)
+inline __vector signed char
+vec_lde (int a1, const signed char *a2)
{
- return (vector signed char) __builtin_altivec_lvebx (a1, (void *) a2);
+ return (__vector signed char) __builtin_altivec_lvebx (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lde (int a1, unsigned char *a2)
+inline __vector unsigned char
+vec_lde (int a1, const unsigned char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvebx (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvebx (a1, (void *) a2);
}
-inline vector signed short
-vec_lde (int a1, signed short *a2)
+inline __vector signed short
+vec_lde (int a1, const short *a2)
{
- return (vector signed short) __builtin_altivec_lvehx (a1, (void *) a2);
+ return (__vector signed short) __builtin_altivec_lvehx (a1, (void *) a2);
}
-inline vector unsigned short
-vec_lde (int a1, unsigned short *a2)
+inline __vector unsigned short
+vec_lde (int a1, const unsigned short *a2)
{
- return (vector unsigned short) __builtin_altivec_lvehx (a1, (void *) a2);
+ return (__vector unsigned short) __builtin_altivec_lvehx (a1, (void *) a2);
}
-inline vector float
-vec_lde (int a1, float *a2)
+inline __vector float
+vec_lde (int a1, const float *a2)
{
- return (vector float) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector float) __builtin_altivec_lvewx (a1, (void *) a2);
}
-inline vector signed int
-vec_lde (int a1, signed int *a2)
+inline __vector signed int
+vec_lde (int a1, const int *a2)
{
- return (vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
}
-inline vector unsigned int
-vec_lde (int a1, unsigned int *a2)
+inline __vector unsigned int
+vec_lde (int a1, const unsigned int *a2)
{
- return (vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
}
-inline vector signed int
-vec_lde (int a1, signed long *a2)
+inline __vector signed int
+vec_lde (int a1, const long *a2)
{
- return (vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
}
-inline vector unsigned int
-vec_lde (int a1, unsigned long *a2)
+inline __vector unsigned int
+vec_lde (int a1, const unsigned long *a2)
{
- return (vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
}
/* vec_lvewx */
-inline vector float
+inline __vector float
vec_lvewx (int a1, float *a2)
{
- return (vector float) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector float) __builtin_altivec_lvewx (a1, (void *) a2);
}
-inline vector signed int
-vec_lvewx (int a1, signed int *a2)
+inline __vector signed int
+vec_lvewx (int a1, int *a2)
{
- return (vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
}
-inline vector unsigned int
+inline __vector unsigned int
vec_lvewx (int a1, unsigned int *a2)
{
- return (vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
}
-inline vector signed int
-vec_lvewx (int a1, signed long *a2)
+inline __vector signed int
+vec_lvewx (int a1, long *a2)
{
- return (vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
}
-inline vector unsigned int
+inline __vector unsigned int
vec_lvewx (int a1, unsigned long *a2)
{
- return (vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
}
/* vec_lvehx */
-inline vector signed short
-vec_lvehx (int a1, signed short *a2)
+inline __vector signed short
+vec_lvehx (int a1, short *a2)
{
- return (vector signed short) __builtin_altivec_lvehx (a1, (void *) a2);
+ return (__vector signed short) __builtin_altivec_lvehx (a1, (void *) a2);
}
-inline vector unsigned short
+inline __vector unsigned short
vec_lvehx (int a1, unsigned short *a2)
{
- return (vector unsigned short) __builtin_altivec_lvehx (a1, (void *) a2);
+ return (__vector unsigned short) __builtin_altivec_lvehx (a1, (void *) a2);
}
/* vec_lvebx */
-inline vector signed char
+inline __vector signed char
vec_lvebx (int a1, signed char *a2)
{
- return (vector signed char) __builtin_altivec_lvebx (a1, (void *) a2);
+ return (__vector signed char) __builtin_altivec_lvebx (a1, (void *) a2);
}
-inline vector unsigned char
+inline __vector unsigned char
vec_lvebx (int a1, unsigned char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvebx (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvebx (a1, (void *) a2);
}
/* vec_ldl */
-inline vector float
-vec_ldl (int a1, vector float *a2)
+inline __vector float
+vec_ldl (int a1, const __vector float *a2)
+{
+ return (__vector float) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline __vector float
+vec_ldl (int a1, const float *a2)
+{
+ return (__vector float) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline __vector __bool int
+vec_ldl (int a1, const __vector __bool int *a2)
+{
+ return (__vector __bool int) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline __vector signed int
+vec_ldl (int a1, const __vector signed int *a2)
{
- return (vector float) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector float
-vec_ldl (int a1, float *a2)
+inline __vector signed int
+vec_ldl (int a1, const int *a2)
{
- return (vector float) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector signed int
-vec_ldl (int a1, vector signed int *a2)
+inline __vector signed int
+vec_ldl (int a1, const long *a2)
{
- return (vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector signed int
-vec_ldl (int a1, signed int *a2)
+inline __vector unsigned int
+vec_ldl (int a1, const __vector unsigned int *a2)
{
- return (vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector signed int
-vec_ldl (int a1, signed long *a2)
+inline __vector unsigned int
+vec_ldl (int a1, const unsigned int *a2)
{
- return (vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector unsigned int
-vec_ldl (int a1, vector unsigned int *a2)
+inline __vector unsigned int
+vec_ldl (int a1, const unsigned long *a2)
{
- return (vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector unsigned int
-vec_ldl (int a1, unsigned int *a2)
+inline __vector __bool short
+vec_ldl (int a1, const __vector __bool short *a2)
{
- return (vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector __bool short) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector unsigned int
-vec_ldl (int a1, unsigned long *a2)
+inline __vector __pixel
+vec_ldl (int a1, const __vector __pixel *a2)
{
- return (vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector __pixel) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector signed short
-vec_ldl (int a1, vector signed short *a2)
+inline __vector signed short
+vec_ldl (int a1, const __vector signed short *a2)
{
- return (vector signed short) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector signed short) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector signed short
-vec_ldl (int a1, signed short *a2)
+inline __vector signed short
+vec_ldl (int a1, const short *a2)
{
- return (vector signed short) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector signed short) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector unsigned short
-vec_ldl (int a1, vector unsigned short *a2)
+inline __vector unsigned short
+vec_ldl (int a1, const __vector unsigned short *a2)
{
- return (vector unsigned short) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector unsigned short) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector unsigned short
-vec_ldl (int a1, unsigned short *a2)
+inline __vector unsigned short
+vec_ldl (int a1, const unsigned short *a2)
{
- return (vector unsigned short) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector unsigned short) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector signed char
-vec_ldl (int a1, vector signed char *a2)
+inline __vector __bool char
+vec_ldl (int a1, const __vector __bool char *a2)
{
- return (vector signed char) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector __bool char) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector signed char
-vec_ldl (int a1, signed char *a2)
+inline __vector signed char
+vec_ldl (int a1, const __vector signed char *a2)
{
- return (vector signed char) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector signed char) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_ldl (int a1, vector unsigned char *a2)
+inline __vector signed char
+vec_ldl (int a1, const signed char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector signed char) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_ldl (int a1, unsigned char *a2)
+inline __vector unsigned char
+vec_ldl (int a1, const __vector unsigned char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline __vector unsigned char
+vec_ldl (int a1, const unsigned char *a2)
+{
+ return (__vector unsigned char) __builtin_altivec_lvxl (a1, (void *) a2);
}
/* vec_loge */
-inline vector float
-vec_loge (vector float a1)
+inline __vector float
+vec_loge (__vector float a1)
{
- return (vector float) __builtin_altivec_vlogefp ((vector float) a1);
+ return (__vector float) __builtin_altivec_vlogefp ((__vector float) a1);
}
/* vec_lvsl */
-inline vector unsigned char
-vec_lvsl (int a1, unsigned char *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile unsigned char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsl (int a1, signed char *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile signed char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsl (int a1, unsigned short *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile unsigned short *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsl (int a1, signed short *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile short *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsl (int a1, unsigned int *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile unsigned int *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsl (int a1, signed int *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile int *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsl (int a1, unsigned long *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile unsigned long *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsl (int a1, signed long *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile long *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsl (int a1, float *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile float *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
/* vec_lvsr */
-inline vector unsigned char
-vec_lvsr (int a1, unsigned char *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile unsigned char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsr (int a1, signed char *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile signed char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsr (int a1, unsigned short *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile unsigned short *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsr (int a1, signed short *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile short *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsr (int a1, unsigned int *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile unsigned int *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsr (int a1, signed int *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile int *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsr (int a1, unsigned long *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile unsigned long *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsr (int a1, signed long *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile long *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsr (int a1, float *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile float *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
/* vec_madd */
-inline vector float
-vec_madd (vector float a1, vector float a2, vector float a3)
+inline __vector float
+vec_madd (__vector float a1, __vector float a2, __vector float a3)
{
- return (vector float) __builtin_altivec_vmaddfp ((vector float) a1, (vector float) a2, (vector float) a3);
+ return (__vector float) __builtin_altivec_vmaddfp ((__vector float) a1, (__vector float) a2, (__vector float) a3);
}
-
/* vec_madds */
-inline vector signed short
-vec_madds (vector signed short a1, vector signed short a2, vector signed short a3)
+inline __vector signed short
+vec_madds (__vector signed short a1, __vector signed short a2, __vector signed short a3)
{
- return (vector signed short) __builtin_altivec_vmhaddshs ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+ return (__vector signed short) __builtin_altivec_vmhaddshs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
}
/* vec_max */
-inline vector unsigned char
-vec_max (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_max (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_max (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_max (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_max (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_max (vector unsigned char a1, vector unsigned char a2)
+inline __vector signed char
+vec_max (__vector __bool char a1, __vector signed char a2)
{
- return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed char
-vec_max (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_max (__vector signed char a1, __vector __bool char a2)
{
- return (vector signed char) __builtin_altivec_vmaxsb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_max (vector signed short a1, vector unsigned short a2)
+inline __vector signed char
+vec_max (__vector signed char a1, __vector signed char a2)
{
- return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_max (vector unsigned short a1, vector signed short a2)
+inline __vector unsigned short
+vec_max (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_max (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_max (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_max (vector signed short a1, vector signed short a2)
+inline __vector unsigned short
+vec_max (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vmaxsh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_max (vector signed int a1, vector unsigned int a2)
+inline __vector signed short
+vec_max (__vector __bool short a1, __vector signed short a2)
{
- return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_max (vector unsigned int a1, vector signed int a2)
+inline __vector signed short
+vec_max (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_max (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed short
+vec_max (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_max (vector signed int a1, vector signed int a2)
+inline __vector unsigned int
+vec_max (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vmaxsw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_max (vector float a1, vector float a2)
+inline __vector unsigned int
+vec_max (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vmaxfp ((vector float) a1, (vector float) a2);
+ return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_max (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_max (__vector __bool int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_max (__vector signed int a1, __vector __bool int a2)
+{
+ return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_max (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_max (__vector float a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vmaxfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vmaxfp */
-inline vector float
-vec_vmaxfp (vector float a1, vector float a2)
+inline __vector float
+vec_vmaxfp (__vector float a1, __vector float a2)
{
- return (vector float) __builtin_altivec_vmaxfp ((vector float) a1, (vector float) a2);
+ return (__vector float) __builtin_altivec_vmaxfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vmaxsw */
-inline vector signed int
-vec_vmaxsw (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_vmaxsw (__vector __bool int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_vmaxsw (__vector signed int a1, __vector __bool int a2)
{
- return (vector signed int) __builtin_altivec_vmaxsw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_vmaxsw (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vmaxuw */
-inline vector unsigned int
-vec_vmaxuw (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vmaxuw (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vmaxuw (vector unsigned int a1, vector signed int a2)
+inline __vector unsigned int
+vec_vmaxuw (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vmaxuw (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vmaxuw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vmaxsh */
-inline vector signed short
-vec_vmaxsh (vector signed short a1, vector signed short a2)
+inline __vector signed short
+vec_vmaxsh (__vector __bool short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vmaxsh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vmaxsh (__vector signed short a1, __vector __bool short a2)
+{
+ return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vmaxsh (__vector signed short a1, __vector signed short a2)
+{
+ return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmaxuh */
-inline vector unsigned short
-vec_vmaxuh (vector signed short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vmaxuh (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vmaxuh (vector unsigned short a1, vector signed short a2)
+inline __vector unsigned short
+vec_vmaxuh (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vmaxuh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vmaxuh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmaxsb */
-inline vector signed char
-vec_vmaxsb (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_vmaxsb (__vector __bool char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vmaxsb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vmaxsb (__vector signed char a1, __vector __bool char a2)
+{
+ return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vmaxsb (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_vmaxub */
-inline vector unsigned char
-vec_vmaxub (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vmaxub (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vmaxub (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_vmaxub (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vmaxub (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vmaxub (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_mergeh */
-inline vector signed char
-vec_mergeh (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_mergeh (__vector __bool char a1, __vector __bool char a2)
+{
+ return (__vector __bool char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_mergeh (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_mergeh (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector __bool short
+vec_mergeh (__vector __bool short a1, __vector __bool short a2)
+{
+ return (__vector __bool short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector __pixel
+vec_mergeh (__vector __pixel a1, __vector __pixel a2)
{
- return (vector signed char) __builtin_altivec_vmrghb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __pixel) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_mergeh (vector unsigned char a1, vector unsigned char a2)
+inline __vector signed short
+vec_mergeh (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned char) __builtin_altivec_vmrghb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_mergeh (vector signed short a1, vector signed short a2)
+inline __vector unsigned short
+vec_mergeh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vmrghh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_mergeh (vector unsigned short a1, vector unsigned short a2)
+inline __vector float
+vec_mergeh (__vector float a1, __vector float a2)
{
- return (vector unsigned short) __builtin_altivec_vmrghh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector float) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_mergeh (vector float a1, vector float a2)
+inline __vector __bool int
+vec_mergeh (__vector __bool int a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_mergeh (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_mergeh (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_mergeh (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_mergeh (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vmrghw */
-inline vector float
-vec_vmrghw (vector float a1, vector float a2)
+inline __vector float
+vec_vmrghw (__vector float a1, __vector float a2)
{
- return (vector float) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector float) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_vmrghw (vector signed int a1, vector signed int a2)
+inline __vector __bool int
+vec_vmrghw (__vector __bool int a1, __vector __bool int a2)
{
- return (vector signed int) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vmrghw (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed int
+vec_vmrghw (__vector signed int a1, __vector signed int a2)
{
- return (vector unsigned int) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_vmrghw (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vmrghh */
-inline vector signed short
-vec_vmrghh (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_vmrghh (__vector __bool short a1, __vector __bool short a2)
+{
+ return (__vector __bool short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vmrghh (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vmrghh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vmrghh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vmrghh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vmrghh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector __pixel
+vec_vmrghh (__vector __pixel a1, __vector __pixel a2)
+{
+ return (__vector __pixel) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmrghb */
-inline vector signed char
-vec_vmrghb (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_vmrghb (__vector __bool char a1, __vector __bool char a2)
+{
+ return (__vector __bool char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vmrghb (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vmrghb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vmrghb (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vmrghb (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vmrghb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_mergel */
-inline vector signed char
-vec_mergel (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_mergel (__vector __bool char a1, __vector __bool char a2)
+{
+ return (__vector __bool char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_mergel (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_mergel (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector __bool short
+vec_mergel (__vector __bool short a1, __vector __bool short a2)
+{
+ return (__vector __bool short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector __pixel
+vec_mergel (__vector __pixel a1, __vector __pixel a2)
{
- return (vector signed char) __builtin_altivec_vmrglb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __pixel) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_mergel (vector unsigned char a1, vector unsigned char a2)
+inline __vector signed short
+vec_mergel (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned char) __builtin_altivec_vmrglb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_mergel (vector signed short a1, vector signed short a2)
+inline __vector unsigned short
+vec_mergel (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vmrglh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_mergel (vector unsigned short a1, vector unsigned short a2)
+inline __vector float
+vec_mergel (__vector float a1, __vector float a2)
{
- return (vector unsigned short) __builtin_altivec_vmrglh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector float) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_mergel (vector float a1, vector float a2)
+inline __vector __bool int
+vec_mergel (__vector __bool int a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_mergel (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_mergel (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_mergel (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_mergel (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vmrglw */
-inline vector float
-vec_vmrglw (vector float a1, vector float a2)
+inline __vector float
+vec_vmrglw (__vector float a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_vmrglw (__vector signed int a1, __vector signed int a2)
{
- return (vector float) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_vmrglw (vector signed int a1, vector signed int a2)
+inline __vector unsigned int
+vec_vmrglw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vmrglw (vector unsigned int a1, vector unsigned int a2)
+inline __vector __bool int
+vec_vmrglw (__vector __bool int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vmrglh */
-inline vector signed short
-vec_vmrglh (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_vmrglh (__vector __bool short a1, __vector __bool short a2)
+{
+ return (__vector __bool short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vmrglh (__vector signed short a1, __vector signed short a2)
+{
+ return (__vector signed short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector unsigned short
+vec_vmrglh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vmrglh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vmrglh (vector unsigned short a1, vector unsigned short a2)
+inline __vector __pixel
+vec_vmrglh (__vector __pixel a1, __vector __pixel a2)
{
- return (vector unsigned short) __builtin_altivec_vmrglh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __pixel) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmrglb */
-inline vector signed char
-vec_vmrglb (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_vmrglb (__vector __bool char a1, __vector __bool char a2)
{
- return (vector signed char) __builtin_altivec_vmrglb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vmrglb (vector unsigned char a1, vector unsigned char a2)
+inline __vector signed char
+vec_vmrglb (__vector signed char a1, __vector signed char a2)
{
- return (vector unsigned char) __builtin_altivec_vmrglb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_vmrglb (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_mfvscr */
-inline vector unsigned short
+inline __vector unsigned short
vec_mfvscr (void)
{
- return (vector unsigned short) __builtin_altivec_mfvscr ();
+ return (__vector unsigned short) __builtin_altivec_mfvscr ();
}
/* vec_min */
-inline vector unsigned char
-vec_min (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_min (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_min (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_min (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_min (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_min (vector unsigned char a1, vector unsigned char a2)
+inline __vector signed char
+vec_min (__vector __bool char a1, __vector signed char a2)
{
- return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed char
-vec_min (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_min (__vector signed char a1, __vector __bool char a2)
{
- return (vector signed char) __builtin_altivec_vminsb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_min (vector signed short a1, vector unsigned short a2)
+inline __vector signed char
+vec_min (__vector signed char a1, __vector signed char a2)
{
- return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_min (vector unsigned short a1, vector signed short a2)
+inline __vector unsigned short
+vec_min (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_min (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_min (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_min (vector signed short a1, vector signed short a2)
+inline __vector unsigned short
+vec_min (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vminsh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_min (vector signed int a1, vector unsigned int a2)
+inline __vector signed short
+vec_min (__vector __bool short a1, __vector signed short a2)
{
- return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_min (vector unsigned int a1, vector signed int a2)
+inline __vector signed short
+vec_min (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_min (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed short
+vec_min (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_min (vector signed int a1, vector signed int a2)
+inline __vector unsigned int
+vec_min (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vminsw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_min (vector float a1, vector float a2)
+inline __vector unsigned int
+vec_min (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vminfp ((vector float) a1, (vector float) a2);
+ return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_min (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_min (__vector __bool int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_min (__vector signed int a1, __vector __bool int a2)
+{
+ return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_min (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_min (__vector float a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vminfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vminfp */
-inline vector float
-vec_vminfp (vector float a1, vector float a2)
+inline __vector float
+vec_vminfp (__vector float a1, __vector float a2)
{
- return (vector float) __builtin_altivec_vminfp ((vector float) a1, (vector float) a2);
+ return (__vector float) __builtin_altivec_vminfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vminsw */
-inline vector signed int
-vec_vminsw (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_vminsw (__vector __bool int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_vminsw (__vector signed int a1, __vector __bool int a2)
{
- return (vector signed int) __builtin_altivec_vminsw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_vminsw (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vminuw */
-inline vector unsigned int
-vec_vminuw (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vminuw (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vminuw (vector unsigned int a1, vector signed int a2)
+inline __vector unsigned int
+vec_vminuw (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vminuw (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vminuw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vminsh */
-inline vector signed short
-vec_vminsh (vector signed short a1, vector signed short a2)
+inline __vector signed short
+vec_vminsh (__vector __bool short a1, __vector signed short a2)
+{
+ return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vminsh (__vector signed short a1, __vector __bool short a2)
{
- return (vector signed short) __builtin_altivec_vminsh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vminsh (__vector signed short a1, __vector signed short a2)
+{
+ return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vminuh */
-inline vector unsigned short
-vec_vminuh (vector signed short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vminuh (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vminuh (vector unsigned short a1, vector signed short a2)
+inline __vector unsigned short
+vec_vminuh (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vminuh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vminuh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vminsb */
-inline vector signed char
-vec_vminsb (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_vminsb (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vminsb (__vector signed char a1, __vector __bool char a2)
{
- return (vector signed char) __builtin_altivec_vminsb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vminsb (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_vminub */
-inline vector unsigned char
-vec_vminub (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vminub (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vminub (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_vminub (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vminub (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vminub (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_mladd */
-inline vector signed short
-vec_mladd (vector signed short a1, vector signed short a2, vector signed short a3)
+inline __vector signed short
+vec_mladd (__vector signed short a1, __vector signed short a2, __vector signed short a3)
{
- return (vector signed short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+ return (__vector signed short) __builtin_altivec_vmladduhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
}
-inline vector signed short
-vec_mladd (vector signed short a1, vector unsigned short a2, vector unsigned short a3)
+inline __vector signed short
+vec_mladd (__vector signed short a1, __vector unsigned short a2, __vector unsigned short a3)
{
- return (vector signed short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+ return (__vector signed short) __builtin_altivec_vmladduhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
}
-inline vector signed short
-vec_mladd (vector unsigned short a1, vector signed short a2, vector signed short a3)
+inline __vector signed short
+vec_mladd (__vector unsigned short a1, __vector signed short a2, __vector signed short a3)
{
- return (vector signed short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+ return (__vector signed short) __builtin_altivec_vmladduhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
}
-inline vector unsigned short
-vec_mladd (vector unsigned short a1, vector unsigned short a2, vector unsigned short a3)
+inline __vector unsigned short
+vec_mladd (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned short a3)
{
- return (vector unsigned short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+ return (__vector unsigned short) __builtin_altivec_vmladduhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
}
/* vec_mradds */
-inline vector signed short
-vec_mradds (vector signed short a1, vector signed short a2, vector signed short a3)
+inline __vector signed short
+vec_mradds (__vector signed short a1, __vector signed short a2, __vector signed short a3)
{
- return (vector signed short) __builtin_altivec_vmhraddshs ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+ return (__vector signed short) __builtin_altivec_vmhraddshs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
}
/* vec_msum */
-inline vector unsigned int
-vec_msum (vector unsigned char a1, vector unsigned char a2, vector unsigned int a3)
+inline __vector unsigned int
+vec_msum (__vector unsigned char a1, __vector unsigned char a2, __vector unsigned int a3)
{
- return (vector unsigned int) __builtin_altivec_vmsumubm ((vector signed char) a1, (vector signed char) a2, (vector signed int) a3);
+ return (__vector unsigned int) __builtin_altivec_vmsumubm ((__vector signed char) a1, (__vector signed char) a2, (__vector signed int) a3);
}
-inline vector signed int
-vec_msum (vector signed char a1, vector unsigned char a2, vector signed int a3)
+inline __vector signed int
+vec_msum (__vector signed char a1, __vector unsigned char a2, __vector signed int a3)
{
- return (vector signed int) __builtin_altivec_vmsummbm ((vector signed char) a1, (vector signed char) a2, (vector signed int) a3);
+ return (__vector signed int) __builtin_altivec_vmsummbm ((__vector signed char) a1, (__vector signed char) a2, (__vector signed int) a3);
}
-inline vector unsigned int
-vec_msum (vector unsigned short a1, vector unsigned short a2, vector unsigned int a3)
+inline __vector unsigned int
+vec_msum (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned int a3)
{
- return (vector unsigned int) __builtin_altivec_vmsumuhm ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+ return (__vector unsigned int) __builtin_altivec_vmsumuhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
}
-inline vector signed int
-vec_msum (vector signed short a1, vector signed short a2, vector signed int a3)
+inline __vector signed int
+vec_msum (__vector signed short a1, __vector signed short a2, __vector signed int a3)
{
- return (vector signed int) __builtin_altivec_vmsumshm ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+ return (__vector signed int) __builtin_altivec_vmsumshm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
}
/* vec_vmsumshm */
-inline vector signed int
-vec_vmsumshm (vector signed short a1, vector signed short a2, vector signed int a3)
+inline __vector signed int
+vec_vmsumshm (__vector signed short a1, __vector signed short a2, __vector signed int a3)
{
- return (vector signed int) __builtin_altivec_vmsumshm ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+ return (__vector signed int) __builtin_altivec_vmsumshm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
}
/* vec_vmsumuhm */
-inline vector unsigned int
-vec_vmsumuhm (vector unsigned short a1, vector unsigned short a2, vector unsigned int a3)
+inline __vector unsigned int
+vec_vmsumuhm (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned int a3)
{
- return (vector unsigned int) __builtin_altivec_vmsumuhm ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+ return (__vector unsigned int) __builtin_altivec_vmsumuhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
}
/* vec_vmsummbm */
-inline vector signed int
-vec_vmsummbm (vector signed char a1, vector unsigned char a2, vector signed int a3)
+inline __vector signed int
+vec_vmsummbm (__vector signed char a1, __vector unsigned char a2, __vector signed int a3)
{
- return (vector signed int) __builtin_altivec_vmsummbm ((vector signed char) a1, (vector signed char) a2, (vector signed int) a3);
+ return (__vector signed int) __builtin_altivec_vmsummbm ((__vector signed char) a1, (__vector signed char) a2, (__vector signed int) a3);
}
/* vec_vmsumubm */
-inline vector unsigned int
-vec_vmsumubm (vector unsigned char a1, vector unsigned char a2, vector unsigned int a3)
+inline __vector unsigned int
+vec_vmsumubm (__vector unsigned char a1, __vector unsigned char a2, __vector unsigned int a3)
{
- return (vector unsigned int) __builtin_altivec_vmsumubm ((vector signed char) a1, (vector signed char) a2, (vector signed int) a3);
+ return (__vector unsigned int) __builtin_altivec_vmsumubm ((__vector signed char) a1, (__vector signed char) a2, (__vector signed int) a3);
}
/* vec_msums */
-inline vector unsigned int
-vec_msums (vector unsigned short a1, vector unsigned short a2, vector unsigned int a3)
+inline __vector unsigned int
+vec_msums (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned int a3)
{
- return (vector unsigned int) __builtin_altivec_vmsumuhs ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+ return (__vector unsigned int) __builtin_altivec_vmsumuhs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
}
-inline vector signed int
-vec_msums (vector signed short a1, vector signed short a2, vector signed int a3)
+inline __vector signed int
+vec_msums (__vector signed short a1, __vector signed short a2, __vector signed int a3)
{
- return (vector signed int) __builtin_altivec_vmsumshs ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+ return (__vector signed int) __builtin_altivec_vmsumshs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
}
/* vec_vmsumshs */
-inline vector signed int
-vec_vmsumshs (vector signed short a1, vector signed short a2, vector signed int a3)
+inline __vector signed int
+vec_vmsumshs (__vector signed short a1, __vector signed short a2, __vector signed int a3)
{
- return (vector signed int) __builtin_altivec_vmsumshs ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+ return (__vector signed int) __builtin_altivec_vmsumshs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
}
/* vec_vmsumuhs */
-inline vector unsigned int
-vec_vmsumuhs (vector unsigned short a1, vector unsigned short a2, vector unsigned int a3)
+inline __vector unsigned int
+vec_vmsumuhs (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned int a3)
{
- return (vector unsigned int) __builtin_altivec_vmsumuhs ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+ return (__vector unsigned int) __builtin_altivec_vmsumuhs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
}
/* vec_mtvscr */
inline void
-vec_mtvscr (vector signed int a1)
+vec_mtvscr (__vector signed int a1)
+{
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
+}
+
+inline void
+vec_mtvscr (__vector unsigned int a1)
+{
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
+}
+
+inline void
+vec_mtvscr (__vector __bool int a1)
{
- __builtin_altivec_mtvscr ((vector signed int) a1);
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
}
inline void
-vec_mtvscr (vector unsigned int a1)
+vec_mtvscr (__vector signed short a1)
{
- __builtin_altivec_mtvscr ((vector signed int) a1);
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
}
inline void
-vec_mtvscr (vector signed short a1)
+vec_mtvscr (__vector unsigned short a1)
{
- __builtin_altivec_mtvscr ((vector signed int) a1);
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
}
inline void
-vec_mtvscr (vector unsigned short a1)
+vec_mtvscr (__vector __bool short a1)
{
- __builtin_altivec_mtvscr ((vector signed int) a1);
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
}
inline void
-vec_mtvscr (vector signed char a1)
+vec_mtvscr (__vector __pixel a1)
{
- __builtin_altivec_mtvscr ((vector signed int) a1);
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
}
inline void
-vec_mtvscr (vector unsigned char a1)
+vec_mtvscr (__vector signed char a1)
{
- __builtin_altivec_mtvscr ((vector signed int) a1);
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
+}
+
+inline void
+vec_mtvscr (__vector unsigned char a1)
+{
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
+}
+
+inline void
+vec_mtvscr (__vector __bool char a1)
+{
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
}
/* vec_mule */
-inline vector unsigned short
-vec_mule (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_mule (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned short) __builtin_altivec_vmuleub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned short) __builtin_altivec_vmuleub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_mule (vector signed char a1, vector signed char a2)
+inline __vector signed short
+vec_mule (__vector signed char a1, __vector signed char a2)
{
- return (vector signed short) __builtin_altivec_vmulesb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vmulesb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned int
-vec_mule (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned int
+vec_mule (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vmuleuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned int) __builtin_altivec_vmuleuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_mule (vector signed short a1, vector signed short a2)
+inline __vector signed int
+vec_mule (__vector signed short a1, __vector signed short a2)
{
- return (vector signed int) __builtin_altivec_vmulesh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed int) __builtin_altivec_vmulesh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmulesh */
-inline vector signed int
-vec_vmulesh (vector signed short a1, vector signed short a2)
+inline __vector signed int
+vec_vmulesh (__vector signed short a1, __vector signed short a2)
{
- return (vector signed int) __builtin_altivec_vmulesh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed int) __builtin_altivec_vmulesh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmuleuh */
-inline vector unsigned int
-vec_vmuleuh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned int
+vec_vmuleuh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vmuleuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned int) __builtin_altivec_vmuleuh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+/* vec_vmulesb */
+
+inline __vector signed short
+vec_vmulesb (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed short) __builtin_altivec_vmulesb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_vmuleub */
-inline vector unsigned short
-vec_vmuleub (vector unsigned char a1, vector unsigned char a2)
+
+inline __vector unsigned short
+vec_vmuleub (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned short) __builtin_altivec_vmuleub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned short) __builtin_altivec_vmuleub ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_mulo */
-inline vector unsigned short
-vec_mulo (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_mulo (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned short) __builtin_altivec_vmuloub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned short) __builtin_altivec_vmuloub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_mulo (vector signed char a1, vector signed char a2)
+inline __vector signed short
+vec_mulo (__vector signed char a1, __vector signed char a2)
{
- return (vector signed short) __builtin_altivec_vmulosb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vmulosb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned int
-vec_mulo (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned int
+vec_mulo (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vmulouh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned int) __builtin_altivec_vmulouh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_mulo (vector signed short a1, vector signed short a2)
+inline __vector signed int
+vec_mulo (__vector signed short a1, __vector signed short a2)
{
- return (vector signed int) __builtin_altivec_vmulosh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed int) __builtin_altivec_vmulosh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmulosh */
-inline vector signed int
-vec_vmulosh (vector signed short a1, vector signed short a2)
+inline __vector signed int
+vec_vmulosh (__vector signed short a1, __vector signed short a2)
{
- return (vector signed int) __builtin_altivec_vmulosh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed int) __builtin_altivec_vmulosh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmulouh */
-inline vector unsigned int
-vec_vmulouh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned int
+vec_vmulouh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vmulouh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned int) __builtin_altivec_vmulouh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmulosb */
-inline vector signed short
-vec_vmulosb (vector signed char a1, vector signed char a2)
+inline __vector signed short
+vec_vmulosb (__vector signed char a1, __vector signed char a2)
{
- return (vector signed short) __builtin_altivec_vmulosb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vmulosb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_vmuloub */
-inline vector unsigned short
-vec_vmuloub (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_vmuloub (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned short) __builtin_altivec_vmuloub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned short) __builtin_altivec_vmuloub ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_nmsub */
-inline vector float
-vec_nmsub (vector float a1, vector float a2, vector float a3)
+inline __vector float
+vec_nmsub (__vector float a1, __vector float a2, __vector float a3)
{
- return (vector float) __builtin_altivec_vnmsubfp ((vector float) a1, (vector float) a2, (vector float) a3);
+ return (__vector float) __builtin_altivec_vnmsubfp ((__vector float) a1, (__vector float) a2, (__vector float) a3);
}
/* vec_nor */
-inline vector float
-vec_nor (vector float a1, vector float a2)
+inline __vector float
+vec_nor (__vector float a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_nor (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_nor (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool int
+vec_nor (__vector __bool int a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_nor (vector signed int a1, vector signed int a2)
+inline __vector signed short
+vec_nor (__vector signed short a1, __vector signed short a2)
{
- return (vector signed int) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_nor (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_nor (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_nor (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_nor (__vector __bool short a1, __vector __bool short a2)
{
- return (vector signed short) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_nor (vector unsigned short a1, vector unsigned short a2)
+inline __vector signed char
+vec_nor (__vector signed char a1, __vector signed char a2)
{
- return (vector unsigned short) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_nor (vector signed char a1, vector signed char a2)
+inline __vector unsigned char
+vec_nor (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned char) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_nor (vector unsigned char a1, vector unsigned char a2)
+inline __vector __bool char
+vec_nor (__vector __bool char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool char) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_or */
-inline vector float
-vec_or (vector float a1, vector float a2)
+inline __vector float
+vec_or (__vector float a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_or (__vector float a1, __vector __bool int a2)
+{
+ return (__vector float) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_or (__vector __bool int a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool int
+vec_or (__vector __bool int a1, __vector __bool int a2)
+{
+ return (__vector __bool int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_or (__vector __bool int a1, __vector signed int a2)
{
- return (vector float) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_or (vector float a1, vector signed int a2)
+inline __vector signed int
+vec_or (__vector signed int a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_or (vector signed int a1, vector float a2)
+inline __vector signed int
+vec_or (__vector signed int a1, __vector signed int a2)
{
- return (vector float) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_or (vector signed int a1, vector signed int a2)
+inline __vector unsigned int
+vec_or (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_or (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_or (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_or (vector unsigned int a1, vector signed int a2)
+inline __vector unsigned int
+vec_or (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_or (vector unsigned int a1, vector unsigned int a2)
+inline __vector __bool short
+vec_or (__vector __bool short a1, __vector __bool short a2)
{
- return (vector unsigned int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_or (vector signed short a1, vector signed short a2)
+inline __vector signed short
+vec_or (__vector __bool short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_or (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_or (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_or (vector unsigned short a1, vector signed short a2)
+inline __vector signed short
+vec_or (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_or (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_or (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_or (vector signed char a1, vector signed char a2)
+inline __vector unsigned short
+vec_or (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector signed char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_or (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_or (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_or (vector unsigned char a1, vector signed char a2)
+inline __vector signed char
+vec_or (__vector __bool char a1, __vector signed char a2)
{
- return (vector unsigned char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_or (vector unsigned char a1, vector unsigned char a2)
+inline __vector __bool char
+vec_or (__vector __bool char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_or (__vector signed char a1, __vector __bool char a2)
+{
+ return (__vector signed char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_or (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_or (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_or (__vector unsigned char a1, __vector __bool char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_or (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_pack */
-inline vector signed char
-vec_pack (vector signed short a1, vector signed short a2)
+inline __vector signed char
+vec_pack (__vector signed short a1, __vector signed short a2)
{
- return (vector signed char) __builtin_altivec_vpkuhum ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_pack (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned char
+vec_pack (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vpkuhum ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_pack (vector signed int a1, vector signed int a2)
+inline __vector __bool char
+vec_pack (__vector __bool short a1, __vector __bool short a2)
{
- return (vector signed short) __builtin_altivec_vpkuwum ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_pack (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed short
+vec_pack (__vector signed int a1, __vector signed int a2)
{
- return (vector unsigned short) __builtin_altivec_vpkuwum ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned short
+vec_pack (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool short
+vec_pack (__vector __bool int a1, __vector __bool int a2)
+{
+ return (__vector __bool short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vpkuwum */
-inline vector signed short
-vec_vpkuwum (vector signed int a1, vector signed int a2)
+inline __vector __bool short
+vec_vpkuwum (__vector __bool int a1, __vector __bool int a2)
+{
+ return (__vector __bool short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed short
+vec_vpkuwum (__vector signed int a1, __vector signed int a2)
{
- return (vector signed short) __builtin_altivec_vpkuwum ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_vpkuwum (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_vpkuwum (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vpkuwum ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vpkuhum */
-inline vector signed char
-vec_vpkuhum (vector signed short a1, vector signed short a2)
+inline __vector __bool char
+vec_vpkuhum (__vector __bool short a1, __vector __bool short a2)
+{
+ return (__vector __bool char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed char
+vec_vpkuhum (__vector signed short a1, __vector signed short a2)
{
- return (vector signed char) __builtin_altivec_vpkuhum ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_vpkuhum (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned char
+vec_vpkuhum (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vpkuhum ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_packpx */
-inline vector unsigned short
-vec_packpx (vector unsigned int a1, vector unsigned int a2)
+inline __vector __pixel
+vec_packpx (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vpkpx ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __pixel) __builtin_altivec_vpkpx ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_packs */
-inline vector unsigned char
-vec_packs (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned char
+vec_packs (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned char) __builtin_altivec_vpkuhus ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed char
-vec_packs (vector signed short a1, vector signed short a2)
+inline __vector signed char
+vec_packs (__vector signed short a1, __vector signed short a2)
{
- return (vector signed char) __builtin_altivec_vpkshss ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed char) __builtin_altivec_vpkshss ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_packs (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_packs (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vpkuwus ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_packs (vector signed int a1, vector signed int a2)
+inline __vector signed short
+vec_packs (__vector signed int a1, __vector signed int a2)
{
- return (vector signed short) __builtin_altivec_vpkswss ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vpkswss ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vpkswss */
-inline vector signed short
-vec_vpkswss (vector signed int a1, vector signed int a2)
+inline __vector signed short
+vec_vpkswss (__vector signed int a1, __vector signed int a2)
{
- return (vector signed short) __builtin_altivec_vpkswss ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vpkswss ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vpkuwus */
-inline vector unsigned short
-vec_vpkuwus (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_vpkuwus (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vpkuwus ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vpkshss */
-inline vector signed char
-vec_vpkshss (vector signed short a1, vector signed short a2)
+inline __vector signed char
+vec_vpkshss (__vector signed short a1, __vector signed short a2)
{
- return (vector signed char) __builtin_altivec_vpkshss ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed char) __builtin_altivec_vpkshss ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vpkuhus */
-inline vector unsigned char
-vec_vpkuhus (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned char
+vec_vpkuhus (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned char) __builtin_altivec_vpkuhus ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_packsu */
-inline vector unsigned char
-vec_packsu (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned char
+vec_packsu (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned char) __builtin_altivec_vpkuhus ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_packsu (vector signed short a1, vector signed short a2)
+inline __vector unsigned char
+vec_packsu (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned char) __builtin_altivec_vpkshus ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned char) __builtin_altivec_vpkshus ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_packsu (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_packsu (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vpkuwus ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_packsu (vector signed int a1, vector signed int a2)
+inline __vector unsigned short
+vec_packsu (__vector signed int a1, __vector signed int a2)
{
- return (vector unsigned short) __builtin_altivec_vpkswus ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vpkswus ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vpkswus */
-inline vector unsigned short
-vec_vpkswus (vector signed int a1, vector signed int a2)
+inline __vector unsigned short
+vec_vpkswus (__vector signed int a1, __vector signed int a2)
{
- return (vector unsigned short) __builtin_altivec_vpkswus ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vpkswus ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vpkshus */
-inline vector unsigned char
-vec_vpkshus (vector signed short a1, vector signed short a2)
+inline __vector unsigned char
+vec_vpkshus (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned char) __builtin_altivec_vpkshus ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned char) __builtin_altivec_vpkshus ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_perm */
-inline vector float
-vec_perm (vector float a1, vector float a2, vector unsigned char a3)
+inline __vector float
+vec_perm (__vector float a1, __vector float a2, __vector unsigned char a3)
+{
+ return (__vector float) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
+}
+
+inline __vector signed int
+vec_perm (__vector signed int a1, __vector signed int a2, __vector unsigned char a3)
+{
+ return (__vector signed int) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
+}
+
+inline __vector unsigned int
+vec_perm (__vector unsigned int a1, __vector unsigned int a2, __vector unsigned char a3)
{
- return (vector float) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+ return (__vector unsigned int) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
}
-inline vector signed int
-vec_perm (vector signed int a1, vector signed int a2, vector unsigned char a3)
+inline __vector __bool int
+vec_perm (__vector __bool int a1, __vector __bool int a2, __vector unsigned char a3)
{
- return (vector signed int) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+ return (__vector __bool int) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
}
-inline vector unsigned int
-vec_perm (vector unsigned int a1, vector unsigned int a2, vector unsigned char a3)
+inline __vector signed short
+vec_perm (__vector signed short a1, __vector signed short a2, __vector unsigned char a3)
{
- return (vector unsigned int) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+ return (__vector signed short) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
}
-inline vector signed short
-vec_perm (vector signed short a1, vector signed short a2, vector unsigned char a3)
+inline __vector unsigned short
+vec_perm (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned char a3)
{
- return (vector signed short) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+ return (__vector unsigned short) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
}
-inline vector unsigned short
-vec_perm (vector unsigned short a1, vector unsigned short a2, vector unsigned char a3)
+inline __vector __bool short
+vec_perm (__vector __bool short a1, __vector __bool short a2, __vector unsigned char a3)
{
- return (vector unsigned short) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+ return (__vector __bool short) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
}
-inline vector signed char
-vec_perm (vector signed char a1, vector signed char a2, vector unsigned char a3)
+inline __vector __pixel
+vec_perm (__vector __pixel a1, __vector __pixel a2, __vector unsigned char a3)
{
- return (vector signed char) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+ return (__vector __pixel) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
}
-inline vector unsigned char
-vec_perm (vector unsigned char a1, vector unsigned char a2, vector unsigned char a3)
+inline __vector signed char
+vec_perm (__vector signed char a1, __vector signed char a2, __vector unsigned char a3)
{
- return (vector unsigned char) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+ return (__vector signed char) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
+}
+
+inline __vector unsigned char
+vec_perm (__vector unsigned char a1, __vector unsigned char a2, __vector unsigned char a3)
+{
+ return (__vector unsigned char) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
+}
+
+inline __vector __bool char
+vec_perm (__vector __bool char a1, __vector __bool char a2, __vector unsigned char a3)
+{
+ return (__vector __bool char) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
}
/* vec_re */
-inline vector float
-vec_re (vector float a1)
+inline __vector float
+vec_re (__vector float a1)
{
- return (vector float) __builtin_altivec_vrefp ((vector float) a1);
+ return (__vector float) __builtin_altivec_vrefp ((__vector float) a1);
}
/* vec_rl */
-inline vector signed char
-vec_rl (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_rl (__vector signed char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vrlb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vrlb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_rl (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_rl (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vrlb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vrlb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_rl (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_rl (__vector signed short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vrlh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vrlh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_rl (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_rl (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vrlh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vrlh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_rl (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_rl (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vrlw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vrlw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_rl (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_rl (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vrlw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vrlw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vrlw */
-inline vector signed int
-vec_vrlw (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_vrlw (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vrlw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vrlw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vrlw (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vrlw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vrlw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vrlw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vrlh */
-inline vector signed short
-vec_vrlh (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_vrlh (__vector signed short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vrlh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vrlh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vrlh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vrlh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vrlh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vrlh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vrlb */
-inline vector signed char
-vec_vrlb (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_vrlb (__vector signed char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vrlb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vrlb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vrlb (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vrlb (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vrlb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vrlb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_round */
-inline vector float
-vec_round (vector float a1)
+inline __vector float
+vec_round (__vector float a1)
{
- return (vector float) __builtin_altivec_vrfin ((vector float) a1);
+ return (__vector float) __builtin_altivec_vrfin ((__vector float) a1);
}
/* vec_rsqrte */
-inline vector float
-vec_rsqrte (vector float a1)
+inline __vector float
+vec_rsqrte (__vector float a1)
{
- return (vector float) __builtin_altivec_vrsqrtefp ((vector float) a1);
+ return (__vector float) __builtin_altivec_vrsqrtefp ((__vector float) a1);
}
/* vec_sel */
-inline vector float
-vec_sel (vector float a1, vector float a2, vector signed int a3)
+inline __vector float
+vec_sel (__vector float a1, __vector float a2, __vector __bool int a3)
+{
+ return (__vector float) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
+}
+
+inline __vector float
+vec_sel (__vector float a1, __vector float a2, __vector unsigned int a3)
+{
+ return (__vector float) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
+}
+
+inline __vector signed int
+vec_sel (__vector signed int a1, __vector signed int a2, __vector __bool int a3)
{
- return (vector float) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector signed int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector float
-vec_sel (vector float a1, vector float a2, vector unsigned int a3)
+inline __vector signed int
+vec_sel (__vector signed int a1, __vector signed int a2, __vector unsigned int a3)
{
- return (vector float) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector signed int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector signed int
-vec_sel (vector signed int a1, vector signed int a2, vector signed int a3)
+inline __vector unsigned int
+vec_sel (__vector unsigned int a1, __vector unsigned int a2, __vector __bool int a3)
{
- return (vector signed int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector unsigned int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector signed int
-vec_sel (vector signed int a1, vector signed int a2, vector unsigned int a3)
+inline __vector unsigned int
+vec_sel (__vector unsigned int a1, __vector unsigned int a2, __vector unsigned int a3)
{
- return (vector signed int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector unsigned int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector unsigned int
-vec_sel (vector unsigned int a1, vector unsigned int a2, vector signed int a3)
+inline __vector __bool int
+vec_sel (__vector __bool int a1, __vector __bool int a2, __vector __bool int a3)
{
- return (vector unsigned int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector __bool int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector unsigned int
-vec_sel (vector unsigned int a1, vector unsigned int a2, vector unsigned int a3)
+inline __vector __bool int
+vec_sel (__vector __bool int a1, __vector __bool int a2, __vector unsigned int a3)
{
- return (vector unsigned int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector __bool int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector signed short
-vec_sel (vector signed short a1, vector signed short a2, vector signed short a3)
+inline __vector signed short
+vec_sel (__vector signed short a1, __vector signed short a2, __vector __bool short a3)
{
- return (vector signed short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector signed short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector signed short
-vec_sel (vector signed short a1, vector signed short a2, vector unsigned short a3)
+inline __vector signed short
+vec_sel (__vector signed short a1, __vector signed short a2, __vector unsigned short a3)
{
- return (vector signed short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector signed short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector unsigned short
-vec_sel (vector unsigned short a1, vector unsigned short a2, vector signed short a3)
+inline __vector unsigned short
+vec_sel (__vector unsigned short a1, __vector unsigned short a2, __vector __bool short a3)
{
- return (vector unsigned short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector unsigned short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector unsigned short
-vec_sel (vector unsigned short a1, vector unsigned short a2, vector unsigned short a3)
+inline __vector unsigned short
+vec_sel (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned short a3)
{
- return (vector unsigned short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector unsigned short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector signed char
-vec_sel (vector signed char a1, vector signed char a2, vector signed char a3)
+inline __vector __bool short
+vec_sel (__vector __bool short a1, __vector __bool short a2, __vector __bool short a3)
{
- return (vector signed char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector __bool short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector signed char
-vec_sel (vector signed char a1, vector signed char a2, vector unsigned char a3)
+inline __vector __bool short
+vec_sel (__vector __bool short a1, __vector __bool short a2, __vector unsigned short a3)
{
- return (vector signed char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector __bool short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector unsigned char
-vec_sel (vector unsigned char a1, vector unsigned char a2, vector signed char a3)
+inline __vector signed char
+vec_sel (__vector signed char a1, __vector signed char a2, __vector __bool char a3)
{
- return (vector unsigned char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector signed char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector unsigned char
-vec_sel (vector unsigned char a1, vector unsigned char a2, vector unsigned char a3)
+inline __vector signed char
+vec_sel (__vector signed char a1, __vector signed char a2, __vector unsigned char a3)
{
- return (vector unsigned char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector signed char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
+}
+
+inline __vector unsigned char
+vec_sel (__vector unsigned char a1, __vector unsigned char a2, __vector __bool char a3)
+{
+ return (__vector unsigned char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
+}
+
+inline __vector unsigned char
+vec_sel (__vector unsigned char a1, __vector unsigned char a2, __vector unsigned char a3)
+{
+ return (__vector unsigned char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
+}
+
+inline __vector __bool char
+vec_sel (__vector __bool char a1, __vector __bool char a2, __vector __bool char a3)
+{
+ return (__vector __bool char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
+}
+
+inline __vector __bool char
+vec_sel (__vector __bool char a1, __vector __bool char a2, __vector unsigned char a3)
+{
+ return (__vector __bool char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
/* vec_sl */
-inline vector signed char
-vec_sl (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_sl (__vector signed char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vslb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vslb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_sl (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_sl (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vslb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vslb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_sl (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_sl (__vector signed short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vslh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vslh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_sl (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_sl (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vslh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vslh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_sl (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_sl (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vslw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vslw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_sl (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_sl (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vslw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vslw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vslw */
-inline vector signed int
-vec_vslw (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_vslw (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vslw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vslw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vslw (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vslw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vslw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vslw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vslh */
-inline vector signed short
-vec_vslh (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_vslh (__vector signed short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vslh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vslh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vslh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vslh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vslh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vslh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vslb */
-inline vector signed char
-vec_vslb (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_vslb (__vector signed char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vslb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vslb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vslb (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vslb (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vslb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vslb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_sld */
-inline vector float
-vec_sld (vector float a1, vector float a2, const char a3)
+inline __vector float
+vec_sld (__vector float a1, __vector float a2, const int a3)
+{
+ return (__vector float) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
+}
+
+inline __vector signed int
+vec_sld (__vector signed int a1, __vector signed int a2, const int a3)
+{
+ return (__vector signed int) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
+}
+
+inline __vector unsigned int
+vec_sld (__vector unsigned int a1, __vector unsigned int a2, const int a3)
+{
+ return (__vector unsigned int) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
+}
+
+inline __vector __bool int
+vec_sld (__vector __bool int a1, __vector __bool int a2, const int a3)
+{
+ return (__vector __bool int) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
+}
+
+inline __vector signed short
+vec_sld (__vector signed short a1, __vector signed short a2, const int a3)
{
- return (vector float) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+ return (__vector signed short) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
}
-inline vector signed int
-vec_sld (vector signed int a1, vector signed int a2, const char a3)
+inline __vector unsigned short
+vec_sld (__vector unsigned short a1, __vector unsigned short a2, const int a3)
{
- return (vector signed int) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+ return (__vector unsigned short) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
}
-inline vector unsigned int
-vec_sld (vector unsigned int a1, vector unsigned int a2, const char a3)
+inline __vector __bool short
+vec_sld (__vector __bool short a1, __vector __bool short a2, const int a3)
{
- return (vector unsigned int) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+ return (__vector __bool short) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
}
-inline vector signed short
-vec_sld (vector signed short a1, vector signed short a2, const char a3)
+inline __vector __pixel
+vec_sld (__vector __pixel a1, __vector __pixel a2, const int a3)
{
- return (vector signed short) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+ return (__vector __pixel) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
}
-inline vector unsigned short
-vec_sld (vector unsigned short a1, vector unsigned short a2, const char a3)
+inline __vector signed char
+vec_sld (__vector signed char a1, __vector signed char a2, const int a3)
{
- return (vector unsigned short) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+ return (__vector signed char) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
}
-inline vector signed char
-vec_sld (vector signed char a1, vector signed char a2, const char a3)
+inline __vector unsigned char
+vec_sld (__vector unsigned char a1, __vector unsigned char a2, const int a3)
{
- return (vector signed char) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+ return (__vector unsigned char) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
}
-inline vector unsigned char
-vec_sld (vector unsigned char a1, vector unsigned char a2, const char a3)
+inline __vector __bool char
+vec_sld (__vector __bool char a1, __vector __bool char a2, const int a3)
{
- return (vector unsigned char) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+ return (__vector __bool char) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
}
/* vec_sll */
-inline vector signed int
-vec_sll (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_sll (__vector signed int a1, __vector unsigned int a2)
+{
+ return (__vector signed int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_sll (__vector signed int a1, __vector unsigned short a2)
+{
+ return (__vector signed int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_sll (__vector signed int a1, __vector unsigned char a2)
+{
+ return (__vector signed int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_sll (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_sll (__vector unsigned int a1, __vector unsigned short a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_sll (__vector unsigned int a1, __vector unsigned char a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool int
+vec_sll (__vector __bool int a1, __vector unsigned int a2)
+{
+ return (__vector __bool int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool int
+vec_sll (__vector __bool int a1, __vector unsigned short a2)
{
- return (vector signed int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_sll (vector signed int a1, vector unsigned short a2)
+inline __vector __bool int
+vec_sll (__vector __bool int a1, __vector unsigned char a2)
{
- return (vector signed int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_sll (vector signed int a1, vector unsigned char a2)
+inline __vector signed short
+vec_sll (__vector signed short a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_sll (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed short
+vec_sll (__vector signed short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_sll (vector unsigned int a1, vector unsigned short a2)
+inline __vector signed short
+vec_sll (__vector signed short a1, __vector unsigned char a2)
{
- return (vector unsigned int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_sll (vector unsigned int a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_sll (__vector unsigned short a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_sll (vector signed short a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_sll (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_sll (vector signed short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_sll (__vector unsigned short a1, __vector unsigned char a2)
{
- return (vector signed short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_sll (vector signed short a1, vector unsigned char a2)
+inline __vector __bool short
+vec_sll (__vector __bool short a1, __vector unsigned int a2)
{
- return (vector signed short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_sll (vector unsigned short a1, vector unsigned int a2)
+inline __vector __bool short
+vec_sll (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_sll (vector unsigned short a1, vector unsigned short a2)
+inline __vector __bool short
+vec_sll (__vector __bool short a1, __vector unsigned char a2)
{
- return (vector unsigned short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_sll (vector unsigned short a1, vector unsigned char a2)
+inline __vector __pixel
+vec_sll (__vector __pixel a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __pixel) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_sll (vector signed char a1, vector unsigned int a2)
+inline __vector __pixel
+vec_sll (__vector __pixel a1, __vector unsigned short a2)
{
- return (vector signed char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __pixel) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_sll (vector signed char a1, vector unsigned short a2)
+inline __vector __pixel
+vec_sll (__vector __pixel a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __pixel) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_sll (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_sll (__vector signed char a1, __vector unsigned int a2)
{
- return (vector signed char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_sll (vector unsigned char a1, vector unsigned int a2)
+inline __vector signed char
+vec_sll (__vector signed char a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_sll (vector unsigned char a1, vector unsigned short a2)
+inline __vector signed char
+vec_sll (__vector signed char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_sll (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_sll (__vector unsigned char a1, __vector unsigned int a2)
{
- return (vector unsigned char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_sll (__vector unsigned char a1, __vector unsigned short a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_sll (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool char
+vec_sll (__vector __bool char a1, __vector unsigned int a2)
+{
+ return (__vector __bool char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool char
+vec_sll (__vector __bool char a1, __vector unsigned short a2)
+{
+ return (__vector __bool char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool char
+vec_sll (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector __bool char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_slo */
-inline vector float
-vec_slo (vector float a1, vector signed char a2)
+inline __vector float
+vec_slo (__vector float a1, __vector signed char a2)
+{
+ return (__vector float) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_slo (__vector float a1, __vector unsigned char a2)
{
- return (vector float) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector float) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_slo (vector float a1, vector unsigned char a2)
+inline __vector signed int
+vec_slo (__vector signed int a1, __vector signed char a2)
{
- return (vector float) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_slo (vector signed int a1, vector signed char a2)
+inline __vector signed int
+vec_slo (__vector signed int a1, __vector unsigned char a2)
{
- return (vector signed int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_slo (vector signed int a1, vector unsigned char a2)
+inline __vector unsigned int
+vec_slo (__vector unsigned int a1, __vector signed char a2)
{
- return (vector signed int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_slo (vector unsigned int a1, vector signed char a2)
+inline __vector unsigned int
+vec_slo (__vector unsigned int a1, __vector unsigned char a2)
{
- return (vector unsigned int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_slo (vector unsigned int a1, vector unsigned char a2)
+inline __vector signed short
+vec_slo (__vector signed short a1, __vector signed char a2)
{
- return (vector unsigned int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_slo (vector signed short a1, vector signed char a2)
+inline __vector signed short
+vec_slo (__vector signed short a1, __vector unsigned char a2)
{
- return (vector signed short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_slo (vector signed short a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_slo (__vector unsigned short a1, __vector signed char a2)
{
- return (vector signed short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_slo (vector unsigned short a1, vector signed char a2)
+inline __vector unsigned short
+vec_slo (__vector unsigned short a1, __vector unsigned char a2)
{
- return (vector unsigned short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_slo (vector unsigned short a1, vector unsigned char a2)
+inline __vector __pixel
+vec_slo (__vector __pixel a1, __vector signed char a2)
{
- return (vector unsigned short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __pixel) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_slo (vector signed char a1, vector signed char a2)
+inline __vector __pixel
+vec_slo (__vector __pixel a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __pixel) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_slo (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_slo (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_slo (vector unsigned char a1, vector signed char a2)
+inline __vector signed char
+vec_slo (__vector signed char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_slo (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_slo (__vector unsigned char a1, __vector signed char a2)
{
- return (vector unsigned char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned char) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_slo (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_splat */
-inline vector signed char
-vec_splat (vector signed char a1, const char a2)
+inline __vector signed char
+vec_splat (__vector signed char a1, const int a2)
{
- return (vector signed char) __builtin_altivec_vspltb ((vector signed char) a1, a2);
+ return (__vector signed char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
}
-inline vector unsigned char
-vec_splat (vector unsigned char a1, const char a2)
+inline __vector unsigned char
+vec_splat (__vector unsigned char a1, const int a2)
{
- return (vector unsigned char) __builtin_altivec_vspltb ((vector signed char) a1, a2);
+ return (__vector unsigned char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
}
-inline vector signed short
-vec_splat (vector signed short a1, const char a2)
+inline __vector __bool char
+vec_splat (__vector __bool char a1, const int a2)
{
- return (vector signed short) __builtin_altivec_vsplth ((vector signed short) a1, a2);
+ return (__vector __bool char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
}
-inline vector unsigned short
-vec_splat (vector unsigned short a1, const char a2)
+inline __vector signed short
+vec_splat (__vector signed short a1, const int a2)
{
- return (vector unsigned short) __builtin_altivec_vsplth ((vector signed short) a1, a2);
+ return (__vector signed short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
}
-inline vector float
-vec_splat (vector float a1, const char a2)
+inline __vector unsigned short
+vec_splat (__vector unsigned short a1, const int a2)
{
- return (vector float) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+ return (__vector unsigned short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
}
-inline vector signed int
-vec_splat (vector signed int a1, const char a2)
+inline __vector __bool short
+vec_splat (__vector __bool short a1, const int a2)
{
- return (vector signed int) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+ return (__vector __bool short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
}
-inline vector unsigned int
-vec_splat (vector unsigned int a1, const char a2)
+inline __vector __pixel
+vec_splat (__vector __pixel a1, const int a2)
{
- return (vector unsigned int) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+ return (__vector __pixel) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
+}
+
+inline __vector float
+vec_splat (__vector float a1, const int a2)
+{
+ return (__vector float) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
+}
+
+inline __vector signed int
+vec_splat (__vector signed int a1, const int a2)
+{
+ return (__vector signed int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
+}
+
+inline __vector unsigned int
+vec_splat (__vector unsigned int a1, const int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
+}
+
+inline __vector __bool int
+vec_splat (__vector __bool int a1, const int a2)
+{
+ return (__vector __bool int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
}
/* vec_vspltw */
-inline vector float
-vec_vspltw (vector float a1, const char a2)
+inline __vector float
+vec_vspltw (__vector float a1, const int a2)
{
- return (vector float) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+ return (__vector float) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
}
-inline vector signed int
-vec_vspltw (vector signed int a1, const char a2)
+inline __vector signed int
+vec_vspltw (__vector signed int a1, const int a2)
{
- return (vector signed int) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+ return (__vector signed int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
}
-inline vector unsigned int
-vec_vspltw (vector unsigned int a1, const char a2)
+inline __vector unsigned int
+vec_vspltw (__vector unsigned int a1, const int a2)
{
- return (vector unsigned int) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+ return (__vector unsigned int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
+}
+
+inline __vector __bool int
+vec_vspltw (__vector __bool int a1, const int a2)
+{
+ return (__vector __bool int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
}
/* vec_vsplth */
-inline vector signed short
-vec_vsplth (vector signed short a1, const char a2)
+inline __vector __bool short
+vec_vsplth (__vector __bool short a1, const int a2)
+{
+ return (__vector __bool short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
+}
+
+inline __vector signed short
+vec_vsplth (__vector signed short a1, const int a2)
{
- return (vector signed short) __builtin_altivec_vsplth ((vector signed short) a1, a2);
+ return (__vector signed short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
}
-inline vector unsigned short
-vec_vsplth (vector unsigned short a1, const char a2)
+inline __vector unsigned short
+vec_vsplth (__vector unsigned short a1, const int a2)
{
- return (vector unsigned short) __builtin_altivec_vsplth ((vector signed short) a1, a2);
+ return (__vector unsigned short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
+}
+
+inline __vector __pixel
+vec_vsplth (__vector __pixel a1, const int a2)
+{
+ return (__vector __pixel) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
}
/* vec_vspltb */
-inline vector signed char
-vec_vspltb (vector signed char a1, const char a2)
+inline __vector signed char
+vec_vspltb (__vector signed char a1, const int a2)
{
- return (vector signed char) __builtin_altivec_vspltb ((vector signed char) a1, a2);
+ return (__vector signed char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
}
-inline vector unsigned char
-vec_vspltb (vector unsigned char a1, const char a2)
+inline __vector unsigned char
+vec_vspltb (__vector unsigned char a1, const int a2)
{
- return (vector unsigned char) __builtin_altivec_vspltb ((vector signed char) a1, a2);
+ return (__vector unsigned char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
+}
+
+inline __vector __bool char
+vec_vspltb (__vector __bool char a1, const int a2)
+{
+ return (__vector __bool char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
}
/* vec_splat_s8 */
-inline vector signed char
-vec_splat_s8 (const char a1)
+inline __vector signed char
+vec_splat_s8 (const int a1)
{
- return (vector signed char) __builtin_altivec_vspltisb (a1);
+ return (__vector signed char) __builtin_altivec_vspltisb (a1);
}
/* vec_splat_s16 */
-inline vector signed short
-vec_splat_s16 (const char a1)
+inline __vector signed short
+vec_splat_s16 (const int a1)
{
- return (vector signed short) __builtin_altivec_vspltish (a1);
+ return (__vector signed short) __builtin_altivec_vspltish (a1);
}
/* vec_splat_s32 */
-inline vector signed int
-vec_splat_s32 (const char a1)
+inline __vector signed int
+vec_splat_s32 (const int a1)
{
- return (vector signed int) __builtin_altivec_vspltisw (a1);
+ return (__vector signed int) __builtin_altivec_vspltisw (a1);
}
/* vec_splat_u8 */
-inline vector unsigned char
-vec_splat_u8 (const char a1)
+inline __vector unsigned char
+vec_splat_u8 (const int a1)
{
- return (vector unsigned char) __builtin_altivec_vspltisb (a1);
+ return (__vector unsigned char) __builtin_altivec_vspltisb (a1);
}
/* vec_splat_u16 */
-inline vector unsigned short
-vec_splat_u16 (const char a1)
+inline __vector unsigned short
+vec_splat_u16 (const int a1)
{
- return (vector unsigned short) __builtin_altivec_vspltish (a1);
+ return (__vector unsigned short) __builtin_altivec_vspltish (a1);
}
/* vec_splat_u32 */
-inline vector unsigned int
-vec_splat_u32 (const char a1)
+inline __vector unsigned int
+vec_splat_u32 (const int a1)
{
- return (vector unsigned int) __builtin_altivec_vspltisw (a1);
+ return (__vector unsigned int) __builtin_altivec_vspltisw (a1);
}
/* vec_sr */
-inline vector signed char
-vec_sr (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_sr (__vector signed char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vsrb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsrb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_sr (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_sr (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsrb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsrb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_sr (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_sr (__vector signed short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vsrh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsrh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_sr (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_sr (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsrh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsrh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_sr (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_sr (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vsrw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsrw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_sr (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_sr (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsrw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsrw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vsrw */
-inline vector signed int
-vec_vsrw (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_vsrw (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vsrw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsrw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vsrw (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vsrw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsrw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsrw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vsrh */
-inline vector signed short
-vec_vsrh (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_vsrh (__vector signed short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vsrh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsrh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vsrh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vsrh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsrh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsrh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vsrb */
-inline vector signed char
-vec_vsrb (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_vsrb (__vector signed char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vsrb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsrb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vsrb (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vsrb (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsrb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsrb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_sra */
-inline vector signed char
-vec_sra (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_sra (__vector signed char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vsrab ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsrab ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_sra (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_sra (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsrab ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsrab ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_sra (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_sra (__vector signed short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vsrah ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsrah ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_sra (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_sra (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsrah ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsrah ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_sra (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_sra (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vsraw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsraw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_sra (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_sra (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsraw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsraw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vsraw */
-inline vector signed int
-vec_vsraw (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_vsraw (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vsraw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsraw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vsraw (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vsraw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsraw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsraw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vsrah */
-inline vector signed short
-vec_vsrah (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_vsrah (__vector signed short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vsrah ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsrah ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vsrah (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vsrah (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsrah ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsrah ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vsrab */
-inline vector signed char
-vec_vsrab (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_vsrab (__vector signed char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vsrab ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsrab ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vsrab (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vsrab (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsrab ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsrab ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_srl */
-inline vector signed int
-vec_srl (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_srl (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_srl (vector signed int a1, vector unsigned short a2)
+inline __vector signed int
+vec_srl (__vector signed int a1, __vector unsigned short a2)
{
- return (vector signed int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_srl (vector signed int a1, vector unsigned char a2)
+inline __vector signed int
+vec_srl (__vector signed int a1, __vector unsigned char a2)
{
- return (vector signed int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_srl (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_srl (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_srl (vector unsigned int a1, vector unsigned short a2)
+inline __vector unsigned int
+vec_srl (__vector unsigned int a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_srl (vector unsigned int a1, vector unsigned char a2)
+inline __vector unsigned int
+vec_srl (__vector unsigned int a1, __vector unsigned char a2)
{
- return (vector unsigned int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_srl (vector signed short a1, vector unsigned int a2)
+inline __vector __bool int
+vec_srl (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector signed short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_srl (vector signed short a1, vector unsigned short a2)
+inline __vector __bool int
+vec_srl (__vector __bool int a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_srl (vector signed short a1, vector unsigned char a2)
+inline __vector __bool int
+vec_srl (__vector __bool int a1, __vector unsigned char a2)
{
- return (vector signed short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_srl (vector unsigned short a1, vector unsigned int a2)
+inline __vector signed short
+vec_srl (__vector signed short a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_srl (vector unsigned short a1, vector unsigned short a2)
+inline __vector signed short
+vec_srl (__vector signed short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_srl (vector unsigned short a1, vector unsigned char a2)
+inline __vector signed short
+vec_srl (__vector signed short a1, __vector unsigned char a2)
{
- return (vector unsigned short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_srl (vector signed char a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_srl (__vector unsigned short a1, __vector unsigned int a2)
{
- return (vector signed char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_srl (vector signed char a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_srl (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_srl (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_srl (__vector unsigned short a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_srl (vector unsigned char a1, vector unsigned int a2)
+inline __vector __bool short
+vec_srl (__vector __bool short a1, __vector unsigned int a2)
{
- return (vector unsigned char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_srl (vector unsigned char a1, vector unsigned short a2)
+inline __vector __bool short
+vec_srl (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_srl (vector unsigned char a1, vector unsigned char a2)
+inline __vector __bool short
+vec_srl (__vector __bool short a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __pixel
+vec_srl (__vector __pixel a1, __vector unsigned int a2)
+{
+ return (__vector __pixel) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __pixel
+vec_srl (__vector __pixel a1, __vector unsigned short a2)
+{
+ return (__vector __pixel) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __pixel
+vec_srl (__vector __pixel a1, __vector unsigned char a2)
+{
+ return (__vector __pixel) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_srl (__vector signed char a1, __vector unsigned int a2)
+{
+ return (__vector signed char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_srl (__vector signed char a1, __vector unsigned short a2)
+{
+ return (__vector signed char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_srl (__vector signed char a1, __vector unsigned char a2)
+{
+ return (__vector signed char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_srl (__vector unsigned char a1, __vector unsigned int a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_srl (__vector unsigned char a1, __vector unsigned short a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_srl (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool char
+vec_srl (__vector __bool char a1, __vector unsigned int a2)
+{
+ return (__vector __bool char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool char
+vec_srl (__vector __bool char a1, __vector unsigned short a2)
+{
+ return (__vector __bool char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool char
+vec_srl (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector __bool char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_sro */
-inline vector float
-vec_sro (vector float a1, vector signed char a2)
+inline __vector float
+vec_sro (__vector float a1, __vector signed char a2)
+{
+ return (__vector float) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_sro (__vector float a1, __vector unsigned char a2)
{
- return (vector float) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector float) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_sro (vector float a1, vector unsigned char a2)
+inline __vector signed int
+vec_sro (__vector signed int a1, __vector signed char a2)
{
- return (vector float) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_sro (vector signed int a1, vector signed char a2)
+inline __vector signed int
+vec_sro (__vector signed int a1, __vector unsigned char a2)
{
- return (vector signed int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_sro (vector signed int a1, vector unsigned char a2)
+inline __vector unsigned int
+vec_sro (__vector unsigned int a1, __vector signed char a2)
{
- return (vector signed int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_sro (vector unsigned int a1, vector signed char a2)
+inline __vector unsigned int
+vec_sro (__vector unsigned int a1, __vector unsigned char a2)
{
- return (vector unsigned int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_sro (vector unsigned int a1, vector unsigned char a2)
+inline __vector signed short
+vec_sro (__vector signed short a1, __vector signed char a2)
{
- return (vector unsigned int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_sro (vector signed short a1, vector signed char a2)
+inline __vector signed short
+vec_sro (__vector signed short a1, __vector unsigned char a2)
{
- return (vector signed short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_sro (vector signed short a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_sro (__vector unsigned short a1, __vector signed char a2)
{
- return (vector signed short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_sro (vector unsigned short a1, vector signed char a2)
+inline __vector unsigned short
+vec_sro (__vector unsigned short a1, __vector unsigned char a2)
{
- return (vector unsigned short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_sro (vector unsigned short a1, vector unsigned char a2)
+inline __vector __pixel
+vec_sro (__vector __pixel a1, __vector signed char a2)
{
- return (vector unsigned short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __pixel) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_sro (vector signed char a1, vector signed char a2)
+inline __vector __pixel
+vec_sro (__vector __pixel a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __pixel) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_sro (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_sro (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_sro (vector unsigned char a1, vector signed char a2)
+inline __vector signed char
+vec_sro (__vector signed char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_sro (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_sro (__vector unsigned char a1, __vector signed char a2)
{
- return (vector unsigned char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned char) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_sro (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_st */
inline void
-vec_st (vector float a1, int a2, void *a3)
+vec_st (__vector float a1, int a2, __vector float *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector float a1, int a2, float *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector signed int a1, int a2, __vector signed int *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector signed int a1, int a2, int *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector unsigned int a1, int a2, __vector unsigned int *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector unsigned int a1, int a2, unsigned int *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __bool int a1, int a2, __vector __bool int *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __bool int a1, int a2, unsigned int *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __bool int a1, int a2, int *a3)
{
- __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_st (vector signed int a1, int a2, void *a3)
+vec_st (__vector signed short a1, int a2, __vector signed short *a3)
{
- __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_st (vector unsigned int a1, int a2, void *a3)
+vec_st (__vector signed short a1, int a2, short *a3)
{
- __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_st (vector signed short a1, int a2, void *a3)
+vec_st (__vector unsigned short a1, int a2, __vector unsigned short *a3)
{
- __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_st (vector unsigned short a1, int a2, void *a3)
+vec_st (__vector unsigned short a1, int a2, unsigned short *a3)
{
- __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_st (vector signed char a1, int a2, void *a3)
+vec_st (__vector __bool short a1, int a2, __vector __bool short *a3)
{
- __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_st (vector unsigned char a1, int a2, void *a3)
+vec_st (__vector __bool short a1, int a2, unsigned short *a3)
{
- __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __pixel a1, int a2, __vector __pixel *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __pixel a1, int a2, unsigned short *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __pixel a1, int a2, short *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __bool short a1, int a2, short *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector signed char a1, int a2, __vector signed char *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector signed char a1, int a2, signed char *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector unsigned char a1, int a2, __vector unsigned char *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector unsigned char a1, int a2, unsigned char *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __bool char a1, int a2, __vector __bool char *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __bool char a1, int a2, unsigned char *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __bool char a1, int a2, signed char *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
}
/* vec_ste */
inline void
-vec_ste (vector signed char a1, int a2, void *a3)
+vec_ste (__vector signed char a1, int a2, signed char *a3)
+{
+ __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (__vector unsigned char a1, int a2, unsigned char *a3)
+{
+ __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (__vector __bool char a1, int a2, signed char *a3)
+{
+ __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (__vector __bool char a1, int a2, unsigned char *a3)
+{
+ __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (__vector signed short a1, int a2, short *a3)
+{
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (__vector unsigned short a1, int a2, unsigned short *a3)
+{
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (__vector __bool short a1, int a2, short *a3)
{
- __builtin_altivec_stvebx ((vector signed char) a1, a2, (void *) a3);
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
}
inline void
-vec_ste (vector unsigned char a1, int a2, void *a3)
+vec_ste (__vector __bool short a1, int a2, unsigned short *a3)
{
- __builtin_altivec_stvebx ((vector signed char) a1, a2, (void *) a3);
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
}
inline void
-vec_ste (vector signed short a1, int a2, void *a3)
+vec_ste (__vector __pixel a1, int a2, short *a3)
{
- __builtin_altivec_stvehx ((vector signed short) a1, a2, (void *) a3);
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
}
inline void
-vec_ste (vector unsigned short a1, int a2, void *a3)
+vec_ste (__vector __pixel a1, int a2, unsigned short *a3)
{
- __builtin_altivec_stvehx ((vector signed short) a1, a2, (void *) a3);
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
}
inline void
-vec_ste (vector float a1, int a2, void *a3)
+vec_ste (__vector float a1, int a2, float *a3)
{
- __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_ste (vector signed int a1, int a2, void *a3)
+vec_ste (__vector signed int a1, int a2, int *a3)
{
- __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_ste (vector unsigned int a1, int a2, void *a3)
+vec_ste (__vector unsigned int a1, int a2, unsigned int *a3)
{
- __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (__vector __bool int a1, int a2, int *a3)
+{
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (__vector __bool int a1, int a2, unsigned int *a3)
+{
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
}
/* vec_stvewx */
inline void
-vec_stvewx (vector float a1, int a2, void *a3)
+vec_stvewx (__vector float a1, int a2, float *a3)
+{
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvewx (__vector signed int a1, int a2, int *a3)
{
- __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_stvewx (vector signed int a1, int a2, void *a3)
+vec_stvewx (__vector unsigned int a1, int a2, unsigned int *a3)
{
- __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_stvewx (vector unsigned int a1, int a2, void *a3)
+vec_stvewx (__vector __bool int a1, int a2, int *a3)
{
- __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvewx (__vector __bool int a1, int a2, unsigned int *a3)
+{
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
}
/* vec_stvehx */
inline void
-vec_stvehx (vector signed short a1, int a2, void *a3)
+vec_stvehx (__vector signed short a1, int a2, short *a3)
{
- __builtin_altivec_stvehx ((vector signed short) a1, a2, (void *) a3);
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
}
inline void
-vec_stvehx (vector unsigned short a1, int a2, void *a3)
+vec_stvehx (__vector unsigned short a1, int a2, unsigned short *a3)
{
- __builtin_altivec_stvehx ((vector signed short) a1, a2, (void *) a3);
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvehx (__vector __bool short a1, int a2, short *a3)
+{
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvehx (__vector __bool short a1, int a2, unsigned short *a3)
+{
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvehx (__vector __pixel a1, int a2, short *a3)
+{
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvehx (__vector __pixel a1, int a2, unsigned short *a3)
+{
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
}
/* vec_stvebx */
inline void
-vec_stvebx (vector signed char a1, int a2, void *a3)
+vec_stvebx (__vector signed char a1, int a2, signed char *a3)
+{
+ __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvebx (__vector unsigned char a1, int a2, unsigned char *a3)
+{
+ __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvebx (__vector __bool char a1, int a2, signed char *a3)
{
- __builtin_altivec_stvebx ((vector signed char) a1, a2, (void *) a3);
+ __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
}
inline void
-vec_stvebx (vector unsigned char a1, int a2, void *a3)
+vec_stvebx (__vector __bool char a1, int a2, unsigned char *a3)
{
- __builtin_altivec_stvebx ((vector signed char) a1, a2, (void *) a3);
+ __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
}
/* vec_stl */
inline void
-vec_stl (vector float a1, int a2, void *a3)
+vec_stl (__vector float a1, int a2, __vector float *a3)
{
- __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_stl (vector signed int a1, int a2, void *a3)
+vec_stl (__vector float a1, int a2, float *a3)
{
- __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_stl (vector unsigned int a1, int a2, void *a3)
+vec_stl (__vector signed int a1, int a2, __vector signed int *a3)
{
- __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_stl (vector signed short a1, int a2, void *a3)
+vec_stl (__vector signed int a1, int a2, int *a3)
{
- __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_stl (vector unsigned short a1, int a2, void *a3)
+vec_stl (__vector unsigned int a1, int a2, __vector unsigned int *a3)
{
- __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_stl (vector signed char a1, int a2, void *a3)
+vec_stl (__vector unsigned int a1, int a2, unsigned int *a3)
{
- __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_stl (vector unsigned char a1, int a2, void *a3)
+vec_stl (__vector __bool int a1, int a2, __vector __bool int *a3)
{
- __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __bool int a1, int a2, unsigned int *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __bool int a1, int a2, int *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector signed short a1, int a2, __vector signed short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector signed short a1, int a2, short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector unsigned short a1, int a2, __vector unsigned short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector unsigned short a1, int a2, unsigned short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __bool short a1, int a2, __vector __bool short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __bool short a1, int a2, unsigned short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __bool short a1, int a2, short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __pixel a1, int a2, __vector __pixel *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __pixel a1, int a2, unsigned short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __pixel a1, int a2, short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector signed char a1, int a2, __vector signed char *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector signed char a1, int a2, signed char *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector unsigned char a1, int a2, __vector unsigned char *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector unsigned char a1, int a2, unsigned char *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __bool char a1, int a2, __vector __bool char *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __bool char a1, int a2, unsigned char *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __bool char a1, int a2, signed char *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
}
/* vec_sub */
-inline vector signed char
-vec_sub (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_sub (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_sub (__vector signed char a1, __vector __bool char a2)
{
- return (vector signed char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_sub (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_sub (__vector signed char a1, __vector signed char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_sub (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_sub (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_sub (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_sub (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_sub (vector signed short a1, vector signed short a2)
+inline __vector unsigned char
+vec_sub (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector signed short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_sub (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_sub (__vector __bool short a1, __vector signed short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_sub (vector unsigned short a1, vector signed short a2)
+inline __vector signed short
+vec_sub (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_sub (vector unsigned short a1, vector unsigned short a2)
+inline __vector signed short
+vec_sub (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_sub (vector signed int a1, vector signed int a2)
+inline __vector unsigned short
+vec_sub (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector signed int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_sub (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_sub (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_sub (vector unsigned int a1, vector signed int a2)
+inline __vector unsigned short
+vec_sub (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_sub (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed int
+vec_sub (__vector __bool int a1, __vector signed int a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_sub (vector float a1, vector float a2)
+inline __vector signed int
+vec_sub (__vector signed int a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vsubfp ((vector float) a1, (vector float) a2);
+ return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_sub (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_sub (__vector __bool int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_sub (__vector unsigned int a1, __vector __bool int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_sub (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_sub (__vector float a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vsubfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vsubfp */
-inline vector float
-vec_vsubfp (vector float a1, vector float a2)
+inline __vector float
+vec_vsubfp (__vector float a1, __vector float a2)
{
- return (vector float) __builtin_altivec_vsubfp ((vector float) a1, (vector float) a2);
+ return (__vector float) __builtin_altivec_vsubfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vsubuwm */
-inline vector signed int
-vec_vsubuwm (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_vsubuwm (__vector __bool int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vsubuwm (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_vsubuwm (__vector signed int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vsubuwm (vector unsigned int a1, vector signed int a2)
+inline __vector signed int
+vec_vsubuwm (__vector signed int a1, __vector signed int a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vsubuwm (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vsubuwm (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_vsubuwm (__vector unsigned int a1, __vector __bool int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_vsubuwm (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vsubuhm */
-inline vector signed short
-vec_vsubuhm (vector signed short a1, vector signed short a2)
+inline __vector signed short
+vec_vsubuhm (__vector __bool short a1, __vector signed short a2)
+{
+ return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vsubuhm (__vector signed short a1, __vector __bool short a2)
+{
+ return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vsubuhm (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vsubuhm (vector signed short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vsubuhm (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vsubuhm (vector unsigned short a1, vector signed short a2)
+inline __vector unsigned short
+vec_vsubuhm (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vsubuhm (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vsubuhm (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vsububm */
-inline vector signed char
-vec_vsububm (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_vsububm (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vsububm (__vector signed char a1, __vector __bool char a2)
+{
+ return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vsububm (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vsububm (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vsububm (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vsububm (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_vsububm (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vsububm (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vsububm (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_subc */
-inline vector unsigned int
-vec_subc (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_subc (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsubcuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsubcuw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_subs */
-inline vector unsigned char
-vec_subs (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_subs (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_subs (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_subs (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_subs (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_subs (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed char
-vec_subs (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_subs (__vector __bool char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vsubsbs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_subs (vector signed short a1, vector unsigned short a2)
+inline __vector signed char
+vec_subs (__vector signed char a1, __vector __bool char a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_subs (vector unsigned short a1, vector signed short a2)
+inline __vector signed char
+vec_subs (__vector signed char a1, __vector signed char a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_subs (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_subs (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_subs (vector signed short a1, vector signed short a2)
+inline __vector unsigned short
+vec_subs (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector signed short) __builtin_altivec_vsubshs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_subs (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_subs (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_subs (vector unsigned int a1, vector signed int a2)
+inline __vector signed short
+vec_subs (__vector __bool short a1, __vector signed short a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_subs (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed short
+vec_subs (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_subs (vector signed int a1, vector signed int a2)
+inline __vector signed short
+vec_subs (__vector signed short a1, __vector signed short a2)
{
- return (vector signed int) __builtin_altivec_vsubsws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector unsigned int
+vec_subs (__vector __bool int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_subs (__vector unsigned int a1, __vector __bool int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_subs (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_subs (__vector __bool int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_subs (__vector signed int a1, __vector __bool int a2)
+{
+ return (__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_subs (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vsubsws */
-inline vector signed int
-vec_vsubsws (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_vsubsws (__vector __bool int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_vsubsws (__vector signed int a1, __vector __bool int a2)
+{
+ return (__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_vsubsws (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vsubsws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vsubuws */
-inline vector unsigned int
-vec_vsubuws (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vsubuws (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vsubuws (vector unsigned int a1, vector signed int a2)
+inline __vector unsigned int
+vec_vsubuws (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vsubuws (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vsubuws (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vsubshs */
-inline vector signed short
-vec_vsubshs (vector signed short a1, vector signed short a2)
+inline __vector signed short
+vec_vsubshs (__vector __bool short a1, __vector signed short a2)
+{
+ return (__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vsubshs (__vector signed short a1, __vector __bool short a2)
+{
+ return (__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vsubshs (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vsubshs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vsubuhs */
-inline vector unsigned short
-vec_vsubuhs (vector signed short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vsubuhs (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vsubuhs (vector unsigned short a1, vector signed short a2)
+inline __vector unsigned short
+vec_vsubuhs (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vsubuhs (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vsubuhs (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vsubsbs */
-inline vector signed char
-vec_vsubsbs (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_vsubsbs (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vsubsbs (__vector signed char a1, __vector __bool char a2)
{
- return (vector signed char) __builtin_altivec_vsubsbs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vsubsbs (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_vsububs */
-inline vector unsigned char
-vec_vsububs (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vsububs (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vsububs (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_vsububs (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vsububs (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vsububs (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_sum4s */
-inline vector unsigned int
-vec_sum4s (vector unsigned char a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_sum4s (__vector unsigned char a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsum4ubs ((vector signed char) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsum4ubs ((__vector signed char) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_sum4s (vector signed char a1, vector signed int a2)
+inline __vector signed int
+vec_sum4s (__vector signed char a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vsum4sbs ((vector signed char) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsum4sbs ((__vector signed char) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_sum4s (vector signed short a1, vector signed int a2)
+inline __vector signed int
+vec_sum4s (__vector signed short a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vsum4shs ((vector signed short) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsum4shs ((__vector signed short) a1, (__vector signed int) a2);
}
/* vec_vsum4shs */
-inline vector signed int
-vec_vsum4shss (vector signed short a1, vector signed int a2)
+inline __vector signed int
+vec_vsum4shs (__vector signed short a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vsum4shs ((vector signed short) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsum4shs ((__vector signed short) a1, (__vector signed int) a2);
}
/* vec_vsum4sbs */
-inline vector signed int
-vec_vsum4sbs (vector signed char a1, vector signed int a2)
+inline __vector signed int
+vec_vsum4sbs (__vector signed char a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vsum4sbs ((vector signed char) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsum4sbs ((__vector signed char) a1, (__vector signed int) a2);
}
/* vec_vsum4ubs */
-inline vector unsigned int
-vec_vsum4ubs (vector unsigned char a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vsum4ubs (__vector unsigned char a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsum4ubs ((vector signed char) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsum4ubs ((__vector signed char) a1, (__vector signed int) a2);
}
/* vec_sum2s */
-inline vector signed int
-vec_sum2s (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_sum2s (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vsum2sws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsum2sws ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_sums */
-inline vector signed int
-vec_sums (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_sums (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vsumsws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsumsws ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_trunc */
-inline vector float
-vec_trunc (vector float a1)
+inline __vector float
+vec_trunc (__vector float a1)
{
- return (vector float) __builtin_altivec_vrfiz ((vector float) a1);
+ return (__vector float) __builtin_altivec_vrfiz ((__vector float) a1);
}
/* vec_unpackh */
-inline vector signed short
-vec_unpackh (vector signed char a1)
+inline __vector signed short
+vec_unpackh (__vector signed char a1)
+{
+ return (__vector signed short) __builtin_altivec_vupkhsb ((__vector signed char) a1);
+}
+
+inline __vector __bool short
+vec_unpackh (__vector __bool char a1)
{
- return (vector signed short) __builtin_altivec_vupkhsb ((vector signed char) a1);
+ return (__vector __bool short) __builtin_altivec_vupkhsb ((__vector signed char) a1);
}
-inline vector signed int
-vec_unpackh (vector signed short a1)
+inline __vector signed int
+vec_unpackh (__vector signed short a1)
{
- return (vector signed int) __builtin_altivec_vupkhsh ((vector signed short) a1);
+ return (__vector signed int) __builtin_altivec_vupkhsh ((__vector signed short) a1);
}
-inline vector unsigned int
-vec_unpackh (vector unsigned short a1)
+inline __vector __bool int
+vec_unpackh (__vector __bool short a1)
{
- return (vector unsigned int) __builtin_altivec_vupkhpx ((vector signed short) a1);
+ return (__vector __bool int) __builtin_altivec_vupkhsh ((__vector signed short) a1);
+}
+
+inline __vector unsigned int
+vec_unpackh (__vector __pixel a1)
+{
+ return (__vector unsigned int) __builtin_altivec_vupkhpx ((__vector signed short) a1);
}
/* vec_vupkhsh */
-inline vector signed int
-vec_vupkhsh (vector signed short a1)
+inline __vector __bool int
+vec_vupkhsh (__vector __bool short a1)
{
- return (vector signed int) __builtin_altivec_vupkhsh ((vector signed short) a1);
+ return (__vector __bool int) __builtin_altivec_vupkhsh ((__vector signed short) a1);
+}
+
+inline __vector signed int
+vec_vupkhsh (__vector signed short a1)
+{
+ return (__vector signed int) __builtin_altivec_vupkhsh ((__vector signed short) a1);
}
/* vec_vupkhpx */
-inline vector unsigned int
-vec_vupkhpx (vector unsigned short a1)
+inline __vector unsigned int
+vec_vupkhpx (__vector __pixel a1)
{
- return (vector unsigned int) __builtin_altivec_vupkhpx ((vector signed short) a1);
+ return (__vector unsigned int) __builtin_altivec_vupkhpx ((__vector signed short) a1);
}
/* vec_vupkhsb */
-inline vector signed short
-vec_vupkhsb (vector signed char a1)
+inline __vector __bool short
+vec_vupkhsb (__vector __bool char a1)
{
- return (vector signed short) __builtin_altivec_vupkhsb ((vector signed char) a1);
+ return (__vector __bool short) __builtin_altivec_vupkhsb ((__vector signed char) a1);
+}
+
+inline __vector signed short
+vec_vupkhsb (__vector signed char a1)
+{
+ return (__vector signed short) __builtin_altivec_vupkhsb ((__vector signed char) a1);
}
/* vec_unpackl */
-inline vector signed short
-vec_unpackl (vector signed char a1)
+inline __vector signed short
+vec_unpackl (__vector signed char a1)
+{
+ return (__vector signed short) __builtin_altivec_vupklsb ((__vector signed char) a1);
+}
+
+inline __vector __bool short
+vec_unpackl (__vector __bool char a1)
+{
+ return (__vector __bool short) __builtin_altivec_vupklsb ((__vector signed char) a1);
+}
+
+inline __vector unsigned int
+vec_unpackl (__vector __pixel a1)
{
- return (vector signed short) __builtin_altivec_vupklsb ((vector signed char) a1);
+ return (__vector unsigned int) __builtin_altivec_vupklpx ((__vector signed short) a1);
}
-inline vector unsigned int
-vec_unpackl (vector unsigned short a1)
+inline __vector signed int
+vec_unpackl (__vector signed short a1)
{
- return (vector unsigned int) __builtin_altivec_vupklpx ((vector signed short) a1);
+ return (__vector signed int) __builtin_altivec_vupklsh ((__vector signed short) a1);
}
-inline vector signed int
-vec_unpackl (vector signed short a1)
+inline __vector __bool int
+vec_unpackl (__vector __bool short a1)
{
- return (vector signed int) __builtin_altivec_vupklsh ((vector signed short) a1);
+ return (__vector __bool int) __builtin_altivec_vupklsh ((__vector signed short) a1);
}
/* vec_vupklpx */
-inline vector unsigned int
-vec_vupklpx (vector unsigned short a1)
+inline __vector unsigned int
+vec_vupklpx (__vector __pixel a1)
{
- return (vector unsigned int) __builtin_altivec_vupklpx ((vector signed short) a1);
+ return (__vector unsigned int) __builtin_altivec_vupklpx ((__vector signed short) a1);
}
/* vec_upklsh */
-inline vector signed int
-vec_vupklsh (vector signed short a1)
+inline __vector __bool int
+vec_vupklsh (__vector __bool short a1)
+{
+ return (__vector __bool int) __builtin_altivec_vupklsh ((__vector signed short) a1);
+}
+
+inline __vector signed int
+vec_vupklsh (__vector signed short a1)
{
- return (vector signed int) __builtin_altivec_vupklsh ((vector signed short) a1);
+ return (__vector signed int) __builtin_altivec_vupklsh ((__vector signed short) a1);
}
/* vec_vupklsb */
-inline vector signed short
-vec_vupklsb (vector signed char a1)
+inline __vector __bool short
+vec_vupklsb (__vector __bool char a1)
{
- return (vector signed short) __builtin_altivec_vupklsb ((vector signed char) a1);
+ return (__vector __bool short) __builtin_altivec_vupklsb ((__vector signed char) a1);
+}
+
+inline __vector signed short
+vec_vupklsb (__vector signed char a1)
+{
+ return (__vector signed short) __builtin_altivec_vupklsb ((__vector signed char) a1);
}
/* vec_xor */
-inline vector float
-vec_xor (vector float a1, vector float a2)
+inline __vector float
+vec_xor (__vector float a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_xor (__vector float a1, __vector __bool int a2)
+{
+ return (__vector float) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_xor (__vector __bool int a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool int
+vec_xor (__vector __bool int a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_xor (vector float a1, vector signed int a2)
+inline __vector signed int
+vec_xor (__vector __bool int a1, __vector signed int a2)
{
- return (vector float) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_xor (vector signed int a1, vector float a2)
+inline __vector signed int
+vec_xor (__vector signed int a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_xor (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_xor (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_xor (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_xor (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_xor (vector unsigned int a1, vector signed int a2)
+inline __vector unsigned int
+vec_xor (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_xor (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_xor (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_xor (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_xor (__vector __bool short a1, __vector __bool short a2)
{
- return (vector signed short) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_xor (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_xor (__vector __bool short a1, __vector signed short a2)
{
- return (vector unsigned short) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_xor (vector unsigned short a1, vector signed short a2)
+inline __vector signed short
+vec_xor (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_xor (vector unsigned short a1, vector unsigned short a2)
+inline __vector signed short
+vec_xor (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned short) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_xor (vector signed char a1, vector signed char a2)
+inline __vector unsigned short
+vec_xor (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector signed char) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_xor (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_xor (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned char) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_xor (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned short
+vec_xor (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_xor (vector unsigned char a1, vector unsigned char a2)
+inline __vector signed char
+vec_xor (__vector __bool char a1, __vector signed char a2)
{
- return (vector unsigned char) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool char
+vec_xor (__vector __bool char a1, __vector __bool char a2)
+{
+ return (__vector __bool char) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_xor (__vector signed char a1, __vector __bool char a2)
+{
+ return (__vector signed char) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_xor (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_xor (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_xor (__vector unsigned char a1, __vector __bool char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_xor (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vxor ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_all_eq */
inline int
-vec_all_eq (vector signed char a1, vector unsigned char a2)
+vec_all_eq (__vector signed char a1, __vector __bool char a2)
{
- return __builtin_altivec_vcmpequb_p (__CR6_LT, a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_LT, a1, (__vector signed char) a2);
}
inline int
-vec_all_eq (vector signed char a1, vector signed char a2)
+vec_all_eq (__vector signed char a1, __vector signed char a2)
{
return __builtin_altivec_vcmpequb_p (__CR6_LT, a1, a2);
}
inline int
-vec_all_eq (vector unsigned char a1, vector signed char a2)
+vec_all_eq (__vector unsigned char a1, __vector __bool char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline int
+vec_all_eq (__vector unsigned char a1, __vector unsigned char a2)
{
- return __builtin_altivec_vcmpequb_p (__CR6_LT, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_eq (vector unsigned char a1, vector unsigned char a2)
+vec_all_eq (__vector __bool char a1, __vector __bool char a2)
{
- return __builtin_altivec_vcmpequb_p (__CR6_LT, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_eq (vector signed short a1, vector unsigned short a2)
+vec_all_eq (__vector __bool char a1, __vector unsigned char a2)
{
- return __builtin_altivec_vcmpequh_p (__CR6_LT, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_eq (vector signed short a1, vector signed short a2)
+vec_all_eq (__vector __bool char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpequh_p (__CR6_LT, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_eq (vector unsigned short a1, vector signed short a2)
+vec_all_eq (__vector signed short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpequh_p (__CR6_LT, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_eq (vector unsigned short a1, vector unsigned short a2)
+vec_all_eq (__vector signed short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpequh_p (__CR6_LT, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_eq (vector signed int a1, vector unsigned int a2)
+vec_all_eq (__vector unsigned short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpequw_p (__CR6_LT, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_eq (vector signed int a1, vector signed int a2)
+vec_all_eq (__vector unsigned short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpequw_p (__CR6_LT, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_eq (vector unsigned int a1, vector signed int a2)
+vec_all_eq (__vector __bool short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpequw_p (__CR6_LT, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_eq (vector unsigned int a1, vector unsigned int a2)
+vec_all_eq (__vector __bool short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpequw_p (__CR6_LT, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_eq (vector float a1, vector float a2)
+vec_all_eq (__vector __bool short a1, __vector signed short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline int
+vec_all_eq (__vector __pixel a1, __vector __pixel a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline int
+vec_all_eq (__vector signed int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_eq (__vector signed int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_eq (__vector unsigned int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_eq (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_eq (__vector __bool int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_eq (__vector __bool int a1, __vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_eq (__vector __bool int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_eq (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpeqfp_p (__CR6_LT, a1, a2);
}
@@ -5064,79 +6846,115 @@ vec_all_eq (vector float a1, vector float a2)
/* vec_all_ge */
inline int
-vec_all_ge (vector signed char a1, vector unsigned char a2)
+vec_all_ge (__vector __bool char a1, __vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_all_ge (__vector unsigned char a1, __vector __bool char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_all_ge (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_all_ge (__vector __bool char a1, __vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_all_ge (__vector signed char a1, __vector __bool char a2)
+{
+ return __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_all_ge (__vector signed char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) a2, (vector signed char) a1);
+ return __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) a2, (__vector signed char) a1);
}
inline int
-vec_all_ge (vector unsigned char a1, vector signed char a2)
+vec_all_ge (__vector __bool short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) a2, (vector signed char) a1);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_all_ge (vector unsigned char a1, vector unsigned char a2)
+vec_all_ge (__vector unsigned short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) a2, (vector signed char) a1);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_all_ge (vector signed char a1, vector signed char a2)
+vec_all_ge (__vector unsigned short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (vector signed char) a2, (vector signed char) a1);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_all_ge (vector signed short a1, vector unsigned short a2)
+vec_all_ge (__vector signed short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) a2, (vector signed short) a1);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_all_ge (vector unsigned short a1, vector signed short a2)
+vec_all_ge (__vector __bool short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) a2, (vector signed short) a1);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_all_ge (vector unsigned short a1, vector unsigned short a2)
+vec_all_ge (__vector signed short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) a2, (vector signed short) a1);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_all_ge (vector signed short a1, vector signed short a2)
+vec_all_ge (__vector __bool int a1, __vector unsigned int a2)
{
- return __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (vector signed short) a2, (vector signed short) a1);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_all_ge (vector signed int a1, vector unsigned int a2)
+vec_all_ge (__vector unsigned int a1, __vector __bool int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) a2, (vector signed int) a1);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_all_ge (vector unsigned int a1, vector signed int a2)
+vec_all_ge (__vector unsigned int a1, __vector unsigned int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) a2, (vector signed int) a1);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_all_ge (vector unsigned int a1, vector unsigned int a2)
+vec_all_ge (__vector __bool int a1, __vector signed int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) a2, (vector signed int) a1);
+ return __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_all_ge (vector signed int a1, vector signed int a2)
+vec_all_ge (__vector signed int a1, __vector __bool int a2)
{
- return __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (vector signed int) a2, (vector signed int) a1);
+ return __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_all_ge (vector float a1, vector float a2)
+vec_all_ge (__vector signed int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) a2, (__vector signed int) a1);
+}
+
+inline int
+vec_all_ge (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpgefp_p (__CR6_LT, a1, a2);
}
@@ -5144,79 +6962,115 @@ vec_all_ge (vector float a1, vector float a2)
/* vec_all_gt */
inline int
-vec_all_gt (vector signed char a1, vector unsigned char a2)
+vec_all_gt (__vector __bool char a1, __vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline int
+vec_all_gt (__vector unsigned char a1, __vector __bool char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline int
+vec_all_gt (__vector unsigned char a1, __vector unsigned char a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_gt (vector unsigned char a1, vector signed char a2)
+vec_all_gt (__vector __bool char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_gt (vector unsigned char a1, vector unsigned char a2)
+vec_all_gt (__vector signed char a1, __vector __bool char a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_gt (vector signed char a1, vector signed char a2)
+vec_all_gt (__vector signed char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpgtsb_p (__CR6_LT, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_gt (vector signed short a1, vector unsigned short a2)
+vec_all_gt (__vector __bool short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_gt (vector unsigned short a1, vector signed short a2)
+vec_all_gt (__vector unsigned short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_gt (vector unsigned short a1, vector unsigned short a2)
+vec_all_gt (__vector unsigned short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_gt (vector signed short a1, vector signed short a2)
+vec_all_gt (__vector __bool short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpgtsh_p (__CR6_LT, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_gt (vector signed int a1, vector unsigned int a2)
+vec_all_gt (__vector signed short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_gt (vector unsigned int a1, vector signed int a2)
+vec_all_gt (__vector signed short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_gt (vector unsigned int a1, vector unsigned int a2)
+vec_all_gt (__vector __bool int a1, __vector unsigned int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
}
inline int
-vec_all_gt (vector signed int a1, vector signed int a2)
+vec_all_gt (__vector unsigned int a1, __vector __bool int a2)
{
- return __builtin_altivec_vcmpgtsw_p (__CR6_LT, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
}
inline int
-vec_all_gt (vector float a1, vector float a2)
+vec_all_gt (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_gt (__vector __bool int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_gt (__vector signed int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_gt (__vector signed int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_gt (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpgtfp_p (__CR6_LT, a1, a2);
}
@@ -5224,7 +7078,7 @@ vec_all_gt (vector float a1, vector float a2)
/* vec_all_in */
inline int
-vec_all_in (vector float a1, vector float a2)
+vec_all_in (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpbfp_p (__CR6_EQ, a1, a2);
}
@@ -5232,79 +7086,115 @@ vec_all_in (vector float a1, vector float a2)
/* vec_all_le */
inline int
-vec_all_le (vector signed char a1, vector unsigned char a2)
+vec_all_le (__vector __bool char a1, __vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline int
+vec_all_le (__vector unsigned char a1, __vector __bool char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline int
+vec_all_le (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline int
+vec_all_le (__vector __bool char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_le (vector unsigned char a1, vector signed char a2)
+vec_all_le (__vector signed char a1, __vector __bool char a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_le (vector unsigned char a1, vector unsigned char a2)
+vec_all_le (__vector signed char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_le (vector signed char a1, vector signed char a2)
+vec_all_le (__vector __bool short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_le (vector signed short a1, vector unsigned short a2)
+vec_all_le (__vector unsigned short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_le (vector unsigned short a1, vector signed short a2)
+vec_all_le (__vector unsigned short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_le (vector unsigned short a1, vector unsigned short a2)
+vec_all_le (__vector __bool short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_le (vector signed short a1, vector signed short a2)
+vec_all_le (__vector signed short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_le (vector signed int a1, vector unsigned int a2)
+vec_all_le (__vector signed short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_le (vector unsigned int a1, vector signed int a2)
+vec_all_le (__vector __bool int a1, __vector unsigned int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
}
inline int
-vec_all_le (vector unsigned int a1, vector unsigned int a2)
+vec_all_le (__vector unsigned int a1, __vector __bool int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
}
inline int
-vec_all_le (vector signed int a1, vector signed int a2)
+vec_all_le (__vector unsigned int a1, __vector unsigned int a2)
{
- return __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
}
inline int
-vec_all_le (vector float a1, vector float a2)
+vec_all_le (__vector __bool int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_le (__vector signed int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_le (__vector signed int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_le (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpgefp_p (__CR6_LT, a2, a1);
}
@@ -5312,79 +7202,115 @@ vec_all_le (vector float a1, vector float a2)
/* vec_all_lt */
inline int
-vec_all_lt (vector signed char a1, vector unsigned char a2)
+vec_all_lt (__vector __bool char a1, __vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_all_lt (__vector unsigned char a1, __vector __bool char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_all_lt (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_all_lt (__vector __bool char a1, __vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_all_lt (__vector signed char a1, __vector __bool char a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) a2, (vector signed char) a1);
+ return __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) a2, (__vector signed char) a1);
}
inline int
-vec_all_lt (vector unsigned char a1, vector signed char a2)
+vec_all_lt (__vector signed char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) a2, (vector signed char) a1);
+ return __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) a2, (__vector signed char) a1);
}
inline int
-vec_all_lt (vector unsigned char a1, vector unsigned char a2)
+vec_all_lt (__vector __bool short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) a2, (vector signed char) a1);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_all_lt (vector signed char a1, vector signed char a2)
+vec_all_lt (__vector unsigned short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpgtsb_p (__CR6_LT, (vector signed char) a2, (vector signed char) a1);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_all_lt (vector signed short a1, vector unsigned short a2)
+vec_all_lt (__vector unsigned short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) a2, (vector signed short) a1);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_all_lt (vector unsigned short a1, vector signed short a2)
+vec_all_lt (__vector __bool short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) a2, (vector signed short) a1);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_all_lt (vector unsigned short a1, vector unsigned short a2)
+vec_all_lt (__vector signed short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) a2, (vector signed short) a1);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_all_lt (vector signed short a1, vector signed short a2)
+vec_all_lt (__vector signed short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpgtsh_p (__CR6_LT, (vector signed short) a2, (vector signed short) a1);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_all_lt (vector signed int a1, vector unsigned int a2)
+vec_all_lt (__vector __bool int a1, __vector unsigned int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) a2, (vector signed int) a1);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_all_lt (vector unsigned int a1, vector signed int a2)
+vec_all_lt (__vector unsigned int a1, __vector __bool int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) a2, (vector signed int) a1);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_all_lt (vector unsigned int a1, vector unsigned int a2)
+vec_all_lt (__vector unsigned int a1, __vector unsigned int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) a2, (vector signed int) a1);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_all_lt (vector signed int a1, vector signed int a2)
+vec_all_lt (__vector __bool int a1, __vector signed int a2)
{
- return __builtin_altivec_vcmpgtsw_p (__CR6_LT, (vector signed int) a2, (vector signed int) a1);
+ return __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_all_lt (vector float a1, vector float a2)
+vec_all_lt (__vector signed int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) a2, (__vector signed int) a1);
+}
+
+inline int
+vec_all_lt (__vector signed int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) a2, (__vector signed int) a1);
+}
+
+inline int
+vec_all_lt (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpgtfp_p (__CR6_LT, a2, a1);
}
@@ -5392,7 +7318,7 @@ vec_all_lt (vector float a1, vector float a2)
/* vec_all_nan */
inline int
-vec_all_nan (vector float a1)
+vec_all_nan (__vector float a1)
{
return __builtin_altivec_vcmpeqfp_p (__CR6_EQ, a1, a1);
}
@@ -5400,79 +7326,139 @@ vec_all_nan (vector float a1)
/* vec_all_ne */
inline int
-vec_all_ne (vector signed char a1, vector unsigned char a2)
+vec_all_ne (__vector signed char a1, __vector __bool char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline int
+vec_all_ne (__vector signed char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_ne (vector signed char a1, vector signed char a2)
+vec_all_ne (__vector unsigned char a1, __vector __bool char a2)
{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_ne (vector unsigned char a1, vector signed char a2)
+vec_all_ne (__vector unsigned char a1, __vector unsigned char a2)
{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_ne (vector unsigned char a1, vector unsigned char a2)
+vec_all_ne (__vector __bool char a1, __vector __bool char a2)
{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_ne (vector signed short a1, vector unsigned short a2)
+vec_all_ne (__vector __bool char a1, __vector unsigned char a2)
{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_ne (vector signed short a1, vector signed short a2)
+vec_all_ne (__vector __bool char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_all_ne (vector unsigned short a1, vector signed short a2)
+vec_all_ne (__vector signed short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_ne (vector unsigned short a1, vector unsigned short a2)
+vec_all_ne (__vector signed short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_ne (vector signed int a1, vector unsigned int a2)
+vec_all_ne (__vector unsigned short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_ne (vector signed int a1, vector signed int a2)
+vec_all_ne (__vector unsigned short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_ne (vector unsigned int a1, vector signed int a2)
+vec_all_ne (__vector __bool short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_ne (vector unsigned int a1, vector unsigned int a2)
+vec_all_ne (__vector __bool short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_all_ne (vector float a1, vector float a2)
+vec_all_ne (__vector __bool short a1, __vector signed short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline int
+vec_all_ne (__vector __pixel a1, __vector __pixel a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline int
+vec_all_ne (__vector signed int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_ne (__vector signed int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_ne (__vector unsigned int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_ne (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_ne (__vector __bool int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_ne (__vector __bool int a1, __vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_ne (__vector __bool int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_all_ne (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpeqfp_p (__CR6_EQ, a1, a2);
}
@@ -5480,7 +7466,7 @@ vec_all_ne (vector float a1, vector float a2)
/* vec_all_nge */
inline int
-vec_all_nge (vector float a1, vector float a2)
+vec_all_nge (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpgefp_p (__CR6_EQ, a1, a2);
}
@@ -5488,7 +7474,7 @@ vec_all_nge (vector float a1, vector float a2)
/* vec_all_ngt */
inline int
-vec_all_ngt (vector float a1, vector float a2)
+vec_all_ngt (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpgtfp_p (__CR6_EQ, a1, a2);
}
@@ -5496,7 +7482,7 @@ vec_all_ngt (vector float a1, vector float a2)
/* vec_all_nle */
inline int
-vec_all_nle (vector float a1, vector float a2)
+vec_all_nle (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpgefp_p (__CR6_EQ, a2, a1);
}
@@ -5504,7 +7490,7 @@ vec_all_nle (vector float a1, vector float a2)
/* vec_all_nlt */
inline int
-vec_all_nlt (vector float a1, vector float a2)
+vec_all_nlt (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpgtfp_p (__CR6_EQ, a2, a1);
}
@@ -5512,7 +7498,7 @@ vec_all_nlt (vector float a1, vector float a2)
/* vec_all_numeric */
inline int
-vec_all_numeric (vector float a1)
+vec_all_numeric (__vector float a1)
{
return __builtin_altivec_vcmpeqfp_p (__CR6_LT, a1, a1);
}
@@ -5520,79 +7506,139 @@ vec_all_numeric (vector float a1)
/* vec_any_eq */
inline int
-vec_any_eq (vector signed char a1, vector unsigned char a2)
+vec_any_eq (__vector signed char a1, __vector __bool char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline int
+vec_any_eq (__vector signed char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_eq (vector signed char a1, vector signed char a2)
+vec_any_eq (__vector unsigned char a1, __vector __bool char a2)
{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_eq (vector unsigned char a1, vector signed char a2)
+vec_any_eq (__vector unsigned char a1, __vector unsigned char a2)
{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_eq (vector unsigned char a1, vector unsigned char a2)
+vec_any_eq (__vector __bool char a1, __vector __bool char a2)
{
- return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_eq (vector signed short a1, vector unsigned short a2)
+vec_any_eq (__vector __bool char a1, __vector unsigned char a2)
{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_eq (vector signed short a1, vector signed short a2)
+vec_any_eq (__vector __bool char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_eq (vector unsigned short a1, vector signed short a2)
+vec_any_eq (__vector signed short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_eq (vector unsigned short a1, vector unsigned short a2)
+vec_any_eq (__vector signed short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_eq (vector signed int a1, vector unsigned int a2)
+vec_any_eq (__vector unsigned short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_eq (vector signed int a1, vector signed int a2)
+vec_any_eq (__vector unsigned short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_eq (vector unsigned int a1, vector signed int a2)
+vec_any_eq (__vector __bool short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_eq (vector unsigned int a1, vector unsigned int a2)
+vec_any_eq (__vector __bool short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_eq (vector float a1, vector float a2)
+vec_any_eq (__vector __bool short a1, __vector signed short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline int
+vec_any_eq (__vector __pixel a1, __vector __pixel a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline int
+vec_any_eq (__vector signed int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_eq (__vector signed int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_eq (__vector unsigned int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_eq (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_eq (__vector __bool int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_eq (__vector __bool int a1, __vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_eq (__vector __bool int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_eq (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, a1, a2);
}
@@ -5600,79 +7646,115 @@ vec_any_eq (vector float a1, vector float a2)
/* vec_any_ge */
inline int
-vec_any_ge (vector signed char a1, vector unsigned char a2)
+vec_any_ge (__vector signed char a1, __vector __bool char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_any_ge (__vector unsigned char a1, __vector __bool char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_any_ge (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_any_ge (__vector signed char a1, __vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_any_ge (__vector __bool char a1, __vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_any_ge (__vector __bool char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) a2, (vector signed char) a1);
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) a2, (__vector signed char) a1);
}
inline int
-vec_any_ge (vector unsigned char a1, vector signed char a2)
+vec_any_ge (__vector unsigned short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) a2, (vector signed char) a1);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_any_ge (vector unsigned char a1, vector unsigned char a2)
+vec_any_ge (__vector unsigned short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) a2, (vector signed char) a1);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_any_ge (vector signed char a1, vector signed char a2)
+vec_any_ge (__vector signed short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (vector signed char) a2, (vector signed char) a1);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_any_ge (vector signed short a1, vector unsigned short a2)
+vec_any_ge (__vector signed short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) a2, (vector signed short) a1);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_any_ge (vector unsigned short a1, vector signed short a2)
+vec_any_ge (__vector __bool short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) a2, (vector signed short) a1);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_any_ge (vector unsigned short a1, vector unsigned short a2)
+vec_any_ge (__vector __bool short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) a2, (vector signed short) a1);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_any_ge (vector signed short a1, vector signed short a2)
+vec_any_ge (__vector signed int a1, __vector __bool int a2)
{
- return __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (vector signed short) a2, (vector signed short) a1);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_any_ge (vector signed int a1, vector unsigned int a2)
+vec_any_ge (__vector unsigned int a1, __vector __bool int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) a2, (vector signed int) a1);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_any_ge (vector unsigned int a1, vector signed int a2)
+vec_any_ge (__vector unsigned int a1, __vector unsigned int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) a2, (vector signed int) a1);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_any_ge (vector unsigned int a1, vector unsigned int a2)
+vec_any_ge (__vector signed int a1, __vector signed int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) a2, (vector signed int) a1);
+ return __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_any_ge (vector signed int a1, vector signed int a2)
+vec_any_ge (__vector __bool int a1, __vector unsigned int a2)
{
- return __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (vector signed int) a2, (vector signed int) a1);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_any_ge (vector float a1, vector float a2)
+vec_any_ge (__vector __bool int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) a2, (__vector signed int) a1);
+}
+
+inline int
+vec_any_ge (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpgefp_p (__CR6_EQ_REV, a1, a2);
}
@@ -5680,79 +7762,115 @@ vec_any_ge (vector float a1, vector float a2)
/* vec_any_gt */
inline int
-vec_any_gt (vector signed char a1, vector unsigned char a2)
+vec_any_gt (__vector __bool char a1, __vector unsigned char a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_gt (vector unsigned char a1, vector signed char a2)
+vec_any_gt (__vector unsigned char a1, __vector __bool char a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_gt (vector unsigned char a1, vector unsigned char a2)
+vec_any_gt (__vector unsigned char a1, __vector unsigned char a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_gt (vector signed char a1, vector signed char a2)
+vec_any_gt (__vector __bool char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_gt (vector signed short a1, vector unsigned short a2)
+vec_any_gt (__vector signed char a1, __vector __bool char a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_gt (vector unsigned short a1, vector signed short a2)
+vec_any_gt (__vector signed char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_gt (vector unsigned short a1, vector unsigned short a2)
+vec_any_gt (__vector __bool short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_gt (vector signed short a1, vector signed short a2)
+vec_any_gt (__vector unsigned short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_gt (vector signed int a1, vector unsigned int a2)
+vec_any_gt (__vector unsigned short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_gt (vector unsigned int a1, vector signed int a2)
+vec_any_gt (__vector __bool short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_gt (vector unsigned int a1, vector unsigned int a2)
+vec_any_gt (__vector signed short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_gt (vector signed int a1, vector signed int a2)
+vec_any_gt (__vector signed short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_gt (vector float a1, vector float a2)
+vec_any_gt (__vector __bool int a1, __vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_gt (__vector unsigned int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_gt (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_gt (__vector __bool int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_gt (__vector signed int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_gt (__vector signed int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_gt (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpgtfp_p (__CR6_EQ_REV, a1, a2);
}
@@ -5760,79 +7878,115 @@ vec_any_gt (vector float a1, vector float a2)
/* vec_any_le */
inline int
-vec_any_le (vector signed char a1, vector unsigned char a2)
+vec_any_le (__vector __bool char a1, __vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline int
+vec_any_le (__vector unsigned char a1, __vector __bool char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline int
+vec_any_le (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline int
+vec_any_le (__vector __bool char a1, __vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline int
+vec_any_le (__vector signed char a1, __vector __bool char a2)
+{
+ return __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline int
+vec_any_le (__vector signed char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_le (vector unsigned char a1, vector signed char a2)
+vec_any_le (__vector __bool short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_le (vector unsigned char a1, vector unsigned char a2)
+vec_any_le (__vector unsigned short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_le (vector signed char a1, vector signed char a2)
+vec_any_le (__vector unsigned short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_le (vector signed short a1, vector unsigned short a2)
+vec_any_le (__vector __bool short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_le (vector unsigned short a1, vector signed short a2)
+vec_any_le (__vector signed short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_le (vector unsigned short a1, vector unsigned short a2)
+vec_any_le (__vector signed short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_le (vector signed short a1, vector signed short a2)
+vec_any_le (__vector __bool int a1, __vector unsigned int a2)
{
- return __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
}
inline int
-vec_any_le (vector signed int a1, vector unsigned int a2)
+vec_any_le (__vector unsigned int a1, __vector __bool int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
}
inline int
-vec_any_le (vector unsigned int a1, vector signed int a2)
+vec_any_le (__vector unsigned int a1, __vector unsigned int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
}
inline int
-vec_any_le (vector unsigned int a1, vector unsigned int a2)
+vec_any_le (__vector __bool int a1, __vector signed int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
}
inline int
-vec_any_le (vector signed int a1, vector signed int a2)
+vec_any_le (__vector signed int a1, __vector __bool int a2)
{
- return __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
}
inline int
-vec_any_le (vector float a1, vector float a2)
+vec_any_le (__vector signed int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_le (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpgefp_p (__CR6_EQ_REV, a2, a1);
}
@@ -5840,79 +7994,115 @@ vec_any_le (vector float a1, vector float a2)
/* vec_any_lt */
inline int
-vec_any_lt (vector signed char a1, vector unsigned char a2)
+vec_any_lt (__vector __bool char a1, __vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_any_lt (__vector unsigned char a1, __vector __bool char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_any_lt (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_any_lt (__vector __bool char a1, __vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) a2, (__vector signed char) a1);
+}
+
+inline int
+vec_any_lt (__vector signed char a1, __vector __bool char a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) a2, (vector signed char) a1);
+ return __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) a2, (__vector signed char) a1);
}
inline int
-vec_any_lt (vector unsigned char a1, vector signed char a2)
+vec_any_lt (__vector signed char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) a2, (vector signed char) a1);
+ return __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) a2, (__vector signed char) a1);
}
inline int
-vec_any_lt (vector unsigned char a1, vector unsigned char a2)
+vec_any_lt (__vector __bool short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) a2, (vector signed char) a1);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_any_lt (vector signed char a1, vector signed char a2)
+vec_any_lt (__vector unsigned short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (vector signed char) a2, (vector signed char) a1);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_any_lt (vector signed short a1, vector unsigned short a2)
+vec_any_lt (__vector unsigned short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) a2, (vector signed short) a1);
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_any_lt (vector unsigned short a1, vector signed short a2)
+vec_any_lt (__vector __bool short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) a2, (vector signed short) a1);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_any_lt (vector unsigned short a1, vector unsigned short a2)
+vec_any_lt (__vector signed short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) a2, (vector signed short) a1);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_any_lt (vector signed short a1, vector signed short a2)
+vec_any_lt (__vector signed short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (vector signed short) a2, (vector signed short) a1);
+ return __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) a2, (__vector signed short) a1);
}
inline int
-vec_any_lt (vector signed int a1, vector unsigned int a2)
+vec_any_lt (__vector __bool int a1, __vector unsigned int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) a2, (vector signed int) a1);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_any_lt (vector unsigned int a1, vector signed int a2)
+vec_any_lt (__vector unsigned int a1, __vector __bool int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) a2, (vector signed int) a1);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_any_lt (vector unsigned int a1, vector unsigned int a2)
+vec_any_lt (__vector unsigned int a1, __vector unsigned int a2)
{
- return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) a2, (vector signed int) a1);
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_any_lt (vector signed int a1, vector signed int a2)
+vec_any_lt (__vector __bool int a1, __vector signed int a2)
{
- return __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (vector signed int) a2, (vector signed int) a1);
+ return __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) a2, (__vector signed int) a1);
}
inline int
-vec_any_lt (vector float a1, vector float a2)
+vec_any_lt (__vector signed int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) a2, (__vector signed int) a1);
+}
+
+inline int
+vec_any_lt (__vector signed int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) a2, (__vector signed int) a1);
+}
+
+inline int
+vec_any_lt (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpgtfp_p (__CR6_EQ_REV, a2, a1);
}
@@ -5920,7 +8110,7 @@ vec_any_lt (vector float a1, vector float a2)
/* vec_any_nan */
inline int
-vec_any_nan (vector float a1)
+vec_any_nan (__vector float a1)
{
return __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, a1, a1);
}
@@ -5928,79 +8118,139 @@ vec_any_nan (vector float a1)
/* vec_any_ne */
inline int
-vec_any_ne (vector signed char a1, vector unsigned char a2)
+vec_any_ne (__vector signed char a1, __vector __bool char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline int
+vec_any_ne (__vector signed char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_ne (vector signed char a1, vector signed char a2)
+vec_any_ne (__vector unsigned char a1, __vector __bool char a2)
{
- return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_ne (vector unsigned char a1, vector signed char a2)
+vec_any_ne (__vector unsigned char a1, __vector unsigned char a2)
{
- return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_ne (vector unsigned char a1, vector unsigned char a2)
+vec_any_ne (__vector __bool char a1, __vector __bool char a2)
{
- return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (vector signed char) a1, (vector signed char) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_ne (vector signed short a1, vector unsigned short a2)
+vec_any_ne (__vector __bool char a1, __vector unsigned char a2)
{
- return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_ne (vector signed short a1, vector signed short a2)
+vec_any_ne (__vector __bool char a1, __vector signed char a2)
{
- return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) a1, (__vector signed char) a2);
}
inline int
-vec_any_ne (vector unsigned short a1, vector signed short a2)
+vec_any_ne (__vector signed short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_ne (vector unsigned short a1, vector unsigned short a2)
+vec_any_ne (__vector signed short a1, __vector signed short a2)
{
- return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (vector signed short) a1, (vector signed short) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_ne (vector signed int a1, vector unsigned int a2)
+vec_any_ne (__vector unsigned short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_ne (vector signed int a1, vector signed int a2)
+vec_any_ne (__vector unsigned short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_ne (vector unsigned int a1, vector signed int a2)
+vec_any_ne (__vector __bool short a1, __vector __bool short a2)
{
- return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_ne (vector unsigned int a1, vector unsigned int a2)
+vec_any_ne (__vector __bool short a1, __vector unsigned short a2)
{
- return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (vector signed int) a1, (vector signed int) a2);
+ return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
}
inline int
-vec_any_ne (vector float a1, vector float a2)
+vec_any_ne (__vector __bool short a1, __vector signed short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline int
+vec_any_ne (__vector __pixel a1, __vector __pixel a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline int
+vec_any_ne (__vector signed int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_ne (__vector signed int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_ne (__vector unsigned int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_ne (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_ne (__vector __bool int a1, __vector __bool int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_ne (__vector __bool int a1, __vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_ne (__vector __bool int a1, __vector signed int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline int
+vec_any_ne (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, a1, a2);
}
@@ -6008,7 +8258,7 @@ vec_any_ne (vector float a1, vector float a2)
/* vec_any_nge */
inline int
-vec_any_nge (vector float a1, vector float a2)
+vec_any_nge (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, a1, a2);
}
@@ -6016,7 +8266,7 @@ vec_any_nge (vector float a1, vector float a2)
/* vec_any_ngt */
inline int
-vec_any_ngt (vector float a1, vector float a2)
+vec_any_ngt (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, a1, a2);
}
@@ -6024,7 +8274,7 @@ vec_any_ngt (vector float a1, vector float a2)
/* vec_any_nle */
inline int
-vec_any_nle (vector float a1, vector float a2)
+vec_any_nle (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, a2, a1);
}
@@ -6032,7 +8282,7 @@ vec_any_nle (vector float a1, vector float a2)
/* vec_any_nlt */
inline int
-vec_any_nlt (vector float a1, vector float a2)
+vec_any_nlt (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, a2, a1);
}
@@ -6040,7 +8290,7 @@ vec_any_nlt (vector float a1, vector float a2)
/* vec_any_numeric */
inline int
-vec_any_numeric (vector float a1)
+vec_any_numeric (__vector float a1)
{
return __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, a1, a1);
}
@@ -6048,2480 +8298,3208 @@ vec_any_numeric (vector float a1)
/* vec_any_out */
inline int
-vec_any_out (vector float a1, vector float a2)
+vec_any_out (__vector float a1, __vector float a2)
{
return __builtin_altivec_vcmpbfp_p (__CR6_EQ_REV, a1, a2);
}
-/* vec_step */
-
-template<typename _Tp>
-struct __vec_step_help
-{
- // All proper vector types will specialize _S_elem.
-};
-
-template<>
-struct __vec_step_help<vector signed short>
-{
- static const int _S_elem = 8;
-};
+} /* extern "C++" */
-template<>
-struct __vec_step_help<vector unsigned short>
-{
- static const int _S_elem = 8;
-};
-
-template<>
-struct __vec_step_help<vector signed int>
-{
- static const int _S_elem = 4;
-};
-
-template<>
-struct __vec_step_help<vector unsigned int>
-{
- static const int _S_elem = 4;
-};
+#else /* not C++ */
-template<>
-struct __vec_step_help<vector unsigned char>
-{
- static const int _S_elem = 16;
-};
+/* "... and so I think no man in a century will suffer as greatly as
+ you will." */
-template<>
-struct __vec_step_help<vector signed char>
-{
- static const int _S_elem = 16;
-};
+/* Helper macros. */
-template<>
-struct __vec_step_help<vector float>
-{
- static const int _S_elem = 4;
-};
+#define __un_args_eq(xtype, x) \
+ __builtin_types_compatible_p (xtype, typeof (x))
-#define vec_step(t) __vec_step_help<typeof(t)>::_S_elem
+#define __bin_args_eq(xtype, x, ytype, y) \
+ (__builtin_types_compatible_p (xtype, typeof (x)) \
+ && __builtin_types_compatible_p (ytype, typeof (y)))
-}//extern "C++"
+#define __tern_args_eq(xtype, x, ytype, y, ztype, z) \
+ (__builtin_types_compatible_p (xtype, typeof (x)) \
+ && __builtin_types_compatible_p (ytype, typeof (y)) \
+ && __builtin_types_compatible_p (ztype, typeof (z)))
-#else /* not C++ */
+#define __ch(x, y, z) __builtin_choose_expr (x, y, z)
-/* "... and so I think no man in a century will suffer as greatly as
- you will." */
+#define vec_step(t) \
+ __ch (__builtin_types_compatible_p (typeof (t), __vector signed int), 4, \
+ __ch (__builtin_types_compatible_p (typeof (t), __vector unsigned int), 4, \
+ __ch (__builtin_types_compatible_p (typeof (t), __vector __bool int), 4, \
+ __ch (__builtin_types_compatible_p (typeof (t), __vector signed short), 8, \
+ __ch (__builtin_types_compatible_p (typeof (t), __vector unsigned short), 8, \
+ __ch (__builtin_types_compatible_p (typeof (t), __vector __bool short), 8, \
+ __ch (__builtin_types_compatible_p (typeof (t), __vector __pixel), 8, \
+ __ch (__builtin_types_compatible_p (typeof (t), __vector signed char), 16, \
+ __ch (__builtin_types_compatible_p (typeof (t), __vector unsigned char), 16, \
+ __ch (__builtin_types_compatible_p (typeof (t), __vector __bool char), 16, \
+ __ch (__builtin_types_compatible_p (typeof (t), __vector float), 4, \
+ __builtin_altivec_compiletime_error ("vec_step"))))))))))))
#define vec_abs(a) \
- __ch (__un_args_eq (vector signed char, (a)), \
- ((vector signed char) __builtin_altivec_abs_v16qi ((vector signed char) (a))), \
- __ch (__un_args_eq (vector signed short, (a)), \
- ((vector signed short) __builtin_altivec_abs_v8hi ((vector signed short) (a))), \
- __ch (__un_args_eq (vector signed int, (a)), \
- ((vector signed int) __builtin_altivec_abs_v4si ((vector signed int) (a))), \
- __ch (__un_args_eq (vector float, (a)), \
- ((vector float) __builtin_altivec_abs_v4sf ((vector float) (a))), \
- __altivec_link_error_invalid_argument ()))))
+ __ch (__un_args_eq (__vector signed char, (a)), \
+ ((__vector signed char) __builtin_altivec_abs_v16qi ((__vector signed char) (a))), \
+ __ch (__un_args_eq (__vector signed short, (a)), \
+ ((__vector signed short) __builtin_altivec_abs_v8hi ((__vector signed short) (a))), \
+ __ch (__un_args_eq (__vector signed int, (a)), \
+ ((__vector signed int) __builtin_altivec_abs_v4si ((__vector signed int) (a))), \
+ __ch (__un_args_eq (__vector float, (a)), \
+ ((__vector float) __builtin_altivec_abs_v4sf ((__vector float) (a))), \
+ __builtin_altivec_compiletime_error ("vec_abs")))))
#define vec_abss(a) \
- __ch (__un_args_eq (vector signed char, (a)), \
- ((vector signed char) __builtin_altivec_abss_v16qi ((vector signed char) (a))), \
- __ch (__un_args_eq (vector signed short, (a)), \
- ((vector signed short) __builtin_altivec_abss_v8hi ((vector signed short) (a))), \
- __ch (__un_args_eq (vector signed int, (a)), \
- ((vector signed int) __builtin_altivec_abss_v4si ((vector signed int) (a))), \
- __altivec_link_error_invalid_argument ())))
-
-#define vec_step(t) \
- __ch (__builtin_types_compatible_p (typeof (t), vector signed int), 4, \
- __ch (__builtin_types_compatible_p (typeof (t), vector unsigned int), 4, \
- __ch (__builtin_types_compatible_p (typeof (t), vector signed short), 8, \
- __ch (__builtin_types_compatible_p (typeof (t), vector unsigned short), 8, \
- __ch (__builtin_types_compatible_p (typeof (t), vector signed char), 16, \
- __ch (__builtin_types_compatible_p (typeof (t), vector unsigned char), 16, \
- __ch (__builtin_types_compatible_p (typeof (t), vector float), 4, \
- __altivec_link_error_invalid_argument ())))))))
+ __ch (__un_args_eq (__vector signed char, (a)), \
+ ((__vector signed char) __builtin_altivec_abss_v16qi ((__vector signed char) (a))), \
+ __ch (__un_args_eq (__vector signed short, (a)), \
+ ((__vector signed short) __builtin_altivec_abss_v8hi ((__vector signed short) (a))), \
+ __ch (__un_args_eq (__vector signed int, (a)), \
+ ((__vector signed int) __builtin_altivec_abss_v4si ((__vector signed int) (a))), \
+ __builtin_altivec_compiletime_error ("vec_abss"))))
#define vec_vaddubm(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vaddubm ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) (a1), (vector signed char) (a2))), \
- __altivec_link_error_invalid_argument ()))))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vaddubm")))))))
#define vec_vadduhm(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vadduhm ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) (a1), (vector signed short) (a2))), \
- __altivec_link_error_invalid_argument ()))))
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vadduhm")))))))
#define vec_vadduwm(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vadduwm ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ()))))
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vadduwm")))))))
#define vec_vaddfp(a1, a2) \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vaddfp ((vector float) (a1), (vector float) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vaddfp ((__vector float) (a1), (__vector float) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vaddfp"))
#define vec_add(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vaddubm ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vadduhm ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vadduwm ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vaddfp ((vector float) (a1), (vector float) (a2))), \
- __altivec_link_error_invalid_argument ())))))))))))))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vaddfp ((__vector float) (a1), (__vector float) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_add"))))))))))))))))))))
#define vec_addc(a1, a2) \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vaddcuw ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vaddcuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_addc"))
#define vec_adds(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vaddsbs ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vaddshs ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vadduws ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vadduws ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vadduws ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vaddsws ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ()))))))))))))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_adds")))))))))))))))))))
#define vec_vaddsws(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vaddsws ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vaddsws"))))
#define vec_vadduws(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vadduws ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vadduws ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vadduws ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ())))
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vadduws"))))
#define vec_vaddshs(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vaddshs ((vector signed short) (a1), (vector signed short) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vaddshs"))))
#define vec_vadduhs(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) (a1), (vector signed short) (a2))), \
- __altivec_link_error_invalid_argument ())))
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vadduhs"))))
#define vec_vaddsbs(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vaddsbs ((vector signed char) (a1), (vector signed char) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vaddsbs"))))
#define vec_vaddubs(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) (a1), (vector signed char) (a2))), \
- __altivec_link_error_invalid_argument ())))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vaddubs"))))
#define vec_and(a1, a2) \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector float, (a1), vector signed int, (a2)), \
- ((vector float) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ())))))))))))))))
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector float, (a1), __vector __bool int, (a2)), \
+ ((__vector float) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vand ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_and")))))))))))))))))))))))))
#define vec_andc(a1, a2) \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector float, (a1), vector unsigned int, (a2)), \
- ((vector float) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector float, (a1), vector signed int, (a2)), \
- ((vector float) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector signed short) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ())))))))))))))))))))))))
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector float, (a1), __vector __bool int, (a2)), \
+ ((__vector float) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vandc ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_andc")))))))))))))))))))))))))
#define vec_avg(a1, a2) \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vavgub ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vavgsb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vavguh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vavgsh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vavguw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vavgsw ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ()))))))
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vavgub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vavgsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vavguh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vavgsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vavguw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vavgsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_avg")))))))
#define vec_vavgsw(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vavgsw ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vavgsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vavgsw"))
#define vec_vavguw(a1, a2) \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vavguw ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vavguw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vavguw"))
#define vec_vavgsh(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vavgsh ((vector signed short) (a1), (vector signed short) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vavgsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vavgsh"))
#define vec_vavguh(a1, a2) \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vavguh ((vector signed short) (a1), (vector signed short) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vavguh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vavguh"))
#define vec_vavgsb(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vavgsb ((vector signed char) (a1), (vector signed char) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vavgsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vavgsb"))
#define vec_vavgub(a1, a2) \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vavgub ((vector signed char) (a1), (vector signed char) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vavgub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vavgub"))
-#define vec_ceil(a1) __builtin_altivec_vrfip ((a1))
+#define vec_ceil(a1) \
+__ch (__un_args_eq (__vector float, (a1)), \
+ ((__vector float) __builtin_altivec_vrfip ((__vector float) (a1))), \
+ __builtin_altivec_compiletime_error ("vec_ceil"))
-#define vec_cmpb(a1, a2) __builtin_altivec_vcmpbfp ((a1), (a2))
+#define vec_cmpb(a1, a2) \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector signed int) __builtin_altivec_vcmpbfp ((__vector float) (a1), (__vector float) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_cmpb"))
#define vec_cmpeq(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vcmpequb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vcmpequb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vcmpequh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector signed short) __builtin_altivec_vcmpequh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vcmpequw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector signed int) __builtin_altivec_vcmpequw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector signed int) __builtin_altivec_vcmpeqfp ((vector float) (a1), (vector float) (a2))), \
- __altivec_link_error_invalid_argument ())))))))
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpeqfp ((__vector float) (a1), (__vector float) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_cmpeq"))))))))
#define vec_vcmpeqfp(a1, a2) \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector signed int) __builtin_altivec_vcmpeqfp ((vector float) (a1), (vector float) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpeqfp ((__vector float) (a1), (__vector float) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vcmpeqfp"))
#define vec_vcmpequw(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vcmpequw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector signed int) __builtin_altivec_vcmpequw ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vcmpequw")))
#define vec_vcmpequh(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vcmpequh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector signed short) __builtin_altivec_vcmpequh ((vector signed short) (a1), (vector signed short) (a2))), \
- __altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vcmpequh")))
#define vec_vcmpequb(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vcmpequb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vcmpequb ((vector signed char) (a1), (vector signed char) (a2))), \
- __altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vcmpequb")))
-#define vec_cmpge(a1, a2) (vector signed int) __builtin_altivec_vcmpgefp ((a1), (a2))
+#define vec_cmpge(a1, a2) \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpgefp ((__vector float) (a1), (__vector float) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_cmpge"))
#define vec_cmpgt(a1, a2) \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector signed int) __builtin_altivec_vcmpgtfp ((vector float) (a1), (vector float) (a2))), \
- __altivec_link_error_invalid_argument ())))))))
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vcmpgtub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vcmpgtsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vcmpgtuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vcmpgtsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpgtuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpgtsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpgtfp ((__vector float) (a1), (__vector float) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_cmpgt"))))))))
#define vec_vcmpgtfp(a1, a2) \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector signed int) __builtin_altivec_vcmpgtfp ((vector float) (a1), (vector float) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpgtfp ((__vector float) (a1), (__vector float) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vcmpgtfp"))
#define vec_vcmpgtsw(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpgtsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vcmpgtsw"))
#define vec_vcmpgtuw(a1, a2) \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpgtuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vcmpgtuw"))
#define vec_vcmpgtsh(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) (a1), (vector signed short) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vcmpgtsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vcmpgtsh"))
#define vec_vcmpgtuh(a1, a2) \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) (a1), (vector signed short) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vcmpgtuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vcmpgtuh"))
#define vec_vcmpgtsb(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) (a1), (vector signed char) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vcmpgtsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vcmpgtsb"))
#define vec_vcmpgtub(a1, a2) \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) (a1), (vector signed char) (a2))), \
- __altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vcmpgtub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vcmpgtub"))
-#define vec_cmple(a1, a2) __builtin_altivec_vcmpgefp ((a2), (a1))
+#define vec_cmple(a1, a2) \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpgefp ((__vector float) (a2), (__vector float) (a1))), \
+ __builtin_altivec_compiletime_error ("vec_cmple"))
#define vec_cmplt(a2, a1) \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector signed int) __builtin_altivec_vcmpgtfp ((vector float) (a1), (vector float) (a2))), \
- __altivec_link_error_invalid_argument ())))))))
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vcmpgtub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vcmpgtsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vcmpgtuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vcmpgtsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpgtuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpgtsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vcmpgtfp ((__vector float) (a1), (__vector float) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_cmplt"))))))))
#define vec_ctf(a1, a2) \
-__ch (__bin_args_eq (vector unsigned int, (a1), int, (a2)), \
- ((vector float) __builtin_altivec_vcfux ((vector signed int) (a1), (const char) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), int, (a2)), \
- ((vector float) __builtin_altivec_vcfsx ((vector signed int) (a1), (const char) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), unsigned int, (a2)), \
- ((vector float) __builtin_altivec_vcfux ((vector signed int) (a1), (const char) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), unsigned int, (a2)), \
- ((vector float) __builtin_altivec_vcfsx ((vector signed int) (a1), (const char) (a2))), \
- __altivec_link_error_invalid_argument ()))))
+__ch (__un_args_eq (__vector unsigned int, (a1)), \
+ ((__vector float) __builtin_altivec_vcfux ((__vector signed int) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector signed int, (a1)), \
+ ((__vector float) __builtin_altivec_vcfsx ((__vector signed int) (a1), (const int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_ctf")))
#define vec_vcfsx(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), int, (a2)), \
- ((vector float) __builtin_altivec_vcfsx ((vector signed int) (a1), (const char) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), unsigned int, (a2)), \
- ((vector float) __builtin_altivec_vcfsx ((vector signed int) (a1), (const char) (a2))), \
- __altivec_link_error_invalid_argument ()))
+__ch (__un_args_eq (__vector signed int, (a1)), \
+ ((__vector float) __builtin_altivec_vcfsx ((__vector signed int) (a1), (const int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vcfsx"))
#define vec_vcfux(a1, a2) \
-__ch (__bin_args_eq (vector unsigned int, (a1), int, (a2)), \
- ((vector float) __builtin_altivec_vcfux ((vector signed int) (a1), (const char) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), unsigned int, (a2)), \
- ((vector float) __builtin_altivec_vcfux ((vector signed int) (a1), (const char) (a2))), \
- __altivec_link_error_invalid_argument ()))
+__ch (__un_args_eq (__vector unsigned int, (a1)), \
+ ((__vector float) __builtin_altivec_vcfux ((__vector signed int) (a1), (const int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_vcfux"))
-#define vec_cts(a1, a2) __builtin_altivec_vctsxs ((a1), (a2))
+#define vec_cts(a1, a2) \
+__ch (__un_args_eq (__vector float, (a1)), \
+ ((__vector signed int) __builtin_altivec_vctsxs ((__vector float) (a1), (const int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_cts"))
-#define vec_ctu(a1, a2) (vector unsigned int) __builtin_altivec_vctuxs ((a1), (a2))
+#define vec_ctu(a1, a2) \
+__ch (__un_args_eq (__vector float, (a1)), \
+ ((__vector unsigned int) __builtin_altivec_vctuxs ((__vector float) (a1), (const int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_ctu"))
-#define vec_dss(a1) __builtin_altivec_dss ((a1))
+#define vec_dss(a1) __builtin_altivec_dss ((const int) (a1));
#define vec_dssall() __builtin_altivec_dssall ()
#define vec_dst(a1, a2, a3) \
-__ch (__un_args_eq (vector unsigned char, *(a1)), \
+__ch (__un_args_eq (const __vector unsigned char, *(a1)), \
+ __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
+__ch (__un_args_eq (const __vector signed char, *(a1)), \
__builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector signed char, *(a1)), \
+__ch (__un_args_eq (const __vector __bool char, *(a1)), \
__builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector unsigned short, *(a1)), \
+__ch (__un_args_eq (const __vector unsigned short, *(a1)), \
__builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector signed short, *(a1)), \
+__ch (__un_args_eq (const __vector signed short, *(a1)), \
__builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector unsigned int, *(a1)), \
+__ch (__un_args_eq (const __vector __bool short, *(a1)), \
__builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector signed int, *(a1)), \
+__ch (__un_args_eq (const __vector __pixel, *(a1)), \
__builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector float, *(a1)), \
+__ch (__un_args_eq (const __vector unsigned int, *(a1)), \
__builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (unsigned char, *(a1)), \
+__ch (__un_args_eq (const __vector signed int, *(a1)), \
__builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (signed char, *(a1)), \
+__ch (__un_args_eq (const __vector __bool int, *(a1)), \
__builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (unsigned short, *(a1)), \
+__ch (__un_args_eq (const __vector float, *(a1)), \
__builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (signed short, *(a1)), \
+__ch (__un_args_eq (const unsigned char, *(a1)), \
__builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (unsigned int, *(a1)), \
+__ch (__un_args_eq (const signed char, *(a1)), \
__builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (signed int, *(a1)), \
+__ch (__un_args_eq (const unsigned short, *(a1)), \
__builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (unsigned long, *(a1)), \
+__ch (__un_args_eq (const short, *(a1)), \
__builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (signed long, *(a1)), \
+__ch (__un_args_eq (const unsigned int, *(a1)), \
__builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (float, *(a1)), \
+__ch (__un_args_eq (const int, *(a1)), \
__builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
- __altivec_link_error_invalid_argument ()))))))))))))))))
+__ch (__un_args_eq (const unsigned long, *(a1)), \
+ __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
+__ch (__un_args_eq (const long, *(a1)), \
+ __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
+__ch (__un_args_eq (const float, *(a1)), \
+ __builtin_altivec_dst ((void *) (a1), (a2), (a3)), \
+ __builtin_altivec_compiletime_error ("vec_dst")))))))))))))))))))))
#define vec_dstst(a1, a2, a3) \
-__ch (__un_args_eq (vector unsigned char, *(a1)), \
+__ch (__un_args_eq (const __vector unsigned char, *(a1)), \
+ __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
+__ch (__un_args_eq (const __vector signed char, *(a1)), \
+ __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
+__ch (__un_args_eq (const __vector __bool char, *(a1)), \
+ __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
+__ch (__un_args_eq (const __vector unsigned short, *(a1)), \
__builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector signed char, *(a1)), \
+__ch (__un_args_eq (const __vector signed short, *(a1)), \
__builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector unsigned short, *(a1)), \
+__ch (__un_args_eq (const __vector __bool short, *(a1)), \
__builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector signed short, *(a1)), \
+__ch (__un_args_eq (const __vector __pixel, *(a1)), \
__builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector unsigned int, *(a1)), \
+__ch (__un_args_eq (const __vector unsigned int, *(a1)), \
__builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector signed int, *(a1)), \
+__ch (__un_args_eq (const __vector signed int, *(a1)), \
__builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector float, *(a1)), \
+__ch (__un_args_eq (const __vector __bool int, *(a1)), \
__builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (unsigned char, *(a1)), \
+__ch (__un_args_eq (const __vector float, *(a1)), \
__builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (signed char, *(a1)), \
+__ch (__un_args_eq (const unsigned char, *(a1)), \
__builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (unsigned short, *(a1)), \
+__ch (__un_args_eq (const signed char, *(a1)), \
__builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (signed short, *(a1)), \
+__ch (__un_args_eq (const unsigned short, *(a1)), \
__builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (unsigned int, *(a1)), \
+__ch (__un_args_eq (const short, *(a1)), \
__builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (signed int, *(a1)), \
+__ch (__un_args_eq (const unsigned int, *(a1)), \
__builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (unsigned long, *(a1)), \
+__ch (__un_args_eq (const int, *(a1)), \
__builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (signed long, *(a1)), \
+__ch (__un_args_eq (const unsigned long, *(a1)), \
__builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (float, *(a1)), \
+__ch (__un_args_eq (const long, *(a1)), \
__builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
- __altivec_link_error_invalid_argument ()))))))))))))))))
+__ch (__un_args_eq (const float, *(a1)), \
+ __builtin_altivec_dstst ((void *) (a1), (a2), (a3)), \
+ __builtin_altivec_compiletime_error ("vec_dstst")))))))))))))))))))))
#define vec_dststt(a1, a2, a3) \
-__ch (__un_args_eq (vector unsigned char, *(a1)), \
+__ch (__un_args_eq (const __vector unsigned char, *(a1)), \
+ __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
+__ch (__un_args_eq (const __vector signed char, *(a1)), \
+ __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
+__ch (__un_args_eq (const __vector __bool char, *(a1)), \
__builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector signed char, *(a1)), \
+__ch (__un_args_eq (const __vector unsigned short, *(a1)), \
__builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector unsigned short, *(a1)), \
+__ch (__un_args_eq (const __vector signed short, *(a1)), \
__builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector signed short, *(a1)), \
+__ch (__un_args_eq (const __vector __bool short, *(a1)), \
__builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector unsigned int, *(a1)), \
+__ch (__un_args_eq (const __vector __pixel, *(a1)), \
__builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector signed int, *(a1)), \
+__ch (__un_args_eq (const __vector unsigned int, *(a1)), \
__builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector float, *(a1)), \
+__ch (__un_args_eq (const __vector signed int, *(a1)), \
__builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (unsigned char, *(a1)), \
+__ch (__un_args_eq (const __vector __bool int, *(a1)), \
__builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (signed char, *(a1)), \
+__ch (__un_args_eq (const __vector float, *(a1)), \
__builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (unsigned short, *(a1)), \
+__ch (__un_args_eq (const unsigned char, *(a1)), \
__builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (signed short, *(a1)), \
+__ch (__un_args_eq (const signed char, *(a1)), \
__builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (unsigned int, *(a1)), \
+__ch (__un_args_eq (const unsigned short, *(a1)), \
__builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (signed int, *(a1)), \
+__ch (__un_args_eq (const short, *(a1)), \
__builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (unsigned long, *(a1)), \
+__ch (__un_args_eq (const unsigned int, *(a1)), \
__builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (signed long, *(a1)), \
+__ch (__un_args_eq (const int, *(a1)), \
__builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (float, *(a1)), \
+__ch (__un_args_eq (const unsigned long, *(a1)), \
__builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
- __altivec_link_error_invalid_argument ()))))))))))))))))
+__ch (__un_args_eq (const long, *(a1)), \
+ __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
+__ch (__un_args_eq (const float, *(a1)), \
+ __builtin_altivec_dststt ((void *) (a1), (a2), (a3)), \
+ __builtin_altivec_compiletime_error ("vec_dststt")))))))))))))))))))))
#define vec_dstt(a1, a2, a3) \
-__ch (__un_args_eq (vector unsigned char, *(a1)), \
+__ch (__un_args_eq (const __vector unsigned char, *(a1)), \
+ __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
+__ch (__un_args_eq (const __vector signed char, *(a1)), \
__builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector signed char, *(a1)), \
+__ch (__un_args_eq (const __vector __bool char, *(a1)), \
__builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector unsigned short, *(a1)), \
+__ch (__un_args_eq (const __vector unsigned short, *(a1)), \
__builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector signed short, *(a1)), \
+__ch (__un_args_eq (const __vector signed short, *(a1)), \
__builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector unsigned int, *(a1)), \
+__ch (__un_args_eq (const __vector __bool short, *(a1)), \
__builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector signed int, *(a1)), \
+__ch (__un_args_eq (const __vector __pixel, *(a1)), \
__builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (vector float, *(a1)), \
+__ch (__un_args_eq (const __vector unsigned int, *(a1)), \
__builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (unsigned char, *(a1)), \
+__ch (__un_args_eq (const __vector signed int, *(a1)), \
__builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (signed char, *(a1)), \
+__ch (__un_args_eq (const __vector __bool int, *(a1)), \
__builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (unsigned short, *(a1)), \
+__ch (__un_args_eq (const __vector float, *(a1)), \
__builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (signed short, *(a1)), \
+__ch (__un_args_eq (const unsigned char, *(a1)), \
__builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (unsigned int, *(a1)), \
+__ch (__un_args_eq (const signed char, *(a1)), \
__builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (signed int, *(a1)), \
+__ch (__un_args_eq (const unsigned short, *(a1)), \
__builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (unsigned long, *(a1)), \
+__ch (__un_args_eq (const short, *(a1)), \
__builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (signed long, *(a1)), \
+__ch (__un_args_eq (const unsigned int, *(a1)), \
__builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
-__ch (__un_args_eq (float, *(a1)), \
+__ch (__un_args_eq (const int, *(a1)), \
__builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
- __altivec_link_error_invalid_argument ()))))))))))))))))
+__ch (__un_args_eq (const unsigned long, *(a1)), \
+ __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
+__ch (__un_args_eq (const long, *(a1)), \
+ __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
+__ch (__un_args_eq (const float, *(a1)), \
+ __builtin_altivec_dstt ((void *) (a1), (a2), (a3)), \
+ __builtin_altivec_compiletime_error ("vec_dstt")))))))))))))))))))))
-#define vec_expte(a1) __builtin_altivec_vexptefp ((a1))
+#define vec_expte(a1) \
+__ch (__un_args_eq (__vector float, (a1)), \
+ ((__vector float) __builtin_altivec_vexptefp ((__vector float) (a1))), \
+ __builtin_altivec_compiletime_error ("vec_expte"))
-#define vec_floor(a1) __builtin_altivec_vrfim (a1)
+#define vec_floor(a1) \
+__ch (__un_args_eq (__vector float, (a1)), \
+ ((__vector float) __builtin_altivec_vrfim ((__vector float) (a1))), \
+ __builtin_altivec_compiletime_error ("vec_floor"))
#define vec_ld(a, b) \
-__ch (__un_args_eq (vector unsigned char, *(b)), \
- ((vector unsigned char) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (unsigned char, *(b)), \
- ((vector unsigned char) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (vector signed char, *(b)), \
- ((vector signed char) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (signed char, *(b)), \
- ((vector signed char) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (vector unsigned short, *(b)), \
- ((vector unsigned short) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (unsigned short, *(b)), \
- ((vector unsigned short) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (vector signed short, *(b)), \
- ((vector signed short) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (signed short, *(b)), \
- ((vector signed short) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (vector unsigned int, *(b)), \
- ((vector unsigned int) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (unsigned int, *(b)), \
- ((vector unsigned int) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (unsigned long, *(b)), \
- ((vector unsigned int) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (vector signed int, *(b)), \
- ((vector signed int) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (signed int, *(b)), \
- ((vector signed int) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (signed long, *(b)), \
- ((vector signed int) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (vector float, *(b)), \
- ((vector float) __builtin_altivec_lvx ((a), (b))), \
-__ch (__un_args_eq (float, *(b)), \
- ((vector float) __builtin_altivec_lvx ((a), (b))), \
-__altivec_link_error_invalid_argument ()))))))))))))))))
+__ch (__un_args_eq (const __vector unsigned char, *(b)), \
+ ((__vector unsigned char) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const unsigned char, *(b)), \
+ ((__vector unsigned char) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const __vector signed char, *(b)), \
+ ((__vector signed char) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const signed char, *(b)), \
+ ((__vector signed char) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const __vector __bool char, *(b)), \
+ ((__vector __bool char) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const __vector unsigned short, *(b)), \
+ ((__vector unsigned short) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const unsigned short, *(b)), \
+ ((__vector unsigned short) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const __vector signed short, *(b)), \
+ ((__vector signed short) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const short, *(b)), \
+ ((__vector signed short) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const __vector __bool short, *(b)), \
+ ((__vector __bool short) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const __vector __pixel, *(b)), \
+ ((__vector __pixel) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const __vector unsigned int, *(b)), \
+ ((__vector unsigned int) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const unsigned int, *(b)), \
+ ((__vector unsigned int) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const unsigned long, *(b)), \
+ ((__vector unsigned int) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const __vector signed int, *(b)), \
+ ((__vector signed int) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const int, *(b)), \
+ ((__vector signed int) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const long, *(b)), \
+ ((__vector signed int) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const __vector __bool int, *(b)), \
+ ((__vector __bool int) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const __vector float, *(b)), \
+ ((__vector float) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (const float, *(b)), \
+ ((__vector float) __builtin_altivec_lvx ((a), (b))), \
+__builtin_altivec_compiletime_error ("vec_ld")))))))))))))))))))))
#define vec_lde(a, b) \
-__ch (__un_args_eq (unsigned char, *(b)), \
- ((vector unsigned char) __builtin_altivec_lvebx ((a), (b))), \
-__ch (__un_args_eq (signed char, *(b)), \
- ((vector signed char) __builtin_altivec_lvebx ((a), (b))), \
-__ch (__un_args_eq (unsigned short, *(b)), \
- ((vector unsigned short) __builtin_altivec_lvehx ((a), (b))), \
-__ch (__un_args_eq (signed short, *(b)), \
- ((vector signed short) __builtin_altivec_lvehx ((a), (b))), \
-__ch (__un_args_eq (unsigned long, *(b)), \
- ((vector unsigned int) __builtin_altivec_lvewx ((a), (b))), \
-__ch (__un_args_eq (signed long, *(b)), \
- ((vector signed int) __builtin_altivec_lvewx ((a), (b))), \
-__ch (__un_args_eq (unsigned int, *(b)), \
- ((vector unsigned int) __builtin_altivec_lvewx ((a), (b))), \
-__ch (__un_args_eq (signed int, *(b)), \
- ((vector signed int) __builtin_altivec_lvewx ((a), (b))), \
-__ch (__un_args_eq (float, *(b)), \
- ((vector float) __builtin_altivec_lvewx ((a), (b))), \
-__altivec_link_error_invalid_argument ())))))))))
+__ch (__un_args_eq (const unsigned char, *(b)), \
+ ((__vector unsigned char) __builtin_altivec_lvebx ((a), (b))), \
+__ch (__un_args_eq (const signed char, *(b)), \
+ ((__vector signed char) __builtin_altivec_lvebx ((a), (b))), \
+__ch (__un_args_eq (const unsigned short, *(b)), \
+ ((__vector unsigned short) __builtin_altivec_lvehx ((a), (b))), \
+__ch (__un_args_eq (const short, *(b)), \
+ ((__vector signed short) __builtin_altivec_lvehx ((a), (b))), \
+__ch (__un_args_eq (const unsigned long, *(b)), \
+ ((__vector unsigned int) __builtin_altivec_lvewx ((a), (b))), \
+__ch (__un_args_eq (const long, *(b)), \
+ ((__vector signed int) __builtin_altivec_lvewx ((a), (b))), \
+__ch (__un_args_eq (const unsigned int, *(b)), \
+ ((__vector unsigned int) __builtin_altivec_lvewx ((a), (b))), \
+__ch (__un_args_eq (const int, *(b)), \
+ ((__vector signed int) __builtin_altivec_lvewx ((a), (b))), \
+__ch (__un_args_eq (const float, *(b)), \
+ ((__vector float) __builtin_altivec_lvewx ((a), (b))), \
+__builtin_altivec_compiletime_error ("vec_lde"))))))))))
#define vec_lvewx(a, b) \
__ch (__un_args_eq (unsigned int, *(b)), \
- ((vector unsigned int) __builtin_altivec_lvewx ((a), (b))), \
+ ((__vector unsigned int) __builtin_altivec_lvewx ((a), (b))), \
__ch (__un_args_eq (signed int, *(b)), \
- ((vector signed int) __builtin_altivec_lvewx ((a), (b))), \
+ ((__vector signed int) __builtin_altivec_lvewx ((a), (b))), \
__ch (__un_args_eq (unsigned long, *(b)), \
- ((vector unsigned int) __builtin_altivec_lvewx ((a), (b))), \
+ ((__vector unsigned int) __builtin_altivec_lvewx ((a), (b))), \
__ch (__un_args_eq (signed long, *(b)), \
- ((vector signed int) __builtin_altivec_lvewx ((a), (b))), \
+ ((__vector signed int) __builtin_altivec_lvewx ((a), (b))), \
__ch (__un_args_eq (float, *(b)), \
- ((vector float) __builtin_altivec_lvewx ((a), (b))), \
-__altivec_link_error_invalid_argument ())))))
+ ((__vector float) __builtin_altivec_lvewx ((a), (b))), \
+__builtin_altivec_compiletime_error ("vec_lvewx"))))))
#define vec_lvehx(a, b) \
__ch (__un_args_eq (unsigned short, *(b)), \
- ((vector unsigned short) __builtin_altivec_lvehx ((a), (b))), \
+ ((__vector unsigned short) __builtin_altivec_lvehx ((a), (b))), \
__ch (__un_args_eq (signed short, *(b)), \
- ((vector signed short) __builtin_altivec_lvehx ((a), (b))), \
-__altivec_link_error_invalid_argument ()))
+ ((__vector signed short) __builtin_altivec_lvehx ((a), (b))), \
+__builtin_altivec_compiletime_error ("vec_lvehx")))
#define vec_lvebx(a, b) \
__ch (__un_args_eq (unsigned char, *(b)), \
- ((vector unsigned char) __builtin_altivec_lvebx ((a), (b))), \
+ ((__vector unsigned char) __builtin_altivec_lvebx ((a), (b))), \
__ch (__un_args_eq (signed char, *(b)), \
- ((vector signed char) __builtin_altivec_lvebx ((a), (b))), \
-__altivec_link_error_invalid_argument ()))
+ ((__vector signed char) __builtin_altivec_lvebx ((a), (b))), \
+__builtin_altivec_compiletime_error ("vec_lvebx")))
#define vec_ldl(a, b) \
-__ch (__un_args_eq (vector unsigned char, *(b)), \
- ((vector unsigned char) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (unsigned char, *(b)), \
- ((vector unsigned char) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (vector signed char, *(b)), \
- ((vector signed char) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (signed char, *(b)), \
- ((vector signed char) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (vector unsigned short, *(b)), \
- ((vector unsigned short) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (unsigned short, *(b)), \
- ((vector unsigned short) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (vector signed short, *(b)), \
- ((vector signed short) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (signed short, *(b)), \
- ((vector signed short) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (vector unsigned int, *(b)), \
- ((vector unsigned int) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (unsigned int, *(b)), \
- ((vector unsigned int) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (unsigned long, *(b)), \
- ((vector unsigned int) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (vector signed int, *(b)), \
- ((vector signed int) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (signed int, *(b)), \
- ((vector signed int) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (signed long, *(b)), \
- ((vector signed int) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (vector float, *(b)), \
- ((vector float) __builtin_altivec_lvxl ((a), (b))), \
-__ch (__un_args_eq (float, *(b)), \
- ((vector float) __builtin_altivec_lvxl ((a), (b))), \
-__altivec_link_error_invalid_argument ()))))))))))))))))
-
-#define vec_loge(a1) __builtin_altivec_vlogefp ((a1))
+__ch (__un_args_eq (const __vector unsigned char, *(b)), \
+ ((__vector unsigned char) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const unsigned char, *(b)), \
+ ((__vector unsigned char) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const __vector signed char, *(b)), \
+ ((__vector signed char) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const signed char, *(b)), \
+ ((__vector signed char) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const __vector __bool char, *(b)), \
+ ((__vector __bool char) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const __vector unsigned short, *(b)), \
+ ((__vector unsigned short) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const unsigned short, *(b)), \
+ ((__vector unsigned short) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const __vector signed short, *(b)), \
+ ((__vector signed short) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const short, *(b)), \
+ ((__vector signed short) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const __vector __bool short, *(b)), \
+ ((__vector __bool short) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const __vector __pixel, *(b)), \
+ ((__vector __pixel) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const __vector unsigned int, *(b)), \
+ ((__vector unsigned int) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const unsigned int, *(b)), \
+ ((__vector unsigned int) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const unsigned long, *(b)), \
+ ((__vector unsigned int) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const __vector signed int, *(b)), \
+ ((__vector signed int) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const int, *(b)), \
+ ((__vector signed int) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const long, *(b)), \
+ ((__vector signed int) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const __vector __bool int, *(b)), \
+ ((__vector __bool int) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const __vector float, *(b)), \
+ ((__vector float) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (const float, *(b)), \
+ ((__vector float) __builtin_altivec_lvxl ((a), (b))), \
+__builtin_altivec_compiletime_error ("vec_ldl")))))))))))))))))))))
+
+#define vec_loge(a1) \
+__ch (__un_args_eq (__vector float, (a1)), \
+ ((__vector float) __builtin_altivec_vlogefp ((__vector float) (a1))), \
+ __builtin_altivec_compiletime_error ("vec_loge"))
#define vec_lvsl(a1, a2) \
-__ch (__un_args_eq (unsigned char, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__ch (__un_args_eq (signed char, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__ch (__un_args_eq (unsigned short, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__ch (__un_args_eq (signed short, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__ch (__un_args_eq (unsigned int, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__ch (__un_args_eq (signed int, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__ch (__un_args_eq (unsigned long, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__ch (__un_args_eq (signed long, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__ch (__un_args_eq (float, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
-__altivec_link_error_invalid_argument ())))))))))
+__ch (__un_args_eq (const volatile unsigned char, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
+__ch (__un_args_eq (const volatile signed char, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
+__ch (__un_args_eq (const volatile unsigned short, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
+__ch (__un_args_eq (const volatile signed short, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
+__ch (__un_args_eq (const volatile unsigned int, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
+__ch (__un_args_eq (const volatile signed int, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
+__ch (__un_args_eq (const volatile unsigned long, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
+__ch (__un_args_eq (const volatile signed long, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
+__ch (__un_args_eq (const volatile float, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsl ((a1), (void *) (a2))), \
+__builtin_altivec_compiletime_error ("vec_lvsl"))))))))))
#define vec_lvsr(a1, a2) \
-__ch (__un_args_eq (unsigned char, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__ch (__un_args_eq (signed char, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__ch (__un_args_eq (unsigned short, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__ch (__un_args_eq (signed short, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__ch (__un_args_eq (unsigned int, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__ch (__un_args_eq (signed int, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__ch (__un_args_eq (unsigned long, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__ch (__un_args_eq (signed long, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__ch (__un_args_eq (float, *(a2)), \
- ((vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
-__altivec_link_error_invalid_argument ())))))))))
-
-#define vec_madd(a1, a2, a3) (__builtin_altivec_vmaddfp ((a1), (a2), (a3)))
-
-#define vec_madds(a1, a2, a3) __builtin_altivec_vmhaddshs ((a1), (a2), (a3))
+__ch (__un_args_eq (const volatile unsigned char, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
+__ch (__un_args_eq (const volatile signed char, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
+__ch (__un_args_eq (const volatile unsigned short, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
+__ch (__un_args_eq (const volatile signed short, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
+__ch (__un_args_eq (const volatile unsigned int, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
+__ch (__un_args_eq (const volatile signed int, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
+__ch (__un_args_eq (const volatile unsigned long, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
+__ch (__un_args_eq (const volatile signed long, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
+__ch (__un_args_eq (const volatile float, *(a2)), \
+ ((__vector unsigned char) __builtin_altivec_lvsr ((a1), (void *) (a2))), \
+__builtin_altivec_compiletime_error ("vec_lvsr"))))))))))
+
+#define vec_madd(a1, a2, a3) \
+__ch (__tern_args_eq (__vector float, (a1), __vector float, (a2), __vector float, (a3)), \
+ ((__vector float) __builtin_altivec_vmaddfp ((a1), (a2), (a3))), \
+__builtin_altivec_compiletime_error ("vec_madd"))
+
+#define vec_madds(a1, a2, a3) \
+__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector signed short, (a3)), \
+ ((__vector signed short) __builtin_altivec_vmhaddshs ((a1), (a2), (a3))), \
+__builtin_altivec_compiletime_error ("vec_madds"))
#define vec_max(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vmaxsb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vmaxsh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vmaxsw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vmaxfp ((vector float) (a1), (vector float) (a2))), \
- __altivec_link_error_invalid_argument ())))))))))))))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vmaxfp ((__vector float) (a1), (__vector float) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_max"))))))))))))))))))))
#define vec_vmaxfp(a1, a2) \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vmaxfp ((vector float) (a1), (vector float) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vmaxfp ((__vector float) (a1), (__vector float) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmaxfp"))
#define vec_vmaxsw(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vmaxsw ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmaxsw"))))
#define vec_vmaxuw(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ())))
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmaxuw"))))
#define vec_vmaxsh(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vmaxsh ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmaxsh"))))
#define vec_vmaxuh(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ())))
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmaxuh"))))
#define vec_vmaxsb(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vmaxsb ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmaxsb"))))
#define vec_vmaxub(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ())))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmaxub"))))
#define vec_mergeh(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vmrghb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vmrghb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vmrghh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vmrghh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vmrghw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vmrghw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vmrghw ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ())))))))
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vmrghb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vmrghb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vmrghb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vmrghh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vmrghh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vmrghh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
+ ((__vector __pixel) __builtin_altivec_vmrghh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vmrghw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vmrghw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vmrghw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vmrghw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_mergeh"))))))))))))
#define vec_vmrghw(a1, a2) \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vmrghw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vmrghw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vmrghw ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ())))
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vmrghw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vmrghw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vmrghw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vmrghw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmrghw")))))
#define vec_vmrghh(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vmrghh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vmrghh ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vmrghh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vmrghh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vmrghh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
+ ((__vector __pixel) __builtin_altivec_vmrghh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmrghh")))))
#define vec_vmrghb(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vmrghb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vmrghb ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vmrghb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vmrghb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vmrghb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmrghb"))))
#define vec_mergel(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vmrglb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vmrglb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vmrglh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vmrglh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ())))))))
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vmrglb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vmrglb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vmrglb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vmrglh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vmrglh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vmrglh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
+ ((__vector __pixel) __builtin_altivec_vmrglh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vmrglw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vmrglw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vmrglw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vmrglw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_mergel"))))))))))))
#define vec_vmrglw(a1, a2) \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ())))
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vmrglw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vmrglw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vmrglw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vmrglw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmrglw")))))
#define vec_vmrglh(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vmrglh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vmrglh ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vmrglh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vmrglh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vmrglh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
+ ((__vector __pixel) __builtin_altivec_vmrglh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmrglh")))))
#define vec_vmrglb(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vmrglb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vmrglb ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vmrglb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vmrglb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vmrglb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmrglb"))))
-#define vec_mfvscr() (((vector unsigned short) __builtin_altivec_mfvscr ()))
+#define vec_mfvscr() (((__vector unsigned short) __builtin_altivec_mfvscr ()))
#define vec_min(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vminub ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vminub ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vminub ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vminsb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vminuh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vminuh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vminuh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vminsh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vminuw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vminuw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vminuw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vminsw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vminfp ((vector float) (a1), (vector float) (a2))), \
- __altivec_link_error_invalid_argument ())))))))))))))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vminsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vminsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vminsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vminsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vminsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vminsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vminsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vminsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vminsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vminfp ((__vector float) (a1), (__vector float) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_min"))))))))))))))))))))
#define vec_vminfp(a1, a2) \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vminfp ((vector float) (a1), (vector float) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vminfp ((__vector float) (a1), (__vector float) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vminfp"))
#define vec_vminsw(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vminsw ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vminsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vminsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vminsw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vminsw"))))
#define vec_vminuw(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vminuw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vminuw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vminuw ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ())))
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vminuw"))))
#define vec_vminsh(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vminsh ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vminsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vminsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vminsh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vminsh"))))
#define vec_vminuh(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vminuh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vminuh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vminuh ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ())))
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vminuh"))))
#define vec_vminsb(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vminsb ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vminsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vminsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vminsb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vminsb"))))
#define vec_vminub(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vminub ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vminub ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vminub ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ())))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vminub"))))
#define vec_mladd(a1, a2, a3) \
-__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), vector signed short, (a3)), \
- ((vector signed short) __builtin_altivec_vmladduhm ((vector signed short) (a1), (vector signed short) (a2), (vector signed short) (a3))), \
-__ch (__tern_args_eq (vector signed short, (a1), vector unsigned short, (a2), vector unsigned short, (a3)), \
- ((vector signed short) __builtin_altivec_vmladduhm ((vector signed short) (a1), (vector signed short) (a2), (vector signed short) (a3))), \
-__ch (__tern_args_eq (vector unsigned short, (a1), vector signed short, (a2), vector signed short, (a3)), \
- ((vector signed short) __builtin_altivec_vmladduhm ((vector signed short) (a1), (vector signed short) (a2), (vector signed short) (a3))), \
-__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), vector unsigned short, (a3)), \
- ((vector unsigned short) __builtin_altivec_vmladduhm ((vector signed short) (a1), (vector signed short) (a2), (vector signed short) (a3))), \
- __altivec_link_error_invalid_argument ()))))
-
-#define vec_mradds(a1, a2, a3) __builtin_altivec_vmhraddshs ((a1), (a2), (a3))
+__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector signed short, (a3)), \
+ ((__vector signed short) __builtin_altivec_vmladduhm ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed short) (a3))), \
+__ch (__tern_args_eq (__vector signed short, (a1), __vector unsigned short, (a2), __vector unsigned short, (a3)), \
+ ((__vector signed short) __builtin_altivec_vmladduhm ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed short) (a3))), \
+__ch (__tern_args_eq (__vector unsigned short, (a1), __vector signed short, (a2), __vector signed short, (a3)), \
+ ((__vector signed short) __builtin_altivec_vmladduhm ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed short) (a3))), \
+__ch (__tern_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2), __vector unsigned short, (a3)), \
+ ((__vector unsigned short) __builtin_altivec_vmladduhm ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed short) (a3))), \
+ __builtin_altivec_compiletime_error ("vec_mladd")))))
+
+#define vec_mradds(a1, a2, a3) \
+__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector signed short, (a3)), \
+ ((__vector signed short) __builtin_altivec_vmhraddshs ((a1), (a2), (a3))), \
+__builtin_altivec_compiletime_error ("vec_mradds"))
#define vec_msum(a1, a2, a3) \
-__ch (__tern_args_eq (vector unsigned char, (a1), vector unsigned char, (a2), vector unsigned int, (a3)), \
- ((vector unsigned int) __builtin_altivec_vmsumubm ((vector signed char) (a1), (vector signed char) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector signed char, (a1), vector unsigned char, (a2), vector signed int, (a3)), \
- ((vector signed int) __builtin_altivec_vmsummbm ((vector signed char) (a1), (vector signed char) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), vector unsigned int, (a3)), \
- ((vector unsigned int) __builtin_altivec_vmsumuhm ((vector signed short) (a1), (vector signed short) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), vector signed int, (a3)), \
- ((vector signed int) __builtin_altivec_vmsumshm ((vector signed short) (a1), (vector signed short) (a2), (vector signed int) (a3))), \
- __altivec_link_error_invalid_argument ()))))
+__ch (__tern_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2), __vector unsigned int, (a3)), \
+ ((__vector unsigned int) __builtin_altivec_vmsumubm ((__vector signed char) (a1), (__vector signed char) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector signed char, (a1), __vector unsigned char, (a2), __vector signed int, (a3)), \
+ ((__vector signed int) __builtin_altivec_vmsummbm ((__vector signed char) (a1), (__vector signed char) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2), __vector unsigned int, (a3)), \
+ ((__vector unsigned int) __builtin_altivec_vmsumuhm ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector signed int, (a3)), \
+ ((__vector signed int) __builtin_altivec_vmsumshm ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed int) (a3))), \
+ __builtin_altivec_compiletime_error ("vec_msum")))))
#define vec_vmsumshm(a1, a2, a3) \
-__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), vector signed int, (a3)), \
- ((vector signed int) __builtin_altivec_vmsumshm ((vector signed short) (a1), (vector signed short) (a2), (vector signed int) (a3))), \
-__altivec_link_error_invalid_argument ())
+__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector signed int, (a3)), \
+ ((__vector signed int) __builtin_altivec_vmsumshm ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed int) (a3))), \
+__builtin_altivec_compiletime_error ("vec_vmsumshm"))
#define vec_vmsumuhm(a1, a2, a3) \
-__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), vector unsigned int, (a3)), \
- ((vector unsigned int) __builtin_altivec_vmsumuhm ((vector signed short) (a1), (vector signed short) (a2), (vector signed int) (a3))), \
-__altivec_link_error_invalid_argument ())
+__ch (__tern_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2), __vector unsigned int, (a3)), \
+ ((__vector unsigned int) __builtin_altivec_vmsumuhm ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed int) (a3))), \
+__builtin_altivec_compiletime_error ("vec_vmsumuhm"))
#define vec_vmsummbm(a1, a2, a3) \
-__ch (__tern_args_eq (vector signed char, (a1), vector unsigned char, (a2), vector signed int, (a3)), \
- ((vector signed int) __builtin_altivec_vmsummbm ((vector signed char) (a1), (vector signed char) (a2), (vector signed int) (a3))), \
-__altivec_link_error_invalid_argument ())
+__ch (__tern_args_eq (__vector signed char, (a1), __vector unsigned char, (a2), __vector signed int, (a3)), \
+ ((__vector signed int) __builtin_altivec_vmsummbm ((__vector signed char) (a1), (__vector signed char) (a2), (__vector signed int) (a3))), \
+__builtin_altivec_compiletime_error ("vec_vmsummbm"))
#define vec_vmsumubm(a1, a2, a3) \
-__ch (__tern_args_eq (vector unsigned char, (a1), vector unsigned char, (a2), vector unsigned int, (a3)), \
- ((vector unsigned int) __builtin_altivec_vmsumubm ((vector signed char) (a1), (vector signed char) (a2), (vector signed int) (a3))), \
-__altivec_link_error_invalid_argument ())
+__ch (__tern_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2), __vector unsigned int, (a3)), \
+ ((__vector unsigned int) __builtin_altivec_vmsumubm ((__vector signed char) (a1), (__vector signed char) (a2), (__vector signed int) (a3))), \
+__builtin_altivec_compiletime_error ("vec_vmsumubm"))
#define vec_msums(a1, a2, a3) \
-__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), vector unsigned int, (a3)), \
- ((vector unsigned int) __builtin_altivec_vmsumuhs ((vector signed short) (a1), (vector signed short) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), vector signed int, (a3)), \
- ((vector signed int) __builtin_altivec_vmsumshs ((vector signed short) (a1), (vector signed short) (a2), (vector signed int) (a3))), \
- __altivec_link_error_invalid_argument ()))
+__ch (__tern_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2), __vector unsigned int, (a3)), \
+ ((__vector unsigned int) __builtin_altivec_vmsumuhs ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector signed int, (a3)), \
+ ((__vector signed int) __builtin_altivec_vmsumshs ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed int) (a3))), \
+ __builtin_altivec_compiletime_error ("vec_msums")))
#define vec_vmsumshs(a1, a2, a3) \
-__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), vector signed int, (a3)), \
- ((vector signed int) __builtin_altivec_vmsumshs ((vector signed short) (a1), (vector signed short) (a2), (vector signed int) (a3))), \
-__altivec_link_error_invalid_argument ())
+__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector signed int, (a3)), \
+ ((__vector signed int) __builtin_altivec_vmsumshs ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed int) (a3))), \
+__builtin_altivec_compiletime_error ("vec_vmsumshs"))
#define vec_vmsumuhs(a1, a2, a3) \
-__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), vector unsigned int, (a3)), \
- ((vector unsigned int) __builtin_altivec_vmsumuhs ((vector signed short) (a1), (vector signed short) (a2), (vector signed int) (a3))), \
-__altivec_link_error_invalid_argument ())
+__ch (__tern_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2), __vector unsigned int, (a3)), \
+ ((__vector unsigned int) __builtin_altivec_vmsumuhs ((__vector signed short) (a1), (__vector signed short) (a2), (__vector signed int) (a3))), \
+__builtin_altivec_compiletime_error ("vec_vmsumuhs"))
#define vec_mtvscr(a1) \
-__ch (__un_args_eq (vector signed int, (a1)), \
- __builtin_altivec_mtvscr ((vector signed int) (a1)), \
-__ch (__un_args_eq (vector unsigned int, (a1)), \
- __builtin_altivec_mtvscr ((vector signed int) (a1)), \
-__ch (__un_args_eq (vector signed short, (a1)), \
- __builtin_altivec_mtvscr ((vector signed int) (a1)), \
-__ch (__un_args_eq (vector unsigned short, (a1)), \
- __builtin_altivec_mtvscr ((vector signed int) (a1)), \
-__ch (__un_args_eq (vector signed char, (a1)), \
- __builtin_altivec_mtvscr ((vector signed int) (a1)), \
-__ch (__un_args_eq (vector unsigned char, (a1)), \
- __builtin_altivec_mtvscr ((vector signed int) (a1)), \
- __altivec_link_error_invalid_argument ()))))))
+__ch (__un_args_eq (__vector signed int, (a1)), \
+ __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
+__ch (__un_args_eq (__vector unsigned int, (a1)), \
+ __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
+__ch (__un_args_eq (__vector __bool int, (a1)), \
+ __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
+__ch (__un_args_eq (__vector signed short, (a1)), \
+ __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
+__ch (__un_args_eq (__vector unsigned short, (a1)), \
+ __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
+__ch (__un_args_eq (__vector __bool short, (a1)), \
+ __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
+__ch (__un_args_eq (__vector __pixel, (a1)), \
+ __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
+__ch (__un_args_eq (__vector signed char, (a1)), \
+ __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
+__ch (__un_args_eq (__vector unsigned char, (a1)), \
+ __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
+__ch (__un_args_eq (__vector __bool char, (a1)), \
+ __builtin_altivec_mtvscr ((__vector signed int) (a1)), \
+ __builtin_altivec_compiletime_error ("vec_mtvscr")))))))))))
#define vec_mule(a1, a2) \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned short) __builtin_altivec_vmuleub ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed short) __builtin_altivec_vmulesb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned int) __builtin_altivec_vmuleuh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed int) __builtin_altivec_vmulesh ((vector signed short) (a1), (vector signed short) (a2))), \
- __altivec_link_error_invalid_argument ()))))
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vmuleub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed short) __builtin_altivec_vmulesb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vmuleuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed int) __builtin_altivec_vmulesh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_mule")))))
#define vec_vmulesh(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed int) __builtin_altivec_vmulesh ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed int) __builtin_altivec_vmulesh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmulesh"))
#define vec_vmuleuh(a1, a2) \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned int) __builtin_altivec_vmuleuh ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vmuleuh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmuleuh"))
#define vec_vmulesb(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed short) __builtin_altivec_vmulesb ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed short) __builtin_altivec_vmulesb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmulesb"))
#define vec_vmuleub(a1, a2) \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned short) __builtin_altivec_vmuleub ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vmuleub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmuleub"))
#define vec_mulo(a1, a2) \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned short) __builtin_altivec_vmuloub ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed short) __builtin_altivec_vmulosb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned int) __builtin_altivec_vmulouh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed int) __builtin_altivec_vmulosh ((vector signed short) (a1), (vector signed short) (a2))), \
- __altivec_link_error_invalid_argument ()))))
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vmuloub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed short) __builtin_altivec_vmulosb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vmulouh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed int) __builtin_altivec_vmulosh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_mulo")))))
#define vec_vmulosh(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed int) __builtin_altivec_vmulosh ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed int) __builtin_altivec_vmulosh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmulosh"))
#define vec_vmulouh(a1, a2) \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned int) __builtin_altivec_vmulouh ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vmulouh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmulouh"))
#define vec_vmulosb(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed short) __builtin_altivec_vmulosb ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed short) __builtin_altivec_vmulosb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmulosb"))
#define vec_vmuloub(a1, a2) \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned short) __builtin_altivec_vmuloub ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vmuloub ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vmuloub"))
#define vec_nmsub(a1, a2, a3) \
-__ch (__tern_args_eq (vector float, ((a1)), vector float, ((a2)) , vector float, ((a3))), \
- ((vector float) __builtin_altivec_vnmsubfp ((vector float) ((a1)), (vector float) ((a2)), (vector float)((a3)))), \
- __altivec_link_error_invalid_argument ())
+__ch (__tern_args_eq (__vector float, (a1), __vector float, (a2), __vector float, (a3)), \
+ ((__vector float) __builtin_altivec_vnmsubfp ((__vector float) (a1), (__vector float) (a2), (__vector float) (a3))), \
+ __builtin_altivec_compiletime_error ("vec_nmsub"))
#define vec_nor(a1, a2) \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vnor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vnor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vnor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vnor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vnor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vnor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vnor ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ())))))))
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vnor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_nor")))))))))))
#define vec_or(a1, a2) \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector float, (a1), vector signed int, (a2)), \
- ((vector float) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ())))))))))))))))
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector float, (a1), __vector __bool int, (a2)), \
+ ((__vector float) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_or")))))))))))))))))))))))))
#define vec_pack(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed char) __builtin_altivec_vpkuhum ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned char) __builtin_altivec_vpkuhum ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed short) __builtin_altivec_vpkuwum ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned short) __builtin_altivec_vpkuwum ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ()))))
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed char) __builtin_altivec_vpkuhum ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vpkuhum ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vpkuhum ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed short) __builtin_altivec_vpkuwum ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vpkuwum ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vpkuwum ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_pack")))))))
#define vec_vpkuwum(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed short) __builtin_altivec_vpkuwum ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned short) __builtin_altivec_vpkuwum ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vpkuwum ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed short) __builtin_altivec_vpkuwum ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vpkuwum ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vpkuwum"))))
#define vec_vpkuhum(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed char) __builtin_altivec_vpkuhum ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned char) __builtin_altivec_vpkuhum ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vpkuhum ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed char) __builtin_altivec_vpkuhum ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vpkuhum ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vpkuhum"))))
#define vec_packpx(a1, a2) \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- (vector unsigned short) __builtin_altivec_vpkpx ((vector signed int) (a1), (vector signed int) (a2)), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector __pixel) __builtin_altivec_vpkpx ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_packpx"))
#define vec_packs(a1, a2) \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed char) __builtin_altivec_vpkshss ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed short) __builtin_altivec_vpkswss ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ()))))
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vpkuhus ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed char) __builtin_altivec_vpkshss ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vpkuwus ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed short) __builtin_altivec_vpkswss ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_packs")))))
#define vec_vpkswss(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed short) __builtin_altivec_vpkswss ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed short) __builtin_altivec_vpkswss ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vpkswss"))
#define vec_vpkuwus(a1, a2) \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vpkuwus ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vpkuwus"))
#define vec_vpkshss(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed char) __builtin_altivec_vpkshss ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed char) __builtin_altivec_vpkshss ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vpkshss"))
#define vec_vpkuhus(a1, a2) \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vpkuhus ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vpkuhus"))
#define vec_packsu(a1, a2) \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector unsigned char) __builtin_altivec_vpkshus ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector unsigned short) __builtin_altivec_vpkswus ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ()))))
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vpkuhus ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vpkshus ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vpkuwus ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vpkswus ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_packsu")))))
#define vec_vpkswus(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector unsigned short) __builtin_altivec_vpkswus ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vpkswus ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vpkswus"))
#define vec_vpkshus(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector unsigned char) __builtin_altivec_vpkshus ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vpkshus ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vpkshus"))
#define vec_perm(a1, a2, a3) \
-__ch (__tern_args_eq (vector float, (a1), vector float, (a2), vector unsigned char, (a3)), \
- ((vector float) __builtin_altivec_vperm_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed char) (a3))), \
-__ch (__tern_args_eq (vector signed int, (a1), vector signed int, (a2), vector unsigned char, (a3)), \
- ((vector signed int) __builtin_altivec_vperm_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed char) (a3))), \
-__ch (__tern_args_eq (vector unsigned int, (a1), vector unsigned int, (a2), vector unsigned char, (a3)), \
- ((vector unsigned int) __builtin_altivec_vperm_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed char) (a3))), \
-__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), vector unsigned char, (a3)), \
- ((vector signed short) __builtin_altivec_vperm_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed char) (a3))), \
-__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), vector unsigned char, (a3)), \
- ((vector unsigned short) __builtin_altivec_vperm_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed char) (a3))), \
-__ch (__tern_args_eq (vector signed char, (a1), vector signed char, (a2), vector unsigned char, (a3)), \
- ((vector signed char) __builtin_altivec_vperm_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed char) (a3))), \
-__ch (__tern_args_eq (vector unsigned char, (a1), vector unsigned char, (a2), vector unsigned char, (a3)), \
- ((vector unsigned char) __builtin_altivec_vperm_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed char) (a3))), \
- __altivec_link_error_invalid_argument ())))))))
-
-#define vec_re(a1) __builtin_altivec_vrefp ((a1))
+__ch (__tern_args_eq (__vector float, (a1), __vector float, (a2), __vector unsigned char, (a3)), \
+ ((__vector float) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
+__ch (__tern_args_eq (__vector signed int, (a1), __vector signed int, (a2), __vector unsigned char, (a3)), \
+ ((__vector signed int) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
+__ch (__tern_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2), __vector unsigned char, (a3)), \
+ ((__vector unsigned int) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
+__ch (__tern_args_eq (__vector __bool int, (a1), __vector __bool int, (a2), __vector unsigned char, (a3)), \
+ ((__vector __bool int) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
+__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector unsigned char, (a3)), \
+ ((__vector signed short) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
+__ch (__tern_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2), __vector unsigned char, (a3)), \
+ ((__vector unsigned short) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
+__ch (__tern_args_eq (__vector __bool short, (a1), __vector __bool short, (a2), __vector unsigned char, (a3)), \
+ ((__vector __bool short) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
+__ch (__tern_args_eq (__vector __pixel, (a1), __vector __pixel, (a2), __vector unsigned char, (a3)), \
+ ((__vector __pixel) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
+__ch (__tern_args_eq (__vector signed char, (a1), __vector signed char, (a2), __vector unsigned char, (a3)), \
+ ((__vector signed char) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
+__ch (__tern_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2), __vector unsigned char, (a3)), \
+ ((__vector unsigned char) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
+__ch (__tern_args_eq (__vector __bool char, (a1), __vector __bool char, (a2), __vector unsigned char, (a3)), \
+ ((__vector __bool char) __builtin_altivec_vperm_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed char) (a3))), \
+ __builtin_altivec_compiletime_error ("vec_perm"))))))))))))
+
+#define vec_re(a1) \
+__ch (__un_args_eq (__vector float, (a1)), \
+ ((__vector float) __builtin_altivec_vrefp ((__vector float) (a1))), \
+__builtin_altivec_compiletime_error ("vec_re"))
#define vec_rl(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vrlb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vrlb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector signed short) __builtin_altivec_vrlh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vrlh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector signed int) __builtin_altivec_vrlw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vrlw ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ()))))))
+__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vrlb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vrlb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vrlh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vrlh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vrlw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vrlw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_rl")))))))
#define vec_vrlw(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector signed int) __builtin_altivec_vrlw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vrlw ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vrlw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vrlw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vrlw")))
#define vec_vrlh(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector signed short) __builtin_altivec_vrlh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vrlh ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vrlh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vrlh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vrlh")))
#define vec_vrlb(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vrlb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vrlb ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ()))
-
-#define vec_round(a1) __builtin_altivec_vrfin ((a1))
-
-#define vec_rsqrte(a1) __builtin_altivec_vrsqrtefp ((a1))
+__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vrlb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vrlb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vrlb")))
+
+#define vec_round(a1) \
+__ch (__un_args_eq (__vector float, (a1)), \
+ ((__vector float) __builtin_altivec_vrfin ((__vector float) (a1))), \
+__builtin_altivec_compiletime_error ("vec_round"))
+
+#define vec_rsqrte(a1) \
+__ch (__un_args_eq (__vector float, (a1)), \
+ ((__vector float) __builtin_altivec_vrsqrtefp ((__vector float) (a1))), \
+__builtin_altivec_compiletime_error ("vec_rsqrte"))
#define vec_sel(a1, a2, a3) \
-__ch (__tern_args_eq (vector float, (a1), vector float, (a2), vector signed int, (a3)), \
- ((vector float) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector float, (a1), vector float, (a2), vector unsigned int, (a3)), \
- ((vector float) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector signed int, (a1), vector signed int, (a2), vector signed int, (a3)), \
- ((vector signed int) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector signed int, (a1), vector signed int, (a2), vector unsigned int, (a3)), \
- ((vector signed int) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector unsigned int, (a1), vector unsigned int, (a2), vector signed int, (a3)), \
- ((vector unsigned int) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector unsigned int, (a1), vector unsigned int, (a2), vector unsigned int, (a3)), \
- ((vector unsigned int) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), vector signed short, (a3)), \
- ((vector signed short) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), vector unsigned short, (a3)), \
- ((vector signed short) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), vector signed short, (a3)), \
- ((vector unsigned short) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), vector unsigned short, (a3)), \
- ((vector unsigned short) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector signed char, (a1), vector signed char, (a2), vector signed char, (a3)), \
- ((vector signed char) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector signed char, (a1), vector signed char, (a2), vector unsigned char, (a3)), \
- ((vector signed char) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector unsigned char, (a1), vector unsigned char, (a2), vector signed char, (a3)), \
- ((vector unsigned char) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
-__ch (__tern_args_eq (vector unsigned char, (a1), vector unsigned char, (a2), vector unsigned char, (a3)), \
- ((vector unsigned char) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
- __altivec_link_error_invalid_argument ()))))))))))))))
+__ch (__tern_args_eq (__vector float, (a1), __vector float, (a2), __vector __bool int, (a3)), \
+ ((__vector float) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector float, (a1), __vector float, (a2), __vector unsigned int, (a3)), \
+ ((__vector float) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector __bool int, (a1), __vector __bool int, (a2), __vector __bool int, (a3)), \
+ ((__vector __bool int) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector __bool int, (a1), __vector __bool int, (a2), __vector unsigned int, (a3)), \
+ ((__vector __bool int) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector signed int, (a1), __vector signed int, (a2), __vector __bool int, (a3)), \
+ ((__vector signed int) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector signed int, (a1), __vector signed int, (a2), __vector unsigned int, (a3)), \
+ ((__vector signed int) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2), __vector __bool int, (a3)), \
+ ((__vector unsigned int) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2), __vector unsigned int, (a3)), \
+ ((__vector unsigned int) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector __bool short, (a1), __vector __bool short, (a2), __vector __bool short, (a3)), \
+ ((__vector __bool short) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector __bool short, (a1), __vector __bool short, (a2), __vector unsigned short, (a3)), \
+ ((__vector __bool short) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector __bool short, (a3)), \
+ ((__vector signed short) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector signed short, (a1), __vector signed short, (a2), __vector unsigned short, (a3)), \
+ ((__vector signed short) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2), __vector __bool short, (a3)), \
+ ((__vector unsigned short) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2), __vector unsigned short, (a3)), \
+ ((__vector unsigned short) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector __bool char, (a1), __vector __bool char, (a2), __vector __bool char, (a3)), \
+ ((__vector __bool char) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector __bool char, (a1), __vector __bool char, (a2), __vector unsigned char, (a3)), \
+ ((__vector __bool char) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector signed char, (a1), __vector signed char, (a2), __vector __bool char, (a3)), \
+ ((__vector signed char) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector signed char, (a1), __vector signed char, (a2), __vector unsigned char, (a3)), \
+ ((__vector signed char) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2), __vector __bool char, (a3)), \
+ ((__vector unsigned char) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+__ch (__tern_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2), __vector unsigned char, (a3)), \
+ ((__vector unsigned char) __builtin_altivec_vsel_4si ((__vector signed int) (a1), (__vector signed int) (a2), (__vector signed int) (a3))), \
+ __builtin_altivec_compiletime_error ("vec_sel")))))))))))))))))))))
#define vec_sl(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vslb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vslb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector signed short) __builtin_altivec_vslh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vslh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector signed int) __builtin_altivec_vslw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vslw ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ()))))))
+__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vslb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vslb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vslh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vslh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vslw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vslw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_sl")))))))
#define vec_vslw(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector signed int) __builtin_altivec_vslw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vslw ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vslw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vslw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vslw")))
#define vec_vslh(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector signed short) __builtin_altivec_vslh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vslh ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vslh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vslh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vslh")))
#define vec_vslb(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vslb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vslb ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vslb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vslb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vslb")))
#define vec_sld(a1, a2, a3) \
-__ch (__tern_args_eq (vector float, (a1), vector float, (a2), int, (a3)), \
- ((vector float) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
-__ch (__tern_args_eq (vector float, (a1), vector float, (a2), unsigned int, (a3)), \
- ((vector float) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
-__ch (__tern_args_eq (vector signed int, (a1), vector signed int, (a2), int, (a3)), \
- ((vector signed int) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
-__ch (__tern_args_eq (vector signed int, (a1), vector signed int, (a2), unsigned int, (a3)), \
- ((vector signed int) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
-__ch (__tern_args_eq (vector unsigned int, (a1), vector unsigned int, (a2), int, (a3)), \
- ((vector unsigned int) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
-__ch (__tern_args_eq (vector unsigned int, (a1), vector unsigned int, (a2), unsigned int, (a3)), \
- ((vector unsigned int) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
-__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), int, (a3)), \
- ((vector signed short) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
-__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), unsigned int, (a3)), \
- ((vector signed short) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
-__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), int, (a3)), \
- ((vector unsigned short) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
-__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), unsigned int, (a3)), \
- ((vector unsigned short) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
-__ch (__tern_args_eq (vector signed char, (a1), vector signed char, (a2), int, (a3)), \
- ((vector signed char) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
-__ch (__tern_args_eq (vector signed char, (a1), vector signed char, (a2), unsigned int, (a3)), \
- ((vector signed char) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
-__ch (__tern_args_eq (vector unsigned char, (a1), vector unsigned char, (a2), int, (a3)), \
- ((vector unsigned char) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
-__ch (__tern_args_eq (vector unsigned char, (a1), vector unsigned char, (a2), unsigned int, (a3)), \
- ((vector unsigned char) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
- __altivec_link_error_invalid_argument ()))))))))))))))
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
+ ((__vector __pixel) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vsldoi_4si ((__vector signed int) (a1), (__vector signed int) (a2), (const int) (a3))), \
+ __builtin_altivec_compiletime_error ("vec_sld"))))))))))))
#define vec_sll(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector signed int) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned short, (a2)), \
- ((vector signed int) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned char, (a2)), \
- ((vector signed int) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned short, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned char, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned int, (a2)), \
- ((vector signed short) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector signed short) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned char, (a2)), \
- ((vector signed short) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned int, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned char, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned int, (a2)), \
- ((vector signed char) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned short, (a2)), \
- ((vector signed char) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned int, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned short, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ()))))))))))))))))))
+__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned short, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned short, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned char, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned int, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned int, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned char, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector unsigned int, (a2)), \
+ ((__vector __pixel) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector unsigned short, (a2)), \
+ ((__vector __pixel) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector unsigned char, (a2)), \
+ ((__vector __pixel) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned int, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned short, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned int, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned short, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vsl ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_sll")))))))))))))))))))))))))))))))
#define vec_slo(a1, a2) \
-__ch (__bin_args_eq (vector float, (a1), vector signed char, (a2)), \
- ((vector float) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector float, (a1), vector unsigned char, (a2)), \
- ((vector float) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed char, (a2)), \
- ((vector signed int) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned char, (a2)), \
- ((vector signed int) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed char, (a2)), \
- ((vector unsigned int) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned char, (a2)), \
- ((vector unsigned int) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed char, (a2)), \
- ((vector signed short) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned char, (a2)), \
- ((vector signed short) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed char, (a2)), \
- ((vector unsigned short) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned char, (a2)), \
- ((vector unsigned short) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ()))))))))))))))
+__ch (__bin_args_eq (__vector float, (a1), __vector signed char, (a2)), \
+ ((__vector float) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector float, (a1), __vector unsigned char, (a2)), \
+ ((__vector float) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed char, (a2)), \
+ ((__vector signed int) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed int) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector signed char, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed char, (a2)), \
+ ((__vector signed short) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed short) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector signed char, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector signed char, (a2)), \
+ ((__vector __pixel) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector unsigned char, (a2)), \
+ ((__vector __pixel) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector signed char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vslo ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_slo")))))))))))))))))
#define vec_splat(a1, a2) \
-__ch (__bin_args_eq (vector signed char, ((a1)), int, ((a2))), \
- ((vector signed char) __builtin_altivec_vspltb ((vector signed char) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector signed char, ((a1)), unsigned int, ((a2))), \
- ((vector signed char) __builtin_altivec_vspltb ((vector signed char) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned char, ((a1)), int, ((a2))), \
- ((vector unsigned char) __builtin_altivec_vspltb ((vector signed char) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned char, ((a1)), unsigned int, ((a2))), \
- ((vector unsigned char) __builtin_altivec_vspltb ((vector signed char) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector signed short, ((a1)), int, ((a2))), \
- ((vector signed short) __builtin_altivec_vsplth ((vector signed short) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector signed short, ((a1)), unsigned int, ((a2))), \
- ((vector signed short) __builtin_altivec_vsplth ((vector signed short) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned short, ((a1)), int, ((a2))), \
- ((vector unsigned short) __builtin_altivec_vsplth ((vector signed short) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned short, ((a1)), unsigned int, ((a2))), \
- ((vector unsigned short) __builtin_altivec_vsplth ((vector signed short) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector float, ((a1)), int, ((a2))), \
- ((vector float) __builtin_altivec_vspltw ((vector signed int) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector float, ((a1)), unsigned int, ((a2))), \
- ((vector float) __builtin_altivec_vspltw ((vector signed int) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector signed int, ((a1)), int, ((a2))), \
- ((vector signed int) __builtin_altivec_vspltw ((vector signed int) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector signed int, ((a1)), unsigned int, ((a2))), \
- ((vector signed int) __builtin_altivec_vspltw ((vector signed int) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned int, ((a1)), int, ((a2))), \
- ((vector unsigned int) __builtin_altivec_vspltw ((vector signed int) (a1), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned int, ((a1)), unsigned int, ((a2))), \
- ((vector unsigned int) __builtin_altivec_vspltw ((vector signed int) (a1), (const char) ((a2)))), \
- __altivec_link_error_invalid_argument ()))))))))))))))
+__ch (__un_args_eq (__vector signed char, (a1)), \
+ ((__vector signed char) __builtin_altivec_vspltb ((__vector signed char) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector unsigned char, (a1)), \
+ ((__vector unsigned char) __builtin_altivec_vspltb ((__vector signed char) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector __bool char, (a1)), \
+ ((__vector __bool char) __builtin_altivec_vspltb ((__vector signed char) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector signed short, (a1)), \
+ ((__vector signed short) __builtin_altivec_vsplth ((__vector signed short) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector unsigned short, (a1)), \
+ ((__vector unsigned short) __builtin_altivec_vsplth ((__vector signed short) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector __bool short, (a1)), \
+ ((__vector __bool short) __builtin_altivec_vsplth ((__vector signed short) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector __pixel, (a1)), \
+ ((__vector __pixel) __builtin_altivec_vsplth ((__vector signed short) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector float, (a1)), \
+ ((__vector float) __builtin_altivec_vspltw ((__vector signed int) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector signed int, (a1)), \
+ ((__vector signed int) __builtin_altivec_vspltw ((__vector signed int) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector unsigned int, (a1)), \
+ ((__vector unsigned int) __builtin_altivec_vspltw ((__vector signed int) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector __bool int, (a1)), \
+ ((__vector __bool int) __builtin_altivec_vspltw ((__vector signed int) (a1), (const int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_splat"))))))))))))
#define vec_vspltw(a1, a2) \
-__ch (__bin_args_eq (vector float, ((a1)), int, ((a2))), \
- ((vector float) __builtin_altivec_vspltw ((vector signed int) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector float, ((a1)), unsigned int, ((a2))), \
- ((vector float) __builtin_altivec_vspltw ((vector signed int) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector signed int, ((a1)), int, ((a2))), \
- ((vector signed int) __builtin_altivec_vspltw ((vector signed int) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector signed int, ((a1)), unsigned int, ((a2))), \
- ((vector signed int) __builtin_altivec_vspltw ((vector signed int) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned int, ((a1)), int, ((a2))), \
- ((vector unsigned int) __builtin_altivec_vspltw ((vector signed int) (a1), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned int, ((a1)), unsigned int, ((a2))), \
- ((vector unsigned int) __builtin_altivec_vspltw ((vector signed int) (a1), (const char) ((a2)))), \
-__altivec_link_error_invalid_argument ()))))))
+__ch (__un_args_eq (__vector float, (a1)), \
+ ((__vector float) __builtin_altivec_vspltw ((__vector signed int) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector __bool int, (a1)), \
+ ((__vector __bool int) __builtin_altivec_vspltw ((__vector signed int) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector signed int, (a1)), \
+ ((__vector signed int) __builtin_altivec_vspltw ((__vector signed int) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector unsigned int, (a1)), \
+ ((__vector unsigned int) __builtin_altivec_vspltw ((__vector signed int) (a1), (const int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vspltw")))))
#define vec_vsplth(a1, a2) \
-__ch (__bin_args_eq (vector signed short, ((a1)), int, ((a2))), \
- ((vector signed short) __builtin_altivec_vsplth ((vector signed short) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector signed short, ((a1)), unsigned int, ((a2))), \
- ((vector signed short) __builtin_altivec_vsplth ((vector signed short) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned short, ((a1)), int, ((a2))), \
- ((vector unsigned short) __builtin_altivec_vsplth ((vector signed short) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned short, ((a1)), unsigned int, ((a2))), \
- ((vector unsigned short) __builtin_altivec_vsplth ((vector signed short) ((a1)), (const char) ((a2)))), \
-__altivec_link_error_invalid_argument ()))))
+__ch (__un_args_eq (__vector __bool short, (a1)), \
+ ((__vector __bool short) __builtin_altivec_vsplth ((__vector signed short) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector signed short, (a1)), \
+ ((__vector signed short) __builtin_altivec_vsplth ((__vector signed short) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector unsigned short, (a1)), \
+ ((__vector unsigned short) __builtin_altivec_vsplth ((__vector signed short) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector __pixel, (a1)), \
+ ((__vector __pixel) __builtin_altivec_vsplth ((__vector signed short) (a1), (const int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsplth")))))
#define vec_vspltb(a1, a2) \
-__ch (__bin_args_eq (vector signed char, ((a1)), int, ((a2))), \
- ((vector signed char) __builtin_altivec_vspltb ((vector signed char) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector signed char, ((a1)), unsigned int, ((a2))), \
- ((vector signed char) __builtin_altivec_vspltb ((vector signed char) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned char, ((a1)), int, ((a2))), \
- ((vector unsigned char) __builtin_altivec_vspltb ((vector signed char) ((a1)), (const char) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned char, ((a1)), unsigned int, ((a2))), \
- ((vector unsigned char) __builtin_altivec_vspltb ((vector signed char) ((a1)), (const char) ((a2)))), \
-__altivec_link_error_invalid_argument ()))))
+__ch (__un_args_eq (__vector __bool char, (a1)), \
+ ((__vector __bool char) __builtin_altivec_vspltb ((__vector signed char) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector signed char, (a1)), \
+ ((__vector signed char) __builtin_altivec_vspltb ((__vector signed char) (a1), (const int) (a2))), \
+__ch (__un_args_eq (__vector unsigned char, (a1)), \
+ ((__vector unsigned char) __builtin_altivec_vspltb ((__vector signed char) (a1), (const int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vspltb"))))
-#define vec_splat_s8(a1) __builtin_altivec_vspltisb ((a1))
+#define vec_splat_s8(a1) ((__vector signed char) __builtin_altivec_vspltisb (a1))
-#define vec_splat_s16(a1) __builtin_altivec_vspltish ((a1))
+#define vec_splat_s16(a1) ((__vector signed short) __builtin_altivec_vspltish (a1))
-#define vec_splat_s32(a1) __builtin_altivec_vspltisw ((a1))
+#define vec_splat_s32(a1) ((__vector signed int) __builtin_altivec_vspltisw (a1))
-#define vec_splat_u8(a1) ((vector unsigned char) __builtin_altivec_vspltisb ((a1)))
+#define vec_splat_u8(a1) ((__vector unsigned char) __builtin_altivec_vspltisb (a1))
-#define vec_splat_u16(a1) ((vector unsigned short) __builtin_altivec_vspltish ((a1)))
+#define vec_splat_u16(a1) ((__vector unsigned short) __builtin_altivec_vspltish (a1))
-#define vec_splat_u32(a1) ((vector unsigned int) __builtin_altivec_vspltisw ((a1)))
+#define vec_splat_u32(a1) ((__vector unsigned int) __builtin_altivec_vspltisw (a1))
#define vec_sr(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vsrb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsrb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector signed short) __builtin_altivec_vsrh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsrh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector signed int) __builtin_altivec_vsrw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsrw ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ()))))))
+__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsrb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsrb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsrh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsrh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsrw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsrw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_sr")))))))
#define vec_vsrw(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector signed int) __builtin_altivec_vsrw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsrw ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsrw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsrw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsrw")))
#define vec_vsrh(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector signed short) __builtin_altivec_vsrh ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsrh ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsrh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsrh ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsrh")))
#define vec_vsrb(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vsrb ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsrb ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsrb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsrb ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsrb")))
#define vec_sra(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vsrab ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsrab ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector signed short) __builtin_altivec_vsrah ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsrah ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector signed int) __builtin_altivec_vsraw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsraw ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ()))))))
+__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsrab ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsrab ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsrah ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsrah ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsraw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsraw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_sra")))))))
#define vec_vsraw(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector signed int) __builtin_altivec_vsraw ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsraw ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsraw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsraw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsraw")))
#define vec_vsrah(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector signed short) __builtin_altivec_vsrah ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsrah ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsrah ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsrah ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsrah")))
#define vec_vsrab(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vsrab ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsrab ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsrab ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsrab ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsrab")))
#define vec_srl(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector signed int) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned short, (a2)), \
- ((vector signed int) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned char, (a2)), \
- ((vector signed int) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned short, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned char, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned int, (a2)), \
- ((vector signed short) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector signed short) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned char, (a2)), \
- ((vector signed short) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned int, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned char, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned int, (a2)), \
- ((vector signed char) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned short, (a2)), \
- ((vector signed char) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned int, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned short, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ()))))))))))))))))))
+__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned short, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned short, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned char, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned int, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned int, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned char, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector unsigned int, (a2)), \
+ ((__vector __pixel) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector unsigned short, (a2)), \
+ ((__vector __pixel) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector unsigned char, (a2)), \
+ ((__vector __pixel) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned int, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned short, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned int, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned short, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vsr ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_srl")))))))))))))))))))))))))))))))
#define vec_sro(a1, a2) \
-__ch (__bin_args_eq (vector float, (a1), vector signed char, (a2)), \
- ((vector float) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector float, (a1), vector unsigned char, (a2)), \
- ((vector float) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed char, (a2)), \
- ((vector signed int) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned char, (a2)), \
- ((vector signed int) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed char, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned char, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed char, (a2)), \
- ((vector signed short) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned char, (a2)), \
- ((vector signed short) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed char, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned char, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector signed char) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ()))))))))))))))
+__ch (__bin_args_eq (__vector float, (a1), __vector signed char, (a2)), \
+ ((__vector float) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector float, (a1), __vector unsigned char, (a2)), \
+ ((__vector float) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed char, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector signed char, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed char, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector signed char, (a2)), \
+ ((__vector __pixel) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector unsigned char, (a2)), \
+ ((__vector __pixel) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector signed char, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector unsigned char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector signed char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsro ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_sro")))))))))))))))))
#define vec_st(a1, a2, a3) \
- __builtin_altivec_stvx ((vector signed int) (a1), (a2), (a3))
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), unsigned char, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector signed char, (a1), signed char, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), unsigned char, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), signed char, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), unsigned short, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector signed short, (a1), short, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), unsigned short, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), short, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __pixel, (a1), unsigned short, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __pixel, (a1), short, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), unsigned int, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector signed int, (a1), int, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), unsigned int, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), int, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector float, (a1), float, *(a3)), \
+ __builtin_altivec_stvx ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__builtin_altivec_compiletime_error ("vec_st")))))))))))))))))))))))))))
#define vec_stl(a1, a2, a3) \
- __builtin_altivec_stvxl ((vector signed int) (a1), (a2), (a3))
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), unsigned char, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector signed char, (a1), signed char, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), unsigned char, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), signed char, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), unsigned short, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector signed short, (a1), short, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), unsigned short, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), short, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __pixel, (a1), unsigned short, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __pixel, (a1), short, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), unsigned int, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector signed int, (a1), int, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), unsigned int, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), int, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__ch (__bin_args_eq (__vector float, (a1), float, *(a3)), \
+ __builtin_altivec_stvxl ((__vector signed int) (a1), (a2), (void *) (a3)), \
+__builtin_altivec_compiletime_error ("vec_stl")))))))))))))))))))))))))))
#define vec_ste(a, b, c) \
-__ch (__un_args_eq (vector unsigned char, (a)), \
- __builtin_altivec_stvebx ((vector signed char) (a), (b), (c)), \
-__ch (__un_args_eq (vector signed char, (a)), \
- __builtin_altivec_stvebx ((vector signed char) (a), (b), (c)), \
-__ch (__un_args_eq (vector unsigned short, (a)), \
- __builtin_altivec_stvehx ((vector signed short) (a), (b), (c)), \
-__ch (__un_args_eq (vector signed short, (a)), \
- __builtin_altivec_stvehx ((vector signed short) (a), (b), (c)), \
-__ch (__un_args_eq (vector unsigned int, (a)), \
- __builtin_altivec_stvewx ((vector signed int) (a), (b), (c)), \
-__ch (__un_args_eq (vector signed int, (a)), \
- __builtin_altivec_stvewx ((vector signed int) (a), (b), (c)), \
-__ch (__un_args_eq (vector float, (a)), \
- __builtin_altivec_stvewx ((vector signed int) (a), (b), (c)), \
- __altivec_link_error_invalid_argument ())))))))
+__ch (__bin_args_eq (__vector unsigned char, (a), unsigned char, *(c)), \
+ __builtin_altivec_stvebx ((__vector signed char) (a), (b), (void *) (c)), \
+__ch (__bin_args_eq (__vector signed char, (a), signed char, *(c)), \
+ __builtin_altivec_stvebx ((__vector signed char) (a), (b), (void *) (c)), \
+__ch (__bin_args_eq (__vector __bool char, (a), unsigned char, *(c)), \
+ __builtin_altivec_stvebx ((__vector signed char) (a), (b), (void *) (c)), \
+__ch (__bin_args_eq (__vector __bool char, (a), signed char, *(c)), \
+ __builtin_altivec_stvebx ((__vector signed char) (a), (b), (void *) (c)), \
+__ch (__bin_args_eq (__vector unsigned short, (a), unsigned short, *(c)), \
+ __builtin_altivec_stvehx ((__vector signed short) (a), (b), (void *) (c)), \
+__ch (__bin_args_eq (__vector signed short, (a), short, *(c)), \
+ __builtin_altivec_stvehx ((__vector signed short) (a), (b), (void *) (c)), \
+__ch (__bin_args_eq (__vector __bool short, (a), unsigned short, *(c)), \
+ __builtin_altivec_stvehx ((__vector signed short) (a), (b), (void *) (c)), \
+__ch (__bin_args_eq (__vector __bool short, (a), short, *(c)), \
+ __builtin_altivec_stvehx ((__vector signed short) (a), (b), (void *) (c)), \
+__ch (__bin_args_eq (__vector __pixel, (a), unsigned short, *(c)), \
+ __builtin_altivec_stvehx ((__vector signed short) (a), (b), (void *) (c)), \
+__ch (__bin_args_eq (__vector __pixel, (a), short, *(c)), \
+ __builtin_altivec_stvehx ((__vector signed short) (a), (b), (void *) (c)), \
+__ch (__bin_args_eq (__vector unsigned int, (a), unsigned int, *(c)), \
+ __builtin_altivec_stvewx ((__vector signed int) (a), (b), (void *) (c)), \
+__ch (__bin_args_eq (__vector signed int, (a), int, *(c)), \
+ __builtin_altivec_stvewx ((__vector signed int) (a), (b), (void *) (c)), \
+__ch (__bin_args_eq (__vector __bool int, (a), unsigned int, *(c)), \
+ __builtin_altivec_stvewx ((__vector signed int) (a), (b), (void *) (c)), \
+__ch (__bin_args_eq (__vector __bool int, (a), int, *(c)), \
+ __builtin_altivec_stvewx ((__vector signed int) (a), (b), (void *) (c)), \
+__ch (__bin_args_eq (__vector float, (a), float, *(c)), \
+ __builtin_altivec_stvewx ((__vector signed int) (a), (b), (void *) (c)), \
+ __builtin_altivec_compiletime_error ("vec_ste"))))))))))))))))
#define vec_stvewx(a, b, c) \
-__ch (__un_args_eq (vector unsigned int, (a)), \
- __builtin_altivec_stvewx ((vector signed int) (a), (b), (c)), \
-__ch (__un_args_eq (vector signed int, (a)), \
- __builtin_altivec_stvewx ((vector signed int) (a), (b), (c)), \
-__ch (__un_args_eq (vector float, (a)), \
- __builtin_altivec_stvewx ((vector signed int) (a), (b), (c)), \
-__altivec_link_error_invalid_argument ())))
+__ch (__bin_args_eq (__vector unsigned int, (a), unsigned int, *(c)), \
+ __builtin_altivec_stvewx ((__vector signed int) (a), (b), (c)), \
+__ch (__bin_args_eq (__vector signed int, (a), int, *(c)), \
+ __builtin_altivec_stvewx ((__vector signed int) (a), (b), (c)), \
+__ch (__bin_args_eq (__vector __bool int, (a), unsigned int, *(c)), \
+ __builtin_altivec_stvewx ((__vector signed int) (a), (b), (c)), \
+__ch (__bin_args_eq (__vector __bool int, (a), int, *(c)), \
+ __builtin_altivec_stvewx ((__vector signed int) (a), (b), (c)), \
+__ch (__bin_args_eq (__vector float, (a), float, *(c)), \
+ __builtin_altivec_stvewx ((__vector signed int) (a), (b), (c)), \
+__builtin_altivec_compiletime_error ("vec_stvewx"))))))
#define vec_stvehx(a, b, c) \
-__ch (__un_args_eq (vector unsigned short, (a)), \
- __builtin_altivec_stvehx ((vector signed short) (a), (b), (c)), \
-__ch (__un_args_eq (vector signed short, (a)), \
- __builtin_altivec_stvehx ((vector signed short) (a), (b), (c)), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector unsigned short, (a), unsigned short, *(c)), \
+ __builtin_altivec_stvehx ((__vector signed short) (a), (b), (c)), \
+__ch (__bin_args_eq (__vector signed short, (a), short, *(c)), \
+ __builtin_altivec_stvehx ((__vector signed short) (a), (b), (c)), \
+__ch (__bin_args_eq (__vector __bool short, (a), unsigned short, *(c)), \
+ __builtin_altivec_stvehx ((__vector signed short) (a), (b), (c)), \
+__ch (__bin_args_eq (__vector __bool short, (a), short, *(c)), \
+ __builtin_altivec_stvehx ((__vector signed short) (a), (b), (c)), \
+__ch (__bin_args_eq (__vector __pixel, (a), unsigned short, *(c)), \
+ __builtin_altivec_stvehx ((__vector signed short) (a), (b), (c)), \
+__ch (__bin_args_eq (__vector __pixel, (a), short, *(c)), \
+ __builtin_altivec_stvehx ((__vector signed short) (a), (b), (c)), \
+__builtin_altivec_compiletime_error ("vec_stvehx")))))))
#define vec_stvebx(a, b, c) \
-__ch (__un_args_eq (vector unsigned char, (a)), \
- __builtin_altivec_stvebx ((vector signed char) (a), (b), (c)), \
-__ch (__un_args_eq (vector signed char, (a)), \
- __builtin_altivec_stvebx ((vector signed char) (a), (b), (c)), \
-__altivec_link_error_invalid_argument ()))
+__ch (__bin_args_eq (__vector unsigned char, (a), unsigned char, *(c)), \
+ __builtin_altivec_stvebx ((__vector signed char) (a), (b), (c)), \
+__ch (__bin_args_eq (__vector signed char, (a), signed char, *(c)), \
+ __builtin_altivec_stvebx ((__vector signed char) (a), (b), (c)), \
+__ch (__bin_args_eq (__vector __bool char, (a), unsigned char, *(c)), \
+ __builtin_altivec_stvebx ((__vector signed char) (a), (b), (c)), \
+__ch (__bin_args_eq (__vector __bool char, (a), signed char, *(c)), \
+ __builtin_altivec_stvebx ((__vector signed char) (a), (b), (c)), \
+__builtin_altivec_compiletime_error ("vec_stvebx")))))
#define vec_sub(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vsububm ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsububm ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsububm ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsububm ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vsubuhm ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vsubuwm ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vsubfp ((vector float) (a1), (vector float) (a2))), \
- __altivec_link_error_invalid_argument ())))))))))))))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vsubfp ((__vector float) (a1), (__vector float) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_sub"))))))))))))))))))))
#define vec_vsubfp(a1, a2) \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- ((vector float) __builtin_altivec_vsubfp ((vector float) (a1), (vector float) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vsubfp ((__vector float) (a1), (__vector float) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsubfp"))
#define vec_vsubuwm(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vsubuwm ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ()))))
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsubuwm")))))))
#define vec_vsubuhm(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vsubuhm ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ()))))
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsubuhm")))))))
#define vec_vsububm(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vsububm ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsububm ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsububm ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsububm ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ()))))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsububm")))))))
#define vec_subc(a1, a2) \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsubcuw ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsubcuw ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_subc"))
#define vec_subs(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsububs ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsububs ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsububs ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vsubsbs ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vsubshs ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vsubsws ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ()))))))))))))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_subs")))))))))))))))))))
#define vec_vsubsws(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vsubsws ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsubsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsubsws"))))
#define vec_vsubuws(a1, a2) \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ())))
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsubuws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsubuws"))))
#define vec_vsubshs(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- ((vector signed short) __builtin_altivec_vsubshs ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vsubshs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsubshs"))))
#define vec_vsubuhs(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) (a1), (vector signed short) (a2))), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- ((vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) (a1), (vector signed short) (a2))), \
-__altivec_link_error_invalid_argument ())))
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) (a1), (__vector signed short) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsubuhs"))))
#define vec_vsubsbs(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- ((vector signed char) __builtin_altivec_vsubsbs ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsubsbs"))))
#define vec_vsububs(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsububs ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsububs ((vector signed char) (a1), (vector signed char) (a2))), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- ((vector unsigned char) __builtin_altivec_vsububs ((vector signed char) (a1), (vector signed char) (a2))), \
-__altivec_link_error_invalid_argument ())))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) (a1), (__vector signed char) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsububs"))))
#define vec_sum4s(a1, a2) \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsum4ubs ((vector signed char) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vsum4sbs ((vector signed char) (a1), (vector signed int) (a2))), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vsum4shs ((vector signed short) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ())))
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsum4ubs ((__vector signed char) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsum4sbs ((__vector signed char) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsum4shs ((__vector signed short) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_sum4s"))))
#define vec_vsum4shs(a1, a2) \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vsum4shs ((vector signed short) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsum4shs ((__vector signed short) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsum4shs"))
#define vec_vsum4sbs(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed int, (a2)), \
- ((vector signed int) __builtin_altivec_vsum4sbs ((vector signed char) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsum4sbs ((__vector signed char) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsum4sbs"))
#define vec_vsum4ubs(a1, a2) \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned int, (a2)), \
- ((vector unsigned int) __builtin_altivec_vsum4ubs ((vector signed char) (a1), (vector signed int) (a2))), \
-__altivec_link_error_invalid_argument ())
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vsum4ubs ((__vector signed char) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_vsum4ubs"))
-#define vec_sum2s(a1, a2) __builtin_altivec_vsum2sws ((a1), (a2))
+#define vec_sum2s(a1, a2) \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsum2sws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_sum2s"))
-#define vec_sums(a1, a2) __builtin_altivec_vsumsws ((a1), (a2))
+#define vec_sums(a1, a2) \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vsumsws ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__builtin_altivec_compiletime_error ("vec_sums"))
-#define vec_trunc(a1) __builtin_altivec_vrfiz ((a1))
+#define vec_trunc(a1) \
+__ch (__un_args_eq (__vector float, (a1)), \
+ ((__vector float) __builtin_altivec_vrfiz ((__vector float) (a1))), \
+__builtin_altivec_compiletime_error ("vec_trunc"))
#define vec_unpackh(a1) \
-__ch (__un_args_eq (vector signed char, (a1)), \
- ((vector signed short) __builtin_altivec_vupkhsb ((vector signed char) (a1))), \
-__ch (__un_args_eq (vector unsigned short, (a1)), \
- ((vector unsigned int) __builtin_altivec_vupkhpx ((vector signed short) (a1))), \
-__ch (__un_args_eq (vector signed short, (a1)), \
- ((vector signed int) __builtin_altivec_vupkhsh ((vector signed short) (a1))), \
- __altivec_link_error_invalid_argument ())))
+__ch (__un_args_eq (__vector signed char, (a1)), \
+ ((__vector signed short) __builtin_altivec_vupkhsb ((__vector signed char) (a1))), \
+__ch (__un_args_eq (__vector __bool char, (a1)), \
+ ((__vector __bool short) __builtin_altivec_vupkhsb ((__vector signed char) (a1))), \
+__ch (__un_args_eq (__vector __pixel, (a1)), \
+ ((__vector unsigned int) __builtin_altivec_vupkhpx ((__vector signed short) (a1))), \
+__ch (__un_args_eq (__vector signed short, (a1)), \
+ ((__vector signed int) __builtin_altivec_vupkhsh ((__vector signed short) (a1))), \
+__ch (__un_args_eq (__vector __bool short, (a1)), \
+ ((__vector __bool int) __builtin_altivec_vupkhsh ((__vector signed short) (a1))), \
+ __builtin_altivec_compiletime_error ("vec_unpackh"))))))
#define vec_vupkhsh(a1) \
-__ch (__un_args_eq (vector signed short, (a1)), \
- ((vector signed int) __builtin_altivec_vupkhsh ((vector signed short) (a1))), \
-__altivec_link_error_invalid_argument ())
+__ch (__un_args_eq (__vector __bool short, (a1)), \
+ ((__vector __bool int) __builtin_altivec_vupkhsh ((__vector signed short) (a1))), \
+__ch (__un_args_eq (__vector signed short, (a1)), \
+ ((__vector signed int) __builtin_altivec_vupkhsh ((__vector signed short) (a1))), \
+__builtin_altivec_compiletime_error ("vec_vupkhsh")))
#define vec_vupkhpx(a1) \
-__ch (__un_args_eq (vector unsigned short, (a1)), \
- ((vector unsigned int) __builtin_altivec_vupkhpx ((vector signed short) (a1))), \
-__altivec_link_error_invalid_argument ())
+__ch (__un_args_eq (__vector __pixel, (a1)), \
+ ((__vector unsigned int) __builtin_altivec_vupkhpx ((__vector signed short) (a1))), \
+__builtin_altivec_compiletime_error ("vec_vupkhpx"))
#define vec_vupkhsb(a1) \
-__ch (__un_args_eq (vector signed char, (a1)), \
- ((vector signed short) __builtin_altivec_vupkhsb ((vector signed char) (a1))), \
-__altivec_link_error_invalid_argument ())
+__ch (__un_args_eq (__vector __bool char, (a1)), \
+ ((__vector __bool short) __builtin_altivec_vupkhsb ((__vector signed char) (a1))), \
+__ch (__un_args_eq (__vector signed char, (a1)), \
+ ((__vector signed short) __builtin_altivec_vupkhsb ((__vector signed char) (a1))), \
+__builtin_altivec_compiletime_error ("vec_vupkhsb")))
#define vec_unpackl(a1) \
-__ch (__un_args_eq (vector signed char, (a1)), \
- ((vector signed short) __builtin_altivec_vupklsb ((vector signed char) (a1))), \
-__ch (__un_args_eq (vector unsigned short, (a1)), \
- ((vector unsigned int) __builtin_altivec_vupklpx ((vector signed short) (a1))), \
-__ch (__un_args_eq (vector signed short, (a1)), \
- ((vector signed int) __builtin_altivec_vupklsh ((vector signed short) (a1))), \
- __altivec_link_error_invalid_argument ())))
+__ch (__un_args_eq (__vector signed char, (a1)), \
+ ((__vector signed short) __builtin_altivec_vupklsb ((__vector signed char) (a1))), \
+__ch (__un_args_eq (__vector __bool char, (a1)), \
+ ((__vector __bool short) __builtin_altivec_vupklsb ((__vector signed char) (a1))), \
+__ch (__un_args_eq (__vector __pixel, (a1)), \
+ ((__vector unsigned int) __builtin_altivec_vupklpx ((__vector signed short) (a1))), \
+__ch (__un_args_eq (__vector signed short, (a1)), \
+ ((__vector signed int) __builtin_altivec_vupklsh ((__vector signed short) (a1))), \
+__ch (__un_args_eq (__vector __bool short, (a1)), \
+ ((__vector __bool int) __builtin_altivec_vupklsh ((__vector signed short) (a1))), \
+ __builtin_altivec_compiletime_error ("vec_unpackl"))))))
#define vec_vupklsh(a1) \
-__ch (__un_args_eq (vector signed short, (a1)), \
- ((vector signed int) __builtin_altivec_vupklsh ((vector signed short) (a1))), \
-__altivec_link_error_invalid_argument ())
+__ch (__un_args_eq (__vector __bool short, (a1)), \
+ ((__vector __bool int) __builtin_altivec_vupklsh ((__vector signed short) (a1))), \
+__ch (__un_args_eq (__vector signed short, (a1)), \
+ ((__vector signed int) __builtin_altivec_vupklsh ((__vector signed short) (a1))), \
+__builtin_altivec_compiletime_error ("vec_vupklsh")))
#define vec_vupklpx(a1) \
-__ch (__un_args_eq (vector unsigned short, (a1)), \
- ((vector unsigned int) __builtin_altivec_vupklpx ((vector signed short) (a1))), \
-__altivec_link_error_invalid_argument ())
+__ch (__un_args_eq (__vector __pixel, (a1)), \
+ ((__vector unsigned int) __builtin_altivec_vupklpx ((__vector signed short) (a1))), \
+__builtin_altivec_compiletime_error ("vec_vupklpx"))
#define vec_vupklsb(a1) \
-__ch (__un_args_eq (vector signed char, (a1)), \
- ((vector signed short) __builtin_altivec_vupklsb ((vector signed char) (a1))), \
-__altivec_link_error_invalid_argument ())
+__ch (__un_args_eq (__vector __bool char, (a1)), \
+ ((__vector __bool short) __builtin_altivec_vupklsb ((__vector signed char) (a1))), \
+__ch (__un_args_eq (__vector signed char, (a1)), \
+ ((__vector signed short) __builtin_altivec_vupklsb ((__vector signed char) (a1))), \
+__builtin_altivec_compiletime_error ("vec_vupklsb")))
#define vec_xor(a1, a2) \
-__ch (__bin_args_eq (vector float, ((a1)), vector float, ((a2))), \
- ((vector float) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector float, ((a1)), vector unsigned int, ((a2))), \
- ((vector float) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned int, ((a1)), vector float, ((a2))), \
- ((vector float) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector signed int, ((a1)), vector float, ((a2))), \
- ((vector float) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector float, ((a1)), vector signed int, ((a2))), \
- ((vector float) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector signed int, ((a1)), vector signed int, ((a2))), \
- ((vector signed int) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector signed int, ((a1)), vector unsigned int, ((a2))), \
- ((vector unsigned int) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned int, ((a1)), vector signed int, ((a2))), \
- ((vector unsigned int) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned int, ((a1)), vector unsigned int, ((a2))), \
- ((vector unsigned int) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned short, ((a1)), vector unsigned short, ((a2))), \
- ((vector unsigned short) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector signed short, ((a1)), vector unsigned short, ((a2))), \
- ((vector unsigned short) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned short, ((a1)), vector signed short, ((a2))), \
- ((vector unsigned short) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned short, ((a1)), vector unsigned short, ((a2))), \
- ((vector signed short) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector signed short, ((a1)), vector signed short, ((a2))), \
- ((vector signed short) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector signed short, ((a1)), vector unsigned short, ((a2))), \
- ((vector unsigned short) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned short, ((a1)), vector signed short, ((a2))), \
- ((vector unsigned short) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned short, ((a1)), vector unsigned short, ((a2))), \
- ((vector unsigned short) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned char, ((a1)), vector unsigned char, ((a2))), \
- ((vector unsigned char) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector signed char, ((a1)), vector unsigned char, ((a2))), \
- ((vector unsigned char) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector signed char, ((a1)), vector signed char, ((a2))), \
- ((vector signed char) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned char, ((a1)), vector unsigned char, ((a2))), \
- ((vector signed char) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector signed char, ((a1)), vector unsigned char, ((a2))), \
- ((vector signed char) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
-__ch (__bin_args_eq (vector unsigned char, ((a1)), vector signed char, ((a2))), \
- ((vector unsigned char) __builtin_altivec_vxor ((vector signed int) ((a1)), (vector signed int) ((a2)))), \
- __altivec_link_error_invalid_argument ())))))))))))))))))))))))
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector float, (a1), __vector __bool int, (a2)), \
+ ((__vector float) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector float, (a2)), \
+ ((__vector float) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
+ ((__vector __bool int) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ ((__vector signed int) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ ((__vector unsigned int) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
+ ((__vector __bool short) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ ((__vector signed short) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ ((__vector unsigned short) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
+ ((__vector __bool char) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ ((__vector signed char) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ ((__vector unsigned char) __builtin_altivec_vxor ((__vector signed int) (a1), (__vector signed int) (a2))), \
+ __builtin_altivec_compiletime_error ("vec_xor")))))))))))))))))))))))))
/* Predicates. */
#define vec_all_eq(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- __builtin_altivec_vcmpeqfp_p (__CR6_LT, (vector float) (a1), (vector float) (a2)), \
- __altivec_link_error_invalid_argument ())))))))))))))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpeqfp_p (__CR6_LT, (__vector float) (a1), (__vector float) (a2)), \
+ __builtin_altivec_compiletime_error ("vec_all_eq"))))))))))))))))))))))))
#define vec_all_ge(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) (a2), (vector signed char) (a1)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) (a2), (vector signed char) (a1)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) (a2), (vector signed char) (a1)), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (vector signed char) (a2), (vector signed char) (a1)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) (a2), (vector signed short) (a1)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) (a2), (vector signed short) (a1)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) (a2), (vector signed short) (a1)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (vector signed short) (a2), (vector signed short) (a1)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) (a2), (vector signed int) (a1)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) (a2), (vector signed int) (a1)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) (a2), (vector signed int) (a1)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (vector signed int) (a2), (vector signed int) (a1)), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- __builtin_altivec_vcmpgefp_p (__CR6_LT, (vector float) (a1), (vector float) (a2)), \
- __altivec_link_error_invalid_argument ())))))))))))))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpgefp_p (__CR6_LT, (__vector float) (a1), (__vector float) (a2)), \
+ __builtin_altivec_compiletime_error ("vec_all_ge"))))))))))))))))))))
#define vec_all_gt(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_LT, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_LT, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_LT, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- __builtin_altivec_vcmpgtfp_p (__CR6_LT, (vector float) (a1), (vector float) (a2)), \
- __altivec_link_error_invalid_argument ())))))))))))))
-
-#define vec_all_in(a1, a2) __builtin_altivec_vcmpbfp_p (__CR6_EQ, (a1), (a2))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpgtfp_p (__CR6_LT, (__vector float) (a1), (__vector float) (a2)), \
+ __builtin_altivec_compiletime_error ("vec_all_gt"))))))))))))))))))))
+
+#define vec_all_in(a1, a2) \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpbfp_p (__CR6_EQ, (a1), (a2)), \
+ __builtin_altivec_compiletime_error ("vec_all_in"))
#define vec_all_le(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- __builtin_altivec_vcmpgefp_p (__CR6_LT, (vector float) (a2), (vector float) (a1)), \
- __altivec_link_error_invalid_argument ())))))))))))))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpgefp_p (__CR6_LT, (__vector float) (a2), (__vector float) (a1)), \
+ __builtin_altivec_compiletime_error ("vec_all_le"))))))))))))))))))))
#define vec_all_lt(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) (a2), (vector signed char) (a1)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) (a2), (vector signed char) (a1)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) (a2), (vector signed char) (a1)), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_LT, (vector signed char) (a2), (vector signed char) (a1)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) (a2), (vector signed short) (a1)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) (a2), (vector signed short) (a1)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) (a2), (vector signed short) (a1)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_LT, (vector signed short) (a2), (vector signed short) (a1)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) (a2), (vector signed int) (a1)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) (a2), (vector signed int) (a1)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) (a2), (vector signed int) (a1)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_LT, (vector signed int) (a2), (vector signed int) (a1)), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- __builtin_altivec_vcmpgtfp_p (__CR6_LT, (vector float) (a2), (vector float) (a1)), \
- __altivec_link_error_invalid_argument ())))))))))))))
-
-#define vec_all_nan(a1) __builtin_altivec_vcmpeqfp_p (__CR6_EQ, (a1), (a1))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_LT, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_LT, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_LT, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpgtfp_p (__CR6_LT, (__vector float) (a2), (__vector float) (a1)), \
+ __builtin_altivec_compiletime_error ("vec_all_lt"))))))))))))))))))))
+
+#define vec_all_nan(a1) \
+__ch (__un_args_eq (__vector float, (a1)), \
+ __builtin_altivec_vcmpeqfp_p (__CR6_EQ, (a1), (a1)), \
+ __builtin_altivec_compiletime_error ("vec_all_nan"))
#define vec_all_ne(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- __builtin_altivec_vcmpeqfp_p (__CR6_EQ, (vector float) (a1), (vector float) (a2)), \
- __altivec_link_error_invalid_argument ())))))))))))))
-
-#define vec_all_nge(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_EQ, (a1), (a2))
-
-#define vec_all_ngt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_EQ, (a1), (a2))
-
-#define vec_all_nle(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_EQ, (a2), (a1))
-
-#define vec_all_nlt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_EQ, (a2), (a1))
-
-#define vec_all_numeric(a1) __builtin_altivec_vcmpeqfp_p (__CR6_LT, (a1), (a1))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpeqfp_p (__CR6_EQ, (__vector float) (a1), (__vector float) (a2)), \
+ __builtin_altivec_compiletime_error ("vec_all_ne"))))))))))))))))))))))))
+
+#define vec_all_nge(a1, a2) \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpgefp_p (__CR6_EQ, (a1), (a2)), \
+ __builtin_altivec_compiletime_error ("vec_all_nge"))
+
+#define vec_all_ngt(a1, a2) \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpgtfp_p (__CR6_EQ, (a1), (a2)), \
+ __builtin_altivec_compiletime_error ("vec_all_ngt"))
+
+#define vec_all_nle(a1, a2) \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpgefp_p (__CR6_EQ, (a2), (a1)), \
+ __builtin_altivec_compiletime_error ("vec_all_nle"))
+
+#define vec_all_nlt(a1, a2) \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpgtfp_p (__CR6_EQ, (a2), (a1)), \
+ __builtin_altivec_compiletime_error ("vec_all_nlt"))
+
+#define vec_all_numeric(a1) \
+__ch (__un_args_eq (__vector float, (a1)), \
+ __builtin_altivec_vcmpeqfp_p (__CR6_LT, (a1), (a1)), \
+ __builtin_altivec_compiletime_error ("vec_all_numeric"))
#define vec_any_eq(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, (vector float) (a1), (vector float) (a2)), \
- __altivec_link_error_invalid_argument ())))))))))))))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, (__vector float) (a1), (__vector float) (a2)), \
+ __builtin_altivec_compiletime_error ("vec_any_eq"))))))))))))))))))))))))
#define vec_any_ge(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) (a2), (vector signed char) (a1)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) (a2), (vector signed char) (a1)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) (a2), (vector signed char) (a1)), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (vector signed char) (a2), (vector signed char) (a1)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) (a2), (vector signed short) (a1)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) (a2), (vector signed short) (a1)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) (a2), (vector signed short) (a1)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (vector signed short) (a2), (vector signed short) (a1)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) (a2), (vector signed int) (a1)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) (a2), (vector signed int) (a1)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) (a2), (vector signed int) (a1)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (vector signed int) (a2), (vector signed int) (a1)), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- __builtin_altivec_vcmpgefp_p (__CR6_EQ_REV, (vector float) (a1), (vector float) (a2)), \
- __altivec_link_error_invalid_argument ())))))))))))))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpgefp_p (__CR6_EQ_REV, (__vector float) (a1), (__vector float) (a2)), \
+ __builtin_altivec_compiletime_error ("vec_any_ge"))))))))))))))))))))
#define vec_any_gt(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- __builtin_altivec_vcmpgtfp_p (__CR6_EQ_REV, (vector float) (a1), (vector float) (a2)), \
- __altivec_link_error_invalid_argument ())))))))))))))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpgtfp_p (__CR6_EQ_REV, (__vector float) (a1), (__vector float) (a2)), \
+ __builtin_altivec_compiletime_error ("vec_any_gt"))))))))))))))))))))
#define vec_any_le(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- __builtin_altivec_vcmpgefp_p (__CR6_EQ_REV, (vector float) (a2), (vector float) (a1)), \
- __altivec_link_error_invalid_argument ())))))))))))))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpgefp_p (__CR6_EQ_REV, (__vector float) (a2), (__vector float) (a1)), \
+ __builtin_altivec_compiletime_error ("vec_any_le"))))))))))))))))))))
#define vec_any_lt(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) (a2), (vector signed char) (a1)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) (a2), (vector signed char) (a1)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) (a2), (vector signed char) (a1)), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (vector signed char) (a2), (vector signed char) (a1)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) (a2), (vector signed short) (a1)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) (a2), (vector signed short) (a1)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) (a2), (vector signed short) (a1)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (vector signed short) (a2), (vector signed short) (a1)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) (a2), (vector signed int) (a1)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) (a2), (vector signed int) (a1)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) (a2), (vector signed int) (a1)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (vector signed int) (a2), (vector signed int) (a1)), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- __builtin_altivec_vcmpgtfp_p (__CR6_EQ_REV, (vector float) (a2), (vector float) (a1)), \
- __altivec_link_error_invalid_argument ())))))))))))))
-
-#define vec_any_nan(a1) __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, a1, a1)
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (__vector signed char) (a2), (__vector signed char) (a1)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (__vector signed short) (a2), (__vector signed short) (a1)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (__vector signed int) (a2), (__vector signed int) (a1)), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpgtfp_p (__CR6_EQ_REV, (__vector float) (a2), (__vector float) (a1)), \
+ __builtin_altivec_compiletime_error ("vec_any_lt"))))))))))))))))))))
+
+#define vec_any_nan(a1) \
+__ch (__un_args_eq (__vector float, (a1)), \
+ __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, (a1), (a1)), \
+ __builtin_altivec_compiletime_error ("vec_any_nan"))
#define vec_any_ne(a1, a2) \
-__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
- __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (vector signed char) (a1), (vector signed char) (a2)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
- __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (vector signed short) (a1), (vector signed short) (a2)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
- __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (vector signed int) (a1), (vector signed int) (a2)), \
-__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
- __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, (vector float) (a1), (vector float) (a2)), \
- __altivec_link_error_invalid_argument ())))))))))))))
-
-#define vec_any_nge(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, (a1), (a2))
-
-#define vec_any_ngt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, (a1), (a2))
-
-#define vec_any_nle(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, (a2), (a1))
-
-#define vec_any_nlt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, (a2), (a1))
-
-#define vec_any_numeric(a1) __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, (a1), (a1))
-
-#define vec_any_out(a1, a2) __builtin_altivec_vcmpbfp_p (__CR6_EQ_REV, (a1), (a2))
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector signed char, (a1), __vector signed char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector unsigned char, (a1), __vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool char, (a1), __vector __bool char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (__vector signed char) (a1), (__vector signed char) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector signed short, (a1), __vector signed short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector unsigned short, (a1), __vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool short, (a1), __vector __bool short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __pixel, (a1), __vector __pixel, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (__vector signed short) (a1), (__vector signed short) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector signed int, (a1), __vector signed int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector unsigned int, (a1), __vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector __bool int, (a1), __vector __bool int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (__vector signed int) (a1), (__vector signed int) (a2)), \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, (__vector float) (a1), (__vector float) (a2)), \
+ __builtin_altivec_compiletime_error ("vec_any_ne"))))))))))))))))))))))))
+
+#define vec_any_nge(a1, a2) \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, (a1), (a2)), \
+ __builtin_altivec_compiletime_error ("vec_any_nge"))
+
+#define vec_any_ngt(a1, a2) \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, (a1), (a2)), \
+ __builtin_altivec_compiletime_error ("vec_any_ngt"))
+
+#define vec_any_nle(a1, a2) \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, (a2), (a1)), \
+ __builtin_altivec_compiletime_error ("vec_any_nle"))
+
+#define vec_any_nlt(a1, a2) \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, (a2), (a1)), \
+ __builtin_altivec_compiletime_error ("vec_any_nlt"))
+
+#define vec_any_numeric(a1) \
+__ch (__un_args_eq (__vector float, (a1)), \
+ __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, (a1), (a1)), \
+ __builtin_altivec_compiletime_error ("vec_any_numeric"))
+
+#define vec_any_out(a1, a2) \
+__ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
+ __builtin_altivec_vcmpbfp_p (__CR6_EQ_REV, (a1), (a2)), \
+ __builtin_altivec_compiletime_error ("vec_any_out"))
#endif /* __cplusplus */
diff --git a/contrib/gcc/config/rs6000/altivec.md b/contrib/gcc/config/rs6000/altivec.md
index db341cb..505a573 100644
--- a/contrib/gcc/config/rs6000/altivec.md
+++ b/contrib/gcc/config/rs6000/altivec.md
@@ -594,9 +594,9 @@
;; Fused multiply subtract
(define_insn "altivec_vnmsubfp"
[(set (match_operand:V4SF 0 "register_operand" "=v")
- (minus:V4SF (mult:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (neg:V4SF (minus:V4SF (mult:V4SF (match_operand:V4SF 1 "register_operand" "v")
(match_operand:V4SF 2 "register_operand" "v"))
- (match_operand:V4SF 3 "register_operand" "v")))]
+ (match_operand:V4SF 3 "register_operand" "v"))))]
"TARGET_ALTIVEC"
"vnmsubfp %0,%1,%2,%3"
[(set_attr "type" "vecfloat")])
diff --git a/contrib/gcc/config/rs6000/beos.h b/contrib/gcc/config/rs6000/beos.h
index 1ce36bf..a9e88ac 100644
--- a/contrib/gcc/config/rs6000/beos.h
+++ b/contrib/gcc/config/rs6000/beos.h
@@ -23,18 +23,6 @@
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (BeOS/PowerPC)");
-/* Enable AIX XL compiler calling convention breakage compatibility. */
-#define MASK_XL_CALL 0x40000000
-#define TARGET_XL_CALL (target_flags & MASK_XL_CALL)
-#undef SUBTARGET_SWITCHES
-#define SUBTARGET_SWITCHES \
- {"xl-call", MASK_XL_CALL, \
- N_("Always pass floating-point arguments in memory") }, \
- {"no-xl-call", - MASK_XL_CALL, \
- N_("Don't always pass floating-point arguments in memory") }, \
- {"threads", 0}, \
- {"pe", 0},
-
#undef ASM_SPEC
#define ASM_SPEC "-u %(asm_cpu)"
diff --git a/contrib/gcc/config/rs6000/darwin-ldouble-shared.c b/contrib/gcc/config/rs6000/darwin-ldouble-shared.c
new file mode 100644
index 0000000..8ceea0a
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin-ldouble-shared.c
@@ -0,0 +1,2 @@
+#define IN_LIBGCC2_S 1
+#include "darwin-ldouble.c"
diff --git a/contrib/gcc/config/rs6000/darwin-ldouble.c b/contrib/gcc/config/rs6000/darwin-ldouble.c
index 3174ebb..210f2d6 100644
--- a/contrib/gcc/config/rs6000/darwin-ldouble.c
+++ b/contrib/gcc/config/rs6000/darwin-ldouble.c
@@ -1,5 +1,5 @@
/* 128-bit long double support routines for Darwin.
- Copyright (C) 1993, 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 1993, 2003, 2004, 2005 Free Software Foundation, Inc.
This file is part of GCC.
@@ -48,7 +48,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
This code currently assumes big-endian. */
-#if !_SOFT_FLOAT && (defined (__MACH__) || defined (__powerpc64__))
+#if !_SOFT_FLOAT && (defined (__MACH__) || defined (__powerpc64__) || defined (_AIX))
#define fabs(x) __builtin_fabs(x)
@@ -58,10 +58,27 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
but GCC currently generates poor code when a union is used to turn
a long double into a pair of doubles. */
-extern long double _xlqadd (double, double, double, double);
-extern long double _xlqsub (double, double, double, double);
-extern long double _xlqmul (double, double, double, double);
-extern long double _xlqdiv (double, double, double, double);
+extern long double __gcc_qadd (double, double, double, double);
+extern long double __gcc_qsub (double, double, double, double);
+extern long double __gcc_qmul (double, double, double, double);
+extern long double __gcc_qdiv (double, double, double, double);
+
+#if defined __ELF__ && defined IN_LIBGCC2_S
+/* Provide definitions of the old symbol names to statisfy apps and
+ shared libs built against an older libgcc. To access the _xlq
+ symbols an explicit version reference is needed, so these won't
+ satisfy an unadorned reference like _xlqadd. If dot symbols are
+ not needed, the assembler will remove the aliases from the symbol
+ table. */
+__asm__ (".symver __gcc_qadd,_xlqadd@GCC_3.4\n\t"
+ ".symver __gcc_qsub,_xlqsub@GCC_3.4\n\t"
+ ".symver __gcc_qmul,_xlqmul@GCC_3.4\n\t"
+ ".symver __gcc_qdiv,_xlqdiv@GCC_3.4\n\t"
+ ".symver .__gcc_qadd,._xlqadd@GCC_3.4\n\t"
+ ".symver .__gcc_qsub,._xlqsub@GCC_3.4\n\t"
+ ".symver .__gcc_qmul,._xlqmul@GCC_3.4\n\t"
+ ".symver .__gcc_qdiv,._xlqdiv@GCC_3.4");
+#endif
typedef union
{
@@ -73,7 +90,7 @@ static const double FPKINF = 1.0/0.0;
/* Add two 'long double' values and return the result. */
long double
-_xlqadd (double a, double b, double c, double d)
+__gcc_qadd (double a, double b, double c, double d)
{
longDblUnion z;
double t, tau, u, FPR_zero, FPR_PosInf;
@@ -132,13 +149,13 @@ _xlqadd (double a, double b, double c, double d)
}
long double
-_xlqsub (double a, double b, double c, double d)
+__gcc_qsub (double a, double b, double c, double d)
{
- return _xlqadd (a, b, -c, -d);
+ return __gcc_qadd (a, b, -c, -d);
}
long double
-_xlqmul (double a, double b, double c, double d)
+__gcc_qmul (double a, double b, double c, double d)
{
longDblUnion z;
double t, tau, u, v, w, FPR_zero, FPR_PosInf;
@@ -169,7 +186,7 @@ _xlqmul (double a, double b, double c, double d)
}
long double
-_xlqdiv (double a, double b, double c, double d)
+__gcc_qdiv (double a, double b, double c, double d)
{
longDblUnion z;
double s, sigma, t, tau, u, v, w, FPR_zero, FPR_PosInf;
diff --git a/contrib/gcc/config/rs6000/darwin.h b/contrib/gcc/config/rs6000/darwin.h
index 62ed74c..cae8bac 100644
--- a/contrib/gcc/config/rs6000/darwin.h
+++ b/contrib/gcc/config/rs6000/darwin.h
@@ -111,6 +111,13 @@ do { \
#define SUBTARGET_EXTRA_SPECS \
{ "darwin_arch", "ppc" },
+/* The "-faltivec" option should have been called "-maltivec" all along. */
+#define SUBTARGET_OPTION_TRANSLATE_TABLE \
+ { "-faltivec", "-maltivec -include altivec.h" }, \
+ { "-fno-altivec", "-mno-altivec" }, \
+ { "-Waltivec-long-deprecated", "-mwarn-altivec-long" }, \
+ { "-Wno-altivec-long-deprecated", "-mno-warn-altivec-long" }
+
/* Make both r2 and r3 available for allocation. */
#define FIXED_R2 0
#define FIXED_R13 0
diff --git a/contrib/gcc/config/rs6000/eabi.asm b/contrib/gcc/config/rs6000/eabi.asm
index 058f9b9..c7876bc 100644
--- a/contrib/gcc/config/rs6000/eabi.asm
+++ b/contrib/gcc/config/rs6000/eabi.asm
@@ -252,7 +252,7 @@ FUNC_START(__eabi_convert)
.Lcvt:
lwzu 6,4(3) /* pointer to convert */
- cmpi 0,6,0
+ cmpwi 0,6,0
beq- .Lcvt2 /* if pointer is null, don't convert */
add 6,6,12 /* convert pointer */
diff --git a/contrib/gcc/config/rs6000/libgcc-ppc64.ver b/contrib/gcc/config/rs6000/libgcc-ppc64.ver
index 116d5e7..b27b4b4 100644
--- a/contrib/gcc/config/rs6000/libgcc-ppc64.ver
+++ b/contrib/gcc/config/rs6000/libgcc-ppc64.ver
@@ -1,7 +1,7 @@
-GCC_3.4 {
+GCC_3.4.4 {
# long double support
- _xlqadd
- _xlqsub
- _xlqmul
- _xlqdiv
+ __gcc_qadd
+ __gcc_qsub
+ __gcc_qmul
+ __gcc_qdiv
}
diff --git a/contrib/gcc/config/rs6000/linux-unwind.h b/contrib/gcc/config/rs6000/linux-unwind.h
new file mode 100644
index 0000000..842fd10
--- /dev/null
+++ b/contrib/gcc/config/rs6000/linux-unwind.h
@@ -0,0 +1,322 @@
+/* DWARF2 EH unwinding support for PowerPC and PowerPC64 Linux.
+ Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ In addition to the permissions in the GNU General Public License,
+ the Free Software Foundation gives you unlimited permission to link
+ the compiled version of this file with other programs, and to
+ distribute those programs without any restriction coming from the
+ use of this file. (The General Public License restrictions do
+ apply in other respects; for example, they cover modification of
+ the file, and distribution when not linked into another program.)
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 59 Temple Place - Suite 330, Boston,
+ MA 02111-1307, USA. */
+
+/* This file defines our own versions of various kernel and user
+ structs, so that system headers are not needed, which otherwise
+ can make bootstrapping a new toolchain difficult. Do not use
+ these structs elsewhere; Many fields are missing, particularly
+ from the end of the structures. */
+
+struct gcc_vregs
+{
+ __attribute__ ((vector_size (16))) int vr[32];
+#ifdef __powerpc64__
+ unsigned int pad1[3];
+ unsigned int vscr;
+ unsigned int vsave;
+ unsigned int pad2[3];
+#else
+ unsigned int vsave;
+ unsigned int pad[2];
+ unsigned int vscr;
+#endif
+};
+
+struct gcc_regs
+{
+ unsigned long gpr[32];
+ unsigned long nip;
+ unsigned long msr;
+ unsigned long orig_gpr3;
+ unsigned long ctr;
+ unsigned long link;
+ unsigned long xer;
+ unsigned long ccr;
+ unsigned long softe;
+ unsigned long trap;
+ unsigned long dar;
+ unsigned long dsisr;
+ unsigned long result;
+ unsigned long pad1[4];
+ double fpr[32];
+ unsigned int pad2;
+ unsigned int fpscr;
+#ifdef __powerpc64__
+ struct gcc_vregs *vp;
+#else
+ unsigned int pad3[2];
+#endif
+ struct gcc_vregs vregs;
+};
+
+struct gcc_ucontext
+{
+#ifdef __powerpc64__
+ unsigned long pad[28];
+#else
+ unsigned long pad[12];
+#endif
+ struct gcc_regs *regs;
+ struct gcc_regs rsave;
+};
+
+#ifdef __powerpc64__
+
+enum { SIGNAL_FRAMESIZE = 128 };
+
+/* If the current unwind info (FS) does not contain explicit info
+ saving R2, then we have to do a minor amount of code reading to
+ figure out if it was saved. The big problem here is that the
+ code that does the save/restore is generated by the linker, so
+ we have no good way to determine at compile time what to do. */
+
+#define MD_FROB_UPDATE_CONTEXT(CTX, FS) \
+ do { \
+ if ((FS)->regs.reg[2].how == REG_UNSAVED) \
+ { \
+ unsigned int *insn \
+ = (unsigned int *) \
+ _Unwind_GetGR ((CTX), LINK_REGISTER_REGNUM); \
+ if (*insn == 0xE8410028) \
+ _Unwind_SetGRPtr ((CTX), 2, (CTX)->cfa + 40); \
+ } \
+ } while (0)
+
+/* If PC is at a sigreturn trampoline, return a pointer to the
+ regs. Otherwise return NULL. */
+
+#define PPC_LINUX_GET_REGS(CONTEXT) \
+({ \
+ const unsigned char *pc = (CONTEXT)->ra; \
+ struct gcc_regs *regs = NULL; \
+ \
+ /* addi r1, r1, 128; li r0, 0x0077; sc (sigreturn) */ \
+ /* addi r1, r1, 128; li r0, 0x00AC; sc (rt_sigreturn) */ \
+ if (*(unsigned int *) (pc + 0) != 0x38210000 + SIGNAL_FRAMESIZE \
+ || *(unsigned int *) (pc + 8) != 0x44000002) \
+ ; \
+ else if (*(unsigned int *) (pc + 4) == 0x38000077) \
+ { \
+ struct sigframe { \
+ char gap[SIGNAL_FRAMESIZE]; \
+ unsigned long pad[7]; \
+ struct gcc_regs *regs; \
+ } *frame = (struct sigframe *) (CONTEXT)->cfa; \
+ regs = frame->regs; \
+ } \
+ else if (*(unsigned int *) (pc + 4) == 0x380000AC) \
+ { \
+ /* This works for 2.4 kernels, but not for 2.6 kernels with vdso \
+ because pc isn't pointing into the stack. Can be removed when \
+ no one is running 2.4.19 or 2.4.20, the first two ppc64 \
+ kernels released. */ \
+ struct rt_sigframe_24 { \
+ int tramp[6]; \
+ void *pinfo; \
+ struct gcc_ucontext *puc; \
+ } *frame24 = (struct rt_sigframe_24 *) pc; \
+ \
+ /* Test for magic value in *puc of vdso. */ \
+ if ((long) frame24->puc != -21 * 8) \
+ regs = frame24->puc->regs; \
+ else \
+ { \
+ /* This works for 2.4.21 and later kernels. */ \
+ struct rt_sigframe { \
+ char gap[SIGNAL_FRAMESIZE]; \
+ struct gcc_ucontext uc; \
+ unsigned long pad[2]; \
+ int tramp[6]; \
+ void *pinfo; \
+ struct gcc_ucontext *puc; \
+ } *frame = (struct rt_sigframe *) (CONTEXT)->cfa; \
+ regs = frame->uc.regs; \
+ } \
+ } \
+ regs; \
+})
+
+#define LINUX_HWCAP_DEFAULT 0xc0000000
+
+#define PPC_LINUX_VREGS(REGS) (REGS)->vp
+
+#else /* !__powerpc64__ */
+
+enum { SIGNAL_FRAMESIZE = 64 };
+
+#define PPC_LINUX_GET_REGS(CONTEXT) \
+({ \
+ const unsigned char *pc = (CONTEXT)->ra; \
+ struct gcc_regs *regs = NULL; \
+ \
+ /* li r0, 0x7777; sc (sigreturn old) */ \
+ /* li r0, 0x0077; sc (sigreturn new) */ \
+ /* li r0, 0x6666; sc (rt_sigreturn old) */ \
+ /* li r0, 0x00AC; sc (rt_sigreturn new) */ \
+ if (*(unsigned int *) (pc + 4) != 0x44000002) \
+ ; \
+ else if (*(unsigned int *) (pc + 0) == 0x38007777 \
+ || *(unsigned int *) (pc + 0) == 0x38000077) \
+ { \
+ struct sigframe { \
+ char gap[SIGNAL_FRAMESIZE]; \
+ unsigned long pad[7]; \
+ struct gcc_regs *regs; \
+ } *frame = (struct sigframe *) (CONTEXT)->cfa; \
+ regs = frame->regs; \
+ } \
+ else if (*(unsigned int *) (pc + 0) == 0x38006666 \
+ || *(unsigned int *) (pc + 0) == 0x380000AC) \
+ { \
+ struct rt_sigframe { \
+ char gap[SIGNAL_FRAMESIZE + 16]; \
+ char siginfo[128]; \
+ struct gcc_ucontext uc; \
+ } *frame = (struct rt_sigframe *) (CONTEXT)->cfa; \
+ regs = frame->uc.regs; \
+ } \
+ regs; \
+})
+
+#define LINUX_HWCAP_DEFAULT 0x80000000
+
+#define PPC_LINUX_VREGS(REGS) &(REGS)->vregs
+
+#endif
+
+/* Do code reading to identify a signal frame, and set the frame
+ state data appropriately. See unwind-dw2.c for the structs. */
+
+#define MD_FALLBACK_FRAME_STATE_FOR(CONTEXT, FS, SUCCESS) \
+ do { \
+ static long hwcap = 0; \
+ struct gcc_regs *regs = PPC_LINUX_GET_REGS (CONTEXT); \
+ long new_cfa; \
+ int i; \
+ \
+ if (regs == NULL) \
+ break; \
+ \
+ new_cfa = regs->gpr[STACK_POINTER_REGNUM]; \
+ (FS)->cfa_how = CFA_REG_OFFSET; \
+ (FS)->cfa_reg = STACK_POINTER_REGNUM; \
+ (FS)->cfa_offset = new_cfa - (long) (CONTEXT)->cfa; \
+ \
+ for (i = 0; i < 32; i++) \
+ if (i != STACK_POINTER_REGNUM) \
+ { \
+ (FS)->regs.reg[i].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[i].loc.offset \
+ = (long) &regs->gpr[i] - new_cfa; \
+ } \
+ \
+ (FS)->regs.reg[CR2_REGNO].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[CR2_REGNO].loc.offset \
+ = (long) &regs->ccr - new_cfa; \
+ \
+ (FS)->regs.reg[LINK_REGISTER_REGNUM].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[LINK_REGISTER_REGNUM].loc.offset \
+ = (long) &regs->link - new_cfa; \
+ \
+ (FS)->regs.reg[ARG_POINTER_REGNUM].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[ARG_POINTER_REGNUM].loc.offset \
+ = (long) &regs->nip - new_cfa; \
+ (FS)->retaddr_column = ARG_POINTER_REGNUM; \
+ \
+ if (hwcap == 0) \
+ { \
+ /* __libc_stack_end holds the original stack passed to a \
+ process. */ \
+ extern long *__libc_stack_end; \
+ long argc; \
+ char **argv; \
+ char **envp; \
+ struct auxv \
+ { \
+ long a_type; \
+ long a_val; \
+ } *auxp; \
+ \
+ /* The Linux kernel puts argc first on the stack. */ \
+ argc = __libc_stack_end[0]; \
+ /* Followed by argv, NULL terminated. */ \
+ argv = (char **) __libc_stack_end + 1; \
+ /* Followed by environment string pointers, NULL terminated. */ \
+ envp = argv + argc + 1; \
+ while (*envp++) \
+ continue; \
+ /* Followed by the aux vector, zero terminated. */ \
+ for (auxp = (struct auxv *) envp; auxp->a_type != 0; ++auxp) \
+ if (auxp->a_type == 16) \
+ { \
+ hwcap = auxp->a_val; \
+ break; \
+ } \
+ \
+ /* These will already be set if we found AT_HWCAP. A non-zero \
+ value stops us looking again if for some reason we couldn't \
+ find AT_HWCAP. */ \
+ hwcap |= LINUX_HWCAP_DEFAULT; \
+ } \
+ \
+ /* If we have a FPU... */ \
+ if (hwcap & 0x08000000) \
+ for (i = 0; i < 32; i++) \
+ { \
+ (FS)->regs.reg[i + 32].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[i + 32].loc.offset \
+ = (long) &regs->fpr[i] - new_cfa; \
+ } \
+ \
+ /* If we have a VMX unit... */ \
+ if (hwcap & 0x10000000) \
+ { \
+ struct gcc_vregs *vregs; \
+ vregs = PPC_LINUX_VREGS (regs); \
+ if (regs->msr & (1 << 25)) \
+ { \
+ for (i = 0; i < 32; i++) \
+ { \
+ (FS)->regs.reg[i + FIRST_ALTIVEC_REGNO].how \
+ = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[i + FIRST_ALTIVEC_REGNO].loc.offset \
+ = (long) &vregs[i] - new_cfa; \
+ } \
+ \
+ (FS)->regs.reg[VSCR_REGNO].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[VSCR_REGNO].loc.offset \
+ = (long) &vregs->vscr - new_cfa; \
+ } \
+ \
+ (FS)->regs.reg[VRSAVE_REGNO].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[VRSAVE_REGNO].loc.offset \
+ = (long) &vregs->vsave - new_cfa; \
+ } \
+ \
+ goto SUCCESS; \
+ } while (0)
diff --git a/contrib/gcc/config/rs6000/linux.h b/contrib/gcc/config/rs6000/linux.h
index 009ac66..84cdeed 100644
--- a/contrib/gcc/config/rs6000/linux.h
+++ b/contrib/gcc/config/rs6000/linux.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler,
for PowerPC machines running Linux.
- Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003
+ Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
Free Software Foundation, Inc.
Contributed by Michael Meissner (meissner@cygnus.com).
@@ -24,6 +24,14 @@
#undef MD_EXEC_PREFIX
#undef MD_STARTFILE_PREFIX
+/* Linux doesn't support saving and restoring 64-bit regs in a 32-bit
+ process. */
+#define OS_MISSING_POWERPC64 1
+
+/* glibc has float and long double forms of math functions. */
+#undef TARGET_C99_FUNCTIONS
+#define TARGET_C99_FUNCTIONS 1
+
#undef TARGET_OS_CPP_BUILTINS
#define TARGET_OS_CPP_BUILTINS() \
do \
@@ -86,94 +94,16 @@
#undef TARGET_64BIT
#define TARGET_64BIT 0
-/* We don't need to generate entries in .fixup. */
+/* We don't need to generate entries in .fixup, except when
+ -mrelocatable or -mrelocatable-lib is given. */
#undef RELOCATABLE_NEEDS_FIXUP
+#define RELOCATABLE_NEEDS_FIXUP \
+ (target_flags & target_flags_explicit & MASK_RELOCATABLE)
#define TARGET_ASM_FILE_END file_end_indicate_exec_stack
#define TARGET_HAS_F_SETLKW
-/* Do code reading to identify a signal frame, and set the frame
- state data appropriately. See unwind-dw2.c for the structs. */
-
#ifdef IN_LIBGCC2
-#include <signal.h>
-
-/* During the 2.5 kernel series the kernel ucontext was changed, but
- the new layout is compatible with the old one, so we just define
- and use the old one here for simplicity and compatibility. */
-
-struct kernel_old_ucontext {
- unsigned long uc_flags;
- struct ucontext *uc_link;
- stack_t uc_stack;
- struct sigcontext_struct uc_mcontext;
- sigset_t uc_sigmask;
-};
-
-enum { SIGNAL_FRAMESIZE = 64 };
+#include "config/rs6000/linux-unwind.h"
#endif
-
-#define MD_FALLBACK_FRAME_STATE_FOR(CONTEXT, FS, SUCCESS) \
- do { \
- unsigned char *pc_ = (CONTEXT)->ra; \
- struct sigcontext *sc_; \
- long new_cfa_; \
- int i_; \
- \
- /* li r0, 0x7777; sc (sigreturn old) */ \
- /* li r0, 0x0077; sc (sigreturn new) */ \
- /* li r0, 0x6666; sc (rt_sigreturn old) */ \
- /* li r0, 0x00AC; sc (rt_sigreturn new) */ \
- if (*(unsigned int *) (pc_+4) != 0x44000002) \
- break; \
- if (*(unsigned int *) (pc_+0) == 0x38007777 \
- || *(unsigned int *) (pc_+0) == 0x38000077) \
- { \
- struct sigframe { \
- char gap[SIGNAL_FRAMESIZE]; \
- struct sigcontext sigctx; \
- } *rt_ = (CONTEXT)->cfa; \
- sc_ = &rt_->sigctx; \
- } \
- else if (*(unsigned int *) (pc_+0) == 0x38006666 \
- || *(unsigned int *) (pc_+0) == 0x380000AC) \
- { \
- struct rt_sigframe { \
- char gap[SIGNAL_FRAMESIZE]; \
- unsigned long _unused[2]; \
- struct siginfo *pinfo; \
- void *puc; \
- struct siginfo info; \
- struct kernel_old_ucontext uc; \
- } *rt_ = (CONTEXT)->cfa; \
- sc_ = &rt_->uc.uc_mcontext; \
- } \
- else \
- break; \
- \
- new_cfa_ = sc_->regs->gpr[STACK_POINTER_REGNUM]; \
- (FS)->cfa_how = CFA_REG_OFFSET; \
- (FS)->cfa_reg = STACK_POINTER_REGNUM; \
- (FS)->cfa_offset = new_cfa_ - (long) (CONTEXT)->cfa; \
- \
- for (i_ = 0; i_ < 32; i_++) \
- if (i_ != STACK_POINTER_REGNUM) \
- { \
- (FS)->regs.reg[i_].how = REG_SAVED_OFFSET; \
- (FS)->regs.reg[i_].loc.offset \
- = (long)&(sc_->regs->gpr[i_]) - new_cfa_; \
- } \
- \
- (FS)->regs.reg[LINK_REGISTER_REGNUM].how = REG_SAVED_OFFSET; \
- (FS)->regs.reg[LINK_REGISTER_REGNUM].loc.offset \
- = (long)&(sc_->regs->link) - new_cfa_; \
- \
- (FS)->regs.reg[CR0_REGNO].how = REG_SAVED_OFFSET; \
- (FS)->regs.reg[CR0_REGNO].loc.offset \
- = (long)&(sc_->regs->nip) - new_cfa_; \
- (FS)->retaddr_column = CR0_REGNO; \
- goto SUCCESS; \
- } while (0)
-
-#define OS_MISSING_POWERPC64 1
diff --git a/contrib/gcc/config/rs6000/linux64.h b/contrib/gcc/config/rs6000/linux64.h
index 4fe4199..013e23a 100644
--- a/contrib/gcc/config/rs6000/linux64.h
+++ b/contrib/gcc/config/rs6000/linux64.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler,
for 64 bit PowerPC linux.
- Copyright (C) 2000, 2001, 2002, 2003, 2004
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005
Free Software Foundation, Inc.
This file is part of GCC.
@@ -53,8 +53,11 @@
#undef PROCESSOR_DEFAULT64
#define PROCESSOR_DEFAULT64 PROCESSOR_PPC630
-#undef TARGET_RELOCATABLE
-#define TARGET_RELOCATABLE (!TARGET_64BIT && (target_flags & MASK_RELOCATABLE))
+/* We don't need to generate entries in .fixup, except when
+ -mrelocatable or -mrelocatable-lib is given. */
+#undef RELOCATABLE_NEEDS_FIXUP
+#define RELOCATABLE_NEEDS_FIXUP \
+ (target_flags & target_flags_explicit & MASK_RELOCATABLE)
#undef RS6000_ABI_NAME
#define RS6000_ABI_NAME (TARGET_64BIT ? "aixdesc" : "sysv")
@@ -188,6 +191,8 @@
#define TARGET_EABI 0
#undef TARGET_PROTOTYPE
#define TARGET_PROTOTYPE 0
+#undef RELOCATABLE_NEEDS_FIXUP
+#define RELOCATABLE_NEEDS_FIXUP 0
#endif
@@ -212,9 +217,6 @@
#define PROFILE_HOOK(LABEL) \
do { if (TARGET_64BIT) output_profile_hook (LABEL); } while (0)
-/* We don't need to generate entries in .fixup. */
-#undef RELOCATABLE_NEEDS_FIXUP
-
/* PowerPC64 Linux word-aligns FP doubles when -malign-power is given. */
#undef ADJUST_FIELD_ALIGN
#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
@@ -287,6 +289,14 @@
#undef MD_EXEC_PREFIX
#undef MD_STARTFILE_PREFIX
+/* Linux doesn't support saving and restoring 64-bit regs in a 32-bit
+ process. */
+#define OS_MISSING_POWERPC64 !TARGET_64BIT
+
+/* glibc has float and long double forms of math functions. */
+#undef TARGET_C99_FUNCTIONS
+#define TARGET_C99_FUNCTIONS 1
+
#undef TARGET_OS_CPP_BUILTINS
#define TARGET_OS_CPP_BUILTINS() \
do \
@@ -541,182 +551,13 @@ while (0)
#undef DRAFT_V4_STRUCT_RET
#define DRAFT_V4_STRUCT_RET (!TARGET_64BIT)
-#define TARGET_ASM_FILE_END file_end_indicate_exec_stack
+#define TARGET_ASM_FILE_END rs6000_elf_end_indicate_exec_stack
#define TARGET_HAS_F_SETLKW
#define LINK_GCC_C_SEQUENCE_SPEC \
"%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
-/* Do code reading to identify a signal frame, and set the frame
- state data appropriately. See unwind-dw2.c for the structs. */
-
#ifdef IN_LIBGCC2
-#include <signal.h>
-#ifdef __powerpc64__
-#include <sys/ucontext.h>
-
-enum { SIGNAL_FRAMESIZE = 128 };
-
-#else
-
-/* During the 2.5 kernel series the kernel ucontext was changed, but
- the new layout is compatible with the old one, so we just define
- and use the old one here for simplicity and compatibility. */
-
-struct kernel_old_ucontext {
- unsigned long uc_flags;
- struct ucontext *uc_link;
- stack_t uc_stack;
- struct sigcontext_struct uc_mcontext;
- sigset_t uc_sigmask;
-};
-enum { SIGNAL_FRAMESIZE = 64 };
+#include "config/rs6000/linux-unwind.h"
#endif
-
-#endif
-
-#ifdef __powerpc64__
-
-/* If the current unwind info (FS) does not contain explicit info
- saving R2, then we have to do a minor amount of code reading to
- figure out if it was saved. The big problem here is that the
- code that does the save/restore is generated by the linker, so
- we have no good way to determine at compile time what to do. */
-
-#define MD_FROB_UPDATE_CONTEXT(CTX, FS) \
- do { \
- if ((FS)->regs.reg[2].how == REG_UNSAVED) \
- { \
- unsigned int *insn \
- = (unsigned int *) \
- _Unwind_GetGR ((CTX), LINK_REGISTER_REGNUM); \
- if (*insn == 0xE8410028) \
- _Unwind_SetGRPtr ((CTX), 2, (CTX)->cfa + 40); \
- } \
- } while (0)
-
-#define MD_FALLBACK_FRAME_STATE_FOR(CONTEXT, FS, SUCCESS) \
- do { \
- unsigned char *pc_ = (CONTEXT)->ra; \
- struct sigcontext *sc_; \
- long new_cfa_; \
- int i_; \
- \
- /* addi r1, r1, 128; li r0, 0x0077; sc (sigreturn) */ \
- /* addi r1, r1, 128; li r0, 0x00AC; sc (rt_sigreturn) */ \
- if (*(unsigned int *) (pc_+0) != 0x38210000 + SIGNAL_FRAMESIZE \
- || *(unsigned int *) (pc_+8) != 0x44000002) \
- break; \
- if (*(unsigned int *) (pc_+4) == 0x38000077) \
- { \
- struct sigframe { \
- char gap[SIGNAL_FRAMESIZE]; \
- struct sigcontext sigctx; \
- } *rt_ = (CONTEXT)->cfa; \
- sc_ = &rt_->sigctx; \
- } \
- else if (*(unsigned int *) (pc_+4) == 0x380000AC) \
- { \
- struct rt_sigframe { \
- int tramp[6]; \
- struct siginfo *pinfo; \
- struct ucontext *puc; \
- } *rt_ = (struct rt_sigframe *) pc_; \
- sc_ = &rt_->puc->uc_mcontext; \
- } \
- else \
- break; \
- \
- new_cfa_ = sc_->regs->gpr[STACK_POINTER_REGNUM]; \
- (FS)->cfa_how = CFA_REG_OFFSET; \
- (FS)->cfa_reg = STACK_POINTER_REGNUM; \
- (FS)->cfa_offset = new_cfa_ - (long) (CONTEXT)->cfa; \
- \
- for (i_ = 0; i_ < 32; i_++) \
- if (i_ != STACK_POINTER_REGNUM) \
- { \
- (FS)->regs.reg[i_].how = REG_SAVED_OFFSET; \
- (FS)->regs.reg[i_].loc.offset \
- = (long)&(sc_->regs->gpr[i_]) - new_cfa_; \
- } \
- \
- (FS)->regs.reg[LINK_REGISTER_REGNUM].how = REG_SAVED_OFFSET; \
- (FS)->regs.reg[LINK_REGISTER_REGNUM].loc.offset \
- = (long)&(sc_->regs->link) - new_cfa_; \
- \
- (FS)->regs.reg[ARG_POINTER_REGNUM].how = REG_SAVED_OFFSET; \
- (FS)->regs.reg[ARG_POINTER_REGNUM].loc.offset \
- = (long)&(sc_->regs->nip) - new_cfa_; \
- (FS)->retaddr_column = ARG_POINTER_REGNUM; \
- goto SUCCESS; \
- } while (0)
-
-#else
-
-#define MD_FALLBACK_FRAME_STATE_FOR(CONTEXT, FS, SUCCESS) \
- do { \
- unsigned char *pc_ = (CONTEXT)->ra; \
- struct sigcontext *sc_; \
- long new_cfa_; \
- int i_; \
- \
- /* li r0, 0x7777; sc (sigreturn old) */ \
- /* li r0, 0x0077; sc (sigreturn new) */ \
- /* li r0, 0x6666; sc (rt_sigreturn old) */ \
- /* li r0, 0x00AC; sc (rt_sigreturn new) */ \
- if (*(unsigned int *) (pc_+4) != 0x44000002) \
- break; \
- if (*(unsigned int *) (pc_+0) == 0x38007777 \
- || *(unsigned int *) (pc_+0) == 0x38000077) \
- { \
- struct sigframe { \
- char gap[SIGNAL_FRAMESIZE]; \
- struct sigcontext sigctx; \
- } *rt_ = (CONTEXT)->cfa; \
- sc_ = &rt_->sigctx; \
- } \
- else if (*(unsigned int *) (pc_+0) == 0x38006666 \
- || *(unsigned int *) (pc_+0) == 0x380000AC) \
- { \
- struct rt_sigframe { \
- char gap[SIGNAL_FRAMESIZE]; \
- unsigned long _unused[2]; \
- struct siginfo *pinfo; \
- void *puc; \
- struct siginfo info; \
- struct kernel_old_ucontext uc; \
- } *rt_ = (CONTEXT)->cfa; \
- sc_ = &rt_->uc.uc_mcontext; \
- } \
- else \
- break; \
- \
- new_cfa_ = sc_->regs->gpr[STACK_POINTER_REGNUM]; \
- (FS)->cfa_how = CFA_REG_OFFSET; \
- (FS)->cfa_reg = STACK_POINTER_REGNUM; \
- (FS)->cfa_offset = new_cfa_ - (long) (CONTEXT)->cfa; \
- \
- for (i_ = 0; i_ < 32; i_++) \
- if (i_ != STACK_POINTER_REGNUM) \
- { \
- (FS)->regs.reg[i_].how = REG_SAVED_OFFSET; \
- (FS)->regs.reg[i_].loc.offset \
- = (long)&(sc_->regs->gpr[i_]) - new_cfa_; \
- } \
- \
- (FS)->regs.reg[LINK_REGISTER_REGNUM].how = REG_SAVED_OFFSET; \
- (FS)->regs.reg[LINK_REGISTER_REGNUM].loc.offset \
- = (long)&(sc_->regs->link) - new_cfa_; \
- \
- (FS)->regs.reg[CR0_REGNO].how = REG_SAVED_OFFSET; \
- (FS)->regs.reg[CR0_REGNO].loc.offset \
- = (long)&(sc_->regs->nip) - new_cfa_; \
- (FS)->retaddr_column = CR0_REGNO; \
- goto SUCCESS; \
- } while (0)
-
-#endif
-
-
-#define OS_MISSING_POWERPC64 !TARGET_64BIT
diff --git a/contrib/gcc/config/rs6000/rs6000-c.c b/contrib/gcc/config/rs6000/rs6000-c.c
index a47afee..13d0ca6 100644
--- a/contrib/gcc/config/rs6000/rs6000-c.c
+++ b/contrib/gcc/config/rs6000/rs6000-c.c
@@ -62,13 +62,13 @@ rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED)
if (c_lex (&x) != CPP_CLOSE_PAREN)
SYNTAX_ERROR ("missing close paren");
- if (n != integer_zero_node && n != integer_one_node)
+ if (!integer_zerop (n) && !integer_onep (n))
SYNTAX_ERROR ("number must be 0 or 1");
if (c_lex (&x) != CPP_EOF)
warning ("junk at end of #pragma longcall");
- rs6000_default_long_calls = (n == integer_one_node);
+ rs6000_default_long_calls = integer_onep (n);
}
/* Handle defining many CPP flags based on TARGET_xxx. As a general
@@ -92,7 +92,15 @@ rs6000_cpu_cpp_builtins (cpp_reader *pfile)
if (! TARGET_POWER && ! TARGET_POWER2 && ! TARGET_POWERPC)
builtin_define ("_ARCH_COM");
if (TARGET_ALTIVEC)
- builtin_define ("__ALTIVEC__");
+ {
+ builtin_define ("__ALTIVEC__");
+ builtin_define ("__VEC__=10206");
+
+ /* Define the AltiVec syntactic elements. */
+ builtin_define ("__vector=__attribute__((altivec(vector__)))");
+ builtin_define ("__pixel=__attribute__((altivec(pixel__))) unsigned short");
+ builtin_define ("__bool=__attribute__((altivec(bool__))) unsigned");
+ }
if (TARGET_SPE)
builtin_define ("__SPE__");
if (TARGET_SOFT_FLOAT)
diff --git a/contrib/gcc/config/rs6000/rs6000-protos.h b/contrib/gcc/config/rs6000/rs6000-protos.h
index acac75a..3d17162 100644
--- a/contrib/gcc/config/rs6000/rs6000-protos.h
+++ b/contrib/gcc/config/rs6000/rs6000-protos.h
@@ -116,7 +116,7 @@ extern enum rtx_code rs6000_reverse_condition (enum machine_mode,
extern void rs6000_emit_sCOND (enum rtx_code, rtx);
extern void rs6000_emit_cbranch (enum rtx_code, rtx);
extern char * output_cbranch (rtx, const char *, int, rtx);
-extern char * output_e500_flip_gt_bit (rtx, rtx);
+extern char * output_e500_flip_eq_bit (rtx, rtx);
extern rtx rs6000_emit_set_const (rtx, enum machine_mode, rtx, int);
extern int rs6000_emit_cmove (rtx, rtx, rtx, rtx);
extern void rs6000_emit_minmax (rtx, enum rtx_code, rtx, rtx);
diff --git a/contrib/gcc/config/rs6000/rs6000.c b/contrib/gcc/config/rs6000/rs6000.c
index 33f47ce..1ffd4da 100644
--- a/contrib/gcc/config/rs6000/rs6000.c
+++ b/contrib/gcc/config/rs6000/rs6000.c
@@ -1,6 +1,6 @@
/* Subroutines used for code generation on IBM RS/6000.
Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GCC.
@@ -220,6 +220,20 @@ static GTY(()) tree opaque_V2SI_type_node;
static GTY(()) tree opaque_V2SF_type_node;
static GTY(()) tree opaque_p_V2SI_type_node;
+/* AltiVec requires a few more basic types in addition to the vector
+ types already defined in tree.c. */
+static GTY(()) tree bool_char_type_node; /* __bool char */
+static GTY(()) tree bool_short_type_node; /* __bool short */
+static GTY(()) tree bool_int_type_node; /* __bool int */
+static GTY(()) tree pixel_type_node; /* __pixel */
+static GTY(()) tree bool_V16QI_type_node; /* __vector __bool char */
+static GTY(()) tree bool_V8HI_type_node; /* __vector __bool short */
+static GTY(()) tree bool_V4SI_type_node; /* __vector __bool int */
+static GTY(()) tree pixel_V8HI_type_node; /* __vector __pixel */
+
+int rs6000_warn_altivec_long = 1; /* On by default. */
+const char *rs6000_warn_altivec_long_switch;
+
const char *rs6000_traceback_name;
static enum {
traceback_default = 0,
@@ -238,7 +252,8 @@ static GTY(()) int rs6000_sr_alias_set;
/* Call distance, overridden by -mlongcall and #pragma longcall(1).
The only place that looks at this is rs6000_set_default_type_attributes;
everywhere else should rely on the presence or absence of a longcall
- attribute on the function declaration. */
+ attribute on the function declaration. Exception: init_cumulative_args
+ looks at it too, for libcalls. */
int rs6000_default_long_calls;
const char *rs6000_longcall_switch;
@@ -290,6 +305,8 @@ static void rs6000_assemble_visibility (tree, int);
#endif
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
+static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
+static const char *rs6000_mangle_fundamental_type (tree);
extern const struct attribute_spec rs6000_attribute_table[];
static void rs6000_set_default_type_attributes (tree);
static void rs6000_output_function_prologue (FILE *, HOST_WIDE_INT);
@@ -303,6 +320,7 @@ static void rs6000_file_start (void);
static unsigned int rs6000_elf_section_type_flags (tree, const char *, int);
static void rs6000_elf_asm_out_constructor (rtx, int);
static void rs6000_elf_asm_out_destructor (rtx, int);
+static void rs6000_elf_end_indicate_exec_stack (void) ATTRIBUTE_UNUSED;
static void rs6000_elf_select_section (tree, int, unsigned HOST_WIDE_INT);
static void rs6000_elf_unique_section (tree, int);
static void rs6000_elf_select_rtx_section (enum machine_mode, rtx,
@@ -398,8 +416,7 @@ static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_complex_function_value (enum machine_mode);
static rtx rs6000_spe_function_arg (CUMULATIVE_ARGS *,
enum machine_mode, tree);
-static rtx rs6000_mixed_function_arg (CUMULATIVE_ARGS *,
- enum machine_mode, tree, int);
+static rtx rs6000_mixed_function_arg (enum machine_mode, tree, int);
static void rs6000_move_block_from_reg (int regno, rtx x, int nregs);
static void setup_incoming_varargs (CUMULATIVE_ARGS *,
enum machine_mode, tree,
@@ -565,6 +582,9 @@ static const char alt_reg_names[][8] =
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
+#undef TARGET_MANGLE_FUNDAMENTAL_TYPE
+#define TARGET_MANGLE_FUNDAMENTAL_TYPE rs6000_mangle_fundamental_type
+
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
@@ -735,9 +755,8 @@ rs6000_override_options (const char *default_cpu)
set_masks &= ~MASK_ALTIVEC;
#endif
- /* Don't override these by the processor default if given explicitly. */
- set_masks &= ~(target_flags_explicit
- & (MASK_MULTIPLE | MASK_STRING | MASK_SOFT_FLOAT));
+ /* Don't override by the processor default if given explicitly. */
+ set_masks &= ~target_flags_explicit;
/* Identify the processor type. */
rs6000_select[0].string = default_cpu;
@@ -923,6 +942,17 @@ rs6000_override_options (const char *default_cpu)
rs6000_default_long_calls = (base[0] != 'n');
}
+ /* Handle -m(no-)warn-altivec-long similarly. */
+ if (rs6000_warn_altivec_long_switch)
+ {
+ const char *base = rs6000_warn_altivec_long_switch;
+ while (base[-1] != 'm') base--;
+
+ if (*rs6000_warn_altivec_long_switch != '\0')
+ error ("invalid option `%s'", base);
+ rs6000_warn_altivec_long = (base[0] != 'n');
+ }
+
/* Handle -mprioritize-restricted-insns option. */
rs6000_sched_restricted_insns_priority
= (rs6000_sched_groups ? 1 : 0);
@@ -2972,13 +3002,9 @@ rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
rs6000_emit_move (got, gsym, Pmode);
else
{
- char buf[30];
- static int tls_got_labelno = 0;
- rtx tempLR, lab, tmp3, mem;
+ rtx tempLR, tmp3, mem;
rtx first, last;
- ASM_GENERATE_INTERNAL_LABEL (buf, "LTLS", tls_got_labelno++);
- lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
tempLR = gen_reg_rtx (Pmode);
tmp1 = gen_reg_rtx (Pmode);
tmp2 = gen_reg_rtx (Pmode);
@@ -2986,8 +3012,7 @@ rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
mem = gen_rtx_MEM (Pmode, tmp1);
RTX_UNCHANGING_P (mem) = 1;
- first = emit_insn (gen_load_toc_v4_PIC_1b (tempLR, lab,
- gsym));
+ first = emit_insn (gen_load_toc_v4_PIC_1b (tempLR, gsym));
emit_move_insn (tmp1, tempLR);
emit_move_insn (tmp2, mem);
emit_insn (gen_addsi3 (tmp3, tmp1, tmp2));
@@ -3942,10 +3967,11 @@ init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
cum->nargs_prototype = n_named_args;
/* Check for a longcall attribute. */
- if (fntype
- && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
- && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype)))
- cum->call_cookie = CALL_LONG;
+ if ((!fntype && rs6000_default_long_calls)
+ || (fntype
+ && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
+ && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
+ cum->call_cookie |= CALL_LONG;
if (TARGET_DEBUG_ARG)
{
@@ -4258,105 +4284,49 @@ rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
/* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
static rtx
-rs6000_mixed_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
- tree type, int align_words)
-{
- if (mode == DFmode)
- {
- /* -mpowerpc64 with 32bit ABI splits up a DFmode argument
- in vararg list into zero, one or two GPRs */
- if (align_words >= GP_ARG_NUM_REG)
- return gen_rtx_PARALLEL (DFmode,
- gen_rtvec (2,
- gen_rtx_EXPR_LIST (VOIDmode,
- NULL_RTX, const0_rtx),
- gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_REG (mode,
- cum->fregno),
- const0_rtx)));
- else if (align_words + rs6000_arg_size (mode, type)
- > GP_ARG_NUM_REG)
- /* If this is partially on the stack, then we only
- include the portion actually in registers here. */
- return gen_rtx_PARALLEL (DFmode,
- gen_rtvec (2,
- gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_REG (SImode,
- GP_ARG_MIN_REG
- + align_words),
- const0_rtx),
- gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_REG (mode,
- cum->fregno),
- const0_rtx)));
-
- /* split a DFmode arg into two GPRs */
- return gen_rtx_PARALLEL (DFmode,
- gen_rtvec (3,
- gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_REG (SImode,
- GP_ARG_MIN_REG
- + align_words),
- const0_rtx),
- gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_REG (SImode,
- GP_ARG_MIN_REG
- + align_words + 1),
- GEN_INT (4)),
- gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_REG (mode, cum->fregno),
- const0_rtx)));
- }
- /* -mpowerpc64 with 32bit ABI splits up a DImode argument into one
- or two GPRs */
- else if (mode == DImode)
+rs6000_mixed_function_arg (enum machine_mode mode, tree type, int align_words)
+{
+ int n_units;
+ int i, k;
+ rtx rvec[GP_ARG_NUM_REG + 1];
+
+ if (align_words >= GP_ARG_NUM_REG)
+ return NULL_RTX;
+
+ n_units = rs6000_arg_size (mode, type);
+
+ /* Optimize the simple case where the arg fits in one gpr, except in
+ the case of BLKmode due to assign_parms assuming that registers are
+ BITS_PER_WORD wide. */
+ if (n_units == 0
+ || (n_units == 1 && mode != BLKmode))
+ return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
+
+ k = 0;
+ if (align_words + n_units > GP_ARG_NUM_REG)
+ /* Not all of the arg fits in gprs. Say that it goes in memory too,
+ using a magic NULL_RTX component.
+ FIXME: This is not strictly correct. Only some of the arg
+ belongs in memory, not all of it. However, there isn't any way
+ to do this currently, apart from building rtx descriptions for
+ the pieces of memory we want stored. Due to bugs in the generic
+ code we can't use the normal function_arg_partial_nregs scheme
+ with the PARALLEL arg description we emit here.
+ In any case, the code to store the whole arg to memory is often
+ more efficient than code to store pieces, and we know that space
+ is available in the right place for the whole arg. */
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
+
+ i = 0;
+ do
{
- if (align_words < GP_ARG_NUM_REG - 1)
- return gen_rtx_PARALLEL (DImode,
- gen_rtvec (2,
- gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_REG (SImode,
- GP_ARG_MIN_REG
- + align_words),
- const0_rtx),
- gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_REG (SImode,
- GP_ARG_MIN_REG
- + align_words + 1),
- GEN_INT (4))));
- else if (align_words == GP_ARG_NUM_REG - 1)
- return gen_rtx_PARALLEL (DImode,
- gen_rtvec (2,
- gen_rtx_EXPR_LIST (VOIDmode,
- NULL_RTX, const0_rtx),
- gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_REG (SImode,
- GP_ARG_MIN_REG
- + align_words),
- const0_rtx)));
- }
- else if (mode == BLKmode && align_words <= (GP_ARG_NUM_REG - 1))
- {
- int k;
- int size = int_size_in_bytes (type);
- int no_units = ((size - 1) / 4) + 1;
- int max_no_words = GP_ARG_NUM_REG - align_words;
- int rtlvec_len = no_units < max_no_words ? no_units : max_no_words;
- rtx *rtlvec = (rtx *) alloca (rtlvec_len * sizeof (rtx));
-
- memset ((char *) rtlvec, 0, rtlvec_len * sizeof (rtx));
-
- for (k=0; k < rtlvec_len; k++)
- rtlvec[k] = gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_REG (SImode,
- GP_ARG_MIN_REG
- + align_words + k),
- k == 0 ? const0_rtx : GEN_INT (k*4));
-
- return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k, rtlvec));
- }
+ rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
+ rtx off = GEN_INT (i++ * 4);
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
+ }
+ while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
- return NULL_RTX;
+ return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
/* Determine where to put an argument to a function.
@@ -4451,8 +4421,8 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
{
/* Vector parameters to varargs functions under AIX or Darwin
get passed in memory and possibly also in GPRs. */
- int align, align_words;
- enum machine_mode part_mode = mode;
+ int align, align_words, n_words;
+ enum machine_mode part_mode;
/* Vector parameters must be 16-byte aligned. This places them at
2 mod 4 in terms of words in 32-bit mode, since the parameter
@@ -4468,15 +4438,19 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
/* Out of registers? Memory, then. */
if (align_words >= GP_ARG_NUM_REG)
return NULL_RTX;
-
+
+ if (TARGET_32BIT && TARGET_POWERPC64)
+ return rs6000_mixed_function_arg (mode, type, align_words);
+
/* The vector value goes in GPRs. Only the part of the
value in GPRs is reported here. */
- if (align_words + CLASS_MAX_NREGS (mode, GENERAL_REGS)
- > GP_ARG_NUM_REG)
+ part_mode = mode;
+ n_words = rs6000_arg_size (mode, type);
+ if (align_words + n_words > GP_ARG_NUM_REG)
/* Fortunately, there are only two possibilities, the value
is either wholly in GPRs or half in GPRs and half not. */
part_mode = DImode;
-
+
return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
}
}
@@ -4504,10 +4478,13 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
gregno += (1 - gregno) & 1;
/* Multi-reg args are not split between registers and stack. */
- if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
- return gen_rtx_REG (mode, gregno);
- else
+ if (gregno + n_words - 1 > GP_ARG_MAX_REG)
return NULL_RTX;
+
+ if (TARGET_32BIT && TARGET_POWERPC64)
+ return rs6000_mixed_function_arg (mode, type,
+ gregno - GP_ARG_MIN_REG);
+ return gen_rtx_REG (mode, gregno);
}
}
else
@@ -4517,75 +4494,82 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
if (USE_FP_FOR_ARG_P (cum, mode, type))
{
- rtx fpr[2];
- rtx *r;
+ rtx rvec[GP_ARG_NUM_REG + 1];
+ rtx r;
+ int k;
bool needs_psave;
enum machine_mode fmode = mode;
- int n;
unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
{
- /* Long double split over regs and memory. */
- if (fmode == TFmode)
- fmode = DFmode;
-
/* Currently, we only ever need one reg here because complex
doubles are split. */
- if (cum->fregno != FP_ARG_MAX_REG - 1)
+ if (cum->fregno != FP_ARG_MAX_REG || fmode != TFmode)
abort ();
+
+ /* Long double split over regs and memory. */
+ fmode = DFmode;
}
- fpr[1] = gen_rtx_REG (fmode, cum->fregno);
/* Do we also need to pass this arg in the parameter save
area? */
needs_psave = (type
&& (cum->nargs_prototype <= 0
|| (DEFAULT_ABI == ABI_AIX
- && TARGET_XL_CALL
+ && TARGET_XL_COMPAT
&& align_words >= GP_ARG_NUM_REG)));
if (!needs_psave && mode == fmode)
- return fpr[1];
-
- if (TARGET_32BIT && TARGET_POWERPC64
- && mode == DFmode && cum->stdarg)
- return rs6000_mixed_function_arg (cum, mode, type, align_words);
-
- /* Describe where this piece goes. */
- r = fpr + 1;
- *r = gen_rtx_EXPR_LIST (VOIDmode, *r, const0_rtx);
- n = 1;
+ return gen_rtx_REG (fmode, cum->fregno);
+ k = 0;
if (needs_psave)
{
- /* Now describe the part that goes in gprs or the stack.
+ /* Describe the part that goes in gprs or the stack.
This piece must come first, before the fprs. */
- rtx reg = NULL_RTX;
if (align_words < GP_ARG_NUM_REG)
{
unsigned long n_words = rs6000_arg_size (mode, type);
- enum machine_mode rmode = mode;
-
- if (align_words + n_words > GP_ARG_NUM_REG)
- /* If this is partially on the stack, then we only
- include the portion actually in registers here.
- We know this can only be one register because
- complex doubles are splt. */
- rmode = Pmode;
- reg = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
+
+ if (align_words + n_words > GP_ARG_NUM_REG
+ || (TARGET_32BIT && TARGET_POWERPC64))
+ {
+ /* If this is partially on the stack, then we only
+ include the portion actually in registers here. */
+ enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
+ rtx off;
+ do
+ {
+ r = gen_rtx_REG (rmode,
+ GP_ARG_MIN_REG + align_words);
+ off = GEN_INT (k * GET_MODE_SIZE (rmode));
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
+ }
+ while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
+ }
+ else
+ {
+ /* The whole arg fits in gprs. */
+ r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
+ }
}
- *--r = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
- ++n;
+ else
+ /* It's entirely in memory. */
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
}
- return gen_rtx_PARALLEL (mode, gen_rtvec_v (n, r));
+ /* Describe where this piece goes in the fprs. */
+ r = gen_rtx_REG (fmode, cum->fregno);
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
+
+ return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
else if (align_words < GP_ARG_NUM_REG)
{
- if (TARGET_32BIT && TARGET_POWERPC64
- && (mode == DImode || mode == BLKmode))
- return rs6000_mixed_function_arg (cum, mode, type, align_words);
+ if (TARGET_32BIT && TARGET_POWERPC64)
+ return rs6000_mixed_function_arg (mode, type, align_words);
return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
}
@@ -4594,15 +4578,20 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
}
}
-/* For an arg passed partly in registers and partly in memory,
- this is the number of registers used.
- For args passed entirely in registers or entirely in memory, zero. */
+/* For an arg passed partly in registers and partly in memory, this is
+ the number of registers used. For args passed entirely in registers
+ or entirely in memory, zero. When an arg is described by a PARALLEL,
+ perhaps using more than one register type, this function returns the
+ number of registers used by the first element of the PARALLEL. */
int
function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
tree type, int named)
{
int ret = 0;
+ int align;
+ int parm_offset;
+ int align_words;
if (DEFAULT_ABI == ABI_V4)
return 0;
@@ -4611,17 +4600,29 @@ function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
&& cum->nargs_prototype >= 0)
return 0;
- if (USE_FP_FOR_ARG_P (cum, mode, type))
+ align = function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
+ parm_offset = TARGET_32BIT ? 2 : 0;
+ align_words = cum->words + ((parm_offset - cum->words) & align);
+
+ if (USE_FP_FOR_ARG_P (cum, mode, type)
+ /* If we are passing this arg in gprs as well, then this function
+ should return the number of gprs (or memory) partially passed,
+ *not* the number of fprs. */
+ && !(type
+ && (cum->nargs_prototype <= 0
+ || (DEFAULT_ABI == ABI_AIX
+ && TARGET_XL_COMPAT
+ && align_words >= GP_ARG_NUM_REG))))
{
if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3) > FP_ARG_MAX_REG + 1)
- ret = FP_ARG_MAX_REG - cum->fregno;
+ ret = FP_ARG_MAX_REG + 1 - cum->fregno;
else if (cum->nargs_prototype >= 0)
return 0;
}
- if (cum->words < GP_ARG_NUM_REG
- && GP_ARG_NUM_REG < cum->words + rs6000_arg_size (mode, type))
- ret = GP_ARG_NUM_REG - cum->words;
+ if (align_words < GP_ARG_NUM_REG
+ && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
+ ret = GP_ARG_NUM_REG - align_words;
if (ret != 0 && TARGET_DEBUG_ARG)
fprintf (stderr, "function_arg_partial_nregs: %d\n", ret);
@@ -4776,6 +4777,7 @@ setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
{
mem = gen_rtx_MEM (DFmode, plus_constant (save_area, off));
set_mem_alias_set (mem, set);
+ set_mem_align (mem, GET_MODE_ALIGNMENT (DFmode));
emit_move_insn (mem, gen_rtx_REG (DFmode, fregno));
fregno++;
off += 8;
@@ -5695,6 +5697,7 @@ rs6000_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
|| icode == CODE_FOR_spe_evsrwiu)
{
/* Only allow 5-bit unsigned literals. */
+ STRIP_NOPS (arg1);
if (TREE_CODE (arg1) != INTEGER_CST
|| TREE_INT_CST_LOW (arg1) & ~0x1f)
{
@@ -6120,6 +6123,8 @@ altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
|| arg2 == error_mark_node)
return const0_rtx;
+ *expandedp = true;
+ STRIP_NOPS (arg2);
if (TREE_CODE (arg2) != INTEGER_CST
|| TREE_INT_CST_LOW (arg2) & ~0x3)
{
@@ -6136,7 +6141,6 @@ altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
if (pat != 0)
emit_insn (pat);
- *expandedp = true;
return NULL_RTX;
}
@@ -6226,6 +6230,7 @@ altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
case ALTIVEC_BUILTIN_DSS:
icode = CODE_FOR_altivec_dss;
arg0 = TREE_VALUE (arglist);
+ STRIP_NOPS (arg0);
op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
mode0 = insn_data[icode].operand[0].mode;
@@ -6245,6 +6250,15 @@ altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
emit_insn (gen_altivec_dss (op0));
return NULL_RTX;
+
+ case ALTIVEC_BUILTIN_COMPILETIME_ERROR:
+ arg0 = TREE_VALUE (arglist);
+ while (TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == ADDR_EXPR)
+ arg0 = TREE_OPERAND (arg0, 0);
+ error ("invalid parameter combination for `%s' AltiVec intrinsic",
+ TREE_STRING_POINTER (arg0));
+
+ return const0_rtx;
}
/* Expand abs* operations. */
@@ -6684,6 +6698,73 @@ rs6000_init_builtins (void)
opaque_V2SF_type_node = copy_node (V2SF_type_node);
opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
+ /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
+ types, especially in C++ land. Similarly, 'vector pixel' is distinct from
+ 'vector unsigned short'. */
+
+ bool_char_type_node = copy_node (unsigned_intQI_type_node);
+ TYPE_MAIN_VARIANT (bool_char_type_node) = bool_char_type_node;
+ bool_short_type_node = copy_node (unsigned_intHI_type_node);
+ TYPE_MAIN_VARIANT (bool_short_type_node) = bool_short_type_node;
+ bool_int_type_node = copy_node (unsigned_intSI_type_node);
+ TYPE_MAIN_VARIANT (bool_int_type_node) = bool_int_type_node;
+ pixel_type_node = copy_node (unsigned_intHI_type_node);
+ TYPE_MAIN_VARIANT (pixel_type_node) = pixel_type_node;
+
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__bool char"),
+ bool_char_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__bool short"),
+ bool_short_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__bool int"),
+ bool_int_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__pixel"),
+ pixel_type_node));
+
+ bool_V16QI_type_node = make_vector (V16QImode, bool_char_type_node, 1);
+ bool_V8HI_type_node = make_vector (V8HImode, bool_short_type_node, 1);
+ bool_V4SI_type_node = make_vector (V4SImode, bool_int_type_node, 1);
+ pixel_V8HI_type_node = make_vector (V8HImode, pixel_type_node, 1);
+
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector unsigned char"),
+ unsigned_V16QI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector signed char"),
+ V16QI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector __bool char"),
+ bool_V16QI_type_node));
+
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector unsigned short"),
+ unsigned_V8HI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector signed short"),
+ V8HI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector __bool short"),
+ bool_V8HI_type_node));
+
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector unsigned int"),
+ unsigned_V4SI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector signed int"),
+ V4SI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector __bool int"),
+ bool_V4SI_type_node));
+
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector float"),
+ V4SF_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector __pixel"),
+ pixel_V8HI_type_node));
+
if (TARGET_SPE)
spe_init_builtins ();
if (TARGET_ALTIVEC)
@@ -6989,8 +7070,8 @@ altivec_init_builtins (void)
= build_function_type (V8HI_type_node, void_list_node);
tree void_ftype_void
= build_function_type (void_type_node, void_list_node);
- tree void_ftype_qi
- = build_function_type_list (void_type_node, char_type_node, NULL_TREE);
+ tree void_ftype_int
+ = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
tree v16qi_ftype_long_pcvoid
= build_function_type_list (V16QI_type_node,
@@ -7034,10 +7115,13 @@ altivec_init_builtins (void)
= build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
tree v4sf_ftype_v4sf
= build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
- tree void_ftype_pcvoid_int_char
+ tree void_ftype_pcvoid_int_int
= build_function_type_list (void_type_node,
pcvoid_type_node, integer_type_node,
- char_type_node, NULL_TREE);
+ integer_type_node, NULL_TREE);
+ tree int_ftype_pcchar
+ = build_function_type_list (integer_type_node,
+ pcchar_type_node, NULL_TREE);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4sf", v4sf_ftype_pcfloat,
ALTIVEC_BUILTIN_LD_INTERNAL_4sf);
@@ -7058,7 +7142,7 @@ altivec_init_builtins (void)
def_builtin (MASK_ALTIVEC, "__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
- def_builtin (MASK_ALTIVEC, "__builtin_altivec_dss", void_ftype_qi, ALTIVEC_BUILTIN_DSS);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
@@ -7072,10 +7156,14 @@ altivec_init_builtins (void)
def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
+ /* See altivec.h for usage of "__builtin_altivec_compiletime_error". */
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_compiletime_error", int_ftype_pcchar,
+ ALTIVEC_BUILTIN_COMPILETIME_ERROR);
+
/* Add the DST variants. */
d = (struct builtin_description *) bdesc_dst;
for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
- def_builtin (d->mask, d->name, void_ftype_pcvoid_int_char, d->code);
+ def_builtin (d->mask, d->name, void_ftype_pcvoid_int_int, d->code);
/* Initialize the predicates. */
dp = (struct builtin_description_predicates *) bdesc_altivec_preds;
@@ -7160,12 +7248,12 @@ rs6000_common_init_builtins (void)
= build_function_type_list (V16QI_type_node,
V16QI_type_node, V16QI_type_node,
V16QI_type_node, NULL_TREE);
- tree v4si_ftype_char
- = build_function_type_list (V4SI_type_node, char_type_node, NULL_TREE);
- tree v8hi_ftype_char
- = build_function_type_list (V8HI_type_node, char_type_node, NULL_TREE);
- tree v16qi_ftype_char
- = build_function_type_list (V16QI_type_node, char_type_node, NULL_TREE);
+ tree v4si_ftype_int
+ = build_function_type_list (V4SI_type_node, integer_type_node, NULL_TREE);
+ tree v8hi_ftype_int
+ = build_function_type_list (V8HI_type_node, integer_type_node, NULL_TREE);
+ tree v16qi_ftype_int
+ = build_function_type_list (V16QI_type_node, integer_type_node, NULL_TREE);
tree v8hi_ftype_v16qi
= build_function_type_list (V8HI_type_node, V16QI_type_node, NULL_TREE);
tree v4sf_ftype_v4sf
@@ -7223,37 +7311,37 @@ rs6000_common_init_builtins (void)
tree v4si_ftype_v4si_v4si
= build_function_type_list (V4SI_type_node,
V4SI_type_node, V4SI_type_node, NULL_TREE);
- tree v4sf_ftype_v4si_char
+ tree v4sf_ftype_v4si_int
= build_function_type_list (V4SF_type_node,
- V4SI_type_node, char_type_node, NULL_TREE);
- tree v4si_ftype_v4sf_char
+ V4SI_type_node, integer_type_node, NULL_TREE);
+ tree v4si_ftype_v4sf_int
= build_function_type_list (V4SI_type_node,
- V4SF_type_node, char_type_node, NULL_TREE);
- tree v4si_ftype_v4si_char
+ V4SF_type_node, integer_type_node, NULL_TREE);
+ tree v4si_ftype_v4si_int
= build_function_type_list (V4SI_type_node,
- V4SI_type_node, char_type_node, NULL_TREE);
- tree v8hi_ftype_v8hi_char
+ V4SI_type_node, integer_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi_int
= build_function_type_list (V8HI_type_node,
- V8HI_type_node, char_type_node, NULL_TREE);
- tree v16qi_ftype_v16qi_char
+ V8HI_type_node, integer_type_node, NULL_TREE);
+ tree v16qi_ftype_v16qi_int
= build_function_type_list (V16QI_type_node,
- V16QI_type_node, char_type_node, NULL_TREE);
- tree v16qi_ftype_v16qi_v16qi_char
+ V16QI_type_node, integer_type_node, NULL_TREE);
+ tree v16qi_ftype_v16qi_v16qi_int
= build_function_type_list (V16QI_type_node,
V16QI_type_node, V16QI_type_node,
- char_type_node, NULL_TREE);
- tree v8hi_ftype_v8hi_v8hi_char
+ integer_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi_v8hi_int
= build_function_type_list (V8HI_type_node,
V8HI_type_node, V8HI_type_node,
- char_type_node, NULL_TREE);
- tree v4si_ftype_v4si_v4si_char
+ integer_type_node, NULL_TREE);
+ tree v4si_ftype_v4si_v4si_int
= build_function_type_list (V4SI_type_node,
V4SI_type_node, V4SI_type_node,
- char_type_node, NULL_TREE);
- tree v4sf_ftype_v4sf_v4sf_char
+ integer_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf_v4sf_int
= build_function_type_list (V4SF_type_node,
V4SF_type_node, V4SF_type_node,
- char_type_node, NULL_TREE);
+ integer_type_node, NULL_TREE);
tree v4sf_ftype_v4sf_v4sf
= build_function_type_list (V4SF_type_node,
V4SF_type_node, V4SF_type_node, NULL_TREE);
@@ -7396,22 +7484,22 @@ rs6000_common_init_builtins (void)
/* vchar, vchar, vchar, 4 bit literal. */
else if (mode0 == V16QImode && mode1 == mode0 && mode2 == mode0
&& mode3 == QImode)
- type = v16qi_ftype_v16qi_v16qi_char;
+ type = v16qi_ftype_v16qi_v16qi_int;
/* vshort, vshort, vshort, 4 bit literal. */
else if (mode0 == V8HImode && mode1 == mode0 && mode2 == mode0
&& mode3 == QImode)
- type = v8hi_ftype_v8hi_v8hi_char;
+ type = v8hi_ftype_v8hi_v8hi_int;
/* vint, vint, vint, 4 bit literal. */
else if (mode0 == V4SImode && mode1 == mode0 && mode2 == mode0
&& mode3 == QImode)
- type = v4si_ftype_v4si_v4si_char;
+ type = v4si_ftype_v4si_v4si_int;
/* vfloat, vfloat, vfloat, 4 bit literal. */
else if (mode0 == V4SFmode && mode1 == mode0 && mode2 == mode0
&& mode3 == QImode)
- type = v4sf_ftype_v4sf_v4sf_char;
+ type = v4sf_ftype_v4sf_v4sf_int;
else
abort ();
@@ -7500,23 +7588,23 @@ rs6000_common_init_builtins (void)
/* vint, vint, 5 bit literal. */
else if (mode0 == V4SImode && mode1 == V4SImode && mode2 == QImode)
- type = v4si_ftype_v4si_char;
+ type = v4si_ftype_v4si_int;
/* vshort, vshort, 5 bit literal. */
else if (mode0 == V8HImode && mode1 == V8HImode && mode2 == QImode)
- type = v8hi_ftype_v8hi_char;
+ type = v8hi_ftype_v8hi_int;
/* vchar, vchar, 5 bit literal. */
else if (mode0 == V16QImode && mode1 == V16QImode && mode2 == QImode)
- type = v16qi_ftype_v16qi_char;
+ type = v16qi_ftype_v16qi_int;
/* vfloat, vint, 5 bit literal. */
else if (mode0 == V4SFmode && mode1 == V4SImode && mode2 == QImode)
- type = v4sf_ftype_v4si_char;
+ type = v4sf_ftype_v4si_int;
/* vint, vfloat, 5 bit literal. */
else if (mode0 == V4SImode && mode1 == V4SFmode && mode2 == QImode)
- type = v4si_ftype_v4sf_char;
+ type = v4si_ftype_v4sf_int;
else if (mode0 == V2SImode && mode1 == SImode && mode2 == SImode)
type = v2si_ftype_int_int;
@@ -7569,11 +7657,11 @@ rs6000_common_init_builtins (void)
mode1 = insn_data[d->icode].operand[1].mode;
if (mode0 == V4SImode && mode1 == QImode)
- type = v4si_ftype_char;
+ type = v4si_ftype_int;
else if (mode0 == V8HImode && mode1 == QImode)
- type = v8hi_ftype_char;
+ type = v8hi_ftype_int;
else if (mode0 == V16QImode && mode1 == QImode)
- type = v16qi_ftype_char;
+ type = v16qi_ftype_int;
else if (mode0 == V4SFmode && mode1 == V4SFmode)
type = v4sf_ftype_v4sf;
else if (mode0 == V8HImode && mode1 == V16QImode)
@@ -7614,11 +7702,21 @@ rs6000_init_libfuncs (void)
set_conv_libfunc (ufix_optab, SImode, TFmode, "_quitrunc");
}
- /* Standard AIX/Darwin/64-bit SVR4 quad floating point routines. */
- set_optab_libfunc (add_optab, TFmode, "_xlqadd");
- set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
- set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
- set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
+ /* AIX/Darwin/64-bit Linux quad floating point routines. */
+ if (!TARGET_XL_COMPAT)
+ {
+ set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
+ set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
+ set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
+ set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
+ }
+ else
+ {
+ set_optab_libfunc (add_optab, TFmode, "_xlqadd");
+ set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
+ set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
+ set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
+ }
}
else
{
@@ -8982,12 +9080,12 @@ print_operand (FILE *file, rtx x, int code)
return;
case 'D':
- /* Like 'J' but get to the GT bit. */
+ /* Like 'J' but get to the EQ bit. */
if (GET_CODE (x) != REG)
abort ();
- /* Bit 1 is GT bit. */
- i = 4 * (REGNO (x) - CR0_REGNO) + 1;
+ /* Bit 1 is EQ bit. */
+ i = 4 * (REGNO (x) - CR0_REGNO) + 2;
/* If we want bit 31, write a shift count of zero, not 32. */
fprintf (file, "%d", i == 31 ? 0 : i + 1);
@@ -9182,12 +9280,12 @@ print_operand (FILE *file, rtx x, int code)
case 'P':
/* The operand must be an indirect memory reference. The result
- is the register number. */
+ is the register name. */
if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
|| REGNO (XEXP (x, 0)) >= 32)
output_operand_lossage ("invalid %%P value");
else
- fprintf (file, "%d", REGNO (XEXP (x, 0)));
+ fprintf (file, "%s", reg_names[REGNO (XEXP (x, 0))]);
return;
case 'q':
@@ -9659,7 +9757,7 @@ rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
#ifdef RELOCATABLE_NEEDS_FIXUP
/* Special handling for SI values. */
- if (size == 4 && aligned_p)
+ if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
{
extern int in_toc_section (void);
static int recurse = 0;
@@ -9858,10 +9956,34 @@ rs6000_generate_compare (enum rtx_code code)
emit_insn (cmp);
}
else
- emit_insn (gen_rtx_SET (VOIDmode, compare_result,
- gen_rtx_COMPARE (comp_mode,
- rs6000_compare_op0,
- rs6000_compare_op1)));
+ {
+ /* Generate XLC-compatible TFmode compare as PARALLEL with extra
+ CLOBBERs to match cmptf_internal2 pattern. */
+ if (comp_mode == CCFPmode && TARGET_XL_COMPAT
+ && GET_MODE (rs6000_compare_op0) == TFmode
+ && (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (9,
+ gen_rtx_SET (VOIDmode,
+ compare_result,
+ gen_rtx_COMPARE (comp_mode,
+ rs6000_compare_op0,
+ rs6000_compare_op1)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)))));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, compare_result,
+ gen_rtx_COMPARE (comp_mode,
+ rs6000_compare_op0,
+ rs6000_compare_op1)));
+ }
/* Some kinds of FP comparisons need an OR operation;
under flag_finite_math_only we don't bother. */
@@ -9929,9 +10051,9 @@ rs6000_emit_sCOND (enum rtx_code code, rtx result)
abort ();
if (cond_code == NE)
- emit_insn (gen_e500_flip_gt_bit (t, t));
+ emit_insn (gen_e500_flip_eq_bit (t, t));
- emit_insn (gen_move_from_CR_gt_bit (result, t));
+ emit_insn (gen_move_from_CR_eq_bit (result, t));
return;
}
@@ -10112,9 +10234,9 @@ output_cbranch (rtx op, const char *label, int reversed, rtx insn)
return string;
}
-/* Return the string to flip the GT bit on a CR. */
+/* Return the string to flip the EQ bit on a CR. */
char *
-output_e500_flip_gt_bit (rtx dst, rtx src)
+output_e500_flip_eq_bit (rtx dst, rtx src)
{
static char string[64];
int a, b;
@@ -10123,9 +10245,9 @@ output_e500_flip_gt_bit (rtx dst, rtx src)
|| GET_CODE (src) != REG || ! CR_REGNO_P (REGNO (src)))
abort ();
- /* GT bit. */
- a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
- b = 4 * (REGNO (src) - CR0_REGNO) + 1;
+ /* EQ bit. */
+ a = 4 * (REGNO (dst) - CR0_REGNO) + 2;
+ b = 4 * (REGNO (src) - CR0_REGNO) + 2;
sprintf (string, "crnot %d,%d", a, b);
return string;
@@ -10435,22 +10557,27 @@ rs6000_split_multireg_move (rtx dst, rtx src)
: gen_adddi3 (breg, breg, delta_rtx));
src = gen_rtx_MEM (mode, breg);
}
+ else if (! offsettable_memref_p (src))
+ {
+ rtx newsrc, basereg;
+ basereg = gen_rtx_REG (Pmode, reg);
+ emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
+ newsrc = gen_rtx_MEM (GET_MODE (src), basereg);
+ MEM_COPY_ATTRIBUTES (newsrc, src);
+ src = newsrc;
+ }
- /* We have now address involving an base register only.
- If we use one of the registers to address memory,
- we have change that register last. */
-
- breg = (GET_CODE (XEXP (src, 0)) == PLUS
- ? XEXP (XEXP (src, 0), 0)
- : XEXP (src, 0));
-
- if (!REG_P (breg))
- abort();
+ breg = XEXP (src, 0);
+ if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
+ breg = XEXP (breg, 0);
- if (REGNO (breg) >= REGNO (dst)
+ /* If the base register we are using to address memory is
+ also a destination reg, then change that register last. */
+ if (REG_P (breg)
+ && REGNO (breg) >= REGNO (dst)
&& REGNO (breg) < REGNO (dst) + nregs)
j = REGNO (breg) - REGNO (dst);
- }
+ }
if (GET_CODE (dst) == MEM && INT_REGNO_P (reg))
{
@@ -10482,6 +10609,8 @@ rs6000_split_multireg_move (rtx dst, rtx src)
: gen_adddi3 (breg, breg, delta_rtx));
dst = gen_rtx_MEM (mode, breg);
}
+ else if (! offsettable_memref_p (dst))
+ abort ();
}
for (i = 0; i < nregs; i++)
@@ -10491,7 +10620,7 @@ rs6000_split_multireg_move (rtx dst, rtx src)
if (j == nregs)
j = 0;
- /* If compiler already emited move of first word by
+ /* If compiler already emitted move of first word by
store with update, no need to do anything. */
if (j == 0 && used_update)
continue;
@@ -10523,7 +10652,8 @@ first_reg_to_save (void)
&& (! call_used_regs[first_reg]
|| (first_reg == RS6000_PIC_OFFSET_TABLE_REGNUM
&& ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
- || (DEFAULT_ABI == ABI_DARWIN && flag_pic)))))
+ || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
+ || (TARGET_TOC && TARGET_MINIMAL_TOC)))))
break;
#if TARGET_MACHO
@@ -10719,6 +10849,7 @@ rs6000_stack_info (void)
rs6000_stack_t *info_ptr = &info;
int reg_size = TARGET_32BIT ? 4 : 8;
int ehrd_size;
+ int save_align;
HOST_WIDE_INT non_fixed_size;
/* Zero all fields portably. */
@@ -10936,6 +11067,7 @@ rs6000_stack_info (void)
break;
}
+ save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
+ info_ptr->gp_size
+ info_ptr->altivec_size
@@ -10947,8 +11079,7 @@ rs6000_stack_info (void)
+ info_ptr->lr_size
+ info_ptr->vrsave_size
+ info_ptr->toc_size,
- (TARGET_ALTIVEC_ABI || ABI_DARWIN)
- ? 16 : 8);
+ save_align);
non_fixed_size = (info_ptr->vars_size
+ info_ptr->parm_size
@@ -11342,7 +11473,6 @@ rs6000_emit_load_toc_table (int fromprolog)
rtx temp0 = (fromprolog
? gen_rtx_REG (Pmode, 0)
: gen_reg_rtx (Pmode));
- rtx symF;
/* possibly create the toc section */
if (! toc_initialized)
@@ -11353,7 +11483,7 @@ rs6000_emit_load_toc_table (int fromprolog)
if (fromprolog)
{
- rtx symL;
+ rtx symF, symL;
ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
@@ -11371,14 +11501,9 @@ rs6000_emit_load_toc_table (int fromprolog)
else
{
rtx tocsym;
- static int reload_toc_labelno = 0;
tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
-
- ASM_GENERATE_INTERNAL_LABEL (buf, "LCG", reload_toc_labelno++);
- symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
-
- emit_insn (gen_load_toc_v4_PIC_1b (tempLR, symF, tocsym));
+ emit_insn (gen_load_toc_v4_PIC_1b (tempLR, tocsym));
emit_move_insn (dest, tempLR);
emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
}
@@ -12007,8 +12132,10 @@ rs6000_emit_prologue (void)
rtx reg, mem, vrsave;
int offset;
- /* Get VRSAVE onto a GPR. */
- reg = gen_rtx_REG (SImode, 12);
+ /* Get VRSAVE onto a GPR. Note that ABI_V4 might be using r12
+ as frame_reg_rtx and r11 as the static chain pointer for
+ nested functions. */
+ reg = gen_rtx_REG (SImode, 0);
vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
if (TARGET_MACHO)
emit_insn (gen_get_vrsave_internal (reg));
@@ -12117,7 +12244,10 @@ rs6000_emit_prologue (void)
int i;
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
if ((regs_ever_live[info->first_gp_reg_save+i]
- && ! call_used_regs[info->first_gp_reg_save+i])
+ && (! call_used_regs[info->first_gp_reg_save+i]
+ || (i+info->first_gp_reg_save
+ == RS6000_PIC_OFFSET_TABLE_REGNUM
+ && TARGET_TOC && TARGET_MINIMAL_TOC)))
|| (i+info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
&& ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
|| (DEFAULT_ABI == ABI_DARWIN && flag_pic))))
@@ -12566,7 +12696,9 @@ rs6000_emit_epilogue (int sibcall)
else
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
if ((regs_ever_live[info->first_gp_reg_save+i]
- && ! call_used_regs[info->first_gp_reg_save+i])
+ && (! call_used_regs[info->first_gp_reg_save+i]
+ || (i+info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
+ && TARGET_TOC && TARGET_MINIMAL_TOC)))
|| (i+info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
&& ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
|| (DEFAULT_ABI == ABI_DARWIN && flag_pic))))
@@ -14845,11 +14977,117 @@ rs6000_initialize_trampoline (rtx addr, rtx fnaddr, rtx cxt)
const struct attribute_spec rs6000_attribute_table[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+ { "altivec", 1, 1, false, true, false, rs6000_handle_altivec_attribute },
{ "longcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
{ "shortcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
{ NULL, 0, 0, false, false, false, NULL }
};
+/* Handle the "altivec" attribute. The attribute may have
+ arguments as follows:
+
+ __attribute__((altivec(vector__)))
+ __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
+ __attribute__((altivec(bool__))) (always followed by 'unsigned')
+
+ and may appear more than once (e.g., 'vector bool char') in a
+ given declaration. */
+
+static tree
+rs6000_handle_altivec_attribute (tree *node, tree name, tree args,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ tree type = *node, result = NULL_TREE;
+ enum machine_mode mode;
+ int unsigned_p;
+ char altivec_type
+ = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
+ && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
+ ? *IDENTIFIER_POINTER (TREE_VALUE (args))
+ : '?');
+
+ while (POINTER_TYPE_P (type)
+ || TREE_CODE (type) == FUNCTION_TYPE
+ || TREE_CODE (type) == METHOD_TYPE
+ || TREE_CODE (type) == ARRAY_TYPE)
+ type = TREE_TYPE (type);
+
+ mode = TYPE_MODE (type);
+
+ if (rs6000_warn_altivec_long
+ && (type == long_unsigned_type_node || type == long_integer_type_node))
+ warning ("use of 'long' in AltiVec types is deprecated; use 'int'");
+
+ switch (altivec_type)
+ {
+ case 'v':
+ unsigned_p = TREE_UNSIGNED (type);
+ switch (mode)
+ {
+ case SImode:
+ result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
+ break;
+ case HImode:
+ result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
+ break;
+ case QImode:
+ result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
+ break;
+ case SFmode: result = V4SF_type_node; break;
+ /* If the user says 'vector int bool', we may be handed the 'bool'
+ attribute _before_ the 'vector' attribute, and so select the proper
+ type in the 'b' case below. */
+ case V4SImode: case V8HImode: case V16QImode: result = type;
+ default: break;
+ }
+ break;
+ case 'b':
+ switch (mode)
+ {
+ case SImode: case V4SImode: result = bool_V4SI_type_node; break;
+ case HImode: case V8HImode: result = bool_V8HI_type_node; break;
+ case QImode: case V16QImode: result = bool_V16QI_type_node;
+ default: break;
+ }
+ break;
+ case 'p':
+ switch (mode)
+ {
+ case V8HImode: result = pixel_V8HI_type_node;
+ default: break;
+ }
+ default: break;
+ }
+
+ if (result && result != type && TYPE_READONLY (type))
+ result = build_qualified_type (result, TYPE_QUAL_CONST);
+
+ *no_add_attrs = true; /* No need to hang on to the attribute. */
+
+ if (!result)
+ warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
+ else
+ *node = reconstruct_complex_type (*node, result);
+
+ return NULL_TREE;
+}
+
+/* AltiVec defines four built-in scalar types that serve as vector
+ elements; we must teach the compiler how to mangle them. */
+
+static const char *
+rs6000_mangle_fundamental_type (tree type)
+{
+ if (type == bool_char_type_node) return "U6__boolc";
+ if (type == bool_short_type_node) return "U6__bools";
+ if (type == pixel_type_node) return "u7__pixel";
+ if (type == bool_int_type_node) return "U6__booli";
+
+ /* For all other types, use normal C++ mangling. */
+ return NULL;
+}
+
/* Handle a "longcall" or "shortcall" attribute; arguments as in
struct attribute_spec.handler. */
@@ -14998,6 +15236,18 @@ rs6000_elf_in_small_data_p (tree decl)
if (rs6000_sdata == SDATA_NONE)
return false;
+ /* We want to merge strings, so we never consider them small data. */
+ if (TREE_CODE (decl) == STRING_CST)
+ return false;
+
+ /* Functions are never in the small data area. */
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ return false;
+
+ /* Thread-local vars can't go in the small data area. */
+ if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL (decl))
+ return false;
+
if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
{
const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
@@ -15539,6 +15789,13 @@ rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
}
ASM_OUTPUT_LABEL (file, name);
}
+
+static void
+rs6000_elf_end_indicate_exec_stack (void)
+{
+ if (TARGET_32BIT)
+ file_end_indicate_exec_stack ();
+}
#endif
#if TARGET_XCOFF
diff --git a/contrib/gcc/config/rs6000/rs6000.h b/contrib/gcc/config/rs6000/rs6000.h
index d3a66dd..f1ad896 100644
--- a/contrib/gcc/config/rs6000/rs6000.h
+++ b/contrib/gcc/config/rs6000/rs6000.h
@@ -258,7 +258,7 @@ extern int target_flags;
#define TARGET_POWERPC64 (target_flags & MASK_POWERPC64)
#endif
-#define TARGET_XL_CALL 0
+#define TARGET_XL_COMPAT 0
/* Run-time compilation parameters selecting different hardware subsets.
@@ -464,6 +464,9 @@ enum group_termination
{"longcall", &rs6000_longcall_switch, \
N_("Avoid all range limits on call instructions"), 0}, \
{"no-longcall", &rs6000_longcall_switch, "", 0}, \
+ {"warn-altivec-long", &rs6000_warn_altivec_long_switch, \
+ N_("Warn about deprecated 'vector long ...' AltiVec type usage"), 0}, \
+ {"no-warn-altivec-long", &rs6000_warn_altivec_long_switch, "", 0}, \
{"sched-costly-dep=", &rs6000_sched_costly_dep_str, \
N_("Determine which dependences between insns are considered costly"), 0}, \
{"insert-sched-nops=", &rs6000_sched_insert_nops_str, \
@@ -532,6 +535,9 @@ extern enum rs6000_dependence_cost rs6000_sched_costly_dep;
extern const char *rs6000_sched_insert_nops_str;
extern enum rs6000_nop_insertion rs6000_sched_insert_nops;
+extern int rs6000_warn_altivec_long;
+extern const char *rs6000_warn_altivec_long_switch;
+
/* Alignment options for fields in structures for sub-targets following
AIX-like ABI.
ALIGN_POWER word-aligns FP doubles (default AIX ABI).
@@ -1165,6 +1171,9 @@ extern enum rs6000_nop_insertion rs6000_sched_insert_nops;
= fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
= call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
= call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1; \
+ if (TARGET_TOC && TARGET_MINIMAL_TOC) \
+ fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
+ = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1; \
if (TARGET_ALTIVEC) \
global_regs[VSCR_REGNO] = 1; \
if (TARGET_SPE) \
@@ -2918,9 +2927,10 @@ enum rs6000_builtins
ALTIVEC_BUILTIN_ABS_V4SI,
ALTIVEC_BUILTIN_ABS_V4SF,
ALTIVEC_BUILTIN_ABS_V8HI,
- ALTIVEC_BUILTIN_ABS_V16QI
+ ALTIVEC_BUILTIN_ABS_V16QI,
+ ALTIVEC_BUILTIN_COMPILETIME_ERROR,
/* SPE builtins. */
- , SPE_BUILTIN_EVADDW,
+ SPE_BUILTIN_EVADDW,
SPE_BUILTIN_EVAND,
SPE_BUILTIN_EVANDC,
SPE_BUILTIN_EVDIVWS,
diff --git a/contrib/gcc/config/rs6000/rs6000.md b/contrib/gcc/config/rs6000/rs6000.md
index 0fc4c04..3b062ce 100644
--- a/contrib/gcc/config/rs6000/rs6000.md
+++ b/contrib/gcc/config/rs6000/rs6000.md
@@ -1,6 +1,6 @@
;; Machine description for IBM RISC System 6000 (POWER) for GNU C compiler
;; Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
-;; 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+;; 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
;; Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
;; This file is part of GCC.
@@ -50,7 +50,7 @@
(UNSPEC_TLSGOTTPREL 28)
(UNSPEC_TLSTLS 29)
(UNSPEC_FIX_TRUNC_TF 30) ; fadd, rounding towards zero
- (UNSPEC_MV_CR_GT 31) ; move_from_CR_gt_bit
+ (UNSPEC_MV_CR_EQ 31) ; move_from_CR_eq_bit
])
;;
@@ -2404,61 +2404,6 @@
}"
[(set_attr "length" "8")])
-(define_insn_and_split "*andsi3_internal7"
- [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
- (compare:CC (and:SI (match_operand:SI 0 "gpc_reg_operand" "r,r")
- (match_operand:SI 1 "mask_operand_wrap" "i,i"))
- (const_int 0)))
- (clobber (match_scratch:SI 3 "=r,r"))]
- "TARGET_POWERPC64"
- "#"
- "TARGET_POWERPC64"
- [(parallel [(set (match_dup 2)
- (compare:CC (and:SI (rotate:SI (match_dup 0) (match_dup 4))
- (match_dup 5))
- (const_int 0)))
- (clobber (match_dup 3))])]
- "
-{
- int mb = extract_MB (operands[1]);
- int me = extract_ME (operands[1]);
- operands[4] = GEN_INT (me + 1);
- operands[5] = GEN_INT (~((HOST_WIDE_INT) -1 << (33 + me - mb)));
-}"
- [(set_attr "type" "delayed_compare,compare")
- (set_attr "length" "4,8")])
-
-(define_insn_and_split "*andsi3_internal8"
- [(set (match_operand:CC 3 "cc_reg_operand" "=x,??y")
- (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
- (match_operand:SI 2 "mask_operand_wrap" "i,i"))
- (const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
- (and:SI (match_dup 1)
- (match_dup 2)))]
- "TARGET_POWERPC64"
- "#"
- "TARGET_POWERPC64"
- [(parallel [(set (match_dup 3)
- (compare:CC (and:SI (rotate:SI (match_dup 1) (match_dup 4))
- (match_dup 5))
- (const_int 0)))
- (set (match_dup 0)
- (and:SI (rotate:SI (match_dup 1) (match_dup 4))
- (match_dup 5)))])
- (set (match_dup 0)
- (rotate:SI (match_dup 0) (match_dup 6)))]
- "
-{
- int mb = extract_MB (operands[2]);
- int me = extract_ME (operands[2]);
- operands[4] = GEN_INT (me + 1);
- operands[6] = GEN_INT (32 - (me + 1));
- operands[5] = GEN_INT (~((HOST_WIDE_INT) -1 << (33 + me - mb)));
-}"
- [(set_attr "type" "delayed_compare,compare")
- (set_attr "length" "8,12")])
-
(define_expand "iorsi3"
[(set (match_operand:SI 0 "gpc_reg_operand" "")
(ior:SI (match_operand:SI 1 "gpc_reg_operand" "")
@@ -5245,16 +5190,18 @@
(define_expand "floatdisf2"
[(set (match_operand:SF 0 "gpc_reg_operand" "")
(float:SF (match_operand:DI 1 "gpc_reg_operand" "")))]
- "TARGET_64BIT && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS"
"
{
+ rtx val = operands[1];
if (!flag_unsafe_math_optimizations)
{
rtx label = gen_label_rtx ();
- emit_insn (gen_floatdisf2_internal2 (operands[1], label));
+ val = gen_reg_rtx (DImode);
+ emit_insn (gen_floatdisf2_internal2 (val, operands[1], label));
emit_label (label);
}
- emit_insn (gen_floatdisf2_internal1 (operands[0], operands[1]));
+ emit_insn (gen_floatdisf2_internal1 (operands[0], val));
DONE;
}")
@@ -5279,30 +5226,31 @@
;; by a bit that won't be lost at that stage, but is below the SFmode
;; rounding position.
(define_expand "floatdisf2_internal2"
- [(parallel [(set (match_dup 4)
- (compare:CC (and:DI (match_operand:DI 0 "" "")
- (const_int 2047))
- (const_int 0)))
- (set (match_dup 2) (and:DI (match_dup 0) (const_int 2047)))
- (clobber (match_scratch:CC 7 ""))])
- (set (match_dup 3) (ashiftrt:DI (match_dup 0) (const_int 53)))
- (set (match_dup 3) (plus:DI (match_dup 3) (const_int 1)))
- (set (pc) (if_then_else (eq (match_dup 4) (const_int 0))
- (label_ref (match_operand:DI 1 "" ""))
- (pc)))
- (set (match_dup 5) (compare:CCUNS (match_dup 3) (const_int 2)))
- (set (pc) (if_then_else (ltu (match_dup 5) (const_int 0))
- (label_ref (match_dup 1))
+ [(set (match_dup 3) (ashiftrt:DI (match_operand:DI 1 "" "")
+ (const_int 53)))
+ (parallel [(set (match_operand:DI 0 "" "") (and:DI (match_dup 1)
+ (const_int 2047)))
+ (clobber (scratch:CC))])
+ (set (match_dup 3) (plus:DI (match_dup 3)
+ (const_int 1)))
+ (set (match_dup 0) (plus:DI (match_dup 0)
+ (const_int 2047)))
+ (set (match_dup 4) (compare:CCUNS (match_dup 3)
+ (const_int 3)))
+ (set (match_dup 0) (ior:DI (match_dup 0)
+ (match_dup 1)))
+ (parallel [(set (match_dup 0) (and:DI (match_dup 0)
+ (const_int -2048)))
+ (clobber (scratch:CC))])
+ (set (pc) (if_then_else (geu (match_dup 4) (const_int 0))
+ (label_ref (match_operand:DI 2 "" ""))
(pc)))
- (set (match_dup 0) (xor:DI (match_dup 0) (match_dup 2)))
- (set (match_dup 0) (ior:DI (match_dup 0) (const_int 2048)))]
- "TARGET_64BIT && TARGET_HARD_FLOAT && TARGET_FPRS"
+ (set (match_dup 0) (match_dup 1))]
+ "TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS"
"
{
- operands[2] = gen_reg_rtx (DImode);
operands[3] = gen_reg_rtx (DImode);
- operands[4] = gen_reg_rtx (CCmode);
- operands[5] = gen_reg_rtx (CCUNSmode);
+ operands[4] = gen_reg_rtx (CCUNSmode);
}")
;; Define the DImode operations that can be done in a small number
@@ -8311,14 +8259,36 @@
DONE;
})
-(define_insn "trunctfdf2"
+(define_expand "trunctfdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "")))]
+ "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "")
+
+(define_insn_and_split "trunctfdf2_internal1"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f,?f")
+ (float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "0,f")))]
+ "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN) && !TARGET_XL_COMPAT
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "@
+ #
+ fmr %0,%1"
+ "&& reload_completed && REGNO (operands[0]) == REGNO (operands[1])"
+ [(const_int 0)]
+{
+ emit_note (NOTE_INSN_DELETED);
+ DONE;
+}
+ [(set_attr "type" "fp")])
+
+(define_insn "trunctfdf2_internal2"
[(set (match_operand:DF 0 "gpc_reg_operand" "=f")
(float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "f")))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN) && TARGET_XL_COMPAT
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
"fadd %0,%1,%L1"
- [(set_attr "type" "fp")
- (set_attr "length" "4")])
+ [(set_attr "type" "fp")])
(define_insn_and_split "trunctfsf2"
[(set (match_operand:SF 0 "gpc_reg_operand" "=f")
@@ -10081,11 +10051,10 @@
(define_insn "load_toc_v4_PIC_1b"
[(set (match_operand:SI 0 "register_operand" "=l")
- (match_operand:SI 1 "immediate_operand" "s"))
- (use (unspec [(match_dup 1) (match_operand 2 "immediate_operand" "s")]
+ (unspec:SI [(match_operand:SI 1 "immediate_operand" "s")]
UNSPEC_TOCPTR))]
"TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2"
- "bcl 20,31,%1+4\\n%1:\\n\\t.long %2-%1"
+ "bcl 20,31,$+8\\n\\t.long %1-$"
[(set_attr "type" "branch")
(set_attr "length" "8")])
@@ -11349,11 +11318,72 @@
[(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
(compare:CCFP (match_operand:TF 1 "gpc_reg_operand" "f")
(match_operand:TF 2 "gpc_reg_operand" "f")))]
- "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN) && !TARGET_XL_COMPAT
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
"fcmpu %0,%1,%2\;bne %0,$+8\;fcmpu %0,%L1,%L2"
[(set_attr "type" "fpcompare")
(set_attr "length" "12")])
+
+(define_insn_and_split "*cmptf_internal2"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (compare:CCFP (match_operand:TF 1 "gpc_reg_operand" "f")
+ (match_operand:TF 2 "gpc_reg_operand" "f")))
+ (clobber (match_scratch:DF 3 "=f"))
+ (clobber (match_scratch:DF 4 "=f"))
+ (clobber (match_scratch:DF 5 "=f"))
+ (clobber (match_scratch:DF 6 "=f"))
+ (clobber (match_scratch:DF 7 "=f"))
+ (clobber (match_scratch:DF 8 "=f"))
+ (clobber (match_scratch:DF 9 "=f"))
+ (clobber (match_scratch:DF 10 "=f"))]
+ "(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN) && TARGET_XL_COMPAT
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 3) (match_dup 13))
+ (set (match_dup 4) (match_dup 14))
+ (set (match_dup 9) (abs:DF (match_dup 5)))
+ (set (match_dup 0) (compare:CCFP (match_dup 9) (match_dup 3)))
+ (set (pc) (if_then_else (ne (match_dup 0) (const_int 0))
+ (label_ref (match_dup 11))
+ (pc)))
+ (set (match_dup 0) (compare:CCFP (match_dup 5) (match_dup 7)))
+ (set (pc) (label_ref (match_dup 12)))
+ (match_dup 11)
+ (set (match_dup 10) (minus:DF (match_dup 5) (match_dup 7)))
+ (set (match_dup 9) (minus:DF (match_dup 6) (match_dup 8)))
+ (set (match_dup 9) (plus:DF (match_dup 10) (match_dup 9)))
+ (set (match_dup 0) (compare:CCFP (match_dup 7) (match_dup 4)))
+ (match_dup 12)]
+{
+ REAL_VALUE_TYPE rv;
+ const int lo_word = FLOAT_WORDS_BIG_ENDIAN ? GET_MODE_SIZE (DFmode) : 0;
+ const int hi_word = FLOAT_WORDS_BIG_ENDIAN ? 0 : GET_MODE_SIZE (DFmode);
+
+ operands[5] = simplify_gen_subreg (DFmode, operands[1], TFmode, hi_word);
+ operands[6] = simplify_gen_subreg (DFmode, operands[1], TFmode, lo_word);
+ operands[7] = simplify_gen_subreg (DFmode, operands[2], TFmode, hi_word);
+ operands[8] = simplify_gen_subreg (DFmode, operands[2], TFmode, lo_word);
+ operands[11] = gen_label_rtx ();
+ operands[12] = gen_label_rtx ();
+ real_inf (&rv);
+ operands[13] = force_const_mem (DFmode,
+ CONST_DOUBLE_FROM_REAL_VALUE (rv, DFmode));
+ operands[14] = force_const_mem (DFmode,
+ CONST_DOUBLE_FROM_REAL_VALUE (dconst0,
+ DFmode));
+ if (TARGET_TOC)
+ {
+ operands[13] = gen_rtx_MEM (DFmode,
+ create_TOC_reference (XEXP (operands[13], 0)));
+ operands[14] = gen_rtx_MEM (DFmode,
+ create_TOC_reference (XEXP (operands[14], 0)));
+ set_mem_alias_set (operands[13], get_TOC_alias_set ());
+ set_mem_alias_set (operands[14], get_TOC_alias_set ());
+ RTX_UNCHANGING_P (operands[13]) = 1;
+ RTX_UNCHANGING_P (operands[14]) = 1;
+ }
+})
;; Now we have the scc insns. We can do some combinations because of the
;; way the machine works.
@@ -11376,10 +11406,10 @@
(const_string "mfcr")))
(set_attr "length" "12")])
-;; Same as above, but get the GT bit.
-(define_insn "move_from_CR_gt_bit"
+;; Same as above, but get the EQ bit.
+(define_insn "move_from_CR_eq_bit"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (unspec:SI [(match_operand 1 "cc_reg_operand" "y")] UNSPEC_MV_CR_GT))]
+ (unspec:SI [(match_operand 1 "cc_reg_operand" "y")] UNSPEC_MV_CR_EQ))]
"TARGET_E500"
"mfcr %0\;{rlinm|rlwinm} %0,%0,%D1,1"
[(set_attr "type" "mfcr")
diff --git a/contrib/gcc/config/rs6000/rtems.h b/contrib/gcc/config/rs6000/rtems.h
index 0245269..d83e1eb 100644
--- a/contrib/gcc/config/rs6000/rtems.h
+++ b/contrib/gcc/config/rs6000/rtems.h
@@ -1,5 +1,6 @@
/* Definitions for rtems targeting a PowerPC using elf.
- Copyright (C) 1996, 1997, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 2000, 2001, 2002, 2003, 2005
+ Free Software Foundation, Inc.
Contributed by Joel Sherrill (joel@OARcorp.com).
This file is part of GCC.
@@ -27,6 +28,7 @@
{ \
builtin_define_std ("PPC"); \
builtin_define ("__rtems__"); \
+ builtin_define ("__USE_INIT_FINI__"); \
builtin_assert ("system=rtems"); \
builtin_assert ("cpu=powerpc"); \
builtin_assert ("machine=powerpc"); \
@@ -36,3 +38,20 @@
#undef CPP_OS_DEFAULT_SPEC
#define CPP_OS_DEFAULT_SPEC "%(cpp_os_rtems)"
+
+#define CPP_OS_RTEMS_SPEC "\
+%{!mcpu*: %{!Dppc*: %{!Dmpc*: -Dmpc750} } }\
+%{mcpu=403: %{!Dppc*: %{!Dmpc*: -Dppc403} } } \
+%{mcpu=505: %{!Dppc*: %{!Dmpc*: -Dmpc505} } } \
+%{mcpu=601: %{!Dppc*: %{!Dmpc*: -Dppc601} } } \
+%{mcpu=602: %{!Dppc*: %{!Dmpc*: -Dppc602} } } \
+%{mcpu=603: %{!Dppc*: %{!Dmpc*: -Dppc603} } } \
+%{mcpu=603e: %{!Dppc*: %{!Dmpc*: -Dppc603e} } } \
+%{mcpu=604: %{!Dppc*: %{!Dmpc*: -Dmpc604} } } \
+%{mcpu=750: %{!Dppc*: %{!Dmpc*: -Dmpc750} } } \
+%{mcpu=821: %{!Dppc*: %{!Dmpc*: -Dmpc821} } } \
+%{mcpu=860: %{!Dppc*: %{!Dmpc*: -Dmpc860} } }"
+
+#undef SUBSUBTARGET_EXTRA_SPECS
+#define SUBSUBTARGET_EXTRA_SPECS \
+ { "cpp_os_rtems", CPP_OS_RTEMS_SPEC }
diff --git a/contrib/gcc/config/rs6000/spe.h b/contrib/gcc/config/rs6000/spe.h
index 1676516..878fc72 100644
--- a/contrib/gcc/config/rs6000/spe.h
+++ b/contrib/gcc/config/rs6000/spe.h
@@ -1088,4 +1088,23 @@ __ev_set_spefscr_frmc (int rnd)
__builtin_spe_mtspefscr (i);
}
+/* The SPE PIM says these are declared in <spe.h>, although they are
+ not provided by GCC: they must be taken from a separate
+ library. */
+extern short int atosfix16 (const char *);
+extern int atosfix32 (const char *);
+extern long long atosfix64 (const char *);
+
+extern unsigned short atoufix16 (const char *);
+extern unsigned int atoufix32 (const char *);
+extern unsigned long long atoufix64 (const char *);
+
+extern short int strtosfix16 (const char *, char **);
+extern int strtosfix32 (const char *, char **);
+extern long long strtosfix64 (const char *, char **);
+
+extern unsigned short int strtoufix16 (const char *, char **);
+extern unsigned int strtoufix32 (const char *, char **);
+extern unsigned long long strtoufix64 (const char *, char **);
+
#endif /* _SPE_H */
diff --git a/contrib/gcc/config/rs6000/spe.md b/contrib/gcc/config/rs6000/spe.md
index 5eb6302..306accd 100644
--- a/contrib/gcc/config/rs6000/spe.md
+++ b/contrib/gcc/config/rs6000/spe.md
@@ -2458,14 +2458,14 @@
;; FP comparison stuff.
;; Flip the GT bit.
-(define_insn "e500_flip_gt_bit"
+(define_insn "e500_flip_eq_bit"
[(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
(unspec:CCFP
[(match_operand:CCFP 1 "cc_reg_operand" "y")] 999))]
"!TARGET_FPRS && TARGET_HARD_FLOAT"
"*
{
- return output_e500_flip_gt_bit (operands[0], operands[1]);
+ return output_e500_flip_eq_bit (operands[0], operands[1]);
}"
[(set_attr "type" "cr_logical")])
diff --git a/contrib/gcc/config/rs6000/sysv4.h b/contrib/gcc/config/rs6000/sysv4.h
index ea149f0..57af869 100644
--- a/contrib/gcc/config/rs6000/sysv4.h
+++ b/contrib/gcc/config/rs6000/sysv4.h
@@ -1086,7 +1086,7 @@ extern int fixuplabelno;
#define LINK_START_FREEBSD_SPEC ""
#define LINK_OS_FREEBSD_SPEC "\
- %{p:%e`-p' not supported; use `-pg' and gprof(1)} \
+ %{p:%nconsider using `-pg' instead of `-p' with gprof(1)} \
%{Wl,*:%*} \
%{v:-V} \
%{assert*} %{R*} %{rpath*} %{defsym*} \
@@ -1115,8 +1115,9 @@ extern int fixuplabelno;
%{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
#endif
-#define ENDFILE_LINUX_SPEC "%{!shared:crtend.o%s} %{shared:crtendS.o%s} \
-%{mnewlib: ecrtn.o%s} %{!mnewlib: crtn.o%s}"
+#define ENDFILE_LINUX_SPEC "\
+%{shared|pie:crtendS.o%s;:crtend.o%s} \
+%{mnewlib:ecrtn.o%s;:crtn.o%s}"
#define LINK_START_LINUX_SPEC ""
@@ -1358,4 +1359,4 @@ ncrtn.o%s"
#define DOUBLE_INT_ASM_OP "\t.quad\t"
/* Generate entries in .fixup for relocatable addresses. */
-#define RELOCATABLE_NEEDS_FIXUP
+#define RELOCATABLE_NEEDS_FIXUP 1
diff --git a/contrib/gcc/config/rs6000/t-aix43 b/contrib/gcc/config/rs6000/t-aix43
index a716209..8c2592f 100644
--- a/contrib/gcc/config/rs6000/t-aix43
+++ b/contrib/gcc/config/rs6000/t-aix43
@@ -58,9 +58,12 @@ SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
SHLIB_INSTALL = $(INSTALL_DATA) @shlib_base_name@.a $$(DESTDIR)$$(slibdir)/
SHLIB_LIBS = -lc `case @shlib_base_name@ in *pthread*) echo -lpthread ;; esac`
SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
-SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver
+SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver $(srcdir)/config/rs6000/libgcc-ppc64.ver
SHLIB_NM_FLAGS = -Bpg -X32_64
+# GCC 128-bit long double support routines.
+LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/darwin-ldouble.c
+
# Either 32-bit and 64-bit objects in archives.
AR_FLAGS_FOR_TARGET = -X32_64
diff --git a/contrib/gcc/config/rs6000/t-aix52 b/contrib/gcc/config/rs6000/t-aix52
index bddcdb1..839bd0a 100644
--- a/contrib/gcc/config/rs6000/t-aix52
+++ b/contrib/gcc/config/rs6000/t-aix52
@@ -39,9 +39,12 @@ SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
SHLIB_INSTALL = $(INSTALL_DATA) @shlib_base_name@.a $$(DESTDIR)$$(slibdir)/
SHLIB_LIBS = -lc `case @shlib_base_name@ in *pthread*) echo -lpthread ;; esac`
SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
-SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver
+SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver $(srcdir)/config/rs6000/libgcc-ppc64.ver
SHLIB_NM_FLAGS = -Bpg -X32_64
+# GCC 128-bit long double support routines.
+LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/darwin-ldouble.c
+
# Either 32-bit and 64-bit objects in archives.
AR_FLAGS_FOR_TARGET = -X32_64
diff --git a/contrib/gcc/config/rs6000/t-linux64 b/contrib/gcc/config/rs6000/t-linux64
index 77ba93e..6d1e6f4 100644
--- a/contrib/gcc/config/rs6000/t-linux64
+++ b/contrib/gcc/config/rs6000/t-linux64
@@ -1,8 +1,9 @@
#rs6000/t-linux64
-LIB2FUNCS_EXTRA = tramp.S $(srcdir)/config/rs6000/ppc64-fp.c \
- $(srcdir)/config/rs6000/darwin-ldouble.c
+LIB2FUNCS_EXTRA = tramp.S $(srcdir)/config/rs6000/ppc64-fp.c
+LIB2FUNCS_STATIC_EXTRA = eabi.S $(srcdir)/config/rs6000/darwin-ldouble.c
+LIB2FUNCS_SHARED_EXTRA = $(srcdir)/config/rs6000/darwin-ldouble-shared.c
TARGET_LIBGCC2_CFLAGS = -mno-minimal-toc -fPIC -specs=bispecs
diff --git a/contrib/gcc/config/rs6000/t-newas b/contrib/gcc/config/rs6000/t-newas
index a26ce39..d5d03a1 100644
--- a/contrib/gcc/config/rs6000/t-newas
+++ b/contrib/gcc/config/rs6000/t-newas
@@ -27,6 +27,9 @@ MULTILIB_MATCHES = $(MULTILIB_MATCHES_FLOAT) \
mcpu?powerpc=mpowerpc-gpopt \
mcpu?powerpc=mpowerpc-gfxopt
+# GCC 128-bit long double support routines.
+LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/darwin-ldouble.c
+
# Aix 3.2.x needs milli.exp for -mcpu=common
EXTRA_PARTS = milli.exp
milli.exp: $(srcdir)/config/rs6000/milli.exp
diff --git a/contrib/gcc/config/rs6000/t-rtems b/contrib/gcc/config/rs6000/t-rtems
index 364a22d..05d9a26 100644
--- a/contrib/gcc/config/rs6000/t-rtems
+++ b/contrib/gcc/config/rs6000/t-rtems
@@ -33,6 +33,7 @@ MULTILIB_MATCHES = ${MULTILIB_MATCHES_ENDIAN} \
# Cpu-variants supporting new exception processing only
MULTILIB_NEW_EXCEPTIONS_ONLY = \
+*mcpu=505*/*D_OLD_EXCEPTIONS* \
*mcpu=604*/*D_OLD_EXCEPTIONS* \
*mcpu=750*/*D_OLD_EXCEPTIONS* \
*mcpu=821*/*D_OLD_EXCEPTIONS* \
diff --git a/contrib/gcc/config/s390/s390.md b/contrib/gcc/config/s390/s390.md
index ebb8b57..345a36a 100644
--- a/contrib/gcc/config/s390/s390.md
+++ b/contrib/gcc/config/s390/s390.md
@@ -1039,11 +1039,13 @@
})
(define_expand "reload_outti"
- [(parallel [(match_operand:TI 0 "memory_operand" "")
+ [(parallel [(match_operand:TI 0 "" "")
(match_operand:TI 1 "register_operand" "d")
(match_operand:DI 2 "register_operand" "=&a")])]
"TARGET_64BIT"
{
+ if (GET_CODE (operands[0]) != MEM)
+ abort ();
s390_load_address (operands[2], XEXP (operands[0], 0));
operands[0] = replace_equiv_address (operands[0], operands[2]);
emit_move_insn (operands[0], operands[1]);
@@ -1167,11 +1169,13 @@
})
(define_expand "reload_outdi"
- [(parallel [(match_operand:DI 0 "memory_operand" "")
+ [(parallel [(match_operand:DI 0 "" "")
(match_operand:DI 1 "register_operand" "d")
(match_operand:SI 2 "register_operand" "=&a")])]
"!TARGET_64BIT"
{
+ if (GET_CODE (operands[0]) != MEM)
+ abort ();
s390_load_address (operands[2], XEXP (operands[0], 0));
operands[0] = replace_equiv_address (operands[0], operands[2]);
emit_move_insn (operands[0], operands[1]);
@@ -1647,11 +1651,13 @@
})
(define_expand "reload_outdf"
- [(parallel [(match_operand:DF 0 "memory_operand" "")
+ [(parallel [(match_operand:DF 0 "" "")
(match_operand:DF 1 "register_operand" "d")
(match_operand:SI 2 "register_operand" "=&a")])]
"!TARGET_64BIT"
{
+ if (GET_CODE (operands[0]) != MEM)
+ abort ();
s390_load_address (operands[2], XEXP (operands[0], 0));
operands[0] = replace_equiv_address (operands[0], operands[2]);
emit_move_insn (operands[0], operands[1]);
@@ -4117,7 +4123,7 @@
(match_operand:DF 2 "general_operand" "f,R"))
(match_operand:DF 3 "const0_operand" "")))
(set (match_operand:DF 0 "register_operand" "=f,f")
- (plus:DF (match_dup 1) (match_dup 2)))]
+ (minus:DF (match_dup 1) (match_dup 2)))]
"s390_match_ccmode (insn, CCSmode) && TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT"
"@
sdbr\t%0,%2
diff --git a/contrib/gcc/config/s390/tpf.h b/contrib/gcc/config/s390/tpf.h
index ce984e6..3015827 100644
--- a/contrib/gcc/config/s390/tpf.h
+++ b/contrib/gcc/config/s390/tpf.h
@@ -92,6 +92,9 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#undef CPLUSPLUS_CPP_SPEC
#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
+#undef ASM_SPEC
+#define ASM_SPEC "%{m31&m64}%{mesa&mzarch}%{march=*}"
+
#undef LIB_SPEC
#define LIB_SPEC "%{pthread:-lpthread} -lc"
diff --git a/contrib/gcc/config/sparc/sparc.c b/contrib/gcc/config/sparc/sparc.c
index c959370..0b07e46 100644
--- a/contrib/gcc/config/sparc/sparc.c
+++ b/contrib/gcc/config/sparc/sparc.c
@@ -187,6 +187,8 @@ static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
HOST_WIDE_INT, tree);
+static bool sparc_can_output_mi_thunk (tree, HOST_WIDE_INT,
+ HOST_WIDE_INT, tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
@@ -270,7 +272,7 @@ enum processor_type sparc_cpu;
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
-#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
@@ -1315,23 +1317,7 @@ input_operand (rtx op, enum machine_mode mode)
/* Check for valid MEM forms. */
if (GET_CODE (op) == MEM)
- {
- rtx inside = XEXP (op, 0);
-
- if (GET_CODE (inside) == LO_SUM)
- {
- /* We can't allow these because all of the splits
- (eventually as they trickle down into DFmode
- splits) require offsettable memory references. */
- if (! TARGET_V9
- && GET_MODE (op) == TFmode)
- return 0;
-
- return (register_operand (XEXP (inside, 0), Pmode)
- && CONSTANT_P (XEXP (inside, 1)));
- }
- return memory_address_p (mode, inside);
- }
+ return memory_address_p (mode, XEXP (op, 0));
return 0;
}
@@ -3334,7 +3320,7 @@ legitimate_pic_operand_p (rtx x)
int
legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
{
- rtx rs1 = NULL, rs2 = NULL, imm1 = NULL, imm2;
+ rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
if (REG_P (addr) || GET_CODE (addr) == SUBREG)
rs1 = addr;
@@ -3374,15 +3360,14 @@ legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
&& (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
{
- /* We prohibit REG + REG for TFmode when there are no instructions
- which accept REG+REG instructions. We do this because REG+REG
- is not an offsetable address. If we get the situation in reload
+ /* We prohibit REG + REG for TFmode when there are no quad move insns
+ and we consequently need to split. We do this because REG+REG
+ is not an offsettable address. If we get the situation in reload
where source and destination of a movtf pattern are both MEMs with
REG+REG address, then only one of them gets converted to an
- offsetable address. */
+ offsettable address. */
if (mode == TFmode
- && !(TARGET_FPU && TARGET_ARCH64 && TARGET_V9
- && TARGET_HARD_QUAD))
+ && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
return 0;
/* We prohibit REG + REG on ARCH32 if not optimizing for
@@ -3399,7 +3384,6 @@ legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
&& ! TARGET_CM_MEDMID
&& RTX_OK_FOR_OLO10_P (rs2))
{
- imm2 = rs2;
rs2 = NULL;
imm1 = XEXP (rs1, 1);
rs1 = XEXP (rs1, 0);
@@ -3415,9 +3399,9 @@ legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
if (! CONSTANT_P (imm1) || tls_symbolic_operand (rs1))
return 0;
- /* We can't allow TFmode, because an offset greater than or equal to the
- alignment (8) may cause the LO_SUM to overflow if !v9. */
- if (mode == TFmode && !TARGET_V9)
+ /* We can't allow TFmode in 32-bit mode, because an offset greater
+ than the alignment (8) may cause the LO_SUM to overflow. */
+ if (mode == TFmode && TARGET_ARCH32)
return 0;
}
else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
@@ -5101,7 +5085,7 @@ static void function_arg_record_value_2
static void function_arg_record_value_1
(tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
static rtx function_arg_record_value (tree, enum machine_mode, int, int, int);
-static rtx function_arg_union_value (int, enum machine_mode, int);
+static rtx function_arg_union_value (int, enum machine_mode, int, int);
/* A subroutine of function_arg_record_value. Traverse the structure
recursively and determine how many registers will be required. */
@@ -5445,11 +5429,19 @@ function_arg_record_value (tree type, enum machine_mode mode,
REGNO is the hard register the union will be passed in. */
static rtx
-function_arg_union_value (int size, enum machine_mode mode, int regno)
+function_arg_union_value (int size, enum machine_mode mode, int slotno,
+ int regno)
{
int nwords = ROUND_ADVANCE (size), i;
rtx regs;
+ /* See comment in previous function for empty structures. */
+ if (nwords == 0)
+ return gen_rtx_REG (mode, regno);
+
+ if (slotno == SPARC_INT_ARG_MAX - 1)
+ nwords = 1;
+
/* Unions are passed left-justified. */
regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
@@ -5516,7 +5508,7 @@ function_arg (const struct sparc_args *cum, enum machine_mode mode,
if (size > 16)
abort (); /* shouldn't get here */
- return function_arg_union_value (size, mode, regno);
+ return function_arg_union_value (size, mode, slotno, regno);
}
/* v9 fp args in reg slots beyond the int reg slots get passed in regs
but also have the slot allocated for them.
@@ -5796,7 +5788,7 @@ function_value (tree type, enum machine_mode mode, int incoming_p)
if (size > 32)
abort (); /* shouldn't get here */
- return function_arg_union_value (size, mode, regbase);
+ return function_arg_union_value (size, mode, 0, regbase);
}
else if (AGGREGATE_TYPE_P (type))
{
@@ -5819,7 +5811,7 @@ function_value (tree type, enum machine_mode mode, int incoming_p)
try to be unduly clever, and simply follow the ABI
for unions in that case. */
if (mode == BLKmode)
- return function_arg_union_value (bytes, mode, regbase);
+ return function_arg_union_value (bytes, mode, 0, regbase);
}
else if (GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_SIZE (mode) < UNITS_PER_WORD)
@@ -9288,16 +9280,18 @@ sparc_rtx_costs (rtx x, int code, int outer_code, int *total)
}
}
-/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
- Used for C++ multiple inheritance. */
+/* Output the assembler code for a thunk function. THUNK_DECL is the
+ declaration for the thunk function itself, FUNCTION is the decl for
+ the target function. DELTA is an immediate constant offset to be
+ added to THIS. If VCALL_OFFSET is nonzero, the word at address
+ (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
static void
sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
- HOST_WIDE_INT delta,
- HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
tree function)
{
- rtx this, insn, funexp, delta_rtx, tmp;
+ rtx this, insn, funexp;
reload_completed = 1;
epilogue_completed = 1;
@@ -9315,26 +9309,73 @@ sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
/* Add DELTA. When possible use a plain add, otherwise load it into
a register first. */
- delta_rtx = GEN_INT (delta);
- if (!SPARC_SIMM13_P (delta))
+ if (delta)
+ {
+ rtx delta_rtx = GEN_INT (delta);
+
+ if (! SPARC_SIMM13_P (delta))
+ {
+ rtx scratch = gen_rtx_REG (Pmode, 1);
+ emit_move_insn (scratch, delta_rtx);
+ delta_rtx = scratch;
+ }
+
+ /* THIS += DELTA. */
+ emit_insn (gen_add2_insn (this, delta_rtx));
+ }
+
+ /* Add the word at address (*THIS + VCALL_OFFSET). */
+ if (vcall_offset)
{
+ rtx vcall_offset_rtx = GEN_INT (vcall_offset);
rtx scratch = gen_rtx_REG (Pmode, 1);
- if (input_operand (delta_rtx, GET_MODE (scratch)))
- emit_insn (gen_rtx_SET (VOIDmode, scratch, delta_rtx));
+ if (vcall_offset >= 0)
+ abort ();
+
+ /* SCRATCH = *THIS. */
+ emit_move_insn (scratch, gen_rtx_MEM (Pmode, this));
+
+ /* Prepare for adding VCALL_OFFSET. The difficulty is that we
+ may not have any available scratch register at this point. */
+ if (SPARC_SIMM13_P (vcall_offset))
+ ;
+ /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
+ else if (! fixed_regs[5]
+ /* The below sequence is made up of at least 2 insns,
+ while the default method may need only one. */
+ && vcall_offset < -8192)
+ {
+ rtx scratch2 = gen_rtx_REG (Pmode, 5);
+ emit_move_insn (scratch2, vcall_offset_rtx);
+ vcall_offset_rtx = scratch2;
+ }
else
{
- if (TARGET_ARCH64)
- sparc_emit_set_const64 (scratch, delta_rtx);
- else
- sparc_emit_set_const32 (scratch, delta_rtx);
+ rtx increment = GEN_INT (-4096);
+
+ /* VCALL_OFFSET is a negative number whose typical range can be
+ estimated as -32768..0 in 32-bit mode. In almost all cases
+ it is therefore cheaper to emit multiple add insns than
+ spilling and loading the constant into a register (at least
+ 6 insns). */
+ while (! SPARC_SIMM13_P (vcall_offset))
+ {
+ emit_insn (gen_add2_insn (scratch, increment));
+ vcall_offset += 4096;
+ }
+ vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
}
- delta_rtx = scratch;
- }
+ /* SCRATCH = *(*THIS + VCALL_OFFSET). */
+ emit_move_insn (scratch, gen_rtx_MEM (Pmode,
+ gen_rtx_PLUS (Pmode,
+ scratch,
+ vcall_offset_rtx)));
- tmp = gen_rtx_PLUS (Pmode, this, delta_rtx);
- emit_insn (gen_rtx_SET (VOIDmode, this, tmp));
+ /* THIS += *(*THIS + VCALL_OFFSET). */
+ emit_insn (gen_add2_insn (this, scratch));
+ }
/* Generate a tail call to the target function. */
if (! TREE_USED (function))
@@ -9364,6 +9405,19 @@ sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
no_new_pseudos = 0;
}
+/* Return true if sparc_output_mi_thunk would be able to output the
+ assembler code for the thunk function specified by the arguments
+ it is passed, and false otherwise. */
+static bool
+sparc_can_output_mi_thunk (tree thunk_fndecl ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT vcall_offset,
+ tree function ATTRIBUTE_UNUSED)
+{
+ /* Bound the loop used in the default method above. */
+ return (vcall_offset >= -32768 || ! fixed_regs[5]);
+}
+
/* How to allocate a 'struct machine_function'. */
static struct machine_function *
diff --git a/contrib/gcc/config/sparc/sparc.md b/contrib/gcc/config/sparc/sparc.md
index c7da780..b50d10a 100644
--- a/contrib/gcc/config/sparc/sparc.md
+++ b/contrib/gcc/config/sparc/sparc.md
@@ -2076,7 +2076,6 @@
if (! CONSTANT_P (operands[1]) || input_operand (operands[1], DImode))
;
else if (TARGET_ARCH64
- && CONSTANT_P (operands[1])
&& GET_CODE (operands[1]) != HIGH
&& GET_CODE (operands[1]) != LO_SUM)
{
diff --git a/contrib/gcc/config/sparc/t-elf b/contrib/gcc/config/sparc/t-elf
index 027940b..6868736 100644
--- a/contrib/gcc/config/sparc/t-elf
+++ b/contrib/gcc/config/sparc/t-elf
@@ -24,6 +24,6 @@ INSTALL_LIBGCC = install-multilib
# Assemble startup files.
crti.o: $(srcdir)/config/sparc/sol2-ci.asm $(GCC_PASSES)
- $(GCC_FOR_TARGET) -c -o crti.o -x assembler $(srcdir)/config/sparc/sol2-ci.asm
+ $(GCC_FOR_TARGET) -c -o crti.o -x assembler-with-cpp $(srcdir)/config/sparc/sol2-ci.asm
crtn.o: $(srcdir)/config/sparc/sol2-cn.asm $(GCC_PASSES)
- $(GCC_FOR_TARGET) -c -o crtn.o -x assembler $(srcdir)/config/sparc/sol2-cn.asm
+ $(GCC_FOR_TARGET) -c -o crtn.o -x assembler-with-cpp $(srcdir)/config/sparc/sol2-cn.asm
diff --git a/contrib/gcc/config/t-libunwind b/contrib/gcc/config/t-libunwind
index 2204ae3..121ce2e 100644
--- a/contrib/gcc/config/t-libunwind
+++ b/contrib/gcc/config/t-libunwind
@@ -1,5 +1,12 @@
+# Use the system libunwind library.
+#
# Override the default value from t-slibgcc-elf-ver and mention -lunwind
# so that the resulting libgcc_s.so has the necessary DT_NEEDED entry for
# libunwind.
SHLIB_LC = -lunwind -lc
-LIB2ADDEH = $(srcdir)/unwind-sjlj.c $(srcdir)/unwind-c.c
+LIB2ADDEH = $(srcdir)/unwind-sjlj.c $(srcdir)/unwind-c.c \
+ $(srcdir)/unwind-compat.c $(srcdir)/unwind-dw2-fde-compat.c
+LIB2ADDEHSTATIC = $(srcdir)/unwind-sjlj.c $(srcdir)/unwind-c.c
+
+T_CFLAGS += -DUSE_LIBUNWIND_EXCEPTIONS
+TARGET_LIBGCC2_CFLAGS += -DUSE_GAS_SYMVER
diff --git a/contrib/gcc/config/t-libunwind-elf b/contrib/gcc/config/t-libunwind-elf
new file mode 100644
index 0000000..a9609e7
--- /dev/null
+++ b/contrib/gcc/config/t-libunwind-elf
@@ -0,0 +1,30 @@
+# Build libunwind for ELF with the GNU linker.
+
+# Use unwind-dw2-fde-glibc
+LIBUNWIND = $(srcdir)/unwind-dw2.c $(srcdir)/unwind-dw2-fde-glibc.c
+LIBUNWINDDEP = unwind.inc unwind-dw2-fde.h unwind-dw2-fde.c
+
+SHLIBUNWIND_SOVERSION = 7
+SHLIBUNWIND_SONAME = @shlib_so_name@.so.$(SHLIBUNWIND_SOVERSION)
+SHLIBUNWIND_NAME = @shlib_dir@@shlib_so_name@.so.$(SHLIBUNWIND_SOVERSION)
+
+SHLIBUNWIND_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared \
+ -nodefaultlibs -Wl,-h,$(SHLIBUNWIND_SONAME) \
+ -Wl,-z,text -Wl,-z,defs -o $(SHLIBUNWIND_NAME).tmp \
+ @multilib_flags@ $(SHLIB_OBJS) -lc && \
+ rm -f $(SHLIB_SOLINK) && \
+ if [ -f $(SHLIBUNWIND_NAME) ]; then \
+ mv -f $(SHLIBUNWIND_NAME) $(SHLIBUNWIND_NAME).backup; \
+ else true; fi && \
+ mv $(SHLIBUNWIND_NAME).tmp $(SHLIBUNWIND_NAME) && \
+ $(LN_S) $(SHLIBUNWIND_NAME) $(SHLIB_SOLINK)
+
+# $(slibdir) double quoted to protect it from expansion while building
+# libgcc.mk. We want this delayed until actual install time.
+SHLIBUNWIND_INSTALL = \
+ $$(SHELL) $$(srcdir)/mkinstalldirs $$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL); \
+ $(INSTALL_DATA) $(SHLIBUNWIND_NAME) \
+ $$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIBUNWIND_SONAME); \
+ rm -f $$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK); \
+ $(LN_S) $(SHLIBUNWIND_SONAME) \
+ $$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK)
diff --git a/contrib/gcc/config/t-slibgcc-darwin b/contrib/gcc/config/t-slibgcc-darwin
index f27fae4..b820441 100644
--- a/contrib/gcc/config/t-slibgcc-darwin
+++ b/contrib/gcc/config/t-slibgcc-darwin
@@ -12,10 +12,14 @@ SHLIB_SLIBDIR_QUAL = @shlib_slibdir_qual@
SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -dynamiclib -nodefaultlibs \
-Wl,-install_name,$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SONAME) \
- -Wl,-flat_namespace -o $(SHLIB_NAME) \
+ -Wl,-flat_namespace -o $(SHLIB_NAME).tmp \
$(SHLIB_VERSTRING) \
@multilib_flags@ $(SHLIB_OBJS) -lc && \
rm -f $(SHLIB_SOLINK) && \
+ if [ -f $(SHLIB_NAME) ]; then \
+ mv -f $(SHLIB_NAME) $(SHLIB_NAME).backup; \
+ else true; fi && \
+ mv $(SHLIB_NAME).tmp $(SHLIB_NAME) && \
$(LN_S) $(SHLIB_NAME) $(SHLIB_SOLINK)
# $(slibdir) double quoted to protect it from expansion while building
# libgcc.mk. We want this delayed until actual install time.
diff --git a/contrib/gcc/config/t-slibgcc-elf-ver b/contrib/gcc/config/t-slibgcc-elf-ver
index a4f8ef0..5086500 100644
--- a/contrib/gcc/config/t-slibgcc-elf-ver
+++ b/contrib/gcc/config/t-slibgcc-elf-ver
@@ -14,8 +14,12 @@ SHLIB_LC = -lc
SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
-Wl,--soname=$(SHLIB_SONAME) \
-Wl,--version-script=$(SHLIB_MAP) \
- -o $(SHLIB_NAME) @multilib_flags@ $(SHLIB_OBJS) $(SHLIB_LC) && \
+ -o $(SHLIB_NAME).tmp @multilib_flags@ $(SHLIB_OBJS) $(SHLIB_LC) && \
rm -f $(SHLIB_SOLINK) && \
+ if [ -f $(SHLIB_NAME) ]; then \
+ mv -f $(SHLIB_NAME) $(SHLIB_NAME).backup; \
+ else true; fi && \
+ mv $(SHLIB_NAME).tmp $(SHLIB_NAME) && \
$(LN_S) $(SHLIB_NAME) $(SHLIB_SOLINK)
# $(slibdir) double quoted to protect it from expansion while building
# libgcc.mk. We want this delayed until actual install time.
diff --git a/contrib/gcc/config/t-slibgcc-sld b/contrib/gcc/config/t-slibgcc-sld
index 6bdd521..44e7f18 100644
--- a/contrib/gcc/config/t-slibgcc-sld
+++ b/contrib/gcc/config/t-slibgcc-sld
@@ -10,9 +10,13 @@ SHLIB_SLIBDIR_QUAL = @shlib_slibdir_qual@
SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
-Wl,-h,$(SHLIB_SONAME) -Wl,-z,text -Wl,-z,defs \
- -Wl,-M,$(SHLIB_MAP) -o $(SHLIB_NAME) \
+ -Wl,-M,$(SHLIB_MAP) -o $(SHLIB_NAME).tmp \
@multilib_flags@ $(SHLIB_OBJS) -lc && \
rm -f $(SHLIB_SOLINK) && \
+ if [ -f $(SHLIB_NAME) ]; then \
+ mv -f $(SHLIB_NAME) $(SHLIB_NAME).backup; \
+ else true; fi && \
+ mv $(SHLIB_NAME).tmp $(SHLIB_NAME) && \
$(LN_S) $(SHLIB_NAME) $(SHLIB_SOLINK)
# $(slibdir) double quoted to protect it from expansion while building
# libgcc.mk. We want this delayed until actual install time.
OpenPOWER on IntegriCloud