Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/2009-04-23-dbg.c | 2
-rw-r--r--  test/CodeGen/2009-10-20-GlobalDebug.c | 2
-rw-r--r--  test/CodeGen/2010-08-10-DbgConstant.c | 5
-rw-r--r--  test/CodeGen/_Bool-conversion.c | 12
-rw-r--r--  test/CodeGen/address-space-field1.c | 26
-rw-r--r--  test/CodeGen/address-space-field2.c | 46
-rw-r--r--  test/CodeGen/address-space-field3.c | 42
-rw-r--r--  test/CodeGen/address-space-field4.c | 55
-rw-r--r--  test/CodeGen/asm-errors.c | 2
-rw-r--r--  test/CodeGen/asm-inout.c | 19
-rw-r--r--  test/CodeGen/asm.c | 22
-rw-r--r--  test/CodeGen/asm_arm.c | 22
-rw-r--r--  test/CodeGen/atomic.c | 15
-rw-r--r--  test/CodeGen/available-externally-suppress.c | 15
-rw-r--r--  test/CodeGen/bitfield-2.c | 8
-rw-r--r--  test/CodeGen/block-decl-merging.c | 20
-rw-r--r--  test/CodeGen/blockstret.c | 4
-rw-r--r--  test/CodeGen/builtin-expect.c | 11
-rw-r--r--  test/CodeGen/builtins-arm.c | 2
-rw-r--r--  test/CodeGen/builtins-ppc-altivec.c | 987
-rw-r--r--  test/CodeGen/builtins-x86.c | 117
-rw-r--r--  test/CodeGen/const-arithmetic.c | 4
-rw-r--r--  test/CodeGen/const-init.c | 7
-rw-r--r--  test/CodeGen/debug-info-enum.c | 11
-rw-r--r--  test/CodeGen/debug-info-scope.c | 14
-rw-r--r--  test/CodeGen/designated-initializers.c | 27
-rw-r--r--  test/CodeGen/enum2.c | 8
-rw-r--r--  test/CodeGen/exprs.c | 7
-rw-r--r--  test/CodeGen/fold-const-declref.c | 9
-rw-r--r--  test/CodeGen/func-in-block.c | 1
-rw-r--r--  test/CodeGen/lineno-dbginfo.c | 3
-rw-r--r--  test/CodeGen/packed-structure.c | 89
-rw-r--r--  test/CodeGen/palignr.c | 2
-rw-r--r--  test/CodeGen/pragma-visibility.c | 24
-rw-r--r--  test/CodeGen/statements.c | 9
-rw-r--r--  test/CodeGen/struct-passing.c | 12
-rw-r--r--  test/CodeGen/thread-specifier.c | 1
-rw-r--r--  test/CodeGen/trapv.c | 47
-rw-r--r--  test/CodeGen/unwind-attr.c | 24
-rw-r--r--  test/CodeGen/vector.c | 17
-rw-r--r--  test/CodeGen/x86_32-arguments.c | 16
-rw-r--r--  test/CodeGen/x86_64-arguments.c | 122
42 files changed, 1623 insertions(+), 265 deletions(-)
diff --git a/test/CodeGen/2009-04-23-dbg.c b/test/CodeGen/2009-04-23-dbg.c
index 6a8bf01..704aba2 100644
--- a/test/CodeGen/2009-04-23-dbg.c
+++ b/test/CodeGen/2009-04-23-dbg.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -g -o %t %s -emit-llvm-bc && llc %t -o %t.s
+// RUN: %clang_cc1 -S -g -o %t %s
# 1 "a.c"
# 1 "a.c" 1
# 1 "<built-in>" 1
diff --git a/test/CodeGen/2009-10-20-GlobalDebug.c b/test/CodeGen/2009-10-20-GlobalDebug.c
index 1db37de..3c46bea 100644
--- a/test/CodeGen/2009-10-20-GlobalDebug.c
+++ b/test/CodeGen/2009-10-20-GlobalDebug.c
@@ -1,7 +1,7 @@
// RUN: %clang -ccc-host-triple i386-apple-darwin10 -S -g -dA %s -o - | FileCheck %s
int global;
+// CHECK: ascii "localstatic" ## DW_AT_name
// CHECK: asciz "global" ## External Name
-// CHECK: asciz "localstatic" ## External Name
int main() {
static int localstatic;
return 0;
diff --git a/test/CodeGen/2010-08-10-DbgConstant.c b/test/CodeGen/2010-08-10-DbgConstant.c
new file mode 100644
index 0000000..5b8f064
--- /dev/null
+++ b/test/CodeGen/2010-08-10-DbgConstant.c
@@ -0,0 +1,5 @@
+// RUN: %clang_cc1 -S -emit-llvm -g %s -o - | grep DW_TAG_variable
+
+static const unsigned int ro = 201;
+void bar(int);
+void foo() { bar(ro); }
diff --git a/test/CodeGen/_Bool-conversion.c b/test/CodeGen/_Bool-conversion.c
new file mode 100644
index 0000000..9e5e894
--- /dev/null
+++ b/test/CodeGen/_Bool-conversion.c
@@ -0,0 +1,12 @@
+// RUN: %clang_cc1 -triple i386 -emit-llvm -O2 -o - %s | FileCheck %s
+
+// CHECK: define i32 @f0()
+// CHECK: ret i32 1
+// CHECK: }
+
+static _Bool f0_0(void *a0) { return (_Bool) a0; }
+int f0() { return f0_0((void*) 0x2); }
+
+_Bool f1(void) {
+ return (_Bool) ({ void (*x)(); x = 0; });
+}
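
[Editorial note: what the new test pins down is the C99 conversion rule: a pointer converts to _Bool as 1 whenever it compares unequal to a null pointer, so (_Bool)(void *)0x2 is 1 and the whole f0/f0_0 chain folds to "ret i32 1" at -O2. A minimal standalone sketch of the same rule, illustrative only and not part of the test:

/* Illustrative sketch of the C99 pointer -> _Bool conversion rule. */
#include <assert.h>

int main(void) {
  void *null_p = 0;
  void *nonnull_p = &null_p;      /* any non-null pointer */
  assert((_Bool)null_p == 0);     /* null converts to 0 */
  assert((_Bool)nonnull_p == 1);  /* non-null converts to 1, never 2, 4, ... */
  return 0;
}
]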
diff --git a/test/CodeGen/address-space-field1.c b/test/CodeGen/address-space-field1.c
index a81e08e..e9c1871 100644
--- a/test/CodeGen/address-space-field1.c
+++ b/test/CodeGen/address-space-field1.c
@@ -1,22 +1,22 @@
-// RUN: %clang_cc1 -emit-llvm < %s -o - | FileCheck %s
+// RUN: %clang_cc1 -emit-llvm -triple x86_64-apple-darwin10 < %s -o - | FileCheck %s
// CHECK:%struct.S = type { i32, i32 }
// CHECK:define void @test_addrspace(%struct.S addrspace(1)* %p1, %struct.S addrspace(2)* %p2) nounwind
// CHECK: [[p1addr:%.*]] = alloca %struct.S addrspace(1)*
// CHECK: [[p2addr:%.*]] = alloca %struct.S addrspace(2)*
// CHECK: store %struct.S addrspace(1)* %p1, %struct.S addrspace(1)** [[p1addr]]
// CHECK: store %struct.S addrspace(2)* %p2, %struct.S addrspace(2)** [[p2addr]]
-// CHECK: [[t0:%.*]] = load %struct.S addrspace(2)** [[p2addr]] ; <%struct.S addrspace(2)*> [#uses=1]
-// CHECK: [[t1:%.*]] = getelementptr inbounds %struct.S addrspace(2)* [[t0]], i32 0, i32 1 ; <i32 addrspace(2)*> [#uses=1]
-// CHECK: [[t2:%.*]] = load i32 addrspace(2)* [[t1]] ; <i32> [#uses=1]
-// CHECK: [[t3:%.*]] = load %struct.S addrspace(1)** [[p1addr]] ; <%struct.S addrspace(1)*> [#uses=1]
-// CHECK: [[t4:%.*]] = getelementptr inbounds %struct.S addrspace(1)* [[t3]], i32 0, i32 0 ; <i32 addrspace(1)*> [#uses=1]
-// CHECK: store i32 [[t2]], i32 addrspace(1)* [[t4]]
-// CHECK: [[t5:%.*]] = load %struct.S addrspace(2)** [[p2addr]] ; <%struct.S addrspace(2)*> [#uses=1]
-// CHECK: [[t6:%.*]] = getelementptr inbounds %struct.S addrspace(2)* [[t5]], i32 0, i32 0 ; <i32 addrspace(2)*> [#uses=1]
-// CHECK: [[t7:%.*]] = load i32 addrspace(2)* [[t6]] ; <i32> [#uses=1]
-// CHECK: [[t8:%.*]] = load %struct.S addrspace(1)** [[p1addr]] ; <%struct.S addrspace(1)*> [#uses=1]
-// CHECK: [[t9:%.*]] = getelementptr inbounds %struct.S addrspace(1)* [[t8]], i32 0, i32 1 ; <i32 addrspace(1)*> [#uses=1]
-// CHECK: store i32 [[t7]], i32 addrspace(1)* [[t9]]
+// CHECK: [[t0:%.*]] = load %struct.S addrspace(2)** [[p2addr]], align 8
+// CHECK: [[t1:%.*]] = getelementptr inbounds %struct.S addrspace(2)* [[t0]], i32 0, i32 1
+// CHECK: [[t2:%.*]] = load i32 addrspace(2)* [[t1]], align 4
+// CHECK: [[t3:%.*]] = load %struct.S addrspace(1)** [[p1addr]], align 8
+// CHECK: [[t4:%.*]] = getelementptr inbounds %struct.S addrspace(1)* [[t3]], i32 0, i32 0
+// CHECK: store i32 [[t2]], i32 addrspace(1)* [[t4]], align 4
+// CHECK: [[t5:%.*]] = load %struct.S addrspace(2)** [[p2addr]], align 8
+// CHECK: [[t6:%.*]] = getelementptr inbounds %struct.S addrspace(2)* [[t5]], i32 0, i32 0
+// CHECK: [[t7:%.*]] = load i32 addrspace(2)* [[t6]], align 4
+// CHECK: [[t8:%.*]] = load %struct.S addrspace(1)** [[p1addr]], align 8
+// CHECK: [[t9:%.*]] = getelementptr inbounds %struct.S addrspace(1)* [[t8]], i32 0, i32 1
+// CHECK: store i32 [[t7]], i32 addrspace(1)* [[t9]], align 4
// CHECK: ret void
// CHECK:}
diff --git a/test/CodeGen/address-space-field2.c b/test/CodeGen/address-space-field2.c
deleted file mode 100644
index 9c21cab..0000000
--- a/test/CodeGen/address-space-field2.c
+++ /dev/null
@@ -1,46 +0,0 @@
-// RUN: %clang_cc1 -emit-llvm %s -o - | FileCheck %s
-// CHECK: addrspace(1)
-// CHECK: addrspace(2)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-
-// Check that we don't lose the address space when accessing an array element
-// inside a structure.
-
-#define __addr1 __attribute__((address_space(1)))
-#define __addr2 __attribute__((address_space(2)))
-
-typedef struct S {
- int arr[ 3 ];
-} S;
-
-void test_addrspace(__addr1 S* p1, __addr2 S*p2, int* val, int n) {
- for (int i=0; i < 3; ++i) {
- int t = val[i];
- p1->arr[i] = t;
- for (int j=0; j < n; ++j)
- p2[j].arr[i] = t;
- }
-}
diff --git a/test/CodeGen/address-space-field3.c b/test/CodeGen/address-space-field3.c
deleted file mode 100644
index c17085c..0000000
--- a/test/CodeGen/address-space-field3.c
+++ /dev/null
@@ -1,42 +0,0 @@
-// RUN: %clang_cc1 -emit-llvm %s -o - | FileCheck %s
-// CHECK: addrspace(1)
-// CHECK: addrspace(2)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-
-// Check that we don't lose the address space when accessing an array element
-// inside a structure.
-
-#define __addr1 __attribute__((address_space(1)))
-#define __addr2 __attribute__((address_space(2)))
-
-typedef struct S {
- int arr[ 3 ];
-} S;
-
-void test_addrspace(__addr1 S* p1, __addr2 S*p2, int* val, int n) {
- for (int i=0; i < 3; ++i) {
- int t = val[i];
- p1->arr[i] = p2->arr[i];
- }
-}
diff --git a/test/CodeGen/address-space-field4.c b/test/CodeGen/address-space-field4.c
deleted file mode 100644
index a896ab6..0000000
--- a/test/CodeGen/address-space-field4.c
+++ /dev/null
@@ -1,55 +0,0 @@
-// RUN: %clang_cc1 -emit-llvm %s -o - | FileCheck %s
-// CHECK: addrspace(2)
-// CHECK: addrspace(3)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(3)
-// CHECK: addrspace(3)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(1)
-// CHECK: addrspace(2)
-// CHECK: addrspace(2)
-
-// Check the load and store are using the correct address space to access
-// the variables.
-
-#define __addr1 __attribute__((address_space(1)))
-#define __addr2 __attribute__((address_space(2)))
-#define __addr3 __attribute__((address_space(3)))
-
-typedef struct Pair {
- __addr2 int* a;
- __addr3 int* b;
-} Pair;
-
-typedef struct S {
- Pair arr[ 3 ];
-} S;
-
-void test_addrspace(__addr1 S* p1, __addr1 S* p2) {
- *p1->arr[0].a = *p2->arr[1].b;
-}
diff --git a/test/CodeGen/asm-errors.c b/test/CodeGen/asm-errors.c
index 7323e61..aea5cb2 100644
--- a/test/CodeGen/asm-errors.c
+++ b/test/CodeGen/asm-errors.c
@@ -1,4 +1,4 @@
-// RUN: not %clang_cc1 -triple i386-apple-darwin10 -emit-obj %s > %t 2>&1
+// RUN: not %clang_cc1 -triple i386-apple-darwin10 -emit-obj %s -o /dev/null > %t 2>&1
// RUN: FileCheck %s < %t
int test1(int X) {
diff --git a/test/CodeGen/asm-inout.c b/test/CodeGen/asm-inout.c
new file mode 100644
index 0000000..f042766
--- /dev/null
+++ b/test/CodeGen/asm-inout.c
@@ -0,0 +1,19 @@
+// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+// PR3800
+int *foo(void);
+
+// CHECK: @test1
+void test1() {
+ // CHECK: [[REGCALLRESULT:%[a-zA-Z0-9\.]+]] = call i32* @foo()
+ // CHECK: call void asm "foobar", "=*m,*m,~{dirflag},~{fpsr},~{flags}"(i32* [[REGCALLRESULT]], i32* [[REGCALLRESULT]])
+ asm ("foobar" : "+m"(*foo()));
+}
+
+// CHECK: @test2
+void test2() {
+ // CHECK: [[REGCALLRESULT:%[a-zA-Z0-9\.]+]] = call i32* @foo()
+ // CHECK: load i32* [[REGCALLRESULT]]
+ // CHECK: call i32 asm
+ // CHECK: store i32 {{%[a-zA-Z0-9\.]+}}, i32* [[REGCALLRESULT]]
+ asm ("foobar" : "+r"(*foo()));
+}
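
[Editorial note: the two cases differ only in the read-write constraint. With "+m" the asm operates in place on the memory location returned by foo(); with "+r" the compiler loads *foo() into a register, runs the asm, and stores the result back. The property PR3800 guards is that foo() is evaluated exactly once either way. A hedged sketch of the same distinction, using illustrative x86 asm rather than the test's:

int *foo(void);

void inc_via_memory(void) {
  /* "+m": the operand is *foo() itself; foo() is called once. */
  __asm__("incl %0" : "+m"(*foo()));
}

void inc_via_register(void) {
  /* "+r": load *foo() into a register, run the asm, store it back --
     still a single call to foo(). */
  __asm__("incl %0" : "+r"(*foo()));
}
]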
diff --git a/test/CodeGen/asm.c b/test/CodeGen/asm.c
index 5077028..eb11285 100644
--- a/test/CodeGen/asm.c
+++ b/test/CodeGen/asm.c
@@ -168,3 +168,25 @@ float t21(long double x) {
// CHECK: call x86_fp80 asm sideeffect "frndint"
// CHECK-NEXT: fptrunc x86_fp80 {{.*}} to float
}
+
+// <rdar://problem/8348447> - accept 'l' constraint
+unsigned char t22(unsigned char a, unsigned char b) {
+ unsigned int la = a;
+ unsigned int lb = b;
+ unsigned int bigres;
+ unsigned char res;
+ __asm__ ("0:\n1:\n" : [bigres] "=la"(bigres) : [la] "0"(la), [lb] "c"(lb) :
+ "edx", "cc");
+ res = bigres;
+ return res;
+}
+
+// <rdar://problem/8348447> - accept 'l' constraint
+unsigned char t23(unsigned char a, unsigned char b) {
+ unsigned int la = a;
+ unsigned int lb = b;
+ unsigned char res;
+ __asm__ ("0:\n1:\n" : [res] "=la"(res) : [la] "0"(la), [lb] "c"(lb) :
+ "edx", "cc");
+ return res;
+}
diff --git a/test/CodeGen/asm_arm.c b/test/CodeGen/asm_arm.c
index aac47d5..633bf55 100644
--- a/test/CodeGen/asm_arm.c
+++ b/test/CodeGen/asm_arm.c
@@ -30,3 +30,25 @@ void test4(float *a, float *b) {
"vst1.32 {q4}, [%0,:128] \n\t"
:: "r"(a), "r"(b));
}
+
+// {sp, lr, pc} are the canonical names for {r13, r14, r15}.
+//
+// CHECK: @test5
+// CHECK: call void asm sideeffect "", "~{sp},~{lr},~{pc},~{sp},~{lr},~{pc}"()
+void test5() {
+ __asm__("" : : : "r13", "r14", "r15", "sp", "lr", "pc");
+}
+
+// CHECK: @test6
+// CHECK: call void asm sideeffect "", "
+// CHECK: ~{s0},~{s1},~{s2},~{s3},~{s4},~{s5},~{s6},~{s7},
+// CHECK: ~{s8},~{s9},~{s10},~{s11},~{s12},~{s13},~{s14},~{s15},
+// CHECK: ~{s16},~{s17},~{s18},~{s19},~{s20},~{s21},~{s22},~{s23},
+// CHECK: ~{s24},~{s25},~{s26},~{s27},~{s28},~{s29},~{s30},~{s31}"()
+void test6() {
+ __asm__("" : : :
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+ "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+ "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31");
+}
diff --git a/test/CodeGen/atomic.c b/test/CodeGen/atomic.c
index aa5aa15..d0a7e04 100644
--- a/test/CodeGen/atomic.c
+++ b/test/CodeGen/atomic.c
@@ -1,5 +1,5 @@
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=i686-apple-darwin9 > %t1
-// RUN: grep @llvm.memory.barrier %t1 | count 38
+// RUN: grep @llvm.memory.barrier %t1 | count 42
// RUN: grep @llvm.atomic.load.add.i32 %t1 | count 3
// RUN: grep @llvm.atomic.load.sub.i8 %t1 | count 2
// RUN: grep @llvm.atomic.load.min.i32 %t1
@@ -7,7 +7,7 @@
// RUN: grep @llvm.atomic.load.umin.i32 %t1
// RUN: grep @llvm.atomic.load.umax.i32 %t1
// RUN: grep @llvm.atomic.swap.i32 %t1
-// RUN: grep @llvm.atomic.cmp.swap.i32 %t1 | count 4
+// RUN: grep @llvm.atomic.cmp.swap.i32 %t1 | count 5
// RUN: grep @llvm.atomic.load.and.i32 %t1
// RUN: grep @llvm.atomic.load.or.i8 %t1
// RUN: grep @llvm.atomic.load.xor.i8 %t1
@@ -19,6 +19,7 @@ int atomic(void)
int old;
int val = 1;
char valc = 1;
+ _Bool valb = 0;
unsigned int uval = 1;
int cmp = 0;
@@ -43,10 +44,18 @@ int atomic(void)
__sync_val_compare_and_swap((void **)0, (void *)0, (void *)0);
-
+ if ( __sync_val_compare_and_swap(&valb, 0, 1)) {
+ old = 42;
+ }
+ __sync_bool_compare_and_swap((void **)0, (void *)0, (void *)0);
__sync_lock_release(&val);
__sync_synchronize ();
return old;
}
+
+void release_return(int *lock) {
+ // Ensure this is actually returning void all the way through.
+ return __sync_lock_release(lock);
+}
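
[Editorial note: as background for the new cases, __sync_val_compare_and_swap returns the value the object held before the operation, __sync_bool_compare_and_swap returns whether the swap happened, and __sync_lock_release is declared to return void, which is exactly what release_return() relies on. A small usage sketch, illustrative only:

#include <stdbool.h>

int old_value(int *p, int expected, int desired) {
  /* Yields the prior contents of *p whether or not the swap occurred. */
  return __sync_val_compare_and_swap(p, expected, desired);
}

bool swapped(int *p, int expected, int desired) {
  /* True iff *p equaled 'expected' and was replaced by 'desired'. */
  return __sync_bool_compare_and_swap(p, expected, desired);
}

void unlock(int *lock) {
  /* Void-returning, so forwarding it with 'return' in a void function
     is valid -- the property release_return() checks above. */
  return __sync_lock_release(lock);
}
]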
diff --git a/test/CodeGen/available-externally-suppress.c b/test/CodeGen/available-externally-suppress.c
index c3b7a21..747d3cd 100644
--- a/test/CodeGen/available-externally-suppress.c
+++ b/test/CodeGen/available-externally-suppress.c
@@ -10,3 +10,18 @@ inline void f0(int y) { x = y; }
void test() {
f0(17);
}
+
+inline int __attribute__((always_inline)) f1(int x) {
+ int blarg = 0;
+ for (int i = 0; i < x; ++i)
+ blarg = blarg + x * i;
+ return blarg;
+}
+
+// CHECK: @test1
+int test1(int x) {
+ // CHECK: br i1
+ // CHECK-NOT: call
+ // CHECK: ret i32
+ return f1(x);
+}
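
[Editorial note: the point of the addition is that __attribute__((always_inline)) must force the body into the caller even when the plain inline definition would otherwise only be emitted available_externally, leaving no call behind. A minimal sketch of the same guarantee, with illustrative names:

/* Illustrative only: always_inline forces inlining regardless of the
   optimizer's heuristics or the available_externally linkage. */
inline int __attribute__((always_inline)) twice(int x) { return 2 * x; }

int user(int x) {
  return twice(x);  /* expands in place; no 'call twice' is emitted */
}
]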
diff --git a/test/CodeGen/bitfield-2.c b/test/CodeGen/bitfield-2.c
index e91859f..8de432f 100644
--- a/test/CodeGen/bitfield-2.c
+++ b/test/CodeGen/bitfield-2.c
@@ -12,7 +12,7 @@
// CHECK-RECORD: Record: struct s0
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD: LLVMType:<{ [3 x i8] }>
-// CHECK-RECORD: ContainsPointerToDataMember:0
+// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
// CHECK-RECORD: <CGBitFieldInfo Size:24 IsSigned:1
// CHECK-RECORD: NumComponents:2 Components: [
@@ -57,7 +57,7 @@ unsigned long long test_0() {
// CHECK-RECORD: Record: struct s1
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD: LLVMType:<{ [2 x i8], i8 }>
-// CHECK-RECORD: ContainsPointerToDataMember:0
+// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
// CHECK-RECORD: <CGBitFieldInfo Size:10 IsSigned:1
// CHECK-RECORD: NumComponents:1 Components: [
@@ -114,7 +114,7 @@ unsigned long long test_1() {
// CHECK-RECORD: Record: union u2
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD: LLVMType:<{ i8 }>
-// CHECK-RECORD: ContainsPointerToDataMember:0
+// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
// CHECK-RECORD: <CGBitFieldInfo Size:3 IsSigned:0
// CHECK-RECORD: NumComponents:1 Components: [
@@ -289,7 +289,7 @@ _Bool test_6() {
// CHECK-RECORD: Record: struct s7
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD: LLVMType:{ i32, i32, i32, i8, [3 x i8], [4 x i8], [12 x i8] }
-// CHECK-RECORD: ContainsPointerToDataMember:0
+// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
// CHECK-RECORD: <CGBitFieldInfo Size:5 IsSigned:1
// CHECK-RECORD: NumComponents:1 Components: [
diff --git a/test/CodeGen/block-decl-merging.c b/test/CodeGen/block-decl-merging.c
new file mode 100644
index 0000000..1e7a9f4
--- /dev/null
+++ b/test/CodeGen/block-decl-merging.c
@@ -0,0 +1,20 @@
+// RUN: %clang_cc1 -triple i386-apple-darwin10 -fblocks -emit-llvm -o - %s | \
+// RUN: FileCheck %s
+
+// CHECK: @_NSConcreteGlobalBlock = extern_weak global
+extern void * _NSConcreteStackBlock[32] __attribute__((weak_import));
+// CHECK: @_NSConcreteStackBlock = extern_weak global
+extern void * _NSConcreteGlobalBlock[32] __attribute__((weak_import));
+extern void _Block_object_dispose(const void *, const int) __attribute__((weak_import));
+// CHECK: declare extern_weak void @_Block_object_assign
+extern void _Block_object_assign(void *, const void *, const int) __attribute__((weak_import));
+// CHECK: declare extern_weak void @_Block_object_dispose
+
+void *x = ^(){};
+
+void f1(void (^a0)(void));
+
+void f0() {
+ __block int x;
+ f1(^(void){ x = 1; });
+}
diff --git a/test/CodeGen/blockstret.c b/test/CodeGen/blockstret.c
index 09292b8..f630f22 100644
--- a/test/CodeGen/blockstret.c
+++ b/test/CodeGen/blockstret.c
@@ -98,8 +98,8 @@ int main(int argc, char *argv[]) {
/*
desired global flags: 1879048192
desired stack flags: 1610612736
-should be non-zero: 0
-should be non-zero: 0
+should be non-zero: 1
+should be non-zero: 1
should be non-zero: 1
should be zero: 0
diff --git a/test/CodeGen/builtin-expect.c b/test/CodeGen/builtin-expect.c
new file mode 100644
index 0000000..8f02c4d
--- /dev/null
+++ b/test/CodeGen/builtin-expect.c
@@ -0,0 +1,11 @@
+// RUN: %clang_cc1 -emit-llvm -o - %s | FileCheck %s
+
+int x;
+int y(void);
+void foo();
+void FUNC() {
+// CHECK: [[call:%.*]] = call i32 @y
+ if (__builtin_expect (x, y()))
+ foo ();
+}
+
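
[Editorial note: what FUNC() checks is that the arguments of __builtin_expect keep their side effects: the call to y() must survive even though the builtin itself only conveys branch-probability information. The canonical use passes a constant expectation, as in the familiar likely/unlikely macros; a sketch, not part of the test:

/* Illustrative only: branch-prediction hints built on __builtin_expect. */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

int process(int *p) {
  if (unlikely(p == 0))   /* hint: the error path is rarely taken */
    return -1;
  return *p;
}
]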
diff --git a/test/CodeGen/builtins-arm.c b/test/CodeGen/builtins-arm.c
index 546f57a..09df1ef 100644
--- a/test/CodeGen/builtins-arm.c
+++ b/test/CodeGen/builtins-arm.c
@@ -9,4 +9,4 @@ void f1(char *a, char *b) {
__clear_cache(a,b);
}
-// CHECK: call void @__clear_cache
+// CHECK: call {{.*}} @__clear_cache
diff --git a/test/CodeGen/builtins-ppc-altivec.c b/test/CodeGen/builtins-ppc-altivec.c
index 6f65866..8627499 100644
--- a/test/CodeGen/builtins-ppc-altivec.c
+++ b/test/CodeGen/builtins-ppc-altivec.c
@@ -1,31 +1,25 @@
// RUN: %clang_cc1 -faltivec -triple powerpc-unknown-unknown -emit-llvm %s -o - | FileCheck %s
-// TODO: uncomment
-/* vector bool char vbc = { 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 }; */
+vector bool char vbc = { 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 };
vector signed char vsc = { 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16 };
vector unsigned char vuc = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
-// TODO: uncomment
-/* vector bool short vbs = { 1, 0, 1, 0, 1, 0, 1, 0 }; */
+vector bool short vbs = { 1, 0, 1, 0, 1, 0, 1, 0 };
vector short vs = { -1, 2, -3, 4, -5, 6, -7, 8 };
vector unsigned short vus = { 1, 2, 3, 4, 5, 6, 7, 8 };
-// TODO: uncomment
-/* vector bool int vbi = { 1, 0, 1, 0 }; */
+vector pixel vp = { 1, 2, 3, 4, 5, 6, 7, 8 };
+vector bool int vbi = { 1, 0, 1, 0 };
vector int vi = { -1, 2, -3, 4 };
vector unsigned int vui = { 1, 2, 3, 4 };
vector float vf = { -1.5, 2.5, -3.5, 4.5 };
-// TODO: uncomment
-/* vector bool char res_vbc; */
+vector bool char res_vbc;
vector signed char res_vsc;
vector unsigned char res_vuc;
-// TODO: uncomment
-/* vector bool short res_vbs; */
+vector bool short res_vbs;
vector short res_vs;
vector unsigned short res_vus;
-// TODO: uncomment
vector pixel res_vp;
-// TODO: uncomment
-/* vector bool int res_vbi; */
+vector bool int res_vbi;
vector int res_vi;
vector unsigned int res_vui;
vector float res_vf;
@@ -40,8 +34,8 @@ float param_f;
int res_i;
-int test1() {
-// CHECK: define i32 @test1
+// CHECK: define void @test1
+void test1() {
/* vec_abs */
vsc = vec_abs(vsc); // CHECK: sub nsw <16 x i8> zeroinitializer
@@ -67,18 +61,42 @@ int test1() {
/* vec_add */
res_vsc = vec_add(vsc, vsc); // CHECK: add nsw <16 x i8>
+ res_vsc = vec_add(vbc, vsc); // CHECK: add nsw <16 x i8>
+ res_vsc = vec_add(vsc, vbc); // CHECK: add nsw <16 x i8>
res_vuc = vec_add(vuc, vuc); // CHECK: add <16 x i8>
+ res_vuc = vec_add(vbc, vuc); // CHECK: add <16 x i8>
+ res_vuc = vec_add(vuc, vbc); // CHECK: add <16 x i8>
res_vs = vec_add(vs, vs); // CHECK: add nsw <8 x i16>
+ res_vs = vec_add(vbs, vs); // CHECK: add nsw <8 x i16>
+ res_vs = vec_add(vs, vbs); // CHECK: add nsw <8 x i16>
res_vus = vec_add(vus, vus); // CHECK: add <8 x i16>
+ res_vus = vec_add(vbs, vus); // CHECK: add <8 x i16>
+ res_vus = vec_add(vus, vbs); // CHECK: add <8 x i16>
res_vi = vec_add(vi, vi); // CHECK: add nsw <4 x i32>
+ res_vi = vec_add(vbi, vi); // CHECK: add nsw <4 x i32>
+ res_vi = vec_add(vi, vbi); // CHECK: add nsw <4 x i32>
res_vui = vec_add(vui, vui); // CHECK: add <4 x i32>
+ res_vui = vec_add(vbi, vui); // CHECK: add <4 x i32>
+ res_vui = vec_add(vui, vbi); // CHECK: add <4 x i32>
res_vf = vec_add(vf, vf); // CHECK: fadd <4 x float>
res_vsc = vec_vaddubm(vsc, vsc); // CHECK: add nsw <16 x i8>
+ res_vsc = vec_vaddubm(vbc, vsc); // CHECK: add nsw <16 x i8>
+ res_vsc = vec_vaddubm(vsc, vbc); // CHECK: add nsw <16 x i8>
res_vuc = vec_vaddubm(vuc, vuc); // CHECK: add <16 x i8>
+ res_vuc = vec_vaddubm(vbc, vuc); // CHECK: add <16 x i8>
+ res_vuc = vec_vaddubm(vuc, vbc); // CHECK: add <16 x i8>
res_vs = vec_vadduhm(vs, vs); // CHECK: add nsw <8 x i16>
+ res_vs = vec_vadduhm(vbs, vs); // CHECK: add nsw <8 x i16>
+ res_vs = vec_vadduhm(vs, vbs); // CHECK: add nsw <8 x i16>
res_vus = vec_vadduhm(vus, vus); // CHECK: add <8 x i16>
+ res_vus = vec_vadduhm(vbs, vus); // CHECK: add <8 x i16>
+ res_vus = vec_vadduhm(vus, vbs); // CHECK: add <8 x i16>
res_vi = vec_vadduwm(vi, vi); // CHECK: add nsw <4 x i32>
+ res_vi = vec_vadduwm(vbi, vi); // CHECK: add nsw <4 x i32>
+ res_vi = vec_vadduwm(vi, vbi); // CHECK: add nsw <4 x i32>
res_vui = vec_vadduwm(vui, vui); // CHECK: add <4 x i32>
+ res_vui = vec_vadduwm(vbi, vui); // CHECK: add <4 x i32>
+ res_vui = vec_vadduwm(vui, vbi); // CHECK: add <4 x i32>
res_vf = vec_vaddfp(vf, vf); // CHECK: fadd <4 x float>
/* vec_addc */
@@ -87,80 +105,231 @@ int test1() {
/* vec_adds */
res_vsc = vec_adds(vsc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
+ res_vsc = vec_adds(vbc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
+ res_vsc = vec_adds(vsc, vbc); // CHECK: @llvm.ppc.altivec.vaddsbs
res_vuc = vec_adds(vuc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
+ res_vuc = vec_adds(vbc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
+ res_vuc = vec_adds(vuc, vbc); // CHECK: @llvm.ppc.altivec.vaddubs
res_vs = vec_adds(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
+ res_vs = vec_adds(vbs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
+ res_vs = vec_adds(vs, vbs); // CHECK: @llvm.ppc.altivec.vaddshs
res_vus = vec_adds(vus, vus); // CHECK: @llvm.ppc.altivec.vadduhs
+ res_vus = vec_adds(vbs, vus); // CHECK: @llvm.ppc.altivec.vadduhs
+ res_vus = vec_adds(vus, vbs); // CHECK: @llvm.ppc.altivec.vadduhs
res_vi = vec_adds(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
+ res_vi = vec_adds(vbi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
+ res_vi = vec_adds(vi, vbi); // CHECK: @llvm.ppc.altivec.vaddsws
res_vui = vec_adds(vui, vui); // CHECK: @llvm.ppc.altivec.vadduws
+ res_vui = vec_adds(vbi, vui); // CHECK: @llvm.ppc.altivec.vadduws
+ res_vui = vec_adds(vui, vbi); // CHECK: @llvm.ppc.altivec.vadduws
res_vsc = vec_vaddsbs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
+ res_vsc = vec_vaddsbs(vbc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs
+ res_vsc = vec_vaddsbs(vsc, vbc); // CHECK: @llvm.ppc.altivec.vaddsbs
res_vuc = vec_vaddubs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
+ res_vuc = vec_vaddubs(vbc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs
+ res_vuc = vec_vaddubs(vuc, vbc); // CHECK: @llvm.ppc.altivec.vaddubs
res_vs = vec_vaddshs(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
+ res_vs = vec_vaddshs(vbs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
+ res_vs = vec_vaddshs(vs, vbs); // CHECK: @llvm.ppc.altivec.vaddshs
res_vus = vec_vadduhs(vus, vus); // CHECK: @llvm.ppc.altivec.vadduhs
+ res_vus = vec_vadduhs(vbs, vus); // CHECK: @llvm.ppc.altivec.vadduhs
+ res_vus = vec_vadduhs(vus, vbs); // CHECK: @llvm.ppc.altivec.vadduhs
res_vi = vec_vaddsws(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
+ res_vi = vec_vaddsws(vbi, vi); // CHECK: @llvm.ppc.altivec.vaddsws
+ res_vi = vec_vaddsws(vi, vbi); // CHECK: @llvm.ppc.altivec.vaddsws
res_vui = vec_vadduws(vui, vui); // CHECK: @llvm.ppc.altivec.vadduws
+ res_vui = vec_vadduws(vbi, vui); // CHECK: @llvm.ppc.altivec.vadduws
+ res_vui = vec_vadduws(vui, vbi); // CHECK: @llvm.ppc.altivec.vadduws
/* vec_and */
res_vsc = vec_and(vsc, vsc); // CHECK: and <16 x i8>
+ res_vsc = vec_and(vbc, vsc); // CHECK: and <16 x i8>
+ res_vsc = vec_and(vsc, vbc); // CHECK: and <16 x i8>
res_vuc = vec_and(vuc, vuc); // CHECK: and <16 x i8>
+ res_vuc = vec_and(vbc, vuc); // CHECK: and <16 x i8>
+ res_vuc = vec_and(vuc, vbc); // CHECK: and <16 x i8>
+ res_vbc = vec_and(vbc, vbc); // CHECK: and <16 x i8>
res_vs = vec_and(vs, vs); // CHECK: and <8 x i16>
+ res_vs = vec_and(vbs, vs); // CHECK: and <8 x i16>
+ res_vs = vec_and(vs, vbs); // CHECK: and <8 x i16>
res_vus = vec_and(vus, vus); // CHECK: and <8 x i16>
+ res_vus = vec_and(vbs, vus); // CHECK: and <8 x i16>
+ res_vus = vec_and(vus, vbs); // CHECK: and <8 x i16>
+ res_vbs = vec_and(vbs, vbs); // CHECK: and <8 x i16>
res_vi = vec_and(vi, vi); // CHECK: and <4 x i32>
+ res_vi = vec_and(vbi, vi); // CHECK: and <4 x i32>
+ res_vi = vec_and(vi, vbi); // CHECK: and <4 x i32>
res_vui = vec_and(vui, vui); // CHECK: and <4 x i32>
+ res_vui = vec_and(vbi, vui); // CHECK: and <4 x i32>
+ res_vui = vec_and(vui, vbi); // CHECK: and <4 x i32>
+ res_vbi = vec_and(vbi, vbi); // CHECK: and <4 x i32>
res_vsc = vec_vand(vsc, vsc); // CHECK: and <16 x i8>
+ res_vsc = vec_vand(vbc, vsc); // CHECK: and <16 x i8>
+ res_vsc = vec_vand(vsc, vbc); // CHECK: and <16 x i8>
res_vuc = vec_vand(vuc, vuc); // CHECK: and <16 x i8>
+ res_vuc = vec_vand(vbc, vuc); // CHECK: and <16 x i8>
+ res_vuc = vec_vand(vuc, vbc); // CHECK: and <16 x i8>
+ res_vbc = vec_vand(vbc, vbc); // CHECK: and <16 x i8>
res_vs = vec_vand(vs, vs); // CHECK: and <8 x i16>
+ res_vs = vec_vand(vbs, vs); // CHECK: and <8 x i16>
+ res_vs = vec_vand(vs, vbs); // CHECK: and <8 x i16>
res_vus = vec_vand(vus, vus); // CHECK: and <8 x i16>
+ res_vus = vec_vand(vbs, vus); // CHECK: and <8 x i16>
+ res_vus = vec_vand(vus, vbs); // CHECK: and <8 x i16>
+ res_vbs = vec_vand(vbs, vbs); // CHECK: and <8 x i16>
res_vi = vec_vand(vi, vi); // CHECK: and <4 x i32>
+ res_vi = vec_vand(vbi, vi); // CHECK: and <4 x i32>
+ res_vi = vec_vand(vi, vbi); // CHECK: and <4 x i32>
res_vui = vec_vand(vui, vui); // CHECK: and <4 x i32>
+ res_vui = vec_vand(vbi, vui); // CHECK: and <4 x i32>
+ res_vui = vec_vand(vui, vbi); // CHECK: and <4 x i32>
+ res_vbi = vec_vand(vbi, vbi); // CHECK: and <4 x i32>
/* vec_andc */
res_vsc = vec_andc(vsc, vsc); // CHECK: xor <16 x i8>
// CHECK: and <16 x i8>
+ res_vsc = vec_andc(vbc, vsc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+
+ res_vsc = vec_andc(vsc, vbc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+
res_vuc = vec_andc(vuc, vuc); // CHECK: xor <16 x i8>
// CHECK: and <16 x i8>
+ res_vuc = vec_andc(vbc, vuc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+
+ res_vuc = vec_andc(vuc, vbc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+
+ res_vbc = vec_andc(vbc, vbc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+
res_vs = vec_andc(vs, vs); // CHECK: xor <8 x i16>
// CHECK: and <8 x i16>
+ res_vs = vec_andc(vbs, vs); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+
+ res_vs = vec_andc(vs, vbs); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+
res_vus = vec_andc(vus, vus); // CHECK: xor <8 x i16>
// CHECK: and <8 x i16>
+ res_vus = vec_andc(vbs, vus); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+
+ res_vus = vec_andc(vus, vbs); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+
+ res_vbs = vec_andc(vbs, vbs); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+
res_vi = vec_andc(vi, vi); // CHECK: xor <4 x i32>
// CHECK: and <4 x i32>
+ res_vi = vec_andc(vbi, vi); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+
+ res_vi = vec_andc(vi, vbi); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+
res_vui = vec_andc(vui, vui); // CHECK: xor <4 x i32>
// CHECK: and <4 x i32>
+ res_vui = vec_andc(vbi, vui); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+
+ res_vui = vec_andc(vui, vbi); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+
res_vf = vec_andc(vf, vf); // CHECK: xor <4 x i32>
// CHECK: and <4 x i32>
+ res_vf = vec_andc(vbi, vf); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+
+ res_vf = vec_andc(vf, vbi); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+
res_vsc = vec_vandc(vsc, vsc); // CHECK: xor <16 x i8>
// CHECK: and <16 x i8>
+ res_vsc = vec_vandc(vbc, vsc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+
+ res_vsc = vec_vandc(vsc, vbc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+
res_vuc = vec_vandc(vuc, vuc); // CHECK: xor <16 x i8>
// CHECK: and <16 x i8>
+ res_vuc = vec_vandc(vbc, vuc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+
+ res_vuc = vec_vandc(vuc, vbc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+
+ res_vbc = vec_vandc(vbc, vbc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+
res_vs = vec_vandc(vs, vs); // CHECK: xor <8 x i16>
// CHECK: and <8 x i16>
+ res_vs = vec_vandc(vbs, vs); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+
+ res_vs = vec_vandc(vs, vbs); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+
res_vus = vec_vandc(vus, vus); // CHECK: xor <8 x i16>
// CHECK: and <8 x i16>
+ res_vus = vec_vandc(vbs, vus); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+
+ res_vus = vec_vandc(vus, vbs); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+
+ res_vbs = vec_vandc(vbs, vbs); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+
res_vi = vec_vandc(vi, vi); // CHECK: xor <4 x i32>
// CHECK: and <4 x i32>
+ res_vi = vec_vandc(vbi, vi); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+
+ res_vi = vec_vandc(vi, vbi); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+
res_vui = vec_vandc(vui, vui); // CHECK: xor <4 x i32>
// CHECK: and <4 x i32>
+ res_vui = vec_vandc(vbi, vui); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+
+ res_vui = vec_vandc(vui, vbi); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+
res_vf = vec_vandc(vf, vf); // CHECK: xor <4 x i32>
// CHECK: and <4 x i32>
+
+ res_vf = vec_vandc(vbi, vf); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+
+ res_vf = vec_vandc(vf, vbi); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+
}
-// CHECK: i32 @test2
-int test2() {
+// CHECK: define void @test2
+void test2() {
/* vec_avg */
- res_vsc = vec_avg(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vavgsb
+ res_vsc = vec_avg(vsc, vsc); // CHECK: @llvm.ppc.altivec.vavgsb
res_vuc = vec_avg(vuc, vuc); // CHECK: @llvm.ppc.altivec.vavgub
res_vs = vec_avg(vs, vs); // CHECK: @llvm.ppc.altivec.vavgsh
res_vus = vec_avg(vus, vus); // CHECK: @llvm.ppc.altivec.vavguh
@@ -182,53 +351,52 @@ int test2() {
res_vi = vec_vcmpbfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp
/* vec_cmpeq */
- vsc = vec_cmpeq(vsc, vsc); // CHCK: call {{.*}}@llvm.ppc.altivec.vcmpequb
- vuc = vec_cmpeq(vuc, vuc); // CHCK: @llvm.ppc.altivec.vcmpequb
- vs = vec_cmpeq(vs, vs); // CHCK: @llvm.ppc.altivec.vcmpequh
- vs = vec_cmpeq(vus, vus); // CHCK: @llvm.ppc.altivec.vcmpequh
- vi = vec_cmpeq(vi, vi); // CHCK: @llvm.ppc.altivec.vcmpequw
- vui = vec_cmpeq(vui, vui); // CHCK: @llvm.ppc.altivec.vcmpequw
- vf = vec_cmpeq(vf, vf); // CHCK: @llvm.ppc.altivec.vcmpeqfp
+ res_vbc = vec_cmpeq(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb
+ res_vbc = vec_cmpeq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb
+ res_vbs = vec_cmpeq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh
+ res_vbs = vec_cmpeq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh
+ res_vbi = vec_cmpeq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw
+ res_vbi = vec_cmpeq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw
+ res_vbi = vec_cmpeq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp
/* vec_cmpge */
- vf = vec_cmpge(vf, vf); // CHCK: @llvm.ppc.altivec.vcmpgefp
- vf = vec_vcmpgefp(vf, vf); // CHCK: call {{.*}}@llvm.ppc.altivec.vcmpgefp
-
+ res_vbi = vec_cmpge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp
+ res_vbi = vec_vcmpgefp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp
}
-// CHECK: define i32 @test5
-int test5() {
+// CHECK: define void @test5
+void test5() {
/* vec_cmpgt */
- vsc = vec_cmpgt(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsb
- vuc = vec_cmpgt(vuc, vuc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtub
- vs = vec_cmpgt(vs, vs); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsh
- vus = vec_cmpgt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
- vi = vec_cmpgt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
- vui = vec_cmpgt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
- vf = vec_cmpgt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp
- vsc = vec_vcmpgtsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb
- vuc = vec_vcmpgtub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub
- vs = vec_vcmpgtsh(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh
- vus = vec_vcmpgtuh(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
- vi = vec_vcmpgtsw(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
- vui = vec_vcmpgtuw(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
- vf = vec_vcmpgtfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp
+ res_vbc = vec_cmpgt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb
+ res_vbc = vec_cmpgt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub
+ res_vbs = vec_cmpgt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh
+ res_vbs = vec_cmpgt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
+ res_vbi = vec_cmpgt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
+ res_vbi = vec_cmpgt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
+ res_vbi = vec_cmpgt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp
+ res_vbc = vec_vcmpgtsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb
+ res_vbc = vec_vcmpgtub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub
+ res_vbs = vec_vcmpgtsh(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh
+ res_vbs = vec_vcmpgtuh(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
+ res_vbi = vec_vcmpgtsw(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
+ res_vbi = vec_vcmpgtuw(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
+ res_vbi = vec_vcmpgtfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp
/* vec_cmple */
- vf = vec_cmple(vf, vf); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgefp
+ res_vbi = vec_cmple(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp
}
-// CHECK: define i32 @test6
-int test6() {
+// CHECK: define void @test6
+void test6() {
/* vec_cmplt */
- vsc =vec_cmplt(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsb
- vsc =vec_cmplt(vuc, vuc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtub
- vs = vec_cmplt(vs, vs); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsh
- vs = vec_cmplt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
- vi = vec_cmplt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
- vui = vec_cmplt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
- vf = vec_cmplt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp
+ res_vbc = vec_cmplt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb
+ res_vbc = vec_cmplt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub
+ res_vbs = vec_cmplt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh
+ res_vbs = vec_cmplt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh
+ res_vbi = vec_cmplt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw
+ res_vbi = vec_cmplt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw
+ res_vbi = vec_cmplt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp
/* vec_ctf */
res_vf = vec_ctf(vi, param_i); // CHECK: @llvm.ppc.altivec.vcfsx
@@ -275,28 +443,36 @@ int test6() {
res_vsc = vec_ld(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
res_vuc = vec_ld(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
res_vuc = vec_ld(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
+ res_vbc = vec_ld(0, &vbc); // CHECK: @llvm.ppc.altivec.lvx
res_vs = vec_ld(0, &vs); // CHECK: @llvm.ppc.altivec.lvx
res_vs = vec_ld(0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
res_vus = vec_ld(0, &vus); // CHECK: @llvm.ppc.altivec.lvx
res_vus = vec_ld(0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
+ res_vbs = vec_ld(0, &vbs); // CHECK: @llvm.ppc.altivec.lvx
+ res_vp = vec_ld(0, &vp); // CHECK: @llvm.ppc.altivec.lvx
res_vi = vec_ld(0, &vi); // CHECK: @llvm.ppc.altivec.lvx
res_vi = vec_ld(0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
res_vui = vec_ld(0, &vui); // CHECK: @llvm.ppc.altivec.lvx
res_vui = vec_ld(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
+ res_vbi = vec_ld(0, &vbi); // CHECK: @llvm.ppc.altivec.lvx
res_vf = vec_ld(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
res_vf = vec_ld(0, &param_f); // CHECK: @llvm.ppc.altivec.lvx
res_vsc = vec_lvx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx
res_vsc = vec_lvx(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx
res_vuc = vec_lvx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx
res_vuc = vec_lvx(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx
+ res_vbc = vec_lvx(0, &vbc); // CHECK: @llvm.ppc.altivec.lvx
res_vs = vec_lvx(0, &vs); // CHECK: @llvm.ppc.altivec.lvx
res_vs = vec_lvx(0, &param_s); // CHECK: @llvm.ppc.altivec.lvx
res_vus = vec_lvx(0, &vus); // CHECK: @llvm.ppc.altivec.lvx
res_vus = vec_lvx(0, &param_us); // CHECK: @llvm.ppc.altivec.lvx
+ res_vbs = vec_lvx(0, &vbs); // CHECK: @llvm.ppc.altivec.lvx
+ res_vp = vec_lvx(0, &vp); // CHECK: @llvm.ppc.altivec.lvx
res_vi = vec_lvx(0, &vi); // CHECK: @llvm.ppc.altivec.lvx
res_vi = vec_lvx(0, &param_i); // CHECK: @llvm.ppc.altivec.lvx
res_vui = vec_lvx(0, &vui); // CHECK: @llvm.ppc.altivec.lvx
res_vui = vec_lvx(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx
+ res_vbi = vec_lvx(0, &vbi); // CHECK: @llvm.ppc.altivec.lvx
res_vf = vec_lvx(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
res_vf = vec_lvx(0, &param_f); // CHECK: @llvm.ppc.altivec.lvx
@@ -321,28 +497,36 @@ int test6() {
res_vsc = vec_ldl(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvxl
res_vuc = vec_ldl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl
res_vuc = vec_ldl(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvxl
+ res_vbc = vec_ldl(0, &vbc); // CHECK: @llvm.ppc.altivec.lvxl
res_vs = vec_ldl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl
res_vs = vec_ldl(0, &param_s); // CHECK: @llvm.ppc.altivec.lvxl
res_vus = vec_ldl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl
res_vus = vec_ldl(0, &param_us); // CHECK: @llvm.ppc.altivec.lvxl
+ res_vbs = vec_ldl(0, &vbs); // CHECK: @llvm.ppc.altivec.lvxl
+ res_vp = vec_ldl(0, &vp); // CHECK: @llvm.ppc.altivec.lvxl
res_vi = vec_ldl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl
res_vi = vec_ldl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvxl
res_vui = vec_ldl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl
res_vui = vec_ldl(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvxl
+ res_vbi = vec_ldl(0, &vbi); // CHECK: @llvm.ppc.altivec.lvxl
res_vf = vec_ldl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
res_vf = vec_ldl(0, &param_f); // CHECK: @llvm.ppc.altivec.lvxl
res_vsc = vec_lvxl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl
res_vsc = vec_lvxl(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvxl
res_vuc = vec_lvxl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl
+ res_vbc = vec_lvxl(0, &vbc); // CHECK: @llvm.ppc.altivec.lvxl
res_vuc = vec_lvxl(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvxl
res_vs = vec_lvxl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl
res_vs = vec_lvxl(0, &param_s); // CHECK: @llvm.ppc.altivec.lvxl
res_vus = vec_lvxl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl
res_vus = vec_lvxl(0, &param_us); // CHECK: @llvm.ppc.altivec.lvxl
+ res_vbs = vec_lvxl(0, &vbs); // CHECK: @llvm.ppc.altivec.lvxl
+ res_vp = vec_lvxl(0, &vp); // CHECK: @llvm.ppc.altivec.lvxl
res_vi = vec_lvxl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl
res_vi = vec_lvxl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvxl
res_vui = vec_lvxl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl
res_vui = vec_lvxl(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvxl
+ res_vbi = vec_lvxl(0, &vbi); // CHECK: @llvm.ppc.altivec.lvxl
res_vf = vec_lvxl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
res_vf = vec_lvxl(0, &param_f); // CHECK: @llvm.ppc.altivec.lvxl
@@ -366,50 +550,90 @@ int test6() {
/* vec_max */
res_vsc = vec_max(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb
+ res_vsc = vec_max(vbc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb
+ res_vsc = vec_max(vsc, vbc); // CHECK: @llvm.ppc.altivec.vmaxsb
res_vuc = vec_max(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub
+ res_vuc = vec_max(vbc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub
+ res_vuc = vec_max(vuc, vbc); // CHECK: @llvm.ppc.altivec.vmaxub
res_vs = vec_max(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
+ res_vs = vec_max(vbs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
+ res_vs = vec_max(vs, vbs); // CHECK: @llvm.ppc.altivec.vmaxsh
res_vus = vec_max(vus, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
+ res_vus = vec_max(vbs, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
+ res_vus = vec_max(vus, vbs); // CHECK: @llvm.ppc.altivec.vmaxuh
res_vi = vec_max(vi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw
+ res_vi = vec_max(vbi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw
+ res_vi = vec_max(vi, vbi); // CHECK: @llvm.ppc.altivec.vmaxsw
res_vui = vec_max(vui, vui); // CHECK: @llvm.ppc.altivec.vmaxuw
+ res_vui = vec_max(vbi, vui); // CHECK: @llvm.ppc.altivec.vmaxuw
+ res_vui = vec_max(vui, vbi); // CHECK: @llvm.ppc.altivec.vmaxuw
res_vf = vec_max(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp
res_vsc = vec_vmaxsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb
+ res_vsc = vec_vmaxsb(vbc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb
+ res_vsc = vec_vmaxsb(vsc, vbc); // CHECK: @llvm.ppc.altivec.vmaxsb
res_vuc = vec_vmaxub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub
+ res_vuc = vec_vmaxub(vbc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub
+ res_vuc = vec_vmaxub(vuc, vbc); // CHECK: @llvm.ppc.altivec.vmaxub
res_vs = vec_vmaxsh(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
+ res_vs = vec_vmaxsh(vbs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
+ res_vs = vec_vmaxsh(vs, vbs); // CHECK: @llvm.ppc.altivec.vmaxsh
res_vus = vec_vmaxuh(vus, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
+ res_vus = vec_vmaxuh(vbs, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
+ res_vus = vec_vmaxuh(vus, vbs); // CHECK: @llvm.ppc.altivec.vmaxuh
res_vi = vec_vmaxsw(vi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw
+ res_vi = vec_vmaxsw(vbi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw
+ res_vi = vec_vmaxsw(vi, vbi); // CHECK: @llvm.ppc.altivec.vmaxsw
res_vui = vec_vmaxuw(vui, vui); // CHECK: @llvm.ppc.altivec.vmaxuw
+ res_vui = vec_vmaxuw(vbi, vui); // CHECK: @llvm.ppc.altivec.vmaxuw
+ res_vui = vec_vmaxuw(vui, vbi); // CHECK: @llvm.ppc.altivec.vmaxuw
res_vf = vec_vmaxfp(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp
/* vec_mergeh */
res_vsc = vec_mergeh(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
res_vuc = vec_mergeh(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbc = vec_mergeh(vbc, vbc); // CHECK: @llvm.ppc.altivec.vperm
res_vs = vec_mergeh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
+ res_vp = vec_mergeh(vp, vp); // CHECK: @llvm.ppc.altivec.vperm
res_vus = vec_mergeh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbs = vec_mergeh(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
res_vi = vec_mergeh(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
res_vui = vec_mergeh(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbi = vec_mergeh(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
res_vf = vec_mergeh(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
res_vsc = vec_vmrghb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
res_vuc = vec_vmrghb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbc = vec_vmrghb(vbc, vbc); // CHECK: @llvm.ppc.altivec.vperm
res_vs = vec_vmrghh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
+ res_vp = vec_vmrghh(vp, vp); // CHECK: @llvm.ppc.altivec.vperm
res_vus = vec_vmrghh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbs = vec_vmrghh(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
res_vi = vec_vmrghw(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
res_vui = vec_vmrghw(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbi = vec_vmrghw(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
res_vf = vec_vmrghw(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
/* vec_mergel */
res_vsc = vec_mergel(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
res_vuc = vec_mergel(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbc = vec_mergel(vbc, vbc); // CHECK: @llvm.ppc.altivec.vperm
res_vs = vec_mergel(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
+ res_vp = vec_mergeh(vp, vp); // CHECK: @llvm.ppc.altivec.vperm
res_vus = vec_mergel(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbs = vec_mergel(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
res_vi = vec_mergel(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
res_vui = vec_mergel(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbi = vec_mergel(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
res_vf = vec_mergel(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
res_vsc = vec_vmrglb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm
res_vuc = vec_vmrglb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbc = vec_vmrglb(vbc, vbc); // CHECK: @llvm.ppc.altivec.vperm
res_vs = vec_vmrglh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
+ res_vp = vec_vmrglh(vp, vp); // CHECK: @llvm.ppc.altivec.vperm
res_vus = vec_vmrglh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbs = vec_vmrglh(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
res_vi = vec_vmrglw(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
res_vui = vec_vmrglw(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbi = vec_vmrglw(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
res_vf = vec_vmrglw(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
/* vec_mfvscr */
@@ -417,18 +641,42 @@ int test6() {
/* vec_min */
res_vsc = vec_min(vsc, vsc); // CHECK: @llvm.ppc.altivec.vminsb
+ res_vsc = vec_min(vbc, vsc); // CHECK: @llvm.ppc.altivec.vminsb
+ res_vsc = vec_min(vsc, vbc); // CHECK: @llvm.ppc.altivec.vminsb
res_vuc = vec_min(vuc, vuc); // CHECK: @llvm.ppc.altivec.vminub
+ res_vuc = vec_min(vbc, vuc); // CHECK: @llvm.ppc.altivec.vminub
+ res_vuc = vec_min(vuc, vbc); // CHECK: @llvm.ppc.altivec.vminub
res_vs = vec_min(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh
+ res_vs = vec_min(vbs, vs); // CHECK: @llvm.ppc.altivec.vminsh
+ res_vs = vec_min(vs, vbs); // CHECK: @llvm.ppc.altivec.vminsh
res_vus = vec_min(vus, vus); // CHECK: @llvm.ppc.altivec.vminuh
+ res_vus = vec_min(vbs, vus); // CHECK: @llvm.ppc.altivec.vminuh
+ res_vus = vec_min(vus, vbs); // CHECK: @llvm.ppc.altivec.vminuh
res_vi = vec_min(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw
+ res_vi = vec_min(vbi, vi); // CHECK: @llvm.ppc.altivec.vminsw
+ res_vi = vec_min(vi, vbi); // CHECK: @llvm.ppc.altivec.vminsw
res_vui = vec_min(vui, vui); // CHECK: @llvm.ppc.altivec.vminuw
+ res_vui = vec_min(vbi, vui); // CHECK: @llvm.ppc.altivec.vminuw
+ res_vui = vec_min(vui, vbi); // CHECK: @llvm.ppc.altivec.vminuw
res_vf = vec_min(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp
res_vsc = vec_vminsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vminsb
+ res_vsc = vec_vminsb(vbc, vsc); // CHECK: @llvm.ppc.altivec.vminsb
+ res_vsc = vec_vminsb(vsc, vbc); // CHECK: @llvm.ppc.altivec.vminsb
res_vuc = vec_vminub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vminub
+ res_vuc = vec_vminub(vbc, vuc); // CHECK: @llvm.ppc.altivec.vminub
+ res_vuc = vec_vminub(vuc, vbc); // CHECK: @llvm.ppc.altivec.vminub
res_vs = vec_vminsh(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh
+ res_vs = vec_vminsh(vbs, vs); // CHECK: @llvm.ppc.altivec.vminsh
+ res_vs = vec_vminsh(vs, vbs); // CHECK: @llvm.ppc.altivec.vminsh
res_vus = vec_vminuh(vus, vus); // CHECK: @llvm.ppc.altivec.vminuh
+ res_vus = vec_vminuh(vbs, vus); // CHECK: @llvm.ppc.altivec.vminuh
+ res_vus = vec_vminuh(vus, vbs); // CHECK: @llvm.ppc.altivec.vminuh
res_vi = vec_vminsw(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw
+ res_vi = vec_vminsw(vbi, vi); // CHECK: @llvm.ppc.altivec.vminsw
+ res_vi = vec_vminsw(vi, vbi); // CHECK: @llvm.ppc.altivec.vminsw
res_vui = vec_vminuw(vui, vui); // CHECK: @llvm.ppc.altivec.vminuw
+ res_vui = vec_vminuw(vbi, vui); // CHECK: @llvm.ppc.altivec.vminuw
+ res_vui = vec_vminuw(vui, vbi); // CHECK: @llvm.ppc.altivec.vminuw
res_vf = vec_vminfp(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp
/* vec_mladd */
@@ -466,6 +714,15 @@ int test6() {
/* vec_mtvscr */
vec_mtvscr(vsc); // CHECK: @llvm.ppc.altivec.mtvscr
+ vec_mtvscr(vuc); // CHECK: @llvm.ppc.altivec.mtvscr
+ vec_mtvscr(vbc); // CHECK: @llvm.ppc.altivec.mtvscr
+ vec_mtvscr(vs); // CHECK: @llvm.ppc.altivec.mtvscr
+ vec_mtvscr(vus); // CHECK: @llvm.ppc.altivec.mtvscr
+ vec_mtvscr(vbs); // CHECK: @llvm.ppc.altivec.mtvscr
+ vec_mtvscr(vp); // CHECK: @llvm.ppc.altivec.mtvscr
+ vec_mtvscr(vi); // CHECK: @llvm.ppc.altivec.mtvscr
+ vec_mtvscr(vui); // CHECK: @llvm.ppc.altivec.mtvscr
+ vec_mtvscr(vbi); // CHECK: @llvm.ppc.altivec.mtvscr
/* vec_mule */
res_vs = vec_mule(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulesb
@@ -498,18 +755,27 @@ int test6() {
res_vuc = vec_nor(vuc, vuc); // CHECK: or <16 x i8>
// CHECK: xor <16 x i8>
+ res_vuc = vec_nor(vbc, vbc); // CHECK: or <16 x i8>
+ // CHECK: xor <16 x i8>
+
res_vs = vec_nor(vs, vs); // CHECK: or <8 x i16>
// CHECK: xor <8 x i16>
res_vus = vec_nor(vus, vus); // CHECK: or <8 x i16>
// CHECK: xor <8 x i16>
+ res_vus = vec_nor(vbs, vbs); // CHECK: or <8 x i16>
+ // CHECK: xor <8 x i16>
+
res_vi = vec_nor(vi, vi); // CHECK: or <4 x i32>
// CHECK: xor <4 x i32>
res_vui = vec_nor(vui, vui); // CHECK: or <4 x i32>
// CHECK: xor <4 x i32>
+ res_vui = vec_nor(vbi, vbi); // CHECK: or <4 x i32>
+ // CHECK: xor <4 x i32>
+
res_vf = vec_nor(vf, vf); // CHECK: or <4 x i32>
// CHECK: xor <4 x i32>
@@ -519,46 +785,93 @@ int test6() {
res_vuc = vec_vnor(vuc, vuc); // CHECK: or <16 x i8>
// CHECK: xor <16 x i8>
+ res_vuc = vec_vnor(vbc, vbc); // CHECK: or <16 x i8>
+ // CHECK: xor <16 x i8>
+
res_vs = vec_vnor(vs, vs); // CHECK: or <8 x i16>
// CHECK: xor <8 x i16>
res_vus = vec_vnor(vus, vus); // CHECK: or <8 x i16>
// CHECK: xor <8 x i16>
+ res_vus = vec_vnor(vbs, vbs); // CHECK: or <8 x i16>
+ // CHECK: xor <8 x i16>
+
res_vi = vec_vnor(vi, vi); // CHECK: or <4 x i32>
// CHECK: xor <4 x i32>
res_vui = vec_vnor(vui, vui); // CHECK: or <4 x i32>
// CHECK: xor <4 x i32>
+ res_vui = vec_vnor(vbi, vbi); // CHECK: or <4 x i32>
+ // CHECK: xor <4 x i32>
+
res_vf = vec_vnor(vf, vf); // CHECK: or <4 x i32>
// CHECK: xor <4 x i32>
/* vec_or */
res_vsc = vec_or(vsc, vsc); // CHECK: or <16 x i8>
+ res_vsc = vec_or(vbc, vsc); // CHECK: or <16 x i8>
+ res_vsc = vec_or(vsc, vbc); // CHECK: or <16 x i8>
res_vuc = vec_or(vuc, vuc); // CHECK: or <16 x i8>
+ res_vuc = vec_or(vbc, vuc); // CHECK: or <16 x i8>
+ res_vuc = vec_or(vuc, vbc); // CHECK: or <16 x i8>
+ res_vbc = vec_or(vbc, vbc); // CHECK: or <16 x i8>
res_vs = vec_or(vs, vs); // CHECK: or <8 x i16>
+ res_vs = vec_or(vbs, vs); // CHECK: or <8 x i16>
+ res_vs = vec_or(vs, vbs); // CHECK: or <8 x i16>
res_vus = vec_or(vus, vus); // CHECK: or <8 x i16>
+ res_vus = vec_or(vbs, vus); // CHECK: or <8 x i16>
+ res_vus = vec_or(vus, vbs); // CHECK: or <8 x i16>
+ res_vbs = vec_or(vbs, vbs); // CHECK: or <8 x i16>
res_vi = vec_or(vi, vi); // CHECK: or <4 x i32>
+ res_vi = vec_or(vbi, vi); // CHECK: or <4 x i32>
+ res_vi = vec_or(vi, vbi); // CHECK: or <4 x i32>
res_vui = vec_or(vui, vui); // CHECK: or <4 x i32>
+ res_vui = vec_or(vbi, vui); // CHECK: or <4 x i32>
+ res_vui = vec_or(vui, vbi); // CHECK: or <4 x i32>
+ res_vbi = vec_or(vbi, vbi); // CHECK: or <4 x i32>
res_vf = vec_or(vf, vf); // CHECK: or <4 x i32>
+ res_vf = vec_or(vbi, vf); // CHECK: or <4 x i32>
+ res_vf = vec_or(vf, vbi); // CHECK: or <4 x i32>
res_vsc = vec_vor(vsc, vsc); // CHECK: or <16 x i8>
+ res_vsc = vec_vor(vbc, vsc); // CHECK: or <16 x i8>
+ res_vsc = vec_vor(vsc, vbc); // CHECK: or <16 x i8>
res_vuc = vec_vor(vuc, vuc); // CHECK: or <16 x i8>
+ res_vuc = vec_vor(vbc, vuc); // CHECK: or <16 x i8>
+ res_vuc = vec_vor(vuc, vbc); // CHECK: or <16 x i8>
+ res_vbc = vec_vor(vbc, vbc); // CHECK: or <16 x i8>
res_vs = vec_vor(vs, vs); // CHECK: or <8 x i16>
+ res_vs = vec_vor(vbs, vs); // CHECK: or <8 x i16>
+ res_vs = vec_vor(vs, vbs); // CHECK: or <8 x i16>
res_vus = vec_vor(vus, vus); // CHECK: or <8 x i16>
+ res_vus = vec_vor(vbs, vus); // CHECK: or <8 x i16>
+ res_vus = vec_vor(vus, vbs); // CHECK: or <8 x i16>
+ res_vbs = vec_vor(vbs, vbs); // CHECK: or <8 x i16>
res_vi = vec_vor(vi, vi); // CHECK: or <4 x i32>
+ res_vi = vec_vor(vbi, vi); // CHECK: or <4 x i32>
+ res_vi = vec_vor(vi, vbi); // CHECK: or <4 x i32>
res_vui = vec_vor(vui, vui); // CHECK: or <4 x i32>
+ res_vui = vec_vor(vbi, vui); // CHECK: or <4 x i32>
+ res_vui = vec_vor(vui, vbi); // CHECK: or <4 x i32>
+ res_vbi = vec_vor(vbi, vbi); // CHECK: or <4 x i32>
res_vf = vec_vor(vf, vf); // CHECK: or <4 x i32>
+ res_vf = vec_vor(vbi, vf); // CHECK: or <4 x i32>
+ res_vf = vec_vor(vf, vbi); // CHECK: or <4 x i32>
/* vec_pack */
res_vsc = vec_pack(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
res_vuc = vec_pack(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbc = vec_pack(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
res_vs = vec_pack(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
res_vus = vec_pack(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbs = vec_pack(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
res_vsc = vec_vpkuhum(vs, vs); // CHECK: @llvm.ppc.altivec.vperm
res_vuc = vec_vpkuhum(vus, vus); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbc = vec_vpkuhum(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
res_vs = vec_vpkuwum(vi, vi); // CHECK: @llvm.ppc.altivec.vperm
res_vus = vec_vpkuwum(vui, vui); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbs = vec_vpkuwum(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm
/* vec_packpx */
res_vp = vec_packpx(vui, vui); // CHECK: @llvm.ppc.altivec.vpkpx
@@ -587,17 +900,25 @@ int test6() {
/* vec_perm */
res_vsc = vec_perm(vsc, vsc, vuc); // CHECK: @llvm.ppc.altivec.vperm
res_vuc = vec_perm(vuc, vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbc = vec_perm(vbc, vbc, vuc); // CHECK: @llvm.ppc.altivec.vperm
res_vs = vec_perm(vs, vs, vuc); // CHECK: @llvm.ppc.altivec.vperm
res_vus = vec_perm(vus, vus, vuc); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbs = vec_perm(vbs, vbs, vuc); // CHECK: @llvm.ppc.altivec.vperm
+ res_vp = vec_perm(vp, vp, vuc); // CHECK: @llvm.ppc.altivec.vperm
res_vi = vec_perm(vi, vi, vuc); // CHECK: @llvm.ppc.altivec.vperm
res_vui = vec_perm(vui, vui, vuc); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbi = vec_perm(vbi, vbi, vuc); // CHECK: @llvm.ppc.altivec.vperm
res_vf = vec_perm(vf, vf, vuc); // CHECK: @llvm.ppc.altivec.vperm
res_vsc = vec_vperm(vsc, vsc, vuc); // CHECK: @llvm.ppc.altivec.vperm
res_vuc = vec_vperm(vuc, vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbc = vec_vperm(vbc, vbc, vuc); // CHECK: @llvm.ppc.altivec.vperm
res_vs = vec_vperm(vs, vs, vuc); // CHECK: @llvm.ppc.altivec.vperm
res_vus = vec_vperm(vus, vus, vuc); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbs = vec_vperm(vbs, vbs, vuc); // CHECK: @llvm.ppc.altivec.vperm
+ res_vp = vec_vperm(vp, vp, vuc); // CHECK: @llvm.ppc.altivec.vperm
res_vi = vec_vperm(vi, vi, vuc); // CHECK: @llvm.ppc.altivec.vperm
res_vui = vec_vperm(vui, vui, vuc); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbi = vec_vperm(vbi, vbi, vuc); // CHECK: @llvm.ppc.altivec.vperm
res_vf = vec_vperm(vf, vf, vuc); // CHECK: @llvm.ppc.altivec.vperm
/* vec_re */
@@ -632,77 +953,200 @@ int test6() {
// CHECK: and <16 x i8>
// CHECK: or <16 x i8>
+ res_vsc = vec_sel(vsc, vsc, vbc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+ // CHECK: and <16 x i8>
+ // CHECK: or <16 x i8>
+
res_vuc = vec_sel(vuc, vuc, vuc); // CHECK: xor <16 x i8>
// CHECK: and <16 x i8>
// CHECK: and <16 x i8>
// CHECK: or <16 x i8>
+ res_vuc = vec_sel(vuc, vuc, vbc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+ // CHECK: and <16 x i8>
+ // CHECK: or <16 x i8>
+
+ res_vbc = vec_sel(vbc, vbc, vuc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+ // CHECK: and <16 x i8>
+ // CHECK: or <16 x i8>
+
+ res_vbc = vec_sel(vbc, vbc, vbc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+ // CHECK: and <16 x i8>
+ // CHECK: or <16 x i8>
+
res_vs = vec_sel(vs, vs, vus); // CHECK: xor <8 x i16>
// CHECK: and <8 x i16>
// CHECK: and <8 x i16>
// CHECK: or <8 x i16>
+ res_vs = vec_sel(vs, vs, vbs); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+ // CHECK: and <8 x i16>
+ // CHECK: or <8 x i16>
res_vus = vec_sel(vus, vus, vus); // CHECK: xor <8 x i16>
// CHECK: and <8 x i16>
// CHECK: and <8 x i16>
// CHECK: or <8 x i16>
+ res_vus = vec_sel(vus, vus, vbs); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+ // CHECK: and <8 x i16>
+ // CHECK: or <8 x i16>
+
+ res_vbs = vec_sel(vbs, vbs, vus); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+ // CHECK: and <8 x i16>
+ // CHECK: or <8 x i16>
+
+ res_vbs = vec_sel(vbs, vbs, vbs); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+ // CHECK: and <8 x i16>
+ // CHECK: or <8 x i16>
+
res_vi = vec_sel(vi, vi, vui); // CHECK: xor <4 x i32>
// CHECK: and <4 x i32>
// CHECK: and <4 x i32>
// CHECK: or <4 x i32>
+ res_vi = vec_sel(vi, vi, vbi); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: or <4 x i32>
res_vui = vec_sel(vui, vui, vui); // CHECK: xor <4 x i32>
// CHECK: and <4 x i32>
// CHECK: and <4 x i32>
// CHECK: or <4 x i32>
+ res_vui = vec_sel(vui, vui, vbi); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: or <4 x i32>
+
+ res_vbi = vec_sel(vbi, vbi, vui); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: or <4 x i32>
+
+ res_vbi = vec_sel(vbi, vbi, vbi); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: or <4 x i32>
res_vf = vec_sel(vf, vf, vui); // CHECK: xor <4 x i32>
// CHECK: and <4 x i32>
// CHECK: and <4 x i32>
// CHECK: or <4 x i32>
+ res_vf = vec_sel(vf, vf, vbi); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: or <4 x i32>
+
res_vsc = vec_vsel(vsc, vsc, vuc); // CHECK: xor <16 x i8>
// CHECK: and <16 x i8>
// CHECK: and <16 x i8>
// CHECK: or <16 x i8>
+ res_vsc = vec_vsel(vsc, vsc, vbc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+ // CHECK: and <16 x i8>
+ // CHECK: or <16 x i8>
+
res_vuc = vec_vsel(vuc, vuc, vuc); // CHECK: xor <16 x i8>
// CHECK: and <16 x i8>
// CHECK: and <16 x i8>
// CHECK: or <16 x i8>
+ res_vuc = vec_vsel(vuc, vuc, vbc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+ // CHECK: and <16 x i8>
+ // CHECK: or <16 x i8>
+
+ res_vbc = vec_vsel(vbc, vbc, vuc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+ // CHECK: and <16 x i8>
+ // CHECK: or <16 x i8>
+
+ res_vbc = vec_vsel(vbc, vbc, vbc); // CHECK: xor <16 x i8>
+ // CHECK: and <16 x i8>
+ // CHECK: and <16 x i8>
+ // CHECK: or <16 x i8>
+
res_vs = vec_vsel(vs, vs, vus); // CHECK: xor <8 x i16>
// CHECK: and <8 x i16>
// CHECK: and <8 x i16>
// CHECK: or <8 x i16>
+ res_vs = vec_vsel(vs, vs, vbs); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+ // CHECK: and <8 x i16>
+ // CHECK: or <8 x i16>
res_vus = vec_vsel(vus, vus, vus); // CHECK: xor <8 x i16>
// CHECK: and <8 x i16>
// CHECK: and <8 x i16>
// CHECK: or <8 x i16>
+ res_vus = vec_vsel(vus, vus, vbs); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+ // CHECK: and <8 x i16>
+ // CHECK: or <8 x i16>
+
+ res_vbs = vec_vsel(vbs, vbs, vus); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+ // CHECK: and <8 x i16>
+ // CHECK: or <8 x i16>
+
+ res_vbs = vec_vsel(vbs, vbs, vbs); // CHECK: xor <8 x i16>
+ // CHECK: and <8 x i16>
+ // CHECK: and <8 x i16>
+ // CHECK: or <8 x i16>
+
res_vi = vec_vsel(vi, vi, vui); // CHECK: xor <4 x i32>
// CHECK: and <4 x i32>
// CHECK: and <4 x i32>
// CHECK: or <4 x i32>
+ res_vi = vec_vsel(vi, vi, vbi); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: or <4 x i32>
res_vui = vec_vsel(vui, vui, vui); // CHECK: xor <4 x i32>
// CHECK: and <4 x i32>
// CHECK: and <4 x i32>
// CHECK: or <4 x i32>
+ res_vui = vec_vsel(vui, vui, vbi); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: or <4 x i32>
+
+ res_vbi = vec_vsel(vbi, vbi, vui); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: or <4 x i32>
+
+ res_vbi = vec_vsel(vbi, vbi, vbi); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: or <4 x i32>
res_vf = vec_vsel(vf, vf, vui); // CHECK: xor <4 x i32>
// CHECK: and <4 x i32>
// CHECK: and <4 x i32>
// CHECK: or <4 x i32>
+ res_vf = vec_vsel(vf, vf, vbi); // CHECK: xor <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: and <4 x i32>
+ // CHECK: or <4 x i32>
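+ // Note: vec_sel(a, b, m) picks bits of b where m is set and bits of a
+ // where m is clear; per lane, roughly r = (a & ~m) | (b & m). Clang
+ // computes ~m as an xor with the mask, which is why each call above is
+ // matched by the four-instruction xor/and/and/or CHECK sequence.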
/* vec_sl */
res_vsc = vec_sl(vsc, vuc); // CHECK: shl <16 x i8>
@@ -723,6 +1167,7 @@ int test6() {
res_vuc = vec_sld(vuc, vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vs = vec_sld(vs, vs, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vus = vec_sld(vus, vus, 0); // CHECK: @llvm.ppc.altivec.vperm
+ res_vp = vec_sld(vp, vp, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vi = vec_sld(vi, vi, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vui = vec_sld(vui, vui, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vf = vec_sld(vf, vf, 0); // CHECK: @llvm.ppc.altivec.vperm
@@ -730,6 +1175,7 @@ int test6() {
res_vuc = vec_vsldoi(vuc, vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vs = vec_vsldoi(vs, vs, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vus = vec_vsldoi(vus, vus, 0); // CHECK: @llvm.ppc.altivec.vperm
+ res_vp = vec_vsldoi(vp, vp, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vi = vec_vsldoi(vi, vi, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vui = vec_vsldoi(vui, vui, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vf = vec_vsldoi(vf, vf, 0); // CHECK: @llvm.ppc.altivec.vperm
@@ -741,36 +1187,60 @@ int test6() {
res_vuc = vec_sll(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsl
res_vuc = vec_sll(vuc, vus); // CHECK: @llvm.ppc.altivec.vsl
res_vuc = vec_sll(vuc, vui); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbc = vec_sll(vbc, vuc); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbc = vec_sll(vbc, vus); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbc = vec_sll(vbc, vui); // CHECK: @llvm.ppc.altivec.vsl
res_vs = vec_sll(vs, vuc); // CHECK: @llvm.ppc.altivec.vsl
res_vs = vec_sll(vs, vus); // CHECK: @llvm.ppc.altivec.vsl
res_vs = vec_sll(vs, vui); // CHECK: @llvm.ppc.altivec.vsl
res_vus = vec_sll(vus, vuc); // CHECK: @llvm.ppc.altivec.vsl
res_vus = vec_sll(vus, vus); // CHECK: @llvm.ppc.altivec.vsl
res_vus = vec_sll(vus, vui); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbs = vec_sll(vbs, vuc); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbs = vec_sll(vbs, vus); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbs = vec_sll(vbs, vui); // CHECK: @llvm.ppc.altivec.vsl
+ res_vp = vec_sll(vp, vuc); // CHECK: @llvm.ppc.altivec.vsl
+ res_vp = vec_sll(vp, vus); // CHECK: @llvm.ppc.altivec.vsl
+ res_vp = vec_sll(vp, vui); // CHECK: @llvm.ppc.altivec.vsl
res_vi = vec_sll(vi, vuc); // CHECK: @llvm.ppc.altivec.vsl
res_vi = vec_sll(vi, vus); // CHECK: @llvm.ppc.altivec.vsl
res_vi = vec_sll(vi, vui); // CHECK: @llvm.ppc.altivec.vsl
res_vui = vec_sll(vui, vuc); // CHECK: @llvm.ppc.altivec.vsl
res_vui = vec_sll(vui, vus); // CHECK: @llvm.ppc.altivec.vsl
res_vui = vec_sll(vui, vui); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbi = vec_sll(vbi, vuc); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbi = vec_sll(vbi, vus); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbi = vec_sll(vbi, vui); // CHECK: @llvm.ppc.altivec.vsl
res_vsc = vec_vsl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsl
res_vsc = vec_vsl(vsc, vus); // CHECK: @llvm.ppc.altivec.vsl
res_vsc = vec_vsl(vsc, vui); // CHECK: @llvm.ppc.altivec.vsl
res_vuc = vec_vsl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsl
res_vuc = vec_vsl(vuc, vus); // CHECK: @llvm.ppc.altivec.vsl
res_vuc = vec_vsl(vuc, vui); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbc = vec_vsl(vbc, vuc); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbc = vec_vsl(vbc, vus); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbc = vec_vsl(vbc, vui); // CHECK: @llvm.ppc.altivec.vsl
res_vs = vec_vsl(vs, vuc); // CHECK: @llvm.ppc.altivec.vsl
res_vs = vec_vsl(vs, vus); // CHECK: @llvm.ppc.altivec.vsl
res_vs = vec_vsl(vs, vui); // CHECK: @llvm.ppc.altivec.vsl
res_vus = vec_vsl(vus, vuc); // CHECK: @llvm.ppc.altivec.vsl
res_vus = vec_vsl(vus, vus); // CHECK: @llvm.ppc.altivec.vsl
res_vus = vec_vsl(vus, vui); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbs = vec_vsl(vbs, vuc); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbs = vec_vsl(vbs, vus); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbs = vec_vsl(vbs, vui); // CHECK: @llvm.ppc.altivec.vsl
+ res_vp = vec_vsl(vp, vuc); // CHECK: @llvm.ppc.altivec.vsl
+ res_vp = vec_vsl(vp, vus); // CHECK: @llvm.ppc.altivec.vsl
+ res_vp = vec_vsl(vp, vui); // CHECK: @llvm.ppc.altivec.vsl
res_vi = vec_vsl(vi, vuc); // CHECK: @llvm.ppc.altivec.vsl
res_vi = vec_vsl(vi, vus); // CHECK: @llvm.ppc.altivec.vsl
res_vi = vec_vsl(vi, vui); // CHECK: @llvm.ppc.altivec.vsl
res_vui = vec_vsl(vui, vuc); // CHECK: @llvm.ppc.altivec.vsl
res_vui = vec_vsl(vui, vus); // CHECK: @llvm.ppc.altivec.vsl
res_vui = vec_vsl(vui, vui); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbi = vec_vsl(vbi, vuc); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbi = vec_vsl(vbi, vus); // CHECK: @llvm.ppc.altivec.vsl
+ res_vbi = vec_vsl(vbi, vui); // CHECK: @llvm.ppc.altivec.vsl
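+ // Note: vec_sll/vec_vsl shift the entire 128-bit register left by a bit
+ // count taken from the second operand, so the element type of the shift
+ // vector (char, short, or int) does not change the emitted vsl call.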
/* vec_slo */
res_vsc = vec_slo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vslo
@@ -781,6 +1251,8 @@ int test6() {
res_vs = vec_slo(vs, vuc); // CHECK: @llvm.ppc.altivec.vslo
res_vus = vec_slo(vus, vsc); // CHECK: @llvm.ppc.altivec.vslo
res_vus = vec_slo(vus, vuc); // CHECK: @llvm.ppc.altivec.vslo
+ res_vp = vec_slo(vp, vsc); // CHECK: @llvm.ppc.altivec.vslo
+ res_vp = vec_slo(vp, vuc); // CHECK: @llvm.ppc.altivec.vslo
res_vi = vec_slo(vi, vsc); // CHECK: @llvm.ppc.altivec.vslo
res_vi = vec_slo(vi, vuc); // CHECK: @llvm.ppc.altivec.vslo
res_vui = vec_slo(vui, vsc); // CHECK: @llvm.ppc.altivec.vslo
@@ -795,6 +1267,8 @@ int test6() {
res_vs = vec_vslo(vs, vuc); // CHECK: @llvm.ppc.altivec.vslo
res_vus = vec_vslo(vus, vsc); // CHECK: @llvm.ppc.altivec.vslo
res_vus = vec_vslo(vus, vuc); // CHECK: @llvm.ppc.altivec.vslo
+ res_vp = vec_vslo(vp, vsc); // CHECK: @llvm.ppc.altivec.vslo
+ res_vp = vec_vslo(vp, vuc); // CHECK: @llvm.ppc.altivec.vslo
res_vi = vec_vslo(vi, vsc); // CHECK: @llvm.ppc.altivec.vslo
res_vi = vec_vslo(vi, vuc); // CHECK: @llvm.ppc.altivec.vslo
res_vui = vec_vslo(vui, vsc); // CHECK: @llvm.ppc.altivec.vslo
@@ -805,17 +1279,25 @@ int test6() {
/* vec_splat */
res_vsc = vec_splat(vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vuc = vec_splat(vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbc = vec_splat(vbc, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vs = vec_splat(vs, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vus = vec_splat(vus, 0); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbs = vec_splat(vbs, 0); // CHECK: @llvm.ppc.altivec.vperm
+ res_vp = vec_splat(vp, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vi = vec_splat(vi, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vui = vec_splat(vui, 0); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbi = vec_splat(vbi, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vf = vec_splat(vf, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vsc = vec_vspltb(vsc, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vuc = vec_vspltb(vuc, 0); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbc = vec_vspltb(vbc, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vs = vec_vsplth(vs, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vus = vec_vsplth(vus, 0); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbs = vec_vsplth(vbs, 0); // CHECK: @llvm.ppc.altivec.vperm
+ res_vp = vec_vsplth(vp, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vi = vec_vspltw(vi, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vui = vec_vspltw(vui, 0); // CHECK: @llvm.ppc.altivec.vperm
+ res_vbi = vec_vspltw(vbi, 0); // CHECK: @llvm.ppc.altivec.vperm
res_vf = vec_vspltw(vf, 0); // CHECK: @llvm.ppc.altivec.vperm
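+ // Note: vec_splat broadcasts the selected element to every lane; like
+ // vec_pack it is lowered through a permute with a constant control
+ // vector, so each variant is checked against vperm.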
/* vec_splat_s8 */
@@ -874,36 +1356,60 @@ int test6() {
res_vuc = vec_srl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsr
res_vuc = vec_srl(vuc, vus); // CHECK: @llvm.ppc.altivec.vsr
res_vuc = vec_srl(vuc, vui); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbc = vec_srl(vbc, vuc); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbc = vec_srl(vbc, vus); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbc = vec_srl(vbc, vui); // CHECK: @llvm.ppc.altivec.vsr
res_vs = vec_srl(vs, vuc); // CHECK: @llvm.ppc.altivec.vsr
res_vs = vec_srl(vs, vus); // CHECK: @llvm.ppc.altivec.vsr
res_vs = vec_srl(vs, vui); // CHECK: @llvm.ppc.altivec.vsr
res_vus = vec_srl(vus, vuc); // CHECK: @llvm.ppc.altivec.vsr
res_vus = vec_srl(vus, vus); // CHECK: @llvm.ppc.altivec.vsr
res_vus = vec_srl(vus, vui); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbs = vec_srl(vbs, vuc); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbs = vec_srl(vbs, vus); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbs = vec_srl(vbs, vui); // CHECK: @llvm.ppc.altivec.vsr
+ res_vp = vec_srl(vp, vuc); // CHECK: @llvm.ppc.altivec.vsr
+ res_vp = vec_srl(vp, vus); // CHECK: @llvm.ppc.altivec.vsr
+ res_vp = vec_srl(vp, vui); // CHECK: @llvm.ppc.altivec.vsr
res_vi = vec_srl(vi, vuc); // CHECK: @llvm.ppc.altivec.vsr
res_vi = vec_srl(vi, vus); // CHECK: @llvm.ppc.altivec.vsr
res_vi = vec_srl(vi, vui); // CHECK: @llvm.ppc.altivec.vsr
res_vui = vec_srl(vui, vuc); // CHECK: @llvm.ppc.altivec.vsr
res_vui = vec_srl(vui, vus); // CHECK: @llvm.ppc.altivec.vsr
res_vui = vec_srl(vui, vui); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbi = vec_srl(vbi, vuc); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbi = vec_srl(vbi, vus); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbi = vec_srl(vbi, vui); // CHECK: @llvm.ppc.altivec.vsr
res_vsc = vec_vsr(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsr
res_vsc = vec_vsr(vsc, vus); // CHECK: @llvm.ppc.altivec.vsr
res_vsc = vec_vsr(vsc, vui); // CHECK: @llvm.ppc.altivec.vsr
res_vuc = vec_vsr(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsr
res_vuc = vec_vsr(vuc, vus); // CHECK: @llvm.ppc.altivec.vsr
res_vuc = vec_vsr(vuc, vui); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbc = vec_vsr(vbc, vuc); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbc = vec_vsr(vbc, vus); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbc = vec_vsr(vbc, vui); // CHECK: @llvm.ppc.altivec.vsr
res_vs = vec_vsr(vs, vuc); // CHECK: @llvm.ppc.altivec.vsr
res_vs = vec_vsr(vs, vus); // CHECK: @llvm.ppc.altivec.vsr
res_vs = vec_vsr(vs, vui); // CHECK: @llvm.ppc.altivec.vsr
res_vus = vec_vsr(vus, vuc); // CHECK: @llvm.ppc.altivec.vsr
res_vus = vec_vsr(vus, vus); // CHECK: @llvm.ppc.altivec.vsr
res_vus = vec_vsr(vus, vui); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbs = vec_vsr(vbs, vuc); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbs = vec_vsr(vbs, vus); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbs = vec_vsr(vbs, vui); // CHECK: @llvm.ppc.altivec.vsr
+ res_vp = vec_vsr(vp, vuc); // CHECK: @llvm.ppc.altivec.vsr
+ res_vp = vec_vsr(vp, vus); // CHECK: @llvm.ppc.altivec.vsr
+ res_vp = vec_vsr(vp, vui); // CHECK: @llvm.ppc.altivec.vsr
res_vi = vec_vsr(vi, vuc); // CHECK: @llvm.ppc.altivec.vsr
res_vi = vec_vsr(vi, vus); // CHECK: @llvm.ppc.altivec.vsr
res_vi = vec_vsr(vi, vui); // CHECK: @llvm.ppc.altivec.vsr
res_vui = vec_vsr(vui, vuc); // CHECK: @llvm.ppc.altivec.vsr
res_vui = vec_vsr(vui, vus); // CHECK: @llvm.ppc.altivec.vsr
res_vui = vec_vsr(vui, vui); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbi = vec_vsr(vbi, vuc); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbi = vec_vsr(vbi, vus); // CHECK: @llvm.ppc.altivec.vsr
+ res_vbi = vec_vsr(vbi, vui); // CHECK: @llvm.ppc.altivec.vsr
/* vec_sro */
res_vsc = vec_sro(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsro
@@ -914,6 +1420,8 @@ int test6() {
res_vs = vec_sro(vs, vuc); // CHECK: @llvm.ppc.altivec.vsro
res_vus = vec_sro(vus, vsc); // CHECK: @llvm.ppc.altivec.vsro
res_vus = vec_sro(vus, vuc); // CHECK: @llvm.ppc.altivec.vsro
+ res_vp = vec_sro(vp, vsc); // CHECK: @llvm.ppc.altivec.vsro
+ res_vp = vec_sro(vp, vuc); // CHECK: @llvm.ppc.altivec.vsro
res_vi = vec_sro(vi, vsc); // CHECK: @llvm.ppc.altivec.vsro
res_vi = vec_sro(vi, vuc); // CHECK: @llvm.ppc.altivec.vsro
res_vui = vec_sro(vui, vsc); // CHECK: @llvm.ppc.altivec.vsro
@@ -928,6 +1436,8 @@ int test6() {
res_vs = vec_vsro(vs, vuc); // CHECK: @llvm.ppc.altivec.vsro
res_vus = vec_vsro(vus, vsc); // CHECK: @llvm.ppc.altivec.vsro
res_vus = vec_vsro(vus, vuc); // CHECK: @llvm.ppc.altivec.vsro
+ res_vp = vec_vsro(vp, vsc); // CHECK: @llvm.ppc.altivec.vsro
+ res_vp = vec_vsro(vp, vuc); // CHECK: @llvm.ppc.altivec.vsro
res_vi = vec_vsro(vi, vsc); // CHECK: @llvm.ppc.altivec.vsro
res_vi = vec_vsro(vi, vuc); // CHECK: @llvm.ppc.altivec.vsro
res_vui = vec_vsro(vui, vsc); // CHECK: @llvm.ppc.altivec.vsro
@@ -940,45 +1450,85 @@ int test6() {
vec_st(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvx
vec_st(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvx
vec_st(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
+ vec_st(vbc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvx
+ vec_st(vbc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
+ vec_st(vbc, 0, &vbc); // CHECK: @llvm.ppc.altivec.stvx
vec_st(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvx
vec_st(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
vec_st(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvx
vec_st(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
+ vec_st(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
+ vec_st(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
+ vec_st(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.stvx
+ vec_st(vp, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
+ vec_st(vp, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
+ vec_st(vp, 0, &vp); // CHECK: @llvm.ppc.altivec.stvx
vec_st(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvx
vec_st(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
vec_st(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvx
vec_st(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
+ vec_st(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
+ vec_st(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
+ vec_st(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.stvx
vec_st(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvx
vec_st(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvx
vec_stvx(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvx
vec_stvx(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvx
vec_stvx(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvx
vec_stvx(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
+ vec_stvx(vbc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvx
+ vec_stvx(vbc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
+ vec_stvx(vbc, 0, &vbc); // CHECK: @llvm.ppc.altivec.stvx
vec_stvx(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvx
vec_stvx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
vec_stvx(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvx
vec_stvx(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
+ vec_stvx(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
+ vec_stvx(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
+ vec_stvx(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.stvx
+ vec_stvx(vp, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
+ vec_stvx(vp, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
+ vec_stvx(vp, 0, &vp); // CHECK: @llvm.ppc.altivec.stvx
vec_stvx(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvx
vec_stvx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
vec_stvx(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvx
vec_stvx(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
+ vec_stvx(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
+ vec_stvx(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
+ vec_stvx(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.stvx
vec_stvx(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvx
vec_stvx(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvx
/* vec_ste */
vec_ste(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
vec_ste(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
+ vec_ste(vbc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
+ vec_ste(vbc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
vec_ste(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
vec_ste(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
+ vec_ste(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
+ vec_ste(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
+ vec_ste(vp, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
+ vec_ste(vp, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
vec_ste(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
vec_ste(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
+ vec_ste(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
+ vec_ste(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
vec_ste(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvewx
vec_stvebx(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
vec_stvebx(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
+ vec_stvebx(vbc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
+ vec_stvebx(vbc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
vec_stvehx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
vec_stvehx(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
+ vec_stvehx(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
+ vec_stvehx(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
+ vec_stvehx(vp, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
+ vec_stvehx(vp, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
vec_stvewx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
vec_stvewx(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
+ vec_stvewx(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
+ vec_stvewx(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
vec_stvewx(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvewx
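+ // Note: vec_ste stores a single element, and the intrinsic is selected by
+ // element width rather than signedness: stvebx for bytes, stvehx for
+ // halfwords (including pixel), and stvewx for words and float.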
/* vec_stl */
@@ -986,45 +1536,93 @@ int test6() {
vec_stl(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
vec_stl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvxl
vec_stl(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stl(vbc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stl(vbc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stl(vbc, 0, &vbc); // CHECK: @llvm.ppc.altivec.stvxl
vec_stl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvxl
vec_stl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
vec_stl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvxl
vec_stl(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stl(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stl(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stl(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stl(vp, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stl(vp, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stl(vp, 0, &vp); // CHECK: @llvm.ppc.altivec.stvxl
vec_stl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvxl
vec_stl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
vec_stl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvxl
vec_stl(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stl(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stl(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stl(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.stvxl
vec_stl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvxl
vec_stl(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvxl
vec_stvxl(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvxl
vec_stvxl(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
vec_stvxl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvxl
vec_stvxl(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stvxl(vbc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stvxl(vbc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stvxl(vbc, 0, &vbc); // CHECK: @llvm.ppc.altivec.stvxl
vec_stvxl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvxl
vec_stvxl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
vec_stvxl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvxl
vec_stvxl(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stvxl(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stvxl(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stvxl(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stvxl(vp, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stvxl(vp, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stvxl(vp, 0, &vp); // CHECK: @llvm.ppc.altivec.stvxl
vec_stvxl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvxl
vec_stvxl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
vec_stvxl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvxl
vec_stvxl(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stvxl(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stvxl(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
+ vec_stvxl(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.stvxl
vec_stvxl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvxl
vec_stvxl(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvxl
/* vec_sub */
res_vsc = vec_sub(vsc, vsc); // CHECK: sub nsw <16 x i8>
+ res_vsc = vec_sub(vbc, vsc); // CHECK: sub nsw <16 x i8>
+ res_vsc = vec_sub(vsc, vbc); // CHECK: sub nsw <16 x i8>
res_vuc = vec_sub(vuc, vuc); // CHECK: sub <16 x i8>
+ res_vuc = vec_sub(vbc, vuc); // CHECK: sub <16 x i8>
+ res_vuc = vec_sub(vuc, vbc); // CHECK: sub <16 x i8>
res_vs = vec_sub(vs, vs); // CHECK: sub nsw <8 x i16>
+ res_vs = vec_sub(vbs, vs); // CHECK: sub nsw <8 x i16>
+ res_vs = vec_sub(vs, vbs); // CHECK: sub nsw <8 x i16>
res_vus = vec_sub(vus, vus); // CHECK: sub <8 x i16>
+ res_vus = vec_sub(vbs, vus); // CHECK: sub <8 x i16>
+ res_vus = vec_sub(vus, vbs); // CHECK: sub <8 x i16>
res_vi = vec_sub(vi, vi); // CHECK: sub nsw <4 x i32>
+ res_vi = vec_sub(vbi, vi); // CHECK: sub nsw <4 x i32>
+ res_vi = vec_sub(vi, vbi); // CHECK: sub nsw <4 x i32>
res_vui = vec_sub(vui, vui); // CHECK: sub <4 x i32>
+ res_vui = vec_sub(vbi, vui); // CHECK: sub <4 x i32>
+ res_vui = vec_sub(vui, vbi); // CHECK: sub <4 x i32>
res_vf = vec_sub(vf, vf); // CHECK: fsub <4 x float>
res_vsc = vec_vsububm(vsc, vsc); // CHECK: sub nsw <16 x i8>
+ res_vsc = vec_vsububm(vbc, vsc); // CHECK: sub nsw <16 x i8>
+ res_vsc = vec_vsububm(vsc, vbc); // CHECK: sub nsw <16 x i8>
res_vuc = vec_vsububm(vuc, vuc); // CHECK: sub <16 x i8>
+ res_vuc = vec_vsububm(vbc, vuc); // CHECK: sub <16 x i8>
+ res_vuc = vec_vsububm(vuc, vbc); // CHECK: sub <16 x i8>
res_vs = vec_vsubuhm(vs, vs); // CHECK: sub nsw <8 x i16>
+ res_vs = vec_vsubuhm(vbs, vs); // CHECK: sub nsw <8 x i16>
+ res_vs = vec_vsubuhm(vs, vbs); // CHECK: sub nsw <8 x i16>
res_vus = vec_vsubuhm(vus, vus); // CHECK: sub <8 x i16>
+ res_vus = vec_vsubuhm(vbs, vus); // CHECK: sub <8 x i16>
+ res_vus = vec_vsubuhm(vus, vbs); // CHECK: sub <8 x i16>
res_vi = vec_vsubuwm(vi, vi); // CHECK: sub nsw <4 x i32>
+ res_vi = vec_vsubuwm(vbi, vi); // CHECK: sub nsw <4 x i32>
+ res_vi = vec_vsubuwm(vi, vbi); // CHECK: sub nsw <4 x i32>
res_vui = vec_vsubuwm(vui, vui); // CHECK: sub <4 x i32>
+ res_vui = vec_vsubuwm(vbi, vui); // CHECK: sub <4 x i32>
+ res_vui = vec_vsubuwm(vui, vbi); // CHECK: sub <4 x i32>
res_vf = vec_vsubfp(vf, vf); // CHECK: fsub <4 x float>
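+ // Note: vec_sub is modulo (wrapping) subtraction, with bool operands
+ // bitcast to the result type first; only the signed integer variants are
+ // checked for the `nsw` (no-signed-wrap) flag on the emitted sub.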
/* vec_subc */
@@ -1033,17 +1631,41 @@ int test6() {
/* vec_subs */
res_vsc = vec_subs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
+ res_vsc = vec_subs(vbc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
+ res_vsc = vec_subs(vsc, vbc); // CHECK: @llvm.ppc.altivec.vsubsbs
res_vuc = vec_subs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
+ res_vuc = vec_subs(vbc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
+ res_vuc = vec_subs(vuc, vbc); // CHECK: @llvm.ppc.altivec.vsububs
res_vs = vec_subs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
+ res_vs = vec_subs(vbs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
+ res_vs = vec_subs(vs, vbs); // CHECK: @llvm.ppc.altivec.vsubshs
res_vus = vec_subs(vus, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
+ res_vus = vec_subs(vbs, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
+ res_vus = vec_subs(vus, vbs); // CHECK: @llvm.ppc.altivec.vsubuhs
res_vi = vec_subs(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
+ res_vi = vec_subs(vbi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
+ res_vi = vec_subs(vi, vbi); // CHECK: @llvm.ppc.altivec.vsubsws
res_vui = vec_subs(vui, vui); // CHECK: @llvm.ppc.altivec.vsubuws
+ res_vui = vec_subs(vbi, vui); // CHECK: @llvm.ppc.altivec.vsubuws
+ res_vui = vec_subs(vui, vbi); // CHECK: @llvm.ppc.altivec.vsubuws
res_vsc = vec_vsubsbs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
+ res_vsc = vec_vsubsbs(vbc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
+ res_vsc = vec_vsubsbs(vsc, vbc); // CHECK: @llvm.ppc.altivec.vsubsbs
res_vuc = vec_vsububs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
+ res_vuc = vec_vsububs(vbc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
+ res_vuc = vec_vsububs(vuc, vbc); // CHECK: @llvm.ppc.altivec.vsububs
res_vs = vec_vsubshs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
+ res_vs = vec_vsubshs(vbs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
+ res_vs = vec_vsubshs(vs, vbs); // CHECK: @llvm.ppc.altivec.vsubshs
res_vus = vec_vsubuhs(vus, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
+ res_vus = vec_vsubuhs(vbs, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
+ res_vus = vec_vsubuhs(vus, vbs); // CHECK: @llvm.ppc.altivec.vsubuhs
res_vi = vec_vsubsws(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
+ res_vi = vec_vsubsws(vbi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
+ res_vi = vec_vsubsws(vi, vbi); // CHECK: @llvm.ppc.altivec.vsubsws
res_vui = vec_vsubuws(vui, vui); // CHECK: @llvm.ppc.altivec.vsubuws
+ res_vui = vec_vsubuws(vbi, vui); // CHECK: @llvm.ppc.altivec.vsubuws
+ res_vui = vec_vsubuws(vui, vbi); // CHECK: @llvm.ppc.altivec.vsubuws
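+ // Note: vec_subs saturates instead of wrapping, so there is a distinct
+ // target intrinsic per width and signedness (vsubsbs/vsububs,
+ // vsubshs/vsubuhs, vsubsws/vsubuws); a bool operand takes on the
+ // signedness of the non-bool operand.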
/* vec_sum4s */
res_vi = vec_sum4s(vsc, vi); // CHECK: @llvm.ppc.altivec.vsum4sbs
@@ -1066,60 +1688,152 @@ int test6() {
res_vf = vec_vrfiz(vf); // CHECK: @llvm.ppc.altivec.vrfiz
/* vec_unpackh */
- res_vs = vec_unpackh(vsc); // CHECK: @llvm.ppc.altivec.vupkhsb
- res_vi = vec_unpackh(vs); // CHECK: @llvm.ppc.altivec.vupkhsh
- res_vs = vec_vupkhsb(vsc); // CHECK: @llvm.ppc.altivec.vupkhsb
- res_vi = vec_vupkhsh(vs); // CHECK: @llvm.ppc.altivec.vupkhsh
+ res_vs = vec_unpackh(vsc); // CHECK: @llvm.ppc.altivec.vupkhsb
+ res_vbs = vec_unpackh(vbc); // CHECK: @llvm.ppc.altivec.vupkhsb
+ res_vi = vec_unpackh(vs); // CHECK: @llvm.ppc.altivec.vupkhsh
+ res_vbi = vec_unpackh(vbs); // CHECK: @llvm.ppc.altivec.vupkhsh
+ res_vui = vec_unpackh(vp); // CHECK: @llvm.ppc.altivec.vupkhsh
+ res_vs = vec_vupkhsb(vsc); // CHECK: @llvm.ppc.altivec.vupkhsb
+ res_vbs = vec_vupkhsb(vbc); // CHECK: @llvm.ppc.altivec.vupkhsb
+ res_vi = vec_vupkhsh(vs); // CHECK: @llvm.ppc.altivec.vupkhsh
+ res_vbi = vec_vupkhsh(vbs); // CHECK: @llvm.ppc.altivec.vupkhsh
+ res_vui = vec_vupkhsh(vp); // CHECK: @llvm.ppc.altivec.vupkhsh
/* vec_unpackl */
- res_vs = vec_unpackl(vsc); // CHECK: @llvm.ppc.altivec.vupklsb
- res_vi = vec_vupklsh(vs); // CHECK: @llvm.ppc.altivec.vupklsh
- res_vs = vec_vupklsb(vsc); // CHECK: @llvm.ppc.altivec.vupklsb
- res_vi = vec_vupklsh(vs); // CHECK: @llvm.ppc.altivec.vupklsh
+ res_vs = vec_unpackl(vsc); // CHECK: @llvm.ppc.altivec.vupklsb
+ res_vbs = vec_unpackl(vbc); // CHECK: @llvm.ppc.altivec.vupklsb
+ res_vi = vec_unpackl(vs); // CHECK: @llvm.ppc.altivec.vupklsh
+ res_vbi = vec_unpackl(vbs); // CHECK: @llvm.ppc.altivec.vupklsh
+ res_vui = vec_unpackl(vp); // CHECK: @llvm.ppc.altivec.vupklsh
+ res_vs = vec_vupklsb(vsc); // CHECK: @llvm.ppc.altivec.vupklsb
+ res_vbs = vec_vupklsb(vbc); // CHECK: @llvm.ppc.altivec.vupklsb
+ res_vi = vec_vupklsh(vs); // CHECK: @llvm.ppc.altivec.vupklsh
+ res_vbi = vec_vupklsh(vbs); // CHECK: @llvm.ppc.altivec.vupklsh
+ res_vui = vec_vupklsh(vp); // CHECK: @llvm.ppc.altivec.vupklsh
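+ // Note: vec_unpackh/vec_unpackl widen each element of the high or low
+ // half of the source to twice its width; bool vectors unpack to wider
+ // bool vectors, and the pixel form expands each 16-bit pixel into a
+ // 32-bit element. All forms here are checked against the vupk intrinsics.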
/* vec_xor */
res_vsc = vec_xor(vsc, vsc); // CHECK: xor <16 x i8>
+ res_vsc = vec_xor(vbc, vsc); // CHECK: xor <16 x i8>
+ res_vsc = vec_xor(vsc, vbc); // CHECK: xor <16 x i8>
res_vuc = vec_xor(vuc, vuc); // CHECK: xor <16 x i8>
+ res_vuc = vec_xor(vbc, vuc); // CHECK: xor <16 x i8>
+ res_vuc = vec_xor(vuc, vbc); // CHECK: xor <16 x i8>
+ res_vbc = vec_xor(vbc, vbc); // CHECK: xor <16 x i8>
res_vs = vec_xor(vs, vs); // CHECK: xor <8 x i16>
+ res_vs = vec_xor(vbs, vs); // CHECK: xor <8 x i16>
+ res_vs = vec_xor(vs, vbs); // CHECK: xor <8 x i16>
res_vus = vec_xor(vus, vus); // CHECK: xor <8 x i16>
+ res_vus = vec_xor(vbs, vus); // CHECK: xor <8 x i16>
+ res_vus = vec_xor(vus, vbs); // CHECK: xor <8 x i16>
+ res_vbs = vec_xor(vbs, vbs); // CHECK: xor <8 x i16>
res_vi = vec_xor(vi, vi); // CHECK: xor <4 x i32>
+ res_vi = vec_xor(vbi, vi); // CHECK: xor <4 x i32>
+ res_vi = vec_xor(vi, vbi); // CHECK: xor <4 x i32>
res_vui = vec_xor(vui, vui); // CHECK: xor <4 x i32>
+ res_vui = vec_xor(vbi, vui); // CHECK: xor <4 x i32>
+ res_vui = vec_xor(vui, vbi); // CHECK: xor <4 x i32>
+ res_vbi = vec_xor(vbi, vbi); // CHECK: xor <4 x i32>
res_vf = vec_xor(vf, vf); // CHECK: xor <4 x i32>
+ res_vf = vec_xor(vbi, vf); // CHECK: xor <4 x i32>
+ res_vf = vec_xor(vf, vbi); // CHECK: xor <4 x i32>
res_vsc = vec_vxor(vsc, vsc); // CHECK: xor <16 x i8>
+ res_vsc = vec_vxor(vbc, vsc); // CHECK: xor <16 x i8>
+ res_vsc = vec_vxor(vsc, vbc); // CHECK: xor <16 x i8>
res_vuc = vec_vxor(vuc, vuc); // CHECK: xor <16 x i8>
+ res_vuc = vec_vxor(vbc, vuc); // CHECK: xor <16 x i8>
+ res_vuc = vec_vxor(vuc, vbc); // CHECK: xor <16 x i8>
+ res_vbc = vec_vxor(vbc, vbc); // CHECK: xor <16 x i8>
res_vs = vec_vxor(vs, vs); // CHECK: xor <8 x i16>
+ res_vs = vec_vxor(vbs, vs); // CHECK: xor <8 x i16>
+ res_vs = vec_vxor(vs, vbs); // CHECK: xor <8 x i16>
res_vus = vec_vxor(vus, vus); // CHECK: xor <8 x i16>
+ res_vus = vec_vxor(vbs, vus); // CHECK: xor <8 x i16>
+ res_vus = vec_vxor(vus, vbs); // CHECK: xor <8 x i16>
+ res_vbs = vec_vxor(vbs, vbs); // CHECK: xor <8 x i16>
res_vi = vec_vxor(vi, vi); // CHECK: xor <4 x i32>
+ res_vi = vec_vxor(vbi, vi); // CHECK: xor <4 x i32>
+ res_vi = vec_vxor(vi, vbi); // CHECK: xor <4 x i32>
res_vui = vec_vxor(vui, vui); // CHECK: xor <4 x i32>
+ res_vui = vec_vxor(vbi, vui); // CHECK: xor <4 x i32>
+ res_vui = vec_vxor(vui, vbi); // CHECK: xor <4 x i32>
+ res_vbi = vec_vxor(vbi, vbi); // CHECK: xor <4 x i32>
res_vf = vec_vxor(vf, vf); // CHECK: xor <4 x i32>
+ res_vf = vec_vxor(vbi, vf); // CHECK: xor <4 x i32>
+ res_vf = vec_vxor(vf, vbi); // CHECK: xor <4 x i32>
/* ------------------------------ predicates -------------------------------------- */
/* vec_all_eq */
res_i = vec_all_eq(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_all_eq(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
res_i = vec_all_eq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_all_eq(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_all_eq(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_all_eq(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_all_eq(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
res_i = vec_all_eq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_all_eq(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
res_i = vec_all_eq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_all_eq(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_all_eq(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_all_eq(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_all_eq(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_all_eq(vp, vp); // CHECK: @llvm.ppc.altivec.vcmpequh.p
res_i = vec_all_eq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_all_eq(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
res_i = vec_all_eq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_all_eq(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_all_eq(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_all_eq(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_all_eq(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
res_i = vec_all_eq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
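+ // Note: the vec_all_*/vec_any_* predicates return a scalar int, not a
+ // vector; they use the ".p" (predicate) flavor of the compare intrinsics,
+ // which also takes a CR6 condition selector as an extra argument that
+ // these CHECK lines do not match.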
/* vec_all_ge */
res_i = vec_all_ge(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+ res_i = vec_all_ge(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
res_i = vec_all_ge(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_ge(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_ge(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_ge(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_ge(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
res_i = vec_all_ge(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+ res_i = vec_all_ge(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
res_i = vec_all_ge(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_ge(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_ge(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_ge(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_ge(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
res_i = vec_all_ge(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+ res_i = vec_all_ge(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
res_i = vec_all_ge(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_ge(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_ge(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_ge(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_ge(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
res_i = vec_all_ge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
/* vec_all_gt */
res_i = vec_all_gt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+ res_i = vec_all_gt(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
res_i = vec_all_gt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_gt(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_gt(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_gt(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_gt(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
res_i = vec_all_gt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+ res_i = vec_all_gt(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
res_i = vec_all_gt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_gt(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_gt(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_gt(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_gt(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
res_i = vec_all_gt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+ res_i = vec_all_gt(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
res_i = vec_all_gt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_gt(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_gt(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_gt(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_gt(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
res_i = vec_all_gt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
/* vec_all_in */
@@ -1127,23 +1841,78 @@ int test6() {
/* vec_all_le */
res_i = vec_all_le(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+ res_i = vec_all_le(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
res_i = vec_all_le(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_le(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_le(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_le(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_le(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
res_i = vec_all_le(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+ res_i = vec_all_le(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
res_i = vec_all_le(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_le(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_le(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_le(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_le(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
res_i = vec_all_le(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+ res_i = vec_all_le(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
res_i = vec_all_le(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_le(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_le(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_le(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_le(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
res_i = vec_all_le(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
+ /* vec_all_lt */
+ res_i = vec_all_lt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+ res_i = vec_all_lt(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+ res_i = vec_all_lt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_lt(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_lt(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_lt(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_lt(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_all_lt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+ res_i = vec_all_lt(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+ res_i = vec_all_lt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_lt(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_lt(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_lt(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_lt(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_all_lt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+ res_i = vec_all_lt(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+ res_i = vec_all_lt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_lt(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_lt(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_lt(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_lt(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_all_lt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
+
/* vec_all_nan */
res_i = vec_all_nan(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
/* vec_all_ne */
res_i = vec_all_ne(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_all_ne(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
res_i = vec_all_ne(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_all_ne(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_all_ne(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_all_ne(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_all_ne(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
res_i = vec_all_ne(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_all_ne(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
res_i = vec_all_ne(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_all_ne(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_all_ne(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_all_ne(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_all_ne(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_all_ne(vp, vp); // CHECK: @llvm.ppc.altivec.vcmpequh.p
res_i = vec_all_ne(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_all_ne(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
res_i = vec_all_ne(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_all_ne(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_all_ne(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_all_ne(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_all_ne(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
res_i = vec_all_ne(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
/* vec_all_nge */
@@ -1163,47 +1932,123 @@ int test6() {
/* vec_any_eq */
res_i = vec_any_eq(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_any_eq(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
res_i = vec_any_eq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_any_eq(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_any_eq(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_any_eq(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_any_eq(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
res_i = vec_any_eq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_any_eq(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
res_i = vec_any_eq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_any_eq(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_any_eq(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_any_eq(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_any_eq(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_any_eq(vp, vp); // CHECK: @llvm.ppc.altivec.vcmpequh.p
res_i = vec_any_eq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_any_eq(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
res_i = vec_any_eq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_any_eq(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_any_eq(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_any_eq(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_any_eq(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
res_i = vec_any_eq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
/* vec_any_ge */
res_i = vec_any_ge(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+ res_i = vec_any_ge(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
res_i = vec_any_ge(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_any_ge(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_any_ge(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_any_ge(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_any_ge(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
res_i = vec_any_ge(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+ res_i = vec_any_ge(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
res_i = vec_any_ge(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_any_ge(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_any_ge(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_any_ge(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_any_ge(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
res_i = vec_any_ge(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+ res_i = vec_any_ge(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
res_i = vec_any_ge(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_any_ge(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_any_ge(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_any_ge(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_any_ge(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
res_i = vec_any_ge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
/* vec_any_gt */
res_i = vec_any_gt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+ res_i = vec_any_gt(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
res_i = vec_any_gt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_any_gt(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_any_gt(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_any_gt(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_any_gt(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
res_i = vec_any_gt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+ res_i = vec_any_gt(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
res_i = vec_any_gt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_any_gt(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_any_gt(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_any_gt(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_any_gt(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
res_i = vec_any_gt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+ res_i = vec_any_gt(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
res_i = vec_any_gt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_any_gt(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_any_gt(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_any_gt(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_any_gt(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
res_i = vec_any_gt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
/* vec_any_le */
res_i = vec_any_le(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+ res_i = vec_any_le(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
res_i = vec_any_le(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_any_le(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_any_le(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_any_le(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_any_le(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
res_i = vec_any_le(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+ res_i = vec_any_le(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
res_i = vec_any_le(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_any_le(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_any_le(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_any_le(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_any_le(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
res_i = vec_any_le(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+ res_i = vec_any_le(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
res_i = vec_any_le(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_any_le(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_any_le(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_any_le(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_any_le(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
res_i = vec_any_le(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
/* vec_any_lt */
res_i = vec_any_lt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
+ res_i = vec_any_lt(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p
res_i = vec_any_lt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_any_lt(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_any_lt(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_any_lt(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
+ res_i = vec_any_lt(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p
res_i = vec_any_lt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
+ res_i = vec_any_lt(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
res_i = vec_any_lt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_any_lt(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_any_lt(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_any_lt(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
+ res_i = vec_any_lt(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
res_i = vec_any_lt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
+ res_i = vec_any_lt(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p
res_i = vec_any_lt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_any_lt(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_any_lt(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_any_lt(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
+ res_i = vec_any_lt(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p
res_i = vec_any_lt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
/* vec_any_nan */
@@ -1211,11 +2056,27 @@ int test6() {
/* vec_any_ne */
res_i = vec_any_ne(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_any_ne(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
res_i = vec_any_ne(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_any_ne(vuc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_any_ne(vbc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_any_ne(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
+ res_i = vec_any_ne(vbc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
res_i = vec_any_ne(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_any_ne(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
res_i = vec_any_ne(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_any_ne(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_any_ne(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_any_ne(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_any_ne(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
+ res_i = vec_any_ne(vp, vp); // CHECK: @llvm.ppc.altivec.vcmpequh.p
res_i = vec_any_ne(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_any_ne(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
res_i = vec_any_ne(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_any_ne(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_any_ne(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_any_ne(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p
+ res_i = vec_any_ne(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p
res_i = vec_any_ne(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
/* vec_any_nge */
@@ -1235,6 +2096,4 @@ int test6() {
/* vec_any_out */
res_i = vec_any_out(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp.p
-
- return 0;
}
diff --git a/test/CodeGen/builtins-x86.c b/test/CodeGen/builtins-x86.c
index b587814..1b4e68b 100644
--- a/test/CodeGen/builtins-x86.c
+++ b/test/CodeGen/builtins-x86.c
@@ -24,6 +24,14 @@ typedef signed long long V2LLi __attribute__((vector_size(16)));
typedef float V4f __attribute__((vector_size(16)));
typedef double V2d __attribute__((vector_size(16)));
+// 256-bit
+typedef char V32c __attribute__((vector_size(32)));
+typedef signed int V8i __attribute__((vector_size(32)));
+typedef signed long long V4LLi __attribute__((vector_size(32)));
+
+typedef double V4d __attribute__((vector_size(32)));
+typedef float V8f __attribute__((vector_size(32)));
+
void f0() {
signed char tmp_c;
// unsigned char tmp_Uc;
@@ -76,6 +84,22 @@ void f0() {
V2LLi tmp_V2LLi;
V4f tmp_V4f;
V2d tmp_V2d;
+ V2d* tmp_V2dp;
+ V4f* tmp_V4fp;
+ const V2d* tmp_V2dCp;
+ const V4f* tmp_V4fCp;
+
+ // 256-bit
+ V32c tmp_V32c;
+ V4d tmp_V4d;
+ V8f tmp_V8f;
+ V4LLi tmp_V4LLi;
+ V8i tmp_V8i;
+ V4LLi* tmp_V4LLip;
+ V4d* tmp_V4dp;
+ V8f* tmp_V8fp;
+ const V4d* tmp_V4dCp;
+ const V8f* tmp_V8fCp;
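+ // Note: the pointer and 256-bit temporaries above exist only so that each
+ // AVX builtin below can be invoked with correctly typed operands; this
+ // file has no CHECK lines, so it verifies only that the builtins compile
+ // and type-check.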
tmp_i = __builtin_ia32_comieq(tmp_V4f, tmp_V4f);
tmp_i = __builtin_ia32_comilt(tmp_V4f, tmp_V4f);
@@ -365,6 +389,95 @@ void f0() {
tmp_V2d = __builtin_ia32_roundpd(tmp_V2d, imm_i_0_16);
tmp_V4f = __builtin_ia32_insertps128(tmp_V4f, tmp_V4f, tmp_i);
#endif
-}
-
+ tmp_V4d = __builtin_ia32_addsubpd256(tmp_V4d, tmp_V4d);
+ tmp_V8f = __builtin_ia32_addsubps256(tmp_V8f, tmp_V8f);
+ tmp_V4d = __builtin_ia32_haddpd256(tmp_V4d, tmp_V4d);
+ tmp_V8f = __builtin_ia32_hsubps256(tmp_V8f, tmp_V8f);
+ tmp_V4d = __builtin_ia32_hsubpd256(tmp_V4d, tmp_V4d);
+ tmp_V8f = __builtin_ia32_haddps256(tmp_V8f, tmp_V8f);
+ tmp_V4d = __builtin_ia32_maxpd256(tmp_V4d, tmp_V4d);
+ tmp_V8f = __builtin_ia32_maxps256(tmp_V8f, tmp_V8f);
+ tmp_V4d = __builtin_ia32_minpd256(tmp_V4d, tmp_V4d);
+ tmp_V8f = __builtin_ia32_minps256(tmp_V8f, tmp_V8f);
+ tmp_V2d = __builtin_ia32_vpermilvarpd(tmp_V2d, tmp_V2LLi);
+ tmp_V4f = __builtin_ia32_vpermilvarps(tmp_V4f, tmp_V4i);
+ tmp_V4d = __builtin_ia32_vpermilvarpd256(tmp_V4d, tmp_V4LLi);
+ tmp_V8f = __builtin_ia32_vpermilvarps256(tmp_V8f, tmp_V8i);
+ tmp_V4d = __builtin_ia32_blendpd256(tmp_V4d, tmp_V4d, 0x7);
+ tmp_V8f = __builtin_ia32_blendps256(tmp_V8f, tmp_V8f, 0x7);
+ tmp_V4d = __builtin_ia32_blendvpd256(tmp_V4d, tmp_V4d, tmp_V4d);
+ tmp_V8f = __builtin_ia32_blendvps256(tmp_V8f, tmp_V8f, tmp_V8f);
+ tmp_V8f = __builtin_ia32_dpps256(tmp_V8f, tmp_V8f, 0x7);
+ tmp_V4d = __builtin_ia32_cmppd256(tmp_V4d, tmp_V4d, 0);
+ tmp_V8f = __builtin_ia32_cmpps256(tmp_V8f, tmp_V8f, 0);
+ tmp_V2d = __builtin_ia32_vextractf128_pd256(tmp_V4d, 0x7);
+ tmp_V4f = __builtin_ia32_vextractf128_ps256(tmp_V8f, 0x7);
+ tmp_V4i = __builtin_ia32_vextractf128_si256(tmp_V8i, 0x7);
+ tmp_V4d = __builtin_ia32_cvtdq2pd256(tmp_V4i);
+ tmp_V8f = __builtin_ia32_cvtdq2ps256(tmp_V8i);
+ tmp_V4f = __builtin_ia32_cvtpd2ps256(tmp_V4d);
+ tmp_V8i = __builtin_ia32_cvtps2dq256(tmp_V8f);
+ tmp_V4d = __builtin_ia32_cvtps2pd256(tmp_V4f);
+ tmp_V4i = __builtin_ia32_cvttpd2dq256(tmp_V4d);
+ tmp_V4i = __builtin_ia32_cvtpd2dq256(tmp_V4d);
+ tmp_V8i = __builtin_ia32_cvttps2dq256(tmp_V8f);
+ tmp_V4d = __builtin_ia32_vperm2f128_pd256(tmp_V4d, tmp_V4d, 0x7);
+ tmp_V8f = __builtin_ia32_vperm2f128_ps256(tmp_V8f, tmp_V8f, 0x7);
+ tmp_V8i = __builtin_ia32_vperm2f128_si256(tmp_V8i, tmp_V8i, 0x7);
+ tmp_V2d = __builtin_ia32_vpermilpd(tmp_V2d, 0x7);
+ tmp_V4f = __builtin_ia32_vpermilps(tmp_V4f, 0x7);
+ tmp_V4d = __builtin_ia32_vpermilpd256(tmp_V4d, 0x7);
+ tmp_V8f = __builtin_ia32_vpermilps256(tmp_V8f, 0x7);
+ tmp_V4d = __builtin_ia32_vinsertf128_pd256(tmp_V4d, tmp_V2d, 0x7);
+ tmp_V8f = __builtin_ia32_vinsertf128_ps256(tmp_V8f, tmp_V4f, 0x7);
+ tmp_V8i = __builtin_ia32_vinsertf128_si256(tmp_V8i, tmp_V4i, 0x7);
+ tmp_V4d = __builtin_ia32_sqrtpd256(tmp_V4d);
+ tmp_V8f = __builtin_ia32_sqrtps256(tmp_V8f);
+ tmp_V8f = __builtin_ia32_rsqrtps256(tmp_V8f);
+ tmp_V8f = __builtin_ia32_rcpps256(tmp_V8f);
+ tmp_V4d = __builtin_ia32_roundpd256(tmp_V4d, tmp_i);
+ tmp_V8f = __builtin_ia32_roundps256(tmp_V8f, tmp_i);
+ tmp_i = __builtin_ia32_vtestzpd(tmp_V2d, tmp_V2d);
+ tmp_i = __builtin_ia32_vtestcpd(tmp_V2d, tmp_V2d);
+ tmp_i = __builtin_ia32_vtestnzcpd(tmp_V2d, tmp_V2d);
+ tmp_i = __builtin_ia32_vtestzps(tmp_V4f, tmp_V4f);
+ tmp_i = __builtin_ia32_vtestcps(tmp_V4f, tmp_V4f);
+ tmp_i = __builtin_ia32_vtestnzcps(tmp_V4f, tmp_V4f);
+ tmp_i = __builtin_ia32_vtestzpd256(tmp_V4d, tmp_V4d);
+ tmp_i = __builtin_ia32_vtestcpd256(tmp_V4d, tmp_V4d);
+ tmp_i = __builtin_ia32_vtestnzcpd256(tmp_V4d, tmp_V4d);
+ tmp_i = __builtin_ia32_vtestzps256(tmp_V8f, tmp_V8f);
+ tmp_i = __builtin_ia32_vtestcps256(tmp_V8f, tmp_V8f);
+ tmp_i = __builtin_ia32_vtestnzcps256(tmp_V8f, tmp_V8f);
+ tmp_i = __builtin_ia32_ptestz256(tmp_V4LLi, tmp_V4LLi);
+ tmp_i = __builtin_ia32_ptestc256(tmp_V4LLi, tmp_V4LLi);
+ tmp_i = __builtin_ia32_ptestnzc256(tmp_V4LLi, tmp_V4LLi);
+ tmp_i = __builtin_ia32_movmskpd256(tmp_V4d);
+ tmp_i = __builtin_ia32_movmskps256(tmp_V8f);
+ __builtin_ia32_vzeroall();
+ __builtin_ia32_vzeroupper();
+ tmp_V4f = __builtin_ia32_vbroadcastss(tmp_fCp);
+ tmp_V4d = __builtin_ia32_vbroadcastsd256(tmp_dCp);
+ tmp_V8f = __builtin_ia32_vbroadcastss256(tmp_fCp);
+ tmp_V4d = __builtin_ia32_vbroadcastf128_pd256(tmp_V2dCp);
+ tmp_V8f = __builtin_ia32_vbroadcastf128_ps256(tmp_V4fCp);
+ tmp_V4d = __builtin_ia32_loadupd256(tmp_dCp);
+ tmp_V8f = __builtin_ia32_loadups256(tmp_fCp);
+ __builtin_ia32_storeupd256(tmp_dp, tmp_V4d);
+ __builtin_ia32_storeups256(tmp_fp, tmp_V8f);
+ tmp_V32c = __builtin_ia32_loaddqu256(tmp_cCp);
+ __builtin_ia32_storedqu256(tmp_cp, tmp_V32c);
+ tmp_V32c = __builtin_ia32_lddqu256(tmp_cCp);
+ __builtin_ia32_movntdq256(tmp_V4LLip, tmp_V4LLi);
+ __builtin_ia32_movntpd256(tmp_dp, tmp_V4d);
+ __builtin_ia32_movntps256(tmp_fp, tmp_V8f);
+ tmp_V2d = __builtin_ia32_maskloadpd(tmp_V2dCp, tmp_V2d);
+ tmp_V4f = __builtin_ia32_maskloadps(tmp_V4fCp, tmp_V4f);
+ tmp_V4d = __builtin_ia32_maskloadpd256(tmp_V4dCp, tmp_V4d);
+ tmp_V8f = __builtin_ia32_maskloadps256(tmp_V8fCp, tmp_V8f);
+ __builtin_ia32_maskstorepd(tmp_V2dp, tmp_V2d, tmp_V2d);
+ __builtin_ia32_maskstoreps(tmp_V4fp, tmp_V4f, tmp_V4f);
+ __builtin_ia32_maskstorepd256(tmp_V4dp, tmp_V4d, tmp_V4d);
+ __builtin_ia32_maskstoreps256(tmp_V8fp, tmp_V8f, tmp_V8f);
+}
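
Note: the 256-bit __builtin_ia32_* calls exercised above are the forms the
<immintrin.h> AVX intrinsics expand to; user code normally goes through the
wrappers rather than calling the builtins directly. A minimal sketch, assuming
an AVX-enabled build (e.g. -mavx); hadd4 and sqrt8 are illustrative names:

#include <immintrin.h>

/* Illustrative only: _mm256_hadd_pd lowers through __builtin_ia32_haddpd256. */
__m256d hadd4(__m256d a, __m256d b) { return _mm256_hadd_pd(a, b); }

/* Illustrative only: _mm256_sqrt_ps lowers through __builtin_ia32_sqrtps256. */
__m256 sqrt8(__m256 v) { return _mm256_sqrt_ps(v); }
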
diff --git a/test/CodeGen/const-arithmetic.c b/test/CodeGen/const-arithmetic.c
index 92c02f0..a28f73f 100644
--- a/test/CodeGen/const-arithmetic.c
+++ b/test/CodeGen/const-arithmetic.c
@@ -1,7 +1,7 @@
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s
-// CHECK: @g1 = global [2 x i8*] [i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -2), i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -46)], align 16 ; <[2 x i8*]*> [#uses=0]
-// CHECK: @g2 = global [2 x i8*] [i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -2), i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -46)], align 16 ; <[2 x i8*]*> [#uses=0]
+// CHECK: @g1 = global [2 x i8*] [i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -2), i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -46)], align 16
+// CHECK: @g2 = global [2 x i8*] [i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -2), i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -46)], align 16
extern struct { unsigned char a, b; } g0[];
void *g1[] = {g0 + -1, g0 + -23 };
diff --git a/test/CodeGen/const-init.c b/test/CodeGen/const-init.c
index c7a53be..ac26b65 100644
--- a/test/CodeGen/const-init.c
+++ b/test/CodeGen/const-init.c
@@ -117,9 +117,14 @@ struct g22 {int x;} __attribute((packed));
struct g23 {char a; short b; char c; struct g22 d;};
struct g23 g24 = {1,2,3,4};
-// CHECK: @__func__.g25 = private constant [4 x i8] c"g25\00"
// CHECK: @g25.g26 = internal global i8* getelementptr inbounds ([4 x i8]* @__func__.g25, i32 0, i32 0)
+// CHECK: @__func__.g25 = private constant [4 x i8] c"g25\00"
int g25() {
static const char *g26 = __func__;
return *g26;
}
+
+// CHECK: @g27.x = internal global i8* bitcast (i8** @g27.x to i8*), align 4
+void g27() { // PR8073
+ static void *x = &x;
+}
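
Note: the g27 case is legal because the address of an object with static
storage duration is an address constant (C99 6.6p9), so the initializer needs
no runtime code; the bitcast in the CHECK line is just the void* conversion.
A standalone sketch of the same shape (h is an illustrative name):

void h(void) {
  /* A static local may be initialized with its own address; the backing
     global gets a self-referential constant initializer. */
  static void *p = &p;
}
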
diff --git a/test/CodeGen/debug-info-enum.c b/test/CodeGen/debug-info-enum.c
new file mode 100644
index 0000000..b4a1ce0
--- /dev/null
+++ b/test/CodeGen/debug-info-enum.c
@@ -0,0 +1,11 @@
+// RUN: %clang_cc1 -emit-llvm -g %s -o %t
+// RUN: grep DW_TAG_enumeration_type %t
+// Radar 8195980
+
+enum vtag {
+ VT_ONE
+};
+
+int foo(int i) {
+ return i == VT_ONE;
+}
diff --git a/test/CodeGen/debug-info-scope.c b/test/CodeGen/debug-info-scope.c
new file mode 100644
index 0000000..6051e6e
--- /dev/null
+++ b/test/CodeGen/debug-info-scope.c
@@ -0,0 +1,14 @@
+// RUN: %clang_cc1 -g -emit-llvm < %s | FileCheck %s
+// Two variables with the same name in separate scopes.
+// Radar 8330217.
+int main() {
+ int j = 0;
+ int k = 0;
+// CHECK: DW_TAG_auto_variable
+// CHECK-NEXT: DW_TAG_lexical_block
+ for (int i = 0; i < 10; i++)
+ j++;
+ for (int i = 0; i < 10; i++)
+ k++;
+ return 0;
+}
diff --git a/test/CodeGen/designated-initializers.c b/test/CodeGen/designated-initializers.c
index 49f57ad..312d785 100644
--- a/test/CodeGen/designated-initializers.c
+++ b/test/CodeGen/designated-initializers.c
@@ -8,10 +8,10 @@ struct foo {
// CHECK: @u = global %union.anon zeroinitializer
union { int i; float f; } u = { };
-// CHECK: @u2 = global %0 { i32 0, [4 x i8] undef }
+// CHECK: @u2 = global %1 { i32 0, [4 x i8] undef }
union { int i; double f; } u2 = { };
-// CHECK: @u3 = global %1 zeroinitializer
+// CHECK: @u3 = global %2 zeroinitializer
union { double f; int i; } u3 = { };
// CHECK: @b = global [2 x i32] [i32 0, i32 22]
@@ -19,7 +19,7 @@ int b[2] = {
[1] = 22
};
-int main(int argc, char **argv)
+void test1(int argc, char **argv)
{
// CHECK: internal global %struct.foo { i8* null, i32 1024 }
static struct foo foo = {
@@ -33,5 +33,24 @@ int main(int argc, char **argv)
// CHECK-NOT: call void @llvm.memset
union { int i; float f; } u3;
- // CHECK: ret i32
+ // CHECK: ret void
+}
+
+
+// PR7151
+struct S {
+ int nkeys;
+ int *keys;
+ union {
+ void *data;
+ };
+};
+
+void test2() {
+ struct S *btkr;
+
+ *btkr = (struct S) {
+ .keys = 0,
+ { .data = 0 },
+ };
}
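
Note: the PR7151 pattern above is designated initialization through an
anonymous union member inside a compound literal. A self-contained sketch of
the same shape; struct T and set are illustrative names, and anonymous members
are a GNU/MS extension in C99 (standardized later, in C11):

struct T {
  int n;
  union { void *data; };   /* anonymous union member */
};

void set(struct T *t) {
  /* Brace-enclosed designator for the anonymous member, as in test2 above. */
  *t = (struct T){ .n = 1, { .data = 0 } };
}
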
diff --git a/test/CodeGen/enum2.c b/test/CodeGen/enum2.c
new file mode 100644
index 0000000..3203627
--- /dev/null
+++ b/test/CodeGen/enum2.c
@@ -0,0 +1,8 @@
+// RUN: %clang_cc1 -triple i386-unknown-unknown %s -g -emit-llvm -o /dev/null
+int v;
+enum e { MAX };
+
+void foo (void)
+{
+ v = MAX;
+}
diff --git a/test/CodeGen/exprs.c b/test/CodeGen/exprs.c
index 7cc1134..c9978b8 100644
--- a/test/CodeGen/exprs.c
+++ b/test/CodeGen/exprs.c
@@ -145,3 +145,10 @@ double f13(double X) {
// CHECK: fsub double -0.0
return -X;
}
+
+// Check operations on incomplete types.
+struct s14;
+void f14(struct s14 *a) {
+ (void) &*a;
+}
+
diff --git a/test/CodeGen/fold-const-declref.c b/test/CodeGen/fold-const-declref.c
new file mode 100644
index 0000000..5a7ba8e
--- /dev/null
+++ b/test/CodeGen/fold-const-declref.c
@@ -0,0 +1,9 @@
+// RUN: %clang_cc1 -verify -emit-llvm-only %s
+
+// PR7242: Check that this doesn't crash.
+int main(void)
+{
+ int __negative = 1;
+ const int __max = __negative && 0 ;
+ __max / 0;
+}
diff --git a/test/CodeGen/func-in-block.c b/test/CodeGen/func-in-block.c
index 27e0c09..7e65ff9 100644
--- a/test/CodeGen/func-in-block.c
+++ b/test/CodeGen/func-in-block.c
@@ -15,4 +15,5 @@ int main()
return 0; // not reached
}
+// CHECK: @__func__.__main_block_invoke_0 = private constant [22 x i8] c"__main_block_invoke_0\00"
// CHECK: call void @PRINTF({{.*}}@__func__.__main_block_invoke_
diff --git a/test/CodeGen/lineno-dbginfo.c b/test/CodeGen/lineno-dbginfo.c
index c5c350f..176d415 100644
--- a/test/CodeGen/lineno-dbginfo.c
+++ b/test/CodeGen/lineno-dbginfo.c
@@ -1,6 +1,5 @@
// RUN: echo "#include <stdio.h>" > %t.h
-// RUN: %clang -S -save-temps -g -include %t.h %s -emit-llvm -o %t.ll
+// RUN: %clang -S -g -include %t.h %s -emit-llvm -o %t.ll
// RUN: grep "i32 5" %t.ll
-// RUN: rm -f lineno-dbginfo.i
// outer is at line number 5.
int outer = 42;
diff --git a/test/CodeGen/packed-structure.c b/test/CodeGen/packed-structure.c
new file mode 100644
index 0000000..2934d01
--- /dev/null
+++ b/test/CodeGen/packed-structure.c
@@ -0,0 +1,89 @@
+// RUN: %clang_cc1 -triple x86_64 -emit-llvm -o - %s | opt -S -strip -o %t
+// RUX: llvm-gcc -flto -S -O3 -o %t %s
+// RUN: FileCheck --check-prefix=CHECK-GLOBAL < %t %s
+// RUN: FileCheck --check-prefix=CHECK-FUNCTIONS < %t %s
+
+struct s0 {
+ int x;
+ int y __attribute__((packed));
+};
+
+// CHECK-GLOBAL: @s0_align_x = global i32 4
+
+// FIXME: This should be 1 to match gcc. PR7951.
+// CHECK-GLOBAL: @s0_align_y = global i32 4
+
+// CHECK-GLOBAL: @s0_align = global i32 4
+int s0_align_x = __alignof(((struct s0*)0)->x);
+int s0_align_y = __alignof(((struct s0*)0)->y);
+int s0_align = __alignof(struct s0);
+
+// CHECK-FUNCTIONS: define i32 @s0_load_x
+// CHECK-FUNCTIONS: [[s0_load_x:%.*]] = load i32* {{.*}}, align 4
+// CHECK-FUNCTIONS: ret i32 [[s0_load_x]]
+int s0_load_x(struct s0 *a) { return a->x; }
+// FIXME: This seems like it should be align 1. This is actually something
+// which has changed in llvm-gcc recently; previously both x and y were
+// loaded with align 1 (in 2363.1 at least).
+//
+// CHECK-FUNCTIONS: define i32 @s0_load_y
+// CHECK-FUNCTIONS: [[s0_load_y:%.*]] = load i32* {{.*}}, align 4
+// CHECK-FUNCTIONS: ret i32 [[s0_load_y]]
+int s0_load_y(struct s0 *a) { return a->y; }
+// CHECK-FUNCTIONS: define void @s0_copy
+// CHECK-FUNCTIONS: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 8, i32 4, i1 false)
+void s0_copy(struct s0 *a, struct s0 *b) { *b = *a; }
+
+//
+
+struct s1 {
+ int x;
+ int y;
+} __attribute__((packed));
+
+// CHECK-GLOBAL: @s1_align_x = global i32 1
+// CHECK-GLOBAL: @s1_align_y = global i32 1
+// CHECK-GLOBAL: @s1_align = global i32 1
+int s1_align_x = __alignof(((struct s1*)0)->x);
+int s1_align_y = __alignof(((struct s1*)0)->y);
+int s1_align = __alignof(struct s1);
+
+// CHECK-FUNCTIONS: define i32 @s1_load_x
+// CHECK-FUNCTIONS: [[s1_load_x:%.*]] = load i32* {{.*}}, align 1
+// CHECK-FUNCTIONS: ret i32 [[s1_load_x]]
+int s1_load_x(struct s1 *a) { return a->x; }
+// CHECK-FUNCTIONS: define i32 @s1_load_y
+// CHECK-FUNCTIONS: [[s1_load_y:%.*]] = load i32* {{.*}}, align 1
+// CHECK-FUNCTIONS: ret i32 [[s1_load_y]]
+int s1_load_y(struct s1 *a) { return a->y; }
+// CHECK-FUNCTIONS: define void @s1_copy
+// CHECK-FUNCTIONS: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 8, i32 1, i1 false)
+void s1_copy(struct s1 *a, struct s1 *b) { *b = *a; }
+
+//
+
+#pragma pack(push,2)
+struct s2 {
+ int x;
+ int y;
+};
+#pragma pack(pop)
+
+// CHECK-GLOBAL: @s2_align_x = global i32 2
+// CHECK-GLOBAL: @s2_align_y = global i32 2
+// CHECK-GLOBAL: @s2_align = global i32 2
+int s2_align_x = __alignof(((struct s2*)0)->x);
+int s2_align_y = __alignof(((struct s2*)0)->y);
+int s2_align = __alignof(struct s2);
+
+// CHECK-FUNCTIONS: define i32 @s2_load_x
+// CHECK-FUNCTIONS: [[s2_load_x:%.*]] = load i32* {{.*}}, align 2
+// CHECK-FUNCTIONS: ret i32 [[s2_load_x]]
+int s2_load_x(struct s2 *a) { return a->x; }
+// CHECK-FUNCTIONS: define i32 @s2_load_y
+// CHECK-FUNCTIONS: [[s2_load_y:%.*]] = load i32* {{.*}}, align 2
+// CHECK-FUNCTIONS: ret i32 [[s2_load_y]]
+int s2_load_y(struct s2 *a) { return a->y; }
+// CHECK-FUNCTIONS: define void @s2_copy
+// CHECK-FUNCTIONS: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 8, i32 2, i1 false)
+void s2_copy(struct s2 *a, struct s2 *b) { *b = *a; }
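
Note: the three structs differ only in how packing is requested, which is
exactly what the CHECK-GLOBAL values encode: packing a single member leaves
the struct alignment at 4, packing the whole struct drops it to 1, and
#pragma pack(2) caps it at 2. A runnable sketch, assuming an x86-64-style
ABI; a0/a1/a2 are illustrative names:

#include <stdio.h>

struct a0 { int x; int y __attribute__((packed)); };  /* like s0: align 4 */
struct a1 { int x; int y; } __attribute__((packed));  /* like s1: align 1 */
#pragma pack(push, 2)
struct a2 { int x; int y; };                          /* like s2: align 2 */
#pragma pack(pop)

int main(void) {
  printf("%u %u %u\n",                                /* prints "4 1 2" */
         (unsigned)__alignof__(struct a0),
         (unsigned)__alignof__(struct a1),
         (unsigned)__alignof__(struct a2));
  return 0;
}
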
diff --git a/test/CodeGen/palignr.c b/test/CodeGen/palignr.c
index 6297b2e..e9c1dbd 100644
--- a/test/CodeGen/palignr.c
+++ b/test/CodeGen/palignr.c
@@ -27,4 +27,4 @@ int2 align6(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 9); }
int2 align7(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 16); }
// CHECK: palignr
-int2 align8(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 7); } \ No newline at end of file
+int2 align8(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 7); }
diff --git a/test/CodeGen/pragma-visibility.c b/test/CodeGen/pragma-visibility.c
new file mode 100644
index 0000000..16460a2
--- /dev/null
+++ b/test/CodeGen/pragma-visibility.c
@@ -0,0 +1,24 @@
+// RUN: %clang_cc1 -emit-llvm -o - %s | FileCheck %s
+
+#pragma GCC visibility push(hidden)
+int x = 2;
+// CHECK: @x = hidden global
+
+extern int y;
+#pragma GCC visibility pop
+int y = 4;
+// CHECK: @y = hidden global
+
+#pragma GCC visibility push(hidden)
+extern __attribute((visibility("default"))) int z;
+int z = 0;
+// CHECK: @z = global
+#pragma GCC visibility pop
+
+#pragma GCC visibility push(hidden)
+void f() {}
+// CHECK: define hidden void @f
+
+__attribute((visibility("default"))) void g();
+void g() {}
+// CHECK: define void @g
diff --git a/test/CodeGen/statements.c b/test/CodeGen/statements.c
index 7ed82ad..0ea0597 100644
--- a/test/CodeGen/statements.c
+++ b/test/CodeGen/statements.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -Wreturn-type < %s -emit-llvm
+// RUN: %clang_cc1 -Wreturn-type %s -emit-llvm -o /dev/null
void test1(int x) {
switch (x) {
@@ -31,5 +31,10 @@ static long y = &&baz;
}
// PR3869
-int test5(long long b) { goto *b; }
+int test5(long long b) {
+ static void *lbls[] = { &&lbl };
+ goto *b;
+ lbl:
+ return 0;
+}
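
Note: the test5 fix matters because an indirect goto may only target labels
whose address has been taken with &&label in the same function; without the
lbls array the function has no valid indirect-branch successor. A sketch of
the usual GNU computed-goto dispatch idiom (dispatch and the op_* labels are
illustrative names):

int dispatch(int op) {
  /* Address-taken labels form the branch table for "goto *". */
  static void *tbl[] = { &&op_zero, &&op_one };
  goto *tbl[op];
op_zero:
  return 0;
op_one:
  return 1;
}
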
diff --git a/test/CodeGen/struct-passing.c b/test/CodeGen/struct-passing.c
index 409d14e..cbc14d5 100644
--- a/test/CodeGen/struct-passing.c
+++ b/test/CodeGen/struct-passing.c
@@ -1,10 +1,10 @@
// RUN: %clang_cc1 -triple i386-pc-linux-gnu -emit-llvm -o %t %s
-// RUN: grep 'declare i32 @f0() readnone$' %t
-// RUN: grep 'declare i32 @f1() readonly$' %t
-// RUN: grep 'declare void @f2(.* sret)$' %t
-// RUN: grep 'declare void @f3(.* sret)$' %t
-// RUN: grep 'declare void @f4(.* byval)$' %t
-// RUN: grep 'declare void @f5(.* byval)$' %t
+// RUN: grep 'declare i32 @f0() readnone' %t
+// RUN: grep 'declare i32 @f1() readonly' %t
+// RUN: grep 'declare void @f2(.* sret)' %t
+// RUN: grep 'declare void @f3(.* sret)' %t
+// RUN: grep 'declare void @f4(.* byval)' %t
+// RUN: grep 'declare void @f5(.* byval)' %t
// PR3835
typedef int T0;
diff --git a/test/CodeGen/thread-specifier.c b/test/CodeGen/thread-specifier.c
index b1e1ed8..a16103f 100644
--- a/test/CodeGen/thread-specifier.c
+++ b/test/CodeGen/thread-specifier.c
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -triple i686-pc-linux-gnu -emit-llvm -o - %s | grep thread_local | count 4
+// RUN: %clang_cc1 -triple i686-pc-linux-gnu -emit-llvm -o - %s | not grep common
__thread int a;
extern __thread int b;
diff --git a/test/CodeGen/trapv.c b/test/CodeGen/trapv.c
index d10d617..7f192c6 100644
--- a/test/CodeGen/trapv.c
+++ b/test/CodeGen/trapv.c
@@ -1,10 +1,51 @@
-// RUN: %clang_cc1 -ftrapv %s -emit-llvm -o %t
-// RUN: grep "__overflow_handler" %t | count 2
+// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -ftrapv %s -emit-llvm -o - | FileCheck %s
+
+// CHECK: [[I32O:%.*]] = type { i32, i1 }
unsigned int ui, uj, uk;
int i, j, k;
-void foo() {
+// CHECK: define void @test0()
+void test0() {
+ // -ftrapv doesn't affect unsigned arithmetic.
+ // CHECK: [[T1:%.*]] = load i32* @uj
+ // CHECK-NEXT: [[T2:%.*]] = load i32* @uk
+ // CHECK-NEXT: [[T3:%.*]] = add i32 [[T1]], [[T2]]
+ // CHECK-NEXT: store i32 [[T3]], i32* @ui
ui = uj + uk;
+
+ // CHECK: [[T1:%.*]] = load i32* @j
+ // CHECK-NEXT: [[T2:%.*]] = load i32* @k
+ // CHECK-NEXT: [[T3:%.*]] = call [[I32O]] @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 [[T2]])
+ // CHECK-NEXT: [[T4:%.*]] = extractvalue [[I32O]] [[T3]], 0
+ // CHECK-NEXT: [[T5:%.*]] = extractvalue [[I32O]] [[T3]], 1
+ // CHECK-NEXT: br i1 [[T5]]
+ // CHECK: call void @llvm.trap()
i = j + k;
}
+
+// CHECK: define void @test1()
+void test1() {
+ extern void opaque(int);
+ opaque(i++);
+
+ // CHECK: [[T1:%.*]] = load i32* @i
+ // CHECK-NEXT: [[T2:%.*]] = call [[I32O]] @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 1)
+ // CHECK-NEXT: [[T3:%.*]] = extractvalue [[I32O]] [[T2]], 0
+ // CHECK-NEXT: [[T4:%.*]] = extractvalue [[I32O]] [[T2]], 1
+ // CHECK-NEXT: br i1 [[T4]]
+ // CHECK: call void @llvm.trap()
+}
+
+// CHECK: define void @test2()
+void test2() {
+ extern void opaque(int);
+ opaque(++i);
+
+ // CHECK: [[T1:%.*]] = load i32* @i
+ // CHECK-NEXT: [[T2:%.*]] = call [[I32O]] @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 1)
+ // CHECK-NEXT: [[T3:%.*]] = extractvalue [[I32O]] [[T2]], 0
+ // CHECK-NEXT: [[T4:%.*]] = extractvalue [[I32O]] [[T2]], 1
+ // CHECK-NEXT: br i1 [[T4]]
+ // CHECK: call void @llvm.trap()
+}
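
Note: as the CHECK lines show, -ftrapv instruments only the signed operations,
pairing @llvm.sadd.with.overflow.i32 with @llvm.trap. A conceptual C
equivalent of that lowering; a sketch only, since __builtin_add_overflow
postdates this test and is used here just to mirror the intrinsic:

int trapping_add(int a, int b) {
  int sum;
  /* Mirrors the extractvalue-of-overflow-bit + br pattern checked above. */
  if (__builtin_add_overflow(a, b, &sum))
    __builtin_trap();
  return sum;
}
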
diff --git a/test/CodeGen/unwind-attr.c b/test/CodeGen/unwind-attr.c
index ee3199d..c588ca8 100644
--- a/test/CodeGen/unwind-attr.c
+++ b/test/CodeGen/unwind-attr.c
@@ -1,6 +1,24 @@
-// RUN: %clang_cc1 -fexceptions -emit-llvm -o - %s | grep "@foo()" | not grep nounwind
-// RUN: %clang_cc1 -emit-llvm -o - %s | grep "@foo()" | grep nounwind
+// RUN: %clang_cc1 -fexceptions -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -emit-llvm -o - %s | FileCheck -check-prefix NOEXC %s
-int foo(void) {
+int opaque();
+
+// CHECK: define [[INT:i.*]] @test0() {
+// CHECK-NOEXC: define [[INT:i.*]] @test0() nounwind {
+int test0(void) {
+ return opaque();
+}
+
+// <rdar://problem/8087431>: locally infer nounwind at -O0
+// CHECK: define [[INT:i.*]] @test1() nounwind {
+// CHECK-NOEXC: define [[INT:i.*]] @test1() nounwind {
+int test1(void) {
+ return 0;
+}
+
+// <rdar://problem/8283071>: not for weak functions
+// CHECK: define weak [[INT:i.*]] @test2() {
+// CHECK-NOEXC: define weak [[INT:i.*]] @test2() nounwind {
+__attribute__((weak)) int test2(void) {
return 0;
}
diff --git a/test/CodeGen/vector.c b/test/CodeGen/vector.c
index c16d65b..3fa5f14 100644
--- a/test/CodeGen/vector.c
+++ b/test/CodeGen/vector.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -triple i386-apple-darwin9 -target-cpu pentium4 -g -emit-llvm %s -o -
+// RUN: %clang_cc1 -triple i386-apple-darwin9 -O1 -target-cpu pentium4 -target-feature +sse4.1 -g -emit-llvm %s -o - | FileCheck %s
typedef short __v4hi __attribute__ ((__vector_size__ (8)));
void test1() {
@@ -20,6 +20,8 @@ void test3 ( vec4* a, char b, float c ) {
+// Don't include mm_malloc.h; it's system-specific.
+#define __MM_MALLOC_H
#include <mmintrin.h>
@@ -40,3 +42,16 @@ int test4(int argc, char *argv[]) {
return result;
}
+
+#include <smmintrin.h>
+
+unsigned long test_epi8(__m128i x) { return _mm_extract_epi8(x, 4); }
+// CHECK: @test_epi8
+// CHECK: extractelement <16 x i8> {{.*}}, i32 4
+// CHECK: zext i8 {{.*}} to i32
+
+unsigned long test_epi16(__m128i x) { return _mm_extract_epi16(x, 3); }
+
+// CHECK: @test_epi16
+// CHECK: extractelement <8 x i16> {{.*}}, i32 3
+// CHECK: zext i16 {{.*}} to i32
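
Note: the zext lines are the interesting part: _mm_extract_epi8 and
_mm_extract_epi16 hand the element back zero-extended to int, so the i8/i16
element becomes an i32 before widening to the unsigned long return value.
A minimal sketch, assuming SSE4.1 is enabled (e.g. -msse4.1); extract_sum is
an illustrative name:

#include <smmintrin.h>

int extract_sum(__m128i v) {
  /* Both extraction results arrive as zero-extended ints. */
  return _mm_extract_epi8(v, 4) + _mm_extract_epi16(v, 3);
}
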
diff --git a/test/CodeGen/x86_32-arguments.c b/test/CodeGen/x86_32-arguments.c
index 01c3e23..75dfb82 100644
--- a/test/CodeGen/x86_32-arguments.c
+++ b/test/CodeGen/x86_32-arguments.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -fblocks -triple i386-apple-darwin9 -emit-llvm -o %t %s
+// RUN: %clang_cc1 -w -fblocks -triple i386-apple-darwin9 -emit-llvm -o %t %s
// RUN: FileCheck < %t %s
// CHECK: define signext i8 @f0()
@@ -214,3 +214,17 @@ struct __attribute__((aligned(32))) s53 {
int y;
};
void f53(struct s53 x) {}
+
+typedef unsigned short v2i16 __attribute__((__vector_size__(4)));
+
+// CHECK: define i32 @f54(i32 %arg.coerce)
+// rdar://8359483
+v2i16 f54(v2i16 arg) { return arg+arg; }
+
+
+typedef int v4i32 __attribute__((__vector_size__(16)));
+
+// CHECK: define <2 x i64> @f55(<4 x i32> %arg)
+// PR8029
+v4i32 f55(v4i32 arg) { return arg+arg; }
+
diff --git a/test/CodeGen/x86_64-arguments.c b/test/CodeGen/x86_64-arguments.c
index cc318dc..51a234d 100644
--- a/test/CodeGen/x86_64-arguments.c
+++ b/test/CodeGen/x86_64-arguments.c
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o %t %s
-// RUN: FileCheck < %t %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s
+#include <stdarg.h>
// CHECK: %0 = type { i64, double }
@@ -63,8 +63,8 @@ void f10(struct s10 a0) {}
// CHECK: define void @f11(%struct.s19* sret %agg.result)
union { long double a; float b; } f11() { while (1) {} }
-// CHECK: define i64 @f12_0()
-// CHECK: define void @f12_1(i64 %a0.coerce)
+// CHECK: define i32 @f12_0()
+// CHECK: define void @f12_1(i32 %a0.coerce)
struct s12 { int a __attribute__((aligned(16))); };
struct s12 f12_0(void) { while (1) {} }
void f12_1(struct s12 a0) {}
@@ -131,3 +131,117 @@ void f22(L x, L y) { }
// CHECK: %y = alloca{{.*}}, align 16
+
+// PR7714
+struct f23S {
+ short f0;
+ unsigned f1;
+ int f2;
+};
+
+
+void f23(int A, struct f23S B) {
+ // CHECK: define void @f23(i32 %A, i64 %B.coerce0, i32 %B.coerce1)
+}
+
+struct f24s { long a; int b; };
+
+struct f23S f24(struct f23S *X, struct f24s *P2) {
+ return *X;
+
+ // CHECK: define %struct.f24s @f24(%struct.f23S* %X, %struct.f24s* %P2)
+}
+
+// rdar://8248065
+typedef float v4f32 __attribute__((__vector_size__(16)));
+v4f32 f25(v4f32 X) {
+ // CHECK: define <4 x float> @f25(<4 x float> %X)
+ // CHECK-NOT: alloca
+ // CHECK: alloca <4 x float>
+ // CHECK-NOT: alloca
+ // CHECK: store <4 x float> %X, <4 x float>*
+ // CHECK-NOT: store
+ // CHECK: ret <4 x float>
+ return X+X;
+}
+
+struct foo26 {
+ int *X;
+ float *Y;
+};
+
+struct foo26 f26(struct foo26 *P) {
+ // CHECK: define %struct.foo26 @f26(%struct.foo26* %P)
+ return *P;
+}
+
+
+struct v4f32wrapper {
+ v4f32 v;
+};
+
+struct v4f32wrapper f27(struct v4f32wrapper X) {
+ // CHECK: define <4 x float> @f27(<4 x float> %X.coerce)
+ return X;
+}
+
+// rdar://5711709
+struct f28c {
+ double x;
+ int y;
+};
+void f28(struct f28c C) {
+ // CHECK: define void @f28(double %C.coerce0, i32 %C.coerce1)
+}
+
+struct f29a {
+ struct c {
+ double x;
+ int y;
+ } x[1];
+};
+
+void f29a(struct f29a A) {
+ // CHECK: define void @f29a(double %A.coerce0, i32 %A.coerce1)
+}
+
+// rdar://8249586
+struct S0 { char f0[8]; char f2; char f3; char f4; };
+void f30(struct S0 p_4) {
+ // CHECK: define void @f30(i64 %p_4.coerce0, i24 %p_4.coerce1)
+}
+
+// Pass the third element as a float when followed by tail padding.
+// rdar://8251384
+struct f31foo { float a, b, c; };
+float f31(struct f31foo X) {
+ // CHECK: define float @f31(<2 x float> %X.coerce0, float %X.coerce1)
+ return X.c;
+}
+
+_Complex float f32(_Complex float A, _Complex float B) {
+ // rdar://6379669
+ // CHECK: define <2 x float> @f32(<2 x float> %A.coerce, <2 x float> %B.coerce)
+ return A+B;
+}
+
+
+// rdar://8357396
+struct f33s { long x; float c,d; };
+
+void f33(va_list X) {
+ va_arg(X, struct f33s);
+}
+
+typedef unsigned long long v1i64 __attribute__((__vector_size__(8)));
+
+// rdar://8359248
+// CHECK: define i64 @f34(i64 %arg.coerce)
+v1i64 f34(v1i64 arg) { return arg; }
+
+
+// rdar://8358475
+// CHECK: define i64 @f35(i64 %arg.coerce)
+typedef unsigned long v1i64_2 __attribute__((__vector_size__(8)));
+v1i64_2 f35(v1i64_2 arg) { return arg+arg; }
+
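
Note: the coercions throughout this file follow the x86-64 SysV
classification: an aggregate of up to 16 bytes is split into two eightbytes,
each classified INTEGER or SSE and passed in a matching register. For f23,
eightbyte 0 packs f0 and f1 (INTEGER, i64) and eightbyte 1 holds f2 alone
(INTEGER, i32), giving the "i64 %B.coerce0, i32 %B.coerce1" signature checked
above. A sketch verifying the layout side of that claim; probe is an
illustrative name, and _Static_assert assumes a C11 compiler:

#include <stddef.h>

struct probe { short f0; unsigned f1; int f2; };
_Static_assert(sizeof(struct probe) == 12,
               "eightbyte 0 = {f0,f1}, eightbyte 1 = {f2}");
_Static_assert(offsetof(struct probe, f1) == 4,
               "f1 shares the first eightbyte with f0");
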