diff options
author | dim <dim@FreeBSD.org> | 2015-12-30 11:49:41 +0000 |
---|---|---|
committer | dim <dim@FreeBSD.org> | 2015-12-30 11:49:41 +0000 |
commit | 3176e97f130184ece0e1a21352c8124cc83ff24a (patch) | |
tree | 0a5b74c0b9ca73aded34df95c91fcaf3815230d8 /test/CodeGen/builtins-ppc-p8vector.c | |
parent | 1e9b8d38881c3213d1e67b0c47ab9b2c00721a5c (diff) | |
download | FreeBSD-src-3176e97f130184ece0e1a21352c8124cc83ff24a.zip FreeBSD-src-3176e97f130184ece0e1a21352c8124cc83ff24a.tar.gz |
Vendor import of clang trunk r256633:
https://llvm.org/svn/llvm-project/cfe/trunk@256633
Diffstat (limited to 'test/CodeGen/builtins-ppc-p8vector.c')
-rw-r--r-- | test/CodeGen/builtins-ppc-p8vector.c | 479 |
1 file changed, 251 insertions, 228 deletions
diff --git a/test/CodeGen/builtins-ppc-p8vector.c b/test/CodeGen/builtins-ppc-p8vector.c index 208dd43..29503f0 100644 --- a/test/CodeGen/builtins-ppc-p8vector.c +++ b/test/CodeGen/builtins-ppc-p8vector.c @@ -7,6 +7,13 @@ // (vec_cmpge, vec_cmple). Without this option, there is only one overload so // it is selected. +void dummy() { } +signed int si; +signed long long sll; +unsigned long long ull; +signed __int128 sx; +unsigned __int128 ux; +double d; vector signed char vsc = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 }; vector unsigned char vuc = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 }; vector bool char vbc = { 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1 }; @@ -23,10 +30,17 @@ vector signed long long vsll = { 1, 2 }; vector unsigned long long vull = { 1, 2 }; vector bool long long vbll = { 1, 0 }; +vector signed __int128 vsx = { 1 }; +vector unsigned __int128 vux = { 1 }; + vector float vfa = { 1.e-4f, -132.23f, -22.1, 32.00f }; vector double vda = { 1.e-11, -132.23e10 }; int res_i; +double res_d; +signed long long res_sll; +unsigned long long res_ull; + vector signed char res_vsc; vector unsigned char res_vuc; vector bool char res_vbc; @@ -43,7 +57,10 @@ vector signed long long res_vsll; vector unsigned long long res_vull; vector bool long long res_vbll; -vector double res_vf; +vector signed __int128 res_vsx; +vector unsigned __int128 res_vux; + +vector float res_vf; vector double res_vd; // CHECK-LABEL: define void @test1 @@ -73,6 +90,37 @@ void test1() { // CHECK-LE: add <2 x i64> // CHECK-PPC: error: call to 'vec_add' is ambiguous + /* vec_addc */ + res_vsi = vec_addc(vsi, vsi); +// CHECK: @llvm.ppc.altivec.vaddcuw +// CHECK-LE: @llvm.ppc.altivec.vaddcuw + + res_vui = vec_addc(vui, vui); +// CHECK: @llvm.ppc.altivec.vaddcuw +// CHECK-LE: @llvm.ppc.altivec.vaddcuw + + res_vsx = vec_addc(vsx, vsx); +// CHECK: @llvm.ppc.altivec.vaddcuq +// CHECK-LE: @llvm.ppc.altivec.vaddcuq + + res_vux = vec_addc(vux, vux); +// CHECK: 
@llvm.ppc.altivec.vaddcuq +// CHECK-LE: @llvm.ppc.altivec.vaddcuq + + /* vec_adde */ + res_vsx = vec_adde(vsx, vsx, vsx); +// CHECK: @llvm.ppc.altivec.vaddeuqm +// CHECK-LE: @llvm.ppc.altivec.vaddeuqm + + res_vux = vec_adde(vux, vux, vux); +// CHECK: @llvm.ppc.altivec.vaddeuqm +// CHECK-LE: @llvm.ppc.altivec.vaddeuqm + + /* vec_addec */ + res_vsx = vec_addec(vsx, vsx, vsx); +// CHECK: @llvm.ppc.altivec.vaddecuq +// CHECK-LE: @llvm.ppc.altivec.vaddecuq + /* vec_mergee */ res_vbi = vec_mergee(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm @@ -156,6 +204,15 @@ void test1() { // CHECK-LE: call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}}) // CHECK-PPC: error: call to 'vec_cmplt' is ambiguous + /* vec_double */ + res_vd = vec_double(vsll); +// CHECK: sitofp i64 {{.+}} to double +// CHECK-BE: sitofp i64 {{.+}} to double + + res_vd = vec_double(vull); +// CHECK: uitofp i64 {{.+}} to double +// CHECK-BE: uitofp i64 {{.+}} to double + /* vec_eqv */ res_vsc = vec_eqv(vsc, vsc); // CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> @@ -168,18 +225,7 @@ void test1() { // CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8> // CHECK-PPC: error: assigning to - res_vsc = vec_eqv(vbc, vsc); -// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> -// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> -// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8> -// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> -// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> -// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8> -// CHECK-PPC: error: assigning to - - res_vsc = vec_eqv(vsc, vbc); + res_vsc = vec_eqv(vbc, vbc); // CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> // CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x 
i32> // CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) @@ -201,28 +247,6 @@ void test1() { // CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8> // CHECK-PPC: error: assigning to - res_vuc = vec_eqv(vbc, vuc); -// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> -// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> -// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8> -// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> -// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> -// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8> -// CHECK-PPC: error: assigning to - - res_vuc = vec_eqv(vuc, vbc); -// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> -// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> -// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8> -// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> -// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32> -// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8> -// CHECK-PPC: error: assigning to - res_vss = vec_eqv(vss, vss); // CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> // CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> @@ -234,18 +258,7 @@ void test1() { // CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16> // CHECK-PPC: error: assigning to - res_vss = vec_eqv(vbs, vss); -// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> -// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> -// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK: bitcast 
<4 x i32> [[T3]] to <8 x i16> -// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> -// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> -// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16> -// CHECK-PPC: error: assigning to - - res_vss = vec_eqv(vss, vbs); + res_vss = vec_eqv(vbs, vbs); // CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> // CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> // CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) @@ -267,39 +280,12 @@ void test1() { // CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16> // CHECK-PPC: error: assigning to - res_vus = vec_eqv(vbs, vus); -// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> -// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> -// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16> -// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> -// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> -// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16> -// CHECK-PPC: error: assigning to - - res_vus = vec_eqv(vus, vbs); -// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> -// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> -// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16> -// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> -// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> -// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16> -// CHECK-PPC: error: assigning to - res_vsi = 
vec_eqv(vsi, vsi); // CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) // CHECK-PPC: error: assigning to - res_vsi = vec_eqv(vbi, vsi); -// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) -// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) -// CHECK-PPC: error: assigning to - - res_vsi = vec_eqv(vsi, vbi); + res_vsi = vec_eqv(vbi, vbi); // CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) // CHECK-PPC: error: assigning to @@ -309,16 +295,6 @@ void test1() { // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) // CHECK-PPC: error: assigning to - res_vui = vec_eqv(vbi, vui); -// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) -// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) -// CHECK-PPC: error: assigning to - - res_vui = vec_eqv(vui, vbi); -// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) -// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}}) -// CHECK-PPC: error: assigning to - res_vsll = vec_eqv(vsll, vsll); // CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> // CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> @@ -330,18 +306,7 @@ void test1() { // CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64> // CHECK-PPC: error: assigning to - res_vsll = vec_eqv(vbll, vsll); -// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> -// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> -// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64> -// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> -// 
CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> -// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64> -// CHECK-PPC: error: assigning to - - res_vsll = vec_eqv(vsll, vbll); + res_vsll = vec_eqv(vbll, vbll); // CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> // CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> // CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) @@ -363,28 +328,6 @@ void test1() { // CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64> // CHECK-PPC: error: assigning to - res_vull = vec_eqv(vbll, vull); -// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> -// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> -// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64> -// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> -// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> -// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64> -// CHECK-PPC: error: assigning to - - res_vull = vec_eqv(vull, vbll); -// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> -// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> -// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64> -// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> -// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32> -// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]]) -// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64> -// CHECK-PPC: error: assigning to - res_vf = vec_eqv(vfa, vfa); // CHECK: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32> // CHECK: 
[[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32> @@ -396,23 +339,6 @@ void test1() { // CHECK-LE: bitcast <4 x i32> [[T3]] to <4 x float> // CHECK-PPC: error: assigning to - res_vf = vec_eqv(vbi, vfa); -// CHECK: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32> -// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.+}}, <4 x i32> [[T2]]) -// CHECK: bitcast <4 x i32> [[T3]] to <4 x float> -// CHECK-LE: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32> -// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.+}}, <4 x i32> [[T2]]) -// CHECK-PPC: error: assigning to - - res_vf = vec_eqv(vfa, vbi); -// CHECK: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32> -// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> -// CHECK: bitcast <4 x i32> [[T3]] to <4 x float> -// CHECK-LE: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32> -// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> -// CHECK-LE: bitcast <4 x i32> [[T3]] to <4 x float> -// CHECK-PPC: error: assigning to - res_vd = vec_eqv(vda, vda); // CHECK: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32> // CHECK: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32> @@ -424,24 +350,41 @@ void test1() { // CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x double> // CHECK-PPC: error: assigning to - res_vd = vec_eqv(vbll, vda); -// CHECK: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32> -// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.+}}, <4 x i32> [[T2]]) -// CHECK: bitcast <4 x i32> [[T3]] to <2 x double> -// CHECK-LE: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32> -// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.+}}, <4 x i32> [[T2]]) -// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x double> -// CHECK-PPC: error: assigning to + /* vec_extract */ + res_sll = vec_extract(vsll, si); +// CHECK: extractelement <2 x i64> +// CHECK-LE: 
extractelement <2 x i64> - res_vd = vec_eqv(vda, vbll); -// CHECK: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32> -// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> -// CHECK: bitcast <4 x i32> [[T3]] to <2 x double> -// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32> -// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> -// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x double> -// CHECK-PPC: error: assigning to + res_ull = vec_extract(vull, si); +// CHECK: extractelement <2 x i64> +// CHECK-LE: extractelement <2 x i64> + + res_ull = vec_extract(vbll, si); +// CHECK: extractelement <2 x i64> +// CHECK-LE: extractelement <2 x i64> + + res_d = vec_extract(vda, si); +// CHECK: extractelement <2 x double> +// CHECK-LE: extractelement <2 x double> + + /* vec_insert */ + res_vsll = vec_insert(sll, vsll, si); +// CHECK: insertelement <2 x i64> +// CHECK-LE: insertelement <2 x i64> + + res_vbll = vec_insert(ull, vbll, si); +// CHECK: insertelement <2 x i64> +// CHECK-LE: insertelement <2 x i64> + + res_vull = vec_insert(ull, vull, si); +// CHECK: insertelement <2 x i64> +// CHECK-LE: insertelement <2 x i64> + res_vd = vec_insert(d, vda, si); +// CHECK: insertelement <2 x double> +// CHECK-LE: insertelement <2 x double> + + /* vec_cntlz */ res_vsc = vec_cntlz(vsc); // CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false) // CHECK-LE: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false) @@ -512,6 +455,10 @@ void test1() { // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_all_eq' is ambiguous + res_i = vec_all_eq(vda, vda); +// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p +// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p + /* vec_all_ne */ res_i = vec_all_ne(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpequd.p @@ -548,6 +495,24 @@ void test1() { // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_all_ne' is ambiguous + dummy(); +// 
CHECK: @dummy + + res_i = vec_all_ne(vda, vda); +// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p +// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p + + dummy(); +// CHECK: @dummy + + res_i = vec_all_nge(vda, vda); +// CHECK: @llvm.ppc.vsx.xvcmpgedp.p +// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p + + res_i = vec_all_ngt(vda, vda); +// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p +// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p + /* vec_any_eq */ res_i = vec_any_eq(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpequd.p @@ -584,6 +549,10 @@ void test1() { // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_any_eq' is ambiguous + res_i = vec_any_eq(vda, vda); +// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p +// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p + /* vec_any_ne */ res_i = vec_any_ne(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpequd.p @@ -620,6 +589,10 @@ void test1() { // CHECK-LE: @llvm.ppc.altivec.vcmpequd.p // CHECK-PPC: error: call to 'vec_any_ne' is ambiguous + res_i = vec_any_ne(vda, vda); +// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p +// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p + /* vec_all_ge */ res_i = vec_all_ge(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p @@ -656,6 +629,10 @@ void test1() { // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous + res_i = vec_all_ge(vda, vda); +// CHECK: @llvm.ppc.vsx.xvcmpgedp.p +// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p + /* vec_all_gt */ res_i = vec_all_gt(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p @@ -692,6 +669,10 @@ void test1() { // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_gt' is ambiguous + res_i = vec_all_gt(vda, vda); +// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p +// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p + /* vec_all_le */ res_i = vec_all_le(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p @@ -728,6 +709,10 @@ void test1() { // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_le' is ambiguous + res_i = vec_all_le(vda, vda); +// CHECK: @llvm.ppc.vsx.xvcmpgedp.p 
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p + /* vec_all_lt */ res_i = vec_all_lt(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p @@ -764,6 +749,14 @@ void test1() { // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous + res_i = vec_all_lt(vda, vda); +// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p +// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p + + res_i = vec_all_nan(vda); +// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p +// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p + /* vec_any_ge */ res_i = vec_any_ge(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p @@ -800,6 +793,10 @@ void test1() { // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_ge' is ambiguous + res_i = vec_any_ge(vda, vda); +// CHECK: @llvm.ppc.vsx.xvcmpgedp.p +// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p + /* vec_any_gt */ res_i = vec_any_gt(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p @@ -836,6 +833,10 @@ void test1() { // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous + res_i = vec_any_gt(vda, vda); +// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p +// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p + /* vec_any_le */ res_i = vec_any_le(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p @@ -872,6 +873,10 @@ void test1() { // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_le' is ambiguous + res_i = vec_any_le(vda, vda); +// CHECK: @llvm.ppc.vsx.xvcmpgedp.p +// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p + /* vec_any_lt */ res_i = vec_any_lt(vsll, vsll); // CHECK: @llvm.ppc.altivec.vcmpgtsd.p @@ -908,6 +913,10 @@ void test1() { // CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous + res_i = vec_any_lt(vda, vda); +// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p +// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p + /* vec_max */ res_vsll = vec_max(vsll, vsll); // CHECK: @llvm.ppc.altivec.vmaxsd @@ -939,6 +948,15 @@ void test1() { // CHECK-LE: @llvm.ppc.altivec.vmaxud // CHECK-PPC: error: call to 
'vec_max' is ambiguous + /* vec_mergeh */ + res_vbll = vec_mergeh(vbll, vbll); +// CHECK: @llvm.ppc.altivec.vperm +// CHECK-LE: @llvm.ppc.altivec.vperm + + res_vbll = vec_mergel(vbll, vbll); +// CHECK: @llvm.ppc.altivec.vperm +// CHECK-LE: @llvm.ppc.altivec.vperm + /* vec_min */ res_vsll = vec_min(vsll, vsll); // CHECK: @llvm.ppc.altivec.vminsd @@ -1058,6 +1076,28 @@ void test1() { // CHECK-LE: ashr <2 x i64> // CHECK-PPC: error: call to 'vec_sra' is ambiguous + /* vec_splats */ + res_vsll = vec_splats(sll); +// CHECK: insertelement <2 x i64> +// CHECK-LE: insertelement <2 x i64> + + res_vull = vec_splats(ull); +// CHECK: insertelement <2 x i64> +// CHECK-LE: insertelement <2 x i64> + + res_vsx = vec_splats(sx); +// CHECK: insertelement <1 x i128> +// CHECK-LE: insertelement <1 x i128> + + res_vux = vec_splats(ux); +// CHECK: insertelement <1 x i128> +// CHECK-LE: insertelement <1 x i128> + + res_vd = vec_splats(d); +// CHECK: insertelement <2 x double> +// CHECK-LE: insertelement <2 x double> + + /* vec_unpackh */ res_vsll = vec_unpackh(vsi); // CHECK: llvm.ppc.altivec.vupkhsw @@ -1177,13 +1217,7 @@ void test1() { // CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> // CHECK-PPC: warning: implicit declaration of function 'vec_nand' is invalid in C99 - res_vsc = vec_nand(vsc, vbc); -// CHECK: [[T1:%.+]] = and <16 x i8> -// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> -// CHECK-LE: [[T1:%.+]] = and <16 x i8> -// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> - - res_vsc = vec_nand(vbc, vsc); + res_vsc = vec_nand(vbc, vbc); // CHECK: [[T1:%.+]] = and <16 x i8> // CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, 
i8 -1, i8 -1, i8 -1> // CHECK-LE: [[T1:%.+]] = and <16 x i8> @@ -1195,31 +1229,13 @@ void test1() { // CHECK-LE: [[T1:%.+]] = and <16 x i8> // CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> - res_vuc = vec_nand(vuc, vbc); -// CHECK: [[T1:%.+]] = and <16 x i8> -// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> -// CHECK-LE: [[T1:%.+]] = and <16 x i8> -// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> - - res_vuc = vec_nand(vbc, vuc); -// CHECK: [[T1:%.+]] = and <16 x i8> -// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> -// CHECK-LE: [[T1:%.+]] = and <16 x i8> -// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> - res_vss = vec_nand(vss, vss); // CHECK: [[T1:%.+]] = and <8 x i16> // CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> // CHECK-LE: [[T1:%.+]] = and <8 x i16> // CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> - res_vss = vec_nand(vss, vbs); -// CHECK: [[T1:%.+]] = and <8 x i16> -// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> -// CHECK-LE: [[T1:%.+]] = and <8 x i16> -// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> - - res_vss = vec_nand(vbs, vss); + res_vss = vec_nand(vbs, vbs); // CHECK: [[T1:%.+]] = and <8 x i16> // CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> // CHECK-LE: [[T1:%.+]] = and <8 x i16> @@ -1231,31 +1247,13 @@ void test1() { // 
CHECK-LE: [[T1:%.+]] = and <8 x i16> // CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> - res_vus = vec_nand(vus, vbs); -// CHECK: [[T1:%.+]] = and <8 x i16> -// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> -// CHECK-LE: [[T1:%.+]] = and <8 x i16> -// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> - - res_vus = vec_nand(vbs, vus); -// CHECK: [[T1:%.+]] = and <8 x i16> -// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> -// CHECK-LE: [[T1:%.+]] = and <8 x i16> -// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> - res_vsi = vec_nand(vsi, vsi); // CHECK: [[T1:%.+]] = and <4 x i32> // CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> // CHECK-LE: [[T1:%.+]] = and <4 x i32> // CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> - res_vsi = vec_nand(vsi, vbi); -// CHECK: [[T1:%.+]] = and <4 x i32> -// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> -// CHECK-LE: [[T1:%.+]] = and <4 x i32> -// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> - - res_vsi = vec_nand(vbi, vsi); + res_vsi = vec_nand(vbi, vbi); // CHECK: [[T1:%.+]] = and <4 x i32> // CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> // CHECK-LE: [[T1:%.+]] = and <4 x i32> @@ -1267,31 +1265,13 @@ void test1() { // CHECK-LE: [[T1:%.+]] = and <4 x i32> // CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> - res_vui = vec_nand(vui, vbi); -// CHECK: [[T1:%.+]] = and <4 x i32> -// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> -// CHECK-LE: [[T1:%.+]] = and <4 x i32> -// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> - - res_vui = vec_nand(vbi, vui); -// CHECK: [[T1:%.+]] = and <4 x i32> -// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> -// CHECK-LE: 
[[T1:%.+]] = and <4 x i32> -// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1> - res_vsll = vec_nand(vsll, vsll); // CHECK: [[T1:%.+]] = and <2 x i64> // CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1> // CHECK-LE: [[T1:%.+]] = and <2 x i64> // CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1> - res_vsll = vec_nand(vsll, vbll); -// CHECK: [[T1:%.+]] = and <2 x i64> -// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1> -// CHECK-LE: [[T1:%.+]] = and <2 x i64> -// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1> - - res_vsll = vec_nand(vbll, vsll); + res_vsll = vec_nand(vbll, vbll); // CHECK: [[T1:%.+]] = and <2 x i64> // CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1> // CHECK-LE: [[T1:%.+]] = and <2 x i64> @@ -1303,18 +1283,6 @@ void test1() { // CHECK-LE: [[T1:%.+]] = and <2 x i64> // CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1> - res_vull = vec_nand(vull, vbll); -// CHECK: [[T1:%.+]] = and <2 x i64> -// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1> -// CHECK-LE: [[T1:%.+]] = and <2 x i64> -// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1> - - res_vull = vec_nand(vbll, vull); -// CHECK: [[T1:%.+]] = and <2 x i64> -// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1> -// CHECK-LE: [[T1:%.+]] = and <2 x i64> -// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1> - /* vec_orc */ res_vsc = vec_orc(vsc, vsc); // CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> @@ -1353,6 +1321,12 @@ void test1() { // CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> // CHECK-LE: or <16 x i8> {{%.+}}, [[T1]] + res_vbc = vec_orc(vbc, vbc); +// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> +// CHECK: or <16 x i8> {{%.+}}, [[T1]] +// CHECK-LE: [[T1:%.+]] = xor 
<16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> +// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]] + res_vss = vec_orc(vss, vss); // CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> // CHECK: or <8 x i16> {{%.+}}, [[T1]] @@ -1389,6 +1363,12 @@ void test1() { // CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> // CHECK-LE: or <8 x i16> {{%.+}}, [[T1]] + res_vbs = vec_orc(vbs, vbs); +// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> +// CHECK: or <8 x i16> {{%.+}}, [[T1]] +// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> +// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]] + res_vsi = vec_orc(vsi, vsi); // CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> // CHECK: or <4 x i32> {{%.+}}, [[T1]] @@ -1425,6 +1405,12 @@ void test1() { // CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> // CHECK-LE: or <4 x i32> {{%.+}}, [[T1]] + res_vbi = vec_orc(vbi, vbi); +// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> +// CHECK: or <4 x i32> {{%.+}}, [[T1]] +// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1> +// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]] + res_vsll = vec_orc(vsll, vsll); // CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> // CHECK: or <2 x i64> {{%.+}}, [[T1]] @@ -1461,6 +1447,33 @@ void test1() { // CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> // CHECK-LE: or <2 x i64> {{%.+}}, [[T1]] + res_vbll = vec_orc(vbll, vbll); +// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> +// CHECK: or <2 x i64> {{%.+}}, [[T1]] +// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1> +// CHECK-LE: or <2 x i64> 
{{%.+}}, [[T1]] + + /* vec_sub */ + res_vsll = vec_sub(vsll, vsll); +// CHECK: sub <2 x i64> +// CHECK-LE: sub <2 x i64> + + res_vull = vec_sub(vull, vull); +// CHECK: sub <2 x i64> +// CHECK-LE: sub <2 x i64> + + res_vd = vec_sub(vda, vda); +// CHECK: fsub <2 x double> +// CHECK-LE: fsub <2 x double> + + res_vsx = vec_sub(vsx, vsx); +// CHECK: sub <1 x i128> +// CHECK-LE: sub <1 x i128> + + res_vux = vec_sub(vux, vux); +// CHECK: sub <1 x i128> +// CHECK-LE: sub <1 x i128> + /* vec_vbpermq */ res_vsll = vec_vbpermq(vsc, vsc); // CHECK: llvm.ppc.altivec.vbpermq @@ -1480,4 +1493,14 @@ void test1() { // CHECK: llvm.ppc.altivec.vgbbd // CHECK-LE: llvm.ppc.altivec.vgbbd // CHECK-PPC: warning: implicit declaration of function 'vec_vgbbd' + + res_vuc = vec_gb(vuc); +// CHECK: llvm.ppc.altivec.vgbbd +// CHECK-LE: llvm.ppc.altivec.vgbbd +// CHECK-PPC: warning: implicit declaration of function 'vec_gb' + + res_vull = vec_bperm(vux, vux); +// CHECK: llvm.ppc.altivec.vbpermq +// CHECK-LE: llvm.ppc.altivec.vbpermq +// CHECK-PPC: warning: implicit declaration of function 'vec_bperm' } |