| field | value | date |
|---|---|---|
| author | dim <dim@FreeBSD.org> | 2012-12-02 13:10:19 +0000 |
| committer | dim <dim@FreeBSD.org> | 2012-12-02 13:10:19 +0000 |
| commit | 6de2c08bc400b4aca9fb46684e8bdb56eed9b09f (patch) | |
| tree | 32b4679ab4b8f28e5228daafc65e9dc436935353 /test/CodeGen | |
| parent | 4dc93743c9d40c29c0a3bec2aae328cac0d289e8 (diff) | |
| download | FreeBSD-src-6de2c08bc400b4aca9fb46684e8bdb56eed9b09f.zip, FreeBSD-src-6de2c08bc400b4aca9fb46684e8bdb56eed9b09f.tar.gz | |
Vendor import of llvm release_32 branch r168974 (effectively, 3.2 RC2):
http://llvm.org/svn/llvm-project/llvm/branches/release_32@168974
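For reference, the imported sources correspond to the upstream branch at that exact revision; assuming a Subversion client is available, they can be fetched with a command along these lines (the target directory name is only illustrative):

    svn export http://llvm.org/svn/llvm-project/llvm/branches/release_32@168974 llvm-release_32-r168974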
Diffstat (limited to 'test/CodeGen')
321 files changed, 14718 insertions, 463 deletions
diff --git a/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll b/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll index 99db637..36d1575 100644 --- a/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll +++ b/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll @@ -13,12 +13,12 @@ ; BASIC-NEXT: 0x00000000 ; BASIC-NEXT: 0x00000000 ; BASIC-NEXT: 0x0000003c -; BASIC-NEXT: 0x00000020 +; BASIC-NEXT: 0x00000022 ; BASIC-NEXT: 0x00000000 ; BASIC-NEXT: 0x00000000 ; BASIC-NEXT: 0x00000001 ; BASIC-NEXT: 0x00000000 -; BASIC-NEXT: '411f0000 00616561 62690001 15000000 06020801 09011401 15011703 18011901' +; BASIC-NEXT: '41210000 00616561 62690001 17000000 060a0741 08010902 14011501 17031801 1901' ; CORTEXA8: .ARM.attributes ; CORTEXA8-NEXT: 0x70000003 diff --git a/test/CodeGen/ARM/2010-12-07-PEIBug.ll b/test/CodeGen/ARM/2010-12-07-PEIBug.ll index 770ad44..4879f4e 100644 --- a/test/CodeGen/ARM/2010-12-07-PEIBug.ll +++ b/test/CodeGen/ARM/2010-12-07-PEIBug.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 | FileCheck %s +; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a9 | FileCheck %s ; rdar://8728956 define hidden void @foo() nounwind ssp { diff --git a/test/CodeGen/ARM/2011-06-16-TailCallByVal.ll b/test/CodeGen/ARM/2011-06-16-TailCallByVal.ll index 3e78c46..101a913 100644 --- a/test/CodeGen/ARM/2011-06-16-TailCallByVal.ll +++ b/test/CodeGen/ARM/2011-06-16-TailCallByVal.ll @@ -1,4 +1,9 @@ ; RUN: llc < %s -arm-tail-calls=1 | FileCheck %s + +; tail call inside a function where byval argument is splitted between +; registers and stack is currently unsupported. +; XFAIL: * + target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32" target triple = "thumbv7-apple-ios" diff --git a/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll b/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll index 42b1491..6e0ef96 100644 --- a/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll +++ b/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll @@ -9,8 +9,8 @@ entry: } ; Trigger multiple NEON stores. 
-; CHECK: vstmia -; CHECK-NEXT: vstmia +; CHECK: vst1.64 +; CHECK-NEXT: vst1.64 define void @f_0_40(i8* nocapture %c) nounwind optsize { entry: call void @llvm.memset.p0i8.i64(i8* %c, i8 0, i64 40, i32 16, i1 false) diff --git a/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll b/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll index 89c01d5..f9ede74 100644 --- a/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll +++ b/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll @@ -8,12 +8,12 @@ define void @test_sqrt(<4 x float>* %X) nounwind { ; CHECK: movw r1, :lower16:{{.*}} ; CHECK: movt r1, :upper16:{{.*}} -; CHECK: vldmia r1 +; CHECK: vld1.64 {{.*}}, [r1, :128] ; CHECK: vsqrt.f32 {{s[0-9]+}}, {{s[0-9]+}} ; CHECK: vsqrt.f32 {{s[0-9]+}}, {{s[0-9]+}} ; CHECK: vsqrt.f32 {{s[0-9]+}}, {{s[0-9]+}} ; CHECK: vsqrt.f32 {{s[0-9]+}}, {{s[0-9]+}} -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 {{.*}} L.entry: %0 = load <4 x float>* @A, align 16 @@ -31,21 +31,21 @@ define void @test_cos(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}} +; CHECK: vld1.64 -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}cosf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}cosf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}cosf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}cosf -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: %0 = load <4 x float>* @A, align 16 @@ -62,21 +62,21 @@ define void @test_exp(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}} +; CHECK: vld1.64 -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}expf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}expf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}expf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}expf -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: %0 = load <4 x float>* @A, align 16 @@ -93,21 +93,21 @@ define void @test_exp2(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}} +; CHECK: vld1.64 -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}exp2f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}exp2f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}exp2f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}exp2f -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: %0 = load <4 x float>* @A, align 16 @@ -124,21 +124,21 @@ define void @test_log10(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}} +; CHECK: vld1.64 -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}log10f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}log10f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}log10f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}log10f -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: %0 = load <4 x float>* @A, align 16 @@ -155,21 +155,21 @@ 
define void @test_log(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}} +; CHECK: vld1.64 -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}logf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}logf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}logf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}logf -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: %0 = load <4 x float>* @A, align 16 @@ -186,21 +186,21 @@ define void @test_log2(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}} +; CHECK: vld1.64 -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}log2f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}log2f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}log2f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}log2f -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: %0 = load <4 x float>* @A, align 16 @@ -218,21 +218,21 @@ define void @test_pow(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}} +; CHECK: vld1.64 -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}powf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}powf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}powf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}powf -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: @@ -252,10 +252,10 @@ define void @test_powi(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia [[reg0]], {{.*}} +; CHECK: vld1.64 {{.*}}, :128 ; CHECK: vmul.f32 {{.*}} -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: @@ -275,21 +275,21 @@ define void @test_sin(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}} +; CHECK: vld1.64 -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}sinf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}sinf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}sinf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}sinf -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: %0 = load <4 x float>* @A, align 16 @@ -300,3 +300,34 @@ L.entry: declare <4 x float> @llvm.sin.v4f32(<4 x float>) nounwind readonly +define void @test_floor(<4 x float>* %X) nounwind { + +; CHECK: test_floor: + +; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} +; CHECK: movt [[reg0]], :upper16:{{.*}} +; CHECK: vld1.64 + +; CHECK: {{v?mov(.32)?}} r0, +; CHECK: bl {{.*}}floorf + +; CHECK: {{v?mov(.32)?}} r0, +; CHECK: bl {{.*}}floorf + +; CHECK: {{v?mov(.32)?}} r0, +; CHECK: bl {{.*}}floorf + +; CHECK: {{v?mov(.32)?}} r0, +; CHECK: bl {{.*}}floorf + +; CHECK: vst1.64 + +L.entry: + %0 = load <4 x float>* @A, align 16 + %1 = call <4 x float> @llvm.floor.v4f32(<4 x float> %0) + store <4 x float> %1, <4 x float>* %X, align 16 + ret void +} + 
+declare <4 x float> @llvm.floor.v4f32(<4 x float>) nounwind readonly + diff --git a/test/CodeGen/ARM/2012-05-04-vmov.ll b/test/CodeGen/ARM/2012-05-04-vmov.ll new file mode 100644 index 0000000..d52ef2c --- /dev/null +++ b/test/CodeGen/ARM/2012-05-04-vmov.ll @@ -0,0 +1,11 @@ +; RUN: llc -O1 -march=arm -mcpu=cortex-a9 < %s | FileCheck -check-prefix=A9-CHECK %s +; RUN: llc -O1 -march=arm -mcpu=swift < %s | FileCheck -check-prefix=SWIFT-CHECK %s +; Check that swift doesn't use vmov.32. <rdar://problem/10453003>. + +define <2 x i32> @testuvec(<2 x i32> %A, <2 x i32> %B) nounwind { +entry: + %div = udiv <2 x i32> %A, %B + ret <2 x i32> %div +; A9-CHECK: vmov.32 +; SWIFT-CHECK-NOT: vmov.32 +} diff --git a/test/CodeGen/ARM/2012-05-10-PreferVMOVtoVDUP32.ll b/test/CodeGen/ARM/2012-05-10-PreferVMOVtoVDUP32.ll new file mode 100644 index 0000000..dd67843 --- /dev/null +++ b/test/CodeGen/ARM/2012-05-10-PreferVMOVtoVDUP32.ll @@ -0,0 +1,14 @@ +; RUN: llc -march=arm -mcpu=swift < %s | FileCheck %s +; <rdar://problem/10451892> + +define void @f(i32 %x, i32* %p) nounwind ssp { +entry: +; CHECK-NOT: vdup.32 + %vecinit.i = insertelement <2 x i32> undef, i32 %x, i32 0 + %vecinit1.i = insertelement <2 x i32> %vecinit.i, i32 %x, i32 1 + %0 = bitcast i32* %p to i8* + tail call void @llvm.arm.neon.vst1.v2i32(i8* %0, <2 x i32> %vecinit1.i, i32 4) + ret void +} + +declare void @llvm.arm.neon.vst1.v2i32(i8*, <2 x i32>, i32) nounwind diff --git a/test/CodeGen/ARM/2012-08-27-CopyPhysRegCrash.ll b/test/CodeGen/ARM/2012-08-27-CopyPhysRegCrash.ll new file mode 100644 index 0000000..ec7f72d --- /dev/null +++ b/test/CodeGen/ARM/2012-08-27-CopyPhysRegCrash.ll @@ -0,0 +1,129 @@ +; RUN: llc < %s -mcpu=cortex-a8 -march=thumb +; Test that this doesn't crash. +; <rdar://problem/12183003> + +target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32" +target triple = "thumbv7-apple-ios5.1.0" + +declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8*, i32) nounwind readonly + +declare void @llvm.arm.neon.vst1.v16i8(i8*, <16 x i8>, i32) nounwind + +define void @findEdges(i8*) nounwind ssp { + %2 = icmp sgt i32 undef, 0 + br i1 %2, label %5, label %3 + +; <label>:3 ; preds = %5, %1 + %4 = phi i8* [ %0, %1 ], [ %19, %5 ] + ret void + +; <label>:5 ; preds = %5, %1 + %6 = phi i8* [ %19, %5 ], [ %0, %1 ] + %7 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* null, i32 1) + %8 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %7, 0 + %9 = getelementptr inbounds i8* null, i32 3 + %10 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %9, i32 1) + %11 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %10, 2 + %12 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %6, i32 1) + %13 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %12, 0 + %14 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %12, 1 + %15 = getelementptr inbounds i8* %6, i32 3 + %16 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %15, i32 1) + %17 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %16, 1 + %18 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %16, 2 + %19 = getelementptr inbounds i8* %6, i32 48 + %20 = bitcast <16 x i8> %13 to <2 x i64> + %21 = bitcast <16 x i8> %8 to <2 x i64> + %22 = bitcast <16 x i8> %14 to <2 x i64> + %23 = shufflevector <2 x i64> %22, <2 x i64> undef, <1 x i32> zeroinitializer + %24 = bitcast <1 x i64> %23 to <8 x i8> + %25 = 
zext <8 x i8> %24 to <8 x i16> + %26 = sub <8 x i16> zeroinitializer, %25 + %27 = bitcast <16 x i8> %17 to <2 x i64> + %28 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %26) nounwind + %29 = mul <8 x i16> %28, %28 + %30 = add <8 x i16> zeroinitializer, %29 + %31 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> undef, <8 x i16> %30) nounwind + %32 = bitcast <16 x i8> %11 to <2 x i64> + %33 = shufflevector <2 x i64> %32, <2 x i64> undef, <1 x i32> zeroinitializer + %34 = bitcast <1 x i64> %33 to <8 x i8> + %35 = zext <8 x i8> %34 to <8 x i16> + %36 = sub <8 x i16> %35, zeroinitializer + %37 = bitcast <16 x i8> %18 to <2 x i64> + %38 = shufflevector <2 x i64> %37, <2 x i64> undef, <1 x i32> zeroinitializer + %39 = bitcast <1 x i64> %38 to <8 x i8> + %40 = zext <8 x i8> %39 to <8 x i16> + %41 = sub <8 x i16> zeroinitializer, %40 + %42 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %36) nounwind + %43 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %41) nounwind + %44 = mul <8 x i16> %42, %42 + %45 = mul <8 x i16> %43, %43 + %46 = add <8 x i16> %45, %44 + %47 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %31, <8 x i16> %46) nounwind + %48 = bitcast <8 x i16> %47 to <2 x i64> + %49 = shufflevector <2 x i64> %48, <2 x i64> undef, <1 x i32> zeroinitializer + %50 = bitcast <1 x i64> %49 to <4 x i16> + %51 = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %50, <4 x i16> undef) nounwind + %52 = tail call <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32> %51, <4 x i32> <i32 -6, i32 -6, i32 -6, i32 -6>) + %53 = bitcast <4 x i16> %52 to <1 x i64> + %54 = shufflevector <1 x i64> %53, <1 x i64> undef, <2 x i32> <i32 0, i32 1> + %55 = bitcast <2 x i64> %54 to <8 x i16> + %56 = tail call <8 x i8> @llvm.arm.neon.vshiftn.v8i8(<8 x i16> %55, <8 x i16> <i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8>) + %57 = shufflevector <2 x i64> %20, <2 x i64> undef, <1 x i32> <i32 1> + %58 = bitcast <1 x i64> %57 to <8 x i8> + %59 = zext <8 x i8> %58 to <8 x i16> + %60 = sub <8 x i16> zeroinitializer, %59 + %61 = shufflevector <2 x i64> %21, <2 x i64> undef, <1 x i32> <i32 1> + %62 = bitcast <1 x i64> %61 to <8 x i8> + %63 = zext <8 x i8> %62 to <8 x i16> + %64 = sub <8 x i16> %63, zeroinitializer + %65 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %60) nounwind + %66 = mul <8 x i16> %65, %65 + %67 = add <8 x i16> zeroinitializer, %66 + %68 = shufflevector <2 x i64> %27, <2 x i64> undef, <1 x i32> <i32 1> + %69 = bitcast <1 x i64> %68 to <8 x i8> + %70 = zext <8 x i8> %69 to <8 x i16> + %71 = sub <8 x i16> zeroinitializer, %70 + %72 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> undef) nounwind + %73 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %71) nounwind + %74 = mul <8 x i16> %72, %72 + %75 = mul <8 x i16> %73, %73 + %76 = add <8 x i16> %75, %74 + %77 = shufflevector <2 x i64> %32, <2 x i64> undef, <1 x i32> <i32 1> + %78 = bitcast <1 x i64> %77 to <8 x i8> + %79 = zext <8 x i8> %78 to <8 x i16> + %80 = sub <8 x i16> %79, zeroinitializer + %81 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %80) nounwind + %82 = mul <8 x i16> %81, %81 + %83 = add <8 x i16> zeroinitializer, %82 + %84 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %76, <8 x i16> %83) nounwind + %85 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %67, <8 x i16> %84) nounwind + %86 = bitcast <8 x i16> %85 to <2 x i64> + %87 = shufflevector <2 x i64> %86, <2 x i64> undef, <1 x i32> <i32 1> + %88 = bitcast <1 
x i64> %87 to <4 x i16> + %89 = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %88, <4 x i16> undef) nounwind + %90 = tail call <4 x i16> @llvm.arm.neon.vqrshiftnu.v4i16(<4 x i32> %89, <4 x i32> <i32 -6, i32 -6, i32 -6, i32 -6>) + %91 = bitcast <4 x i16> %90 to <1 x i64> + %92 = shufflevector <1 x i64> undef, <1 x i64> %91, <2 x i32> <i32 0, i32 1> + %93 = bitcast <2 x i64> %92 to <8 x i16> + %94 = tail call <8 x i8> @llvm.arm.neon.vshiftn.v8i8(<8 x i16> %93, <8 x i16> <i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8>) + %95 = bitcast <8 x i8> %56 to <1 x i64> + %96 = bitcast <8 x i8> %94 to <1 x i64> + %97 = shufflevector <1 x i64> %95, <1 x i64> %96, <2 x i32> <i32 0, i32 1> + %98 = bitcast <2 x i64> %97 to <16 x i8> + tail call void @llvm.arm.neon.vst1.v16i8(i8* null, <16 x i8> %98, i32 1) + %99 = icmp slt i32 undef, undef + br i1 %99, label %5, label %3 +} + +declare <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone + +declare <8 x i8> @llvm.arm.neon.vshiftn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone + +declare <4 x i16> @llvm.arm.neon.vqrshiftnu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone + +declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone + +declare <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone + +declare <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16>) nounwind readnone diff --git a/test/CodeGen/ARM/2012-08-30-select.ll b/test/CodeGen/ARM/2012-08-30-select.ll new file mode 100644 index 0000000..8471be5 --- /dev/null +++ b/test/CodeGen/ARM/2012-08-30-select.ll @@ -0,0 +1,18 @@ +; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s +; rdar://12201387 + +;CHECK: select_s_v_v +;CHECK: it ne +;CHECK-NEXT: vmovne.i32 +;CHECK: bx +define <16 x i8> @select_s_v_v(i32 %avail, i8* %bar) { +entry: + %vld1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %bar, i32 1) + %and = and i32 %avail, 1 + %tobool = icmp eq i32 %and, 0 + %vld1. = select i1 %tobool, <16 x i8> %vld1, <16 x i8> zeroinitializer + ret <16 x i8> %vld1. +} + +declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* , i32 ) + diff --git a/test/CodeGen/ARM/2012-09-18-ARMv4ISelBug.ll b/test/CodeGen/ARM/2012-09-18-ARMv4ISelBug.ll new file mode 100644 index 0000000..e761ffe --- /dev/null +++ b/test/CodeGen/ARM/2012-09-18-ARMv4ISelBug.ll @@ -0,0 +1,11 @@ +; RUN: llc < %s -march=arm -mcpu=arm7tdmi | FileCheck %s + +; movw is only legal for V6T2 and later. 
+; rdar://12300648 + +define i32 @t(i32 %x) { +; CHECK: t: +; CHECK-NOT: movw + %tmp = add i32 %x, -65535 + ret i32 %tmp +} diff --git a/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll b/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll new file mode 100644 index 0000000..7576609 --- /dev/null +++ b/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll @@ -0,0 +1,11 @@ +; RUN: llc < %s -march=arm -mcpu=cortex-a8 2>&1 | FileCheck %s + +; Check for error message: +; CHECK: non-trivial scalar-to-vector conversion, possible invalid constraint for vector type + +define void @f() nounwind ssp { + %1 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } asm "vldm $4, { ${0:q}, ${1:q}, ${2:q}, ${3:q} }", "=r,=r,=r,=r,r"(i64* undef) nounwind, !srcloc !0 + ret void +} + +!0 = metadata !{i32 318437} diff --git a/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll b/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll new file mode 100644 index 0000000..6fa1391 --- /dev/null +++ b/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll @@ -0,0 +1,11 @@ +; RUN: llc < %s -march=arm -mcpu=cortex-a8 2>&1 | FileCheck %s + +; Check for error message: +; CHECK: scalar-to-vector conversion failed, possible invalid constraint for vector type + +define hidden void @f(i32* %corr, i32 %order) nounwind ssp { + tail call void asm sideeffect "vst1.s32 { ${1:q}, ${2:q} }, [$0]", "r,{q0},{q1}"(i32* %corr, <2 x i64>* undef, <2 x i64>* undef) nounwind, !srcloc !0 + ret void +} + +!0 = metadata !{i32 257} diff --git a/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll b/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll new file mode 100644 index 0000000..b5f6d31 --- /dev/null +++ b/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll @@ -0,0 +1,56 @@ +; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi | FileCheck %s +; Test that we correctly use registers and align elements when using va_arg + +%struct_t = type { double, double, double } +@static_val = constant %struct_t { double 1.0, double 2.0, double 3.0 } + +declare void @llvm.va_start(i8*) nounwind +declare void @llvm.va_end(i8*) nounwind + +; CHECK: test_byval_8_bytes_alignment: +define void @test_byval_8_bytes_alignment(i32 %i, ...) 
{ +entry: +; CHECK: stm r0, {r1, r2, r3} + %g = alloca i8* + %g1 = bitcast i8** %g to i8* + call void @llvm.va_start(i8* %g1) + +; CHECK: add [[REG:(r[0-9]+)|(lr)]], {{(r[0-9]+)|(lr)}}, #7 +; CHECK: bfc [[REG]], #0, #3 + %0 = va_arg i8** %g, double + call void @llvm.va_end(i8* %g1) + + ret void +} + +; CHECK: main: +; CHECK: ldm r0, {r2, r3} +define i32 @main() { +entry: + call void (i32, ...)* @test_byval_8_bytes_alignment(i32 555, %struct_t* byval @static_val) + ret i32 0 +} + +declare void @f(double); + +; CHECK: test_byval_8_bytes_alignment_fixed_arg: +; CHECK-NOT: str r1 +; CHECK: str r3, [sp, #12] +; CHECK: str r2, [sp, #8] +; CHECK-NOT: str r1 +define void @test_byval_8_bytes_alignment_fixed_arg(i32 %n1, %struct_t* byval %val) nounwind { +entry: + %a = getelementptr inbounds %struct_t* %val, i32 0, i32 0 + %0 = load double* %a + call void (double)* @f(double %0) + ret void +} + +; CHECK: main_fixed_arg: +; CHECK: ldm r0, {r2, r3} +define i32 @main_fixed_arg() { +entry: + call void (i32, %struct_t*)* @test_byval_8_bytes_alignment_fixed_arg(i32 555, %struct_t* byval @static_val) + ret i32 0 +} + diff --git a/test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll b/test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll new file mode 100644 index 0000000..478048d --- /dev/null +++ b/test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll @@ -0,0 +1,19 @@ +; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi | FileCheck %s + +@.str = private unnamed_addr constant [12 x i8] c"val.a = %f\0A\00" +%struct_t = type { double, double, double } +@static_val = constant %struct_t { double 1.0, double 2.0, double 3.0 } + +declare i32 @printf(i8*, ...) + +; CHECK: test_byval_usage_scheduling: +; CHECK: str r3, [sp, #12] +; CHECK: str r2, [sp, #8] +; CHECK: vldr d16, [sp, #8] +define void @test_byval_usage_scheduling(i32 %n1, i32 %n2, %struct_t* byval %val) nounwind { +entry: + %a = getelementptr inbounds %struct_t* %val, i32 0, i32 0 + %0 = load double* %a + %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), double %0) + ret void +} diff --git a/test/CodeGen/ARM/2012-10-04-LDRB_POST_IMM-Crash.ll b/test/CodeGen/ARM/2012-10-04-LDRB_POST_IMM-Crash.ll new file mode 100644 index 0000000..f239510 --- /dev/null +++ b/test/CodeGen/ARM/2012-10-04-LDRB_POST_IMM-Crash.ll @@ -0,0 +1,16 @@ +; RUN: llc < %s -mtriple=armv7-none-linux- | FileCheck %s +; Check that LDRB_POST_IMM instruction emitted properly. + +%my_struct_t = type { i8, i8, i8, i8, i8 } +@main.val = private unnamed_addr constant %my_struct_t { i8 1, i8 2, i8 3, i8 4, i8 5 } + +declare void @f(i32 %n1, i32 %n2, i32 %n3, %my_struct_t* byval %val); + +; CHECK: main: +define i32 @main() nounwind { +entry: +; CHECK: ldrb {{(r[0-9]+)}}, {{(\[r[0-9]+\])}}, #1 + call void @f(i32 555, i32 555, i32 555, %my_struct_t* byval @main.val) + ret i32 0 +} + diff --git a/test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll b/test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll new file mode 100644 index 0000000..fcc6a7f --- /dev/null +++ b/test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll @@ -0,0 +1,29 @@ +; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s + +%struct.s = type { [4 x i32] } +@v = constant %struct.s zeroinitializer; + +declare void @f(%struct.s* %p); + +; CHECK: t: +define void @t(i32 %a, %struct.s* byval %s) nounwind { +entry: + +; Here we need to only check proper start address of restored %s argument. 
+; CHECK: sub sp, sp, #16 +; CHECK: push {r11, lr} +; CHECK: add r0, sp, #12 +; CHECK: stm r0, {r1, r2, r3} +; CHECK: add r0, sp, #12 +; CHECK-NEXT: bl f + call void @f(%struct.s* %s) + ret void +} + +; CHECK: caller: +define void @caller() { + +; CHECK: ldm r0, {r1, r2, r3} + call void @t(i32 0, %struct.s* @v); + ret void +} diff --git a/test/CodeGen/ARM/a15-mla.ll b/test/CodeGen/ARM/a15-mla.ll new file mode 100644 index 0000000..25f6de4 --- /dev/null +++ b/test/CodeGen/ARM/a15-mla.ll @@ -0,0 +1,12 @@ +; RUN: llc < %s -march=arm -float-abi=hard -mcpu=cortex-a15 -mattr=+neon,+neonfp | FileCheck %s + +; This test checks that the VMLxForwarting feature is disabled for A15. +; CHECK: fun_a +define <4 x i32> @fun_a(<4 x i32> %x, <4 x i32> %y) nounwind{ + %1 = add <4 x i32> %x, %y +; CHECK-NOT: vmul +; CHECK: vmla + %2 = mul <4 x i32> %1, %1 + %3 = add <4 x i32> %y, %2 + ret <4 x i32> %3 +} diff --git a/test/CodeGen/ARM/a15.ll b/test/CodeGen/ARM/a15.ll new file mode 100644 index 0000000..6f816c1 --- /dev/null +++ b/test/CodeGen/ARM/a15.ll @@ -0,0 +1,6 @@ +; RUN: llc < %s -mcpu=cortex-a15 | FileCheck %s + +; CHECK: a +define i32 @a(i32 %x) { + ret i32 %x; +} diff --git a/test/CodeGen/ARM/atomic-op.ll b/test/CodeGen/ARM/atomic-op.ll index 8967730..6e6b363 100644 --- a/test/CodeGen/ARM/atomic-op.ll +++ b/test/CodeGen/ARM/atomic-op.ll @@ -159,3 +159,13 @@ entry: store i8 %3, i8* %old ret void } + +; CHECK: func4 +; This function should not need to use callee-saved registers. +; rdar://problem/12203728 +; CHECK-NOT: r4 +define i32 @func4(i32* %p) nounwind optsize ssp { +entry: + %0 = atomicrmw add i32* %p, i32 1 monotonic + ret i32 %0 +} diff --git a/test/CodeGen/ARM/atomicrmw_minmax.ll b/test/CodeGen/ARM/atomicrmw_minmax.ll new file mode 100644 index 0000000..69f1384 --- /dev/null +++ b/test/CodeGen/ARM/atomicrmw_minmax.ll @@ -0,0 +1,21 @@ +; RUN: llc -march=arm -mcpu=cortex-a9 < %s | FileCheck %s + +; CHECK: max: +define i32 @max(i8 %ctx, i32* %ptr, i32 %val) +{ +; CHECK: ldrex +; CHECK: cmp [[old:r[0-9]*]], [[val:r[0-9]*]] +; CHECK: movhi {{r[0-9]*}}, [[old]] + %old = atomicrmw umax i32* %ptr, i32 %val monotonic + ret i32 %old +} + +; CHECK: min: +define i32 @min(i8 %ctx, i32* %ptr, i32 %val) +{ +; CHECK: ldrex +; CHECK: cmp [[old:r[0-9]*]], [[val:r[0-9]*]] +; CHECK: movlo {{r[0-9]*}}, [[old]] + %old = atomicrmw umin i32* %ptr, i32 %val monotonic + ret i32 %old +} diff --git a/test/CodeGen/ARM/avoid-cpsr-rmw.ll b/test/CodeGen/ARM/avoid-cpsr-rmw.ll index 1b385ab..96e83dd 100644 --- a/test/CodeGen/ARM/avoid-cpsr-rmw.ll +++ b/test/CodeGen/ARM/avoid-cpsr-rmw.ll @@ -1,4 +1,5 @@ ; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a9 | FileCheck %s +; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=swift | FileCheck %s ; Avoid some 's' 16-bit instruction which partially update CPSR (and add false ; dependency) when it isn't dependent on last CPSR defining instruction. 
; rdar://8928208 diff --git a/test/CodeGen/ARM/call-noret-minsize.ll b/test/CodeGen/ARM/call-noret-minsize.ll new file mode 100644 index 0000000..df3c19e --- /dev/null +++ b/test/CodeGen/ARM/call-noret-minsize.ll @@ -0,0 +1,27 @@ +; RUN: llc < %s -mtriple=armv7-apple-ios -mcpu=cortex-a8 | FileCheck %s -check-prefix=ARM +; RUN: llc < %s -mtriple=armv7-apple-ios -mcpu=swift | FileCheck %s -check-prefix=SWIFT +; rdar://12348580 + +define void @t1() noreturn minsize nounwind ssp { +entry: +; ARM: t1: +; ARM: bl _bar + +; SWIFT: t1: +; SWIFT: bl _bar + tail call void @bar() noreturn nounwind + unreachable +} + +define void @t2() noreturn minsize nounwind ssp { +entry: +; ARM: t2: +; ARM: bl _t1 + +; SWIFT: t2: +; SWIFT: bl _t1 + tail call void @t1() noreturn nounwind + unreachable +} + +declare void @bar() noreturn diff --git a/test/CodeGen/ARM/call-noret.ll b/test/CodeGen/ARM/call-noret.ll new file mode 100644 index 0000000..27062dc --- /dev/null +++ b/test/CodeGen/ARM/call-noret.ll @@ -0,0 +1,31 @@ +; RUN: llc < %s -mtriple=armv7-apple-ios -mcpu=cortex-a8 | FileCheck %s -check-prefix=ARM +; RUN: llc < %s -mtriple=armv7-apple-ios -mcpu=swift | FileCheck %s -check-prefix=SWIFT +; rdar://8979299 + +define void @t1() noreturn nounwind ssp { +entry: +; ARM: t1: +; ARM: mov lr, pc +; ARM: b _bar + +; SWIFT: t1: +; SWIFT: mov lr, pc +; SWIFT: b _bar + tail call void @bar() noreturn nounwind + unreachable +} + +define void @t2() noreturn nounwind ssp { +entry: +; ARM: t2: +; ARM: mov lr, pc +; ARM: b _t1 + +; SWIFT: t2: +; SWIFT: mov lr, pc +; SWIFT: b _t1 + tail call void @t1() noreturn nounwind + unreachable +} + +declare void @bar() noreturn diff --git a/test/CodeGen/ARM/carry.ll b/test/CodeGen/ARM/carry.ll index f84774d..bf51cd6 100644 --- a/test/CodeGen/ARM/carry.ll +++ b/test/CodeGen/ARM/carry.ll @@ -45,3 +45,16 @@ entry: %0 = sub nsw i64 0, %x ret i64 %0 } + +; rdar://12559385 +define i64 @f5(i32 %vi) { +entry: +; CHECK: f5: +; CHECK: movw [[REG:r[0-9]+]], #36102 +; CHECK: sbc r{{[0-9]+}}, r{{[0-9]+}}, [[REG]] + %v0 = zext i32 %vi to i64 + %v1 = xor i64 %v0, -155057456198619 + %v4 = add i64 %v1, 155057456198619 + %v5 = add i64 %v4, %v1 + ret i64 %v5 +} diff --git a/test/CodeGen/ARM/coalesce-subregs.ll b/test/CodeGen/ARM/coalesce-subregs.ll index fb0f4c6..3ba9475 100644 --- a/test/CodeGen/ARM/coalesce-subregs.ll +++ b/test/CodeGen/ARM/coalesce-subregs.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mcpu=cortex-a9 | FileCheck %s +; RUN: llc < %s -mcpu=cortex-a9 -verify-coalescing -verify-machineinstrs | FileCheck %s target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32" target triple = "thumbv7-apple-ios0.0.0" @@ -66,3 +66,295 @@ do.end: ; preds = %do.body declare { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8*, i32) nounwind readonly declare void @llvm.arm.neon.vst2.v4f32(i8*, <4 x float>, <4 x float>, i32) nounwind + +; CHECK: f3 +; This function has lane insertions that span basic blocks. +; The trivial REG_SEQUENCE lowering can't handle that, but the coalescer can. 
+; +; void f3(float *p, float *q) { +; float32x2_t x; +; x[1] = p[3]; +; if (q) +; x[0] = q[0] + q[1]; +; else +; x[0] = p[2]; +; vst1_f32(p+4, x); +; } +; +; CHECK-NOT: vmov +; CHECK-NOT: vorr +define void @f3(float* %p, float* %q) nounwind ssp { +entry: + %arrayidx = getelementptr inbounds float* %p, i32 3 + %0 = load float* %arrayidx, align 4 + %vecins = insertelement <2 x float> undef, float %0, i32 1 + %tobool = icmp eq float* %q, null + br i1 %tobool, label %if.else, label %if.then + +if.then: ; preds = %entry + %1 = load float* %q, align 4 + %arrayidx2 = getelementptr inbounds float* %q, i32 1 + %2 = load float* %arrayidx2, align 4 + %add = fadd float %1, %2 + %vecins3 = insertelement <2 x float> %vecins, float %add, i32 0 + br label %if.end + +if.else: ; preds = %entry + %arrayidx4 = getelementptr inbounds float* %p, i32 2 + %3 = load float* %arrayidx4, align 4 + %vecins5 = insertelement <2 x float> %vecins, float %3, i32 0 + br label %if.end + +if.end: ; preds = %if.else, %if.then + %x.0 = phi <2 x float> [ %vecins3, %if.then ], [ %vecins5, %if.else ] + %add.ptr = getelementptr inbounds float* %p, i32 4 + %4 = bitcast float* %add.ptr to i8* + tail call void @llvm.arm.neon.vst1.v2f32(i8* %4, <2 x float> %x.0, i32 4) + ret void +} + +declare void @llvm.arm.neon.vst1.v2f32(i8*, <2 x float>, i32) nounwind +declare <2 x float> @llvm.arm.neon.vld1.v2f32(i8*, i32) nounwind readonly + +; CHECK: f4 +; This function inserts a lane into a fully defined vector. +; The destination lane isn't read, so the subregs can coalesce. +; CHECK-NOT: vmov +; CHECK-NOT: vorr +define void @f4(float* %p, float* %q) nounwind ssp { +entry: + %0 = bitcast float* %p to i8* + %vld1 = tail call <2 x float> @llvm.arm.neon.vld1.v2f32(i8* %0, i32 4) + %tobool = icmp eq float* %q, null + br i1 %tobool, label %if.end, label %if.then + +if.then: ; preds = %entry + %1 = load float* %q, align 4 + %arrayidx1 = getelementptr inbounds float* %q, i32 1 + %2 = load float* %arrayidx1, align 4 + %add = fadd float %1, %2 + %vecins = insertelement <2 x float> %vld1, float %add, i32 1 + br label %if.end + +if.end: ; preds = %entry, %if.then + %x.0 = phi <2 x float> [ %vecins, %if.then ], [ %vld1, %entry ] + tail call void @llvm.arm.neon.vst1.v2f32(i8* %0, <2 x float> %x.0, i32 4) + ret void +} + +; CHECK: f5 +; Coalesce vector lanes through phis. +; CHECK: vmov.f32 {{.*}}, #1.0 +; CHECK-NOT: vmov +; CHECK-NOT: vorr +; CHECK: %if.end +; We may leave the last insertelement in the if.end block. +; It is inserting the %add value into a dead lane, but %add causes interference +; in the entry block, and we don't do dead lane checks across basic blocks. 
+define void @f5(float* %p, float* %q) nounwind ssp { +entry: + %0 = bitcast float* %p to i8* + %vld1 = tail call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %0, i32 4) + %vecext = extractelement <4 x float> %vld1, i32 0 + %vecext1 = extractelement <4 x float> %vld1, i32 1 + %vecext2 = extractelement <4 x float> %vld1, i32 2 + %vecext3 = extractelement <4 x float> %vld1, i32 3 + %add = fadd float %vecext3, 1.000000e+00 + %tobool = icmp eq float* %q, null + br i1 %tobool, label %if.end, label %if.then + +if.then: ; preds = %entry + %arrayidx = getelementptr inbounds float* %q, i32 1 + %1 = load float* %arrayidx, align 4 + %add4 = fadd float %vecext, %1 + %2 = load float* %q, align 4 + %add6 = fadd float %vecext1, %2 + %arrayidx7 = getelementptr inbounds float* %q, i32 2 + %3 = load float* %arrayidx7, align 4 + %add8 = fadd float %vecext2, %3 + br label %if.end + +if.end: ; preds = %entry, %if.then + %a.0 = phi float [ %add4, %if.then ], [ %vecext, %entry ] + %b.0 = phi float [ %add6, %if.then ], [ %vecext1, %entry ] + %c.0 = phi float [ %add8, %if.then ], [ %vecext2, %entry ] + %vecinit = insertelement <4 x float> undef, float %a.0, i32 0 + %vecinit9 = insertelement <4 x float> %vecinit, float %b.0, i32 1 + %vecinit10 = insertelement <4 x float> %vecinit9, float %c.0, i32 2 + %vecinit11 = insertelement <4 x float> %vecinit10, float %add, i32 3 + tail call void @llvm.arm.neon.vst1.v4f32(i8* %0, <4 x float> %vecinit11, i32 4) + ret void +} + +declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly + +declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>, i32) nounwind + +; CHECK: pr13999 +define void @pr13999() nounwind readonly { +entry: + br i1 true, label %outer_loop, label %loop.end + +outer_loop: + %d = phi double [ 0.0, %entry ], [ %add, %after_inner_loop ] + %0 = insertelement <2 x double> <double 0.0, double 0.0>, double %d, i32 0 + br i1 undef, label %after_inner_loop, label %inner_loop + +inner_loop: + br i1 true, label %after_inner_loop, label %inner_loop + +after_inner_loop: + %1 = phi <2 x double> [ %0, %outer_loop ], [ <double 0.0, double 0.0>, +%inner_loop ] + %2 = extractelement <2 x double> %1, i32 1 + %add = fadd double 1.0, %2 + br i1 false, label %loop.end, label %outer_loop + +loop.end: + %d.end = phi double [ 0.0, %entry ], [ %add, %after_inner_loop ] + ret void +} + +; CHECK: pr14078 +define arm_aapcs_vfpcc i32 @pr14078(i8* nocapture %arg, i8* nocapture %arg1, i32 %arg2) nounwind uwtable readonly { +bb: + br i1 undef, label %bb31, label %bb3 + +bb3: ; preds = %bb12, %bb + %tmp = shufflevector <2 x i64> undef, <2 x i64> undef, <1 x i32> zeroinitializer + %tmp4 = bitcast <1 x i64> %tmp to <2 x float> + %tmp5 = shufflevector <2 x float> %tmp4, <2 x float> undef, <4 x i32> zeroinitializer + %tmp6 = bitcast <4 x float> %tmp5 to <2 x i64> + %tmp7 = shufflevector <2 x i64> %tmp6, <2 x i64> undef, <1 x i32> zeroinitializer + %tmp8 = bitcast <1 x i64> %tmp7 to <2 x float> + %tmp9 = tail call <2 x float> @baz(<2 x float> <float 0xFFFFFFFFE0000000, float 0.000000e+00>, <2 x float> %tmp8, <2 x float> zeroinitializer) nounwind + br i1 undef, label %bb10, label %bb12 + +bb10: ; preds = %bb3 + %tmp11 = load <4 x float>* undef, align 8 + br label %bb12 + +bb12: ; preds = %bb10, %bb3 + %tmp13 = shufflevector <2 x float> %tmp9, <2 x float> zeroinitializer, <2 x i32> <i32 0, i32 2> + %tmp14 = bitcast <2 x float> %tmp13 to <1 x i64> + %tmp15 = shufflevector <1 x i64> %tmp14, <1 x i64> zeroinitializer, <2 x i32> <i32 0, i32 1> + %tmp16 = bitcast <2 x i64> %tmp15 to <4 x 
float> + %tmp17 = fmul <4 x float> zeroinitializer, %tmp16 + %tmp18 = bitcast <4 x float> %tmp17 to <2 x i64> + %tmp19 = shufflevector <2 x i64> %tmp18, <2 x i64> undef, <1 x i32> zeroinitializer + %tmp20 = bitcast <1 x i64> %tmp19 to <2 x float> + %tmp21 = tail call <2 x float> @baz67(<2 x float> %tmp20, <2 x float> undef) nounwind + %tmp22 = tail call <2 x float> @baz67(<2 x float> %tmp21, <2 x float> %tmp21) nounwind + %tmp23 = shufflevector <2 x float> %tmp22, <2 x float> undef, <4 x i32> zeroinitializer + %tmp24 = bitcast <4 x float> %tmp23 to <2 x i64> + %tmp25 = shufflevector <2 x i64> %tmp24, <2 x i64> undef, <1 x i32> zeroinitializer + %tmp26 = bitcast <1 x i64> %tmp25 to <2 x float> + %tmp27 = extractelement <2 x float> %tmp26, i32 0 + %tmp28 = fcmp olt float %tmp27, 0.000000e+00 + %tmp29 = select i1 %tmp28, i32 0, i32 undef + %tmp30 = icmp ult i32 undef, %arg2 + br i1 %tmp30, label %bb3, label %bb31 + +bb31: ; preds = %bb12, %bb + %tmp32 = phi i32 [ 1, %bb ], [ %tmp29, %bb12 ] + ret i32 %tmp32 +} + +declare <2 x float> @baz(<2 x float>, <2 x float>, <2 x float>) nounwind readnone + +declare <2 x float> @baz67(<2 x float>, <2 x float>) nounwind readnone + +%struct.wombat.5 = type { %struct.quux, %struct.quux, %struct.quux, %struct.quux } +%struct.quux = type { <4 x float> } + +; CHECK: pr14079 +define linkonce_odr arm_aapcs_vfpcc %struct.wombat.5 @pr14079(i8* nocapture %arg, i8* nocapture %arg1, i8* nocapture %arg2) nounwind uwtable inlinehint { +bb: + %tmp = shufflevector <2 x i64> zeroinitializer, <2 x i64> undef, <1 x i32> zeroinitializer + %tmp3 = bitcast <1 x i64> %tmp to <2 x float> + %tmp4 = shufflevector <2 x float> %tmp3, <2 x float> zeroinitializer, <2 x i32> <i32 1, i32 3> + %tmp5 = shufflevector <2 x float> %tmp4, <2 x float> undef, <2 x i32> <i32 1, i32 3> + %tmp6 = bitcast <2 x float> %tmp5 to <1 x i64> + %tmp7 = shufflevector <1 x i64> undef, <1 x i64> %tmp6, <2 x i32> <i32 0, i32 1> + %tmp8 = bitcast <2 x i64> %tmp7 to <4 x float> + %tmp9 = shufflevector <2 x i64> zeroinitializer, <2 x i64> undef, <1 x i32> <i32 1> + %tmp10 = bitcast <1 x i64> %tmp9 to <2 x float> + %tmp11 = shufflevector <2 x float> %tmp10, <2 x float> undef, <2 x i32> <i32 0, i32 2> + %tmp12 = shufflevector <2 x float> %tmp11, <2 x float> undef, <2 x i32> <i32 0, i32 2> + %tmp13 = bitcast <2 x float> %tmp12 to <1 x i64> + %tmp14 = shufflevector <1 x i64> %tmp13, <1 x i64> undef, <2 x i32> <i32 0, i32 1> + %tmp15 = bitcast <2 x i64> %tmp14 to <4 x float> + %tmp16 = insertvalue %struct.wombat.5 undef, <4 x float> %tmp8, 1, 0 + %tmp17 = insertvalue %struct.wombat.5 %tmp16, <4 x float> %tmp15, 2, 0 + %tmp18 = insertvalue %struct.wombat.5 %tmp17, <4 x float> undef, 3, 0 + ret %struct.wombat.5 %tmp18 +} + +; CHECK: adjustCopiesBackFrom +; The shuffle in if.else3 must be preserved even though adjustCopiesBackFrom +; is tempted to remove it. 
+; CHECK: %if.else3 +; CHECK: vorr d +define internal void @adjustCopiesBackFrom(<2 x i64>* noalias nocapture sret %agg.result, <2 x i64> %in) { +entry: + %0 = extractelement <2 x i64> %in, i32 0 + %cmp = icmp slt i64 %0, 1 + %.in = select i1 %cmp, <2 x i64> <i64 0, i64 undef>, <2 x i64> %in + %1 = extractelement <2 x i64> %in, i32 1 + %cmp1 = icmp slt i64 %1, 1 + br i1 %cmp1, label %if.then2, label %if.else3 + +if.then2: ; preds = %entry + %2 = insertelement <2 x i64> %.in, i64 0, i32 1 + br label %if.end4 + +if.else3: ; preds = %entry + %3 = shufflevector <2 x i64> %.in, <2 x i64> %in, <2 x i32> <i32 0, i32 3> + br label %if.end4 + +if.end4: ; preds = %if.else3, %if.then2 + %result.2 = phi <2 x i64> [ %2, %if.then2 ], [ %3, %if.else3 ] + store <2 x i64> %result.2, <2 x i64>* %agg.result, align 128 + ret void +} + +; <rdar://problem/12758887> +; RegisterCoalescer::updateRegDefsUses() could visit an instruction more than +; once under rare circumstances. When widening a register from QPR to DTriple +; with the original virtual register in dsub_1_dsub_2, the double rewrite would +; produce an invalid sub-register. +; +; This is because dsub_1_dsub_2 is not an idempotent sub-register index. +; It will translate %vr:dsub_0 -> %vr:dsub_1. +define hidden fastcc void @radar12758887() nounwind optsize ssp { +entry: + br i1 undef, label %for.body, label %for.end70 + +for.body: ; preds = %for.end, %entry + br i1 undef, label %for.body29, label %for.end + +for.body29: ; preds = %for.body29, %for.body + %0 = load <2 x double>* null, align 1 + %splat40 = shufflevector <2 x double> %0, <2 x double> undef, <2 x i32> zeroinitializer + %mul41 = fmul <2 x double> undef, %splat40 + %add42 = fadd <2 x double> undef, %mul41 + %splat44 = shufflevector <2 x double> %0, <2 x double> undef, <2 x i32> <i32 1, i32 1> + %mul45 = fmul <2 x double> undef, %splat44 + %add46 = fadd <2 x double> undef, %mul45 + br i1 undef, label %for.end, label %for.body29 + +for.end: ; preds = %for.body29, %for.body + %accumR2.0.lcssa = phi <2 x double> [ zeroinitializer, %for.body ], [ %add42, %for.body29 ] + %accumI2.0.lcssa = phi <2 x double> [ zeroinitializer, %for.body ], [ %add46, %for.body29 ] + %1 = shufflevector <2 x double> %accumI2.0.lcssa, <2 x double> undef, <2 x i32> <i32 1, i32 0> + %add58 = fadd <2 x double> undef, %1 + %mul61 = fmul <2 x double> %add58, undef + %add63 = fadd <2 x double> undef, %mul61 + %add64 = fadd <2 x double> undef, %add63 + %add67 = fadd <2 x double> undef, %add64 + store <2 x double> %add67, <2 x double>* undef, align 1 + br i1 undef, label %for.end70, label %for.body + +for.end70: ; preds = %for.end, %entry + ret void +} diff --git a/test/CodeGen/ARM/constants.ll b/test/CodeGen/ARM/constants.ll index f4c1b5a..3baa103 100644 --- a/test/CodeGen/ARM/constants.ll +++ b/test/CodeGen/ARM/constants.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=armv4t-unknown-linux-gnueabi -disable-cgp-branch-opts | FileCheck %s +; RUN: llc < %s -mtriple=armv4t-unknown-linux-gnueabi -disable-cgp-branch-opts -verify-machineinstrs | FileCheck %s define i32 @f1() { ; CHECK: f1 @@ -45,6 +45,16 @@ r: ret void } +define i32 @f8() nounwind { +; Check that constant propagation through (i32)-1 => (float)Nan => (i32)-1 +; gives expected result +; CHECK: f8 +; CHECK: mvn r0, #0 + %tmp0 = bitcast i32 -1 to float + %tmp1 = bitcast float %tmp0 to i32 + ret i32 %tmp1 +} + %t1 = type { <3 x float>, <3 x float> } @const1 = global %t1 { <3 x float> zeroinitializer, diff --git a/test/CodeGen/ARM/crash-shufflevector.ll 
b/test/CodeGen/ARM/crash-shufflevector.ll new file mode 100644 index 0000000..bdc0e0e --- /dev/null +++ b/test/CodeGen/ARM/crash-shufflevector.ll @@ -0,0 +1,10 @@ +; RUN: llc < %s -mtriple=armv7 + +declare void @g(<16 x i8>) +define void @f(<4 x i8> %param1, <4 x i8> %param2) { + %y1 = shufflevector <4 x i8> %param1, <4 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> + %y2 = shufflevector <4 x i8> %param2, <4 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> + %z = shufflevector <16 x i8> %y1, <16 x i8> %y2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19> + call void @g(<16 x i8> %z) + ret void +}
\ No newline at end of file diff --git a/test/CodeGen/ARM/darwin-section-order.ll b/test/CodeGen/ARM/darwin-section-order.ll new file mode 100644 index 0000000..701028c --- /dev/null +++ b/test/CodeGen/ARM/darwin-section-order.ll @@ -0,0 +1,21 @@ +; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s + +; CHECK: .section __TEXT,__text,regular,pure_instructions +; CHECK: .section __TEXT,myprecious +; CHECK: .section __TEXT,__textcoal_nt,coalesced,pure_instructions +; CHECK: .section __TEXT,__const_coal,coalesced +; CHECK: .section __TEXT,__picsymbolstub4,symbol_stubs,none,16 +; CHECK: .section __TEXT,__StaticInit,regular,pure_instructions + + +define void @normal() nounwind readnone { +; CHECK: .section __TEXT,__text,regular,pure_instructions +; CHECK: _normal: + ret void +} + +define void @special() nounwind readnone section "__TEXT,myprecious" { +; CHECK: .section __TEXT,myprecious +; CHECK: _special: + ret void +} diff --git a/test/CodeGen/ARM/deps-fix.ll b/test/CodeGen/ARM/deps-fix.ll new file mode 100644 index 0000000..288697a --- /dev/null +++ b/test/CodeGen/ARM/deps-fix.ll @@ -0,0 +1,22 @@ +; RUN: llc < %s -march=arm -mcpu=cortex-a9 -mattr=+neon,+neonfp -float-abi=hard -mtriple armv7-linux-gnueabi | FileCheck %s + +;; This test checks that the ExecutionDepsFix pass performs the domain changes +;; even when some dependencies are propagated through implicit definitions. + +; CHECK: fun_a +define <4 x float> @fun_a(<4 x float> %in, <4 x float> %x, float %y) nounwind { +; CHECK: vext +; CHECK: vext +; CHECK: vadd.f32 + %1 = insertelement <4 x float> %in, float %y, i32 0 + %2 = fadd <4 x float> %1, %x + ret <4 x float> %2 +} +; CHECK: fun_b +define <4 x i32> @fun_b(<4 x i32> %in, <4 x i32> %x, i32 %y) nounwind { +; CHECK: vmov.32 +; CHECK: vadd.i32 + %1 = insertelement <4 x i32> %in, i32 %y, i32 0 + %2 = add <4 x i32> %1, %x + ret <4 x i32> %2 +} diff --git a/test/CodeGen/ARM/div.ll b/test/CodeGen/ARM/div.ll index 3d29e05..82cfca1 100644 --- a/test/CodeGen/ARM/div.ll +++ b/test/CodeGen/ARM/div.ll @@ -1,9 +1,13 @@ -; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s -check-prefix=CHECK-ARM +; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 | FileCheck %s -check-prefix=CHECK-ARM +; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=swift | FileCheck %s -check-prefix=CHECK-SWIFT define i32 @f1(i32 %a, i32 %b) { entry: ; CHECK-ARM: f1 ; CHECK-ARM: __divsi3 + +; CHECK-SWIFT: f1 +; CHECK-SWIFT: sdiv %tmp1 = sdiv i32 %a, %b ; <i32> [#uses=1] ret i32 %tmp1 } @@ -12,6 +16,9 @@ define i32 @f2(i32 %a, i32 %b) { entry: ; CHECK-ARM: f2 ; CHECK-ARM: __udivsi3 + +; CHECK-SWIFT: f2 +; CHECK-SWIFT: udiv %tmp1 = udiv i32 %a, %b ; <i32> [#uses=1] ret i32 %tmp1 } @@ -20,6 +27,10 @@ define i32 @f3(i32 %a, i32 %b) { entry: ; CHECK-ARM: f3 ; CHECK-ARM: __modsi3 + +; CHECK-SWIFT: f3 +; CHECK-SWIFT: sdiv +; CHECK-SWIFT: mls %tmp1 = srem i32 %a, %b ; <i32> [#uses=1] ret i32 %tmp1 } @@ -28,6 +39,10 @@ define i32 @f4(i32 %a, i32 %b) { entry: ; CHECK-ARM: f4 ; CHECK-ARM: __umodsi3 + +; CHECK-SWIFT: f4 +; CHECK-SWIFT: udiv +; CHECK-SWIFT: mls %tmp1 = urem i32 %a, %b ; <i32> [#uses=1] ret i32 %tmp1 } diff --git a/test/CodeGen/ARM/divmod.ll b/test/CodeGen/ARM/divmod.ll index 7fbf8f4..577f8aa 100644 --- a/test/CodeGen/ARM/divmod.ll +++ b/test/CodeGen/ARM/divmod.ll @@ -1,10 +1,18 @@ -; RUN: llc < %s -mtriple=arm-apple-ios5.0 -mcpu=cortex-a8 | FileCheck %s +; RUN: llc < %s -mtriple=arm-apple-ios5.0 -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8 +; RUN: llc < %s -mtriple=arm-apple-ios5.0 -mcpu=swift | FileCheck %s 
-check-prefix=SWIFT + +; rdar://12481395 define void @foo(i32 %x, i32 %y, i32* nocapture %P) nounwind ssp { entry: -; CHECK: foo: -; CHECK: bl ___divmodsi4 -; CHECK-NOT: bl ___divmodsi4 +; A8: foo: +; A8: bl ___divmodsi4 +; A8-NOT: bl ___divmodsi4 + +; SWIFT: foo: +; SWIFT: sdiv +; SWIFT: mls +; SWIFT-NOT: bl __divmodsi4 %div = sdiv i32 %x, %y store i32 %div, i32* %P, align 4 %rem = srem i32 %x, %y @@ -15,9 +23,14 @@ entry: define void @bar(i32 %x, i32 %y, i32* nocapture %P) nounwind ssp { entry: -; CHECK: bar: -; CHECK: bl ___udivmodsi4 -; CHECK-NOT: bl ___udivmodsi4 +; A8: bar: +; A8: bl ___udivmodsi4 +; A8-NOT: bl ___udivmodsi4 + +; SWIFT: bar: +; SWIFT: udiv +; SWIFT: mls +; SWIFT-NOT: bl __udivmodsi4 %div = udiv i32 %x, %y store i32 %div, i32* %P, align 4 %rem = urem i32 %x, %y @@ -32,14 +45,18 @@ entry: define void @do_indent(i32 %cols) nounwind { entry: -; CHECK: do_indent: +; A8: do_indent: +; SWIFT: do_indent: %0 = load i32* @flags, align 4 %1 = and i32 %0, 67108864 %2 = icmp eq i32 %1, 0 br i1 %2, label %bb1, label %bb bb: -; CHECK: bl ___divmodsi4 +; A8: bl ___divmodsi4 +; SWIFT: sdiv +; SWIFT: mls +; SWIFT-NOT: bl __divmodsi4 %3 = load i32* @tabsize, align 4 %4 = srem i32 %cols, %3 %5 = sdiv i32 %cols, %3 @@ -60,9 +77,14 @@ declare i8* @__memset_chk(i8*, i32, i32, i32) nounwind ; rdar://11714607 define i32 @howmany(i32 %x, i32 %y) nounwind { entry: -; CHECK: howmany: -; CHECK: bl ___udivmodsi4 -; CHECK-NOT: ___udivsi3 +; A8: howmany: +; A8: bl ___udivmodsi4 +; A8-NOT: ___udivsi3 + +; SWIFT: howmany: +; SWIFT: udiv +; SWIFT: mls +; SWIFT-NOT: bl __udivmodsi4 %rem = urem i32 %x, %y %div = udiv i32 %x, %y %not.cmp = icmp ne i32 %rem, 0 diff --git a/test/CodeGen/ARM/domain-conv-vmovs.ll b/test/CodeGen/ARM/domain-conv-vmovs.ll new file mode 100644 index 0000000..a5c4114 --- /dev/null +++ b/test/CodeGen/ARM/domain-conv-vmovs.ll @@ -0,0 +1,100 @@ +; RUN: llc -verify-machineinstrs -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a9 -mattr=+neon,+neonfp -float-abi=hard < %s | FileCheck %s + +define <2 x float> @test_vmovs_via_vext_lane0to0(float %arg, <2 x float> %in) { +; CHECK: test_vmovs_via_vext_lane0to0: + %vec = insertelement <2 x float> %in, float %arg, i32 0 + %res = fadd <2 x float> %vec, %vec + +; CHECK: vext.32 d1, d1, d0, #1 +; CHECK: vext.32 d1, d1, d1, #1 +; CHECK: vadd.f32 {{d[0-9]+}}, d1, d1 + + ret <2 x float> %res +} + +define <2 x float> @test_vmovs_via_vext_lane0to1(float %arg, <2 x float> %in) { +; CHECK: test_vmovs_via_vext_lane0to1: + %vec = insertelement <2 x float> %in, float %arg, i32 1 + %res = fadd <2 x float> %vec, %vec + +; CHECK: vext.32 d1, d1, d1, #1 +; CHECK: vext.32 d1, d1, d0, #1 +; CHECK: vadd.f32 {{d[0-9]+}}, d1, d1 + + ret <2 x float> %res +} + +define <2 x float> @test_vmovs_via_vext_lane1to0(float, float %arg, <2 x float> %in) { +; CHECK: test_vmovs_via_vext_lane1to0: + %vec = insertelement <2 x float> %in, float %arg, i32 0 + %res = fadd <2 x float> %vec, %vec + +; CHECK: vext.32 d1, d1, d1, #1 +; CHECK: vext.32 d1, d0, d1, #1 +; CHECK: vadd.f32 {{d[0-9]+}}, d1, d1 + + ret <2 x float> %res +} + +define <2 x float> @test_vmovs_via_vext_lane1to1(float, float %arg, <2 x float> %in) { +; CHECK: test_vmovs_via_vext_lane1to1: + %vec = insertelement <2 x float> %in, float %arg, i32 1 + %res = fadd <2 x float> %vec, %vec + +; CHECK: vext.32 d1, d0, d1, #1 +; CHECK: vext.32 d1, d1, d1, #1 +; CHECK: vadd.f32 {{d[0-9]+}}, d1, d1 + + ret <2 x float> %res +} + + +define float @test_vmovs_via_vdup(float, float %ret, float %lhs, float %rhs) { +; CHECK: 
test_vmovs_via_vdup: + + ; Do an operation (which will end up NEON because of +neonfp) to convince the + ; execution-domain pass that NEON is a good thing to use. + %res = fadd float %ret, %ret + ; It makes sense for LLVM to do the addition in d0 here, because it's going + ; to be returned. This means it will want a "vmov s0, s1": +; CHECK: vdup.32 d0, d0[1] + + ret float %res +} + +declare float @llvm.sqrt.f32(float) + +declare void @bar() + +; This is a comp +define float @test_ineligible(float, float %in) { +; CHECK: test_ineligible: + + %sqrt = call float @llvm.sqrt.f32(float %in) + %val = fadd float %sqrt, %sqrt + + ; This call forces a move from a callee-saved register to the return-reg. That + ; move is not eligible for conversion to a d-register instructions because the + ; use-def chains would be messed up. Primarily a compile-test (we used to + ; internal fault). + call void @bar() +; CHECL: bl bar +; CHECK: vext.32 +; CHECK: vext.32 + ret float %val +} + +define i32 @test_vmovs_no_sreg(i32 %in) { +; CHECK: test_vmovs_no_sreg: + + ; Check that the movement to and from GPRs takes place in the NEON domain. +; CHECK: vmov.32 d + %x = bitcast i32 %in to float + + %res = fadd float %x, %x + +; CHECK: vmov.32 r{{[0-9]+}}, d + %resi = bitcast float %res to i32 + + ret i32 %resi +} diff --git a/test/CodeGen/ARM/fabss.ll b/test/CodeGen/ARM/fabss.ll index bcb4ee7..46c2f1c 100644 --- a/test/CodeGen/ARM/fabss.ll +++ b/test/CodeGen/ARM/fabss.ll @@ -14,12 +14,12 @@ entry: declare float @fabsf(float) ; VFP2: test: -; VFP2: vabs.f32 s1, s1 +; VFP2: vabs.f32 s2, s2 ; NFP1: test: ; NFP1: vabs.f32 d1, d1 ; NFP0: test: -; NFP0: vabs.f32 s1, s1 +; NFP0: vabs.f32 s2, s2 ; CORTEXA8: test: ; CORTEXA8: vadd.f32 [[D1:d[0-9]+]] diff --git a/test/CodeGen/ARM/fadds.ll b/test/CodeGen/ARM/fadds.ll index e35103c..48ef5ed 100644 --- a/test/CodeGen/ARM/fadds.ll +++ b/test/CodeGen/ARM/fadds.ll @@ -10,14 +10,14 @@ entry: } ; VFP2: test: -; VFP2: vadd.f32 s0, s1, s0 +; VFP2: vadd.f32 s ; NFP1: test: -; NFP1: vadd.f32 d0, d1, d0 +; NFP1: vadd.f32 d ; NFP0: test: -; NFP0: vadd.f32 s0, s1, s0 +; NFP0: vadd.f32 s ; CORTEXA8: test: -; CORTEXA8: vadd.f32 d0, d1, d0 +; CORTEXA8: vadd.f32 d ; CORTEXA9: test: ; CORTEXA9: vadd.f32 s{{.}}, s{{.}}, s{{.}} diff --git a/test/CodeGen/ARM/fast-isel-pic.ll b/test/CodeGen/ARM/fast-isel-pic.ll new file mode 100644 index 0000000..867d53f --- /dev/null +++ b/test/CodeGen/ARM/fast-isel-pic.ll @@ -0,0 +1,61 @@ +; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB +; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=pic -mtriple=arm-apple-ios | FileCheck %s --check-prefix=ARM +; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARMv7 +; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=pic -mtriple=thumbv7-none-linux-gnueabi | FileCheck %s --check-prefix=THUMB-ELF +; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=pic -mtriple=armv7-none-linux-gnueabi | FileCheck %s --check-prefix=ARMv7-ELF + +@g = global i32 0, align 4 + +define i32 @LoadGV() { +entry: +; THUMB: LoadGV +; THUMB: movw [[reg0:r[0-9]+]], +; THUMB: movt [[reg0]], +; THUMB: add [[reg0]], pc +; THUMB-ELF: LoadGV +; THUMB-ELF: ldr.n r[[reg0:[0-9]+]], +; THUMB-ELF: ldr.n r[[reg1:[0-9]+]], +; THUMB-ELF: ldr r[[reg0]], [r[[reg1]], r[[reg0]]] +; ARM: LoadGV +; ARM: ldr [[reg1:r[0-9]+]], +; ARM: add [[reg1]], pc, [[reg1]] +; ARMv7: LoadGV +; ARMv7: movw [[reg2:r[0-9]+]], +; ARMv7: movt 
[[reg2]], +; ARMv7: add [[reg2]], pc, [[reg2]] +; ARMv7-ELF: LoadGV +; ARMv7-ELF: ldr r[[reg2:[0-9]+]], +; ARMv7-ELF: ldr r[[reg3:[0-9]+]], +; ARMv7-ELF: ldr r[[reg2]], [r[[reg3]], r[[reg2]]] + %tmp = load i32* @g + ret i32 %tmp +} + +@i = external global i32 + +define i32 @LoadIndirectSymbol() { +entry: +; THUMB: LoadIndirectSymbol +; THUMB: movw r[[reg3:[0-9]+]], +; THUMB: movt r[[reg3]], +; THUMB: add r[[reg3]], pc +; THUMB: ldr r[[reg3]], [r[[reg3]]] +; THUMB-ELF: LoadIndirectSymbol +; THUMB-ELF: ldr.n r[[reg3:[0-9]+]], +; THUMB-ELF: ldr.n r[[reg4:[0-9]+]], +; THUMB-ELF: ldr r[[reg3]], [r[[reg4]], r[[reg3]]] +; ARM: LoadIndirectSymbol +; ARM: ldr [[reg4:r[0-9]+]], +; ARM: ldr [[reg4]], [pc, [[reg4]]] +; ARMv7: LoadIndirectSymbol +; ARMv7: movw r[[reg5:[0-9]+]], +; ARMv7: movt r[[reg5]], +; ARMv7: add r[[reg5]], pc, r[[reg5]] +; ARMv7: ldr r[[reg5]], [r[[reg5]]] +; ARMv7-ELF: LoadIndirectSymbol +; ARMv7-ELF: ldr r[[reg5:[0-9]+]], +; ARMv7-ELF: ldr r[[reg6:[0-9]+]], +; ARMv7-ELF: ldr r[[reg5]], [r[[reg6]], r[[reg5]]] + %tmp = load i32* @i + ret i32 %tmp +} diff --git a/test/CodeGen/ARM/fast-isel.ll b/test/CodeGen/ARM/fast-isel.ll index ecd5fe2..41fda41 100644 --- a/test/CodeGen/ARM/fast-isel.ll +++ b/test/CodeGen/ARM/fast-isel.ll @@ -1,5 +1,7 @@ ; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM ; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB +; RUN: llc < %s -O0 -arm-strict-align -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM-STRICT-ALIGN +; RUN: llc < %s -O0 -arm-strict-align -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB-STRICT-ALIGN ; Very basic fast-isel functionality. 
define i32 @add(i32 %a, i32 %b) nounwind { @@ -238,3 +240,67 @@ entry: } declare void @llvm.trap() nounwind + +define void @unaligned_i16_store(i16 %x, i16* %y) nounwind { +entry: +; ARM-STRICT-ALIGN: @unaligned_i16_store +; ARM-STRICT-ALIGN: strb +; ARM-STRICT-ALIGN: strb + +; THUMB-STRICT-ALIGN: @unaligned_i16_store +; THUMB-STRICT-ALIGN: strb +; THUMB-STRICT-ALIGN: strb + + store i16 %x, i16* %y, align 1 + ret void +} + +define i16 @unaligned_i16_load(i16* %x) nounwind { +entry: +; ARM-STRICT-ALIGN: @unaligned_i16_load +; ARM-STRICT-ALIGN: ldrb +; ARM-STRICT-ALIGN: ldrb + +; THUMB-STRICT-ALIGN: @unaligned_i16_load +; THUMB-STRICT-ALIGN: ldrb +; THUMB-STRICT-ALIGN: ldrb + + %0 = load i16* %x, align 1 + ret i16 %0 +} + +define void @unaligned_i32_store(i32 %x, i32* %y) nounwind { +entry: +; ARM-STRICT-ALIGN: @unaligned_i32_store +; ARM-STRICT-ALIGN: strb +; ARM-STRICT-ALIGN: strb +; ARM-STRICT-ALIGN: strb +; ARM-STRICT-ALIGN: strb + +; THUMB-STRICT-ALIGN: @unaligned_i32_store +; THUMB-STRICT-ALIGN: strb +; THUMB-STRICT-ALIGN: strb +; THUMB-STRICT-ALIGN: strb +; THUMB-STRICT-ALIGN: strb + + store i32 %x, i32* %y, align 1 + ret void +} + +define i32 @unaligned_i32_load(i32* %x) nounwind { +entry: +; ARM-STRICT-ALIGN: @unaligned_i32_load +; ARM-STRICT-ALIGN: ldrb +; ARM-STRICT-ALIGN: ldrb +; ARM-STRICT-ALIGN: ldrb +; ARM-STRICT-ALIGN: ldrb + +; THUMB-STRICT-ALIGN: @unaligned_i32_load +; THUMB-STRICT-ALIGN: ldrb +; THUMB-STRICT-ALIGN: ldrb +; THUMB-STRICT-ALIGN: ldrb +; THUMB-STRICT-ALIGN: ldrb + + %0 = load i32* %x, align 1 + ret i32 %0 +} diff --git a/test/CodeGen/ARM/fdivs.ll b/test/CodeGen/ARM/fdivs.ll index 31c1ca9..8fab002 100644 --- a/test/CodeGen/ARM/fdivs.ll +++ b/test/CodeGen/ARM/fdivs.ll @@ -10,14 +10,14 @@ entry: } ; VFP2: test: -; VFP2: vdiv.f32 s0, s1, s0 +; VFP2: vdiv.f32 s0, s2, s0 ; NFP1: test: -; NFP1: vdiv.f32 s0, s1, s0 +; NFP1: vdiv.f32 s0, s2, s0 ; NFP0: test: -; NFP0: vdiv.f32 s0, s1, s0 +; NFP0: vdiv.f32 s0, s2, s0 ; CORTEXA8: test: -; CORTEXA8: vdiv.f32 s0, s1, s0 +; CORTEXA8: vdiv.f32 s0, s2, s0 ; CORTEXA9: test: ; CORTEXA9: vdiv.f32 s{{.}}, s{{.}}, s{{.}} diff --git a/test/CodeGen/ARM/fmuls.ll b/test/CodeGen/ARM/fmuls.ll index 3c3182b..1566a92 100644 --- a/test/CodeGen/ARM/fmuls.ll +++ b/test/CodeGen/ARM/fmuls.ll @@ -10,15 +10,15 @@ entry: } ; VFP2: test: -; VFP2: vmul.f32 s0, s1, s0 +; VFP2: vmul.f32 s ; NFP1: test: -; NFP1: vmul.f32 d0, d1, d0 +; NFP1: vmul.f32 d ; NFP0: test: -; NFP0: vmul.f32 s0, s1, s0 +; NFP0: vmul.f32 s ; CORTEXA8: test: -; CORTEXA8: vmul.f32 d0, d1, d0 +; CORTEXA8: vmul.f32 d ; CORTEXA9: test: ; CORTEXA9: vmul.f32 s{{.}}, s{{.}}, s{{.}} diff --git a/test/CodeGen/ARM/fp-fast.ll b/test/CodeGen/ARM/fp-fast.ll new file mode 100644 index 0000000..ec57187 --- /dev/null +++ b/test/CodeGen/ARM/fp-fast.ll @@ -0,0 +1,60 @@ +; RUN: llc -march=arm -mcpu=cortex-a9 -mattr=+vfp4 -enable-unsafe-fp-math < %s | FileCheck %s + +; CHECK: test1 +define float @test1(float %x) { +; CHECK-NOT: vfma +; CHECK: vmul.f32 +; CHECK-NOT: vfma + %t1 = fmul float %x, 3.0 + %t2 = call float @llvm.fma.f32(float %x, float 2.0, float %t1) + ret float %t2 +} + +; CHECK: test2 +define float @test2(float %x, float %y) { +; CHECK-NOT: vmul +; CHECK: vfma.f32 +; CHECK-NOT: vmul + %t1 = fmul float %x, 3.0 + %t2 = call float @llvm.fma.f32(float %t1, float 2.0, float %y) + ret float %t2 +} + +; CHECK: test3 +define float @test3(float %x, float %y) { +; CHECK-NOT: vfma +; CHECK: vadd.f32 +; CHECK-NOT: vfma + %t2 = call float @llvm.fma.f32(float %x, float 1.0, float %y) + ret float %t2 
+} + +; CHECK: test4 +define float @test4(float %x, float %y) { +; CHECK-NOT: vfma +; CHECK: vsub.f32 +; CHECK-NOT: vfma + %t2 = call float @llvm.fma.f32(float %x, float -1.0, float %y) + ret float %t2 +} + +; CHECK: test5 +define float @test5(float %x) { +; CHECK-NOT: vfma +; CHECK: vmul.f32 +; CHECK-NOT: vfma + %t2 = call float @llvm.fma.f32(float %x, float 2.0, float %x) + ret float %t2 +} + +; CHECK: test6 +define float @test6(float %x) { +; CHECK-NOT: vfma +; CHECK: vmul.f32 +; CHECK-NOT: vfma + %t1 = fsub float -0.0, %x + %t2 = call float @llvm.fma.f32(float %x, float 5.0, float %t1) + ret float %t2 +} + +declare float @llvm.fma.f32(float, float, float) diff --git a/test/CodeGen/ARM/fp_convert.ll b/test/CodeGen/ARM/fp_convert.ll index 7002cec..44298b9 100644 --- a/test/CodeGen/ARM/fp_convert.ll +++ b/test/CodeGen/ARM/fp_convert.ll @@ -31,7 +31,7 @@ define float @test3(i32 %a, i32 %b) { ; VFP2: test3: ; VFP2: vcvt.f32.u32 s{{.}}, s{{.}} ; NEON: test3: -; NEON: vcvt.f32.u32 d0, d0 +; NEON: vcvt.f32.u32 d entry: %0 = add i32 %a, %b %1 = uitofp i32 %0 to float @@ -42,7 +42,7 @@ define float @test4(i32 %a, i32 %b) { ; VFP2: test4: ; VFP2: vcvt.f32.s32 s{{.}}, s{{.}} ; NEON: test4: -; NEON: vcvt.f32.s32 d0, d0 +; NEON: vcvt.f32.s32 d entry: %0 = add i32 %a, %b %1 = sitofp i32 %0 to float diff --git a/test/CodeGen/ARM/fsubs.ll b/test/CodeGen/ARM/fsubs.ll index bea8d5f..f039e74 100644 --- a/test/CodeGen/ARM/fsubs.ll +++ b/test/CodeGen/ARM/fsubs.ll @@ -8,6 +8,6 @@ entry: ret float %0 } -; VFP2: vsub.f32 s0, s1, s0 -; NFP1: vsub.f32 d0, d1, d0 -; NFP0: vsub.f32 s0, s1, s0 +; VFP2: vsub.f32 s +; NFP1: vsub.f32 d +; NFP0: vsub.f32 s diff --git a/test/CodeGen/ARM/ifcvt1.ll b/test/CodeGen/ARM/ifcvt1.ll index cd870bb..fd83144 100644 --- a/test/CodeGen/ARM/ifcvt1.ll +++ b/test/CodeGen/ARM/ifcvt1.ll @@ -1,17 +1,21 @@ -; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s +; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8 +; RUN: llc < %s -march=arm -mcpu=swift | FileCheck %s -check-prefix=SWIFT define i32 @t1(i32 %a, i32 %b) { -; CHECK: t1: +; A8: t1: +; SWIFT: t1: %tmp2 = icmp eq i32 %a, 0 br i1 %tmp2, label %cond_false, label %cond_true cond_true: -; CHECK: subeq r0, r1, #1 +; A8: subeq r0, r1, #1 +; SWIFT: sub r0, r1, #1 %tmp5 = add i32 %b, 1 ret i32 %tmp5 cond_false: -; CHECK: addne r0, r1, #1 +; A8: addne r0, r1, #1 +; SWIFT: addne r0, r1, #1 %tmp7 = add i32 %b, -1 ret i32 %tmp7 } diff --git a/test/CodeGen/ARM/ifcvt12.ll b/test/CodeGen/ARM/ifcvt12.ll new file mode 100644 index 0000000..77bdca5 --- /dev/null +++ b/test/CodeGen/ARM/ifcvt12.ll @@ -0,0 +1,15 @@ +; RUN: llc < %s -mtriple=arm-apple-darwin -mcpu=cortex-a8 | FileCheck %s +define i32 @f1(i32 %a, i32 %b, i32 %c) { +; CHECK: f1: +; CHECK: mlsne r0, r0, r1, r2 + %tmp1 = icmp eq i32 %a, 0 + br i1 %tmp1, label %cond_false, label %cond_true + +cond_true: + %tmp2 = mul i32 %a, %b + %tmp3 = sub i32 %c, %tmp2 + ret i32 %tmp3 + +cond_false: + ret i32 %a +} diff --git a/test/CodeGen/ARM/ifcvt5.ll b/test/CodeGen/ARM/ifcvt5.ll index 95f5c97..5081791 100644 --- a/test/CodeGen/ARM/ifcvt5.ll +++ b/test/CodeGen/ARM/ifcvt5.ll @@ -1,4 +1,6 @@ -; RUN: llc < %s -mtriple=armv7-apple-ios | FileCheck %s +; RUN: llc < %s -mtriple=armv7-apple-ios -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8 +; RUN: llc < %s -mtriple=armv7-apple-ios -mcpu=swift | FileCheck %s -check-prefix=SWIFT +; rdar://8402126 @x = external global i32* ; <i32**> [#uses=1] @@ -10,8 +12,12 @@ entry: } define i32 @t1(i32 %a, i32 %b) { -; CHECK: t1: -; CHECK: poplt 
{r7, pc} +; A8: t1: +; A8: poplt {r7, pc} + +; SWIFT: t1: +; SWIFT: pop {r7, pc} +; SWIFT: pop {r7, pc} entry: %tmp1 = icmp sgt i32 %a, 10 ; <i1> [#uses=1] br i1 %tmp1, label %cond_true, label %UnifiedReturnBlock diff --git a/test/CodeGen/ARM/indirectbr-2.ll b/test/CodeGen/ARM/indirectbr-2.ll new file mode 100644 index 0000000..084f520 --- /dev/null +++ b/test/CodeGen/ARM/indirectbr-2.ll @@ -0,0 +1,46 @@ +; RUN: llc < %s -O0 -relocation-model=pic -mtriple=thumbv7-apple-ios | FileCheck %s +; <rdar://problem/12529625> + +@foo = global i32 34879, align 4 +@DWJumpTable2808 = global [2 x i32] [i32 sub (i32 ptrtoint (i8* blockaddress(@func, %14) to i32), i32 ptrtoint (i8* blockaddress(@func, %4) to i32)), i32 sub (i32 ptrtoint (i8* blockaddress(@func, %13) to i32), i32 ptrtoint (i8* blockaddress(@func, %4) to i32))] +@0 = internal constant [45 x i8] c"func XXXXXXXXXXX :: bb xxxxxxxxxxxxxxxxxxxx\0A\00" + +; The indirect branch has the two destinations as successors. The lone PHI +; statement shouldn't be implicitly defined. + +; CHECK: func: +; CHECK: Ltmp1: @ Block address taken +; CHECK-NOT: @ implicit-def: R0 +; CHECK: @ 4-byte Reload + +define i32 @func() nounwind ssp { + %1 = alloca i32, align 4 + %2 = load i32* @foo, align 4 + %3 = icmp eq i32 %2, 34879 + br label %4 + +; <label>:4 ; preds = %0 + %5 = zext i1 %3 to i32 + %6 = mul i32 %5, 287 + %7 = add i32 %6, 2 + %8 = getelementptr [2 x i32]* @DWJumpTable2808, i32 0, i32 %5 + %9 = load i32* %8 + %10 = add i32 %9, ptrtoint (i8* blockaddress(@func, %4) to i32) + %11 = inttoptr i32 %10 to i8* + %12 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([45 x i8]* @0, i32 0, i32 0)) + indirectbr i8* %11, [label %13, label %14] + +; <label>:13 ; preds = %4 + %tmp14 = phi i32 [ %7, %4 ] + store i32 23958, i32* @foo, align 4 + %tmp15 = load i32* %1, align 4 + %tmp16 = icmp eq i32 %tmp15, 0 + %tmp17 = zext i1 %tmp16 to i32 + %tmp21 = add i32 %tmp17, %tmp14 + ret i32 %tmp21 + +; <label>:14 ; preds = %4 + ret i32 42 +} + +declare i32 @printf(i8*, ...) diff --git a/test/CodeGen/ARM/integer_insertelement.ll b/test/CodeGen/ARM/integer_insertelement.ll new file mode 100644 index 0000000..1d72afe --- /dev/null +++ b/test/CodeGen/ARM/integer_insertelement.ll @@ -0,0 +1,35 @@ +; RUN: llc %s -o - -march=arm -mattr=+neon | FileCheck %s + +; This test checks that when inserting one (integer) element into a vector, +; the vector is not spuriously copied. "vorr dX, dY, dY" is the way of moving +; one DPR to another that we check for. 
+ +; CHECK: @f +; CHECK-NOT: vorr d +; CHECK: vmov.32 d +; CHECK-NOT: vorr d +; CHECK: mov pc, lr +define <4 x i32> @f(<4 x i32> %in) { + %1 = insertelement <4 x i32> %in, i32 255, i32 3 + ret <4 x i32> %1 +} + +; CHECK: @g +; CHECK-NOT: vorr d +; CHECK: vmov.16 d +; CHECK-NOT: vorr d +; CHECK: mov pc, lr +define <8 x i16> @g(<8 x i16> %in) { + %1 = insertelement <8 x i16> %in, i16 255, i32 7 + ret <8 x i16> %1 +} + +; CHECK: @h +; CHECK-NOT: vorr d +; CHECK: vmov.8 d +; CHECK-NOT: vorr d +; CHECK: mov pc, lr +define <16 x i8> @h(<16 x i8> %in) { + %1 = insertelement <16 x i8> %in, i8 255, i32 15 + ret <16 x i8> %1 +} diff --git a/test/CodeGen/ARM/ldr_post.ll b/test/CodeGen/ARM/ldr_post.ll index 8ddf025..a6ca434 100644 --- a/test/CodeGen/ARM/ldr_post.ll +++ b/test/CodeGen/ARM/ldr_post.ll @@ -1,4 +1,5 @@ ; RUN: llc < %s -march=arm | FileCheck %s +; RUN: llc < %s -march=arm -mcpu=swift | FileCheck %s ; CHECK: test1: ; CHECK: ldr {{.*, \[.*]}}, -r2 diff --git a/test/CodeGen/ARM/ldr_pre.ll b/test/CodeGen/ARM/ldr_pre.ll index e904e5f..6c40ad7 100644 --- a/test/CodeGen/ARM/ldr_pre.ll +++ b/test/CodeGen/ARM/ldr_pre.ll @@ -1,4 +1,5 @@ ; RUN: llc < %s -march=arm | FileCheck %s +; RUN: llc < %s -march=arm -mcpu=swift | FileCheck %s ; CHECK: test1: ; CHECK: ldr {{.*!}} diff --git a/test/CodeGen/ARM/longMAC.ll b/test/CodeGen/ARM/longMAC.ll new file mode 100644 index 0000000..e4a00e9 --- /dev/null +++ b/test/CodeGen/ARM/longMAC.ll @@ -0,0 +1,44 @@ +; RUN: llc < %s -march=arm | FileCheck %s +; Check generated signed and unsigned multiply accumulate long. + +define i64 @MACLongTest1(i32 %a, i32 %b, i64 %c) { +;CHECK: MACLongTest1: +;CHECK: umlal + %conv = zext i32 %a to i64 + %conv1 = zext i32 %b to i64 + %mul = mul i64 %conv1, %conv + %add = add i64 %mul, %c + ret i64 %add +} + +define i64 @MACLongTest2(i32 %a, i32 %b, i64 %c) { +;CHECK: MACLongTest2: +;CHECK: smlal + %conv = sext i32 %a to i64 + %conv1 = sext i32 %b to i64 + %mul = mul nsw i64 %conv1, %conv + %add = add nsw i64 %mul, %c + ret i64 %add +} + +define i64 @MACLongTest3(i32 %a, i32 %b, i32 %c) { +;CHECK: MACLongTest3: +;CHECK: umlal + %conv = zext i32 %b to i64 + %conv1 = zext i32 %a to i64 + %mul = mul i64 %conv, %conv1 + %conv2 = zext i32 %c to i64 + %add = add i64 %mul, %conv2 + ret i64 %add +} + +define i64 @MACLongTest4(i32 %a, i32 %b, i32 %c) { +;CHECK: MACLongTest4: +;CHECK: smlal + %conv = sext i32 %b to i64 + %conv1 = sext i32 %a to i64 + %mul = mul nsw i64 %conv, %conv1 + %conv2 = sext i32 %c to i64 + %add = add nsw i64 %mul, %conv2 + ret i64 %add +} diff --git a/test/CodeGen/ARM/mls.ll b/test/CodeGen/ARM/mls.ll index a6cdba4..066bf98 100644 --- a/test/CodeGen/ARM/mls.ll +++ b/test/CodeGen/ARM/mls.ll @@ -1,4 +1,5 @@ ; RUN: llc < %s -march=arm -mattr=+v6t2 | FileCheck %s +; RUN: llc < %s -march=arm -mattr=+v6t2 -arm-use-mulops=false | FileCheck %s -check-prefix=NO_MULOPS define i32 @f1(i32 %a, i32 %b, i32 %c) { %tmp1 = mul i32 %a, %b @@ -13,4 +14,15 @@ define i32 @f2(i32 %a, i32 %b, i32 %c) { ret i32 %tmp2 } +; CHECK: f1: ; CHECK: mls r0, r0, r1, r2 +; NO_MULOPS: f1: +; NO_MULOPS: mul r0, r0, r1 +; NO_MULOPS-NEXT: sub r0, r2, r0 + +; CHECK: f2: +; CHECK: mul r0, r0, r1 +; CHECK-NEXT: sub r0, r0, r2 +; NO_MULOPS: f2: +; NO_MULOPS: mul r0, r0, r1 +; NO_MULOPS-NEXT: sub r0, r0, r2 diff --git a/test/CodeGen/ARM/neon-fma.ll b/test/CodeGen/ARM/neon-fma.ll new file mode 100644 index 0000000..d2cca50 --- /dev/null +++ b/test/CodeGen/ARM/neon-fma.ll @@ -0,0 +1,22 @@ +; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -mcpu=swift | FileCheck %s 
+ +; CHECK: test_v2f32 +; CHECK: vfma.f32 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}} + +define <2 x float> @test_v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind readnone ssp { +entry: + %call = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind readnone + ret <2 x float> %call +} + +; CHECK: test_v4f32 +; CHECK: vfma.f32 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}} + +define <4 x float> @test_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind readnone ssp { +entry: + %call = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind readnone + ret <4 x float> %call +} + +declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) nounwind readnone +declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) nounwind readnone diff --git a/test/CodeGen/ARM/neon_ld2.ll b/test/CodeGen/ARM/neon_ld2.ll index 944bfe0..497619e 100644 --- a/test/CodeGen/ARM/neon_ld2.ll +++ b/test/CodeGen/ARM/neon_ld2.ll @@ -1,10 +1,16 @@ ; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s +; RUN: llc < %s -march=arm -mcpu=swift | FileCheck %s --check-prefix=SWIFT ; CHECK: t1 -; CHECK: vldmia -; CHECK: vldmia +; CHECK: vld1.64 +; CHECK: vld1.64 ; CHECK: vadd.i64 q -; CHECK: vstmia +; CHECK: vst1.64 +; SWIFT: t1 +; SWIFT: vld1.64 {{.d[0-9]+, d[0-9]+}, \[r[0-9]+, :128\]}} +; SWIFT: vld1.64 {{.d[0-9]+, d[0-9]+}, \[r[0-9]+, :128\]}} +; SWIFT: vadd.i64 q +; SWIFT: vst1.64 {{.d[0-9]+, d[0-9]+}, \[r[0-9]+, :128\]}} define void @t1(<4 x i32>* %r, <2 x i64>* %a, <2 x i64>* %b) nounwind { entry: %0 = load <2 x i64>* %a, align 16 ; <<2 x i64>> [#uses=1] @@ -16,11 +22,17 @@ entry: } ; CHECK: t2 -; CHECK: vldmia -; CHECK: vldmia +; CHECK: vld1.64 +; CHECK: vld1.64 ; CHECK: vsub.i64 q ; CHECK: vmov r0, r1, d ; CHECK: vmov r2, r3, d +; SWIFT: t2 +; SWIFT: vld1.64 {{.d[0-9]+, d[0-9]+}, \[r[0-9]+, :128\]}} +; SWIFT: vld1.64 {{.d[0-9]+, d[0-9]+}, \[r[0-9]+, :128\]}} +; SWIFT: vsub.i64 q +; SWIFT: vmov r0, r1, d +; SWIFT: vmov r2, r3, d define <4 x i32> @t2(<2 x i64>* %a, <2 x i64>* %b) nounwind readonly { entry: %0 = load <2 x i64>* %a, align 16 ; <<2 x i64>> [#uses=1] @@ -30,3 +42,18 @@ entry: ret <4 x i32> %3 } +; Limited alignment. 
+; SWIFT: t3 +; SWIFT: vld1.64 {{.d[0-9]+, d[0-9]+}, \[r[0-9]+}} +; SWIFT: vld1.64 {{.d[0-9]+, d[0-9]+}, \[r[0-9]+}} +; SWIFT: vadd.i64 q +; SWIFT: vst1.64 {{.d[0-9]+, d[0-9]+}, \[r[0-9]+}} +define void @t3(<4 x i32>* %r, <2 x i64>* %a, <2 x i64>* %b) nounwind { +entry: + %0 = load <2 x i64>* %a, align 8 + %1 = load <2 x i64>* %b, align 8 + %2 = add <2 x i64> %0, %1 + %3 = bitcast <2 x i64> %2 to <4 x i32> + store <4 x i32> %3, <4 x i32>* %r, align 8 + ret void +} diff --git a/test/CodeGen/ARM/opt-shuff-tstore.ll b/test/CodeGen/ARM/opt-shuff-tstore.ll index df98e23..74c9a21 100644 --- a/test/CodeGen/ARM/opt-shuff-tstore.ll +++ b/test/CodeGen/ARM/opt-shuff-tstore.ll @@ -2,7 +2,7 @@ ; CHECK: func_4_8 ; CHECK: vst1.32 -; CHECK-NEXT: bx lr +; CHECK: bx lr define void @func_4_8(<4 x i8> %param, <4 x i8>* %p) { %r = add <4 x i8> %param, <i8 1, i8 2, i8 3, i8 4> store <4 x i8> %r, <4 x i8>* %p @@ -11,7 +11,7 @@ define void @func_4_8(<4 x i8> %param, <4 x i8>* %p) { ; CHECK: func_2_16 ; CHECK: vst1.32 -; CHECK-NEXT: bx lr +; CHECK: bx lr define void @func_2_16(<2 x i16> %param, <2 x i16>* %p) { %r = add <2 x i16> %param, <i16 1, i16 2> store <2 x i16> %r, <2 x i16>* %p diff --git a/test/CodeGen/ARM/reg_sequence.ll b/test/CodeGen/ARM/reg_sequence.ll index 05794e4..6d6586e 100644 --- a/test/CodeGen/ARM/reg_sequence.ll +++ b/test/CodeGen/ARM/reg_sequence.ll @@ -1,5 +1,5 @@ -; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -; RUN: llc < %s -march=arm -mcpu=cortex-a8 -regalloc=basic | FileCheck %s +; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 | FileCheck %s +; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 -regalloc=basic | FileCheck %s ; Implementing vld / vst as REG_SEQUENCE eliminates the extra vmov's. %struct.int16x8_t = type { <8 x i16> } @@ -124,7 +124,6 @@ return1: return2: ; CHECK: %return2 ; CHECK: vadd.i32 -; CHECK: vorr {{q[0-9]+}}, {{q[0-9]+}} ; CHECK-NOT: vmov ; CHECK: vst2.32 {d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}} %tmp100 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 0 ; <<4 x i32>> [#uses=1] @@ -137,7 +136,7 @@ return2: define <8 x i16> @t5(i16* %A, <8 x i16>* %B) nounwind { ; CHECK: t5: -; CHECK: vldmia +; CHECK: vld1.32 ; How can FileCheck match Q and D registers? We need a lisp interpreter. 
; CHECK: vorr {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}} ; CHECK-NOT: vmov @@ -243,8 +242,8 @@ define arm_aapcs_vfpcc float @t9(%0* nocapture, %3* nocapture) nounwind { ; CHECK: vldr ; CHECK-NOT: vmov d{{.*}}, d16 ; CHECK: vmov.i32 d17 -; CHECK-NEXT: vstmia r0, {d16, d17} -; CHECK-NEXT: vstmia r0, {d16, d17} +; CHECK-NEXT: vst1.64 {d16, d17}, [r0, :128] +; CHECK-NEXT: vst1.64 {d16, d17}, [r0, :128] %3 = bitcast double 0.000000e+00 to <2 x float> ; <<2 x float>> [#uses=2] %4 = shufflevector <2 x float> %3, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1] store <4 x float> %4, <4 x float>* undef, align 16 diff --git a/test/CodeGen/ARM/select.ll b/test/CodeGen/ARM/select.ll index 5575566..62708ed 100644 --- a/test/CodeGen/ARM/select.ll +++ b/test/CodeGen/ARM/select.ll @@ -80,7 +80,7 @@ define double @f7(double %a, double %b) { ; CHECK-NEON: adr [[R2:r[0-9]+]], LCPI7_0 ; CHECK-NEON-NEXT: cmp r0, [[R3]] ; CHECK-NEON-NEXT: it eq -; CHECK-NEON-NEXT: addeq.w {{r.*}}, [[R2]] +; CHECK-NEON-NEXT: addeq{{.*}} [[R2]], #4 ; CHECK-NEON-NEXT: ldr ; CHECK-NEON: bx diff --git a/test/CodeGen/ARM/select_xform.ll b/test/CodeGen/ARM/select_xform.ll index 26f7cb6..7507808 100644 --- a/test/CodeGen/ARM/select_xform.ll +++ b/test/CodeGen/ARM/select_xform.ll @@ -9,7 +9,7 @@ define i32 @t1(i32 %a, i32 %b, i32 %c) nounwind { ; T2: t1: ; T2: mvn r0, #-2147483648 -; T2: addle.w r1, r1 +; T2: addle r1, r0 ; T2: mov r0, r1 %tmp1 = icmp sgt i32 %c, 10 %tmp2 = select i1 %tmp1, i32 0, i32 2147483647 @@ -23,7 +23,7 @@ define i32 @t2(i32 %a, i32 %b, i32 %c, i32 %d) nounwind { ; ARM: mov r0, r1 ; T2: t2: -; T2: suble.w r1, r1, #10 +; T2: suble r1, #10 ; T2: mov r0, r1 %tmp1 = icmp sgt i32 %c, 10 %tmp2 = select i1 %tmp1, i32 0, i32 10 @@ -33,12 +33,12 @@ define i32 @t2(i32 %a, i32 %b, i32 %c, i32 %d) nounwind { define i32 @t3(i32 %a, i32 %b, i32 %x, i32 %y) nounwind { ; ARM: t3: -; ARM: mvnlt r2, #0 -; ARM: and r0, r2, r3 +; ARM: andge r3, r3, r2 +; ARM: mov r0, r3 ; T2: t3: -; T2: movlt.w r2, #-1 -; T2: and.w r0, r2, r3 +; T2: andge r3, r2 +; T2: mov r0, r3 %cond = icmp slt i32 %a, %b %z = select i1 %cond, i32 -1, i32 %x %s = and i32 %z, %y @@ -47,12 +47,12 @@ define i32 @t3(i32 %a, i32 %b, i32 %x, i32 %y) nounwind { define i32 @t4(i32 %a, i32 %b, i32 %x, i32 %y) nounwind { ; ARM: t4: -; ARM: movlt r2, #0 -; ARM: orr r0, r2, r3 +; ARM: orrge r3, r3, r2 +; ARM: mov r0, r3 ; T2: t4: -; T2: movlt r2, #0 -; T2: orr.w r0, r2, r3 +; T2: orrge r3, r2 +; T2: mov r0, r3 %cond = icmp slt i32 %a, %b %z = select i1 %cond, i32 0, i32 %x %s = or i32 %z, %y @@ -81,7 +81,7 @@ define i32 @t6(i32 %a, i32 %b, i32 %c, i32 %d) nounwind { ; T2: t6: ; T2-NOT: movge -; T2: eorlt.w r3, r3, r2 +; T2: eorlt r3, r2 %cond = icmp slt i32 %a, %b %tmp1 = select i1 %cond, i32 %c, i32 0 %tmp2 = xor i32 %tmp1, %d @@ -179,3 +179,46 @@ define i32 @t12(i32 %a, i32 %b) nounwind { %tmp1 = select i1 %cond, i32 %a, i32 %x ret i32 %tmp1 } + +; Handle frame index operands. 
+define void @pr13628() nounwind uwtable align 2 { + %x3 = alloca i8, i32 256, align 8 + %x4 = load i8* undef, align 1 + %x5 = icmp ne i8 %x4, 0 + %x6 = select i1 %x5, i8* %x3, i8* null + call void @bar(i8* %x6) nounwind + ret void +} +declare void @bar(i8*) + +; Fold zext i1 into predicated add +define i32 @t13(i32 %c, i32 %a) nounwind readnone ssp { +entry: +; ARM: t13 +; ARM: cmp r1, #10 +; ARM: addgt r0, r0, #1 + +; T2: t13 +; T2: cmp r1, #10 +; T2: addgt r0, #1 + %cmp = icmp sgt i32 %a, 10 + %conv = zext i1 %cmp to i32 + %add = add i32 %conv, %c + ret i32 %add +} + +; Fold sext i1 into predicated sub +define i32 @t14(i32 %c, i32 %a) nounwind readnone ssp { +entry: +; ARM: t14 +; ARM: cmp r1, #10 +; ARM: subgt r0, r0, #1 + +; T2: t14 +; T2: cmp r1, #10 +; T2: subgt r0, #1 + %cmp = icmp sgt i32 %a, 10 + %conv = sext i1 %cmp to i32 + %add = add i32 %conv, %c + ret i32 %add +} diff --git a/test/CodeGen/ARM/struct_byval.ll b/test/CodeGen/ARM/struct_byval.ll index 99ba475..e9541c2 100644 --- a/test/CodeGen/ARM/struct_byval.ll +++ b/test/CodeGen/ARM/struct_byval.ll @@ -44,3 +44,47 @@ entry: declare i32 @e1(%struct.SmallStruct* nocapture byval %in) nounwind declare i32 @e2(%struct.LargeStruct* nocapture byval %in) nounwind declare i32 @e3(%struct.LargeStruct* nocapture byval align 16 %in) nounwind + +; rdar://12442472 +; We can't do tail call since address of s is passed to the callee and part of +; s is in caller's local frame. +define void @f3(%struct.SmallStruct* nocapture byval %s) nounwind optsize { +; CHECK: f3 +; CHECK: bl _consumestruct +entry: + %0 = bitcast %struct.SmallStruct* %s to i8* + tail call void @consumestruct(i8* %0, i32 80) optsize + ret void +} + +define void @f4(%struct.SmallStruct* nocapture byval %s) nounwind optsize { +; CHECK: f4 +; CHECK: bl _consumestruct +entry: + %addr = getelementptr inbounds %struct.SmallStruct* %s, i32 0, i32 0 + %0 = bitcast i32* %addr to i8* + tail call void @consumestruct(i8* %0, i32 80) optsize + ret void +} + +; We can do tail call here since s is in the incoming argument area. +define void @f5(i32 %a, i32 %b, i32 %c, i32 %d, %struct.SmallStruct* nocapture byval %s) nounwind optsize { +; CHECK: f5 +; CHECK: b _consumestruct +entry: + %0 = bitcast %struct.SmallStruct* %s to i8* + tail call void @consumestruct(i8* %0, i32 80) optsize + ret void +} + +define void @f6(i32 %a, i32 %b, i32 %c, i32 %d, %struct.SmallStruct* nocapture byval %s) nounwind optsize { +; CHECK: f6 +; CHECK: b _consumestruct +entry: + %addr = getelementptr inbounds %struct.SmallStruct* %s, i32 0, i32 0 + %0 = bitcast i32* %addr to i8* + tail call void @consumestruct(i8* %0, i32 80) optsize + ret void +} + +declare void @consumestruct(i8* nocapture %structp, i32 %structsize) nounwind diff --git a/test/CodeGen/ARM/sub-cmp-peephole.ll b/test/CodeGen/ARM/sub-cmp-peephole.ll index 6fcbdee..2961b94 100644 --- a/test/CodeGen/ARM/sub-cmp-peephole.ll +++ b/test/CodeGen/ARM/sub-cmp-peephole.ll @@ -63,3 +63,24 @@ if.then: if.else: ret i32 %sub } + +; If the sub/rsb instruction is predicated, we can't use the flags. 
+; <rdar://problem/12263428> +; Test case from MultiSource/Benchmarks/Ptrdist/bc/number.s +; CHECK: bc_raise +; CHECK: rsbeq +; CHECK: cmp +define i32 @bc_raise() nounwind ssp { +entry: + %val.2.i = select i1 undef, i32 0, i32 undef + %sub.i = sub nsw i32 0, %val.2.i + %retval.0.i = select i1 undef, i32 %val.2.i, i32 %sub.i + %cmp1 = icmp eq i32 %retval.0.i, 0 + br i1 %cmp1, label %land.lhs.true, label %if.end11 + +land.lhs.true: ; preds = %num2long.exit + ret i32 17 + +if.end11: ; preds = %num2long.exit + ret i32 23 +} diff --git a/test/CodeGen/ARM/sub.ll b/test/CodeGen/ARM/sub.ll index 474043a..7f82ca7 100644 --- a/test/CodeGen/ARM/sub.ll +++ b/test/CodeGen/ARM/sub.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=arm < %s | FileCheck %s +; RUN: llc -march=arm -mcpu=cortex-a8 < %s | FileCheck %s ; 171 = 0x000000ab define i64 @f1(i64 %a) { diff --git a/test/CodeGen/ARM/subreg-remat.ll b/test/CodeGen/ARM/subreg-remat.ll index 03ae12c..455bfce 100644 --- a/test/CodeGen/ARM/subreg-remat.ll +++ b/test/CodeGen/ARM/subreg-remat.ll @@ -4,14 +4,14 @@ target triple = "thumbv7-apple-ios" ; ; The vector %v2 is built like this: ; -; %vreg6:ssub_1<def> = VMOVSR %vreg0<kill>, pred:14, pred:%noreg, %vreg6<imp-def>; DPR_VFP2:%vreg6 GPR:%vreg0 +; %vreg6:ssub_1<def> = ... ; %vreg6:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg; mem:LD4[ConstantPool] DPR_VFP2:%vreg6 ; ; When %vreg6 spills, the VLDRS constant pool load cannot be rematerialized ; since it implicitly reads the ssub_1 sub-register. ; ; CHECK: f1 -; CHECK: vmov s1, r0 +; CHECK: vmov d0, r0, r0 ; CHECK: vldr s0, LCPI ; The vector must be spilled: ; CHECK: vstr d0, diff --git a/test/CodeGen/ARM/trap.ll b/test/CodeGen/ARM/trap.ll index 38842a9..21865f8 100644 --- a/test/CodeGen/ARM/trap.ll +++ b/test/CodeGen/ARM/trap.ll @@ -14,4 +14,16 @@ entry: unreachable } +define void @t2() nounwind { +entry: +; INSTR: t2: +; INSTR: trap + +; FUNC: t2: +; FUNC: bl __trap + call void @llvm.debugtrap() + unreachable +} + declare void @llvm.trap() nounwind +declare void @llvm.debugtrap() nounwind diff --git a/test/CodeGen/ARM/twoaddrinstr.ll b/test/CodeGen/ARM/twoaddrinstr.ll index 4e227dd..fc2aa1e 100644 --- a/test/CodeGen/ARM/twoaddrinstr.ll +++ b/test/CodeGen/ARM/twoaddrinstr.ll @@ -4,18 +4,18 @@ define void @PR13378() nounwind { ; This was orriginally a crasher trying to schedule the instructions. 
; CHECK: PR13378: -; CHECK: vldmia +; CHECK: vld1.32 +; CHECK-NEXT: vst1.32 +; CHECK-NEXT: vst1.32 ; CHECK-NEXT: vmov.f32 -; CHECK-NEXT: vstmia -; CHECK-NEXT: vstmia ; CHECK-NEXT: vmov.f32 -; CHECK-NEXT: vstmia +; CHECK-NEXT: vst1.32 entry: - %0 = load <4 x float>* undef - store <4 x float> zeroinitializer, <4 x float>* undef - store <4 x float> %0, <4 x float>* undef + %0 = load <4 x float>* undef, align 4 + store <4 x float> zeroinitializer, <4 x float>* undef, align 4 + store <4 x float> %0, <4 x float>* undef, align 4 %1 = insertelement <4 x float> %0, float 1.000000e+00, i32 3 - store <4 x float> %1, <4 x float>* undef + store <4 x float> %1, <4 x float>* undef, align 4 unreachable } diff --git a/test/CodeGen/ARM/unaligned_load_store.ll b/test/CodeGen/ARM/unaligned_load_store.ll index 869b926..3064202 100644 --- a/test/CodeGen/ARM/unaligned_load_store.ll +++ b/test/CodeGen/ARM/unaligned_load_store.ll @@ -1,5 +1,5 @@ ; RUN: llc < %s -march=arm -pre-RA-sched=source | FileCheck %s -check-prefix=EXPANDED -; RUN: llc < %s -mtriple=armv6-apple-darwin -mcpu=cortex-a8 -arm-strict-align -pre-RA-sched=source | FileCheck %s -check-prefix=EXPANDED +; RUN: llc < %s -mtriple=armv6-apple-darwin -mcpu=cortex-a8 -mattr=-neon -arm-strict-align -pre-RA-sched=source | FileCheck %s -check-prefix=EXPANDED ; RUN: llc < %s -mtriple=armv6-apple-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=UNALIGNED ; rdar://7113725 @@ -59,3 +59,19 @@ entry: store double %tmp, double* %b, align 1 ret void } + +define void @byte_word_ops(i32* %a, i32* %b) nounwind { +entry: +; EXPANDED: byte_word_ops: +; EXPANDED: ldrb +; EXPANDED: strb + +; UNALIGNED: byte_word_ops: +; UNALIGNED-NOT: ldrb +; UNALIGNED: ldr +; UNALIGNED-NOT: strb +; UNALIGNED: str + %tmp = load i32* %a, align 1 + store i32 %tmp, i32* %b, align 1 + ret void +} diff --git a/test/CodeGen/ARM/unaligned_load_store_vector.ll b/test/CodeGen/ARM/unaligned_load_store_vector.ll new file mode 100644 index 0000000..25ae651 --- /dev/null +++ b/test/CodeGen/ARM/unaligned_load_store_vector.ll @@ -0,0 +1,487 @@ +;RUN: llc < %s -march=arm -mattr=+v7 -mattr=+neon | FileCheck %s + +;ALIGN = 1 +;SIZE = 64 +;TYPE = <8 x i8> +define void @v64_v8i8_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v64_v8i8_1: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <8 x i8>* + %vo = bitcast i8* %po to <8 x i8>* +;CHECK: vld1.8 + %v1 = load <8 x i8>* %vi, align 1 +;CHECK: vst1.8 + store <8 x i8> %v1, <8 x i8>* %vo, align 1 + ret void +} + + +;ALIGN = 1 +;SIZE = 64 +;TYPE = <4 x i16> +define void @v64_v4i16_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v64_v4i16_1: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <4 x i16>* + %vo = bitcast i8* %po to <4 x i16>* +;CHECK: vld1.8 + %v1 = load <4 x i16>* %vi, align 1 +;CHECK: vst1.8 + store <4 x i16> %v1, <4 x i16>* %vo, align 1 + ret void +} + + +;ALIGN = 1 +;SIZE = 64 +;TYPE = <2 x i32> +define void @v64_v2i32_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v64_v2i32_1: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <2 x i32>* + %vo = bitcast i8* %po to <2 x i32>* +;CHECK: vld1.8 + %v1 = load <2 x i32>* %vi, align 1 +;CHECK: vst1.8 + store <2 x i32> %v1, <2 x i32>* %vo, align 1 + ret void +} + + +;ALIGN = 1 +;SIZE = 64 +;TYPE = <2 x float> +define void @v64_v2f32_1(i8* 
noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v64_v2f32_1: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <2 x float>* + %vo = bitcast i8* %po to <2 x float>* +;CHECK: vld1.8 + %v1 = load <2 x float>* %vi, align 1 +;CHECK: vst1.8 + store <2 x float> %v1, <2 x float>* %vo, align 1 + ret void +} + + +;ALIGN = 1 +;SIZE = 128 +;TYPE = <16 x i8> +define void @v128_v16i8_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v128_v16i8_1: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <16 x i8>* + %vo = bitcast i8* %po to <16 x i8>* +;CHECK: vld1.8 + %v1 = load <16 x i8>* %vi, align 1 +;CHECK: vst1.8 + store <16 x i8> %v1, <16 x i8>* %vo, align 1 + ret void +} + + +;ALIGN = 1 +;SIZE = 128 +;TYPE = <8 x i16> +define void @v128_v8i16_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v128_v8i16_1: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <8 x i16>* + %vo = bitcast i8* %po to <8 x i16>* +;CHECK: vld1.8 + %v1 = load <8 x i16>* %vi, align 1 +;CHECK: vst1.8 + store <8 x i16> %v1, <8 x i16>* %vo, align 1 + ret void +} + + +;ALIGN = 1 +;SIZE = 128 +;TYPE = <4 x i32> +define void @v128_v4i32_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v128_v4i32_1: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <4 x i32>* + %vo = bitcast i8* %po to <4 x i32>* +;CHECK: vld1.8 + %v1 = load <4 x i32>* %vi, align 1 +;CHECK: vst1.8 + store <4 x i32> %v1, <4 x i32>* %vo, align 1 + ret void +} + + +;ALIGN = 1 +;SIZE = 128 +;TYPE = <2 x i64> +define void @v128_v2i64_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v128_v2i64_1: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <2 x i64>* + %vo = bitcast i8* %po to <2 x i64>* +;CHECK: vld1.8 + %v1 = load <2 x i64>* %vi, align 1 +;CHECK: vst1.8 + store <2 x i64> %v1, <2 x i64>* %vo, align 1 + ret void +} + + +;ALIGN = 1 +;SIZE = 128 +;TYPE = <4 x float> +define void @v128_v4f32_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v128_v4f32_1: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <4 x float>* + %vo = bitcast i8* %po to <4 x float>* +;CHECK: vld1.8 + %v1 = load <4 x float>* %vi, align 1 +;CHECK: vst1.8 + store <4 x float> %v1, <4 x float>* %vo, align 1 + ret void +} + + +;ALIGN = 2 +;SIZE = 64 +;TYPE = <8 x i8> +define void @v64_v8i8_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v64_v8i8_2: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <8 x i8>* + %vo = bitcast i8* %po to <8 x i8>* +;CHECK: vld1.16 + %v1 = load <8 x i8>* %vi, align 2 +;CHECK: vst1.16 + store <8 x i8> %v1, <8 x i8>* %vo, align 2 + ret void +} + + +;ALIGN = 2 +;SIZE = 64 +;TYPE = <4 x i16> +define void @v64_v4i16_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v64_v4i16_2: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <4 x i16>* + %vo = bitcast i8* %po to <4 x i16>* +;CHECK: vld1.16 + %v1 = load <4 x i16>* %vi, align 2 +;CHECK: vst1.16 + store <4 x i16> %v1, <4 x i16>* %vo, align 2 + ret void 
+} + + +;ALIGN = 2 +;SIZE = 64 +;TYPE = <2 x i32> +define void @v64_v2i32_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v64_v2i32_2: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <2 x i32>* + %vo = bitcast i8* %po to <2 x i32>* +;CHECK: vld1.16 + %v1 = load <2 x i32>* %vi, align 2 +;CHECK: vst1.16 + store <2 x i32> %v1, <2 x i32>* %vo, align 2 + ret void +} + + +;ALIGN = 2 +;SIZE = 64 +;TYPE = <2 x float> +define void @v64_v2f32_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v64_v2f32_2: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <2 x float>* + %vo = bitcast i8* %po to <2 x float>* +;CHECK: vld1.16 + %v1 = load <2 x float>* %vi, align 2 +;CHECK: vst1.16 + store <2 x float> %v1, <2 x float>* %vo, align 2 + ret void +} + + +;ALIGN = 2 +;SIZE = 128 +;TYPE = <16 x i8> +define void @v128_v16i8_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v128_v16i8_2: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <16 x i8>* + %vo = bitcast i8* %po to <16 x i8>* +;CHECK: vld1.16 + %v1 = load <16 x i8>* %vi, align 2 +;CHECK: vst1.16 + store <16 x i8> %v1, <16 x i8>* %vo, align 2 + ret void +} + + +;ALIGN = 2 +;SIZE = 128 +;TYPE = <8 x i16> +define void @v128_v8i16_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v128_v8i16_2: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <8 x i16>* + %vo = bitcast i8* %po to <8 x i16>* +;CHECK: vld1.16 + %v1 = load <8 x i16>* %vi, align 2 +;CHECK: vst1.16 + store <8 x i16> %v1, <8 x i16>* %vo, align 2 + ret void +} + + +;ALIGN = 2 +;SIZE = 128 +;TYPE = <4 x i32> +define void @v128_v4i32_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v128_v4i32_2: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <4 x i32>* + %vo = bitcast i8* %po to <4 x i32>* +;CHECK: vld1.16 + %v1 = load <4 x i32>* %vi, align 2 +;CHECK: vst1.16 + store <4 x i32> %v1, <4 x i32>* %vo, align 2 + ret void +} + + +;ALIGN = 2 +;SIZE = 128 +;TYPE = <2 x i64> +define void @v128_v2i64_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v128_v2i64_2: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <2 x i64>* + %vo = bitcast i8* %po to <2 x i64>* +;CHECK: vld1.16 + %v1 = load <2 x i64>* %vi, align 2 +;CHECK: vst1.16 + store <2 x i64> %v1, <2 x i64>* %vo, align 2 + ret void +} + + +;ALIGN = 2 +;SIZE = 128 +;TYPE = <4 x float> +define void @v128_v4f32_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v128_v4f32_2: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <4 x float>* + %vo = bitcast i8* %po to <4 x float>* +;CHECK: vld1.16 + %v1 = load <4 x float>* %vi, align 2 +;CHECK: vst1.16 + store <4 x float> %v1, <4 x float>* %vo, align 2 + ret void +} + + +;ALIGN = 4 +;SIZE = 64 +;TYPE = <8 x i8> +define void @v64_v8i8_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v64_v8i8_4: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <8 x i8>* + %vo = bitcast i8* %po to <8 x i8>* +;CHECK: vldr + %v1 = load <8 x 
i8>* %vi, align 4 +;CHECK: vstr + store <8 x i8> %v1, <8 x i8>* %vo, align 4 + ret void +} + + +;ALIGN = 4 +;SIZE = 64 +;TYPE = <4 x i16> +define void @v64_v4i16_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v64_v4i16_4: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <4 x i16>* + %vo = bitcast i8* %po to <4 x i16>* +;CHECK: vldr + %v1 = load <4 x i16>* %vi, align 4 +;CHECK: vstr + store <4 x i16> %v1, <4 x i16>* %vo, align 4 + ret void +} + + +;ALIGN = 4 +;SIZE = 64 +;TYPE = <2 x i32> +define void @v64_v2i32_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v64_v2i32_4: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <2 x i32>* + %vo = bitcast i8* %po to <2 x i32>* +;CHECK: vldr + %v1 = load <2 x i32>* %vi, align 4 +;CHECK: vstr + store <2 x i32> %v1, <2 x i32>* %vo, align 4 + ret void +} + + +;ALIGN = 4 +;SIZE = 64 +;TYPE = <2 x float> +define void @v64_v2f32_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v64_v2f32_4: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <2 x float>* + %vo = bitcast i8* %po to <2 x float>* +;CHECK: vldr + %v1 = load <2 x float>* %vi, align 4 +;CHECK: vstr + store <2 x float> %v1, <2 x float>* %vo, align 4 + ret void +} + + +;ALIGN = 4 +;SIZE = 128 +;TYPE = <16 x i8> +define void @v128_v16i8_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v128_v16i8_4: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <16 x i8>* + %vo = bitcast i8* %po to <16 x i8>* +;CHECK: vld1.32 + %v1 = load <16 x i8>* %vi, align 4 +;CHECK: vst1.32 + store <16 x i8> %v1, <16 x i8>* %vo, align 4 + ret void +} + + +;ALIGN = 4 +;SIZE = 128 +;TYPE = <8 x i16> +define void @v128_v8i16_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v128_v8i16_4: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <8 x i16>* + %vo = bitcast i8* %po to <8 x i16>* +;CHECK: vld1.32 + %v1 = load <8 x i16>* %vi, align 4 +;CHECK: vst1.32 + store <8 x i16> %v1, <8 x i16>* %vo, align 4 + ret void +} + + +;ALIGN = 4 +;SIZE = 128 +;TYPE = <4 x i32> +define void @v128_v4i32_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v128_v4i32_4: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <4 x i32>* + %vo = bitcast i8* %po to <4 x i32>* +;CHECK: vld1.32 + %v1 = load <4 x i32>* %vi, align 4 +;CHECK: vst1.32 + store <4 x i32> %v1, <4 x i32>* %vo, align 4 + ret void +} + + +;ALIGN = 4 +;SIZE = 128 +;TYPE = <2 x i64> +define void @v128_v2i64_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v128_v2i64_4: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <2 x i64>* + %vo = bitcast i8* %po to <2 x i64>* +;CHECK: vld1.32 + %v1 = load <2 x i64>* %vi, align 4 +;CHECK: vst1.32 + store <2 x i64> %v1, <2 x i64>* %vo, align 4 + ret void +} + + +;ALIGN = 4 +;SIZE = 128 +;TYPE = <4 x float> +define void @v128_v4f32_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind { +;CHECK: v128_v4f32_4: +entry: + %po = getelementptr i8* %out, i32 0 + %pi = getelementptr i8* %in, i32 0 + %vi = bitcast i8* %pi to <4 x float>* + 
%vo = bitcast i8* %po to <4 x float>* +;CHECK: vld1.32 + %v1 = load <4 x float>* %vi, align 4 +;CHECK: vst1.32 + store <4 x float> %v1, <4 x float>* %vo, align 4 + ret void +} + diff --git a/test/CodeGen/ARM/vbsl-constant.ll b/test/CodeGen/ARM/vbsl-constant.ll index f157dbd..ffda0a5 100644 --- a/test/CodeGen/ARM/vbsl-constant.ll +++ b/test/CodeGen/ARM/vbsl-constant.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s +; RUN: llc < %s -mtriple=arm-apple-ios -mattr=+neon | FileCheck %s define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind { ;CHECK: v_bsli8: @@ -59,8 +59,8 @@ define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind { ;CHECK: v_bslQi8: -;CHECK: vldmia -;CHECK: vldmia +;CHECK: vld1.32 +;CHECK: vld1.32 ;CHECK: vbsl %tmp1 = load <16 x i8>* %A %tmp2 = load <16 x i8>* %B @@ -73,8 +73,8 @@ define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind { ;CHECK: v_bslQi16: -;CHECK: vldmia -;CHECK: vldmia +;CHECK: vld1.32 +;CHECK: vld1.32 ;CHECK: vbsl %tmp1 = load <8 x i16>* %A %tmp2 = load <8 x i16>* %B @@ -87,8 +87,8 @@ define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwin define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind { ;CHECK: v_bslQi32: -;CHECK: vldmia -;CHECK: vldmia +;CHECK: vld1.32 +;CHECK: vld1.32 ;CHECK: vbsl %tmp1 = load <4 x i32>* %A %tmp2 = load <4 x i32>* %B @@ -101,9 +101,9 @@ define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwin define <2 x i64> @v_bslQi64(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C) nounwind { ;CHECK: v_bslQi64: -;CHECK: vldmia -;CHECK: vldmia -;CHECK: vldmia +;CHECK: vld1.32 +;CHECK: vld1.32 +;CHECK: vld1.64 ;CHECK: vbsl %tmp1 = load <2 x i64>* %A %tmp2 = load <2 x i64>* %B diff --git a/test/CodeGen/ARM/vbsl.ll b/test/CodeGen/ARM/vbsl.ll index 9f3bb4e..750fb0d 100644 --- a/test/CodeGen/ARM/vbsl.ll +++ b/test/CodeGen/ARM/vbsl.ll @@ -1,5 +1,7 @@ ; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s +; rdar://12471808 + define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind { ;CHECK: v_bsli8: ;CHECK: vbsl @@ -103,3 +105,98 @@ define <2 x i64> @v_bslQi64(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C) nounwin %tmp7 = or <2 x i64> %tmp4, %tmp6 ret <2 x i64> %tmp7 } + +define <8 x i8> @f1(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) nounwind readnone optsize ssp { +; CHECK: f1: +; CHECK: vbsl + %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) nounwind + ret <8 x i8> %vbsl.i +} + +define <4 x i16> @f2(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c) nounwind readnone optsize ssp { +; CHECK: f2: +; CHECK: vbsl + %vbsl3.i = tail call <4 x i16> @llvm.arm.neon.vbsl.v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c) nounwind + ret <4 x i16> %vbsl3.i +} + +define <2 x i32> @f3(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) nounwind readnone optsize ssp { +; CHECK: f3: +; CHECK: vbsl + %vbsl3.i = tail call <2 x i32> @llvm.arm.neon.vbsl.v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) nounwind + ret <2 x i32> %vbsl3.i +} + +define <2 x float> @f4(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind readnone optsize ssp { +; CHECK: f4: +; CHECK: vbsl + %vbsl4.i = tail call <2 x float> @llvm.arm.neon.vbsl.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind + ret <2 x float> 
%vbsl4.i +} + +define <16 x i8> @g1(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) nounwind readnone optsize ssp { +; CHECK: g1: +; CHECK: vbsl + %vbsl.i = tail call <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) nounwind + ret <16 x i8> %vbsl.i +} + +define <8 x i16> @g2(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind readnone optsize ssp { +; CHECK: g2: +; CHECK: vbsl + %vbsl3.i = tail call <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind + ret <8 x i16> %vbsl3.i +} + +define <4 x i32> @g3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind readnone optsize ssp { +; CHECK: g3: +; CHECK: vbsl + %vbsl3.i = tail call <4 x i32> @llvm.arm.neon.vbsl.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind + ret <4 x i32> %vbsl3.i +} + +define <4 x float> @g4(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind readnone optsize ssp { +; CHECK: g4: +; CHECK: vbsl + %vbsl4.i = tail call <4 x float> @llvm.arm.neon.vbsl.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind + ret <4 x float> %vbsl4.i +} + +define <1 x i64> @test_vbsl_s64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind readnone optsize ssp { +; CHECK: test_vbsl_s64: +; CHECK: vbsl d + %vbsl3.i = tail call <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind + ret <1 x i64> %vbsl3.i +} + +define <1 x i64> @test_vbsl_u64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind readnone optsize ssp { +; CHECK: test_vbsl_u64: +; CHECK: vbsl d + %vbsl3.i = tail call <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind + ret <1 x i64> %vbsl3.i +} + +define <2 x i64> @test_vbslq_s64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind readnone optsize ssp { +; CHECK: test_vbslq_s64: +; CHECK: vbsl q + %vbsl3.i = tail call <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind + ret <2 x i64> %vbsl3.i +} + +define <2 x i64> @test_vbslq_u64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind readnone optsize ssp { +; CHECK: test_vbslq_u64: +; CHECK: vbsl q + %vbsl3.i = tail call <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind + ret <2 x i64> %vbsl3.i +} + +declare <4 x i32> @llvm.arm.neon.vbsl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone +declare <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone +declare <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone +declare <2 x i32> @llvm.arm.neon.vbsl.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) nounwind readnone +declare <4 x i16> @llvm.arm.neon.vbsl.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) nounwind readnone +declare <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone +declare <2 x float> @llvm.arm.neon.vbsl.v2f32(<2 x float>, <2 x float>, <2 x float>) nounwind readnone +declare <4 x float> @llvm.arm.neon.vbsl.v4f32(<4 x float>, <4 x float>, <4 x float>) nounwind readnone +declare <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>) nounwind readnone +declare <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64>, <1 x i64>, <1 x i64>) nounwind readnone diff --git a/test/CodeGen/ARM/vdup.ll b/test/CodeGen/ARM/vdup.ll index 05332e4..2cf94d6 100644 --- a/test/CodeGen/ARM/vdup.ll +++ b/test/CodeGen/ARM/vdup.ll @@ -261,3 +261,73 @@ define void @redundantVdup(<8 x i8>* %ptr) nounwind { store <8 x i8> %2, <8 x i8>* %ptr, align 8 ret void } + +define <4 x i32> @tdupi(i32 %x, i32 %y) { 
+;CHECK: tdupi +;CHECK: vdup.32 + %1 = insertelement <4 x i32> undef, i32 %x, i32 0 + %2 = insertelement <4 x i32> %1, i32 %x, i32 1 + %3 = insertelement <4 x i32> %2, i32 %x, i32 2 + %4 = insertelement <4 x i32> %3, i32 %y, i32 3 + ret <4 x i32> %4 +} + +define <4 x float> @tdupf(float %x, float %y) { +;CHECK: tdupf +;CHECK: vdup.32 + %1 = insertelement <4 x float> undef, float %x, i32 0 + %2 = insertelement <4 x float> %1, float %x, i32 1 + %3 = insertelement <4 x float> %2, float %x, i32 2 + %4 = insertelement <4 x float> %3, float %y, i32 3 + ret <4 x float> %4 +} + +; This test checks that when splatting an element from a vector into another, +; the value isn't moved out to GPRs first. +define <4 x i32> @tduplane(<4 x i32> %invec) { +;CHECK: tduplane +;CHECK-NOT: vmov {{.*}}, d16[1] +;CHECK: vdup.32 {{.*}}, d16[1] + %in = extractelement <4 x i32> %invec, i32 1 + %1 = insertelement <4 x i32> undef, i32 %in, i32 0 + %2 = insertelement <4 x i32> %1, i32 %in, i32 1 + %3 = insertelement <4 x i32> %2, i32 %in, i32 2 + %4 = insertelement <4 x i32> %3, i32 255, i32 3 + ret <4 x i32> %4 +} + +define <2 x float> @check_f32(<4 x float> %v) nounwind { +;CHECK: check_f32: +;CHECK: vdup.32 {{.*}}, d{{..}}[1] + %x = extractelement <4 x float> %v, i32 3 + %1 = insertelement <2 x float> undef, float %x, i32 0 + %2 = insertelement <2 x float> %1, float %x, i32 1 + ret <2 x float> %2 +} + +define <2 x i32> @check_i32(<4 x i32> %v) nounwind { +;CHECK: check_i32: +;CHECK: vdup.32 {{.*}}, d{{..}}[1] + %x = extractelement <4 x i32> %v, i32 3 + %1 = insertelement <2 x i32> undef, i32 %x, i32 0 + %2 = insertelement <2 x i32> %1, i32 %x, i32 1 + ret <2 x i32> %2 +} + +define <4 x i16> @check_i16(<8 x i16> %v) nounwind { +;CHECK: check_i16: +;CHECK: vdup.16 {{.*}}, d{{..}}[3] + %x = extractelement <8 x i16> %v, i32 3 + %1 = insertelement <4 x i16> undef, i16 %x, i32 0 + %2 = insertelement <4 x i16> %1, i16 %x, i32 1 + ret <4 x i16> %2 +} + +define <8 x i8> @check_i8(<16 x i8> %v) nounwind { +;CHECK: check_i8: +;CHECK: vdup.8 {{.*}}, d{{..}}[3] + %x = extractelement <16 x i8> %v, i32 3 + %1 = insertelement <8 x i8> undef, i8 %x, i32 0 + %2 = insertelement <8 x i8> %1, i8 %x, i32 1 + ret <8 x i8> %2 +} diff --git a/test/CodeGen/ARM/vector-extend-narrow.ll b/test/CodeGen/ARM/vector-extend-narrow.ll index 8fd3db2..22af797 100644 --- a/test/CodeGen/ARM/vector-extend-narrow.ll +++ b/test/CodeGen/ARM/vector-extend-narrow.ll @@ -62,3 +62,14 @@ define <4 x i8> @i(<4 x i8>* %x) { %2 = sdiv <4 x i8> zeroinitializer, %1 ret <4 x i8> %2 } +; CHECK: j: +define <4 x i32> @j(<4 x i8>* %in) nounwind { + ; CHECK: vld1 + ; CHECK: vmovl.u8 + ; CHECK: vmovl.u16 + ; CHECK-NOT: vand + %1 = load <4 x i8>* %in, align 4 + %2 = zext <4 x i8> %1 to <4 x i32> + ret <4 x i32> %2 +} + diff --git a/test/CodeGen/ARM/vext.ll b/test/CodeGen/ARM/vext.ll index e224bdf..f404eb8 100644 --- a/test/CodeGen/ARM/vext.ll +++ b/test/CodeGen/ARM/vext.ll @@ -74,6 +74,39 @@ define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind { ret <16 x i8> %tmp3 } +define <16 x i8> @test_vextq_undef_op2(<16 x i8> %a) nounwind { +;CHECK: test_vextq_undef_op2: +;CHECK: vext +entry: + %tmp1 = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1> + ret <16 x i8> %tmp1 +} + +define <8 x i8> @test_vextd_undef_op2(<8 x i8> %a) nounwind { +;CHECK: test_vextd_undef_op2: +;CHECK: vext +entry: + %tmp1 = shufflevector <8 x i8> %a, <8 x i8> 
undef, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1> + ret <8 x i8> %tmp1 +} + + +define <16 x i8> @test_vextq_undef_op2_undef(<16 x i8> %a) nounwind { +;CHECK: test_vextq_undef_op2_undef: +;CHECK: vext +entry: + %tmp1 = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 4, i32 undef, i32 undef, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1> + ret <16 x i8> %tmp1 +} + +define <8 x i8> @test_vextd_undef_op2_undef(<8 x i8> %a) nounwind { +;CHECK: test_vextd_undef_op2_undef: +;CHECK: vext +entry: + %tmp1 = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 6, i32 7, i32 undef, i32 1> + ret <8 x i8> %tmp1 +} + ; Tests for ReconstructShuffle function. Indices have to be carefully ; chosen to reach lowering phase as a BUILD_VECTOR. diff --git a/test/CodeGen/ARM/vget_lane.ll b/test/CodeGen/ARM/vget_lane.ll index 1fc885d..c9ce3b7 100644 --- a/test/CodeGen/ARM/vget_lane.ll +++ b/test/CodeGen/ARM/vget_lane.ll @@ -200,7 +200,7 @@ define <8 x i16> @vsetQ_lane16(<8 x i16>* %A, i16 %B) nounwind { define <4 x i32> @vsetQ_lane32(<4 x i32>* %A, i32 %B) nounwind { ;CHECK: vsetQ_lane32: -;CHECK: vmov.32 +;CHECK: vmov.32 d{{.*}}[1], r1 %tmp1 = load <4 x i32>* %A %tmp2 = insertelement <4 x i32> %tmp1, i32 %B, i32 1 ret <4 x i32> %tmp2 diff --git a/test/CodeGen/ARM/vselect_imax.ll b/test/CodeGen/ARM/vselect_imax.ll new file mode 100644 index 0000000..f599404 --- /dev/null +++ b/test/CodeGen/ARM/vselect_imax.ll @@ -0,0 +1,12 @@ +; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s +; Make sure that ARM backend with NEON handles vselect. + +define void @vmax_v4i32(<4 x i32>* %m, <4 x i32> %a, <4 x i32> %b) { +; CHECK: vcgt.s32 [[QR:q[0-9]+]], [[Q1:q[0-9]+]], [[Q2:q[0-9]+]] +; CHECK: vbsl [[QR]], [[Q1]], [[Q2]] + %cmpres = icmp sgt <4 x i32> %a, %b + %maxres = select <4 x i1> %cmpres, <4 x i32> %a, <4 x i32> %b + store <4 x i32> %maxres, <4 x i32>* %m + ret void +} + diff --git a/test/CodeGen/CellSPU/icmp16.ll b/test/CodeGen/CellSPU/icmp16.ll index 2f9b091..853ae1d 100644 --- a/test/CodeGen/CellSPU/icmp16.ll +++ b/test/CodeGen/CellSPU/icmp16.ll @@ -534,7 +534,7 @@ entry: define i16 @icmp_slt_immed04_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { ; CHECK: icmp_slt_immed04_i16: ; CHECK: lr -; CHECK-NETX: bi +; CHECK-NEXT: bi entry: %A = icmp slt i16 %arg1, 32768 @@ -559,7 +559,7 @@ define i1 @icmp_sle_setcc_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwi ; CHECK: ilhu ; CHECK: xorhi ; CHECK: iohl -; CHECK-NETX: bi +; CHECK: bi entry: %A = icmp sle i16 %arg1, %arg2 diff --git a/test/CodeGen/Generic/MachineBranchProb.ll b/test/CodeGen/Generic/MachineBranchProb.ll new file mode 100644 index 0000000..802ee2c --- /dev/null +++ b/test/CodeGen/Generic/MachineBranchProb.ll @@ -0,0 +1,32 @@ +; RUN: llc < %s -print-machineinstrs=expand-isel-pseudos -o /dev/null 2>&1 | FileCheck %s + +; Make sure we have the correct weight attached to each successor. 
+define i32 @test2(i32 %x) nounwind uwtable readnone ssp { +; CHECK: Machine code for function test2: +entry: + %conv = sext i32 %x to i64 + switch i64 %conv, label %return [ + i64 0, label %sw.bb + i64 1, label %sw.bb + i64 4, label %sw.bb + i64 5, label %sw.bb1 + ], !prof !0 +; CHECK: BB#0: derived from LLVM BB %entry +; CHECK: Successors according to CFG: BB#2(64) BB#4(14) +; CHECK: BB#4: derived from LLVM BB %entry +; CHECK: Successors according to CFG: BB#1(10) BB#5(4) +; CHECK: BB#5: derived from LLVM BB %entry +; CHECK: Successors according to CFG: BB#1(4) BB#3(7) + +sw.bb: + br label %return + +sw.bb1: + br label %return + +return: + %retval.0 = phi i32 [ 5, %sw.bb1 ], [ 1, %sw.bb ], [ 0, %entry ] + ret i32 %retval.0 +} + +!0 = metadata !{metadata !"branch_weights", i32 7, i32 6, i32 4, i32 4, i32 64} diff --git a/test/CodeGen/Hexagon/args.ll b/test/CodeGen/Hexagon/args.ll index e9ac8b6..8a6efb6 100644 --- a/test/CodeGen/Hexagon/args.ll +++ b/test/CodeGen/Hexagon/args.ll @@ -1,12 +1,12 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv4 -disable-dfa-sched < %s | FileCheck %s +; RUN: llc -march=hexagon -mcpu=hexagonv4 -disable-hexagon-misched < %s | FileCheck %s ; CHECK: r[[T0:[0-9]+]] = #7 ; CHECK: memw(r29 + #0) = r[[T0]] +; CHECK: r5 = #6 ; CHECK: r0 = #1 ; CHECK: r1 = #2 ; CHECK: r2 = #3 ; CHECK: r3 = #4 ; CHECK: r4 = #5 -; CHECK: r5 = #6 define void @foo() nounwind { diff --git a/test/CodeGen/Hexagon/newvaluestore.ll b/test/CodeGen/Hexagon/newvaluestore.ll index ab69b22..186e393 100644 --- a/test/CodeGen/Hexagon/newvaluestore.ll +++ b/test/CodeGen/Hexagon/newvaluestore.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s +; RUN: llc -march=hexagon -mcpu=hexagonv4 -disable-hexagon-misched < %s | FileCheck %s ; Check that we generate new value store packet in V4 @i = global i32 0, align 4 diff --git a/test/CodeGen/Hexagon/remove_lsr.ll b/test/CodeGen/Hexagon/remove_lsr.ll new file mode 100644 index 0000000..79b5f4a --- /dev/null +++ b/test/CodeGen/Hexagon/remove_lsr.ll @@ -0,0 +1,80 @@ +; Test fix for PR-13709. +; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s +; CHECK: foo +; CHECK-NOT: lsr(r{{[0-9]+}}:{{[0-9]+}}, #32) +; CHECK-NOT: lsr(r{{[0-9]+}}:{{[0-9]+}}, #32) + +; Convert the sequence +; r17:16 = lsr(r11:10, #32) +; .. = r16 +; into +; r17:16 = lsr(r11:10, #32) +; .. = r11 +; This makes the lsr instruction dead and it gets removed subsequently +; by a dead code removal pass. 
+ +%union.vect64 = type { i64 } +%union.vect32 = type { i32 } + +define void @foo(%union.vect64* nocapture %sss_extracted_bit_rx_data_ptr, + %union.vect32* nocapture %s_even, %union.vect32* nocapture %s_odd, + i8* nocapture %scr_s_even_code_ptr, i8* nocapture %scr_s_odd_code_ptr) + nounwind { +entry: + %scevgep = getelementptr %union.vect64* %sss_extracted_bit_rx_data_ptr, i32 1 + %scevgep28 = getelementptr %union.vect32* %s_odd, i32 1 + %scevgep32 = getelementptr %union.vect32* %s_even, i32 1 + %scevgep36 = getelementptr i8* %scr_s_odd_code_ptr, i32 1 + %scevgep39 = getelementptr i8* %scr_s_even_code_ptr, i32 1 + br label %for.body + +for.body: ; preds = %for.body, %entry + %lsr.iv42 = phi i32 [ %lsr.iv.next, %for.body ], [ 2, %entry ] + %lsr.iv40 = phi i8* [ %scevgep41, %for.body ], [ %scevgep39, %entry ] + %lsr.iv37 = phi i8* [ %scevgep38, %for.body ], [ %scevgep36, %entry ] + %lsr.iv33 = phi %union.vect32* [ %scevgep34, %for.body ], [ %scevgep32, %entry ] + %lsr.iv29 = phi %union.vect32* [ %scevgep30, %for.body ], [ %scevgep28, %entry ] + %lsr.iv = phi %union.vect64* [ %scevgep26, %for.body ], [ %scevgep, %entry ] + %predicate_1.023 = phi i8 [ undef, %entry ], [ %10, %for.body ] + %predicate.022 = phi i8 [ undef, %entry ], [ %9, %for.body ] + %val.021 = phi i64 [ undef, %entry ], [ %srcval, %for.body ] + %lsr.iv3335 = bitcast %union.vect32* %lsr.iv33 to i32* + %lsr.iv2931 = bitcast %union.vect32* %lsr.iv29 to i32* + %lsr.iv27 = bitcast %union.vect64* %lsr.iv to i64* + %0 = tail call i64 @llvm.hexagon.A2.vsubhs(i64 0, i64 %val.021) + %conv3 = sext i8 %predicate.022 to i32 + %1 = trunc i64 %val.021 to i32 + %2 = trunc i64 %0 to i32 + %3 = tail call i32 @llvm.hexagon.C2.mux(i32 %conv3, i32 %1, i32 %2) + store i32 %3, i32* %lsr.iv3335, align 4, !tbaa !0 + %conv8 = sext i8 %predicate_1.023 to i32 + %4 = lshr i64 %val.021, 32 + %5 = trunc i64 %4 to i32 + %6 = lshr i64 %0, 32 + %7 = trunc i64 %6 to i32 + %8 = tail call i32 @llvm.hexagon.C2.mux(i32 %conv8, i32 %5, i32 %7) + store i32 %8, i32* %lsr.iv2931, align 4, !tbaa !0 + %srcval = load i64* %lsr.iv27, align 8 + %9 = load i8* %lsr.iv40, align 1, !tbaa !1 + %10 = load i8* %lsr.iv37, align 1, !tbaa !1 + %lftr.wideiv = trunc i32 %lsr.iv42 to i8 + %exitcond = icmp eq i8 %lftr.wideiv, 32 + %scevgep26 = getelementptr %union.vect64* %lsr.iv, i32 1 + %scevgep30 = getelementptr %union.vect32* %lsr.iv29, i32 1 + %scevgep34 = getelementptr %union.vect32* %lsr.iv33, i32 1 + %scevgep38 = getelementptr i8* %lsr.iv37, i32 1 + %scevgep41 = getelementptr i8* %lsr.iv40, i32 1 + %lsr.iv.next = add i32 %lsr.iv42, 1 + br i1 %exitcond, label %for.end, label %for.body + +for.end: ; preds = %for.body + ret void +} + +declare i64 @llvm.hexagon.A2.vsubhs(i64, i64) nounwind readnone + +declare i32 @llvm.hexagon.C2.mux(i32, i32, i32) nounwind readnone + +!0 = metadata !{metadata !"long", metadata !1} +!1 = metadata !{metadata !"omnipotent char", metadata !2} +!2 = metadata !{metadata !"Simple C/C++ TBAA", null} diff --git a/test/CodeGen/Hexagon/static.ll b/test/CodeGen/Hexagon/static.ll index 2e4ab63..683a4c2 100644 --- a/test/CodeGen/Hexagon/static.ll +++ b/test/CodeGen/Hexagon/static.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv4 -disable-dfa-sched < %s | FileCheck %s +; RUN: llc -march=hexagon -mcpu=hexagonv4 -disable-dfa-sched -disable-hexagon-misched < %s | FileCheck %s @num = external global i32 @acc = external global i32 diff --git a/test/CodeGen/MSP430/fp.ll b/test/CodeGen/MSP430/fp.ll new file mode 100644 index 0000000..c3273ef --- /dev/null 
+++ b/test/CodeGen/MSP430/fp.ll @@ -0,0 +1,17 @@ +; RUN: llc -O0 -disable-fp-elim < %s | FileCheck %s + +target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16" +target triple = "msp430---elf" + +define void @fp() nounwind { +entry: +; CHECK: fp: +; CHECK: push.w r4 +; CHECK: mov.w r1, r4 +; CHECK: sub.w #2, r1 + %i = alloca i16, align 2 +; CHECK: mov.w #0, -2(r4) + store i16 0, i16* %i, align 2 +; CHECK: pop.w r4 + ret void +} diff --git a/test/CodeGen/Mips/alloca16.ll b/test/CodeGen/Mips/alloca16.ll new file mode 100644 index 0000000..731edae --- /dev/null +++ b/test/CodeGen/Mips/alloca16.ll @@ -0,0 +1,75 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@iiii = global i32 25, align 4 +@jjjj = global i32 35, align 4 +@kkkk = global i32 100, align 4 +@t = global i32 25, align 4 +@riii = common global i32 0, align 4 +@rjjj = common global i32 0, align 4 +@rkkk = common global i32 0, align 4 + +define void @temp(i32 %foo) nounwind { +entry: + %foo.addr = alloca i32, align 4 + store i32 %foo, i32* %foo.addr, align 4 + %0 = load i32* %foo.addr, align 4 + store i32 %0, i32* @t, align 4 + ret void +} + +define void @test() nounwind { +entry: +; 16: .frame $16,24,$ra +; 16: save $ra, $s0, $s1, 24 +; 16: move $16, $sp +; 16: move ${{[0-9]+}}, $sp +; 16: subu $[[REGISTER:[0-9]+]], ${{[0-9]+}}, ${{[0-9]+}} +; 16: move $sp, $[[REGISTER]] + %sssi = alloca i32, align 4 + %ip = alloca i32*, align 4 + %sssj = alloca i32, align 4 + %0 = load i32* @iiii, align 4 + store i32 %0, i32* %sssi, align 4 + %1 = load i32* @kkkk, align 4 + %mul = mul nsw i32 %1, 100 + %2 = alloca i8, i32 %mul + %3 = bitcast i8* %2 to i32* + store i32* %3, i32** %ip, align 4 + %4 = load i32* @jjjj, align 4 + store i32 %4, i32* %sssj, align 4 + %5 = load i32* @jjjj, align 4 + %6 = load i32* @iiii, align 4 + %7 = load i32** %ip, align 4 + %arrayidx = getelementptr inbounds i32* %7, i32 %6 + store i32 %5, i32* %arrayidx, align 4 + %8 = load i32* @kkkk, align 4 + %9 = load i32* @jjjj, align 4 + %10 = load i32** %ip, align 4 + %arrayidx1 = getelementptr inbounds i32* %10, i32 %9 + store i32 %8, i32* %arrayidx1, align 4 + %11 = load i32* @iiii, align 4 + %12 = load i32* @kkkk, align 4 + %13 = load i32** %ip, align 4 + %arrayidx2 = getelementptr inbounds i32* %13, i32 %12 + store i32 %11, i32* %arrayidx2, align 4 + %14 = load i32** %ip, align 4 + %arrayidx3 = getelementptr inbounds i32* %14, i32 25 + %15 = load i32* %arrayidx3, align 4 + store i32 %15, i32* @riii, align 4 + %16 = load i32** %ip, align 4 + %arrayidx4 = getelementptr inbounds i32* %16, i32 35 + %17 = load i32* %arrayidx4, align 4 + store i32 %17, i32* @rjjj, align 4 + %18 = load i32** %ip, align 4 + %arrayidx5 = getelementptr inbounds i32* %18, i32 100 + %19 = load i32* %arrayidx5, align 4 + store i32 %19, i32* @rkkk, align 4 + %20 = load i32* @t, align 4 + %21 = load i32** %ip, align 4 + %arrayidx6 = getelementptr inbounds i32* %21, i32 %20 + %22 = load i32* %arrayidx6, align 4 +; 16: save 16 + call void @temp(i32 %22) +; 16: restore 16 + ret void +} diff --git a/test/CodeGen/Mips/atomic.ll b/test/CodeGen/Mips/atomic.ll index 050689d..819f258 100644 --- a/test/CodeGen/Mips/atomic.ll +++ b/test/CodeGen/Mips/atomic.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=mipsel < %s | FileCheck %s +; RUN: llc -march=mipsel --disable-machine-licm < %s | FileCheck %s @x = common global i32 0, align 4 @@ -181,8 +181,9 @@ entry: ; CHECK: $[[BB0:[A-Z_0-9]+]]: ; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]]) +; CHECK: and $[[R18:[0-9]+]], 
$[[R9]], $[[R6]] ; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]] -; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R9]] +; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R18]] ; CHECK: sc $[[R14]], 0($[[R2]]) ; CHECK: beq $[[R14]], $zero, $[[BB0]] diff --git a/test/CodeGen/Mips/atomicops.ll b/test/CodeGen/Mips/atomicops.ll new file mode 100644 index 0000000..b9c3804 --- /dev/null +++ b/test/CodeGen/Mips/atomicops.ll @@ -0,0 +1,40 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@.str = private unnamed_addr constant [8 x i8] c"%d, %d\0A\00", align 1 + +define i32 @foo(i32* %mem, i32 %val, i32 %c) nounwind { +entry: + %0 = atomicrmw add i32* %mem, i32 %val seq_cst + %add = add nsw i32 %0, %c + ret i32 %add +; 16: foo: +; 16: lw ${{[0-9]+}}, %call16(__sync_synchronize)(${{[0-9]+}}) +; 16: lw ${{[0-9]+}}, %call16(__sync_fetch_and_add_4)(${{[0-9]+}}) +} + +define i32 @main() nounwind { +entry: + %x = alloca i32, align 4 + store volatile i32 0, i32* %x, align 4 + %0 = atomicrmw add i32* %x, i32 1 seq_cst + %add.i = add nsw i32 %0, 2 + %1 = load volatile i32* %x, align 4 + %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %add.i, i32 %1) nounwind + %2 = cmpxchg i32* %x, i32 1, i32 2 seq_cst + %3 = load volatile i32* %x, align 4 + %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %2, i32 %3) nounwind + %4 = atomicrmw xchg i32* %x, i32 1 seq_cst + %5 = load volatile i32* %x, align 4 + %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %4, i32 %5) nounwind +; 16: main: +; 16: lw ${{[0-9]+}}, %call16(__sync_synchronize)(${{[0-9]+}}) +; 16: lw ${{[0-9]+}}, %call16(__sync_fetch_and_add_4)(${{[0-9]+}}) +; 16: lw ${{[0-9]+}}, %call16(__sync_val_compare_and_swap_4)(${{[0-9]+}}) +; 16: lw ${{[0-9]+}}, %call16(__sync_lock_test_and_set_4)(${{[0-9]+}}) + + ret i32 0 +} + +declare i32 @printf(i8* nocapture, ...) 
nounwind + + diff --git a/test/CodeGen/Mips/brconeq.ll b/test/CodeGen/Mips/brconeq.ll new file mode 100644 index 0000000..6133915 --- /dev/null +++ b/test/CodeGen/Mips/brconeq.ll @@ -0,0 +1,38 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@i = global i32 5, align 4 +@j = global i32 10, align 4 +@result = global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @i, align 4 + %1 = load i32* @j, align 4 + %cmp = icmp eq i32 %0, %1 +; 16: cmp ${{[0-9]+}}, ${{[0-9]+}} +; 16: bteqz $[[LABEL:[0-9A-Ba-b_]+]] +; 16: $[[LABEL]]: + br i1 %cmp, label %if.end, label %if.then + +if.then: ; preds = %entry + store i32 1, i32* @result, align 4 + br label %if.end + +if.end: ; preds = %entry, %if.then + ret void +} + + + + + + + + + + + + + + + diff --git a/test/CodeGen/Mips/brconeqk.ll b/test/CodeGen/Mips/brconeqk.ll new file mode 100644 index 0000000..2c0e72d --- /dev/null +++ b/test/CodeGen/Mips/brconeqk.ll @@ -0,0 +1,22 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@i = global i32 5, align 4 +@result = global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @i, align 4 + %cmp = icmp eq i32 %0, 10 + br i1 %cmp, label %if.end, label %if.then +; 16: cmpi ${{[0-9]+}}, {{[0-9]+}} +; 16: bteqz $[[LABEL:[0-9A-Ba-b_]+]] +; 16: $[[LABEL]]: +if.then: ; preds = %entry + store i32 1, i32* @result, align 4 + br label %if.end + +if.end: ; preds = %entry, %if.then + ret void +} + + diff --git a/test/CodeGen/Mips/brconeqz.ll b/test/CodeGen/Mips/brconeqz.ll new file mode 100644 index 0000000..5586e7b --- /dev/null +++ b/test/CodeGen/Mips/brconeqz.ll @@ -0,0 +1,20 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@i = global i32 5, align 4 +@result = global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @i, align 4 + %cmp = icmp eq i32 %0, 0 + br i1 %cmp, label %if.end, label %if.then +; 16: beqz ${{[0-9]+}}, $[[LABEL:[0-9A-Ba-b_]+]] +; 16: $[[LABEL]]: +if.then: ; preds = %entry + store i32 1, i32* @result, align 4 + br label %if.end + +if.end: ; preds = %entry, %if.then + ret void +} + diff --git a/test/CodeGen/Mips/brconge.ll b/test/CodeGen/Mips/brconge.ll new file mode 100644 index 0000000..02f0a63 --- /dev/null +++ b/test/CodeGen/Mips/brconge.ll @@ -0,0 +1,37 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@i = global i32 5, align 4 +@j = global i32 10, align 4 +@k = global i32 5, align 4 +@result1 = global i32 0, align 4 +@result2 = global i32 1, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @i, align 4 + %1 = load i32* @j, align 4 + %cmp = icmp slt i32 %0, %1 + br i1 %cmp, label %if.then, label %if.end + +; 16: slt ${{[0-9]+}}, ${{[0-9]+}} +; 16: bteqz $[[LABEL:[0-9A-Ba-b_]+]] +; 16: $[[LABEL]]: + +if.then: ; preds = %entry + store i32 1, i32* @result1, align 4 + br label %if.end + +if.end: ; preds = %if.then, %entry + %2 = load i32* @k, align 4 + %cmp1 = icmp slt i32 %0, %2 + br i1 %cmp1, label %if.then2, label %if.end3 + +if.then2: ; preds = %if.end + store i32 1, i32* @result1, align 4 + br label %if.end3 + +if.end3: ; preds = %if.then2, %if.end + ret void +} + + diff --git a/test/CodeGen/Mips/brcongt.ll b/test/CodeGen/Mips/brcongt.ll new file mode 100644 index 0000000..767b51b --- /dev/null +++ b/test/CodeGen/Mips/brcongt.ll @@ -0,0 +1,25 @@ +; RUN: llc -march=mipsel -mcpu=mips16 
-relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@i = global i32 5, align 4 +@j = global i32 10, align 4 +@k = global i32 5, align 4 +@result = global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @i, align 4 + %1 = load i32* @j, align 4 + %cmp = icmp sgt i32 %0, %1 + br i1 %cmp, label %if.end, label %if.then +; 16: slt ${{[0-9]+}}, ${{[0-9]+}} +; 16: btnez $[[LABEL:[0-9A-Ba-b_]+]] +; 16: $[[LABEL]]: +if.then: ; preds = %entry + store i32 1, i32* @result, align 4 + br label %if.end + +if.end: ; preds = %entry, %if.then + ret void +} + + diff --git a/test/CodeGen/Mips/brconle.ll b/test/CodeGen/Mips/brconle.ll new file mode 100644 index 0000000..854b248 --- /dev/null +++ b/test/CodeGen/Mips/brconle.ll @@ -0,0 +1,37 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@i = global i32 -5, align 4 +@j = global i32 10, align 4 +@k = global i32 -5, align 4 +@result1 = global i32 0, align 4 +@result2 = global i32 1, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @j, align 4 + %1 = load i32* @i, align 4 + %cmp = icmp sgt i32 %0, %1 + br i1 %cmp, label %if.then, label %if.end + +; 16: slt ${{[0-9]+}}, ${{[0-9]+}} +; 16: bteqz $[[LABEL:[0-9A-Ba-b_]+]] +; 16: $[[LABEL]]: + +if.then: ; preds = %entry + store i32 1, i32* @result1, align 4 + br label %if.end + +if.end: ; preds = %if.then, %entry + %2 = load i32* @k, align 4 + %cmp1 = icmp sgt i32 %1, %2 + br i1 %cmp1, label %if.then2, label %if.end3 + +if.then2: ; preds = %if.end + store i32 0, i32* @result1, align 4 + br label %if.end3 + +if.end3: ; preds = %if.then2, %if.end + ret void +} + + diff --git a/test/CodeGen/Mips/brconlt.ll b/test/CodeGen/Mips/brconlt.ll new file mode 100644 index 0000000..931a3e8 --- /dev/null +++ b/test/CodeGen/Mips/brconlt.ll @@ -0,0 +1,27 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@i = global i32 5, align 4 +@j = global i32 10, align 4 +@k = global i32 5, align 4 +@result = global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @j, align 4 + %1 = load i32* @i, align 4 + %cmp = icmp slt i32 %0, %1 + br i1 %cmp, label %if.end, label %if.then + +; 16: slt ${{[0-9]+}}, ${{[0-9]+}} +; 16: btnez $[[LABEL:[0-9A-Ba-b_]+]] +; 16: $[[LABEL]]: + +if.then: ; preds = %entry + store i32 1, i32* @result, align 4 + br label %if.end + +if.end: ; preds = %entry, %if.then + ret void +} + + diff --git a/test/CodeGen/Mips/brconne.ll b/test/CodeGen/Mips/brconne.ll new file mode 100644 index 0000000..5d5bde3 --- /dev/null +++ b/test/CodeGen/Mips/brconne.ll @@ -0,0 +1,26 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@i = global i32 5, align 4 +@j = global i32 5, align 4 +@result = global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @j, align 4 + %1 = load i32* @i, align 4 + %cmp = icmp eq i32 %0, %1 + br i1 %cmp, label %if.then, label %if.end +; 16: cmp ${{[0-9]+}}, ${{[0-9]+}} +; 16: btnez $[[LABEL:[0-9A-Ba-b_]+]] +; 16: lw ${{[0-9]+}}, %got(result)(${{[0-9]+}}) +; 16: $[[LABEL]]: + +if.then: ; preds = %entry + store i32 1, i32* @result, align 4 + br label %if.end + +if.end: ; preds = %if.then, %entry + ret void +} + + diff --git a/test/CodeGen/Mips/brconnek.ll b/test/CodeGen/Mips/brconnek.ll new file mode 100644 index 0000000..6208d7c --- /dev/null +++ b/test/CodeGen/Mips/brconnek.ll @@ -0,0 +1,25 @@ +; RUN: llc -march=mipsel -mcpu=mips16 
-relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@j = global i32 5, align 4 +@result = global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @j, align 4 + %cmp = icmp eq i32 %0, 5 + br i1 %cmp, label %if.then, label %if.end + +; 16: cmpi ${{[0-9]+}}, {{[0-9]+}} +; 16: btnez $[[LABEL:[0-9A-Ba-b_]+]] +; 16: lw ${{[0-9]+}}, %got(result)(${{[0-9]+}}) +; 16: $[[LABEL]]: + +if.then: ; preds = %entry + store i32 1, i32* @result, align 4 + br label %if.end + +if.end: ; preds = %if.then, %entry + ret void +} + + diff --git a/test/CodeGen/Mips/brconnez.ll b/test/CodeGen/Mips/brconnez.ll new file mode 100644 index 0000000..47db790 --- /dev/null +++ b/test/CodeGen/Mips/brconnez.ll @@ -0,0 +1,24 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@j = global i32 0, align 4 +@result = global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @j, align 4 + %cmp = icmp eq i32 %0, 0 + br i1 %cmp, label %if.then, label %if.end + +; 16: bnez ${{[0-9]+}}, $[[LABEL:[0-9A-Ba-b_]+]] +; 16: lw ${{[0-9]+}}, %got(result)(${{[0-9]+}}) +; 16: $[[LABEL]]: + +if.then: ; preds = %entry + store i32 1, i32* @result, align 4 + br label %if.end + +if.end: ; preds = %if.then, %entry + ret void +} + + diff --git a/test/CodeGen/Mips/brdelayslot.ll b/test/CodeGen/Mips/brdelayslot.ll index b266ce6..2fdb736 100644 --- a/test/CodeGen/Mips/brdelayslot.ll +++ b/test/CodeGen/Mips/brdelayslot.ll @@ -1,15 +1,37 @@ -; RUN: llc -march=mipsel -enable-mips-delay-filler < %s | FileCheck %s +; RUN: llc -march=mipsel -O0 < %s | FileCheck %s -check-prefix=None +; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=Default define void @foo1() nounwind { entry: -; CHECK: jalr -; CHECK-NOT: nop -; CHECK: jr -; CHECK-NOT: nop -; CHECK: .end +; Default: jalr +; Default-NOT: nop +; Default: jr +; Default-NOT: nop +; Default: .end +; None: jalr +; None: nop +; None: jr +; None: nop +; None: .end tail call void @foo2(i32 3) nounwind ret void } declare void @foo2(i32) + +; Check that cvt.d.w goes into jalr's delay slot. 
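+; With the delay slot filler enabled (the default pipeline), the cvt.d.w
+; emitted for the sitofp below is expected to land in the jalr's delay slot,
+; which is why the Default checks look for cvt.d.w immediately after jalr.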
+; +define void @foo3(i32 %a) nounwind { +entry: +; Default: foo3: +; Default: jalr +; Default: cvt.d.w + + %conv = sitofp i32 %a to double + tail call void @foo4(double %conv) nounwind + ret void +} + +declare void @foo4(double) + diff --git a/test/CodeGen/Mips/brind.ll b/test/CodeGen/Mips/brind.ll new file mode 100644 index 0000000..4c591fa --- /dev/null +++ b/test/CodeGen/Mips/brind.ll @@ -0,0 +1,40 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@main.L = internal unnamed_addr constant [5 x i8*] [i8* blockaddress(@main, %L1), i8* blockaddress(@main, %L2), i8* blockaddress(@main, %L3), i8* blockaddress(@main, %L4), i8* null], align 4 +@str = private unnamed_addr constant [2 x i8] c"A\00" +@str5 = private unnamed_addr constant [2 x i8] c"B\00" +@str6 = private unnamed_addr constant [2 x i8] c"C\00" +@str7 = private unnamed_addr constant [2 x i8] c"D\00" +@str8 = private unnamed_addr constant [2 x i8] c"E\00" + +define i32 @main() nounwind { +entry: + %puts = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str, i32 0, i32 0)) + br label %L1 + +L1: ; preds = %entry, %L3 + %i.0 = phi i32 [ 0, %entry ], [ %inc, %L3 ] + %puts5 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str5, i32 0, i32 0)) + br label %L2 + +L2: ; preds = %L1, %L3 + %i.1 = phi i32 [ %i.0, %L1 ], [ %inc, %L3 ] + %puts6 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str6, i32 0, i32 0)) + br label %L3 + +L3: ; preds = %L2, %L3 + %i.2 = phi i32 [ %i.1, %L2 ], [ %inc, %L3 ] + %puts7 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str7, i32 0, i32 0)) + %inc = add i32 %i.2, 1 + %arrayidx = getelementptr inbounds [5 x i8*]* @main.L, i32 0, i32 %i.2 + %0 = load i8** %arrayidx, align 4 + indirectbr i8* %0, [label %L1, label %L2, label %L3, label %L4] +; 16: jrc ${{[0-9]+}} +L4: ; preds = %L3 + %puts8 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str8, i32 0, i32 0)) + ret i32 0 +} + +declare i32 @puts(i8* nocapture) nounwind + + diff --git a/test/CodeGen/Mips/check-noat.ll b/test/CodeGen/Mips/check-noat.ll new file mode 100644 index 0000000..bfeff67 --- /dev/null +++ b/test/CodeGen/Mips/check-noat.ll @@ -0,0 +1,11 @@ +; RUN: llc -march=mipsel < %s | FileCheck %s + +define void @f() nounwind readnone { +entry: +; CHECK: f: +; CHECK: .set noat +; CHECK: .set at + + ret void +} + diff --git a/test/CodeGen/Mips/div.ll b/test/CodeGen/Mips/div.ll new file mode 100644 index 0000000..00e2c19 --- /dev/null +++ b/test/CodeGen/Mips/div.ll @@ -0,0 +1,18 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@iiii = global i32 100, align 4 +@jjjj = global i32 -4, align 4 +@kkkk = common global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @iiii, align 4 + %1 = load i32* @jjjj, align 4 + %div = sdiv i32 %0, %1 +; 16: div $zero, ${{[0-9]+}}, ${{[0-9]+}} +; 16: mflo ${{[0-9]+}} + store i32 %div, i32* @kkkk, align 4 + ret void +} + + diff --git a/test/CodeGen/Mips/div_rem.ll b/test/CodeGen/Mips/div_rem.ll new file mode 100644 index 0000000..950192e --- /dev/null +++ b/test/CodeGen/Mips/div_rem.ll @@ -0,0 +1,21 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@iiii = global i32 103, align 4 +@jjjj = global i32 -4, align 4 +@kkkk = common global i32 0, align 4 +@llll = common global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @iiii, align 4 + %1 = load i32* 
@jjjj, align 4 + %div = sdiv i32 %0, %1 + store i32 %div, i32* @kkkk, align 4 + %rem = srem i32 %0, %1 +; 16: div $zero, ${{[0-9]+}}, ${{[0-9]+}} +; 16: mflo ${{[0-9]+}} +; 16: mfhi ${{[0-9]+}} + store i32 %rem, i32* @llll, align 4 + ret void +} + diff --git a/test/CodeGen/Mips/divu.ll b/test/CodeGen/Mips/divu.ll new file mode 100644 index 0000000..b96a439 --- /dev/null +++ b/test/CodeGen/Mips/divu.ll @@ -0,0 +1,18 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@iiii = global i32 100, align 4 +@jjjj = global i32 4, align 4 +@kkkk = common global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @iiii, align 4 + %1 = load i32* @jjjj, align 4 + %div = udiv i32 %0, %1 +; 16: divu $zero, ${{[0-9]+}}, ${{[0-9]+}} +; 16: mflo ${{[0-9]+}} + store i32 %div, i32* @kkkk, align 4 + ret void +} + + diff --git a/test/CodeGen/Mips/divu_remu.ll b/test/CodeGen/Mips/divu_remu.ll new file mode 100644 index 0000000..a6c1563 --- /dev/null +++ b/test/CodeGen/Mips/divu_remu.ll @@ -0,0 +1,23 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@iiii = global i32 103, align 4 +@jjjj = global i32 4, align 4 +@kkkk = common global i32 0, align 4 +@llll = common global i32 0, align 4 + + +define void @test() nounwind { +entry: + %0 = load i32* @iiii, align 4 + %1 = load i32* @jjjj, align 4 + %div = udiv i32 %0, %1 + store i32 %div, i32* @kkkk, align 4 + %rem = urem i32 %0, %1 +; 16: divu $zero, ${{[0-9]+}}, ${{[0-9]+}} +; 16: mflo ${{[0-9]+}} +; 16: mfhi ${{[0-9]+}} + store i32 %rem, i32* @llll, align 4 + ret void +} + + diff --git a/test/CodeGen/Mips/dsp-r1.ll b/test/CodeGen/Mips/dsp-r1.ll new file mode 100644 index 0000000..c9dc8cf --- /dev/null +++ b/test/CodeGen/Mips/dsp-r1.ll @@ -0,0 +1,1241 @@ +; RUN: llc -march=mipsel -mattr=+dsp < %s | FileCheck %s + +define i32 @test__builtin_mips_extr_w1(i32 %i0, i32, i64 %a0) nounwind { +entry: +; CHECK: extr.w + + %1 = tail call i32 @llvm.mips.extr.w(i64 %a0, i32 15) + ret i32 %1 +} + +declare i32 @llvm.mips.extr.w(i64, i32) nounwind + +define i32 @test__builtin_mips_extr_w2(i32 %i0, i32, i64 %a0, i32 %a1) nounwind { +entry: +; CHECK: extrv.w + + %1 = tail call i32 @llvm.mips.extr.w(i64 %a0, i32 %a1) + ret i32 %1 +} + +define i32 @test__builtin_mips_extr_r_w1(i32 %i0, i32, i64 %a0) nounwind { +entry: +; CHECK: extr_r.w + + %1 = tail call i32 @llvm.mips.extr.r.w(i64 %a0, i32 15) + ret i32 %1 +} + +declare i32 @llvm.mips.extr.r.w(i64, i32) nounwind + +define i32 @test__builtin_mips_extr_s_h1(i32 %i0, i32, i64 %a0, i32 %a1) nounwind { +entry: +; CHECK: extrv_s.h + + %1 = tail call i32 @llvm.mips.extr.s.h(i64 %a0, i32 %a1) + ret i32 %1 +} + +declare i32 @llvm.mips.extr.s.h(i64, i32) nounwind + +define i32 @test__builtin_mips_extr_rs_w1(i32 %i0, i32, i64 %a0) nounwind { +entry: +; CHECK: extr_rs.w + + %1 = tail call i32 @llvm.mips.extr.rs.w(i64 %a0, i32 15) + ret i32 %1 +} + +declare i32 @llvm.mips.extr.rs.w(i64, i32) nounwind + +define i32 @test__builtin_mips_extr_rs_w2(i32 %i0, i32, i64 %a0, i32 %a1) nounwind { +entry: +; CHECK: extrv_rs.w + + %1 = tail call i32 @llvm.mips.extr.rs.w(i64 %a0, i32 %a1) + ret i32 %1 +} + +define i32 @test__builtin_mips_extr_s_h2(i32 %i0, i32, i64 %a0) nounwind { +entry: +; CHECK: extr_s.h + + %1 = tail call i32 @llvm.mips.extr.s.h(i64 %a0, i32 15) + ret i32 %1 +} + +define i32 @test__builtin_mips_extr_r_w2(i32 %i0, i32, i64 %a0, i32 %a1) nounwind { +entry: +; CHECK: extrv_r.w + + %1 = tail call i32 
@llvm.mips.extr.r.w(i64 %a0, i32 %a1) + ret i32 %1 +} + +define i32 @test__builtin_mips_extp1(i32 %i0, i32, i64 %a0) nounwind { +entry: +; CHECK: extp ${{[0-9]+}} + + %1 = tail call i32 @llvm.mips.extp(i64 %a0, i32 15) + ret i32 %1 +} + +declare i32 @llvm.mips.extp(i64, i32) nounwind + +define i32 @test__builtin_mips_extp2(i32 %i0, i32, i64 %a0, i32 %a1) nounwind { +entry: +; CHECK: extpv + + %1 = tail call i32 @llvm.mips.extp(i64 %a0, i32 %a1) + ret i32 %1 +} + +define i32 @test__builtin_mips_extpdp1(i32 %i0, i32, i64 %a0) nounwind { +entry: +; CHECK: extpdp ${{[0-9]+}} + + %1 = tail call i32 @llvm.mips.extpdp(i64 %a0, i32 15) + ret i32 %1 +} + +declare i32 @llvm.mips.extpdp(i64, i32) nounwind + +define i32 @test__builtin_mips_extpdp2(i32 %i0, i32, i64 %a0, i32 %a1) nounwind { +entry: +; CHECK: extpdpv + + %1 = tail call i32 @llvm.mips.extpdp(i64 %a0, i32 %a1) + ret i32 %1 +} + +define i64 @test__builtin_mips_dpau_h_qbl1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind readnone { +entry: +; CHECK: dpau.h.qbl + + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = bitcast i32 %a2.coerce to <4 x i8> + %3 = tail call i64 @llvm.mips.dpau.h.qbl(i64 %a0, <4 x i8> %1, <4 x i8> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.dpau.h.qbl(i64, <4 x i8>, <4 x i8>) nounwind readnone + +define i64 @test__builtin_mips_dpau_h_qbr1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind readnone { +entry: +; CHECK: dpau.h.qbr + + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = bitcast i32 %a2.coerce to <4 x i8> + %3 = tail call i64 @llvm.mips.dpau.h.qbr(i64 %a0, <4 x i8> %1, <4 x i8> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.dpau.h.qbr(i64, <4 x i8>, <4 x i8>) nounwind readnone + +define i64 @test__builtin_mips_dpsu_h_qbl1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind readnone { +entry: +; CHECK: dpsu.h.qbl + + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = bitcast i32 %a2.coerce to <4 x i8> + %3 = tail call i64 @llvm.mips.dpsu.h.qbl(i64 %a0, <4 x i8> %1, <4 x i8> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.dpsu.h.qbl(i64, <4 x i8>, <4 x i8>) nounwind readnone + +define i64 @test__builtin_mips_dpsu_h_qbr1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind readnone { +entry: +; CHECK: dpsu.h.qbr + + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = bitcast i32 %a2.coerce to <4 x i8> + %3 = tail call i64 @llvm.mips.dpsu.h.qbr(i64 %a0, <4 x i8> %1, <4 x i8> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.dpsu.h.qbr(i64, <4 x i8>, <4 x i8>) nounwind readnone + +define i64 @test__builtin_mips_dpaq_s_w_ph1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind { +entry: +; CHECK: dpaq_s.w.ph + + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = bitcast i32 %a2.coerce to <2 x i16> + %3 = tail call i64 @llvm.mips.dpaq.s.w.ph(i64 %a0, <2 x i16> %1, <2 x i16> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.dpaq.s.w.ph(i64, <2 x i16>, <2 x i16>) nounwind + +define i64 @test__builtin_mips_dpaq_sa_l_w1(i32 %i0, i32, i64 %a0, i32 %a1, i32 %a2) nounwind { +entry: +; CHECK: dpaq_sa.l.w + + %1 = tail call i64 @llvm.mips.dpaq.sa.l.w(i64 %a0, i32 %a1, i32 %a2) + ret i64 %1 +} + +declare i64 @llvm.mips.dpaq.sa.l.w(i64, i32, i32) nounwind + +define i64 @test__builtin_mips_dpsq_s_w_ph1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind { +entry: +; CHECK: dpsq_s.w.ph + + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = bitcast i32 %a2.coerce to <2 x i16> + %3 = tail call i64 @llvm.mips.dpsq.s.w.ph(i64 %a0, <2 x i16> %1, <2 x i16> %2) + ret i64 %3 +} + +declare i64 
@llvm.mips.dpsq.s.w.ph(i64, <2 x i16>, <2 x i16>) nounwind + +define i64 @test__builtin_mips_dpsq_sa_l_w1(i32 %i0, i32, i64 %a0, i32 %a1, i32 %a2) nounwind { +entry: +; CHECK: dpsq_sa.l.w + + %1 = tail call i64 @llvm.mips.dpsq.sa.l.w(i64 %a0, i32 %a1, i32 %a2) + ret i64 %1 +} + +declare i64 @llvm.mips.dpsq.sa.l.w(i64, i32, i32) nounwind + +define i64 @test__builtin_mips_mulsaq_s_w_ph1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind { +entry: +; CHECK: mulsaq_s.w.ph + + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = bitcast i32 %a2.coerce to <2 x i16> + %3 = tail call i64 @llvm.mips.mulsaq.s.w.ph(i64 %a0, <2 x i16> %1, <2 x i16> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.mulsaq.s.w.ph(i64, <2 x i16>, <2 x i16>) nounwind + +define i64 @test__builtin_mips_maq_s_w_phl1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind { +entry: +; CHECK: maq_s.w.phl + + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = bitcast i32 %a2.coerce to <2 x i16> + %3 = tail call i64 @llvm.mips.maq.s.w.phl(i64 %a0, <2 x i16> %1, <2 x i16> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.maq.s.w.phl(i64, <2 x i16>, <2 x i16>) nounwind + +define i64 @test__builtin_mips_maq_s_w_phr1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind { +entry: +; CHECK: maq_s.w.phr + + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = bitcast i32 %a2.coerce to <2 x i16> + %3 = tail call i64 @llvm.mips.maq.s.w.phr(i64 %a0, <2 x i16> %1, <2 x i16> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.maq.s.w.phr(i64, <2 x i16>, <2 x i16>) nounwind + +define i64 @test__builtin_mips_maq_sa_w_phl1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind { +entry: +; CHECK: maq_sa.w.phl + + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = bitcast i32 %a2.coerce to <2 x i16> + %3 = tail call i64 @llvm.mips.maq.sa.w.phl(i64 %a0, <2 x i16> %1, <2 x i16> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.maq.sa.w.phl(i64, <2 x i16>, <2 x i16>) nounwind + +define i64 @test__builtin_mips_maq_sa_w_phr1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind { +entry: +; CHECK: maq_sa.w.phr + + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = bitcast i32 %a2.coerce to <2 x i16> + %3 = tail call i64 @llvm.mips.maq.sa.w.phr(i64 %a0, <2 x i16> %1, <2 x i16> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.maq.sa.w.phr(i64, <2 x i16>, <2 x i16>) nounwind + +define i64 @test__builtin_mips_shilo1(i32 %i0, i32, i64 %a0) nounwind readnone { +entry: +; CHECK: shilo $ac{{[0-9]}} + + %1 = tail call i64 @llvm.mips.shilo(i64 %a0, i32 0) + ret i64 %1 +} + +declare i64 @llvm.mips.shilo(i64, i32) nounwind readnone + +define i64 @test__builtin_mips_shilo2(i32 %i0, i32, i64 %a0, i32 %a1) nounwind readnone { +entry: +; CHECK: shilov + + %1 = tail call i64 @llvm.mips.shilo(i64 %a0, i32 %a1) + ret i64 %1 +} + +define i64 @test__builtin_mips_mthlip1(i32 %i0, i32, i64 %a0, i32 %a1) nounwind { +entry: +; CHECK: mthlip ${{[0-9]+}} + + %1 = tail call i64 @llvm.mips.mthlip(i64 %a0, i32 %a1) + ret i64 %1 +} + +declare i64 @llvm.mips.mthlip(i64, i32) nounwind + +define i32 @test__builtin_mips_bposge321(i32 %i0) nounwind readonly { +entry: +; CHECK: bposge32 $BB{{[0-9]+}} + + %0 = tail call i32 @llvm.mips.bposge32() + ret i32 %0 +} + +declare i32 @llvm.mips.bposge32() nounwind readonly + +define i64 @test__builtin_mips_madd1(i32 %i0, i32, i64 %a0, i32 %a1, i32 %a2) nounwind readnone { +entry: +; CHECK: madd $ac{{[0-9]}} + + %1 = tail call i64 @llvm.mips.madd(i64 %a0, i32 %a1, i32 %a2) + ret i64 %1 +} + +declare i64 @llvm.mips.madd(i64, i32, i32) 
nounwind readnone + +define i64 @test__builtin_mips_maddu1(i32 %i0, i32, i64 %a0, i32 %a1, i32 %a2) nounwind readnone { +entry: +; CHECK: maddu $ac{{[0-9]}} + + %1 = tail call i64 @llvm.mips.maddu(i64 %a0, i32 %a1, i32 %a2) + ret i64 %1 +} + +declare i64 @llvm.mips.maddu(i64, i32, i32) nounwind readnone + +define i64 @test__builtin_mips_msub1(i32 %i0, i32, i64 %a0, i32 %a1, i32 %a2) nounwind readnone { +entry: +; CHECK: msub $ac{{[0-9]}} + + %1 = tail call i64 @llvm.mips.msub(i64 %a0, i32 %a1, i32 %a2) + ret i64 %1 +} + +declare i64 @llvm.mips.msub(i64, i32, i32) nounwind readnone + +define i64 @test__builtin_mips_msubu1(i32 %i0, i32, i64 %a0, i32 %a1, i32 %a2) nounwind readnone { +entry: +; CHECK: msubu $ac{{[0-9]}} + + %1 = tail call i64 @llvm.mips.msubu(i64 %a0, i32 %a1, i32 %a2) + ret i64 %1 +} + +declare i64 @llvm.mips.msubu(i64, i32, i32) nounwind readnone + +define i64 @test__builtin_mips_mult1(i32 %i0, i32 %a0, i32 %a1) nounwind readnone { +entry: +; CHECK: mult $ac{{[0-9]}} + + %0 = tail call i64 @llvm.mips.mult(i32 %a0, i32 %a1) + ret i64 %0 +} + +declare i64 @llvm.mips.mult(i32, i32) nounwind readnone + +define i64 @test__builtin_mips_multu1(i32 %i0, i32 %a0, i32 %a1) nounwind readnone { +entry: +; CHECK: multu $ac{{[0-9]}} + + %0 = tail call i64 @llvm.mips.multu(i32 %a0, i32 %a1) + ret i64 %0 +} + +declare i64 @llvm.mips.multu(i32, i32) nounwind readnone + +define { i32 } @test__builtin_mips_addq_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: addq.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.addq.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.addq.ph(<2 x i16>, <2 x i16>) nounwind + +define { i32 } @test__builtin_mips_addq_s_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: addq_s.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.addq.s.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.addq.s.ph(<2 x i16>, <2 x i16>) nounwind + +define i32 @test__builtin_mips_addq_s_w1(i32 %i0, i32 %a0, i32 %a1) nounwind { +entry: +; CHECK: addq_s.w + + %0 = tail call i32 @llvm.mips.addq.s.w(i32 %a0, i32 %a1) + ret i32 %0 +} + +declare i32 @llvm.mips.addq.s.w(i32, i32) nounwind + +define { i32 } @test__builtin_mips_addu_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: addu.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = tail call <4 x i8> @llvm.mips.addu.qb(<4 x i8> %0, <4 x i8> %1) + %3 = bitcast <4 x i8> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.addu.qb(<4 x i8>, <4 x i8>) nounwind + +define { i32 } @test__builtin_mips_addu_s_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: addu_s.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = tail call <4 x i8> @llvm.mips.addu.s.qb(<4 x i8> %0, <4 x i8> %1) + %3 = bitcast <4 x i8> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.addu.s.qb(<4 x i8>, <4 x i8>) nounwind + 
+define { i32 } @test__builtin_mips_subq_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: subq.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.subq.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.subq.ph(<2 x i16>, <2 x i16>) nounwind + +define { i32 } @test__builtin_mips_subq_s_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: subq_s.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.subq.s.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.subq.s.ph(<2 x i16>, <2 x i16>) nounwind + +define i32 @test__builtin_mips_subq_s_w1(i32 %i0, i32 %a0, i32 %a1) nounwind { +entry: +; CHECK: subq_s.w + + %0 = tail call i32 @llvm.mips.subq.s.w(i32 %a0, i32 %a1) + ret i32 %0 +} + +declare i32 @llvm.mips.subq.s.w(i32, i32) nounwind + +define { i32 } @test__builtin_mips_subu_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: subu.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = tail call <4 x i8> @llvm.mips.subu.qb(<4 x i8> %0, <4 x i8> %1) + %3 = bitcast <4 x i8> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.subu.qb(<4 x i8>, <4 x i8>) nounwind + +define { i32 } @test__builtin_mips_subu_s_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: subu_s.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = tail call <4 x i8> @llvm.mips.subu.s.qb(<4 x i8> %0, <4 x i8> %1) + %3 = bitcast <4 x i8> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.subu.s.qb(<4 x i8>, <4 x i8>) nounwind + +define i32 @test__builtin_mips_addsc1(i32 %i0, i32 %a0, i32 %a1) nounwind { +entry: +; CHECK: addsc ${{[0-9]+}} + + %0 = tail call i32 @llvm.mips.addsc(i32 %a0, i32 %a1) + ret i32 %0 +} + +declare i32 @llvm.mips.addsc(i32, i32) nounwind + +define i32 @test__builtin_mips_addwc1(i32 %i0, i32 %a0, i32 %a1) nounwind { +entry: +; CHECK: addwc ${{[0-9]+}} + + %0 = tail call i32 @llvm.mips.addwc(i32 %a0, i32 %a1) + ret i32 %0 +} + +declare i32 @llvm.mips.addwc(i32, i32) nounwind + +define i32 @test__builtin_mips_modsub1(i32 %i0, i32 %a0, i32 %a1) nounwind readnone { +entry: +; CHECK: modsub ${{[0-9]+}} + + %0 = tail call i32 @llvm.mips.modsub(i32 %a0, i32 %a1) + ret i32 %0 +} + +declare i32 @llvm.mips.modsub(i32, i32) nounwind readnone + +define i32 @test__builtin_mips_raddu_w_qb1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: raddu.w.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call i32 @llvm.mips.raddu.w.qb(<4 x i8> %0) + ret i32 %1 +} + +declare i32 @llvm.mips.raddu.w.qb(<4 x i8>) nounwind readnone + +define { i32 } @test__builtin_mips_muleu_s_ph_qbl1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: muleu_s.ph.qbl + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.muleu.s.ph.qbl(<4 x i8> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + 
%.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.muleu.s.ph.qbl(<4 x i8>, <2 x i16>) nounwind + +define { i32 } @test__builtin_mips_muleu_s_ph_qbr1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: muleu_s.ph.qbr + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.muleu.s.ph.qbr(<4 x i8> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.muleu.s.ph.qbr(<4 x i8>, <2 x i16>) nounwind + +define { i32 } @test__builtin_mips_mulq_rs_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: mulq_rs.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.mulq.rs.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.mulq.rs.ph(<2 x i16>, <2 x i16>) nounwind + +define i32 @test__builtin_mips_muleq_s_w_phl1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: muleq_s.w.phl + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call i32 @llvm.mips.muleq.s.w.phl(<2 x i16> %0, <2 x i16> %1) + ret i32 %2 +} + +declare i32 @llvm.mips.muleq.s.w.phl(<2 x i16>, <2 x i16>) nounwind + +define i32 @test__builtin_mips_muleq_s_w_phr1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: muleq_s.w.phr + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call i32 @llvm.mips.muleq.s.w.phr(<2 x i16> %0, <2 x i16> %1) + ret i32 %2 +} + +declare i32 @llvm.mips.muleq.s.w.phr(<2 x i16>, <2 x i16>) nounwind + +define { i32 } @test__builtin_mips_precrq_qb_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind readnone { +entry: +; CHECK: precrq.qb.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <4 x i8> @llvm.mips.precrq.qb.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <4 x i8> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.precrq.qb.ph(<2 x i16>, <2 x i16>) nounwind readnone + +define { i32 } @test__builtin_mips_precrq_ph_w1(i32 %i0, i32 %a0, i32 %a1) nounwind readnone { +entry: +; CHECK: precrq.ph.w + + %0 = tail call <2 x i16> @llvm.mips.precrq.ph.w(i32 %a0, i32 %a1) + %1 = bitcast <2 x i16> %0 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.precrq.ph.w(i32, i32) nounwind readnone + +define { i32 } @test__builtin_mips_precrq_rs_ph_w1(i32 %i0, i32 %a0, i32 %a1) nounwind { +entry: +; CHECK: precrq_rs.ph.w + + %0 = tail call <2 x i16> @llvm.mips.precrq.rs.ph.w(i32 %a0, i32 %a1) + %1 = bitcast <2 x i16> %0 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.precrq.rs.ph.w(i32, i32) nounwind + +define { i32 } @test__builtin_mips_precrqu_s_qb_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: precrqu_s.qb.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <4 x i8> @llvm.mips.precrqu.s.qb.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <4 x 
i8> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.precrqu.s.qb.ph(<2 x i16>, <2 x i16>) nounwind + + +define i32 @test__builtin_mips_cmpu_eq_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: cmpu.eq.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + tail call void @llvm.mips.cmpu.eq.qb(<4 x i8> %0, <4 x i8> %1) + %2 = tail call i32 @llvm.mips.rddsp(i32 31) + ret i32 %2 +} + +declare void @llvm.mips.cmpu.eq.qb(<4 x i8>, <4 x i8>) nounwind + +declare i32 @llvm.mips.rddsp(i32) nounwind readonly + +define i32 @test__builtin_mips_cmpu_lt_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: cmpu.lt.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + tail call void @llvm.mips.cmpu.lt.qb(<4 x i8> %0, <4 x i8> %1) + %2 = tail call i32 @llvm.mips.rddsp(i32 31) + ret i32 %2 +} + +declare void @llvm.mips.cmpu.lt.qb(<4 x i8>, <4 x i8>) nounwind + +define i32 @test__builtin_mips_cmpu_le_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: cmpu.le.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + tail call void @llvm.mips.cmpu.le.qb(<4 x i8> %0, <4 x i8> %1) + %2 = tail call i32 @llvm.mips.rddsp(i32 31) + ret i32 %2 +} + +declare void @llvm.mips.cmpu.le.qb(<4 x i8>, <4 x i8>) nounwind + +define i32 @test__builtin_mips_cmpgu_eq_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: cmpgu.eq.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = tail call i32 @llvm.mips.cmpgu.eq.qb(<4 x i8> %0, <4 x i8> %1) + ret i32 %2 +} + +declare i32 @llvm.mips.cmpgu.eq.qb(<4 x i8>, <4 x i8>) nounwind + +define i32 @test__builtin_mips_cmpgu_lt_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: cmpgu.lt.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = tail call i32 @llvm.mips.cmpgu.lt.qb(<4 x i8> %0, <4 x i8> %1) + ret i32 %2 +} + +declare i32 @llvm.mips.cmpgu.lt.qb(<4 x i8>, <4 x i8>) nounwind + +define i32 @test__builtin_mips_cmpgu_le_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: cmpgu.le.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = tail call i32 @llvm.mips.cmpgu.le.qb(<4 x i8> %0, <4 x i8> %1) + ret i32 %2 +} + +declare i32 @llvm.mips.cmpgu.le.qb(<4 x i8>, <4 x i8>) nounwind + +define i32 @test__builtin_mips_cmp_eq_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: cmp.eq.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + tail call void @llvm.mips.cmp.eq.ph(<2 x i16> %0, <2 x i16> %1) + %2 = tail call i32 @llvm.mips.rddsp(i32 31) + ret i32 %2 +} + +declare void @llvm.mips.cmp.eq.ph(<2 x i16>, <2 x i16>) nounwind + +define i32 @test__builtin_mips_cmp_lt_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: cmp.lt.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + tail call void @llvm.mips.cmp.lt.ph(<2 x i16> %0, <2 x i16> %1) + %2 = tail call i32 @llvm.mips.rddsp(i32 31) + ret i32 %2 +} + +declare void @llvm.mips.cmp.lt.ph(<2 x i16>, <2 x i16>) nounwind + +define i32 @test__builtin_mips_cmp_le_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: cmp.le.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 
= bitcast i32 %a1.coerce to <2 x i16> + tail call void @llvm.mips.cmp.le.ph(<2 x i16> %0, <2 x i16> %1) + %2 = tail call i32 @llvm.mips.rddsp(i32 31) + ret i32 %2 +} + +declare void @llvm.mips.cmp.le.ph(<2 x i16>, <2 x i16>) nounwind + +define { i32 } @test__builtin_mips_pick_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind readonly { +entry: +; CHECK: pick.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = tail call <4 x i8> @llvm.mips.pick.qb(<4 x i8> %0, <4 x i8> %1) + %3 = bitcast <4 x i8> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.pick.qb(<4 x i8>, <4 x i8>) nounwind readonly + +define { i32 } @test__builtin_mips_pick_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind readonly { +entry: +; CHECK: pick.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.pick.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.pick.ph(<2 x i16>, <2 x i16>) nounwind readonly + +define { i32 } @test__builtin_mips_packrl_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind readnone { +entry: +; CHECK: packrl.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.packrl.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.packrl.ph(<2 x i16>, <2 x i16>) nounwind readnone + +define i32 @test__builtin_mips_rddsp1(i32 %i0) nounwind readonly { +entry: +; CHECK: rddsp ${{[0-9]+}} + + %0 = tail call i32 @llvm.mips.rddsp(i32 31) + ret i32 %0 +} + +define { i32 } @test__builtin_mips_shll_qb1(i32 %i0, i32 %a0.coerce) nounwind { +entry: +; CHECK: shll.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <4 x i8> @llvm.mips.shll.qb(<4 x i8> %0, i32 3) + %2 = bitcast <4 x i8> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.shll.qb(<4 x i8>, i32) nounwind + +define { i32 } @test__builtin_mips_shll_qb2(i32 %i0, i32 %a0.coerce, i32 %a1) nounwind { +entry: +; CHECK: shllv.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <4 x i8> @llvm.mips.shll.qb(<4 x i8> %0, i32 %a1) + %2 = bitcast <4 x i8> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +define { i32 } @test__builtin_mips_shll_ph1(i32 %i0, i32 %a0.coerce) nounwind { +entry: +; CHECK: shll.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = tail call <2 x i16> @llvm.mips.shll.ph(<2 x i16> %0, i32 7) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.shll.ph(<2 x i16>, i32) nounwind + +define { i32 } @test__builtin_mips_shll_ph2(i32 %i0, i32 %a0.coerce, i32 %a1) nounwind { +entry: +; CHECK: shllv.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = tail call <2 x i16> @llvm.mips.shll.ph(<2 x i16> %0, i32 %a1) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +define { i32 } @test__builtin_mips_shll_s_ph1(i32 %i0, i32 %a0.coerce) nounwind { +entry: +; CHECK: shll_s.ph + + %0 = bitcast i32 
%a0.coerce to <2 x i16> + %1 = tail call <2 x i16> @llvm.mips.shll.s.ph(<2 x i16> %0, i32 7) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.shll.s.ph(<2 x i16>, i32) nounwind + +define { i32 } @test__builtin_mips_shll_s_ph2(i32 %i0, i32 %a0.coerce, i32 %a1) nounwind { +entry: +; CHECK: shllv_s.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = tail call <2 x i16> @llvm.mips.shll.s.ph(<2 x i16> %0, i32 %a1) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +define i32 @test__builtin_mips_shll_s_w1(i32 %i0, i32 %a0) nounwind { +entry: +; CHECK: shll_s.w + + %0 = tail call i32 @llvm.mips.shll.s.w(i32 %a0, i32 15) + ret i32 %0 +} + +declare i32 @llvm.mips.shll.s.w(i32, i32) nounwind + +define i32 @test__builtin_mips_shll_s_w2(i32 %i0, i32 %a0, i32 %a1) nounwind { +entry: +; CHECK: shllv_s.w + + %0 = tail call i32 @llvm.mips.shll.s.w(i32 %a0, i32 %a1) + ret i32 %0 +} + +define { i32 } @test__builtin_mips_shrl_qb1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: shrl.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <4 x i8> @llvm.mips.shrl.qb(<4 x i8> %0, i32 3) + %2 = bitcast <4 x i8> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.shrl.qb(<4 x i8>, i32) nounwind readnone + +define { i32 } @test__builtin_mips_shrl_qb2(i32 %i0, i32 %a0.coerce, i32 %a1) nounwind readnone { +entry: +; CHECK: shrlv.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <4 x i8> @llvm.mips.shrl.qb(<4 x i8> %0, i32 %a1) + %2 = bitcast <4 x i8> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +define { i32 } @test__builtin_mips_shra_ph1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: shra.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = tail call <2 x i16> @llvm.mips.shra.ph(<2 x i16> %0, i32 7) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.shra.ph(<2 x i16>, i32) nounwind readnone + +define { i32 } @test__builtin_mips_shra_ph2(i32 %i0, i32 %a0.coerce, i32 %a1) nounwind readnone { +entry: +; CHECK: shrav.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = tail call <2 x i16> @llvm.mips.shra.ph(<2 x i16> %0, i32 %a1) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +define { i32 } @test__builtin_mips_shra_r_ph1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: shra_r.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = tail call <2 x i16> @llvm.mips.shra.r.ph(<2 x i16> %0, i32 7) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.shra.r.ph(<2 x i16>, i32) nounwind readnone + +define { i32 } @test__builtin_mips_shra_r_ph2(i32 %i0, i32 %a0.coerce, i32 %a1) nounwind readnone { +entry: +; CHECK: shrav_r.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = tail call <2 x i16> @llvm.mips.shra.r.ph(<2 x i16> %0, i32 %a1) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +define i32 @test__builtin_mips_shra_r_w1(i32 %i0, i32 %a0) nounwind readnone { +entry: +; CHECK: shra_r.w + + 
%0 = tail call i32 @llvm.mips.shra.r.w(i32 %a0, i32 15) + ret i32 %0 +} + +declare i32 @llvm.mips.shra.r.w(i32, i32) nounwind readnone + +define i32 @test__builtin_mips_shra_r_w2(i32 %i0, i32 %a0, i32 %a1) nounwind readnone { +entry: +; CHECK: shrav_r.w + + %0 = tail call i32 @llvm.mips.shra.r.w(i32 %a0, i32 %a1) + ret i32 %0 +} + +define { i32 } @test__builtin_mips_absq_s_ph1(i32 %i0, i32 %a0.coerce) nounwind { +entry: +; CHECK: absq_s.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = tail call <2 x i16> @llvm.mips.absq.s.ph(<2 x i16> %0) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.absq.s.ph(<2 x i16>) nounwind + +define i32 @test__builtin_mips_absq_s_w1(i32 %i0, i32 %a0) nounwind { +entry: +; CHECK: absq_s.w + + %0 = tail call i32 @llvm.mips.absq.s.w(i32 %a0) + ret i32 %0 +} + +declare i32 @llvm.mips.absq.s.w(i32) nounwind + +define i32 @test__builtin_mips_preceq_w_phl1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: preceq.w.phl + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = tail call i32 @llvm.mips.preceq.w.phl(<2 x i16> %0) + ret i32 %1 +} + +declare i32 @llvm.mips.preceq.w.phl(<2 x i16>) nounwind readnone + +define i32 @test__builtin_mips_preceq_w_phr1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: preceq.w.phr + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = tail call i32 @llvm.mips.preceq.w.phr(<2 x i16> %0) + ret i32 %1 +} + +declare i32 @llvm.mips.preceq.w.phr(<2 x i16>) nounwind readnone + +define { i32 } @test__builtin_mips_precequ_ph_qbl1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: precequ.ph.qbl + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <2 x i16> @llvm.mips.precequ.ph.qbl(<4 x i8> %0) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.precequ.ph.qbl(<4 x i8>) nounwind readnone + +define { i32 } @test__builtin_mips_precequ_ph_qbr1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: precequ.ph.qbr + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <2 x i16> @llvm.mips.precequ.ph.qbr(<4 x i8> %0) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.precequ.ph.qbr(<4 x i8>) nounwind readnone + +define { i32 } @test__builtin_mips_precequ_ph_qbla1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: precequ.ph.qbla + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <2 x i16> @llvm.mips.precequ.ph.qbla(<4 x i8> %0) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.precequ.ph.qbla(<4 x i8>) nounwind readnone + +define { i32 } @test__builtin_mips_precequ_ph_qbra1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: precequ.ph.qbra + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <2 x i16> @llvm.mips.precequ.ph.qbra(<4 x i8> %0) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.precequ.ph.qbra(<4 x i8>) nounwind readnone + +define { i32 } @test__builtin_mips_preceu_ph_qbl1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: preceu.ph.qbl + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <2 x i16> 
@llvm.mips.preceu.ph.qbl(<4 x i8> %0) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.preceu.ph.qbl(<4 x i8>) nounwind readnone + +define { i32 } @test__builtin_mips_preceu_ph_qbr1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: preceu.ph.qbr + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <2 x i16> @llvm.mips.preceu.ph.qbr(<4 x i8> %0) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.preceu.ph.qbr(<4 x i8>) nounwind readnone + +define { i32 } @test__builtin_mips_preceu_ph_qbla1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: preceu.ph.qbla + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <2 x i16> @llvm.mips.preceu.ph.qbla(<4 x i8> %0) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.preceu.ph.qbla(<4 x i8>) nounwind readnone + +define { i32 } @test__builtin_mips_preceu_ph_qbra1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: preceu.ph.qbra + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <2 x i16> @llvm.mips.preceu.ph.qbra(<4 x i8> %0) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.preceu.ph.qbra(<4 x i8>) nounwind readnone + +define { i32 } @test__builtin_mips_repl_qb1(i32 %i0) nounwind readnone { +entry: +; CHECK: repl.qb + + %0 = tail call <4 x i8> @llvm.mips.repl.qb(i32 127) + %1 = bitcast <4 x i8> %0 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.repl.qb(i32) nounwind readnone + +define { i32 } @test__builtin_mips_repl_qb2(i32 %i0, i32 %a0) nounwind readnone { +entry: +; CHECK: replv.qb + + %0 = tail call <4 x i8> @llvm.mips.repl.qb(i32 %a0) + %1 = bitcast <4 x i8> %0 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0 + ret { i32 } %.fca.0.insert +} + +define { i32 } @test__builtin_mips_repl_ph1(i32 %i0) nounwind readnone { +entry: +; CHECK: repl.ph + + %0 = tail call <2 x i16> @llvm.mips.repl.ph(i32 0) + %1 = bitcast <2 x i16> %0 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.repl.ph(i32) nounwind readnone + +define { i32 } @test__builtin_mips_repl_ph2(i32 %i0, i32 %a0) nounwind readnone { +entry: +; CHECK: replv.ph + + %0 = tail call <2 x i16> @llvm.mips.repl.ph(i32 %a0) + %1 = bitcast <2 x i16> %0 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0 + ret { i32 } %.fca.0.insert +} + +define i32 @test__builtin_mips_bitrev1(i32 %i0, i32 %a0) nounwind readnone { +entry: +; CHECK: bitrev ${{[0-9]+}} + + %0 = tail call i32 @llvm.mips.bitrev(i32 %a0) + ret i32 %0 +} + +declare i32 @llvm.mips.bitrev(i32) nounwind readnone + +define i32 @test__builtin_mips_lbux1(i32 %i0, i8* %a0, i32 %a1) nounwind readonly { +entry: +; CHECK: lbux ${{[0-9]+}} + + %0 = tail call i32 @llvm.mips.lbux(i8* %a0, i32 %a1) + ret i32 %0 +} + +declare i32 @llvm.mips.lbux(i8*, i32) nounwind readonly + +define i32 @test__builtin_mips_lhx1(i32 %i0, i8* %a0, i32 %a1) nounwind readonly { +entry: +; CHECK: lhx ${{[0-9]+}} + + %0 = tail call i32 @llvm.mips.lhx(i8* %a0, i32 %a1) + ret i32 %0 +} + +declare i32 @llvm.mips.lhx(i8*, i32) nounwind 
readonly + +define i32 @test__builtin_mips_lwx1(i32 %i0, i8* %a0, i32 %a1) nounwind readonly { +entry: +; CHECK: lwx ${{[0-9]+}} + + %0 = tail call i32 @llvm.mips.lwx(i8* %a0, i32 %a1) + ret i32 %0 +} + +declare i32 @llvm.mips.lwx(i8*, i32) nounwind readonly + +define i32 @test__builtin_mips_wrdsp1(i32 %i0, i32 %a0) nounwind { +entry: +; CHECK: wrdsp ${{[0-9]+}} + + tail call void @llvm.mips.wrdsp(i32 %a0, i32 31) + %0 = tail call i32 @llvm.mips.rddsp(i32 31) + ret i32 %0 +} + +declare void @llvm.mips.wrdsp(i32, i32) nounwind diff --git a/test/CodeGen/Mips/dsp-r2.ll b/test/CodeGen/Mips/dsp-r2.ll new file mode 100644 index 0000000..631f9e4 --- /dev/null +++ b/test/CodeGen/Mips/dsp-r2.ll @@ -0,0 +1,568 @@ +; RUN: llc -march=mipsel -mattr=+dspr2 < %s | FileCheck %s + +define i64 @test__builtin_mips_dpa_w_ph1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind readnone { +entry: +; CHECK: dpa.w.ph + + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = bitcast i32 %a2.coerce to <2 x i16> + %3 = tail call i64 @llvm.mips.dpa.w.ph(i64 %a0, <2 x i16> %1, <2 x i16> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.dpa.w.ph(i64, <2 x i16>, <2 x i16>) nounwind readnone + +define i64 @test__builtin_mips_dps_w_ph1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind readnone { +entry: +; CHECK: dps.w.ph + + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = bitcast i32 %a2.coerce to <2 x i16> + %3 = tail call i64 @llvm.mips.dps.w.ph(i64 %a0, <2 x i16> %1, <2 x i16> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.dps.w.ph(i64, <2 x i16>, <2 x i16>) nounwind readnone + +define i64 @test__builtin_mips_mulsa_w_ph1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind readnone { +entry: +; CHECK: mulsa.w.ph + + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = bitcast i32 %a2.coerce to <2 x i16> + %3 = tail call i64 @llvm.mips.mulsa.w.ph(i64 %a0, <2 x i16> %1, <2 x i16> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.mulsa.w.ph(i64, <2 x i16>, <2 x i16>) nounwind readnone + +define i64 @test__builtin_mips_dpax_w_ph1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind readnone { +entry: +; CHECK: dpax.w.ph + + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = bitcast i32 %a2.coerce to <2 x i16> + %3 = tail call i64 @llvm.mips.dpax.w.ph(i64 %a0, <2 x i16> %1, <2 x i16> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.dpax.w.ph(i64, <2 x i16>, <2 x i16>) nounwind readnone + +define i64 @test__builtin_mips_dpsx_w_ph1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind readnone { +entry: +; CHECK: dpsx.w.ph + + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = bitcast i32 %a2.coerce to <2 x i16> + %3 = tail call i64 @llvm.mips.dpsx.w.ph(i64 %a0, <2 x i16> %1, <2 x i16> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.dpsx.w.ph(i64, <2 x i16>, <2 x i16>) nounwind readnone + +define i64 @test__builtin_mips_dpaqx_s_w_ph1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind { +entry: +; CHECK: dpaqx_s.w.ph + + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = bitcast i32 %a2.coerce to <2 x i16> + %3 = tail call i64 @llvm.mips.dpaqx.s.w.ph(i64 %a0, <2 x i16> %1, <2 x i16> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.dpaqx.s.w.ph(i64, <2 x i16>, <2 x i16>) nounwind + +define i64 @test__builtin_mips_dpaqx_sa_w_ph1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind { +entry: +; CHECK: dpaqx_sa.w.ph + + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = bitcast i32 %a2.coerce to <2 x i16> + %3 = tail call i64 @llvm.mips.dpaqx.sa.w.ph(i64 %a0, <2 x i16> %1, <2 x i16> %2) + 
ret i64 %3 +} + +declare i64 @llvm.mips.dpaqx.sa.w.ph(i64, <2 x i16>, <2 x i16>) nounwind + +define i64 @test__builtin_mips_dpsqx_s_w_ph1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind { +entry: +; CHECK: dpsqx_s.w.ph + + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = bitcast i32 %a2.coerce to <2 x i16> + %3 = tail call i64 @llvm.mips.dpsqx.s.w.ph(i64 %a0, <2 x i16> %1, <2 x i16> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.dpsqx.s.w.ph(i64, <2 x i16>, <2 x i16>) nounwind + +define i64 @test__builtin_mips_dpsqx_sa_w_ph1(i32 %i0, i32, i64 %a0, i32 %a1.coerce, i32 %a2.coerce) nounwind { +entry: +; CHECK: dpsqx_sa.w.ph + + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = bitcast i32 %a2.coerce to <2 x i16> + %3 = tail call i64 @llvm.mips.dpsqx.sa.w.ph(i64 %a0, <2 x i16> %1, <2 x i16> %2) + ret i64 %3 +} + +declare i64 @llvm.mips.dpsqx.sa.w.ph(i64, <2 x i16>, <2 x i16>) nounwind + +define { i32 } @test__builtin_mips_addu_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: addu.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.addu.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.addu.ph(<2 x i16>, <2 x i16>) nounwind + +define { i32 } @test__builtin_mips_addu_s_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: addu_s.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.addu.s.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.addu.s.ph(<2 x i16>, <2 x i16>) nounwind + +define { i32 } @test__builtin_mips_mulq_s_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: mulq_s.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.mulq.s.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.mulq.s.ph(<2 x i16>, <2 x i16>) nounwind + +define { i32 } @test__builtin_mips_subu_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: subu.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.subu.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.subu.ph(<2 x i16>, <2 x i16>) nounwind + +define { i32 } @test__builtin_mips_subu_s_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: subu_s.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.subu.s.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.subu.s.ph(<2 x i16>, <2 x i16>) nounwind + +define i32 @test__builtin_mips_cmpgdu_eq_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: cmpgdu.eq.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + 
%2 = tail call i32 @llvm.mips.cmpgdu.eq.qb(<4 x i8> %0, <4 x i8> %1) + ret i32 %2 +} + +declare i32 @llvm.mips.cmpgdu.eq.qb(<4 x i8>, <4 x i8>) nounwind + +define i32 @test__builtin_mips_cmpgdu_lt_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: cmpgdu.lt.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = tail call i32 @llvm.mips.cmpgdu.lt.qb(<4 x i8> %0, <4 x i8> %1) + ret i32 %2 +} + +declare i32 @llvm.mips.cmpgdu.lt.qb(<4 x i8>, <4 x i8>) nounwind + +define i32 @test__builtin_mips_cmpgdu_le_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: cmpgdu.le.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = tail call i32 @llvm.mips.cmpgdu.le.qb(<4 x i8> %0, <4 x i8> %1) + ret i32 %2 +} + +declare i32 @llvm.mips.cmpgdu.le.qb(<4 x i8>, <4 x i8>) nounwind + +define { i32 } @test__builtin_mips_precr_qb_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: precr.qb.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <4 x i8> @llvm.mips.precr.qb.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <4 x i8> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.precr.qb.ph(<2 x i16>, <2 x i16>) nounwind + +define { i32 } @test__builtin_mips_precr_sra_ph_w1(i32 %i0, i32 %a0, i32 %a1) nounwind readnone { +entry: +; CHECK: precr_sra.ph.w + + %0 = tail call <2 x i16> @llvm.mips.precr.sra.ph.w(i32 %a0, i32 %a1, i32 15) + %1 = bitcast <2 x i16> %0 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.precr.sra.ph.w(i32, i32, i32) nounwind readnone + +define { i32 } @test__builtin_mips_precr_sra_r_ph_w1(i32 %i0, i32 %a0, i32 %a1) nounwind readnone { +entry: +; CHECK: precr_sra_r.ph.w + + %0 = tail call <2 x i16> @llvm.mips.precr.sra.r.ph.w(i32 %a0, i32 %a1, i32 15) + %1 = bitcast <2 x i16> %0 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.precr.sra.r.ph.w(i32, i32, i32) nounwind readnone + +define { i32 } @test__builtin_mips_shra_qb1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: shra.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <4 x i8> @llvm.mips.shra.qb(<4 x i8> %0, i32 3) + %2 = bitcast <4 x i8> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.shra.qb(<4 x i8>, i32) nounwind readnone + +define { i32 } @test__builtin_mips_shra_r_qb1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: shra_r.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <4 x i8> @llvm.mips.shra.r.qb(<4 x i8> %0, i32 3) + %2 = bitcast <4 x i8> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.shra.r.qb(<4 x i8>, i32) nounwind readnone + +define { i32 } @test__builtin_mips_shra_qb2(i32 %i0, i32 %a0.coerce, i32 %a1) nounwind readnone { +entry: +; CHECK: shrav.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <4 x i8> @llvm.mips.shra.qb(<4 x i8> %0, i32 %a1) + %2 = bitcast <4 x i8> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +define { i32 } @test__builtin_mips_shra_r_qb2(i32 %i0, i32 %a0.coerce, i32 %a1) nounwind readnone { 
+entry: +; CHECK: shrav_r.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <4 x i8> @llvm.mips.shra.r.qb(<4 x i8> %0, i32 %a1) + %2 = bitcast <4 x i8> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +define { i32 } @test__builtin_mips_shrl_ph1(i32 %i0, i32 %a0.coerce) nounwind readnone { +entry: +; CHECK: shrl.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = tail call <2 x i16> @llvm.mips.shrl.ph(<2 x i16> %0, i32 7) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.shrl.ph(<2 x i16>, i32) nounwind readnone + +define { i32 } @test__builtin_mips_shrl_ph2(i32 %i0, i32 %a0.coerce, i32 %a1) nounwind readnone { +entry: +; CHECK: shrlv.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = tail call <2 x i16> @llvm.mips.shrl.ph(<2 x i16> %0, i32 %a1) + %2 = bitcast <2 x i16> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +define { i32 } @test__builtin_mips_absq_s_qb1(i32 %i0, i32 %a0.coerce) nounwind { +entry: +; CHECK: absq_s.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = tail call <4 x i8> @llvm.mips.absq.s.qb(<4 x i8> %0) + %2 = bitcast <4 x i8> %1 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.absq.s.qb(<4 x i8>) nounwind + +define { i32 } @test__builtin_mips_mul_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: mul.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.mul.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.mul.ph(<2 x i16>, <2 x i16>) nounwind + +define { i32 } @test__builtin_mips_mul_s_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind { +entry: +; CHECK: mul_s.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.mul.s.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.mul.s.ph(<2 x i16>, <2 x i16>) nounwind + +define i32 @test__builtin_mips_mulq_rs_w1(i32 %i0, i32 %a0, i32 %a1) nounwind { +entry: +; CHECK: mulq_rs.w + + %0 = tail call i32 @llvm.mips.mulq.rs.w(i32 %a0, i32 %a1) + ret i32 %0 +} + +declare i32 @llvm.mips.mulq.rs.w(i32, i32) nounwind + +define i32 @test__builtin_mips_mulq_s_w1(i32 %i0, i32 %a0, i32 %a1) nounwind { +entry: +; CHECK: mulq_s.w + + %0 = tail call i32 @llvm.mips.mulq.s.w(i32 %a0, i32 %a1) + ret i32 %0 +} + +declare i32 @llvm.mips.mulq.s.w(i32, i32) nounwind + +define { i32 } @test__builtin_mips_adduh_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind readnone { +entry: +; CHECK: adduh.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = tail call <4 x i8> @llvm.mips.adduh.qb(<4 x i8> %0, <4 x i8> %1) + %3 = bitcast <4 x i8> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.adduh.qb(<4 x i8>, <4 x i8>) nounwind readnone + +define { i32 } @test__builtin_mips_adduh_r_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind readnone { +entry: +; CHECK: adduh_r.qb + + %0 = bitcast 
i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = tail call <4 x i8> @llvm.mips.adduh.r.qb(<4 x i8> %0, <4 x i8> %1) + %3 = bitcast <4 x i8> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.adduh.r.qb(<4 x i8>, <4 x i8>) nounwind readnone + +define { i32 } @test__builtin_mips_subuh_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind readnone { +entry: +; CHECK: subuh.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = tail call <4 x i8> @llvm.mips.subuh.qb(<4 x i8> %0, <4 x i8> %1) + %3 = bitcast <4 x i8> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.subuh.qb(<4 x i8>, <4 x i8>) nounwind readnone + +define { i32 } @test__builtin_mips_subuh_r_qb1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind readnone { +entry: +; CHECK: subuh_r.qb + + %0 = bitcast i32 %a0.coerce to <4 x i8> + %1 = bitcast i32 %a1.coerce to <4 x i8> + %2 = tail call <4 x i8> @llvm.mips.subuh.r.qb(<4 x i8> %0, <4 x i8> %1) + %3 = bitcast <4 x i8> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <4 x i8> @llvm.mips.subuh.r.qb(<4 x i8>, <4 x i8>) nounwind readnone + +define { i32 } @test__builtin_mips_addqh_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind readnone { +entry: +; CHECK: addqh.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.addqh.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.addqh.ph(<2 x i16>, <2 x i16>) nounwind readnone + +define { i32 } @test__builtin_mips_addqh_r_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind readnone { +entry: +; CHECK: addqh_r.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.addqh.r.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.addqh.r.ph(<2 x i16>, <2 x i16>) nounwind readnone + +define i32 @test__builtin_mips_addqh_w1(i32 %i0, i32 %a0, i32 %a1) nounwind readnone { +entry: +; CHECK: addqh.w + + %0 = tail call i32 @llvm.mips.addqh.w(i32 %a0, i32 %a1) + ret i32 %0 +} + +declare i32 @llvm.mips.addqh.w(i32, i32) nounwind readnone + +define i32 @test__builtin_mips_addqh_r_w1(i32 %i0, i32 %a0, i32 %a1) nounwind readnone { +entry: +; CHECK: addqh_r.w + + %0 = tail call i32 @llvm.mips.addqh.r.w(i32 %a0, i32 %a1) + ret i32 %0 +} + +declare i32 @llvm.mips.addqh.r.w(i32, i32) nounwind readnone + +define { i32 } @test__builtin_mips_subqh_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind readnone { +entry: +; CHECK: subqh.ph + + %0 = bitcast i32 %a0.coerce to <2 x i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.subqh.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.subqh.ph(<2 x i16>, <2 x i16>) nounwind readnone + +define { i32 } @test__builtin_mips_subqh_r_ph1(i32 %i0, i32 %a0.coerce, i32 %a1.coerce) nounwind readnone { +entry: +; CHECK: subqh_r.ph + + %0 = bitcast i32 %a0.coerce to <2 x 
i16> + %1 = bitcast i32 %a1.coerce to <2 x i16> + %2 = tail call <2 x i16> @llvm.mips.subqh.r.ph(<2 x i16> %0, <2 x i16> %1) + %3 = bitcast <2 x i16> %2 to i32 + %.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0 + ret { i32 } %.fca.0.insert +} + +declare <2 x i16> @llvm.mips.subqh.r.ph(<2 x i16>, <2 x i16>) nounwind readnone + +define i32 @test__builtin_mips_subqh_w1(i32 %i0, i32 %a0, i32 %a1) nounwind readnone { +entry: +; CHECK: subqh.w + + %0 = tail call i32 @llvm.mips.subqh.w(i32 %a0, i32 %a1) + ret i32 %0 +} + +declare i32 @llvm.mips.subqh.w(i32, i32) nounwind readnone + +define i32 @test__builtin_mips_subqh_r_w1(i32 %i0, i32 %a0, i32 %a1) nounwind readnone { +entry: +; CHECK: subqh_r.w + + %0 = tail call i32 @llvm.mips.subqh.r.w(i32 %a0, i32 %a1) + ret i32 %0 +} + +declare i32 @llvm.mips.subqh.r.w(i32, i32) nounwind readnone + +define i32 @test__builtin_mips_append1(i32 %i0, i32 %a0, i32 %a1) nounwind readnone { +entry: +; CHECK: append ${{[0-9]+}} + + %0 = tail call i32 @llvm.mips.append(i32 %a0, i32 %a1, i32 15) + ret i32 %0 +} + +declare i32 @llvm.mips.append(i32, i32, i32) nounwind readnone + +define i32 @test__builtin_mips_balign1(i32 %i0, i32 %a0, i32 %a1) nounwind readnone { +entry: +; CHECK: balign ${{[0-9]+}} + + %0 = tail call i32 @llvm.mips.balign(i32 %a0, i32 %a1, i32 1) + ret i32 %0 +} + +declare i32 @llvm.mips.balign(i32, i32, i32) nounwind readnone + +define i32 @test__builtin_mips_prepend1(i32 %i0, i32 %a0, i32 %a1) nounwind readnone { +entry: +; CHECK: prepend ${{[0-9]+}} + + %0 = tail call i32 @llvm.mips.prepend(i32 %a0, i32 %a1, i32 15) + ret i32 %0 +} + +declare i32 @llvm.mips.prepend(i32, i32, i32) nounwind readnone diff --git a/test/CodeGen/Mips/eh-dwarf-cfa.ll b/test/CodeGen/Mips/eh-dwarf-cfa.ll new file mode 100644 index 0000000..3a21332 --- /dev/null +++ b/test/CodeGen/Mips/eh-dwarf-cfa.ll @@ -0,0 +1,63 @@ +; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck %s +; RUN: llc -march=mips64el -mcpu=mips64 < %s | \ +; RUN: FileCheck %s -check-prefix=CHECK-MIPS64 + +declare i8* @llvm.eh.dwarf.cfa(i32) nounwind +declare i8* @llvm.frameaddress(i32) nounwind readnone + +define i8* @f1() nounwind { +entry: + %x = alloca [32 x i8], align 1 + %0 = call i8* @llvm.eh.dwarf.cfa(i32 0) + ret i8* %0 + +; CHECK: addiu $sp, $sp, -32 +; CHECK: addiu $2, $sp, 32 +} + + +define i8* @f2() nounwind { +entry: + %x = alloca [65536 x i8], align 1 + %0 = call i8* @llvm.eh.dwarf.cfa(i32 0) + ret i8* %0 + +; check stack size (65536 + 8) +; CHECK: lui $[[R0:[a-z0-9]+]], 65535 +; CHECK: addiu $[[R0]], $[[R0]], -8 +; CHECK: addu $sp, $sp, $[[R0]] + +; check return value ($sp + stack size) +; CHECK: lui $[[R1:[a-z0-9]+]], 1 +; CHECK: addu $[[R1]], $sp, $[[R1]] +; CHECK: addiu $2, $[[R1]], 8 +} + + +define i32 @f3() nounwind { +entry: + %x = alloca [32 x i8], align 1 + %0 = call i8* @llvm.eh.dwarf.cfa(i32 0) + %1 = ptrtoint i8* %0 to i32 + %2 = call i8* @llvm.frameaddress(i32 0) + %3 = ptrtoint i8* %2 to i32 + %add = add i32 %1, %3 + ret i32 %add + +; CHECK: addiu $sp, $sp, -40 + +; check return value ($fp + stack size + $fp) +; CHECK: addiu $[[R0:[a-z0-9]+]], $fp, 40 +; CHECK: addu $2, $[[R0]], $fp +} + + +define i8* @f4() nounwind { +entry: + %x = alloca [32 x i8], align 1 + %0 = call i8* @llvm.eh.dwarf.cfa(i32 0) + ret i8* %0 + +; CHECK-MIPS64: daddiu $sp, $sp, -32 +; CHECK-MIPS64: daddiu $2, $sp, 32 +} diff --git a/test/CodeGen/Mips/helloworld.ll b/test/CodeGen/Mips/helloworld.ll index bee93ac..aee58b6 100644 --- a/test/CodeGen/Mips/helloworld.ll +++ 
b/test/CodeGen/Mips/helloworld.ll @@ -24,10 +24,10 @@ entry: ; C1: addiu ${{[0-9]+}}, %lo($.str) ; C2: move $25, ${{[0-9]+}} ; C1: move $gp, ${{[0-9]+}} -; C1: jalr ${{[0-9]+}} +; C1: jalrc ${{[0-9]+}} ; SR: restore $ra, [[FS]] ; PE: li $2, 0 -; PE: jr $ra +; PE: jrc $ra } diff --git a/test/CodeGen/Mips/i32k.ll b/test/CodeGen/Mips/i32k.ll new file mode 100644 index 0000000..c6da8b1 --- /dev/null +++ b/test/CodeGen/Mips/i32k.ll @@ -0,0 +1,17 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16a +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16b + +@.str = private unnamed_addr constant [4 x i8] c"%i\0A\00", align 1 + +define i32 @main() nounwind { +entry: + %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 1075344593) nounwind +; 16a: li ${{[0-9]+}}, 29905 +; 16b: li ${{[0-9]+}}, 16408 + %call1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 -1075344593) nounwind +; 16a: li ${{[0-9]+}}, 49127 +; 16b: li ${{[0-9]+}}, 35631 + ret i32 0 +} + +declare i32 @printf(i8* nocapture, ...) nounwind diff --git a/test/CodeGen/Mips/init-array.ll b/test/CodeGen/Mips/init-array.ll new file mode 100644 index 0000000..f96ce26 --- /dev/null +++ b/test/CodeGen/Mips/init-array.ll @@ -0,0 +1,14 @@ +; RUN: llc -mtriple mipsel-unknown-linux -use-init-array < %s | FileCheck %s + +target triple = "mipsel-unknown-linux" + +@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @test }] +; CHECK: .section +; CHECK: .init_array +; CHECK-NOT: .ctors +; CHECK: .4byte test + +define internal void @test() section ".text.startup" { +entry: + ret void +} diff --git a/test/CodeGen/Mips/largeimm1.ll b/test/CodeGen/Mips/largeimm1.ll index d65cc02..1c0f69c 100644 --- a/test/CodeGen/Mips/largeimm1.ll +++ b/test/CodeGen/Mips/largeimm1.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=mipsel < %s | FileCheck %s -; CHECK: lui $at, 49152 -; CHECK: lui $at, 16384 +; CHECK: lui ${{[0-9]+}}, 49152 +; CHECK: lui ${{[0-9]+}}, 16384 define void @f() nounwind { entry: %a1 = alloca [1073741824 x i8], align 1 diff --git a/test/CodeGen/Mips/largeimmprinting.ll b/test/CodeGen/Mips/largeimmprinting.ll index 2e54879..1e96346 100644 --- a/test/CodeGen/Mips/largeimmprinting.ll +++ b/test/CodeGen/Mips/largeimmprinting.ll @@ -1,4 +1,6 @@ -; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | FileCheck %s +; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=32 +; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 < %s | \ +; RUN: FileCheck %s -check-prefix=64 %struct.S1 = type { [65536 x i8] } @@ -6,9 +8,21 @@ define void @f() nounwind { entry: -; CHECK: lui $at, 65535 -; CHECK: addiu $at, $at, -16 -; CHECK: addu $sp, $sp, $at +; 32: lui $[[R0:[0-9]+]], 65535 +; 32: addiu $[[R0]], $[[R0]], -24 +; 32: addu $sp, $sp, $[[R0]] +; 32: lui $[[R1:[0-9]+]], 1 +; 32: addu $[[R1]], $sp, $[[R1]] +; 32: sw $ra, 20($[[R1]]) +; 64: daddiu $[[R0:[0-9]+]], $zero, 1 +; 64: dsll $[[R0]], $[[R0]], 48 +; 64: daddiu $[[R0]], $[[R0]], -1 +; 64: dsll $[[R0]], $[[R0]], 16 +; 64: daddiu $[[R0]], $[[R0]], -48 +; 64: daddu $sp, $sp, $[[R0]] +; 64: lui $[[R1:[0-9]+]], 1 +; 64: daddu $[[R1]], $sp, $[[R1]] +; 64: sd $ra, 40($[[R1]]) %agg.tmp = alloca %struct.S1, align 1 %tmp = getelementptr inbounds %struct.S1* %agg.tmp, i32 0, i32 0, i32 0 diff --git a/test/CodeGen/Mips/llcarry.ll b/test/CodeGen/Mips/llcarry.ll new file mode 100644 index 
0000000..7763dae --- /dev/null +++ b/test/CodeGen/Mips/llcarry.ll @@ -0,0 +1,51 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@i = global i64 4294967295, align 8 +@j = global i64 15, align 8 +@ii = global i64 4294967295, align 8 +@k = common global i64 0, align 8 +@l = common global i64 0, align 8 +@m = common global i64 0, align 8 + +define void @test1() nounwind { +entry: + %0 = load i64* @i, align 8 + %1 = load i64* @j, align 8 + %add = add nsw i64 %1, %0 + store i64 %add, i64* @k, align 8 +; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}} +; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} +; 16: move ${{[0-9]+}}, $t8 +; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}} +; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}} + ret void +} + +define void @test2() nounwind { +entry: + %0 = load i64* @i, align 8 + %1 = load i64* @j, align 8 + %sub = sub nsw i64 %0, %1 +; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}} +; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} +; 16: move ${{[0-9]+}}, $t8 +; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}} +; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}} + store i64 %sub, i64* @l, align 8 + ret void +} + +define void @test3() nounwind { +entry: + %0 = load i64* @ii, align 8 + %add = add nsw i64 %0, 15 +; 16: addiu ${{[0-9]+}}, 15 +; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} +; 16: move ${{[0-9]+}}, $t8 +; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}} +; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}} + store i64 %add, i64* @m, align 8 + ret void +} + + diff --git a/test/CodeGen/Mips/longbranch.ll b/test/CodeGen/Mips/longbranch.ll index 0227b88..1a4f79c 100644 --- a/test/CodeGen/Mips/longbranch.ll +++ b/test/CodeGen/Mips/longbranch.ll @@ -6,9 +6,15 @@ define void @foo1(i32 %s) nounwind { entry: ; O32: bal +; O32: lui $1, 0 +; O32: addiu $1, $1, {{[0-9]+}} +; N64: lui $1, 0 +; N64: daddiu $1, $1, 0 +; N64: dsll $1, $1, 16 +; N64: daddiu $1, $1, 0 ; N64: bal -; N64: highest -; N64: higher +; N64: dsll $1, $1, 16 +; N64: daddiu $1, $1, {{[0-9]+}} %tobool = icmp eq i32 %s, 0 br i1 %tobool, label %if.end, label %if.then diff --git a/test/CodeGen/Mips/mips64-sret.ll b/test/CodeGen/Mips/mips64-sret.ll new file mode 100644 index 0000000..e26b022 --- /dev/null +++ b/test/CodeGen/Mips/mips64-sret.ll @@ -0,0 +1,16 @@ +; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 -O3 < %s | FileCheck %s + +%struct.S = type { [8 x i32] } + +@g = common global %struct.S zeroinitializer, align 4 + +define void @f(%struct.S* noalias sret %agg.result) nounwind { +entry: +; CHECK: daddu $2, $zero, $4 + + %0 = bitcast %struct.S* %agg.result to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.S* @g to i8*), i64 32, i32 4, i1 false) + ret void +} + +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind diff --git a/test/CodeGen/Mips/misha.ll b/test/CodeGen/Mips/misha.ll new file mode 100644 index 0000000..80637ed --- /dev/null +++ b/test/CodeGen/Mips/misha.ll @@ -0,0 +1,69 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +define i32 @sumc(i8* nocapture %to, i8* nocapture %from, i32) nounwind { +entry: + %sext = shl i32 %0, 16 + %conv = ashr exact i32 %sext, 16 + %cmp8 = icmp eq i32 %conv, 0 + br i1 %cmp8, label %for.end, label %for.body.lr.ph + +for.body.lr.ph: ; preds = %entry + %.pre = load i8* %to, align 1 + br label %for.body + +for.body: ; preds = %for.body.lr.ph, %for.body + %1 = phi i8 [ %.pre, %for.body.lr.ph ], [ %conv4, %for.body ] + 
%i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ] + %from.addr.09 = phi i8* [ %from, %for.body.lr.ph ], [ %incdec.ptr, %for.body ] + %incdec.ptr = getelementptr inbounds i8* %from.addr.09, i32 1 + %2 = load i8* %from.addr.09, align 1 + %conv27 = zext i8 %2 to i32 + %conv36 = zext i8 %1 to i32 + %add = add nsw i32 %conv36, %conv27 + %conv4 = trunc i32 %add to i8 + store i8 %conv4, i8* %to, align 1 + %inc = add nsw i32 %i.010, 1 + %cmp = icmp eq i32 %inc, %conv + br i1 %cmp, label %for.end, label %for.body +; 16: sumc: +; 16: lbu ${{[0-9]+}}, 0(${{[0-9]+}}) +; 16: lbu ${{[0-9]+}}, 0(${{[0-9]+}}) +; 16: sum: +; 16: lhu ${{[0-9]+}}, 0(${{[0-9]+}}) +; 16: lhu ${{[0-9]+}}, 0(${{[0-9]+}}) + +for.end: ; preds = %for.body, %entry + ret i32 undef +} + +define i32 @sum(i16* nocapture %to, i16* nocapture %from, i32) nounwind { +entry: + %sext = shl i32 %0, 16 + %conv = ashr exact i32 %sext, 16 + %cmp8 = icmp eq i32 %conv, 0 + br i1 %cmp8, label %for.end, label %for.body.lr.ph + +for.body.lr.ph: ; preds = %entry + %.pre = load i16* %to, align 2 + br label %for.body + +for.body: ; preds = %for.body.lr.ph, %for.body + %1 = phi i16 [ %.pre, %for.body.lr.ph ], [ %conv4, %for.body ] + %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ] + %from.addr.09 = phi i16* [ %from, %for.body.lr.ph ], [ %incdec.ptr, %for.body ] + %incdec.ptr = getelementptr inbounds i16* %from.addr.09, i32 1 + %2 = load i16* %from.addr.09, align 2 + %conv27 = zext i16 %2 to i32 + %conv36 = zext i16 %1 to i32 + %add = add nsw i32 %conv36, %conv27 + %conv4 = trunc i32 %add to i16 + store i16 %conv4, i16* %to, align 2 + %inc = add nsw i32 %i.010, 1 + %cmp = icmp eq i32 %inc, %conv + br i1 %cmp, label %for.end, label %for.body + +for.end: ; preds = %for.body, %entry + ret i32 undef +} + + diff --git a/test/CodeGen/Mips/mul.ll b/test/CodeGen/Mips/mul.ll new file mode 100644 index 0000000..4ce801b --- /dev/null +++ b/test/CodeGen/Mips/mul.ll @@ -0,0 +1,17 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@iiii = global i32 5, align 4 +@jjjj = global i32 -6, align 4 +@kkkk = common global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @iiii, align 4 + %1 = load i32* @jjjj, align 4 + %mul = mul nsw i32 %1, %0 +; 16: mult ${{[0-9]+}}, ${{[0-9]+}} +; 16: mflo ${{[0-9]+}} + + store i32 %mul, i32* @kkkk, align 4 + ret void +} diff --git a/test/CodeGen/Mips/mulll.ll b/test/CodeGen/Mips/mulll.ll new file mode 100644 index 0000000..e37b919 --- /dev/null +++ b/test/CodeGen/Mips/mulll.ll @@ -0,0 +1,21 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@iiii = global i64 5, align 8 +@jjjj = global i64 -6, align 8 +@kkkk = common global i64 0, align 8 + +define void @test() nounwind { +entry: + %0 = load i64* @iiii, align 8 + %1 = load i64* @jjjj, align 8 + %mul = mul nsw i64 %1, %0 + store i64 %mul, i64* @kkkk, align 8 +; 16: multu ${{[0-9]+}}, ${{[0-9]+}} +; 16: mfhi ${{[0-9]+}} +; 16: mult ${{[0-9]+}}, ${{[0-9]+}} +; 16: mflo ${{[0-9]+}} +; 16: mult ${{[0-9]+}}, ${{[0-9]+}} +; 16: mflo ${{[0-9]+}} + + ret void +} diff --git a/test/CodeGen/Mips/mulull.ll b/test/CodeGen/Mips/mulull.ll new file mode 100644 index 0000000..4d23c69 --- /dev/null +++ b/test/CodeGen/Mips/mulull.ll @@ -0,0 +1,21 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@iiii = global i64 5, align 8 +@jjjj = global i64 6, align 8 +@kkkk = common global i64 0, align 8 
+@.str = private unnamed_addr constant [20 x i8] c"%lld * %lld = %lld\0A\00", align 1 + +define void @test() nounwind { +entry: + %0 = load i64* @iiii, align 8 + %1 = load i64* @jjjj, align 8 + %mul = mul nsw i64 %1, %0 + store i64 %mul, i64* @kkkk, align 8 +; 16: multu ${{[0-9]+}}, ${{[0-9]+}} +; 16: mfhi ${{[0-9]+}} +; 16: mult ${{[0-9]+}}, ${{[0-9]+}} +; 16: mflo ${{[0-9]+}} +; 16: mult ${{[0-9]+}}, ${{[0-9]+}} +; 16: mflo ${{[0-9]+}} + ret void +} diff --git a/test/CodeGen/Mips/null.ll b/test/CodeGen/Mips/null.ll index 7beae99..00c66a9 100644 --- a/test/CodeGen/Mips/null.ll +++ b/test/CodeGen/Mips/null.ll @@ -8,6 +8,6 @@ entry: ; 16: .set mips16 # @main -; 16: jr $ra +; 16: jrc $ra } diff --git a/test/CodeGen/Mips/o32_cc_byval.ll b/test/CodeGen/Mips/o32_cc_byval.ll index eac0d80..5558ba6 100644 --- a/test/CodeGen/Mips/o32_cc_byval.ll +++ b/test/CodeGen/Mips/o32_cc_byval.ll @@ -119,6 +119,16 @@ entry: ret void } +%struct.S4 = type { [4 x i32] } + +define void @f5(i64 %a0, %struct.S4* nocapture byval %a1) nounwind { +entry: + tail call void @f6(%struct.S4* byval %a1, i64 %a0) nounwind + ret void +} + +declare void @f6(%struct.S4* nocapture byval, i64) + !0 = metadata !{metadata !"int", metadata !1} !1 = metadata !{metadata !"omnipotent char", metadata !2} !2 = metadata !{metadata !"Simple C/C++ TBAA", null} diff --git a/test/CodeGen/Mips/rem.ll b/test/CodeGen/Mips/rem.ll new file mode 100644 index 0000000..b18f85d --- /dev/null +++ b/test/CodeGen/Mips/rem.ll @@ -0,0 +1,19 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@iiii = global i32 103, align 4 +@jjjj = global i32 -4, align 4 +@kkkk = common global i32 0, align 4 + + +define void @test() nounwind { +entry: + %0 = load i32* @iiii, align 4 + %1 = load i32* @jjjj, align 4 + %rem = srem i32 %0, %1 +; 16: div $zero, ${{[0-9]+}}, ${{[0-9]+}} +; 16: mfhi ${{[0-9]+}} + store i32 %rem, i32* @kkkk, align 4 + ret void +} + + diff --git a/test/CodeGen/Mips/remat-immed-load.ll b/test/CodeGen/Mips/remat-immed-load.ll new file mode 100644 index 0000000..d93964b --- /dev/null +++ b/test/CodeGen/Mips/remat-immed-load.ll @@ -0,0 +1,51 @@ +; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=32 +; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 < %s | FileCheck %s -check-prefix=64 + +define void @f0() nounwind { +entry: +; 32: addiu $4, $zero, 1 +; 32: addiu $4, $zero, 1 + + tail call void @foo1(i32 1) nounwind + tail call void @foo1(i32 1) nounwind + ret void +} + +declare void @foo1(i32) + +define void @f3() nounwind { +entry: +; 64: daddiu $4, $zero, 1 +; 64: daddiu $4, $zero, 1 + + tail call void @foo2(i64 1) nounwind + tail call void @foo2(i64 1) nounwind + ret void +} + +declare void @foo2(i64) + +define void @f5() nounwind { +entry: +; 32: lui $4, 1 +; 32: lui $4, 1 + + tail call void @f6(i32 65536) nounwind + tail call void @f6(i32 65536) nounwind + ret void +} + +declare void @f6(i32) + +define void @f7() nounwind { +entry: +; 64: lui $4, 1 +; 64: lui $4, 1 + + tail call void @f8(i64 65536) nounwind + tail call void @f8(i64 65536) nounwind + ret void +} + +declare void @f8(i64) + diff --git a/test/CodeGen/Mips/remu.ll b/test/CodeGen/Mips/remu.ll new file mode 100644 index 0000000..472503c --- /dev/null +++ b/test/CodeGen/Mips/remu.ll @@ -0,0 +1,18 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@iiii = global i32 103, align 4 +@jjjj = global i32 4, align 4 +@kkkk = common global i32 0, align 4 +@.str = private 
unnamed_addr constant [15 x i8] c"%u = %u %% %u\0A\00", align 1 + +define void @test() nounwind { +entry: + %0 = load i32* @iiii, align 4 + %1 = load i32* @jjjj, align 4 + %rem = urem i32 %0, %1 +; 16: divu $zero, ${{[0-9]+}}, ${{[0-9]+}} +; 16: mfhi ${{[0-9]+}} + store i32 %rem, i32* @kkkk, align 4 + ret void +} + diff --git a/test/CodeGen/Mips/return-vector.ll b/test/CodeGen/Mips/return-vector.ll new file mode 100644 index 0000000..739c43c --- /dev/null +++ b/test/CodeGen/Mips/return-vector.ll @@ -0,0 +1,244 @@ +; RUN: llc -march=mipsel < %s | FileCheck %s + + +; Check that function accesses vector return value from stack in cases when +; vector can't be returned in registers. Also check that caller passes in +; register $4 stack address where the vector should be placed. + + +declare <8 x i32> @i8(...) +declare <4 x float> @f4(...) +declare <4 x double> @d4(...) + +define i32 @call_i8() { +entry: + %call = call <8 x i32> (...)* @i8() + %v0 = extractelement <8 x i32> %call, i32 0 + %v1 = extractelement <8 x i32> %call, i32 1 + %v2 = extractelement <8 x i32> %call, i32 2 + %v3 = extractelement <8 x i32> %call, i32 3 + %v4 = extractelement <8 x i32> %call, i32 4 + %v5 = extractelement <8 x i32> %call, i32 5 + %v6 = extractelement <8 x i32> %call, i32 6 + %v7 = extractelement <8 x i32> %call, i32 7 + %add1 = add i32 %v0, %v1 + %add2 = add i32 %v2, %v3 + %add3 = add i32 %v4, %v5 + %add4 = add i32 %v6, %v7 + %add5 = add i32 %add1, %add2 + %add6 = add i32 %add3, %add4 + %add7 = add i32 %add5, %add6 + ret i32 %add7 + +; CHECK: call_i8: +; CHECK: call16(i8) +; CHECK: addiu $4, $sp, 32 +; CHECK: lw $[[R0:[a-z0-9]+]], 60($sp) +; CHECK: lw $[[R1:[a-z0-9]+]], 56($sp) +; CHECK: lw $[[R2:[a-z0-9]+]], 52($sp) +; CHECK: lw $[[R3:[a-z0-9]+]], 48($sp) +; CHECK: lw $[[R4:[a-z0-9]+]], 44($sp) +; CHECK: lw $[[R5:[a-z0-9]+]], 40($sp) +; CHECK: lw $[[R6:[a-z0-9]+]], 36($sp) +; CHECK: lw $[[R7:[a-z0-9]+]], 32($sp) +} + + +define float @call_f4() { +entry: + %call = call <4 x float> (...)* @f4() + %v0 = extractelement <4 x float> %call, i32 0 + %v1 = extractelement <4 x float> %call, i32 1 + %v2 = extractelement <4 x float> %call, i32 2 + %v3 = extractelement <4 x float> %call, i32 3 + %add1 = fadd float %v0, %v1 + %add2 = fadd float %v2, %v3 + %add3 = fadd float %add1, %add2 + ret float %add3 + +; CHECK: call_f4: +; CHECK: call16(f4) +; CHECK: addiu $4, $sp, 16 +; CHECK: lwc1 $[[R0:[a-z0-9]+]], 28($sp) +; CHECK: lwc1 $[[R1:[a-z0-9]+]], 24($sp) +; CHECK: lwc1 $[[R3:[a-z0-9]+]], 20($sp) +; CHECK: lwc1 $[[R4:[a-z0-9]+]], 16($sp) +} + + +define double @call_d4() { +entry: + %call = call <4 x double> (...)* @d4() + %v0 = extractelement <4 x double> %call, i32 0 + %v1 = extractelement <4 x double> %call, i32 1 + %v2 = extractelement <4 x double> %call, i32 2 + %v3 = extractelement <4 x double> %call, i32 3 + %add1 = fadd double %v0, %v1 + %add2 = fadd double %v2, %v3 + %add3 = fadd double %add1, %add2 + ret double %add3 + +; CHECK: call_d4: +; CHECK: call16(d4) +; CHECK: addiu $4, $sp, 32 +; CHECK: ldc1 $[[R0:[a-z0-9]+]], 56($sp) +; CHECK: ldc1 $[[R1:[a-z0-9]+]], 48($sp) +; CHECK: ldc1 $[[R3:[a-z0-9]+]], 40($sp) +; CHECK: ldc1 $[[R4:[a-z0-9]+]], 32($sp) +} + + + +; Check that function accesses vector return value from registers in cases when +; vector can be returned in registers + + +declare <4 x i32> @i4(...) +declare <2 x float> @f2(...) +declare <2 x double> @d2(...) 
+ +define i32 @call_i4() { +entry: + %call = call <4 x i32> (...)* @i4() + %v0 = extractelement <4 x i32> %call, i32 0 + %v1 = extractelement <4 x i32> %call, i32 1 + %v2 = extractelement <4 x i32> %call, i32 2 + %v3 = extractelement <4 x i32> %call, i32 3 + %add1 = add i32 %v0, %v1 + %add2 = add i32 %v2, %v3 + %add3 = add i32 %add1, %add2 + ret i32 %add3 + +; CHECK: call_i4: +; CHECK: call16(i4) +; CHECK-NOT: lw +; CHECK: addu $[[R2:[a-z0-9]+]], $[[R0:[a-z0-9]+]], $[[R1:[a-z0-9]+]] +; CHECK: addu $[[R5:[a-z0-9]+]], $[[R3:[a-z0-9]+]], $[[R4:[a-z0-9]+]] +; CHECK: addu $[[R6:[a-z0-9]+]], $[[R5]], $[[R2]] +} + + +define float @call_f2() { +entry: + %call = call <2 x float> (...)* @f2() + %v0 = extractelement <2 x float> %call, i32 0 + %v1 = extractelement <2 x float> %call, i32 1 + %add1 = fadd float %v0, %v1 + ret float %add1 + +; CHECK: call_f2: +; CHECK: call16(f2) +; CHECK-NOT: lwc1 +; CHECK: add.s $[[R2:[a-z0-9]+]], $[[R0:[a-z0-9]+]], $[[R1:[a-z0-9]+]] +} + + +define double @call_d2() { +entry: + %call = call <2 x double> (...)* @d2() + %v0 = extractelement <2 x double> %call, i32 0 + %v1 = extractelement <2 x double> %call, i32 1 + %add1 = fadd double %v0, %v1 + ret double %add1 + +; CHECK: call_d2: +; CHECK: call16(d2) +; CHECK-NOT: ldc1 +; CHECK: add.d $[[R2:[a-z0-9]+]], $[[R0:[a-z0-9]+]], $[[R1:[a-z0-9]+]] +} + + + +; Check that function returns vector on stack in cases when vector can't be +; returned in registers. Also check that vector is placed on stack starting +; from the address in register $4. + + +define <8 x i32> @return_i8() { +entry: + ret <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + +; CHECK: return_i8: +; CHECK: sw $[[R0:[a-z0-9]+]], 28($4) +; CHECK: sw $[[R1:[a-z0-9]+]], 24($4) +; CHECK: sw $[[R2:[a-z0-9]+]], 20($4) +; CHECK: sw $[[R3:[a-z0-9]+]], 16($4) +; CHECK: sw $[[R4:[a-z0-9]+]], 12($4) +; CHECK: sw $[[R5:[a-z0-9]+]], 8($4) +; CHECK: sw $[[R6:[a-z0-9]+]], 4($4) +; CHECK: sw $[[R7:[a-z0-9]+]], 0($4) +} + + +define <4 x float> @return_f4(float %a, float %b, float %c, float %d) { +entry: + %vecins1 = insertelement <4 x float> undef, float %a, i32 0 + %vecins2 = insertelement <4 x float> %vecins1, float %b, i32 1 + %vecins3 = insertelement <4 x float> %vecins2, float %c, i32 2 + %vecins4 = insertelement <4 x float> %vecins3, float %d, i32 3 + ret <4 x float> %vecins4 + +; CHECK: return_f4: +; CHECK: lwc1 $[[R0:[a-z0-9]+]], 16($sp) +; CHECK: swc1 $[[R0]], 12($4) +; CHECK: sw $7, 8($4) +; CHECK: sw $6, 4($4) +; CHECK: sw $5, 0($4) +} + + +define <4 x double> @return_d4(double %a, double %b, double %c, double %d) { +entry: + %vecins1 = insertelement <4 x double> undef, double %a, i32 0 + %vecins2 = insertelement <4 x double> %vecins1, double %b, i32 1 + %vecins3 = insertelement <4 x double> %vecins2, double %c, i32 2 + %vecins4 = insertelement <4 x double> %vecins3, double %d, i32 3 + ret <4 x double> %vecins4 + +; CHECK: return_d4: +; CHECK: sdc1 $[[R0:[a-z0-9]+]], 24($4) +; CHECK: sdc1 $[[R1:[a-z0-9]+]], 16($4) +; CHECK: sdc1 $[[R2:[a-z0-9]+]], 8($4) +; CHECK: sdc1 $[[R3:[a-z0-9]+]], 0($4) +} + + + +; Check that function returns vector in registers in cases when vector can be +; returned in registers. 
+ + +define <4 x i32> @return_i4() { +entry: + ret <4 x i32> <i32 0, i32 1, i32 2, i32 3> + +; CHECK: return_i4: +; CHECK: addiu $2, $zero, 0 +; CHECK: addiu $3, $zero, 1 +; CHECK: addiu $4, $zero, 2 +; CHECK: addiu $5, $zero, 3 +} + + +define <2 x float> @return_f2(float %a, float %b) { +entry: + %vecins1 = insertelement <2 x float> undef, float %a, i32 0 + %vecins2 = insertelement <2 x float> %vecins1, float %b, i32 1 + ret <2 x float> %vecins2 + +; CHECK: return_f2: +; CHECK: mov.s $f0, $f12 +; CHECK: mov.s $f2, $f14 +} + + +define <2 x double> @return_d2(double %a, double %b) { +entry: + %vecins1 = insertelement <2 x double> undef, double %a, i32 0 + %vecins2 = insertelement <2 x double> %vecins1, double %b, i32 1 + ret <2 x double> %vecins2 + +; CHECK: return_d2: +; CHECK: mov.d $f0, $f12 +; CHECK: mov.d $f2, $f14 +} diff --git a/test/CodeGen/Mips/selpat.ll b/test/CodeGen/Mips/selpat.ll new file mode 100644 index 0000000..cda0c96 --- /dev/null +++ b/test/CodeGen/Mips/selpat.ll @@ -0,0 +1,350 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@t = global i32 10, align 4 +@f = global i32 199, align 4 +@a = global i32 1, align 4 +@b = global i32 10, align 4 +@c = global i32 1, align 4 +@z1 = common global i32 0, align 4 +@z2 = common global i32 0, align 4 +@z3 = common global i32 0, align 4 +@z4 = common global i32 0, align 4 + +define void @calc_seleq() nounwind { +entry: + %0 = load i32* @a, align 4 + %1 = load i32* @b, align 4 + %cmp = icmp eq i32 %0, %1 + %2 = load i32* @f, align 4 + %3 = load i32* @t, align 4 + %cond = select i1 %cmp, i32 %2, i32 %3 + store i32 %cond, i32* @z1, align 4 +; 16: cmp ${{[0-9]+}}, ${{[0-9]+}} +; 16: bteqz .+4 +; 16: move ${{[0-9]+}}, ${{[0-9]+}} + store i32 %cond, i32* @z2, align 4 + %4 = load i32* @c, align 4 + %cmp6 = icmp eq i32 %4, %0 + %cond10 = select i1 %cmp6, i32 %3, i32 %2 + store i32 %cond10, i32* @z3, align 4 + store i32 %cond10, i32* @z4, align 4 + ret void +} + + +define void @calc_seleqk() nounwind { +entry: + %0 = load i32* @a, align 4 + %cmp = icmp eq i32 %0, 1 + %1 = load i32* @t, align 4 + %2 = load i32* @f, align 4 + %cond = select i1 %cmp, i32 %1, i32 %2 + store i32 %cond, i32* @z1, align 4 +; 16: cmpi ${{[0-9]+}}, 1 +; 16: bteqz .+4 +; 16: move ${{[0-9]+}}, ${{[0-9]+}} + %cmp1 = icmp eq i32 %0, 10 + %cond5 = select i1 %cmp1, i32 %2, i32 %1 + store i32 %cond5, i32* @z2, align 4 + %3 = load i32* @b, align 4 + %cmp6 = icmp eq i32 %3, 3 + %cond10 = select i1 %cmp6, i32 %2, i32 %1 + store i32 %cond10, i32* @z3, align 4 +; 16: cmpi ${{[0-9]+}}, 10 +; 16: bteqz .+4 +; 16: move ${{[0-9]+}}, ${{[0-9]+}} + %cmp11 = icmp eq i32 %3, 10 + %cond15 = select i1 %cmp11, i32 %1, i32 %2 + store i32 %cond15, i32* @z4, align 4 + ret void +} + +define void @calc_seleqz() nounwind { +entry: + %0 = load i32* @a, align 4 + %cmp = icmp eq i32 %0, 0 + %1 = load i32* @t, align 4 + %2 = load i32* @f, align 4 + %cond = select i1 %cmp, i32 %1, i32 %2 + store i32 %cond, i32* @z1, align 4 +; 16: beqz ${{[0-9]+}}, .+4 +; 16: move ${{[0-9]+}}, ${{[0-9]+}} + %3 = load i32* @b, align 4 + %cmp1 = icmp eq i32 %3, 0 + %cond5 = select i1 %cmp1, i32 %2, i32 %1 + store i32 %cond5, i32* @z2, align 4 + %4 = load i32* @c, align 4 + %cmp6 = icmp eq i32 %4, 0 + %cond10 = select i1 %cmp6, i32 %1, i32 %2 + store i32 %cond10, i32* @z3, align 4 + store i32 %cond, i32* @z4, align 4 + ret void +} + +define void @calc_selge() nounwind { +entry: + %0 = load i32* @a, align 4 + %1 = load i32* @b, align 4 + %cmp = icmp sge i32 %0, %1 + 
%2 = load i32* @f, align 4 + %3 = load i32* @t, align 4 + %cond = select i1 %cmp, i32 %2, i32 %3 + store i32 %cond, i32* @z1, align 4 +; 16: slt ${{[0-9]+}}, ${{[0-9]+}} +; 16: bteqz .+4 +; 16: move ${{[0-9]+}}, ${{[0-9]+}} + %cmp1 = icmp sge i32 %1, %0 + %cond5 = select i1 %cmp1, i32 %3, i32 %2 + store i32 %cond5, i32* @z2, align 4 + %4 = load i32* @c, align 4 + %cmp6 = icmp sge i32 %4, %0 + %cond10 = select i1 %cmp6, i32 %3, i32 %2 + store i32 %cond10, i32* @z3, align 4 + %cmp11 = icmp sge i32 %0, %4 + %cond15 = select i1 %cmp11, i32 %3, i32 %2 + store i32 %cond15, i32* @z4, align 4 + ret void +} + +define i32 @calc_selgt() nounwind { +entry: + %0 = load i32* @a, align 4 + %1 = load i32* @b, align 4 + %cmp = icmp sgt i32 %0, %1 +; 16: slt ${{[0-9]+}}, ${{[0-9]+}} +; 16: btnez .+4 +; 16: move ${{[0-9]+}}, ${{[0-9]+}} + %2 = load i32* @f, align 4 + %3 = load i32* @t, align 4 + %cond = select i1 %cmp, i32 %2, i32 %3 + store i32 %cond, i32* @z1, align 4 + %cmp1 = icmp sgt i32 %1, %0 + %cond5 = select i1 %cmp1, i32 %3, i32 %2 + store i32 %cond5, i32* @z2, align 4 + %4 = load i32* @c, align 4 + %cmp6 = icmp sgt i32 %4, %0 + %cond10 = select i1 %cmp6, i32 %2, i32 %3 + store i32 %cond10, i32* @z3, align 4 + %cmp11 = icmp sgt i32 %0, %4 + %cond15 = select i1 %cmp11, i32 %2, i32 %3 + store i32 %cond15, i32* @z4, align 4 + ret i32 undef +} + +define void @calc_selle() nounwind { +entry: + %0 = load i32* @a, align 4 + %1 = load i32* @b, align 4 + %cmp = icmp sle i32 %0, %1 + %2 = load i32* @t, align 4 + %3 = load i32* @f, align 4 + %cond = select i1 %cmp, i32 %2, i32 %3 + store i32 %cond, i32* @z1, align 4 +; 16: slt ${{[0-9]+}}, ${{[0-9]+}} +; 16: bteqz .+4 +; 16: move ${{[0-9]+}}, ${{[0-9]+}} + %cmp1 = icmp sle i32 %1, %0 + %cond5 = select i1 %cmp1, i32 %3, i32 %2 + store i32 %cond5, i32* @z2, align 4 + %4 = load i32* @c, align 4 + %cmp6 = icmp sle i32 %4, %0 + %cond10 = select i1 %cmp6, i32 %2, i32 %3 + store i32 %cond10, i32* @z3, align 4 + %cmp11 = icmp sle i32 %0, %4 + %cond15 = select i1 %cmp11, i32 %2, i32 %3 + store i32 %cond15, i32* @z4, align 4 + ret void +} + +define void @calc_selltk() nounwind { +entry: + %0 = load i32* @a, align 4 + %cmp = icmp slt i32 %0, 10 + %1 = load i32* @t, align 4 + %2 = load i32* @f, align 4 + %cond = select i1 %cmp, i32 %1, i32 %2 + store i32 %cond, i32* @z1, align 4 +; 16: slti ${{[0-9]+}}, {{[0-9]+}} +; 16: btnez .+4 +; 16: move ${{[0-9]+}}, ${{[0-9]+}} + %3 = load i32* @b, align 4 + %cmp1 = icmp slt i32 %3, 2 + %cond5 = select i1 %cmp1, i32 %2, i32 %1 + store i32 %cond5, i32* @z2, align 4 + %4 = load i32* @c, align 4 + %cmp6 = icmp sgt i32 %4, 2 + %cond10 = select i1 %cmp6, i32 %2, i32 %1 + store i32 %cond10, i32* @z3, align 4 + %cmp11 = icmp sgt i32 %0, 2 + %cond15 = select i1 %cmp11, i32 %2, i32 %1 + store i32 %cond15, i32* @z4, align 4 + ret void +} + + +define void @calc_selne() nounwind { +entry: + %0 = load i32* @a, align 4 + %1 = load i32* @b, align 4 + %cmp = icmp ne i32 %0, %1 + %2 = load i32* @t, align 4 + %3 = load i32* @f, align 4 + %cond = select i1 %cmp, i32 %2, i32 %3 + store i32 %cond, i32* @z1, align 4 +; 16: cmp ${{[0-9]+}}, ${{[0-9]+}} +; 16: btnez .+4 +; 16: move ${{[0-9]+}}, ${{[0-9]+}} + store i32 %cond, i32* @z2, align 4 + %4 = load i32* @c, align 4 + %cmp6 = icmp ne i32 %4, %0 + %cond10 = select i1 %cmp6, i32 %3, i32 %2 + store i32 %cond10, i32* @z3, align 4 + store i32 %cond10, i32* @z4, align 4 + ret void +} + +define void @calc_selnek() nounwind { +entry: + %0 = load i32* @a, align 4 + %cmp = icmp ne i32 %0, 1 + %1 = load i32* 
@f, align 4 + %2 = load i32* @t, align 4 + %cond = select i1 %cmp, i32 %1, i32 %2 + store i32 %cond, i32* @z1, align 4 +; 16: cmpi ${{[0-9]+}}, 1 +; 16: btnez .+4 +; 16: move ${{[0-9]+}}, ${{[0-9]+}} + %cmp1 = icmp ne i32 %0, 10 + %cond5 = select i1 %cmp1, i32 %2, i32 %1 + store i32 %cond5, i32* @z2, align 4 + %3 = load i32* @b, align 4 + %cmp6 = icmp ne i32 %3, 3 + %cond10 = select i1 %cmp6, i32 %2, i32 %1 + store i32 %cond10, i32* @z3, align 4 +; 16: cmpi ${{[0-9]+}}, 10 +; 16: btnez .+4 +; 16: move ${{[0-9]+}}, ${{[0-9]+}} + %cmp11 = icmp ne i32 %3, 10 + %cond15 = select i1 %cmp11, i32 %1, i32 %2 + store i32 %cond15, i32* @z4, align 4 + ret void +} + +define void @calc_selnez() nounwind { +entry: + %0 = load i32* @a, align 4 + %cmp = icmp ne i32 %0, 0 + %1 = load i32* @f, align 4 + %2 = load i32* @t, align 4 + %cond = select i1 %cmp, i32 %1, i32 %2 + store i32 %cond, i32* @z1, align 4 +; 16: bnez ${{[0-9]+}}, .+4 +; 16: move ${{[0-9]+}}, ${{[0-9]+}} + %3 = load i32* @b, align 4 + %cmp1 = icmp ne i32 %3, 0 + %cond5 = select i1 %cmp1, i32 %2, i32 %1 + store i32 %cond5, i32* @z2, align 4 + %4 = load i32* @c, align 4 + %cmp6 = icmp ne i32 %4, 0 + %cond10 = select i1 %cmp6, i32 %1, i32 %2 + store i32 %cond10, i32* @z3, align 4 + store i32 %cond, i32* @z4, align 4 + ret void +} + +define void @calc_selnez2() nounwind { +entry: + %0 = load i32* @a, align 4 + %tobool = icmp ne i32 %0, 0 + %1 = load i32* @f, align 4 + %2 = load i32* @t, align 4 + %cond = select i1 %tobool, i32 %1, i32 %2 + store i32 %cond, i32* @z1, align 4 +; 16: bnez ${{[0-9]+}}, .+4 +; 16: move ${{[0-9]+}}, ${{[0-9]+}} + %3 = load i32* @b, align 4 + %tobool1 = icmp ne i32 %3, 0 + %cond5 = select i1 %tobool1, i32 %2, i32 %1 + store i32 %cond5, i32* @z2, align 4 + %4 = load i32* @c, align 4 + %tobool6 = icmp ne i32 %4, 0 + %cond10 = select i1 %tobool6, i32 %1, i32 %2 + store i32 %cond10, i32* @z3, align 4 + store i32 %cond, i32* @z4, align 4 + ret void +} + +define void @calc_seluge() nounwind { +entry: + %0 = load i32* @a, align 4 + %1 = load i32* @b, align 4 + %cmp = icmp uge i32 %0, %1 + %2 = load i32* @f, align 4 + %3 = load i32* @t, align 4 + %cond = select i1 %cmp, i32 %2, i32 %3 + store i32 %cond, i32* @z1, align 4 +; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} +; 16: bteqz .+4 +; 16: move ${{[0-9]+}}, ${{[0-9]+}} + %cmp1 = icmp uge i32 %1, %0 + %cond5 = select i1 %cmp1, i32 %3, i32 %2 + store i32 %cond5, i32* @z2, align 4 + %4 = load i32* @c, align 4 + %cmp6 = icmp uge i32 %4, %0 + %cond10 = select i1 %cmp6, i32 %3, i32 %2 + store i32 %cond10, i32* @z3, align 4 + %cmp11 = icmp uge i32 %0, %4 + %cond15 = select i1 %cmp11, i32 %3, i32 %2 + store i32 %cond15, i32* @z4, align 4 + ret void +} + +define void @calc_selugt() nounwind { +entry: + %0 = load i32* @a, align 4 + %1 = load i32* @b, align 4 + %cmp = icmp ugt i32 %0, %1 + %2 = load i32* @f, align 4 + %3 = load i32* @t, align 4 + %cond = select i1 %cmp, i32 %2, i32 %3 + store i32 %cond, i32* @z1, align 4 +; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} +; 16: btnez .+4 +; 16: move ${{[0-9]+}}, ${{[0-9]+}} + %cmp1 = icmp ugt i32 %1, %0 + %cond5 = select i1 %cmp1, i32 %3, i32 %2 + store i32 %cond5, i32* @z2, align 4 + %4 = load i32* @c, align 4 + %cmp6 = icmp ugt i32 %4, %0 + %cond10 = select i1 %cmp6, i32 %2, i32 %3 + store i32 %cond10, i32* @z3, align 4 + %cmp11 = icmp ugt i32 %0, %4 + %cond15 = select i1 %cmp11, i32 %2, i32 %3 + store i32 %cond15, i32* @z4, align 4 + ret void +} + +define void @calc_selule() nounwind { +entry: + %0 = load i32* @a, align 4 + %1 = load i32* @b, align 4 + %cmp 
= icmp ule i32 %0, %1 + %2 = load i32* @t, align 4 + %3 = load i32* @f, align 4 + %cond = select i1 %cmp, i32 %2, i32 %3 + store i32 %cond, i32* @z1, align 4 +; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} +; 16: bteqz .+4 +; 16: move ${{[0-9]+}}, ${{[0-9]+}} + %cmp1 = icmp ule i32 %1, %0 + %cond5 = select i1 %cmp1, i32 %3, i32 %2 + store i32 %cond5, i32* @z2, align 4 + %4 = load i32* @c, align 4 + %cmp6 = icmp ule i32 %4, %0 + %cond10 = select i1 %cmp6, i32 %2, i32 %3 + store i32 %cond10, i32* @z3, align 4 + %cmp11 = icmp ule i32 %0, %4 + %cond15 = select i1 %cmp11, i32 %2, i32 %3 + store i32 %cond15, i32* @z4, align 4 + ret void +} diff --git a/test/CodeGen/Mips/seteq.ll b/test/CodeGen/Mips/seteq.ll new file mode 100644 index 0000000..da840c8 --- /dev/null +++ b/test/CodeGen/Mips/seteq.ll @@ -0,0 +1,21 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@i = global i32 1, align 4 +@j = global i32 10, align 4 +@k = global i32 1, align 4 +@r1 = common global i32 0, align 4 +@r2 = common global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @i, align 4 + %1 = load i32* @k, align 4 + %cmp = icmp eq i32 %0, %1 + %conv = zext i1 %cmp to i32 + store i32 %conv, i32* @r1, align 4 +; 16: xor $[[REGISTER:[0-9A-Ba-b_]+]], ${{[0-9]+}} +; 16: sltiu $[[REGISTER:[0-9A-Ba-b_]+]], 1 +; 16: move ${{[0-9]+}}, $t8 + ret void +} + diff --git a/test/CodeGen/Mips/seteqz.ll b/test/CodeGen/Mips/seteqz.ll new file mode 100644 index 0000000..d445be6 --- /dev/null +++ b/test/CodeGen/Mips/seteqz.ll @@ -0,0 +1,24 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@i = global i32 0, align 4 +@j = global i32 99, align 4 +@r1 = common global i32 0, align 4 +@r2 = common global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @i, align 4 + %cmp = icmp eq i32 %0, 0 + %conv = zext i1 %cmp to i32 + store i32 %conv, i32* @r1, align 4 +; 16: sltiu ${{[0-9]+}}, 1 +; 16: move ${{[0-9]+}}, $t8 + %1 = load i32* @j, align 4 + %cmp1 = icmp eq i32 %1, 99 + %conv2 = zext i1 %cmp1 to i32 + store i32 %conv2, i32* @r2, align 4 +; 16: xor $[[REGISTER:[0-9A-Ba-b_]+]], ${{[0-9]+}} +; 16: sltiu $[[REGISTER:[0-9A-Ba-b_]+]], 1 +; 16: move ${{[0-9]+}}, $t8 + ret void +} diff --git a/test/CodeGen/Mips/setge.ll b/test/CodeGen/Mips/setge.ll new file mode 100644 index 0000000..94b499b --- /dev/null +++ b/test/CodeGen/Mips/setge.ll @@ -0,0 +1,27 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@j = global i32 -5, align 4 +@k = global i32 10, align 4 +@l = global i32 20, align 4 +@m = global i32 10, align 4 +@r1 = common global i32 0, align 4 +@r2 = common global i32 0, align 4 +@r3 = common global i32 0, align 4 +@.str = private unnamed_addr constant [22 x i8] c"1 = %i\0A1 = %i\0A0 = %i\0A\00", align 1 + +define void @test() nounwind { +entry: + %0 = load i32* @k, align 4 + %1 = load i32* @j, align 4 + %cmp = icmp sge i32 %0, %1 + %conv = zext i1 %cmp to i32 + store i32 %conv, i32* @r1, align 4 +; 16: slt ${{[0-9]+}}, ${{[0-9]+}} +; 16: move $[[REGISTER:[0-9]+]], $t8 +; 16: xor $[[REGISTER]], ${{[0-9]+}} + %2 = load i32* @m, align 4 + %cmp1 = icmp sge i32 %0, %2 + %conv2 = zext i1 %cmp1 to i32 + store i32 %conv2, i32* @r2, align 4 + ret void +} diff --git a/test/CodeGen/Mips/setgek.ll b/test/CodeGen/Mips/setgek.ll new file mode 100644 index 0000000..b6bae09 --- /dev/null +++ b/test/CodeGen/Mips/setgek.ll @@ -0,0 +1,18 @@ +; RUN: llc 
-march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@k = global i32 10, align 4 +@r1 = common global i32 0, align 4 +@r2 = common global i32 0, align 4 +@r3 = common global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @k, align 4 + %cmp = icmp sgt i32 %0, -32769 + %conv = zext i1 %cmp to i32 + store i32 %conv, i32* @r1, align 4 +; 16: slti ${{[0-9]+}}, -32768 +; 16: move ${{[0-9]+}}, $t8 +; 16: xor ${{[0-9]+}}, ${{[0-9]+}} + ret void +} diff --git a/test/CodeGen/Mips/setle.ll b/test/CodeGen/Mips/setle.ll new file mode 100644 index 0000000..f36fb43 --- /dev/null +++ b/test/CodeGen/Mips/setle.ll @@ -0,0 +1,26 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@j = global i32 -5, align 4 +@k = global i32 10, align 4 +@l = global i32 20, align 4 +@m = global i32 10, align 4 +@r1 = common global i32 0, align 4 +@r2 = common global i32 0, align 4 +@r3 = common global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @j, align 4 + %1 = load i32* @k, align 4 + %cmp = icmp sle i32 %0, %1 + %conv = zext i1 %cmp to i32 + store i32 %conv, i32* @r1, align 4 +; 16: slt ${{[0-9]+}}, ${{[0-9]+}} +; 16: move $[[REGISTER:[0-9]+]], $t8 +; 16: xor $[[REGISTER]], ${{[0-9]+}} + %2 = load i32* @m, align 4 + %cmp1 = icmp sle i32 %2, %1 + %conv2 = zext i1 %cmp1 to i32 + store i32 %conv2, i32* @r2, align 4 + ret void +} diff --git a/test/CodeGen/Mips/setlt.ll b/test/CodeGen/Mips/setlt.ll new file mode 100644 index 0000000..435be8e --- /dev/null +++ b/test/CodeGen/Mips/setlt.ll @@ -0,0 +1,21 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@j = global i32 -5, align 4 +@k = global i32 10, align 4 +@l = global i32 20, align 4 +@m = global i32 10, align 4 +@r1 = common global i32 0, align 4 +@r2 = common global i32 0, align 4 +@r3 = common global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @j, align 4 + %1 = load i32* @k, align 4 + %cmp = icmp slt i32 %0, %1 + %conv = zext i1 %cmp to i32 + store i32 %conv, i32* @r1, align 4 +; 16: slt ${{[0-9]+}}, ${{[0-9]+}} +; 16: move ${{[0-9]+}}, $t8 + ret void +} diff --git a/test/CodeGen/Mips/setltk.ll b/test/CodeGen/Mips/setltk.ll new file mode 100644 index 0000000..c0b610e --- /dev/null +++ b/test/CodeGen/Mips/setltk.ll @@ -0,0 +1,20 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@j = global i32 -5, align 4 +@k = global i32 10, align 4 +@l = global i32 20, align 4 +@m = global i32 10, align 4 +@r1 = common global i32 0, align 4 +@r2 = common global i32 0, align 4 +@r3 = common global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @j, align 4 + %cmp = icmp slt i32 %0, 10 + %conv = zext i1 %cmp to i32 + store i32 %conv, i32* @r1, align 4 +; 16: slti $[[REGISTER:[0-9]+]], 10 +; 16: move $[[REGISTER]], $t8 + ret void +} diff --git a/test/CodeGen/Mips/setne.ll b/test/CodeGen/Mips/setne.ll new file mode 100644 index 0000000..6460c83 --- /dev/null +++ b/test/CodeGen/Mips/setne.ll @@ -0,0 +1,20 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@i = global i32 1, align 4 +@j = global i32 10, align 4 +@k = global i32 1, align 4 +@r1 = common global i32 0, align 4 +@r2 = common global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @i, align 4 + %1 = load i32* @k, align 4 + %cmp = 
icmp ne i32 %0, %1 + %conv = zext i1 %cmp to i32 + store i32 %conv, i32* @r1, align 4 +; 16: xor $[[REGISTER:[0-9]+]], ${{[0-9]+}} +; 16: sltu ${{[0-9]+}}, $[[REGISTER]] +; 16: move ${{[0-9]+}}, $t8 + ret void +} diff --git a/test/CodeGen/Mips/setuge.ll b/test/CodeGen/Mips/setuge.ll new file mode 100644 index 0000000..ac72b66 --- /dev/null +++ b/test/CodeGen/Mips/setuge.ll @@ -0,0 +1,26 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@j = global i32 5, align 4 +@k = global i32 10, align 4 +@l = global i32 20, align 4 +@m = global i32 10, align 4 +@r1 = common global i32 0, align 4 +@r2 = common global i32 0, align 4 +@r3 = common global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @k, align 4 + %1 = load i32* @j, align 4 + %cmp = icmp uge i32 %0, %1 + %conv = zext i1 %cmp to i32 + store i32 %conv, i32* @r1, align 4 +; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} +; 16: move $[[REGISTER:[0-9]+]], $t8 +; 16: xor $[[REGISTER]], ${{[0-9]+}} + %2 = load i32* @m, align 4 + %cmp1 = icmp uge i32 %0, %2 + %conv2 = zext i1 %cmp1 to i32 + store i32 %conv2, i32* @r2, align 4 + ret void +} diff --git a/test/CodeGen/Mips/setugt.ll b/test/CodeGen/Mips/setugt.ll new file mode 100644 index 0000000..328f0e3 --- /dev/null +++ b/test/CodeGen/Mips/setugt.ll @@ -0,0 +1,21 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@j = global i32 5, align 4 +@k = global i32 10, align 4 +@l = global i32 20, align 4 +@m = global i32 10, align 4 +@r1 = common global i32 0, align 4 +@r2 = common global i32 0, align 4 +@r3 = common global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @k, align 4 + %1 = load i32* @j, align 4 + %cmp = icmp ugt i32 %0, %1 + %conv = zext i1 %cmp to i32 + store i32 %conv, i32* @r1, align 4 +; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} +; 16: move ${{[0-9]+}}, $t8 + ret void +} diff --git a/test/CodeGen/Mips/setule.ll b/test/CodeGen/Mips/setule.ll new file mode 100644 index 0000000..792f2ae --- /dev/null +++ b/test/CodeGen/Mips/setule.ll @@ -0,0 +1,26 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@j = global i32 5, align 4 +@k = global i32 10, align 4 +@l = global i32 20, align 4 +@m = global i32 10, align 4 +@r1 = common global i32 0, align 4 +@r2 = common global i32 0, align 4 +@r3 = common global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @j, align 4 + %1 = load i32* @k, align 4 + %cmp = icmp ule i32 %0, %1 + %conv = zext i1 %cmp to i32 + store i32 %conv, i32* @r1, align 4 +; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} +; 16: move $[[REGISTER:[0-9]+]], $t8 +; 16: xor $[[REGISTER]], ${{[0-9]+}} + %2 = load i32* @m, align 4 + %cmp1 = icmp ule i32 %2, %1 + %conv2 = zext i1 %cmp1 to i32 + store i32 %conv2, i32* @r2, align 4 + ret void +} diff --git a/test/CodeGen/Mips/setult.ll b/test/CodeGen/Mips/setult.ll new file mode 100644 index 0000000..56d2e8d --- /dev/null +++ b/test/CodeGen/Mips/setult.ll @@ -0,0 +1,21 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@j = global i32 5, align 4 +@k = global i32 10, align 4 +@l = global i32 20, align 4 +@m = global i32 10, align 4 +@r1 = common global i32 0, align 4 +@r2 = common global i32 0, align 4 +@r3 = common global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @j, align 4 + %1 = load i32* @k, align 4 + %cmp = icmp ult i32 %0, %1 + 
%conv = zext i1 %cmp to i32 + store i32 %conv, i32* @r1, align 4 +; 16: sltu ${{[0-9]+}}, ${{[0-9]+}} +; 16: move ${{[0-9]+}}, $t8 + ret void +} diff --git a/test/CodeGen/Mips/setultk.ll b/test/CodeGen/Mips/setultk.ll new file mode 100644 index 0000000..75b270e --- /dev/null +++ b/test/CodeGen/Mips/setultk.ll @@ -0,0 +1,20 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@j = global i32 5, align 4 +@k = global i32 10, align 4 +@l = global i32 20, align 4 +@m = global i32 10, align 4 +@r1 = common global i32 0, align 4 +@r2 = common global i32 0, align 4 +@r3 = common global i32 0, align 4 + +define void @test() nounwind { +entry: + %0 = load i32* @j, align 4 + %cmp = icmp ult i32 %0, 10 + %conv = zext i1 %cmp to i32 + store i32 %conv, i32* @r1, align 4 +; 16: sltiu $[[REGISTER:[0-9]+]], 10 +; 16: move $[[REGISTER]], $t8 + ret void +} diff --git a/test/CodeGen/Mips/small-section-reserve-gp.ll b/test/CodeGen/Mips/small-section-reserve-gp.ll new file mode 100644 index 0000000..03503fb --- /dev/null +++ b/test/CodeGen/Mips/small-section-reserve-gp.ll @@ -0,0 +1,12 @@ +; RUN: llc -mtriple=mipsel-sde-elf -march=mipsel -relocation-model=static < %s \ +; RUN: | FileCheck %s + +@i = internal unnamed_addr global i32 0, align 4 + +define i32 @geti() nounwind readonly { +entry: +; CHECK: lw ${{[0-9]+}}, %gp_rel(i)($gp) + %0 = load i32* @i, align 4 + ret i32 %0 +} + diff --git a/test/CodeGen/Mips/stchar.ll b/test/CodeGen/Mips/stchar.ll new file mode 100644 index 0000000..c00c9fd --- /dev/null +++ b/test/CodeGen/Mips/stchar.ll @@ -0,0 +1,90 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16_h +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16_b + +@.str = private unnamed_addr constant [9 x i8] c"%hd %c \0A\00", align 1 +@sp = common global i16* null, align 4 +@cp = common global i8* null, align 4 + +define void @p1(i16 signext %s, i8 signext %c) nounwind { +entry: + %conv = sext i16 %s to i32 + %conv1 = sext i8 %c to i32 + %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i32 %conv, i32 %conv1) nounwind + ret void +} + +declare i32 @printf(i8* nocapture, ...) 
nounwind + +define void @p2() nounwind { +entry: + %0 = load i16** @sp, align 4 + %1 = load i16* %0, align 2 + %2 = load i8** @cp, align 4 + %3 = load i8* %2, align 1 + %conv.i = sext i16 %1 to i32 + %conv1.i = sext i8 %3 to i32 + %call.i = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i32 %conv.i, i32 %conv1.i) nounwind + %4 = load i16** @sp, align 4 + store i16 32, i16* %4, align 2 + %5 = load i8** @cp, align 4 + store i8 97, i8* %5, align 1 + ret void +} + +define void @test() nounwind { +entry: + %s = alloca i16, align 4 + %c = alloca i8, align 4 + store i16 16, i16* %s, align 4 + store i8 99, i8* %c, align 4 + store i16* %s, i16** @sp, align 4 + store i8* %c, i8** @cp, align 4 + %call.i.i = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i32 16, i32 99) nounwind + %0 = load i16** @sp, align 4 + store i16 32, i16* %0, align 2 + %1 = load i8** @cp, align 4 + store i8 97, i8* %1, align 1 + %2 = load i16* %s, align 4 + %3 = load i8* %c, align 4 + %conv.i = sext i16 %2 to i32 + %conv1.i = sext i8 %3 to i32 + %call.i = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i32 %conv.i, i32 %conv1.i) nounwind + ret void +; 16_b: test: +; 16_h: test: +; 16_b: sb ${{[0-9]+}}, [[offset1:[0-9]+]](${{[0-9]+}}) +; 16_b: lb ${{[0-9]+}}, [[offset1]](${{[0-9]+}}) +; 16_h: sh ${{[0-9]+}}, [[offset2:[0-9]+]](${{[0-9]+}}) +; 16_h: lh ${{[0-9]+}}, [[offset2]](${{[0-9]+}}) +} + +define i32 @main() nounwind { +entry: + %s.i = alloca i16, align 4 + %c.i = alloca i8, align 4 + %0 = bitcast i16* %s.i to i8* + call void @llvm.lifetime.start(i64 -1, i8* %0) nounwind + call void @llvm.lifetime.start(i64 -1, i8* %c.i) nounwind + store i16 16, i16* %s.i, align 4 + store i8 99, i8* %c.i, align 4 + store i16* %s.i, i16** @sp, align 4 + store i8* %c.i, i8** @cp, align 4 + %call.i.i.i = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i32 16, i32 99) nounwind + %1 = load i16** @sp, align 4 + store i16 32, i16* %1, align 2 + %2 = load i8** @cp, align 4 + store i8 97, i8* %2, align 1 + %3 = load i16* %s.i, align 4 + %4 = load i8* %c.i, align 4 + %conv.i.i = sext i16 %3 to i32 + %conv1.i.i = sext i8 %4 to i32 + %call.i.i = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i32 %conv.i.i, i32 %conv1.i.i) nounwind + call void @llvm.lifetime.end(i64 -1, i8* %0) nounwind + call void @llvm.lifetime.end(i64 -1, i8* %c.i) nounwind + ret i32 0 +} + +declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind + +declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind + diff --git a/test/CodeGen/Mips/stldst.ll b/test/CodeGen/Mips/stldst.ll new file mode 100644 index 0000000..4182b9e --- /dev/null +++ b/test/CodeGen/Mips/stldst.ll @@ -0,0 +1,41 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 + +@kkkk = global i32 67, align 4 +@llll = global i32 33, align 4 +@mmmm = global i32 44, align 4 +@nnnn = global i32 55, align 4 +@oooo = global i32 32, align 4 +@pppp = global i32 41, align 4 +@qqqq = global i32 59, align 4 +@rrrr = global i32 60, align 4 +@.str = private unnamed_addr constant [32 x i8] c"%i %i %i %i %i %i %i %i %i %i \0A\00", align 1 + +define i32 @main() nounwind { +entry: + %0 = load i32* @kkkk, align 4 + %1 = load i32* @llll, align 4 + %add = add nsw i32 %0, 10 + %add1 = add nsw i32 %1, 10 + %2 = load i32* @mmmm, align 4 + %sub = add nsw i32 %2, -3 + %3 = load 
i32* @nnnn, align 4 + %add2 = add nsw i32 %3, 10 + %4 = load i32* @oooo, align 4 + %add3 = add nsw i32 %4, 4 + %5 = load i32* @pppp, align 4 + %sub4 = add nsw i32 %5, -5 + %6 = load i32* @qqqq, align 4 + %sub5 = add nsw i32 %6, -10 + %7 = load i32* @rrrr, align 4 + %add6 = add nsw i32 %7, 6 + + %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([32 x i8]* @.str, i32 0, i32 0), i32 %sub5, i32 %add6, i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7) nounwind + %call7 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([32 x i8]* @.str, i32 0, i32 0), i32 %0, i32 %1, i32 %add, i32 %add1, i32 %sub, i32 %add2, i32 %add3, i32 %sub4, i32 %sub5, i32 %add6) nounwind + ret i32 0 +} +; 16: sw ${{[0-9]+}}, {{[0-9]+}} ( $sp ); # 4-byte Folded Spill +; 16: lw ${{[0-9]+}}, {{[0-9]+}} ( $sp ); # 4-byte Folded Reload +; 16: sw ${{[0-9]+}}, {{[0-9]+}} ( $sp ); # 4-byte Folded Spill +; 16: lw ${{[0-9]+}}, {{[0-9]+}} ( $sp ); # 4-byte Folded Reload + +declare i32 @printf(i8* nocapture, ...) nounwind diff --git a/test/CodeGen/Mips/tailcall.ll b/test/CodeGen/Mips/tailcall.ll new file mode 100644 index 0000000..bcd33fc --- /dev/null +++ b/test/CodeGen/Mips/tailcall.ll @@ -0,0 +1,245 @@ +; RUN: llc -march=mipsel -relocation-model=pic -enable-mips-tail-calls < %s | \ +; RUN: FileCheck %s -check-prefix=PIC32 +; RUN: llc -march=mipsel -relocation-model=static \ +; RUN: -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=STATIC32 +; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=+n64 -enable-mips-tail-calls \ +; RUN: < %s | FileCheck %s -check-prefix=N64 +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic \ +; RUN: -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=PIC16 + +@g0 = common global i32 0, align 4 +@g1 = common global i32 0, align 4 +@g2 = common global i32 0, align 4 +@g3 = common global i32 0, align 4 +@g4 = common global i32 0, align 4 +@g5 = common global i32 0, align 4 +@g6 = common global i32 0, align 4 +@g7 = common global i32 0, align 4 +@g8 = common global i32 0, align 4 +@g9 = common global i32 0, align 4 + +define i32 @caller1(i32 %a0) nounwind { +entry: +; PIC32-NOT: jalr +; STATIC32-NOT: jal +; N64-NOT: jalr +; PIC16: jalrc + + %call = tail call i32 @callee1(i32 1, i32 1, i32 1, i32 %a0) nounwind + ret i32 %call +} + +declare i32 @callee1(i32, i32, i32, i32) + +define i32 @caller2(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind { +entry: +; PIC32: jalr +; STATIC32: jal +; N64-NOT: jalr +; PIC16: jalrc + + %call = tail call i32 @callee2(i32 1, i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind + ret i32 %call +} + +declare i32 @callee2(i32, i32, i32, i32, i32) + +define i32 @caller3(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4) nounwind { +entry: +; PIC32: jalr +; STATIC32: jal +; N64-NOT: jalr +; PIC16: jalrc + + %call = tail call i32 @callee3(i32 1, i32 1, i32 1, i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4) nounwind + ret i32 %call +} + +declare i32 @callee3(i32, i32, i32, i32, i32, i32, i32, i32) + +define i32 @caller4(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) nounwind { +entry: +; PIC32: jalr +; STATIC32: jal +; N64: jalr +; PIC16: jalrc + + %call = tail call i32 @callee4(i32 1, i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) nounwind + ret i32 %call +} + +declare i32 @callee4(i32, i32, i32, i32, i32, i32, i32, i32, i32) + +define i32 @caller5() nounwind readonly { +entry: +; PIC32: .ent caller5 +; PIC32-NOT: jalr +; PIC32: .end caller5 +; STATIC32: .ent caller5 +; STATIC32-NOT: 
jal +; STATIC32: .end caller5 +; N64: .ent caller5 +; N64-NOT: jalr +; N64: .end caller5 +; PIC16: .ent caller5 +; PIC16: jalrc +; PIC16: .end caller5 + + %0 = load i32* @g0, align 4 + %1 = load i32* @g1, align 4 + %2 = load i32* @g2, align 4 + %3 = load i32* @g3, align 4 + %4 = load i32* @g4, align 4 + %5 = load i32* @g5, align 4 + %6 = load i32* @g6, align 4 + %7 = load i32* @g7, align 4 + %8 = load i32* @g8, align 4 + %9 = load i32* @g9, align 4 + %call = tail call fastcc i32 @callee5(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9) + ret i32 %call +} + +define internal fastcc i32 @callee5(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9) nounwind readnone noinline { +entry: + %add = add nsw i32 %a1, %a0 + %add1 = add nsw i32 %add, %a2 + %add2 = add nsw i32 %add1, %a3 + %add3 = add nsw i32 %add2, %a4 + %add4 = add nsw i32 %add3, %a5 + %add5 = add nsw i32 %add4, %a6 + %add6 = add nsw i32 %add5, %a7 + %add7 = add nsw i32 %add6, %a8 + %add8 = add nsw i32 %add7, %a9 + ret i32 %add8 +} + +declare i32 @callee8(i32, ...) + +define i32 @caller8_0() nounwind { +entry: + %call = tail call fastcc i32 @caller8_1() + ret i32 %call +} + +define internal fastcc i32 @caller8_1() nounwind noinline { +entry: +; PIC32: .ent caller8_1 +; PIC32: jalr +; PIC32: .end caller8_1 +; STATIC32: .ent caller8_1 +; STATIC32: jal +; STATIC32: .end caller8_1 +; N64: .ent caller8_1 +; N64-NOT: jalr +; N64: .end caller8_1 +; PIC16: .ent caller8_1 +; PIC16: jalrc +; PIC16: .end caller8_1 + + %call = tail call i32 (i32, ...)* @callee8(i32 2, i32 1) nounwind + ret i32 %call +} + +%struct.S = type { [2 x i32] } + +@gs1 = external global %struct.S + +declare i32 @callee9(%struct.S* byval) + +define i32 @caller9_0() nounwind { +entry: + %call = tail call fastcc i32 @caller9_1() + ret i32 %call +} + +define internal fastcc i32 @caller9_1() nounwind noinline { +entry: +; PIC32: .ent caller9_1 +; PIC32: jalr +; PIC32: .end caller9_1 +; STATIC32: .ent caller9_1 +; STATIC32: jal +; STATIC32: .end caller9_1 +; N64: .ent caller9_1 +; N64: jalr +; N64: .end caller9_1 +; PIC16: .ent caller9_1 +; PIC16: jalrc +; PIC16: .end caller9_1 + + %call = tail call i32 @callee9(%struct.S* byval @gs1) nounwind + ret i32 %call +} + +declare i32 @callee10(i32, i32, i32, i32, i32, i32, i32, i32, i32) + +define i32 @caller10(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8) nounwind { +entry: +; PIC32: .ent caller10 +; PIC32-NOT: jalr +; STATIC32: .ent caller10 +; STATIC32-NOT: jal +; N64: .ent caller10 +; N64-NOT: jalr +; PIC16: .ent caller10 +; PIC16: jalrc + + %call = tail call i32 @callee10(i32 %a8, i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) nounwind + ret i32 %call +} + +declare i32 @callee11(%struct.S* byval) + +define i32 @caller11() nounwind noinline { +entry: +; PIC32: .ent caller11 +; PIC32: jalr +; STATIC32: .ent caller11 +; STATIC32: jal +; N64: .ent caller11 +; N64: jalr +; PIC16: .ent caller11 +; PIC16: jalrc + + %call = tail call i32 @callee11(%struct.S* byval @gs1) nounwind + ret i32 %call +} + +declare i32 @callee12() + +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind + +define i32 @caller12(%struct.S* nocapture byval %a0) nounwind { +entry: +; PIC32: .ent caller12 +; PIC32: jalr +; STATIC32: .ent caller12 +; STATIC32: jal +; N64: .ent caller12 +; N64: jalr +; PIC16: .ent caller12 +; PIC16: jalrc + + %0 = bitcast %struct.S* %a0 to i8* + tail call void 
@llvm.memcpy.p0i8.p0i8.i32(i8* bitcast (%struct.S* @gs1 to i8*), i8* %0, i32 8, i32 4, i1 false) + %call = tail call i32 @callee12() nounwind + ret i32 %call +} + +declare i32 @callee13(i32, ...) + +define i32 @caller13() nounwind { +entry: +; PIC32: .ent caller13 +; PIC32-NOT: jalr +; STATIC32: .ent caller13 +; STATIC32-NOT: jal +; N64: .ent caller13 +; N64-NOT: jalr +; PIC16: .ent caller13 +; PIC16: jalrc + + %call = tail call i32 (i32, ...)* @callee13(i32 1, i32 2) nounwind + ret i32 %call +} + diff --git a/test/CodeGen/Mips/tls-alias.ll b/test/CodeGen/Mips/tls-alias.ll index d681091..ce98cc8 100644 --- a/test/CodeGen/Mips/tls-alias.ll +++ b/test/CodeGen/Mips/tls-alias.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=mipsel -relocation-model=pic < %s | FileCheck %s +; RUN: llc -march=mipsel -relocation-model=pic -disable-mips-delay-filler < %s | FileCheck %s @foo = thread_local global i32 42 @bar = hidden alias i32* @foo diff --git a/test/CodeGen/Mips/tls.ll b/test/CodeGen/Mips/tls.ll index a7ddb96..72d30dc 100644 --- a/test/CodeGen/Mips/tls.ll +++ b/test/CodeGen/Mips/tls.ll @@ -1,8 +1,10 @@ -; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=PIC -; RUN: llc -march=mipsel -relocation-model=static < %s \ -; RUN: | FileCheck %s -check-prefix=STATIC -; RUN: llc -march=mipsel -relocation-model=static < %s \ -; RUN: -mips-fix-global-base-reg=false | FileCheck %s -check-prefix=STATICGP +; RUN: llc -march=mipsel -disable-mips-delay-filler < %s | \ +; RUN: FileCheck %s -check-prefix=PIC +; RUN: llc -march=mipsel -relocation-model=static -disable-mips-delay-filler < \ +; RUN: %s | FileCheck %s -check-prefix=STATIC +; RUN: llc -march=mipsel -relocation-model=static -disable-mips-delay-filler \ +; RUN: -mips-fix-global-base-reg=false < %s | \ +; RUN: FileCheck %s -check-prefix=STATICGP @t1 = thread_local global i32 0, align 4 diff --git a/test/CodeGen/Mips/tls16.ll b/test/CodeGen/Mips/tls16.ll new file mode 100644 index 0000000..861864b --- /dev/null +++ b/test/CodeGen/Mips/tls16.ll @@ -0,0 +1,13 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=PIC16 + +@a = thread_local global i32 4, align 4 + +define i32 @foo() nounwind readonly { +entry: + %0 = load i32* @a, align 4 +; PIC16: lw ${{[0-9]+}}, %call16(__tls_get_addr)(${{[0-9]+}}) +; PIC16: addiu ${{[0-9]+}}, %tlsgd(a) + ret i32 %0 +} + + diff --git a/test/CodeGen/Mips/tls16_2.ll b/test/CodeGen/Mips/tls16_2.ll new file mode 100644 index 0000000..b33e3c37 --- /dev/null +++ b/test/CodeGen/Mips/tls16_2.ll @@ -0,0 +1,15 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=PIC16 + +@f.i = internal thread_local unnamed_addr global i32 1, align 4 + +define i8* @f(i8* nocapture %a) nounwind { +entry: + %0 = load i32* @f.i, align 4 + %inc = add nsw i32 %0, 1 + store i32 %inc, i32* @f.i, align 4 + %1 = inttoptr i32 %inc to i8* +; PIC16: addiu ${{[0-9]+}}, %tlsldm(f.i) + ret i8* %1 +} + + diff --git a/test/CodeGen/Mips/uitofp.ll b/test/CodeGen/Mips/uitofp.ll new file mode 100644 index 0000000..aff70c2 --- /dev/null +++ b/test/CodeGen/Mips/uitofp.ll @@ -0,0 +1,12 @@ +; RUN: llc -march=mips -mattr=+single-float < %s + +define void @f0() nounwind { +entry: + %b = alloca i32, align 4 + %a = alloca float, align 4 + store volatile i32 1, i32* %b, align 4 + %0 = load volatile i32* %b, align 4 + %conv = uitofp i32 %0 to float + store float %conv, float* %a, align 4 + ret void +} diff --git a/test/CodeGen/Mips/ul1.ll b/test/CodeGen/Mips/ul1.ll new file mode 100644 index 
0000000..7e64ff4 --- /dev/null +++ b/test/CodeGen/Mips/ul1.ll @@ -0,0 +1,15 @@ +; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16 +%struct.ua = type <{ i16, i32 }> + +@foo = common global %struct.ua zeroinitializer, align 1 + +define i32 @main() nounwind { +entry: + store i32 10, i32* getelementptr inbounds (%struct.ua* @foo, i32 0, i32 1), align 1 +; 16: sb ${{[0-9]+}}, {{[0-9]+}}(${{[0-9]+}}) +; 16: sb ${{[0-9]+}}, {{[0-9]+}}(${{[0-9]+}}) +; 16: sb ${{[0-9]+}}, {{[0-9]+}}(${{[0-9]+}}) +; 16: sb ${{[0-9]+}}, {{[0-9]+}}(${{[0-9]+}}) + ret i32 0 +} + diff --git a/test/CodeGen/Mips/vector-load-store.ll b/test/CodeGen/Mips/vector-load-store.ll new file mode 100644 index 0000000..d889963 --- /dev/null +++ b/test/CodeGen/Mips/vector-load-store.ll @@ -0,0 +1,27 @@ +; RUN: llc -march=mipsel -mattr=+dsp < %s | FileCheck %s + +@g1 = common global <2 x i16> zeroinitializer, align 4 +@g0 = common global <2 x i16> zeroinitializer, align 4 +@g3 = common global <4 x i8> zeroinitializer, align 4 +@g2 = common global <4 x i8> zeroinitializer, align 4 + +define void @func_v2i16() nounwind { +entry: +; CHECK: lw +; CHECK: sw + + %0 = load <2 x i16>* @g1, align 4 + store <2 x i16> %0, <2 x i16>* @g0, align 4 + ret void +} + +define void @func_v4i8() nounwind { +entry: +; CHECK: lw +; CHECK: sw + + %0 = load <4 x i8>* @g3, align 4 + store <4 x i8> %0, <4 x i8>* @g2, align 4 + ret void +} + diff --git a/test/CodeGen/NVPTX/global-ordering.ll b/test/CodeGen/NVPTX/global-ordering.ll new file mode 100644 index 0000000..43394a7 --- /dev/null +++ b/test/CodeGen/NVPTX/global-ordering.ll @@ -0,0 +1,20 @@ +; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix=PTX32 +; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix=PTX64 + +; Make sure we emit these globals in def-use order + + +; PTX32: .visible .global .align 1 .u8 a = 2; +; PTX32-NEXT: .visible .global .align 4 .u32 a2 = a; +; PTX64: .visible .global .align 1 .u8 a = 2; +; PTX64-NEXT: .visible .global .align 8 .u64 a2 = a; +@a2 = addrspace(1) global i8 addrspace(1)* @a +@a = addrspace(1) global i8 2 + + +; PTX32: .visible .global .align 1 .u8 b = 1; +; PTX32-NEXT: .visible .global .align 4 .u32 b2[2] = {b, b}; +; PTX64: .visible .global .align 1 .u8 b = 1; +; PTX64-NEXT: .visible .global .align 8 .u64 b2[2] = {b, b}; +@b2 = addrspace(1) global [2 x i8 addrspace(1)*] [i8 addrspace(1)* @b, i8 addrspace(1)* @b] +@b = addrspace(1) global i8 1 diff --git a/test/CodeGen/NVPTX/param-align.ll b/test/CodeGen/NVPTX/param-align.ll new file mode 100644 index 0000000..84ccb65 --- /dev/null +++ b/test/CodeGen/NVPTX/param-align.ll @@ -0,0 +1,25 @@ +; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s + +;;; Need 4-byte alignment on float* passed byval +define ptx_device void @t1(float* byval %x) { +; CHECK: .func t1 +; CHECK: .param .align 4 .b8 t1_param_0[4] + ret void +} + + +;;; Need 8-byte alignment on double* passed byval +define ptx_device void @t2(double* byval %x) { +; CHECK: .func t2 +; CHECK: .param .align 8 .b8 t2_param_0[8] + ret void +} + + +;;; Need 4-byte alignment on float2* passed byval +%struct.float2 = type { float, float } +define ptx_device void @t3(%struct.float2* byval %x) { +; CHECK: .func t3 +; CHECK: .param .align 4 .b8 t3_param_0[8] + ret void +} diff --git a/test/CodeGen/NVPTX/pr13291-i1-store.ll b/test/CodeGen/NVPTX/pr13291-i1-store.ll new file mode 100644 index 0000000..779f779 --- /dev/null +++ b/test/CodeGen/NVPTX/pr13291-i1-store.ll @@ -0,0 +1,26 @@ +; 
RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix=PTX32 +; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix=PTX64 + +define ptx_kernel void @t1(i1* %a) { +; PTX32: mov.u16 %rc{{[0-9]+}}, 0; +; PTX32-NEXT: st.u8 [%r{{[0-9]+}}], %rc{{[0-9]+}}; +; PTX64: mov.u16 %rc{{[0-9]+}}, 0; +; PTX64-NEXT: st.u8 [%rl{{[0-9]+}}], %rc{{[0-9]+}}; + store i1 false, i1* %a + ret void +} + + +define ptx_kernel void @t2(i1* %a, i8* %b) { +; PTX32: ld.u8 %rc{{[0-9]+}}, [%r{{[0-9]+}}] +; PTX32: and.b16 temp, %rc{{[0-9]+}}, 1; +; PTX32: setp.b16.eq %p{{[0-9]+}}, temp, 1; +; PTX64: ld.u8 %rc{{[0-9]+}}, [%rl{{[0-9]+}}] +; PTX64: and.b16 temp, %rc{{[0-9]+}}, 1; +; PTX64: setp.b16.eq %p{{[0-9]+}}, temp, 1; + + %t1 = load i1* %a + %t2 = select i1 %t1, i8 1, i8 2 + store i8 %t2, i8* %b + ret void +} diff --git a/test/CodeGen/NVPTX/ptx-version-30.ll b/test/CodeGen/NVPTX/ptx-version-30.ll new file mode 100644 index 0000000..0422b01 --- /dev/null +++ b/test/CodeGen/NVPTX/ptx-version-30.ll @@ -0,0 +1,6 @@ +; RUN: llc < %s -march=nvptx -mcpu=sm_20 -mattr=ptx30 | FileCheck %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=ptx30 | FileCheck %s + + +; CHECK: .version 3.0 + diff --git a/test/CodeGen/NVPTX/ptx-version-31.ll b/test/CodeGen/NVPTX/ptx-version-31.ll new file mode 100644 index 0000000..d6e5730 --- /dev/null +++ b/test/CodeGen/NVPTX/ptx-version-31.ll @@ -0,0 +1,6 @@ +; RUN: llc < %s -march=nvptx -mcpu=sm_20 -mattr=ptx31 | FileCheck %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=ptx31 | FileCheck %s + + +; CHECK: .version 3.1 + diff --git a/test/CodeGen/NVPTX/sm-version-10.ll b/test/CodeGen/NVPTX/sm-version-10.ll new file mode 100644 index 0000000..9324a37 --- /dev/null +++ b/test/CodeGen/NVPTX/sm-version-10.ll @@ -0,0 +1,6 @@ +; RUN: llc < %s -march=nvptx -mcpu=sm_10 | FileCheck %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_10 | FileCheck %s + + +; CHECK: .target sm_10 + diff --git a/test/CodeGen/NVPTX/sm-version-11.ll b/test/CodeGen/NVPTX/sm-version-11.ll new file mode 100644 index 0000000..9033a4e --- /dev/null +++ b/test/CodeGen/NVPTX/sm-version-11.ll @@ -0,0 +1,6 @@ +; RUN: llc < %s -march=nvptx -mcpu=sm_11 | FileCheck %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_11 | FileCheck %s + + +; CHECK: .target sm_11 + diff --git a/test/CodeGen/NVPTX/sm-version-12.ll b/test/CodeGen/NVPTX/sm-version-12.ll new file mode 100644 index 0000000..d8ee85c --- /dev/null +++ b/test/CodeGen/NVPTX/sm-version-12.ll @@ -0,0 +1,6 @@ +; RUN: llc < %s -march=nvptx -mcpu=sm_12 | FileCheck %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_12 | FileCheck %s + + +; CHECK: .target sm_12 + diff --git a/test/CodeGen/NVPTX/sm-version-13.ll b/test/CodeGen/NVPTX/sm-version-13.ll new file mode 100644 index 0000000..ad67d64 --- /dev/null +++ b/test/CodeGen/NVPTX/sm-version-13.ll @@ -0,0 +1,6 @@ +; RUN: llc < %s -march=nvptx -mcpu=sm_13 | FileCheck %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_13 | FileCheck %s + + +; CHECK: .target sm_13 + diff --git a/test/CodeGen/NVPTX/sm-version-20.ll b/test/CodeGen/NVPTX/sm-version-20.ll new file mode 100644 index 0000000..c21f49e --- /dev/null +++ b/test/CodeGen/NVPTX/sm-version-20.ll @@ -0,0 +1,6 @@ +; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s + + +; CHECK: .target sm_20 + diff --git a/test/CodeGen/NVPTX/sm-version-21.ll b/test/CodeGen/NVPTX/sm-version-21.ll new file mode 100644 index 0000000..4fb6de3 --- /dev/null +++ b/test/CodeGen/NVPTX/sm-version-21.ll @@ -0,0 +1,6 @@ +; RUN: llc < %s -march=nvptx 
-mcpu=sm_21 | FileCheck %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_21 | FileCheck %s + + +; CHECK: .target sm_21 + diff --git a/test/CodeGen/NVPTX/sm-version-30.ll b/test/CodeGen/NVPTX/sm-version-30.ll new file mode 100644 index 0000000..692b49a0 --- /dev/null +++ b/test/CodeGen/NVPTX/sm-version-30.ll @@ -0,0 +1,6 @@ +; RUN: llc < %s -march=nvptx -mcpu=sm_30 | FileCheck %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_30 | FileCheck %s + + +; CHECK: .target sm_30 + diff --git a/test/CodeGen/NVPTX/sm-version-35.ll b/test/CodeGen/NVPTX/sm-version-35.ll new file mode 100644 index 0000000..25368a0 --- /dev/null +++ b/test/CodeGen/NVPTX/sm-version-35.ll @@ -0,0 +1,6 @@ +; RUN: llc < %s -march=nvptx -mcpu=sm_35 | FileCheck %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 | FileCheck %s + + +; CHECK: .target sm_35 + diff --git a/test/CodeGen/PowerPC/2010-03-09-indirect-call.ll b/test/CodeGen/PowerPC/2010-03-09-indirect-call.ll index 0003a17..b95ac68 100644 --- a/test/CodeGen/PowerPC/2010-03-09-indirect-call.ll +++ b/test/CodeGen/PowerPC/2010-03-09-indirect-call.ll @@ -9,9 +9,8 @@ target triple = "powerpc-apple-darwin11.0" define void @foo() nounwind ssp { entry: -; Better: mtctr r12 -; CHECK: mr r12, [[REG:r[0-9]+]] -; CHECK: mtctr [[REG]] +; CHECK: mtctr r12 +; CHECK: bctrl %0 = load void (...)** @p, align 4 ; <void (...)*> [#uses=1] call void (...)* %0() nounwind br label %return diff --git a/test/CodeGen/PowerPC/2012-09-16-TOC-entry-check.ll b/test/CodeGen/PowerPC/2012-09-16-TOC-entry-check.ll new file mode 100644 index 0000000..9d2e390 --- /dev/null +++ b/test/CodeGen/PowerPC/2012-09-16-TOC-entry-check.ll @@ -0,0 +1,27 @@ +; RUN: llc < %s | FileCheck %s +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +; This test check if the TOC entry symbol name won't clash with global .LC0 +; and .LC2 symbols defined in the module. + +@.LC0 = internal global [5 x i8] c".LC0\00" +@.LC2 = internal global [5 x i8] c".LC2\00" + +define i32 @foo(double %X, double %Y) nounwind readnone { + ; The 1.0 and 3.0 constants generate two TOC entries + %cmp = fcmp oeq double %X, 1.000000e+00 + %conv = zext i1 %cmp to i32 + %cmp1 = fcmp oeq double %Y, 3.000000e+00 + %conv2 = zext i1 %cmp1 to i32 + %add = add nsw i32 %conv2, %conv + ret i32 %add +} + +; Check the creation of 2 .tc entries for both double constants. They +; should be .LC1 and .LC3 to avoid name clash with global constants +; .LC0 and .LC2 +; CHECK: .LC{{[13]}}: +; CHECK-NEXT: .tc {{[\._a-zA-Z0-9]+}}[TC],{{[\._a-zA-Z0-9]+}} +; CHECK: .LC{{[13]}}: +; CHECK-NEXT: .tc {{[\._a-zA-Z0-9]+}}[TC],{{[\._a-zA-Z0-9]+}} diff --git a/test/CodeGen/PowerPC/2012-10-11-dynalloc.ll b/test/CodeGen/PowerPC/2012-10-11-dynalloc.ll new file mode 100644 index 0000000..41533a8 --- /dev/null +++ b/test/CodeGen/PowerPC/2012-10-11-dynalloc.ll @@ -0,0 +1,18 @@ +; RUN: llc < %s | FileCheck %s +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +define void @test(i64 %n) nounwind { +entry: + %0 = alloca i8, i64 %n, align 1 + %1 = alloca i8, i64 %n, align 1 + call void @use(i8* %0, i8* %1) nounwind + ret void +} + +declare void @use(i8*, i8*) + +; Check we actually have two instances of dynamic stack allocation, +; identified by the stdux used to update the back-chain link. 
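; Illustration (assumed, not part of the committed test): the IR above is
; roughly equivalent to C code such as
;   void test(long n) { char a[n], b[n]; use(a, b); }
; Each variable-sized alloca is lowered to a single
;   stdux rX, 1, rY
; which moves r1 down by the (negated, rounded-up) size held in rY and stores
; the saved back-chain pointer at the new top of stack, so the two dynamic
; allocations are expected to show up as the two stdux instructions checked below.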
+; CHECK: stdux +; CHECK: stdux diff --git a/test/CodeGen/PowerPC/2012-10-12-bitcast.ll b/test/CodeGen/PowerPC/2012-10-12-bitcast.ll new file mode 100644 index 0000000..f841c5f --- /dev/null +++ b/test/CodeGen/PowerPC/2012-10-12-bitcast.ll @@ -0,0 +1,20 @@ +; RUN: llc -mattr=+altivec < %s | FileCheck %s +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +define i32 @test(<16 x i8> %v) nounwind { +entry: + %0 = bitcast <16 x i8> %v to i128 + %1 = lshr i128 %0, 96 + %2 = trunc i128 %1 to i32 + ret i32 %2 +} + +; Verify that bitcast handles big-endian platforms correctly +; by checking we load the result from the correct offset + +; CHECK: addi [[REGISTER:[0-9]+]], 1, -16 +; CHECK: stvx 2, 0, [[REGISTER]] +; CHECK: lwz 3, -16(1) +; CHECK: blr + diff --git a/test/CodeGen/PowerPC/asm-Zy.ll b/test/CodeGen/PowerPC/asm-Zy.ll new file mode 100644 index 0000000..691165f --- /dev/null +++ b/test/CodeGen/PowerPC/asm-Zy.ll @@ -0,0 +1,14 @@ +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64" +target triple = "powerpc64-bgq-linux" +; RUN: llc < %s -march=ppc64 -mcpu=a2 | FileCheck %s + +define i32 @zytest(i32 %a) nounwind { +entry: +; CHECK: @zytest + %r = call i32 asm "lwbrx $0, ${1:y}", "=r,Z"(i32 %a) nounwind, !srcloc !0 + ret i32 %r +; CHECK: lwbrx 3, 0, +} + +!0 = metadata !{i32 101688} + diff --git a/test/CodeGen/PowerPC/big-endian-formal-args.ll b/test/CodeGen/PowerPC/big-endian-formal-args.ll index 9a456b6..638059a 100644 --- a/test/CodeGen/PowerPC/big-endian-formal-args.ll +++ b/test/CodeGen/PowerPC/big-endian-formal-args.ll @@ -2,10 +2,10 @@ declare void @bar(i64 %x, i64 %y) -; CHECK: li {{[53]}}, 0 +; CHECK: li 3, 0 ; CHECK: li 4, 2 +; CHECK: li 5, 0 ; CHECK: li 6, 3 -; CHECK: mr {{[53]}}, {{[53]}} define void @foo() { call void @bar(i64 2, i64 3) diff --git a/test/CodeGen/PowerPC/bl8_elf_nop.ll b/test/CodeGen/PowerPC/bl8_elf_nop.ll deleted file mode 100644 index 386c59e..0000000 --- a/test/CodeGen/PowerPC/bl8_elf_nop.ll +++ /dev/null @@ -1,16 +0,0 @@ -; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s -target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" -target triple = "powerpc64-unknown-linux-gnu" - -declare i32 @clock() nounwind - -define i32 @func() { -entry: - %call = call i32 @clock() nounwind - %call2 = add i32 %call, 7 - ret i32 %call2 -} - -; CHECK: bl clock -; CHECK-NEXT: nop - diff --git a/test/CodeGen/PowerPC/coalesce-ext.ll b/test/CodeGen/PowerPC/coalesce-ext.ll index cc80f83..f19175c 100644 --- a/test/CodeGen/PowerPC/coalesce-ext.ll +++ b/test/CodeGen/PowerPC/coalesce-ext.ll @@ -13,5 +13,6 @@ define i32 @test1sext(i64 %A, i64 %B, i32* %P, i64 *%P2) nounwind { store volatile i32 %D, i32* %P ; Reuse low bits of extended register, don't extend live range of SUM. 
; CHECK: stw [[EXT]] - ret i32 %D + %R = add i32 %D, %D + ret i32 %R } diff --git a/test/CodeGen/PowerPC/cr1eq-no-extra-moves.ll b/test/CodeGen/PowerPC/cr1eq-no-extra-moves.ll new file mode 100644 index 0000000..afa1ea8 --- /dev/null +++ b/test/CodeGen/PowerPC/cr1eq-no-extra-moves.ll @@ -0,0 +1,26 @@ +; RUN: llc < %s | FileCheck %s +target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32" +target triple = "powerpc-unknown-linux" + +@.str = private unnamed_addr constant [3 x i8] c"%i\00", align 1 + +define void @test(i32 %count) nounwind { +entry: +; CHECK: crxor 6, 6, 6 + %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i32 1) nounwind + %cmp2 = icmp sgt i32 %count, 0 + br i1 %cmp2, label %for.body, label %for.end + +for.body: ; preds = %entry, %for.body + %i.03 = phi i32 [ %inc, %for.body ], [ 0, %entry ] +; CHECK: crxor 6, 6, 6 + %call1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i32 1) nounwind + %inc = add nsw i32 %i.03, 1 + %exitcond = icmp eq i32 %inc, %count + br i1 %exitcond, label %for.end, label %for.body + +for.end: ; preds = %for.body, %entry + ret void +} + +declare i32 @printf(i8* nocapture, ...) nounwind diff --git a/test/CodeGen/PowerPC/crsave.ll b/test/CodeGen/PowerPC/crsave.ll new file mode 100644 index 0000000..3e98dbd --- /dev/null +++ b/test/CodeGen/PowerPC/crsave.ll @@ -0,0 +1,49 @@ +; RUN: llc -O0 -disable-fp-elim -mtriple=powerpc-unknown-linux-gnu < %s | FileCheck %s -check-prefix=PPC32 +; RUN: llc -O0 -disable-fp-elim -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s -check-prefix=PPC64 + +declare void @foo() + +define i32 @test_cr2() nounwind { +entry: + %ret = alloca i32, align 4 + %0 = call i32 asm sideeffect "\0A\09mtcr $4\0A\09cmp 2,$2,$1\0A\09mfcr $0", "=r,r,r,r,r,~{cr2}"(i32 1, i32 2, i32 3, i32 0) nounwind + store i32 %0, i32* %ret, align 4 + call void @foo() + %1 = load i32* %ret, align 4 + ret i32 %1 +} + +; PPC32: mfcr 12 +; PPC32-NEXT: stw 12, {{[0-9]+}}(31) +; PPC32: lwz 12, {{[0-9]+}}(31) +; PPC32-NEXT: mtcrf 32, 12 + +; PPC64: mfcr 12 +; PPC64-NEXT: stw 12, 8(1) +; PPC64: lwz 12, 8(1) +; PPC64-NEXT: mtcrf 32, 12 + +define i32 @test_cr234() nounwind { +entry: + %ret = alloca i32, align 4 + %0 = call i32 asm sideeffect "\0A\09mtcr $4\0A\09cmp 2,$2,$1\0A\09cmp 3,$2,$2\0A\09cmp 4,$2,$3\0A\09mfcr $0", "=r,r,r,r,r,~{cr2},~{cr3},~{cr4}"(i32 1, i32 2, i32 3, i32 0) nounwind + store i32 %0, i32* %ret, align 4 + call void @foo() + %1 = load i32* %ret, align 4 + ret i32 %1 +} + +; PPC32: mfcr 12 +; PPC32-NEXT: stw 12, {{[0-9]+}}(31) +; PPC32: lwz 12, {{[0-9]+}}(31) +; PPC32-NEXT: mtcrf 32, 12 +; PPC32-NEXT: mtcrf 16, 12 +; PPC32-NEXT: mtcrf 8, 12 + +; PPC64: mfcr 12 +; PPC64-NEXT: stw 12, 8(1) +; PPC64: lwz 12, 8(1) +; PPC64-NEXT: mtcrf 32, 12 +; PPC64-NEXT: mtcrf 16, 12 +; PPC64-NEXT: mtcrf 8, 12 + diff --git a/test/CodeGen/PowerPC/emptystruct.ll b/test/CodeGen/PowerPC/emptystruct.ll new file mode 100644 index 0000000..36b4abd --- /dev/null +++ b/test/CodeGen/PowerPC/emptystruct.ll @@ -0,0 +1,51 @@ +; RUN: llc -mcpu=pwr7 -O0 < %s | FileCheck %s + +; This tests correct handling of empty aggregate parameters and return values. +; An empty parameter passed by value does not consume a protocol register or +; a parameter save area doubleword. An empty parameter passed by reference +; is treated as any other pointer parameter. 
An empty aggregate return value +; is treated as any other aggregate return value, passed via address as a +; hidden parameter in GPR3. In this example, GPR3 contains the return value +; address, GPR4 contains the address of e2, and e1 and e3 are not passed or +; received. + +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +%struct.empty = type {} + +define void @callee(%struct.empty* noalias sret %agg.result, %struct.empty* byval %a1, %struct.empty* %a2, %struct.empty* byval %a3) nounwind { +entry: + %a2.addr = alloca %struct.empty*, align 8 + store %struct.empty* %a2, %struct.empty** %a2.addr, align 8 + %0 = load %struct.empty** %a2.addr, align 8 + %1 = bitcast %struct.empty* %agg.result to i8* + %2 = bitcast %struct.empty* %0 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 0, i32 1, i1 false) + ret void +} + +; CHECK: callee: +; CHECK: std 4, +; CHECK: std 3, +; CHECK-NOT: std 5, +; CHECK-NOT: std 6, +; CHECK: blr + +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind + +define void @caller(%struct.empty* noalias sret %agg.result) nounwind { +entry: + %e1 = alloca %struct.empty, align 1 + %e2 = alloca %struct.empty, align 1 + %e3 = alloca %struct.empty, align 1 + call void @callee(%struct.empty* sret %agg.result, %struct.empty* byval %e1, %struct.empty* %e2, %struct.empty* byval %e3) + ret void +} + +; CHECK: caller: +; CHECK: addi 4, +; CHECK: std 3, +; CHECK-NOT: std 5, +; CHECK-NOT: std 6, +; CHECK: bl callee diff --git a/test/CodeGen/PowerPC/floatPSA.ll b/test/CodeGen/PowerPC/floatPSA.ll new file mode 100644 index 0000000..b5631a1 --- /dev/null +++ b/test/CodeGen/PowerPC/floatPSA.ll @@ -0,0 +1,97 @@ +; RUN: llc -O0 -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s + +; This verifies that single-precision floating point values that can't +; be passed in registers are stored in the rightmost word of the parameter +; save area slot. There are 13 architected floating-point registers, so +; the 14th is passed in storage. The address of the 14th argument is +; 48 (fixed size of the linkage area) + 13 * 8 (first 13 args) + 4 +; (offset to second word) = 156. 
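; Worked example (assumed, not part of the committed test): by the same formula
; a hypothetical 15th single-precision argument would be found at
; 48 + 14 * 8 + 4 = 164, i.e. each argument past the 13th occupies the next
; doubleword slot of the parameter save area, with the float itself in the
; rightmost (high-address) word of that slot.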
+ +define float @bar(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h, float %i, float %j, float %k, float %l, float %m, float %n) nounwind { +entry: + %a.addr = alloca float, align 4 + %b.addr = alloca float, align 4 + %c.addr = alloca float, align 4 + %d.addr = alloca float, align 4 + %e.addr = alloca float, align 4 + %f.addr = alloca float, align 4 + %g.addr = alloca float, align 4 + %h.addr = alloca float, align 4 + %i.addr = alloca float, align 4 + %j.addr = alloca float, align 4 + %k.addr = alloca float, align 4 + %l.addr = alloca float, align 4 + %m.addr = alloca float, align 4 + %n.addr = alloca float, align 4 + store float %a, float* %a.addr, align 4 + store float %b, float* %b.addr, align 4 + store float %c, float* %c.addr, align 4 + store float %d, float* %d.addr, align 4 + store float %e, float* %e.addr, align 4 + store float %f, float* %f.addr, align 4 + store float %g, float* %g.addr, align 4 + store float %h, float* %h.addr, align 4 + store float %i, float* %i.addr, align 4 + store float %j, float* %j.addr, align 4 + store float %k, float* %k.addr, align 4 + store float %l, float* %l.addr, align 4 + store float %m, float* %m.addr, align 4 + store float %n, float* %n.addr, align 4 + %0 = load float* %n.addr, align 4 + ret float %0 +} + +; CHECK: lfs {{[0-9]+}}, 156(1) + +define float @foo() nounwind { +entry: + %a = alloca float, align 4 + %b = alloca float, align 4 + %c = alloca float, align 4 + %d = alloca float, align 4 + %e = alloca float, align 4 + %f = alloca float, align 4 + %g = alloca float, align 4 + %h = alloca float, align 4 + %i = alloca float, align 4 + %j = alloca float, align 4 + %k = alloca float, align 4 + %l = alloca float, align 4 + %m = alloca float, align 4 + %n = alloca float, align 4 + store float 1.000000e+00, float* %a, align 4 + store float 2.000000e+00, float* %b, align 4 + store float 3.000000e+00, float* %c, align 4 + store float 4.000000e+00, float* %d, align 4 + store float 5.000000e+00, float* %e, align 4 + store float 6.000000e+00, float* %f, align 4 + store float 7.000000e+00, float* %g, align 4 + store float 8.000000e+00, float* %h, align 4 + store float 9.000000e+00, float* %i, align 4 + store float 1.000000e+01, float* %j, align 4 + store float 1.100000e+01, float* %k, align 4 + store float 1.200000e+01, float* %l, align 4 + store float 1.300000e+01, float* %m, align 4 + store float 1.400000e+01, float* %n, align 4 + %0 = load float* %a, align 4 + %1 = load float* %b, align 4 + %2 = load float* %c, align 4 + %3 = load float* %d, align 4 + %4 = load float* %e, align 4 + %5 = load float* %f, align 4 + %6 = load float* %g, align 4 + %7 = load float* %h, align 4 + %8 = load float* %i, align 4 + %9 = load float* %j, align 4 + %10 = load float* %k, align 4 + %11 = load float* %l, align 4 + %12 = load float* %m, align 4 + %13 = load float* %n, align 4 + %call = call float @bar(float %0, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8, float %9, float %10, float %11, float %12, float %13) + ret float %call +} + +; Note that stw is used instead of stfs because the value is a simple +; constant that can be created with a load-immediate in a GPR. +; CHECK: stw {{[0-9]+}}, 156(1) + diff --git a/test/CodeGen/PowerPC/fsl-e500mc.ll b/test/CodeGen/PowerPC/fsl-e500mc.ll new file mode 100644 index 0000000..09b7e41 --- /dev/null +++ b/test/CodeGen/PowerPC/fsl-e500mc.ll @@ -0,0 +1,22 @@ +; +; Test support for Freescale e500mc and its higher memcpy inlining thresholds. 
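; Illustration (assumed, not part of the committed test): the struct copied in
; @copy below is 52 bytes, which is expected to fall under e500mc's raised
; inline-expansion limit, so the memcpy intrinsic should be lowered to a short
; sequence of loads and stores rather than the "bl memcpy" call the test forbids.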
+; +; RUN: llc -mcpu=e500mc < %s 2>&1 | FileCheck %s +; CHECK-NOT: not a recognized processor for this target + +target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32" +target triple = "powerpc-fsl-linux" + +%struct.teststruct = type { [12 x i32], i32 } + +define void @copy(%struct.teststruct* noalias nocapture sret %agg.result, %struct.teststruct* nocapture %in) nounwind { +entry: +; CHECK: @copy +; CHECK-NOT: bl memcpy + %0 = bitcast %struct.teststruct* %agg.result to i8* + %1 = bitcast %struct.teststruct* %in to i8* + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 52, i32 4, i1 false) + ret void +} + +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind diff --git a/test/CodeGen/PowerPC/fsl-e5500.ll b/test/CodeGen/PowerPC/fsl-e5500.ll new file mode 100644 index 0000000..d47d8c8 --- /dev/null +++ b/test/CodeGen/PowerPC/fsl-e5500.ll @@ -0,0 +1,22 @@ +; +; Test support for Freescale e5500 and its higher memcpy inlining thresholds. +; +; RUN: llc -mcpu=e5500 < %s 2>&1 | FileCheck %s +; CHECK-NOT: not a recognized processor for this target + +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-fsl-linux" + +%struct.teststruct = type { [24 x i32], i32 } + +define void @copy(%struct.teststruct* noalias nocapture sret %agg.result, %struct.teststruct* nocapture %in) nounwind { +entry: +; CHECK: @copy +; CHECK-NOT: bl memcpy + %0 = bitcast %struct.teststruct* %agg.result to i8* + %1 = bitcast %struct.teststruct* %in to i8* + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 100, i32 4, i1 false) + ret void +} + +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind diff --git a/test/CodeGen/PowerPC/i64_fp_round.ll b/test/CodeGen/PowerPC/i64_fp_round.ll new file mode 100644 index 0000000..5a0c072 --- /dev/null +++ b/test/CodeGen/PowerPC/i64_fp_round.ll @@ -0,0 +1,27 @@ +; RUN: llc -mcpu=pwr7 < %s | FileCheck %s +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +define float @test(i64 %x) nounwind readnone { +entry: + %conv = sitofp i64 %x to float + ret float %conv +} + +; Verify that we get the code sequence needed to avoid double-rounding. +; Note that only parts of the sequence are checked for here, to allow +; for minor code generation differences. + +; CHECK: sradi [[REGISTER:[0-9]+]], 3, 53 +; CHECK: addi [[REGISTER:[0-9]+]], [[REGISTER]], 1 +; CHECK: cmpldi 0, [[REGISTER]], 1 +; CHECK: isel [[REGISTER:[0-9]+]], {{[0-9]+}}, 3, 1 +; CHECK: std [[REGISTER]], -{{[0-9]+}}(1) + + +; Also check that with -enable-unsafe-fp-math we do not get that extra +; code sequence. Simply verify that there is no "isel" present. 
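; Background (assumed, not part of the committed test): sitofp i64 -> float is
; lowered here through an intermediate double-precision convert, which can round
; twice and differ from a single direct conversion in the last bit; the
; sradi/addi/cmpldi/isel sequence checked above detects inputs with more
; significant bits than the intermediate format can hold and adjusts them so the
; final single-precision value matches a one-step conversion. With
; -enable-unsafe-fp-math that correction is dropped, hence the UNSAFE run line
; that follows.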
+ +; RUN: llc -mcpu=pwr7 -enable-unsafe-fp-math < %s | FileCheck %s -check-prefix=UNSAFE +; CHECK-UNSAFE-NOT: isel + diff --git a/test/CodeGen/PowerPC/inlineasm-copy.ll b/test/CodeGen/PowerPC/inlineasm-copy.ll index e1ff82d..59c3388 100644 --- a/test/CodeGen/PowerPC/inlineasm-copy.ll +++ b/test/CodeGen/PowerPC/inlineasm-copy.ll @@ -1,5 +1,6 @@ -; RUN: llc < %s -march=ppc32 | not grep mr +; RUN: llc < %s -march=ppc32 -verify-machineinstrs | FileCheck %s +; CHECK-NOT: mr define i32 @test(i32 %Y, i32 %X) { entry: %tmp = tail call i32 asm "foo $0", "=r"( ) ; <i32> [#uses=1] @@ -12,3 +13,9 @@ entry: ret i32 %tmp1 } +; CHECK: test3 +define i32 @test3(i32 %Y, i32 %X) { +entry: + %tmp1 = tail call { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } asm sideeffect "foo $0, $1", "=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19"( i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y, i32 %X, i32 %Y ) ; <i32> [#uses=1] + ret i32 1 +} diff --git a/test/CodeGen/PowerPC/int-fp-conv-1.ll b/test/CodeGen/PowerPC/int-fp-conv-1.ll index 6c82723..d2887b9 100644 --- a/test/CodeGen/PowerPC/int-fp-conv-1.ll +++ b/test/CodeGen/PowerPC/int-fp-conv-1.ll @@ -1,4 +1,5 @@ -; RUN: llc < %s -march=ppc64 | grep __floatditf +; RUN: llc < %s -march=ppc64 | FileCheck %s +; CHECK-NOT: __floatditf define i64 @__fixunstfdi(ppc_fp128 %a) nounwind { entry: diff --git a/test/CodeGen/PowerPC/jaggedstructs.ll b/test/CodeGen/PowerPC/jaggedstructs.ll new file mode 100644 index 0000000..62aa7cf --- /dev/null +++ b/test/CodeGen/PowerPC/jaggedstructs.ll @@ -0,0 +1,48 @@ +; RUN: llc -mcpu=pwr7 -O0 < %s | FileCheck %s + +; This tests receiving and re-passing parameters consisting of structures +; of size 3, 5, 6, and 7. They are to be found/placed right-adjusted in +; the parameter registers. 
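; Worked example (assumed, not part of the committed test): with big-endian
; right-adjustment the 3-byte %struct.S3 arriving in GPR3 sits in the three
; low-order bytes of its register, so once GPR3 is spilled to 192(1) the
; struct's data lies at offsets 197-199, which is where the lhz/lbz reloads
; checked below pick it up before it is re-passed to @check.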
+ +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +%struct.S3 = type { [3 x i8] } +%struct.S5 = type { [5 x i8] } +%struct.S6 = type { [6 x i8] } +%struct.S7 = type { [7 x i8] } + +define void @test(%struct.S3* byval %s3, %struct.S5* byval %s5, %struct.S6* byval %s6, %struct.S7* byval %s7) nounwind { +entry: + call void @check(%struct.S3* byval %s3, %struct.S5* byval %s5, %struct.S6* byval %s6, %struct.S7* byval %s7) + ret void +} + +; CHECK: std 6, 216(1) +; CHECK: std 5, 208(1) +; CHECK: std 4, 200(1) +; CHECK: std 3, 192(1) +; CHECK: lbz {{[0-9]+}}, 199(1) +; CHECK: stb {{[0-9]+}}, 55(1) +; CHECK: lhz {{[0-9]+}}, 197(1) +; CHECK: sth {{[0-9]+}}, 53(1) +; CHECK: lbz {{[0-9]+}}, 207(1) +; CHECK: stb {{[0-9]+}}, 63(1) +; CHECK: lwz {{[0-9]+}}, 203(1) +; CHECK: stw {{[0-9]+}}, 59(1) +; CHECK: lhz {{[0-9]+}}, 214(1) +; CHECK: sth {{[0-9]+}}, 70(1) +; CHECK: lwz {{[0-9]+}}, 210(1) +; CHECK: stw {{[0-9]+}}, 66(1) +; CHECK: lbz {{[0-9]+}}, 223(1) +; CHECK: stb {{[0-9]+}}, 79(1) +; CHECK: lhz {{[0-9]+}}, 221(1) +; CHECK: sth {{[0-9]+}}, 77(1) +; CHECK: lwz {{[0-9]+}}, 217(1) +; CHECK: stw {{[0-9]+}}, 73(1) +; CHECK: ld 6, 72(1) +; CHECK: ld 5, 64(1) +; CHECK: ld 4, 56(1) +; CHECK: ld 3, 48(1) + +declare void @check(%struct.S3* byval, %struct.S5* byval, %struct.S6* byval, %struct.S7* byval) diff --git a/test/CodeGen/PowerPC/misched.ll b/test/CodeGen/PowerPC/misched.ll new file mode 100644 index 0000000..d6fb3b3 --- /dev/null +++ b/test/CodeGen/PowerPC/misched.ll @@ -0,0 +1,45 @@ +; RUN: llc < %s -enable-misched -verify-machineinstrs +; PR14302 +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64" +target triple = "powerpc64-bgq-linux" + +@b = external global [16000 x double], align 32 + +define void @pr14302() nounwind { +entry: + tail call void @putchar() nounwind + br label %for.body + +for.body: ; preds = %for.body, %entry + br i1 undef, label %for.body, label %for.body24.i + +for.body24.i: ; preds = %for.body24.i, %for.body + store double 1.000000e+00, double* undef, align 8 + br i1 undef, label %for.body24.i58, label %for.body24.i + +for.body24.i58: ; preds = %for.body24.i58, %for.body24.i + %arrayidx26.i55.1 = getelementptr inbounds [16000 x double]* @b, i64 0, i64 undef + store double 1.000000e+00, double* %arrayidx26.i55.1, align 8 + br i1 undef, label %for.body24.i64, label %for.body24.i58 + +for.body24.i64: ; preds = %for.body24.i64, %for.body24.i58 + %exitcond.2489 = icmp eq i32 0, 16000 + br i1 %exitcond.2489, label %for.body24.i70, label %for.body24.i64 + +for.body24.i70: ; preds = %for.body24.i70, %for.body24.i64 + br i1 undef, label %for.body24.i76, label %for.body24.i70 + +for.body24.i76: ; preds = %for.body24.i76, %for.body24.i70 + br i1 undef, label %set1d.exit77, label %for.body24.i76 + +set1d.exit77: ; preds = %for.body24.i76 + br label %for.body29 + +for.body29: ; preds = %for.body29, %set1d.exit77 + br i1 undef, label %for.end35, label %for.body29 + +for.end35: ; preds = %for.body29 + ret void +} + +declare void @putchar() diff --git a/test/CodeGen/PowerPC/novrsave.ll b/test/CodeGen/PowerPC/novrsave.ll new file mode 100644 index 0000000..a70576a --- /dev/null +++ b/test/CodeGen/PowerPC/novrsave.ll @@ -0,0 +1,15 @@ +; RUN: llc -O0 -mtriple=powerpc-unknown-linux-gnu < %s | FileCheck %s +; RUN: llc -O0 -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s + +; This verifies that 
the code to update VRSAVE has been removed for SVR4. + +define <4 x float> @bar(<4 x float> %v) nounwind { +entry: + %v.addr = alloca <4 x float>, align 16 + store <4 x float> %v, <4 x float>* %v.addr, align 16 + %0 = load <4 x float>* %v.addr, align 16 + ret <4 x float> %0 +} + +; CHECK-NOT: mfspr +; CHECK-NOT: mtspr diff --git a/test/CodeGen/PowerPC/ppc64-abi-extend.ll b/test/CodeGen/PowerPC/ppc64-abi-extend.ll new file mode 100644 index 0000000..8baf1c6 --- /dev/null +++ b/test/CodeGen/PowerPC/ppc64-abi-extend.ll @@ -0,0 +1,97 @@ +; Verify that i32 argument/return values are extended to i64 + +; RUN: llc < %s | FileCheck %s +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +@si = common global i32 0, align 4 +@ui = common global i32 0, align 4 + +declare void @arg_si(i32 signext) +declare void @arg_ui(i32 zeroext) + +declare signext i32 @ret_si() +declare zeroext i32 @ret_ui() + +define void @pass_arg_si() nounwind { +entry: + %0 = load i32* @si, align 4 + tail call void @arg_si(i32 signext %0) nounwind + ret void +} +; CHECK: @pass_arg_si +; CHECK: lwa 3, +; CHECK: bl arg_si + +define void @pass_arg_ui() nounwind { +entry: + %0 = load i32* @ui, align 4 + tail call void @arg_ui(i32 zeroext %0) nounwind + ret void +} +; CHECK: @pass_arg_ui +; CHECK: lwz 3, +; CHECK: bl arg_ui + +define i64 @use_arg_si(i32 signext %x) nounwind readnone { +entry: + %conv = sext i32 %x to i64 + ret i64 %conv +} +; CHECK: @use_arg_si +; CHECK: %entry +; CHECK-NEXT: blr + +define i64 @use_arg_ui(i32 zeroext %x) nounwind readnone { +entry: + %conv = zext i32 %x to i64 + ret i64 %conv +} +; CHECK: @use_arg_ui +; CHECK: %entry +; CHECK-NEXT: blr + +define signext i32 @pass_ret_si() nounwind readonly { +entry: + %0 = load i32* @si, align 4 + ret i32 %0 +} +; CHECK: @pass_ret_si +; CHECK: lwa 3, +; CHECK: blr + +define zeroext i32 @pass_ret_ui() nounwind readonly { +entry: + %0 = load i32* @ui, align 4 + ret i32 %0 +} +; CHECK: @pass_ret_ui +; CHECK: lwz 3, +; CHECK: blr + +define i64 @use_ret_si() nounwind { +entry: + %call = tail call signext i32 @ret_si() nounwind + %conv = sext i32 %call to i64 + ret i64 %conv +} +; CHECK: @use_ret_si +; CHECK: bl ret_si +; This is to verify the return register (3) set up by the ret_si +; call is passed on unmodified as return value of use_ret_si. +; CHECK-NOT: 3 +; CHECK: blr + +define i64 @use_ret_ui() nounwind { +entry: + %call = tail call zeroext i32 @ret_ui() nounwind + %conv = zext i32 %call to i64 + ret i64 %conv +} +; CHECK: @use_ret_ui +; CHECK: bl ret_ui +; This is to verify the return register (3) set up by the ret_ui +; call is passed on unmodified as return value of use_ret_ui. +; CHECK-NOT: 3 +; CHECK: blr + diff --git a/test/CodeGen/PowerPC/ppc64-align-long-double.ll b/test/CodeGen/PowerPC/ppc64-align-long-double.ll new file mode 100644 index 0000000..10b70d0 --- /dev/null +++ b/test/CodeGen/PowerPC/ppc64-align-long-double.ll @@ -0,0 +1,26 @@ +; RUN: llc -mcpu=pwr7 -O0 < %s | FileCheck %s + +; Verify internal alignment of long double in a struct. The double +; argument comes in in GPR3; GPR4 is skipped; GPRs 5 and 6 contain +; the long double. Check that these are stored to proper locations +; in the parameter save area and loaded from there for return in FPR1/2. 
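; Worked example (assumed, not part of the committed test): the parameter save
; area begins at 48(1), so the struct's leading double is stored at 48, the
; doubleword skipped for alignment (GPR4) at 56, and the 16-byte ppc_fp128 at
; 64 and 72; those last two slots are what the lfd instructions reload into
; FPR1 and FPR2 for the return value.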
+ +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +%struct.S = type { double, ppc_fp128 } + +define ppc_fp128 @test(%struct.S* byval %x) nounwind { +entry: + %b = getelementptr inbounds %struct.S* %x, i32 0, i32 1 + %0 = load ppc_fp128* %b, align 16 + ret ppc_fp128 %0 +} + +; CHECK: std 6, 72(1) +; CHECK: std 5, 64(1) +; CHECK: std 4, 56(1) +; CHECK: std 3, 48(1) +; CHECK: lfd 1, 64(1) +; CHECK: lfd 2, 72(1) + diff --git a/test/CodeGen/PowerPC/ppc64-calls.ll b/test/CodeGen/PowerPC/ppc64-calls.ll new file mode 100644 index 0000000..c382edbb --- /dev/null +++ b/test/CodeGen/PowerPC/ppc64-calls.ll @@ -0,0 +1,63 @@ +; RUN: llc < %s -march=ppc64 | FileCheck %s +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +define void @foo() nounwind readnone noinline { + ret void +} + +define weak void @foo_weak() nounwind { + ret void +} + +; Calls to local function does not require the TOC restore 'nop' +define void @test_direct() nounwind readnone { +; CHECK: test_direct: + tail call void @foo() nounwind +; CHECK: bl foo +; CHECK-NOT: nop + ret void +} + +; Calls to weak function requires a TOC restore 'nop' because they +; may be overridden in a different module. +define void @test_weak() nounwind readnone { +; CHECK: test_weak: + tail call void @foo_weak() nounwind +; CHECK: bl foo +; CHECK-NEXT: nop + ret void +} + +; Indirect calls requires a full stub creation +define void @test_indirect(void ()* nocapture %fp) nounwind { +; CHECK: test_indirect: + tail call void %fp() nounwind +; CHECK: ld [[FP:[0-9]+]], 0(3) +; CHECK: ld 11, 16(3) +; CHECK: ld 2, 8(3) +; CHECK-NEXT: mtctr [[FP]] +; CHECK-NEXT: bctrl +; CHECK-NEXT: ld 2, 40(1) + ret void +} + +; Absolute vales should be have the TOC restore 'nop' +define void @test_abs() nounwind { +; CHECK: test_abs: + tail call void inttoptr (i64 1024 to void ()*)() nounwind +; CHECK: bla 1024 +; CHECK-NEXT: nop + ret void +} + +declare double @sin(double) nounwind + +; External functions call should also have a 'nop' +define double @test_external(double %x) nounwind { +; CHECK: test_external: + %call = tail call double @sin(double %x) nounwind +; CHECK: bl sin +; CHECK-NEXT: nop + ret double %call +} diff --git a/test/CodeGen/PowerPC/ppc64-ind-call.ll b/test/CodeGen/PowerPC/ppc64-ind-call.ll deleted file mode 100644 index d5c4d46..0000000 --- a/test/CodeGen/PowerPC/ppc64-ind-call.ll +++ /dev/null @@ -1,16 +0,0 @@ -; RUN: llc < %s -march=ppc64 | FileCheck %s -target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" -target triple = "powerpc64-unknown-linux-gnu" - -define void @test1() { -entry: - %call.i75 = call zeroext i8 undef(i8* undef, i8 zeroext 10) - unreachable -} - -; CHECK: @test1 -; CHECK: ld 11, 0(3) -; CHECK: ld 2, 8(3) -; CHECK: bctrl -; CHECK: ld 2, 40(1) - diff --git a/test/CodeGen/PowerPC/ppc64-linux-func-size.ll b/test/CodeGen/PowerPC/ppc64-linux-func-size.ll index e5aa1f1..e1d50ba 100644 --- a/test/CodeGen/PowerPC/ppc64-linux-func-size.ll +++ b/test/CodeGen/PowerPC/ppc64-linux-func-size.ll @@ -5,6 +5,7 @@ ; CHECK-NEXT: .align 3 ; CHECK-NEXT: .quad .L.test1 ; CHECK-NEXT: .quad .TOC.@tocbase +; CHECK-NEXT: .quad 0 ; CHECK-NEXT: .text ; CHECK-NEXT: .L.test1: diff --git a/test/CodeGen/PowerPC/ppc64-toc.ll 
b/test/CodeGen/PowerPC/ppc64-toc.ll new file mode 100644 index 0000000..a29bdcb --- /dev/null +++ b/test/CodeGen/PowerPC/ppc64-toc.ll @@ -0,0 +1,68 @@ +; RUN: llc < %s | FileCheck %s +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +@double_array = global [32 x double] zeroinitializer, align 8 +@number64 = global i64 10, align 8 +@internal_static_var.x = internal unnamed_addr global i64 0, align 8 + +define i64 @access_int64(i64 %a) nounwind readonly { +entry: +; CHECK: access_int64: +; CHECK-NEXT: .align 3 +; CHECK-NEXT: .quad .L.access_int64 +; CHECK-NEXT: .quad .TOC.@tocbase +; CHECK-NEXT: .quad 0 +; CHECK-NEXT: .text + %0 = load i64* @number64, align 8 +; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2) + %cmp = icmp eq i64 %0, %a + %conv1 = zext i1 %cmp to i64 + ret i64 %conv1 +} + +define i64 @internal_static_var(i64 %a) nounwind { +entry: +; CHECK: internal_static_var: +; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2) + %0 = load i64* @internal_static_var.x, align 8 + %cmp = icmp eq i64 %0, %a + %conv1 = zext i1 %cmp to i64 + ret i64 %conv1 +} + +define i32 @access_double(double %a) nounwind readnone { +entry: +; CHECK: access_double: +; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2) + %cmp = fcmp oeq double %a, 2.000000e+00 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + + +define i32 @access_double_array(double %a, i32 %i) nounwind readonly { +entry: +; CHECK: access_double_array: + %idxprom = sext i32 %i to i64 + %arrayidx = getelementptr inbounds [32 x double]* @double_array, i64 0, i64 %idxprom + %0 = load double* %arrayidx, align 8 +; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2) + %cmp = fcmp oeq double %0, %a + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +; Check the creation of 4 .tc entries: +; * int64_t global 'number64' +; * double constant 2.0 +; * double array 'double_array' +; * static int64_t 'x' accessed within '@internal_static_var' +; CHECK: .LC{{[0-9]+}}: +; CHECK-NEXT: .tc {{[\._a-zA-Z0-9]+}}[TC],{{[\._a-zA-Z0-9]+}} +; CHECK-NEXT: .LC{{[0-9]+}}: +; CHECK-NEXT: .tc {{[\._a-zA-Z0-9]+}}[TC],{{[\._a-zA-Z0-9]+}} +; CHECK-NEXT: .LC{{[0-9]+}}: +; CHECK-NEXT: .tc {{[\._a-zA-Z0-9]+}}[TC],{{[\._a-zA-Z0-9]+}} +; CHECK-NEXT: .LC{{[0-9]+}}: +; CHECK-NEXT: .tc {{[\._a-zA-Z0-9]+}}[TC],{{[\._a-zA-Z0-9]+}} diff --git a/test/CodeGen/PowerPC/ppc64-zext.ll b/test/CodeGen/PowerPC/ppc64-zext.ll new file mode 100644 index 0000000..eb55445 --- /dev/null +++ b/test/CodeGen/PowerPC/ppc64-zext.ll @@ -0,0 +1,11 @@ +; RUN: llc < %s | FileCheck %s +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux" + +define i64 @fun(i32 %arg32) nounwind { +entry: +; CHECK: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 32 + %o = zext i32 %arg32 to i64 + ret i64 %o +} + diff --git a/test/CodeGen/PowerPC/pr12757.ll b/test/CodeGen/PowerPC/pr12757.ll new file mode 100644 index 0000000..c344656 --- /dev/null +++ b/test/CodeGen/PowerPC/pr12757.ll @@ -0,0 +1,14 @@ +; RUN: llc < %s | FileCheck %s +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +define i32 @__flt_rounds() nounwind { +entry: + %0 = tail call i64 asm sideeffect "mffs $0", "=f"() nounwind + %conv = trunc i64 %0 to i32 + ret i32 %conv +} + +; CHECK: @__flt_rounds +; CHECK: mffs + diff --git a/test/CodeGen/PowerPC/pr13641.ll 
b/test/CodeGen/PowerPC/pr13641.ll new file mode 100644 index 0000000..c4d3f3a --- /dev/null +++ b/test/CodeGen/PowerPC/pr13641.ll @@ -0,0 +1,11 @@ +; RUN: llc < %s | FileCheck %s +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +define void @foo() nounwind { + ret void +} + +; CHECK: blr +; CHECK-NEXT: .long 0 +; CHECK-NEXT: .quad 0 diff --git a/test/CodeGen/PowerPC/pr13891.ll b/test/CodeGen/PowerPC/pr13891.ll new file mode 100644 index 0000000..3ae7385 --- /dev/null +++ b/test/CodeGen/PowerPC/pr13891.ll @@ -0,0 +1,27 @@ +; RUN: llc < %s | FileCheck %s +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +%struct.foo = type { i8, i8 } + +define void @_Z5check3foos(%struct.foo* nocapture byval %f, i16 signext %i) noinline { +; CHECK: _Z5check3foos: +; CHECK: sth 3, {{[0-9]+}}(1) +; CHECK: lha {{[0-9]+}}, {{[0-9]+}}(1) +entry: + %0 = bitcast %struct.foo* %f to i16* + %1 = load i16* %0, align 2 + %bf.val.sext = ashr i16 %1, 8 + %cmp = icmp eq i16 %bf.val.sext, %i + br i1 %cmp, label %if.end, label %if.then + +if.then: ; preds = %entry + %conv = sext i16 %bf.val.sext to i32 + tail call void @exit(i32 %conv) + br label %if.end + +if.end: ; preds = %entry, %if.then + ret void +} + +declare void @exit(i32) diff --git a/test/CodeGen/PowerPC/remat-imm.ll b/test/CodeGen/PowerPC/remat-imm.ll new file mode 100644 index 0000000..520921f --- /dev/null +++ b/test/CodeGen/PowerPC/remat-imm.ll @@ -0,0 +1,16 @@ +; RUN: llc < %s | FileCheck %s +; ModuleID = 'test.c' +target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32" +target triple = "powerpc-unknown-linux" + +@.str = private unnamed_addr constant [6 x i8] c"%d,%d\00", align 1 + +define i32 @main() nounwind { +entry: +; CHECK: li 4, 128 +; CHECK-NOT: mr 4, {{.*}} + %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([6 x i8]* @.str, i32 0, i32 0), i32 128, i32 128) nounwind + ret i32 0 +} + +declare i32 @printf(i8* nocapture, ...) nounwind diff --git a/test/CodeGen/PowerPC/structsinmem.ll b/test/CodeGen/PowerPC/structsinmem.ll new file mode 100644 index 0000000..884d3a8 --- /dev/null +++ b/test/CodeGen/PowerPC/structsinmem.ll @@ -0,0 +1,227 @@ +; RUN: llc -mcpu=pwr7 -O0 -disable-fp-elim < %s | FileCheck %s + +; FIXME: The code generation for packed structs is very poor because the +; PowerPC target wrongly rejects all unaligned loads. This test case will +; need to be revised when that is fixed. 
+ +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +%struct.s1 = type { i8 } +%struct.s2 = type { i16 } +%struct.s4 = type { i32 } +%struct.t1 = type { i8 } +%struct.t3 = type <{ i16, i8 }> +%struct.t5 = type <{ i32, i8 }> +%struct.t6 = type <{ i32, i16 }> +%struct.t7 = type <{ i32, i16, i8 }> +%struct.s3 = type { i16, i8 } +%struct.s5 = type { i32, i8 } +%struct.s6 = type { i32, i16 } +%struct.s7 = type { i32, i16, i8 } +%struct.t2 = type <{ i16 }> +%struct.t4 = type <{ i32 }> + +@caller1.p1 = private unnamed_addr constant %struct.s1 { i8 1 }, align 1 +@caller1.p2 = private unnamed_addr constant %struct.s2 { i16 2 }, align 2 +@caller1.p3 = private unnamed_addr constant { i16, i8, i8 } { i16 4, i8 8, i8 undef }, align 2 +@caller1.p4 = private unnamed_addr constant %struct.s4 { i32 16 }, align 4 +@caller1.p5 = private unnamed_addr constant { i32, i8, [3 x i8] } { i32 32, i8 64, [3 x i8] undef }, align 4 +@caller1.p6 = private unnamed_addr constant { i32, i16, [2 x i8] } { i32 128, i16 256, [2 x i8] undef }, align 4 +@caller1.p7 = private unnamed_addr constant { i32, i16, i8, i8 } { i32 512, i16 1024, i8 -3, i8 undef }, align 4 +@caller2.p1 = private unnamed_addr constant %struct.t1 { i8 1 }, align 1 +@caller2.p2 = private unnamed_addr constant { i16 } { i16 2 }, align 1 +@caller2.p3 = private unnamed_addr constant %struct.t3 <{ i16 4, i8 8 }>, align 1 +@caller2.p4 = private unnamed_addr constant { i32 } { i32 16 }, align 1 +@caller2.p5 = private unnamed_addr constant %struct.t5 <{ i32 32, i8 64 }>, align 1 +@caller2.p6 = private unnamed_addr constant %struct.t6 <{ i32 128, i16 256 }>, align 1 +@caller2.p7 = private unnamed_addr constant %struct.t7 <{ i32 512, i16 1024, i8 -3 }>, align 1 + +define i32 @caller1() nounwind { +entry: + %p1 = alloca %struct.s1, align 1 + %p2 = alloca %struct.s2, align 2 + %p3 = alloca %struct.s3, align 2 + %p4 = alloca %struct.s4, align 4 + %p5 = alloca %struct.s5, align 4 + %p6 = alloca %struct.s6, align 4 + %p7 = alloca %struct.s7, align 4 + %0 = bitcast %struct.s1* %p1 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i32 1, i1 false) + %1 = bitcast %struct.s2* %p2 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i32 2, i1 false) + %2 = bitcast %struct.s3* %p3 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast ({ i16, i8, i8 }* @caller1.p3 to i8*), i64 4, i32 2, i1 false) + %3 = bitcast %struct.s4* %p4 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast (%struct.s4* @caller1.p4 to i8*), i64 4, i32 4, i1 false) + %4 = bitcast %struct.s5* %p5 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast ({ i32, i8, [3 x i8] }* @caller1.p5 to i8*), i64 8, i32 4, i1 false) + %5 = bitcast %struct.s6* %p6 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i32 4, i1 false) + %6 = bitcast %struct.s7* %p7 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i32 4, i1 false) + %call = call i32 @callee1(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.s1* byval %p1, %struct.s2* byval %p2, %struct.s3* byval %p3, %struct.s4* byval %p4, %struct.s5* byval %p5, %struct.s6* byval %p6, %struct.s7* byval %p7) + ret i32 %call + +; CHECK: stb 
{{[0-9]+}}, 119(1) +; CHECK: sth {{[0-9]+}}, 126(1) +; CHECK: stw {{[0-9]+}}, 132(1) +; CHECK: stw {{[0-9]+}}, 140(1) +; CHECK: std {{[0-9]+}}, 144(1) +; CHECK: std {{[0-9]+}}, 152(1) +; CHECK: std {{[0-9]+}}, 160(1) +} + +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind + +define internal i32 @callee1(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.s1* byval %v1, %struct.s2* byval %v2, %struct.s3* byval %v3, %struct.s4* byval %v4, %struct.s5* byval %v5, %struct.s6* byval %v6, %struct.s7* byval %v7) nounwind { +entry: + %z1.addr = alloca i32, align 4 + %z2.addr = alloca i32, align 4 + %z3.addr = alloca i32, align 4 + %z4.addr = alloca i32, align 4 + %z5.addr = alloca i32, align 4 + %z6.addr = alloca i32, align 4 + %z7.addr = alloca i32, align 4 + %z8.addr = alloca i32, align 4 + store i32 %z1, i32* %z1.addr, align 4 + store i32 %z2, i32* %z2.addr, align 4 + store i32 %z3, i32* %z3.addr, align 4 + store i32 %z4, i32* %z4.addr, align 4 + store i32 %z5, i32* %z5.addr, align 4 + store i32 %z6, i32* %z6.addr, align 4 + store i32 %z7, i32* %z7.addr, align 4 + store i32 %z8, i32* %z8.addr, align 4 + %a = getelementptr inbounds %struct.s1* %v1, i32 0, i32 0 + %0 = load i8* %a, align 1 + %conv = zext i8 %0 to i32 + %a1 = getelementptr inbounds %struct.s2* %v2, i32 0, i32 0 + %1 = load i16* %a1, align 2 + %conv2 = sext i16 %1 to i32 + %add = add nsw i32 %conv, %conv2 + %a3 = getelementptr inbounds %struct.s3* %v3, i32 0, i32 0 + %2 = load i16* %a3, align 2 + %conv4 = sext i16 %2 to i32 + %add5 = add nsw i32 %add, %conv4 + %a6 = getelementptr inbounds %struct.s4* %v4, i32 0, i32 0 + %3 = load i32* %a6, align 4 + %add7 = add nsw i32 %add5, %3 + %a8 = getelementptr inbounds %struct.s5* %v5, i32 0, i32 0 + %4 = load i32* %a8, align 4 + %add9 = add nsw i32 %add7, %4 + %a10 = getelementptr inbounds %struct.s6* %v6, i32 0, i32 0 + %5 = load i32* %a10, align 4 + %add11 = add nsw i32 %add9, %5 + %a12 = getelementptr inbounds %struct.s7* %v7, i32 0, i32 0 + %6 = load i32* %a12, align 4 + %add13 = add nsw i32 %add11, %6 + ret i32 %add13 + +; CHECK: lha {{[0-9]+}}, 126(1) +; CHECK: lbz {{[0-9]+}}, 119(1) +; CHECK: lha {{[0-9]+}}, 132(1) +; CHECK: lwz {{[0-9]+}}, 140(1) +; CHECK: lwz {{[0-9]+}}, 144(1) +; CHECK: lwz {{[0-9]+}}, 152(1) +; CHECK: lwz {{[0-9]+}}, 160(1) +} + +define i32 @caller2() nounwind { +entry: + %p1 = alloca %struct.t1, align 1 + %p2 = alloca %struct.t2, align 1 + %p3 = alloca %struct.t3, align 1 + %p4 = alloca %struct.t4, align 1 + %p5 = alloca %struct.t5, align 1 + %p6 = alloca %struct.t6, align 1 + %p7 = alloca %struct.t7, align 1 + %0 = bitcast %struct.t1* %p1 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i32 1, i1 false) + %1 = bitcast %struct.t2* %p2 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i32 1, i1 false) + %2 = bitcast %struct.t3* %p3 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast (%struct.t3* @caller2.p3 to i8*), i64 3, i32 1, i1 false) + %3 = bitcast %struct.t4* %p4 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast ({ i32 }* @caller2.p4 to i8*), i64 4, i32 1, i1 false) + %4 = bitcast %struct.t5* %p5 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast (%struct.t5* @caller2.p5 to i8*), i64 5, i32 1, i1 false) + %5 = bitcast %struct.t6* %p6 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* 
@caller2.p6 to i8*), i64 6, i32 1, i1 false) + %6 = bitcast %struct.t7* %p7 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i32 1, i1 false) + %call = call i32 @callee2(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.t1* byval %p1, %struct.t2* byval %p2, %struct.t3* byval %p3, %struct.t4* byval %p4, %struct.t5* byval %p5, %struct.t6* byval %p6, %struct.t7* byval %p7) + ret i32 %call + +; CHECK: stb {{[0-9]+}}, 119(1) +; CHECK: sth {{[0-9]+}}, 126(1) +; CHECK: stb {{[0-9]+}}, 135(1) +; CHECK: sth {{[0-9]+}}, 133(1) +; CHECK: stw {{[0-9]+}}, 140(1) +; CHECK: stb {{[0-9]+}}, 151(1) +; CHECK: stw {{[0-9]+}}, 147(1) +; CHECK: sth {{[0-9]+}}, 158(1) +; CHECK: stw {{[0-9]+}}, 154(1) +; CHECK: stb {{[0-9]+}}, 167(1) +; CHECK: sth {{[0-9]+}}, 165(1) +; CHECK: stw {{[0-9]+}}, 161(1) +} + +define internal i32 @callee2(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.t1* byval %v1, %struct.t2* byval %v2, %struct.t3* byval %v3, %struct.t4* byval %v4, %struct.t5* byval %v5, %struct.t6* byval %v6, %struct.t7* byval %v7) nounwind { +entry: + %z1.addr = alloca i32, align 4 + %z2.addr = alloca i32, align 4 + %z3.addr = alloca i32, align 4 + %z4.addr = alloca i32, align 4 + %z5.addr = alloca i32, align 4 + %z6.addr = alloca i32, align 4 + %z7.addr = alloca i32, align 4 + %z8.addr = alloca i32, align 4 + store i32 %z1, i32* %z1.addr, align 4 + store i32 %z2, i32* %z2.addr, align 4 + store i32 %z3, i32* %z3.addr, align 4 + store i32 %z4, i32* %z4.addr, align 4 + store i32 %z5, i32* %z5.addr, align 4 + store i32 %z6, i32* %z6.addr, align 4 + store i32 %z7, i32* %z7.addr, align 4 + store i32 %z8, i32* %z8.addr, align 4 + %a = getelementptr inbounds %struct.t1* %v1, i32 0, i32 0 + %0 = load i8* %a, align 1 + %conv = zext i8 %0 to i32 + %a1 = getelementptr inbounds %struct.t2* %v2, i32 0, i32 0 + %1 = load i16* %a1, align 1 + %conv2 = sext i16 %1 to i32 + %add = add nsw i32 %conv, %conv2 + %a3 = getelementptr inbounds %struct.t3* %v3, i32 0, i32 0 + %2 = load i16* %a3, align 1 + %conv4 = sext i16 %2 to i32 + %add5 = add nsw i32 %add, %conv4 + %a6 = getelementptr inbounds %struct.t4* %v4, i32 0, i32 0 + %3 = load i32* %a6, align 1 + %add7 = add nsw i32 %add5, %3 + %a8 = getelementptr inbounds %struct.t5* %v5, i32 0, i32 0 + %4 = load i32* %a8, align 1 + %add9 = add nsw i32 %add7, %4 + %a10 = getelementptr inbounds %struct.t6* %v6, i32 0, i32 0 + %5 = load i32* %a10, align 1 + %add11 = add nsw i32 %add9, %5 + %a12 = getelementptr inbounds %struct.t7* %v7, i32 0, i32 0 + %6 = load i32* %a12, align 1 + %add13 = add nsw i32 %add11, %6 + ret i32 %add13 + +; CHECK: lbz {{[0-9]+}}, 149(1) +; CHECK: lbz {{[0-9]+}}, 150(1) +; CHECK: lbz {{[0-9]+}}, 147(1) +; CHECK: lbz {{[0-9]+}}, 148(1) +; CHECK: lbz {{[0-9]+}}, 133(1) +; CHECK: lbz {{[0-9]+}}, 134(1) +; CHECK: lha {{[0-9]+}}, 126(1) +; CHECK: lbz {{[0-9]+}}, 119(1) +; CHECK: lwz {{[0-9]+}}, 140(1) +; CHECK: lhz {{[0-9]+}}, 154(1) +; CHECK: lhz {{[0-9]+}}, 156(1) +; CHECK: lbz {{[0-9]+}}, 163(1) +; CHECK: lbz {{[0-9]+}}, 164(1) +; CHECK: lbz {{[0-9]+}}, 161(1) +; CHECK: lbz {{[0-9]+}}, 162(1) +} diff --git a/test/CodeGen/PowerPC/structsinregs.ll b/test/CodeGen/PowerPC/structsinregs.ll new file mode 100644 index 0000000..ef706af --- /dev/null +++ b/test/CodeGen/PowerPC/structsinregs.ll @@ -0,0 +1,213 @@ +; RUN: llc -mcpu=pwr7 -O0 -disable-fp-elim < %s | FileCheck %s + +; FIXME: The code generation for packed structs is very poor because the +; PowerPC target wrongly 
rejects all unaligned loads. This test case will +; need to be revised when that is fixed. + +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +%struct.s1 = type { i8 } +%struct.s2 = type { i16 } +%struct.s4 = type { i32 } +%struct.t1 = type { i8 } +%struct.t3 = type <{ i16, i8 }> +%struct.t5 = type <{ i32, i8 }> +%struct.t6 = type <{ i32, i16 }> +%struct.t7 = type <{ i32, i16, i8 }> +%struct.s3 = type { i16, i8 } +%struct.s5 = type { i32, i8 } +%struct.s6 = type { i32, i16 } +%struct.s7 = type { i32, i16, i8 } +%struct.t2 = type <{ i16 }> +%struct.t4 = type <{ i32 }> + +@caller1.p1 = private unnamed_addr constant %struct.s1 { i8 1 }, align 1 +@caller1.p2 = private unnamed_addr constant %struct.s2 { i16 2 }, align 2 +@caller1.p3 = private unnamed_addr constant { i16, i8, i8 } { i16 4, i8 8, i8 undef }, align 2 +@caller1.p4 = private unnamed_addr constant %struct.s4 { i32 16 }, align 4 +@caller1.p5 = private unnamed_addr constant { i32, i8, [3 x i8] } { i32 32, i8 64, [3 x i8] undef }, align 4 +@caller1.p6 = private unnamed_addr constant { i32, i16, [2 x i8] } { i32 128, i16 256, [2 x i8] undef }, align 4 +@caller1.p7 = private unnamed_addr constant { i32, i16, i8, i8 } { i32 512, i16 1024, i8 -3, i8 undef }, align 4 +@caller2.p1 = private unnamed_addr constant %struct.t1 { i8 1 }, align 1 +@caller2.p2 = private unnamed_addr constant { i16 } { i16 2 }, align 1 +@caller2.p3 = private unnamed_addr constant %struct.t3 <{ i16 4, i8 8 }>, align 1 +@caller2.p4 = private unnamed_addr constant { i32 } { i32 16 }, align 1 +@caller2.p5 = private unnamed_addr constant %struct.t5 <{ i32 32, i8 64 }>, align 1 +@caller2.p6 = private unnamed_addr constant %struct.t6 <{ i32 128, i16 256 }>, align 1 +@caller2.p7 = private unnamed_addr constant %struct.t7 <{ i32 512, i16 1024, i8 -3 }>, align 1 + +define i32 @caller1() nounwind { +entry: + %p1 = alloca %struct.s1, align 1 + %p2 = alloca %struct.s2, align 2 + %p3 = alloca %struct.s3, align 2 + %p4 = alloca %struct.s4, align 4 + %p5 = alloca %struct.s5, align 4 + %p6 = alloca %struct.s6, align 4 + %p7 = alloca %struct.s7, align 4 + %0 = bitcast %struct.s1* %p1 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i32 1, i1 false) + %1 = bitcast %struct.s2* %p2 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i32 2, i1 false) + %2 = bitcast %struct.s3* %p3 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast ({ i16, i8, i8 }* @caller1.p3 to i8*), i64 4, i32 2, i1 false) + %3 = bitcast %struct.s4* %p4 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast (%struct.s4* @caller1.p4 to i8*), i64 4, i32 4, i1 false) + %4 = bitcast %struct.s5* %p5 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast ({ i32, i8, [3 x i8] }* @caller1.p5 to i8*), i64 8, i32 4, i1 false) + %5 = bitcast %struct.s6* %p6 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i32 4, i1 false) + %6 = bitcast %struct.s7* %p7 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i32 4, i1 false) + %call = call i32 @callee1(%struct.s1* byval %p1, %struct.s2* byval %p2, %struct.s3* byval %p3, %struct.s4* byval %p4, %struct.s5* byval %p5, %struct.s6* byval %p6, %struct.s7* byval %p7) + ret 
i32 %call + +; CHECK: ld 9, 128(31) +; CHECK: ld 8, 136(31) +; CHECK: ld 7, 144(31) +; CHECK: lwz 6, 152(31) +; CHECK: lwz 5, 160(31) +; CHECK: lhz 4, 168(31) +; CHECK: lbz 3, 176(31) +} + +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind + +define internal i32 @callee1(%struct.s1* byval %v1, %struct.s2* byval %v2, %struct.s3* byval %v3, %struct.s4* byval %v4, %struct.s5* byval %v5, %struct.s6* byval %v6, %struct.s7* byval %v7) nounwind { +entry: + %a = getelementptr inbounds %struct.s1* %v1, i32 0, i32 0 + %0 = load i8* %a, align 1 + %conv = zext i8 %0 to i32 + %a1 = getelementptr inbounds %struct.s2* %v2, i32 0, i32 0 + %1 = load i16* %a1, align 2 + %conv2 = sext i16 %1 to i32 + %add = add nsw i32 %conv, %conv2 + %a3 = getelementptr inbounds %struct.s3* %v3, i32 0, i32 0 + %2 = load i16* %a3, align 2 + %conv4 = sext i16 %2 to i32 + %add5 = add nsw i32 %add, %conv4 + %a6 = getelementptr inbounds %struct.s4* %v4, i32 0, i32 0 + %3 = load i32* %a6, align 4 + %add7 = add nsw i32 %add5, %3 + %a8 = getelementptr inbounds %struct.s5* %v5, i32 0, i32 0 + %4 = load i32* %a8, align 4 + %add9 = add nsw i32 %add7, %4 + %a10 = getelementptr inbounds %struct.s6* %v6, i32 0, i32 0 + %5 = load i32* %a10, align 4 + %add11 = add nsw i32 %add9, %5 + %a12 = getelementptr inbounds %struct.s7* %v7, i32 0, i32 0 + %6 = load i32* %a12, align 4 + %add13 = add nsw i32 %add11, %6 + ret i32 %add13 + +; CHECK: std 9, 96(1) +; CHECK: std 8, 88(1) +; CHECK: std 7, 80(1) +; CHECK: stw 6, 76(1) +; CHECK: stw 5, 68(1) +; CHECK: sth 4, 62(1) +; CHECK: stb 3, 55(1) +; CHECK: lha {{[0-9]+}}, 62(1) +; CHECK: lbz {{[0-9]+}}, 55(1) +; CHECK: lha {{[0-9]+}}, 68(1) +; CHECK: lwz {{[0-9]+}}, 76(1) +; CHECK: lwz {{[0-9]+}}, 80(1) +; CHECK: lwz {{[0-9]+}}, 88(1) +; CHECK: lwz {{[0-9]+}}, 96(1) +} + +define i32 @caller2() nounwind { +entry: + %p1 = alloca %struct.t1, align 1 + %p2 = alloca %struct.t2, align 1 + %p3 = alloca %struct.t3, align 1 + %p4 = alloca %struct.t4, align 1 + %p5 = alloca %struct.t5, align 1 + %p6 = alloca %struct.t6, align 1 + %p7 = alloca %struct.t7, align 1 + %0 = bitcast %struct.t1* %p1 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i32 1, i1 false) + %1 = bitcast %struct.t2* %p2 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i32 1, i1 false) + %2 = bitcast %struct.t3* %p3 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast (%struct.t3* @caller2.p3 to i8*), i64 3, i32 1, i1 false) + %3 = bitcast %struct.t4* %p4 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast ({ i32 }* @caller2.p4 to i8*), i64 4, i32 1, i1 false) + %4 = bitcast %struct.t5* %p5 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast (%struct.t5* @caller2.p5 to i8*), i64 5, i32 1, i1 false) + %5 = bitcast %struct.t6* %p6 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6 to i8*), i64 6, i32 1, i1 false) + %6 = bitcast %struct.t7* %p7 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i32 1, i1 false) + %call = call i32 @callee2(%struct.t1* byval %p1, %struct.t2* byval %p2, %struct.t3* byval %p3, %struct.t4* byval %p4, %struct.t5* byval %p5, %struct.t6* byval %p6, %struct.t7* byval %p7) + ret i32 %call + +; CHECK: stb {{[0-9]+}}, 71(1) +; CHECK: sth {{[0-9]+}}, 69(1) +; CHECK: stb {{[0-9]+}}, 87(1) +; CHECK: stw {{[0-9]+}}, 83(1) 
+; CHECK: sth {{[0-9]+}}, 94(1) +; CHECK: stw {{[0-9]+}}, 90(1) +; CHECK: stb {{[0-9]+}}, 103(1) +; CHECK: sth {{[0-9]+}}, 101(1) +; CHECK: stw {{[0-9]+}}, 97(1) +; CHECK: ld 9, 96(1) +; CHECK: ld 8, 88(1) +; CHECK: ld 7, 80(1) +; CHECK: lwz 6, 152(31) +; CHECK: ld 5, 64(1) +; CHECK: lhz 4, 168(31) +; CHECK: lbz 3, 176(31) +} + +define internal i32 @callee2(%struct.t1* byval %v1, %struct.t2* byval %v2, %struct.t3* byval %v3, %struct.t4* byval %v4, %struct.t5* byval %v5, %struct.t6* byval %v6, %struct.t7* byval %v7) nounwind { +entry: + %a = getelementptr inbounds %struct.t1* %v1, i32 0, i32 0 + %0 = load i8* %a, align 1 + %conv = zext i8 %0 to i32 + %a1 = getelementptr inbounds %struct.t2* %v2, i32 0, i32 0 + %1 = load i16* %a1, align 1 + %conv2 = sext i16 %1 to i32 + %add = add nsw i32 %conv, %conv2 + %a3 = getelementptr inbounds %struct.t3* %v3, i32 0, i32 0 + %2 = load i16* %a3, align 1 + %conv4 = sext i16 %2 to i32 + %add5 = add nsw i32 %add, %conv4 + %a6 = getelementptr inbounds %struct.t4* %v4, i32 0, i32 0 + %3 = load i32* %a6, align 1 + %add7 = add nsw i32 %add5, %3 + %a8 = getelementptr inbounds %struct.t5* %v5, i32 0, i32 0 + %4 = load i32* %a8, align 1 + %add9 = add nsw i32 %add7, %4 + %a10 = getelementptr inbounds %struct.t6* %v6, i32 0, i32 0 + %5 = load i32* %a10, align 1 + %add11 = add nsw i32 %add9, %5 + %a12 = getelementptr inbounds %struct.t7* %v7, i32 0, i32 0 + %6 = load i32* %a12, align 1 + %add13 = add nsw i32 %add11, %6 + ret i32 %add13 + +; CHECK: std 9, 96(1) +; CHECK: std 8, 88(1) +; CHECK: std 7, 80(1) +; CHECK: stw 6, 76(1) +; CHECK: std 5, 64(1) +; CHECK: sth 4, 62(1) +; CHECK: stb 3, 55(1) +; CHECK: lbz {{[0-9]+}}, 85(1) +; CHECK: lbz {{[0-9]+}}, 86(1) +; CHECK: lbz {{[0-9]+}}, 83(1) +; CHECK: lbz {{[0-9]+}}, 84(1) +; CHECK: lbz {{[0-9]+}}, 69(1) +; CHECK: lbz {{[0-9]+}}, 70(1) +; CHECK: lha {{[0-9]+}}, 62(1) +; CHECK: lbz {{[0-9]+}}, 55(1) +; CHECK: lwz {{[0-9]+}}, 76(1) +; CHECK: lhz {{[0-9]+}}, 90(1) +; CHECK: lhz {{[0-9]+}}, 92(1) +; CHECK: lbz {{[0-9]+}}, 99(1) +; CHECK: lbz {{[0-9]+}}, 100(1) +; CHECK: lbz {{[0-9]+}}, 97(1) +; CHECK: lbz {{[0-9]+}}, 98(1) +} diff --git a/test/CodeGen/PowerPC/varargs-struct-float.ll b/test/CodeGen/PowerPC/varargs-struct-float.ll new file mode 100644 index 0000000..fb1835f --- /dev/null +++ b/test/CodeGen/PowerPC/varargs-struct-float.ll @@ -0,0 +1,23 @@ +; RUN: llc -mcpu=pwr7 -O0 < %s | FileCheck %s + +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +%struct.Sf1 = type { float } + +define void @foo(float inreg %s.coerce) nounwind { +entry: + %s = alloca %struct.Sf1, align 4 + %coerce.dive = getelementptr %struct.Sf1* %s, i32 0, i32 0 + store float %s.coerce, float* %coerce.dive, align 1 + %coerce.dive1 = getelementptr %struct.Sf1* %s, i32 0, i32 0 + %0 = load float* %coerce.dive1, align 1 + call void (i32, ...)* @testvaSf1(i32 1, float inreg %0) + ret void +} + +; CHECK: stfs {{[0-9]+}}, 60(1) +; CHECK: ld 4, 56(1) +; CHECK: bl + +declare void @testvaSf1(i32, ...) diff --git a/test/CodeGen/PowerPC/vec_cmp.ll b/test/CodeGen/PowerPC/vec_cmp.ll new file mode 100644 index 0000000..3180f46 --- /dev/null +++ b/test/CodeGen/PowerPC/vec_cmp.ll @@ -0,0 +1,527 @@ +; RUN: llc -mcpu=pwr6 -mattr=+altivec < %s | FileCheck %s + +; Check vector comparisons using altivec. For non native types, just basic +; comparison instruction check is done. 
For altivec supported types (16i8, +; 8i16, 4i32, and 4f32) all the comparison operators (==, !=, >, >=, <, <=) +; are checked. + + +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +define <2 x i8> @v2si8_cmp(<2 x i8> %x, <2 x i8> %y) nounwind readnone { + %cmp = icmp eq <2 x i8> %x, %y + %sext = sext <2 x i1> %cmp to <2 x i8> + ret <2 x i8> %sext +} +; CHECK: v2si8_cmp: +; CHECK: vcmpequb {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} + + +define <4 x i8> @v4si8_cmp(<4 x i8> %x, <4 x i8> %y) nounwind readnone { + %cmp = icmp eq <4 x i8> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i8> + ret <4 x i8> %sext +} +; CHECK: v4si8_cmp: +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} + + +define <8 x i8> @v8si8_cmp(<8 x i8> %x, <8 x i8> %y) nounwind readnone { + %cmp = icmp eq <8 x i8> %x, %y + %sext = sext <8 x i1> %cmp to <8 x i8> + ret <8 x i8> %sext +} +; CHECK: v8si8_cmp: +; CHECK: vcmpequh {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} + + +; Additional tests for v16i8 since it is an altivec native type + +define <16 x i8> @v16si8_cmp_eq(<16 x i8> %x, <16 x i8> %y) nounwind readnone { + %cmp = icmp eq <16 x i8> %x, %y + %sext = sext <16 x i1> %cmp to <16 x i8> + ret <16 x i8> %sext +} +; CHECK: v16si8_cmp_eq: +; CHECK: vcmpequb 2, 2, 3 + +define <16 x i8> @v16si8_cmp_ne(<16 x i8> %x, <16 x i8> %y) nounwind readnone { +entry: + %cmp = icmp ne <16 x i8> %x, %y + %sext = sext <16 x i1> %cmp to <16 x i8> + ret <16 x i8> %sext +} +; CHECK: v16si8_cmp_ne: +; CHECK: vcmpequb [[RET:[0-9]+]], 2, 3 +; CHECK-NEXT: vnor 2, [[RET]], [[RET]] + +define <16 x i8> @v16si8_cmp_le(<16 x i8> %x, <16 x i8> %y) nounwind readnone { +entry: + %cmp = icmp sle <16 x i8> %x, %y + %sext = sext <16 x i1> %cmp to <16 x i8> + ret <16 x i8> %sext +} +; CHECK: v16si8_cmp_le: +; CHECK: vcmpequb [[RCMPEQ:[0-9]+]], 2, 3 +; CHECK-NEXT: vcmpgtsb [[RCMPLE:[0-9]+]], 3, 2 +; CHECK-NEXT: vor 2, [[RCMPLE]], [[RCMPEQ]] + +define <16 x i8> @v16ui8_cmp_le(<16 x i8> %x, <16 x i8> %y) nounwind readnone { +entry: + %cmp = icmp ule <16 x i8> %x, %y + %sext = sext <16 x i1> %cmp to <16 x i8> + ret <16 x i8> %sext +} +; CHECK: v16ui8_cmp_le: +; CHECK: vcmpequb [[RCMPEQ:[0-9]+]], 2, 3 +; CHECK-NEXT: vcmpgtub [[RCMPLE:[0-9]+]], 3, 2 +; CHECK-NEXT: vor 2, [[RCMPLE]], [[RCMPEQ]] + +define <16 x i8> @v16si8_cmp_lt(<16 x i8> %x, <16 x i8> %y) nounwind readnone { +entry: + %cmp = icmp slt <16 x i8> %x, %y + %sext = sext <16 x i1> %cmp to <16 x i8> + ret <16 x i8> %sext +} +; CHECK: v16si8_cmp_lt: +; CHECK: vcmpgtsb 2, 3, 2 + +define <16 x i8> @v16ui8_cmp_lt(<16 x i8> %x, <16 x i8> %y) nounwind readnone { +entry: + %cmp = icmp ult <16 x i8> %x, %y + %sext = sext <16 x i1> %cmp to <16 x i8> + ret <16 x i8> %sext +} +; CHECK: v16ui8_cmp_lt: +; CHECK: vcmpgtub 2, 3, 2 + +define <16 x i8> @v16si8_cmp_gt(<16 x i8> %x, <16 x i8> %y) nounwind readnone { +entry: + %cmp = icmp sgt <16 x i8> %x, %y + %sext = sext <16 x i1> %cmp to <16 x i8> + ret <16 x i8> %sext +} +; CHECK: v16si8_cmp_gt: +; CHECK: vcmpgtsb 2, 2, 3 + +define <16 x i8> @v16ui8_cmp_gt(<16 x i8> %x, <16 x i8> %y) nounwind readnone { +entry: + %cmp = icmp ugt <16 x i8> %x, %y + %sext = sext <16 x i1> %cmp to <16 x i8> + ret <16 x i8> %sext +} +; CHECK: v16ui8_cmp_gt: +; CHECK: vcmpgtub 2, 2, 3 + +define <16 x i8> @v16si8_cmp_ge(<16 x i8> %x, <16 x i8> %y) nounwind readnone { +entry: + %cmp = icmp sge <16 x i8> %x, %y + %sext = sext <16 x i1> %cmp to <16 x i8> + ret <16 x i8> %sext +} +; CHECK: 
v16si8_cmp_ge: +; CHECK: vcmpequb [[RCMPEQ:[0-9]+]], 2, 3 +; CHECK-NEXT: vcmpgtsb [[RCMPGT:[0-9]+]], 2, 3 +; CHECK-NEXT: vor 2, [[RCMPGT]], [[RCMPEQ]] + +define <16 x i8> @v16ui8_cmp_ge(<16 x i8> %x, <16 x i8> %y) nounwind readnone { +entry: + %cmp = icmp uge <16 x i8> %x, %y + %sext = sext <16 x i1> %cmp to <16 x i8> + ret <16 x i8> %sext +} +; CHECK: v16ui8_cmp_ge: +; CHECK: vcmpequb [[RCMPEQ:[0-9]+]], 2, 3 +; CHECK-NEXT: vcmpgtub [[RCMPGT:[0-9]+]], 2, 3 +; CHECK-NEXT: vor 2, [[RCMPGT]], [[RCMPEQ]] + + +define <32 x i8> @v32si8_cmp(<32 x i8> %x, <32 x i8> %y) nounwind readnone { + %cmp = icmp eq <32 x i8> %x, %y + %sext = sext <32 x i1> %cmp to <32 x i8> + ret <32 x i8> %sext +} +; CHECK: v32si8_cmp: +; CHECK: vcmpequb {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpequb {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} + + +define <2 x i16> @v2si16_cmp(<2 x i16> %x, <2 x i16> %y) nounwind readnone { + %cmp = icmp eq <2 x i16> %x, %y + %sext = sext <2 x i1> %cmp to <2 x i16> + ret <2 x i16> %sext +} +; CHECK: v2si16_cmp: +; CHECK: vcmpequh {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} + + +define <4 x i16> @v4si16_cmp(<4 x i16> %x, <4 x i16> %y) nounwind readnone { + %cmp = icmp eq <4 x i16> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i16> + ret <4 x i16> %sext +} +; CHECK: v4si16_cmp: +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} + + +; Adicional tests for v8i16 since it is an altivec native type + +define <8 x i16> @v8si16_cmp_eq(<8 x i16> %x, <8 x i16> %y) nounwind readnone { +entry: + %cmp = icmp eq <8 x i16> %x, %y + %sext = sext <8 x i1> %cmp to <8 x i16> + ret <8 x i16> %sext +} +; CHECK: v8si16_cmp_eq: +; CHECK: vcmpequh 2, 2, 3 + +define <8 x i16> @v8si16_cmp_ne(<8 x i16> %x, <8 x i16> %y) nounwind readnone { +entry: + %cmp = icmp ne <8 x i16> %x, %y + %sext = sext <8 x i1> %cmp to <8 x i16> + ret <8 x i16> %sext +} +; CHECK: v8si16_cmp_ne: +; CHECK: vcmpequh [[RET:[0-9]+]], 2, 3 +; CHECK-NEXT: vnor 2, [[RET]], [[RET]] + +define <8 x i16> @v8si16_cmp_le(<8 x i16> %x, <8 x i16> %y) nounwind readnone { +entry: + %cmp = icmp sle <8 x i16> %x, %y + %sext = sext <8 x i1> %cmp to <8 x i16> + ret <8 x i16> %sext +} +; CHECK: v8si16_cmp_le: +; CHECK: vcmpequh [[RCMPEQ:[0-9]+]], 2, 3 +; CHECK-NEXT: vcmpgtsh [[RCMPLE:[0-9]+]], 3, 2 +; CHECK-NEXT: vor 2, [[RCMPLE]], [[RCMPEQ]] + +define <8 x i16> @v8ui16_cmp_le(<8 x i16> %x, <8 x i16> %y) nounwind readnone { +entry: + %cmp = icmp ule <8 x i16> %x, %y + %sext = sext <8 x i1> %cmp to <8 x i16> + ret <8 x i16> %sext +} +; CHECK: v8ui16_cmp_le: +; CHECK: vcmpequh [[RCMPEQ:[0-9]+]], 2, 3 +; CHECK-NEXT: vcmpgtuh [[RCMPLE:[0-9]+]], 3, 2 +; CHECK-NEXT: vor 2, [[RCMPLE]], [[RCMPEQ]] + +define <8 x i16> @v8si16_cmp_lt(<8 x i16> %x, <8 x i16> %y) nounwind readnone { +entry: + %cmp = icmp slt <8 x i16> %x, %y + %sext = sext <8 x i1> %cmp to <8 x i16> + ret <8 x i16> %sext +} +; CHECK: v8si16_cmp_lt: +; CHECK: vcmpgtsh 2, 3, 2 + +define <8 x i16> @v8ui16_cmp_lt(<8 x i16> %x, <8 x i16> %y) nounwind readnone { +entry: + %cmp = icmp ult <8 x i16> %x, %y + %sext = sext <8 x i1> %cmp to <8 x i16> + ret <8 x i16> %sext +} +; CHECK: v8ui16_cmp_lt: +; CHECK: vcmpgtuh 2, 3, 2 + +define <8 x i16> @v8si16_cmp_gt(<8 x i16> %x, <8 x i16> %y) nounwind readnone { +entry: + %cmp = icmp sgt <8 x i16> %x, %y + %sext = sext <8 x i1> %cmp to <8 x i16> + ret <8 x i16> %sext +} +; CHECK: v8si16_cmp_gt: +; CHECK: vcmpgtsh 2, 2, 3 + +define <8 x i16> @v8ui16_cmp_gt(<8 x i16> %x, <8 x i16> %y) nounwind readnone { +entry: + %cmp = icmp ugt <8 x i16> %x, %y + %sext = sext <8 x i1> %cmp to <8 x 
i16> + ret <8 x i16> %sext +} +; CHECK: v8ui16_cmp_gt: +; CHECK: vcmpgtuh 2, 2, 3 + +define <8 x i16> @v8si16_cmp_ge(<8 x i16> %x, <8 x i16> %y) nounwind readnone { +entry: + %cmp = icmp sge <8 x i16> %x, %y + %sext = sext <8 x i1> %cmp to <8 x i16> + ret <8 x i16> %sext +} +; CHECK: v8si16_cmp_ge: +; CHECK: vcmpequh [[RCMPEQ:[0-9]+]], 2, 3 +; CHECK-NEXT: vcmpgtsh [[RCMPGT:[0-9]+]], 2, 3 +; CHECK-NEXT: vor 2, [[RCMPGT]], [[RCMPEQ]] + +define <8 x i16> @v8ui16_cmp_ge(<8 x i16> %x, <8 x i16> %y) nounwind readnone { +entry: + %cmp = icmp uge <8 x i16> %x, %y + %sext = sext <8 x i1> %cmp to <8 x i16> + ret <8 x i16> %sext +} +; CHECK: v8ui16_cmp_ge: +; CHECK: vcmpequh [[RCMPEQ:[0-9]+]], 2, 3 +; CHECK-NEXT: vcmpgtuh [[RCMPGT:[0-9]+]], 2, 3 +; CHECK-NEXT: vor 2, [[RCMPGT]], [[RCMPEQ]] + + +define <16 x i16> @v16si16_cmp(<16 x i16> %x, <16 x i16> %y) nounwind readnone { + %cmp = icmp eq <16 x i16> %x, %y + %sext = sext <16 x i1> %cmp to <16 x i16> + ret <16 x i16> %sext +} +; CHECK: v16si16_cmp: +; CHECK: vcmpequh {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpequh {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} + + +define <32 x i16> @v32si16_cmp(<32 x i16> %x, <32 x i16> %y) nounwind readnone { + %cmp = icmp eq <32 x i16> %x, %y + %sext = sext <32 x i1> %cmp to <32 x i16> + ret <32 x i16> %sext +} +; CHECK: v32si16_cmp: +; CHECK: vcmpequh {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpequh {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpequh {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpequh {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} + + +define <2 x i32> @v2si32_cmp(<2 x i32> %x, <2 x i32> %y) nounwind readnone { + %cmp = icmp eq <2 x i32> %x, %y + %sext = sext <2 x i1> %cmp to <2 x i32> + ret <2 x i32> %sext +} +; CHECK: v2si32_cmp: +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} + + +; Adicional tests for v4si32 since it is an altivec native type + +define <4 x i32> @v4si32_cmp_eq(<4 x i32> %x, <4 x i32> %y) nounwind readnone { +entry: + %cmp = icmp eq <4 x i32> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i32> + ret <4 x i32> %sext +} +; CHECK: v4si32_cmp_eq: +; CHECK: vcmpequw 2, 2, 3 + +define <4 x i32> @v4si32_cmp_ne(<4 x i32> %x, <4 x i32> %y) nounwind readnone { +entry: + %cmp = icmp ne <4 x i32> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i32> + ret <4 x i32> %sext +} +; CHECK: v4si32_cmp_ne: +; CHECK: vcmpequw [[RCMP:[0-9]+]], 2, 3 +; CHECK-NEXT: vnor 2, [[RCMP]], [[RCMP]] + +define <4 x i32> @v4si32_cmp_le(<4 x i32> %x, <4 x i32> %y) nounwind readnone { +entry: + %cmp = icmp sle <4 x i32> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i32> + ret <4 x i32> %sext +} +; CHECK: v4si32_cmp_le: +; CHECK: vcmpequw [[RCMPEQ:[0-9]+]], 2, 3 +; CHECK-NEXT: vcmpgtsw [[RCMPLE:[0-9]+]], 3, 2 +; CHECK-NEXT: vor 2, [[RCMPLE]], [[RCMPEQ]] + +define <4 x i32> @v4ui32_cmp_le(<4 x i32> %x, <4 x i32> %y) nounwind readnone { +entry: + %cmp = icmp ule <4 x i32> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i32> + ret <4 x i32> %sext +} +; CHECK: v4ui32_cmp_le: +; CHECK: vcmpequw [[RCMPEQ:[0-9]+]], 2, 3 +; CHECK-NEXT: vcmpgtuw [[RCMPLE:[0-9]+]], 3, 2 +; CHECK-NEXT: vor 2, [[RCMPLE]], [[RCMPEQ]] + +define <4 x i32> @v4si32_cmp_lt(<4 x i32> %x, <4 x i32> %y) nounwind readnone { +entry: + %cmp = icmp slt <4 x i32> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i32> + ret <4 x i32> %sext +} +; CHECK: v4si32_cmp_lt: +; CHECK: vcmpgtsw 2, 3, 2 + +define <4 x i32> @v4ui32_cmp_lt(<4 x i32> %x, <4 x i32> %y) nounwind readnone { +entry: + %cmp = icmp ult <4 x i32> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i32> + ret <4 x i32> 
%sext +} +; CHECK: v4ui32_cmp_lt: +; CHECK: vcmpgtuw 2, 3, 2 + +define <4 x i32> @v4si32_cmp_gt(<4 x i32> %x, <4 x i32> %y) nounwind readnone { +entry: + %cmp = icmp sgt <4 x i32> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i32> + ret <4 x i32> %sext +} +; CHECK: v4si32_cmp_gt: +; CHECK: vcmpgtsw 2, 2, 3 + +define <4 x i32> @v4ui32_cmp_gt(<4 x i32> %x, <4 x i32> %y) nounwind readnone { +entry: + %cmp = icmp ugt <4 x i32> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i32> + ret <4 x i32> %sext +} +; CHECK: v4ui32_cmp_gt: +; CHECK: vcmpgtuw 2, 2, 3 + +define <4 x i32> @v4si32_cmp_ge(<4 x i32> %x, <4 x i32> %y) nounwind readnone { +entry: + %cmp = icmp sge <4 x i32> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i32> + ret <4 x i32> %sext +} +; CHECK: v4si32_cmp_ge: +; CHECK: vcmpequw [[RCMPEQ:[0-9]+]], 2, 3 +; CHECK-NEXT: vcmpgtsw [[RCMPGT:[0-9]+]], 2, 3 +; CHECK-NEXT: vor 2, [[RCMPGT]], [[RCMPEQ]] + +define <4 x i32> @v4ui32_cmp_ge(<4 x i32> %x, <4 x i32> %y) nounwind readnone { +entry: + %cmp = icmp uge <4 x i32> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i32> + ret <4 x i32> %sext +} +; CHECK: v4ui32_cmp_ge: +; CHECK: vcmpequw [[RCMPEQ:[0-9]+]], 2, 3 +; CHECK-NEXT: vcmpgtuw [[RCMPGT:[0-9]+]], 2, 3 +; CHECK-NEXT: vor 2, [[RCMPGT]], [[RCMPEQ]] + + +define <8 x i32> @v8si32_cmp(<8 x i32> %x, <8 x i32> %y) nounwind readnone { + %cmp = icmp eq <8 x i32> %x, %y + %sext = sext <8 x i1> %cmp to <8 x i32> + ret <8 x i32> %sext +} +; CHECK: v8si32_cmp: +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} + + +define <16 x i32> @v16si32_cmp(<16 x i32> %x, <16 x i32> %y) nounwind readnone { + %cmp = icmp eq <16 x i32> %x, %y + %sext = sext <16 x i1> %cmp to <16 x i32> + ret <16 x i32> %sext +} +; CHECK: v16si32_cmp: +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} + + +define <32 x i32> @v32si32_cmp(<32 x i32> %x, <32 x i32> %y) nounwind readnone { + %cmp = icmp eq <32 x i32> %x, %y + %sext = sext <32 x i1> %cmp to <32 x i32> + ret <32 x i32> %sext +} +; CHECK: v32si32_cmp: +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} + + +define <2 x float> @v2f32_cmp(<2 x float> %x, <2 x float> %y) nounwind readnone { +entry: + %cmp = fcmp oeq <2 x float> %x, %y + %sext = sext <2 x i1> %cmp to <2 x i32> + %0 = bitcast <2 x i32> %sext to <2 x float> + ret <2 x float> %0 +} +; CHECK: v2f32_cmp: +; CHECK: vcmpeqfp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} + + +; Adicional tests for v4f32 since it is a altivec native type + +define <4 x float> @v4f32_cmp_eq(<4 x float> %x, <4 x float> %y) nounwind readnone { +entry: + %cmp = fcmp oeq <4 x float> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i32> + %0 = bitcast <4 x i32> %sext to <4 x float> + ret <4 x float> %0 +} +; CHECK: v4f32_cmp_eq: +; CHECK: vcmpeqfp 2, 2, 3 + +define <4 x float> @v4f32_cmp_ne(<4 x float> %x, <4 x float> %y) nounwind readnone { +entry: + %cmp = fcmp une <4 x float> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i32> + %0 = bitcast <4 x i32> %sext to <4 x float> + 
ret <4 x float> %0 +} +; CHECK: v4f32_cmp_ne: +; CHECK: vcmpeqfp [[RET:[0-9]+]], 2, 3 +; CHECK-NEXT: vnor 2, [[RET]], [[RET]] + +define <4 x float> @v4f32_cmp_le(<4 x float> %x, <4 x float> %y) nounwind readnone { +entry: + %cmp = fcmp ole <4 x float> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i32> + %0 = bitcast <4 x i32> %sext to <4 x float> + ret <4 x float> %0 +} +; CHECK: v4f32_cmp_le: +; CHECK: vcmpeqfp [[RCMPEQ:[0-9]+]], 2, 3 +; CHECK-NEXT: vcmpgtfp [[RCMPLE:[0-9]+]], 3, 2 +; CHECK-NEXT: vor 2, [[RCMPLE]], [[RCMPEQ]] + +define <4 x float> @v4f32_cmp_lt(<4 x float> %x, <4 x float> %y) nounwind readnone { +entry: + %cmp = fcmp olt <4 x float> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i32> + %0 = bitcast <4 x i32> %sext to <4 x float> + ret <4 x float> %0 +} +; CHECK: v4f32_cmp_lt: +; CHECK: vcmpgtfp 2, 3, 2 + +define <4 x float> @v4f32_cmp_ge(<4 x float> %x, <4 x float> %y) nounwind readnone { +entry: + %cmp = fcmp oge <4 x float> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i32> + %0 = bitcast <4 x i32> %sext to <4 x float> + ret <4 x float> %0 +} +; CHECK: v4f32_cmp_ge: +; CHECK: vcmpgefp 2, 2, 3 + +define <4 x float> @v4f32_cmp_gt(<4 x float> %x, <4 x float> %y) nounwind readnone { +entry: + %cmp = fcmp ogt <4 x float> %x, %y + %sext = sext <4 x i1> %cmp to <4 x i32> + %0 = bitcast <4 x i32> %sext to <4 x float> + ret <4 x float> %0 +} +; CHECK: v4f32_cmp_gt: +; CHECK: vcmpgtfp 2, 2, 3 + + +define <8 x float> @v8f32_cmp(<8 x float> %x, <8 x float> %y) nounwind readnone { +entry: + %cmp = fcmp oeq <8 x float> %x, %y + %sext = sext <8 x i1> %cmp to <8 x i32> + %0 = bitcast <8 x i32> %sext to <8 x float> + ret <8 x float> %0 +} +; CHECK: v8f32_cmp: +; CHECK: vcmpeqfp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: vcmpeqfp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} diff --git a/test/CodeGen/PowerPC/vec_conv.ll b/test/CodeGen/PowerPC/vec_conv.ll new file mode 100644 index 0000000..a475e94 --- /dev/null +++ b/test/CodeGen/PowerPC/vec_conv.ll @@ -0,0 +1,57 @@ +; RUN: llc -mattr=+altivec < %s | FileCheck %s + +; Check vector float/int conversion using altivec. 
+ +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +@cte_float = global <4 x float> <float 6.5e+00, float 6.5e+00, float 6.5e+00, float 6.5e+00>, align 16 +@cte_int = global <4 x i32> <i32 6, i32 6, i32 6, i32 6>, align 16 + + +define void @v4f32_to_v4i32(<4 x float> %x, <4 x i32>* nocapture %y) nounwind { +entry: + %0 = load <4 x float>* @cte_float, align 16 + %mul = fmul <4 x float> %0, %x + %1 = fptosi <4 x float> %mul to <4 x i32> + store <4 x i32> %1, <4 x i32>* %y, align 16 + ret void +} +;CHECK: v4f32_to_v4i32: +;CHECK: vctsxs {{[0-9]+}}, {{[0-9]+}}, 0 + + +define void @v4f32_to_v4u32(<4 x float> %x, <4 x i32>* nocapture %y) nounwind { +entry: + %0 = load <4 x float>* @cte_float, align 16 + %mul = fmul <4 x float> %0, %x + %1 = fptoui <4 x float> %mul to <4 x i32> + store <4 x i32> %1, <4 x i32>* %y, align 16 + ret void +} +;CHECK: v4f32_to_v4u32: +;CHECK: vctuxs {{[0-9]+}}, {{[0-9]+}}, 0 + + +define void @v4i32_to_v4f32(<4 x i32> %x, <4 x float>* nocapture %y) nounwind { +entry: + %0 = load <4 x i32>* @cte_int, align 16 + %mul = mul <4 x i32> %0, %x + %1 = sitofp <4 x i32> %mul to <4 x float> + store <4 x float> %1, <4 x float>* %y, align 16 + ret void +} +;CHECK: v4i32_to_v4f32: +;CHECK: vcfsx {{[0-9]+}}, {{[0-9]+}}, 0 + + +define void @v4u32_to_v4f32(<4 x i32> %x, <4 x float>* nocapture %y) nounwind { +entry: + %0 = load <4 x i32>* @cte_int, align 16 + %mul = mul <4 x i32> %0, %x + %1 = uitofp <4 x i32> %mul to <4 x float> + store <4 x float> %1, <4 x float>* %y, align 16 + ret void +} +;CHECK: v4u32_to_v4f32: +;CHECK: vcfux {{[0-9]+}}, {{[0-9]+}}, 0 diff --git a/test/CodeGen/PowerPC/vec_extload.ll b/test/CodeGen/PowerPC/vec_extload.ll new file mode 100644 index 0000000..201c15b --- /dev/null +++ b/test/CodeGen/PowerPC/vec_extload.ll @@ -0,0 +1,155 @@ +; RUN: llc -mcpu=pwr6 -mattr=+altivec < %s | FileCheck %s + +; Check vector extend load expansion with altivec enabled. + +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +; Altivec does not provide a sext instruction, so it expands into +; a set of vector stores (stvx), byte load/sign-extend/store +; sequences (lbz/stb), and a final vector load (lvx) to load the +; resulting extended vector. 
+define <16 x i8> @v16si8_sext_in_reg(<16 x i8> %a) { + %b = trunc <16 x i8> %a to <16 x i4> + %c = sext <16 x i4> %b to <16 x i8> + ret <16 x i8> %c +} +; CHECK: v16si8_sext_in_reg: +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lbz +; CHECK: stb +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lbz +; CHECK: stb +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lbz +; CHECK: stb +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lbz +; CHECK: stb +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lbz +; CHECK: stb +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lbz +; CHECK: stb +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lbz +; CHECK: stb +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lbz +; CHECK: stb +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lbz +; CHECK: stb +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lbz +; CHECK: stb +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lbz +; CHECK: stb +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lbz +; CHECK: stb +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lbz +; CHECK: stb +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lbz +; CHECK: stb +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lbz +; CHECK: stb +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lbz +; CHECK: stb +; CHECK: lvx 2, {{[0-9]+}}, {{[0-9]+}} + +; The zero extend uses a more clever logic: a vector splat +; and a logic and to set higher bits to 0. +define <16 x i8> @v16si8_zext_in_reg(<16 x i8> %a) { + %b = trunc <16 x i8> %a to <16 x i4> + %c = zext <16 x i4> %b to <16 x i8> + ret <16 x i8> %c +} +; CHECK: v16si8_zext_in_reg: +; CHECK: vspltisb [[VMASK:[0-9]+]], 15 +; CHECK-NEXT: vand 2, 2, [[VMASK]] + +; Same as v16si8_sext_in_reg, expands to load/store halfwords (lhz/sth). +define <8 x i16> @v8si16_sext_in_reg(<8 x i16> %a) { + %b = trunc <8 x i16> %a to <8 x i8> + %c = sext <8 x i8> %b to <8 x i16> + ret <8 x i16> %c +} +; CHECK: v8si16_sext_in_reg: +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lhz +; CHECK: sth +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lhz +; CHECK: sth +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lhz +; CHECK: sth +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lhz +; CHECK: sth +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lhz +; CHECK: sth +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lhz +; CHECK: sth +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lhz +; CHECK: sth +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lhz +; CHECK: sth +; CHECK: lvx 2, {{[0-9]+}}, {{[0-9]+}} + +; Same as v8si16_sext_in_reg, but instead of creating the mask +; with a splat, loads it from memory. +define <8 x i16> @v8si16_zext_in_reg(<8 x i16> %a) { + %b = trunc <8 x i16> %a to <8 x i8> + %c = zext <8 x i8> %b to <8 x i16> + ret <8 x i16> %c +} +; CHECK: v8si16_zext_in_reg: +; CHECK: ld [[RMASKTOC:[0-9]+]], .LC{{[0-9]+}}@toc(2) +; CHECK-NEXT: lvx [[VMASK:[0-9]+]], {{[0-9]+}}, [[RMASKTOC]] +; CHECK-NEXT: vand 2, 2, [[VMASK]] + +; Same as v16si8_sext_in_reg, expands to load halfword (lha) and +; store words (stw). 
+define <4 x i32> @v4si32_sext_in_reg(<4 x i32> %a) { + %b = trunc <4 x i32> %a to <4 x i16> + %c = sext <4 x i16> %b to <4 x i32> + ret <4 x i32> %c +} +; CHECK: v4si32_sext_in_reg: +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lha +; CHECK: stw +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lha +; CHECK: stw +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lha +; CHECK: stw +; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}} +; CHECK: lha +; CHECK: stw +; CHECK: lvx 2, {{[0-9]+}}, {{[0-9]+}} + +; Same as v8si16_sext_in_reg. +define <4 x i32> @v4si32_zext_in_reg(<4 x i32> %a) { + %b = trunc <4 x i32> %a to <4 x i16> + %c = zext <4 x i16> %b to <4 x i32> + ret <4 x i32> %c +} +; CHECK: v4si32_zext_in_reg: +; CHECK: vspltisw [[VMASK:[0-9]+]], -16 +; CHECK-NEXT: vsrw [[VMASK]], [[VMASK]], [[VMASK]] +; CHECK-NEXT: vand 2, 2, [[VMASK]] diff --git a/test/CodeGen/PowerPC/vec_sqrt.ll b/test/CodeGen/PowerPC/vec_sqrt.ll new file mode 100644 index 0000000..055da1a --- /dev/null +++ b/test/CodeGen/PowerPC/vec_sqrt.ll @@ -0,0 +1,71 @@ +; RUN: llc -mcpu=pwr6 -mattr=+altivec,+fsqrt < %s | FileCheck %s + +; Check for vector sqrt expansion using floating-point types, since altivec +; does not provide an fsqrt instruction for vector. + +target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64" +target triple = "powerpc64-unknown-linux-gnu" + +declare <2 x float> @llvm.sqrt.v2f32(<2 x float> %val) +declare <4 x float> @llvm.sqrt.v4f32(<4 x float> %val) +declare <8 x float> @llvm.sqrt.v8f32(<8 x float> %val) +declare <2 x double> @llvm.sqrt.v2f64(<2 x double> %val) +declare <4 x double> @llvm.sqrt.v4f64(<4 x double> %val) + +define <2 x float> @v2f32_sqrt(<2 x float> %x) nounwind readnone { +entry: + %sqrt = call <2 x float> @llvm.sqrt.v2f32 (<2 x float> %x) + ret <2 x float> %sqrt +} +; sqrt (<2 x float>) is promoted to sqrt (<4 x float>) +; CHECK: v2f32_sqrt: +; CHECK: fsqrts {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrts {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrts {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrts {{[0-9]+}}, {{[0-9]+}} + +define <4 x float> @v4f32_sqrt(<4 x float> %x) nounwind readnone { +entry: + %sqrt = call <4 x float> @llvm.sqrt.v4f32 (<4 x float> %x) + ret <4 x float> %sqrt +} +; CHECK: v4f32_sqrt: +; CHECK: fsqrts {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrts {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrts {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrts {{[0-9]+}}, {{[0-9]+}} + +define <8 x float> @v8f32_sqrt(<8 x float> %x) nounwind readnone { +entry: + %sqrt = call <8 x float> @llvm.sqrt.v8f32 (<8 x float> %x) + ret <8 x float> %sqrt +} +; CHECK: v8f32_sqrt: +; CHECK: fsqrts {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrts {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrts {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrts {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrts {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrts {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrts {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrts {{[0-9]+}}, {{[0-9]+}} + +define <2 x double> @v2f64_sqrt(<2 x double> %x) nounwind readnone { +entry: + %sqrt = call <2 x double> @llvm.sqrt.v2f64 (<2 x double> %x) + ret <2 x double> %sqrt +} +; CHECK: v2f64_sqrt: +; CHECK: fsqrt {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrt {{[0-9]+}}, {{[0-9]+}} + +define <4 x double> @v4f64_sqrt(<4 x double> %x) nounwind readnone { +entry: + %sqrt = call <4 x double> @llvm.sqrt.v4f64 (<4 x double> %x) + ret <4 x double> %sqrt +} +; CHECK: v4f64_sqrt: +; CHECK: fsqrt {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrt {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrt {{[0-9]+}}, {{[0-9]+}} +; CHECK: fsqrt 
{{[0-9]+}}, {{[0-9]+}} diff --git a/test/CodeGen/PowerPC/vrspill.ll b/test/CodeGen/PowerPC/vrspill.ll new file mode 100644 index 0000000..7641017 --- /dev/null +++ b/test/CodeGen/PowerPC/vrspill.ll @@ -0,0 +1,19 @@ +; RUN: llc -O0 -mtriple=powerpc-unknown-linux-gnu -mattr=+altivec -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -O0 -mtriple=powerpc64-unknown-linux-gnu -mattr=+altivec -verify-machineinstrs < %s | FileCheck %s + +; This verifies that we generate correct spill/reload code for vector regs. + +define void @addrtaken(i32 %i, <4 x float> %w) nounwind { +entry: + %i.addr = alloca i32, align 4 + %w.addr = alloca <4 x float>, align 16 + store i32 %i, i32* %i.addr, align 4 + store <4 x float> %w, <4 x float>* %w.addr, align 16 + call void @foo(i32* %i.addr) + ret void +} + +; CHECK: stvx 2, 0, 0 +; CHECK: lvx 2, 0, 0 + +declare void @foo(i32*) diff --git a/test/CodeGen/SPARC/2011-01-11-CC.ll b/test/CodeGen/SPARC/2011-01-11-CC.ll index 3ceda95..f676fd8 100755 --- a/test/CodeGen/SPARC/2011-01-11-CC.ll +++ b/test/CodeGen/SPARC/2011-01-11-CC.ll @@ -54,7 +54,7 @@ entry: ; V8: {{be|bne}} ; V9: test_select_dfp_icc ; V9: subcc -; V9=NOT: {{be|bne}} +; V9-NOT: {{be|bne}} ; V9: fmovd{{e|ne}} %icc %0 = icmp eq i32 %a, 0 %1 = select i1 %0, double %f1, double %f2 diff --git a/test/CodeGen/Thumb2/buildvector-crash.ll b/test/CodeGen/Thumb2/buildvector-crash.ll index 01ef472..ce42f4b 100644 --- a/test/CodeGen/Thumb2/buildvector-crash.ll +++ b/test/CodeGen/Thumb2/buildvector-crash.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -O3 -mtriple=thumbv7-apple-darwin10 -mcpu=cortex-a8 | FileCheck %s +; RUN: llc < %s -O3 -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 | FileCheck %s ; Formerly crashed, 3573915. define void @RotateStarsFP_Vec() nounwind { @@ -13,5 +13,5 @@ bb8: ; preds = %bb8, %bb.nph372 store <4 x float> %3, <4 x float>* undef, align 4 br label %bb8 ; CHECK: RotateStarsFP_Vec: -; CHECK: vldmia +; CHECK: vld1.64 } diff --git a/test/CodeGen/Thumb2/carry.ll b/test/CodeGen/Thumb2/carry.ll index de6f6e2..85b4370 100644 --- a/test/CodeGen/Thumb2/carry.ll +++ b/test/CodeGen/Thumb2/carry.ll @@ -20,3 +20,16 @@ entry: %tmp2 = sub i64 %tmp1, %b ret i64 %tmp2 } + +; rdar://12559385 +define i64 @f3(i32 %vi) { +entry: +; CHECK: f3: +; CHECK: movw [[REG:r[0-9]+]], #36102 +; CHECK: sbcs r{{[0-9]+}}, [[REG]] + %v0 = zext i32 %vi to i64 + %v1 = xor i64 %v0, -155057456198619 + %v4 = add i64 %v1, 155057456198619 + %v5 = add i64 %v4, %v1 + ret i64 %v5 +} diff --git a/test/CodeGen/Thumb2/cortex-fp.ll b/test/CodeGen/Thumb2/cortex-fp.ll index d06f8a7..b7df2fb 100644 --- a/test/CodeGen/Thumb2/cortex-fp.ll +++ b/test/CodeGen/Thumb2/cortex-fp.ll @@ -7,8 +7,8 @@ define float @foo(float %a, float %b) { entry: ; CHECK: foo ; CORTEXM3: blx ___mulsf3 -; CORTEXM4: vmul.f32 s0, s1, s0 -; CORTEXA8: vmul.f32 d0, d1, d0 +; CORTEXM4: vmul.f32 s0, s2, s0 +; CORTEXA8: vmul.f32 d %0 = fmul float %a, %b ret float %0 } @@ -19,6 +19,6 @@ entry: %0 = fmul double %a, %b ; CORTEXM3: blx ___muldf3 ; CORTEXM4: blx ___muldf3 -; CORTEXA8: vmul.f64 d16, d17, d16 +; CORTEXA8: vmul.f64 d ret double %0 } diff --git a/test/CodeGen/Thumb2/div.ll b/test/CodeGen/Thumb2/div.ll index 2c00c70..f89746a 100644 --- a/test/CodeGen/Thumb2/div.ll +++ b/test/CodeGen/Thumb2/div.ll @@ -2,6 +2,8 @@ ; RUN: | FileCheck %s -check-prefix=CHECK-THUMB ; RUN: llc < %s -march=thumb -mcpu=cortex-m3 -mattr=+thumb2 \ ; RUN: | FileCheck %s -check-prefix=CHECK-THUMBV7M +; RUN: llc < %s -march=thumb -mcpu=swift \ +; RUN: | FileCheck %s -check-prefix=CHECK-SWIFT-T2 define i32 @f1(i32 
%a, i32 %b) { entry: @@ -9,6 +11,8 @@ entry: ; CHECK-THUMB: __divsi3 ; CHECK-THUMBV7M: f1 ; CHECK-THUMBV7M: sdiv +; CHECK-SWIFT-T2: f1 +; CHECK-SWIFT-T2: sdiv %tmp1 = sdiv i32 %a, %b ; <i32> [#uses=1] ret i32 %tmp1 } @@ -19,6 +23,8 @@ entry: ; CHECK-THUMB: __udivsi3 ; CHECK-THUMBV7M: f2 ; CHECK-THUMBV7M: udiv +; CHECK-SWIFT-T2: f2 +; CHECK-SWIFT-T2: udiv %tmp1 = udiv i32 %a, %b ; <i32> [#uses=1] ret i32 %tmp1 } @@ -29,6 +35,8 @@ entry: ; CHECK-THUMB: __modsi3 ; CHECK-THUMBV7M: f3 ; CHECK-THUMBV7M: sdiv +; CHECK-SWIFT-T2: f3 +; CHECK-SWIFT-T2: sdiv %tmp1 = srem i32 %a, %b ; <i32> [#uses=1] ret i32 %tmp1 } @@ -39,6 +47,8 @@ entry: ; CHECK-THUMB: __umodsi3 ; CHECK-THUMBV7M: f4 ; CHECK-THUMBV7M: udiv +; CHECK-SWIFT-T2: f4 +; CHECK-SWIFT-T2: udiv %tmp1 = urem i32 %a, %b ; <i32> [#uses=1] ret i32 %tmp1 } diff --git a/test/CodeGen/Thumb2/longMACt.ll b/test/CodeGen/Thumb2/longMACt.ll new file mode 100644 index 0000000..beefd60 --- /dev/null +++ b/test/CodeGen/Thumb2/longMACt.ll @@ -0,0 +1,44 @@ +; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s +; Check generated signed and unsigned multiply accumulate long. + +define i64 @MACLongTest1(i32 %a, i32 %b, i64 %c) { +;CHECK: MACLongTest1: +;CHECK: umlal + %conv = zext i32 %a to i64 + %conv1 = zext i32 %b to i64 + %mul = mul i64 %conv1, %conv + %add = add i64 %mul, %c + ret i64 %add +} + +define i64 @MACLongTest2(i32 %a, i32 %b, i64 %c) { +;CHECK: MACLongTest2: +;CHECK: smlal + %conv = sext i32 %a to i64 + %conv1 = sext i32 %b to i64 + %mul = mul nsw i64 %conv1, %conv + %add = add nsw i64 %mul, %c + ret i64 %add +} + +define i64 @MACLongTest3(i32 %a, i32 %b, i32 %c) { +;CHECK: MACLongTest3: +;CHECK: umlal + %conv = zext i32 %b to i64 + %conv1 = zext i32 %a to i64 + %mul = mul i64 %conv, %conv1 + %conv2 = zext i32 %c to i64 + %add = add i64 %mul, %conv2 + ret i64 %add +} + +define i64 @MACLongTest4(i32 %a, i32 %b, i32 %c) { +;CHECK: MACLongTest4: +;CHECK: smlal + %conv = sext i32 %b to i64 + %conv1 = sext i32 %a to i64 + %mul = mul nsw i64 %conv, %conv1 + %conv2 = sext i32 %c to i64 + %add = add nsw i64 %mul, %conv2 + ret i64 %add +} diff --git a/test/CodeGen/Thumb2/thumb2-mla.ll b/test/CodeGen/Thumb2/thumb2-mla.ll index c4cc749..594d974 100644 --- a/test/CodeGen/Thumb2/thumb2-mla.ll +++ b/test/CodeGen/Thumb2/thumb2-mla.ll @@ -1,4 +1,5 @@ ; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s +; RUN: llc < %s -march=thumb -mattr=+thumb2 -arm-use-mulops=false | FileCheck %s -check-prefix=NO_MULOPS define i32 @f1(i32 %a, i32 %b, i32 %c) { %tmp1 = mul i32 %a, %b @@ -7,6 +8,9 @@ define i32 @f1(i32 %a, i32 %b, i32 %c) { } ; CHECK: f1: ; CHECK: mla r0, r0, r1, r2 +; NO_MULOPS: f1: +; NO_MULOPS: muls r0, r1, r0 +; NO_MULOPS-NEXT: add r0, r2 define i32 @f2(i32 %a, i32 %b, i32 %c) { %tmp1 = mul i32 %a, %b @@ -15,3 +19,6 @@ define i32 @f2(i32 %a, i32 %b, i32 %c) { } ; CHECK: f2: ; CHECK: mla r0, r0, r1, r2 +; NO_MULOPS: f2: +; NO_MULOPS: muls r0, r1, r0 +; NO_MULOPS-NEXT: add r0, r2 diff --git a/test/CodeGen/Thumb2/thumb2-select_xform.ll b/test/CodeGen/Thumb2/thumb2-select_xform.ll index ead198f..ed4d26d 100644 --- a/test/CodeGen/Thumb2/thumb2-select_xform.ll +++ b/test/CodeGen/Thumb2/thumb2-select_xform.ll @@ -5,7 +5,7 @@ define i32 @t1(i32 %a, i32 %b, i32 %c) nounwind { ; CHECK: mvn r0, #-2147483648 ; CHECK: cmp r2, #10 ; CHECK: it le -; CHECK: addle.w r1, r1, r0 +; CHECK: addle r1, r0 ; CHECK: mov r0, r1 %tmp1 = icmp sgt i32 %c, 10 %tmp2 = select i1 %tmp1, i32 0, i32 2147483647 @@ -30,7 +30,7 @@ define i32 @t3(i32 %a, i32 %b, i32 %c, i32 %d) 
nounwind { ; CHECK: t3 ; CHECK: cmp r2, #10 ; CHECK: it le -; CHECK: suble.w r1, r1, #10 +; CHECK: suble r1, #10 ; CHECK: mov r0, r1 %tmp1 = icmp sgt i32 %c, 10 %tmp2 = select i1 %tmp1, i32 0, i32 10 diff --git a/test/CodeGen/Thumb2/thumb2-smla.ll b/test/CodeGen/Thumb2/thumb2-smla.ll index c128ecc..aaaedfa 100644 --- a/test/CodeGen/Thumb2/thumb2-smla.ll +++ b/test/CodeGen/Thumb2/thumb2-smla.ll @@ -1,8 +1,12 @@ ; RUN: llc < %s -march=thumb -mattr=+thumb2,+t2xtpk,+t2dsp | FileCheck %s +; RUN: llc < %s -march=thumb -mattr=+thumb2,+t2xtpk,+t2dsp -arm-use-mulops=false | FileCheck %s -check-prefix=NO_MULOPS define i32 @f3(i32 %a, i16 %x, i32 %y) { ; CHECK: f3 ; CHECK: smlabt r0, r1, r2, r0 +; NO_MULOPS: f3 +; NO_MULOPS: smultb r1, r2, r1 +; NO_MULOPS-NEXT: add r0, r1 %tmp = sext i16 %x to i32 ; <i32> [#uses=1] %tmp2 = ashr i32 %y, 16 ; <i32> [#uses=1] %tmp3 = mul i32 %tmp2, %tmp ; <i32> [#uses=1] diff --git a/test/CodeGen/Thumb2/thumb2-uxtb.ll b/test/CodeGen/Thumb2/thumb2-uxtb.ll index 35914b1..2074f98 100644 --- a/test/CodeGen/Thumb2/thumb2-uxtb.ll +++ b/test/CodeGen/Thumb2/thumb2-uxtb.ll @@ -128,9 +128,9 @@ define i32 @test10(i32 %p0) { ; ARMv7M: test10 ; ARMv7M: mov.w r1, #16253176 -; ARMv7M: mov.w r2, #458759 ; ARMv7M: and.w r0, r1, r0, lsr #7 -; ARMv7M: and.w r1, r2, r0, lsr #5 +; ARMv7M: mov.w r1, #458759 +; ARMv7M: and.w r1, r1, r0, lsr #5 ; ARMv7M: orrs r0, r1 %tmp1 = lshr i32 %p0, 7 ; <i32> [#uses=1] %tmp2 = and i32 %tmp1, 16253176 ; <i32> [#uses=2] diff --git a/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll b/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll index 8b55bd7..3d058bc 100644 --- a/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll +++ b/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=i386-apple-darwin | FileCheck %s +; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=corei7 | FileCheck %s ; rdar://r7512579 ; PHI defs in the atomic loop should be used by the add / adc @@ -7,17 +7,16 @@ define void @t(i64* nocapture %p) nounwind ssp { entry: ; CHECK: t: -; CHECK: movl $1 -; CHECK: movl (%ebp), %eax -; CHECK: movl 4(%ebp), %edx +; CHECK: movl ([[REG:%[a-z]+]]), %eax +; CHECK: movl 4([[REG]]), %edx ; CHECK: LBB0_1: -; CHECK-NOT: movl $1 -; CHECK-NOT: movl $0 -; CHECK: addl -; CHECK: adcl +; CHECK: movl %eax, %ebx +; CHECK: addl {{%[a-z]+}}, %ebx +; CHECK: movl %edx, %ecx +; CHECK: adcl {{%[a-z]+}}, %ecx ; CHECK: lock -; CHECK: cmpxchg8b -; CHECK: jne +; CHECK-NEXT: cmpxchg8b ([[REG]]) +; CHECK-NEXT: jne %0 = atomicrmw add i64* %p, i64 1 seq_cst ret void } diff --git a/test/CodeGen/X86/2012-01-18-vbitcast.ll b/test/CodeGen/X86/2012-01-18-vbitcast.ll index 8a3ccc8..3ce7db6 100644 --- a/test/CodeGen/X86/2012-01-18-vbitcast.ll +++ b/test/CodeGen/X86/2012-01-18-vbitcast.ll @@ -2,8 +2,8 @@ ;CHECK: vcast define <2 x i32> @vcast(<2 x float> %a, <2 x float> %b) { -;CHECK: pshufd -;CHECK: pshufd +;CHECK: pmovzxdq +;CHECK: pmovzxdq %af = bitcast <2 x float> %a to <2 x i32> %bf = bitcast <2 x float> %b to <2 x i32> %x = sub <2 x i32> %af, %bf diff --git a/test/CodeGen/X86/2012-03-15-build_vector_wl.ll b/test/CodeGen/X86/2012-03-15-build_vector_wl.ll index fec17e9..c4b307e 100644 --- a/test/CodeGen/X86/2012-03-15-build_vector_wl.ll +++ b/test/CodeGen/X86/2012-03-15-build_vector_wl.ll @@ -4,7 +4,7 @@ define <4 x i8> @build_vector_again(<16 x i8> %in) nounwind readnone { entry: %out = shufflevector <16 x i8> %in, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK: shufb +; CHECK: pmovzxbd ret <4 x i8> %out ; CHECK: ret } diff --git 
a/test/CodeGen/X86/2012-04-26-sdglue.ll b/test/CodeGen/X86/2012-04-26-sdglue.ll index 9a66b67..0465952 100644 --- a/test/CodeGen/X86/2012-04-26-sdglue.ll +++ b/test/CodeGen/X86/2012-04-26-sdglue.ll @@ -5,7 +5,7 @@ ; It's hard to test for the ISEL condition because CodeGen optimizes ; away the bugpointed code. Just ensure the basics are still there. ;CHECK: func: -;CHECK: vpxor +;CHECK: vxorps ;CHECK: vinsertf128 ;CHECK: vpshufd ;CHECK: vpshufd diff --git a/test/CodeGen/X86/2012-07-10-extload64.ll b/test/CodeGen/X86/2012-07-10-extload64.ll index 906b748..4abdded 100644 --- a/test/CodeGen/X86/2012-07-10-extload64.ll +++ b/test/CodeGen/X86/2012-07-10-extload64.ll @@ -3,7 +3,7 @@ ; CHECK: load_store define void @load_store(<4 x i16>* %in) { entry: -; CHECK: movsd +; CHECK: pmovzxwd %A27 = load <4 x i16>* %in, align 4 %A28 = add <4 x i16> %A27, %A27 ; CHECK: movlpd @@ -27,6 +27,6 @@ define <2 x i32> @load_64(<2 x i32>* %ptr) { BB: %t = load <2 x i32>* %ptr ret <2 x i32> %t -;CHECK: movsd +;CHECK: pmovzxdq ;CHECK: ret } diff --git a/test/CodeGen/X86/2012-08-16-setcc.ll b/test/CodeGen/X86/2012-08-16-setcc.ll new file mode 100644 index 0000000..ed51156 --- /dev/null +++ b/test/CodeGen/X86/2012-08-16-setcc.ll @@ -0,0 +1,45 @@ +; RUN: llc < %s -mtriple=x86_64-apple-macosx | FileCheck %s + +; rdar://12081007 + +; CHECK: and_1: +; CHECK: andb +; CHECK-NEXT: cmovnel +; CHECK: ret +define i32 @and_1(i8 zeroext %a, i8 zeroext %b, i32 %x) { + %1 = and i8 %b, %a + %2 = icmp ne i8 %1, 0 + %3 = select i1 %2, i32 %x, i32 0 + ret i32 %3 +} + +; CHECK: and_2: +; CHECK: andb +; CHECK-NEXT: setne +; CHECK: ret +define zeroext i1 @and_2(i8 zeroext %a, i8 zeroext %b) { + %1 = and i8 %b, %a + %2 = icmp ne i8 %1, 0 + ret i1 %2 +} + +; CHECK: xor_1: +; CHECK: xorb +; CHECK-NEXT: cmovnel +; CHECK: ret +define i32 @xor_1(i8 zeroext %a, i8 zeroext %b, i32 %x) { + %1 = xor i8 %b, %a + %2 = icmp ne i8 %1, 0 + %3 = select i1 %2, i32 %x, i32 0 + ret i32 %3 +} + +; CHECK: xor_2: +; CHECK: xorb +; CHECK-NEXT: setne +; CHECK: ret +define zeroext i1 @xor_2(i8 zeroext %a, i8 zeroext %b) { + %1 = xor i8 %b, %a + %2 = icmp ne i8 %1, 0 + ret i1 %2 +} diff --git a/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll b/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll new file mode 100644 index 0000000..6ebbb2e --- /dev/null +++ b/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll @@ -0,0 +1,20 @@ +; RUN: llc < %s -enable-unsafe-fp-math +; <rdar://problem/12180135> +target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128" +target triple = "i386-apple-macosx10.8.0" + +define i32 @foo(float %mean) nounwind readnone ssp align 2 { +entry: + %cmp = fcmp olt float %mean, -3.000000e+00 + %f.0 = select i1 %cmp, float -3.000000e+00, float %mean + %cmp2 = fcmp ult float %f.0, 3.000000e+00 + %f.1 = select i1 %cmp2, float %f.0, float 0x4007EB8520000000 + %add = fadd float %f.1, 3.000000e+00 + %div = fdiv float %add, 2.343750e-02 + %0 = fpext float %div to double + %conv = select i1 undef, double 2.550000e+02, double %0 + %add8 = fadd double %conv, 5.000000e-01 + %conv9 = fptosi double %add8 to i32 + %.conv9 = select i1 undef, i32 255, i32 %conv9 + ret i32 %.conv9 +} diff --git a/test/CodeGen/X86/2012-09-13-dagco-fneg.ll b/test/CodeGen/X86/2012-09-13-dagco-fneg.ll new file mode 100644 index 0000000..7b9bab9 --- /dev/null +++ b/test/CodeGen/X86/2012-09-13-dagco-fneg.ll @@ -0,0 +1,21 @@ +; RUN: llc -march=x86-64 -mcpu=corei7 < %s | FileCheck %s + +target datalayout 
= "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.8.0" + +; CHECK: foo +; Make sure we are not trying to use scalar xor on the high bits of the vector. +; CHECK-NOT: xorq +; CHECK: xorl +; CHECK-NEXT: ret + +define i32 @foo() { +bb: + %tmp44.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, <float 0.000000e+00, float 0.000000e+00, float 1.000000e+00, float 0.000000e+00> + %0 = bitcast <4 x float> %tmp44.i to i128 + %1 = zext i128 %0 to i512 + %2 = shl nuw nsw i512 %1, 256 + %ins = or i512 %2, 3325764857622480139933400731976840738652108318779753826115024029985671937147149347761402413803120180680770390816681124225944317364750115981129923635970048 + store i512 %ins, i512* undef, align 64 + ret i32 0 +} diff --git a/test/CodeGen/X86/2012-09-28-CGPBug.ll b/test/CodeGen/X86/2012-09-28-CGPBug.ll new file mode 100644 index 0000000..32d7d01 --- /dev/null +++ b/test/CodeGen/X86/2012-09-28-CGPBug.ll @@ -0,0 +1,53 @@ +; RUN: llc -mtriple=i386-apple-macosx < %s | FileCheck %s +; rdar://12396696 + +@JT = global [4 x i32] [i32 sub (i32 ptrtoint (i8* blockaddress(@h, %18) to i32), i32 ptrtoint (i8* blockaddress(@h, %11) to i32)), i32 sub (i32 ptrtoint (i8* blockaddress(@h, %17) to i32), i32 ptrtoint (i8* blockaddress(@h, %11) to i32)), i32 sub (i32 ptrtoint (i8* blockaddress(@h, %22) to i32), i32 ptrtoint (i8* blockaddress(@h, %18) to i32)), i32 sub (i32 ptrtoint (i8* blockaddress(@h, %22) to i32), i32 ptrtoint (i8* blockaddress(@h, %17) to i32))] +@gGlobalLock = external global i8* +@.str40 = external global [35 x i8] + +; CHECK: _JT: +; CHECK-NOT: .long Ltmp{{[0-9]+}}-1 +; CHECK-NOT: .long 1-Ltmp{{[0-9]+}} +; CHECK: .long Ltmp{{[0-9]+}}-Ltmp{{[0-9]+}} +; CHECK: .long Ltmp{{[0-9]+}}-Ltmp{{[0-9]+}} +; CHECK: .long Ltmp{{[0-9]+}}-Ltmp{{[0-9]+}} +; CHECK: .long Ltmp{{[0-9]+}}-Ltmp{{[0-9]+}} + +define void @h(i8*) nounwind ssp { + %2 = alloca i8* + store i8* %0, i8** %2 + %3 = load i8** %2 + %4 = bitcast i8* %3 to { i32, i32 }* + %5 = getelementptr { i32, i32 }* %4, i32 0, i32 0 + %6 = load i32* %5 + %7 = srem i32 %6, 2 + %8 = icmp slt i32 %6, 2 + %9 = select i1 %8, i32 %6, i32 %7 + %10 = icmp eq i32 %9, 0 + br label %11 + +; <label>:11 ; preds = %1 + %12 = zext i1 %10 to i32 + %13 = getelementptr [4 x i32]* @JT, i32 0, i32 %12 + %14 = load i32* %13 + %15 = add i32 %14, ptrtoint (i8* blockaddress(@h, %11) to i32) + %16 = inttoptr i32 %15 to i8* + indirectbr i8* %16, [label %17, label %18] + +; <label>:17 ; preds = %11 + tail call void (i8*, ...)* @g(i8* getelementptr inbounds ([35 x i8]* @.str40, i32 0, i32 0)) + br label %22 + +; <label>:18 ; preds = %11 + %19 = call i32 @f(i32 -1037694186) nounwind + %20 = inttoptr i32 %19 to i32 (i8**)* + %21 = tail call i32 %20(i8** @gGlobalLock) + br label %22 + +; <label>:22 ; preds = %18, %17 + ret void +} + +declare i32 @f(i32) + +declare void @g(i8*, ...) 
diff --git a/test/CodeGen/X86/2012-10-02-DAGCycle.ll b/test/CodeGen/X86/2012-10-02-DAGCycle.ll new file mode 100644 index 0000000..8d914db --- /dev/null +++ b/test/CodeGen/X86/2012-10-02-DAGCycle.ll @@ -0,0 +1,52 @@ +; RUN: llc -mtriple=i386-apple-macosx -relocation-model=pic < %s +; RUN: llc -mtriple=x86_64-apple-macosx -relocation-model=pic < %s + +; rdar://12393897 + +%TRp = type { i32, %TRH*, i32, i32 } +%TRH = type { i8*, i8*, i8*, i8*, {}* } + +define i32 @t(%TRp* inreg %rp) nounwind optsize ssp { +entry: + %handler = getelementptr inbounds %TRp* %rp, i32 0, i32 1 + %0 = load %TRH** %handler, align 4 + %sync = getelementptr inbounds %TRH* %0, i32 0, i32 4 + %sync12 = load {}** %sync, align 4 + %1 = bitcast {}* %sync12 to i32 (%TRp*)* + %call = tail call i32 %1(%TRp* inreg %rp) nounwind optsize + ret i32 %call +} + +%btConeShape = type { %btConvexInternalShape, float, float, float, [3 x i32] } +%btConvexInternalShape = type { %btConvexShape, %btVector, %btVector, float, float } +%btConvexShape = type { %btCollisionShape } +%btCollisionShape = type { i32 (...)**, i32, i8* } +%btVector = type { [4 x float] } + +define { <2 x float>, <2 x float> } @t2(%btConeShape* %this) unnamed_addr uwtable ssp align 2 { +entry: + %0 = getelementptr inbounds %btConeShape* %this, i64 0, i32 0 + br i1 undef, label %if.then, label %if.end17 + +if.then: ; preds = %entry + %vecnorm.sroa.2.8.copyload = load float* undef, align 4 + %cmp4 = fcmp olt float undef, 0x3D10000000000000 + %vecnorm.sroa.2.8.copyload36 = select i1 %cmp4, float -1.000000e+00, float %vecnorm.sroa.2.8.copyload + %call.i.i.i = tail call float @sqrtf(float 0.000000e+00) nounwind readnone + %div.i.i = fdiv float 1.000000e+00, %call.i.i.i + %mul7.i.i.i = fmul float %div.i.i, %vecnorm.sroa.2.8.copyload36 + %1 = load float (%btConvexInternalShape*)** undef, align 8 + %call12 = tail call float %1(%btConvexInternalShape* %0) + %mul7.i.i = fmul float %call12, %mul7.i.i.i + %retval.sroa.0.4.insert = insertelement <2 x float> zeroinitializer, float undef, i32 1 + %add13.i = fadd float undef, %mul7.i.i + %retval.sroa.1.8.insert = insertelement <2 x float> undef, float %add13.i, i32 0 + br label %if.end17 + +if.end17: ; preds = %if.then, %entry + %retval.sroa.1.8.load3338 = phi <2 x float> [ %retval.sroa.1.8.insert, %if.then ], [ undef, %entry ] + %retval.sroa.0.0.load3137 = phi <2 x float> [ %retval.sroa.0.4.insert, %if.then ], [ undef, %entry ] + ret { <2 x float>, <2 x float> } undef +} + +declare float @sqrtf(float) nounwind readnone diff --git a/test/CodeGen/X86/2012-10-03-DAGCycle.ll b/test/CodeGen/X86/2012-10-03-DAGCycle.ll new file mode 100644 index 0000000..72083c7 --- /dev/null +++ b/test/CodeGen/X86/2012-10-03-DAGCycle.ll @@ -0,0 +1,31 @@ +; RUN: llc -mtriple=x86_64-apple-macosx -mcpu=corei7 < %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.8.0" + +%struct.pluto.0 = type { %struct.bar.1, %struct.hoge.368* } +%struct.bar.1 = type { %i8* } +%i8 = type { i8 } +%struct.hoge.368 = type { i32, i32 } +%struct.widget.375 = type { i32, i32, %i8*, %struct.hoge.368* } + +define fastcc void @bar(%struct.pluto.0* %arg) nounwind uwtable ssp align 2 { +bb: + %tmp1 = alloca %struct.widget.375, align 8 + %tmp2 = getelementptr inbounds %struct.pluto.0* %arg, i64 0, i32 1 + %tmp3 = load %struct.hoge.368** %tmp2, align 8 + store %struct.pluto.0* %arg, %struct.pluto.0** undef, align 8 + %tmp = 
getelementptr inbounds %struct.widget.375* %tmp1, i64 0, i32 2 + %tmp4 = getelementptr %struct.pluto.0* %arg, i64 0, i32 0, i32 0 + %tmp5 = load %i8** %tmp4, align 8 + store %i8* %tmp5, %i8** %tmp, align 8 + %tmp6 = getelementptr inbounds %struct.widget.375* %tmp1, i64 0, i32 3 + store %struct.hoge.368* %tmp3, %struct.hoge.368** %tmp6, align 8 + br i1 undef, label %bb8, label %bb7 + +bb7: ; preds = %bb + unreachable + +bb8: ; preds = %bb + unreachable +} diff --git a/test/CodeGen/X86/2012-10-18-crash-dagco.ll b/test/CodeGen/X86/2012-10-18-crash-dagco.ll new file mode 100644 index 0000000..5b98624 --- /dev/null +++ b/test/CodeGen/X86/2012-10-18-crash-dagco.ll @@ -0,0 +1,61 @@ +; RUN: llc -march=x86-64 -mcpu=corei7 -disable-cgp-select2branch < %s + +; We should not crash on this test. + +target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32" +target triple = "i386-apple-darwin9.0.0" + +@global = external constant [411 x i8], align 1 + +define void @snork() nounwind { +bb: + br i1 undef, label %bb26, label %bb27 + +bb26: ; preds = %bb48, %bb26, %bb + switch i32 undef, label %bb26 [ + i32 142771596, label %bb28 + ] + +bb27: ; preds = %bb48, %bb + switch i32 undef, label %bb49 [ + i32 142771596, label %bb28 + ] + +bb28: ; preds = %bb27, %bb26 + %tmp = load i32* null + %tmp29 = trunc i32 %tmp to i8 + store i8* undef, i8** undef + %tmp30 = load i32* null + %tmp31 = icmp eq i32 %tmp30, 0 + %tmp32 = getelementptr inbounds [411 x i8]* @global, i32 0, i32 undef + %tmp33 = load i8* %tmp32, align 1 + %tmp34 = getelementptr inbounds [411 x i8]* @global, i32 0, i32 0 + %tmp35 = load i8* %tmp34, align 1 + %tmp36 = select i1 %tmp31, i8 %tmp35, i8 %tmp33 + %tmp37 = select i1 undef, i8 %tmp29, i8 %tmp36 + %tmp38 = zext i8 %tmp37 to i32 + %tmp39 = select i1 undef, i32 0, i32 %tmp38 + %tmp40 = getelementptr inbounds i32* null, i32 %tmp39 + %tmp41 = load i32* %tmp40, align 4 + %tmp42 = load i32* undef, align 4 + %tmp43 = load i32* undef + %tmp44 = xor i32 %tmp42, %tmp43 + %tmp45 = lshr i32 %tmp44, 8 + %tmp46 = lshr i32 %tmp44, 7 + call void @spam() + unreachable + +bb47: ; No predecessors! + ret void + +bb48: ; No predecessors! + br i1 undef, label %bb27, label %bb26 + +bb49: ; preds = %bb49, %bb27 + br label %bb49 + +bb50: ; preds = %bb50 + br label %bb50 +} + +declare void @spam() noreturn nounwind diff --git a/test/CodeGen/X86/MergeConsecutiveStores.ll b/test/CodeGen/X86/MergeConsecutiveStores.ll new file mode 100644 index 0000000..64825ba --- /dev/null +++ b/test/CodeGen/X86/MergeConsecutiveStores.ll @@ -0,0 +1,305 @@ +; RUN: llc -march=x86-64 -mcpu=corei7 -mattr=+avx < %s | FileCheck %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.8.0" + +%struct.A = type { i8, i8, i8, i8, i8, i8, i8, i8 } +%struct.B = type { i32, i32, i32, i32, i32, i32, i32, i32 } + +; CHECK: merge_const_store +; save 1,2,3 ... as one big integer. 
+; CHECK: movabsq $578437695752307201 +; CHECK: ret +define void @merge_const_store(i32 %count, %struct.A* nocapture %p) nounwind uwtable noinline ssp { + %1 = icmp sgt i32 %count, 0 + br i1 %1, label %.lr.ph, label %._crit_edge +.lr.ph: + %i.02 = phi i32 [ %10, %.lr.ph ], [ 0, %0 ] + %.01 = phi %struct.A* [ %11, %.lr.ph ], [ %p, %0 ] + %2 = getelementptr inbounds %struct.A* %.01, i64 0, i32 0 + store i8 1, i8* %2, align 1 + %3 = getelementptr inbounds %struct.A* %.01, i64 0, i32 1 + store i8 2, i8* %3, align 1 + %4 = getelementptr inbounds %struct.A* %.01, i64 0, i32 2 + store i8 3, i8* %4, align 1 + %5 = getelementptr inbounds %struct.A* %.01, i64 0, i32 3 + store i8 4, i8* %5, align 1 + %6 = getelementptr inbounds %struct.A* %.01, i64 0, i32 4 + store i8 5, i8* %6, align 1 + %7 = getelementptr inbounds %struct.A* %.01, i64 0, i32 5 + store i8 6, i8* %7, align 1 + %8 = getelementptr inbounds %struct.A* %.01, i64 0, i32 6 + store i8 7, i8* %8, align 1 + %9 = getelementptr inbounds %struct.A* %.01, i64 0, i32 7 + store i8 8, i8* %9, align 1 + %10 = add nsw i32 %i.02, 1 + %11 = getelementptr inbounds %struct.A* %.01, i64 1 + %exitcond = icmp eq i32 %10, %count + br i1 %exitcond, label %._crit_edge, label %.lr.ph +._crit_edge: + ret void +} + +; Move the constants using a single vector store. +; CHECK: merge_const_store_vec +; CHECK: vmovups %ymm0, (%rsi) +; CHECK: ret +define void @merge_const_store_vec(i32 %count, %struct.B* nocapture %p) nounwind uwtable noinline ssp { + %1 = icmp sgt i32 %count, 0 + br i1 %1, label %.lr.ph, label %._crit_edge +.lr.ph: + %i.02 = phi i32 [ %10, %.lr.ph ], [ 0, %0 ] + %.01 = phi %struct.B* [ %11, %.lr.ph ], [ %p, %0 ] + %2 = getelementptr inbounds %struct.B* %.01, i64 0, i32 0 + store i32 0, i32* %2, align 4 + %3 = getelementptr inbounds %struct.B* %.01, i64 0, i32 1 + store i32 0, i32* %3, align 4 + %4 = getelementptr inbounds %struct.B* %.01, i64 0, i32 2 + store i32 0, i32* %4, align 4 + %5 = getelementptr inbounds %struct.B* %.01, i64 0, i32 3 + store i32 0, i32* %5, align 4 + %6 = getelementptr inbounds %struct.B* %.01, i64 0, i32 4 + store i32 0, i32* %6, align 4 + %7 = getelementptr inbounds %struct.B* %.01, i64 0, i32 5 + store i32 0, i32* %7, align 4 + %8 = getelementptr inbounds %struct.B* %.01, i64 0, i32 6 + store i32 0, i32* %8, align 4 + %9 = getelementptr inbounds %struct.B* %.01, i64 0, i32 7 + store i32 0, i32* %9, align 4 + %10 = add nsw i32 %i.02, 1 + %11 = getelementptr inbounds %struct.B* %.01, i64 1 + %exitcond = icmp eq i32 %10, %count + br i1 %exitcond, label %._crit_edge, label %.lr.ph +._crit_edge: + ret void +} + +; Move the first 4 constants as a single vector. Move the rest as scalars. 
+; CHECK: merge_nonconst_store +; CHECK: movl $67305985 +; CHECK: movb +; CHECK: movb +; CHECK: movb +; CHECK: movb +; CHECK: ret +define void @merge_nonconst_store(i32 %count, i8 %zz, %struct.A* nocapture %p) nounwind uwtable noinline ssp { + %1 = icmp sgt i32 %count, 0 + br i1 %1, label %.lr.ph, label %._crit_edge +.lr.ph: + %i.02 = phi i32 [ %10, %.lr.ph ], [ 0, %0 ] + %.01 = phi %struct.A* [ %11, %.lr.ph ], [ %p, %0 ] + %2 = getelementptr inbounds %struct.A* %.01, i64 0, i32 0 + store i8 1, i8* %2, align 1 + %3 = getelementptr inbounds %struct.A* %.01, i64 0, i32 1 + store i8 2, i8* %3, align 1 + %4 = getelementptr inbounds %struct.A* %.01, i64 0, i32 2 + store i8 3, i8* %4, align 1 + %5 = getelementptr inbounds %struct.A* %.01, i64 0, i32 3 + store i8 4, i8* %5, align 1 + %6 = getelementptr inbounds %struct.A* %.01, i64 0, i32 4 + store i8 %zz, i8* %6, align 1 ; <----------- Not a const; + %7 = getelementptr inbounds %struct.A* %.01, i64 0, i32 5 + store i8 6, i8* %7, align 1 + %8 = getelementptr inbounds %struct.A* %.01, i64 0, i32 6 + store i8 7, i8* %8, align 1 + %9 = getelementptr inbounds %struct.A* %.01, i64 0, i32 7 + store i8 8, i8* %9, align 1 + %10 = add nsw i32 %i.02, 1 + %11 = getelementptr inbounds %struct.A* %.01, i64 1 + %exitcond = icmp eq i32 %10, %count + br i1 %exitcond, label %._crit_edge, label %.lr.ph +._crit_edge: + ret void +} + + +;CHECK: merge_loads_i16 +; load: +;CHECK: movw +; store: +;CHECK: movw +;CHECK: ret +define void @merge_loads_i16(i32 %count, %struct.A* noalias nocapture %q, %struct.A* noalias nocapture %p) nounwind uwtable noinline ssp { + %1 = icmp sgt i32 %count, 0 + br i1 %1, label %.lr.ph, label %._crit_edge + +.lr.ph: ; preds = %0 + %2 = getelementptr inbounds %struct.A* %q, i64 0, i32 0 + %3 = getelementptr inbounds %struct.A* %q, i64 0, i32 1 + br label %4 + +; <label>:4 ; preds = %4, %.lr.ph + %i.02 = phi i32 [ 0, %.lr.ph ], [ %9, %4 ] + %.01 = phi %struct.A* [ %p, %.lr.ph ], [ %10, %4 ] + %5 = load i8* %2, align 1 + %6 = load i8* %3, align 1 + %7 = getelementptr inbounds %struct.A* %.01, i64 0, i32 0 + store i8 %5, i8* %7, align 1 + %8 = getelementptr inbounds %struct.A* %.01, i64 0, i32 1 + store i8 %6, i8* %8, align 1 + %9 = add nsw i32 %i.02, 1 + %10 = getelementptr inbounds %struct.A* %.01, i64 1 + %exitcond = icmp eq i32 %9, %count + br i1 %exitcond, label %._crit_edge, label %4 + +._crit_edge: ; preds = %4, %0 + ret void +} + +; The loads and the stores are interleved. Can't merge them. 
+;CHECK: no_merge_loads +;CHECK: movb +;CHECK: movb +;CHECK: movb +;CHECK: movb +;CHECK: ret +define void @no_merge_loads(i32 %count, %struct.A* noalias nocapture %q, %struct.A* noalias nocapture %p) nounwind uwtable noinline ssp { + %1 = icmp sgt i32 %count, 0 + br i1 %1, label %.lr.ph, label %._crit_edge + +.lr.ph: ; preds = %0 + %2 = getelementptr inbounds %struct.A* %q, i64 0, i32 0 + %3 = getelementptr inbounds %struct.A* %q, i64 0, i32 1 + br label %a4 + +a4: ; preds = %4, %.lr.ph + %i.02 = phi i32 [ 0, %.lr.ph ], [ %a9, %a4 ] + %.01 = phi %struct.A* [ %p, %.lr.ph ], [ %a10, %a4 ] + %a5 = load i8* %2, align 1 + %a7 = getelementptr inbounds %struct.A* %.01, i64 0, i32 0 + store i8 %a5, i8* %a7, align 1 + %a8 = getelementptr inbounds %struct.A* %.01, i64 0, i32 1 + %a6 = load i8* %3, align 1 + store i8 %a6, i8* %a8, align 1 + %a9 = add nsw i32 %i.02, 1 + %a10 = getelementptr inbounds %struct.A* %.01, i64 1 + %exitcond = icmp eq i32 %a9, %count + br i1 %exitcond, label %._crit_edge, label %a4 + +._crit_edge: ; preds = %4, %0 + ret void +} + + +;CHECK: merge_loads_integer +; load: +;CHECK: movq +; store: +;CHECK: movq +;CHECK: ret +define void @merge_loads_integer(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp { + %1 = icmp sgt i32 %count, 0 + br i1 %1, label %.lr.ph, label %._crit_edge + +.lr.ph: ; preds = %0 + %2 = getelementptr inbounds %struct.B* %q, i64 0, i32 0 + %3 = getelementptr inbounds %struct.B* %q, i64 0, i32 1 + br label %4 + +; <label>:4 ; preds = %4, %.lr.ph + %i.02 = phi i32 [ 0, %.lr.ph ], [ %9, %4 ] + %.01 = phi %struct.B* [ %p, %.lr.ph ], [ %10, %4 ] + %5 = load i32* %2 + %6 = load i32* %3 + %7 = getelementptr inbounds %struct.B* %.01, i64 0, i32 0 + store i32 %5, i32* %7 + %8 = getelementptr inbounds %struct.B* %.01, i64 0, i32 1 + store i32 %6, i32* %8 + %9 = add nsw i32 %i.02, 1 + %10 = getelementptr inbounds %struct.B* %.01, i64 1 + %exitcond = icmp eq i32 %9, %count + br i1 %exitcond, label %._crit_edge, label %4 + +._crit_edge: ; preds = %4, %0 + ret void +} + + +;CHECK: merge_loads_vector +; load: +;CHECK: movups +; store: +;CHECK: movups +;CHECK: ret +define void @merge_loads_vector(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp { + %a1 = icmp sgt i32 %count, 0 + br i1 %a1, label %.lr.ph, label %._crit_edge + +.lr.ph: ; preds = %0 + %a2 = getelementptr inbounds %struct.B* %q, i64 0, i32 0 + %a3 = getelementptr inbounds %struct.B* %q, i64 0, i32 1 + %a4 = getelementptr inbounds %struct.B* %q, i64 0, i32 2 + %a5 = getelementptr inbounds %struct.B* %q, i64 0, i32 3 + br label %block4 + +block4: ; preds = %4, %.lr.ph + %i.02 = phi i32 [ 0, %.lr.ph ], [ %c9, %block4 ] + %.01 = phi %struct.B* [ %p, %.lr.ph ], [ %c10, %block4 ] + %a7 = getelementptr inbounds %struct.B* %.01, i64 0, i32 0 + %a8 = getelementptr inbounds %struct.B* %.01, i64 0, i32 1 + %a9 = getelementptr inbounds %struct.B* %.01, i64 0, i32 2 + %a10 = getelementptr inbounds %struct.B* %.01, i64 0, i32 3 + %b1 = load i32* %a2 + %b2 = load i32* %a3 + %b3 = load i32* %a4 + %b4 = load i32* %a5 + store i32 %b1, i32* %a7 + store i32 %b2, i32* %a8 + store i32 %b3, i32* %a9 + store i32 %b4, i32* %a10 + %c9 = add nsw i32 %i.02, 1 + %c10 = getelementptr inbounds %struct.B* %.01, i64 1 + %exitcond = icmp eq i32 %c9, %count + br i1 %exitcond, label %._crit_edge, label %block4 + +._crit_edge: ; preds = %4, %0 + ret void +} + +;CHECK: merge_loads_no_align +; load: +;CHECK: movl +;CHECK: movl 
+;CHECK: movl +;CHECK: movl +; store: +;CHECK: movl +;CHECK: movl +;CHECK: movl +;CHECK: movl +;CHECK: ret +define void @merge_loads_no_align(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp { + %a1 = icmp sgt i32 %count, 0 + br i1 %a1, label %.lr.ph, label %._crit_edge + +.lr.ph: ; preds = %0 + %a2 = getelementptr inbounds %struct.B* %q, i64 0, i32 0 + %a3 = getelementptr inbounds %struct.B* %q, i64 0, i32 1 + %a4 = getelementptr inbounds %struct.B* %q, i64 0, i32 2 + %a5 = getelementptr inbounds %struct.B* %q, i64 0, i32 3 + br label %block4 + +block4: ; preds = %4, %.lr.ph + %i.02 = phi i32 [ 0, %.lr.ph ], [ %c9, %block4 ] + %.01 = phi %struct.B* [ %p, %.lr.ph ], [ %c10, %block4 ] + %a7 = getelementptr inbounds %struct.B* %.01, i64 0, i32 0 + %a8 = getelementptr inbounds %struct.B* %.01, i64 0, i32 1 + %a9 = getelementptr inbounds %struct.B* %.01, i64 0, i32 2 + %a10 = getelementptr inbounds %struct.B* %.01, i64 0, i32 3 + %b1 = load i32* %a2, align 1 + %b2 = load i32* %a3, align 1 + %b3 = load i32* %a4, align 1 + %b4 = load i32* %a5, align 1 + store i32 %b1, i32* %a7, align 1 + store i32 %b2, i32* %a8, align 1 + store i32 %b3, i32* %a9, align 1 + store i32 %b4, i32* %a10, align 1 + %c9 = add nsw i32 %i.02, 1 + %c10 = getelementptr inbounds %struct.B* %.01, i64 1 + %exitcond = icmp eq i32 %c9, %count + br i1 %exitcond, label %._crit_edge, label %block4 + +._crit_edge: ; preds = %4, %0 + ret void +} + diff --git a/test/CodeGen/X86/StackColoring-dbg.ll b/test/CodeGen/X86/StackColoring-dbg.ll new file mode 100644 index 0000000..5982544 --- /dev/null +++ b/test/CodeGen/X86/StackColoring-dbg.ll @@ -0,0 +1,30 @@ +; RUN: llc -mcpu=corei7 -no-stack-coloring=false < %s + +; Make sure that we don't crash when dbg values are used. 
+ +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.8.0" + +declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone + +define void @foo() nounwind uwtable ssp { +entry: + %x.i = alloca i8, align 1 + %y.i = alloca [256 x i8], align 16 + %0 = getelementptr inbounds [256 x i8]* %y.i, i64 0, i64 0 + br label %for.body + +for.body: + call void @llvm.lifetime.end(i64 -1, i8* %0) nounwind + call void @llvm.lifetime.start(i64 -1, i8* %x.i) nounwind + call void @llvm.dbg.declare(metadata !{i8* %x.i}, metadata !22) nounwind + br label %for.body +} + +declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind + +declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind + +!16 = metadata !{i32 786468, null, metadata !"char", null, i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} +!2 = metadata !{i32 0} +!22 = metadata !{i32 786688, metadata !2, metadata !"x", metadata !2, i32 16, metadata !16, i32 0, i32 0} diff --git a/test/CodeGen/X86/StackColoring.ll b/test/CodeGen/X86/StackColoring.ll new file mode 100644 index 0000000..f8ae74f --- /dev/null +++ b/test/CodeGen/X86/StackColoring.ll @@ -0,0 +1,410 @@ +; RUN: llc -mcpu=corei7 -no-stack-coloring=false < %s | FileCheck %s --check-prefix=YESCOLOR +; RUN: llc -mcpu=corei7 -no-stack-coloring=true < %s | FileCheck %s --check-prefix=NOCOLOR + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.8.0" + +;YESCOLOR: subq $136, %rsp +;NOCOLOR: subq $264, %rsp + +define i32 @myCall_w2(i32 %in) { +entry: + %a = alloca [17 x i8*], align 8 + %a2 = alloca [16 x i8*], align 8 + %b = bitcast [17 x i8*]* %a to i8* + %b2 = bitcast [16 x i8*]* %a2 to i8* + call void @llvm.lifetime.start(i64 -1, i8* %b) + %t1 = call i32 @foo(i32 %in, i8* %b) + %t2 = call i32 @foo(i32 %in, i8* %b) + call void @llvm.lifetime.end(i64 -1, i8* %b) + call void @llvm.lifetime.start(i64 -1, i8* %b2) + %t3 = call i32 @foo(i32 %in, i8* %b2) + %t4 = call i32 @foo(i32 %in, i8* %b2) + call void @llvm.lifetime.end(i64 -1, i8* %b2) + %t5 = add i32 %t1, %t2 + %t6 = add i32 %t3, %t4 + %t7 = add i32 %t5, %t6 + ret i32 %t7 +} + + +;YESCOLOR: subq $272, %rsp +;NOCOLOR: subq $272, %rsp + +define i32 @myCall2_no_merge(i32 %in, i1 %d) { +entry: + %a = alloca [17 x i8*], align 8 + %a2 = alloca [16 x i8*], align 8 + %b = bitcast [17 x i8*]* %a to i8* + %b2 = bitcast [16 x i8*]* %a2 to i8* + call void @llvm.lifetime.start(i64 -1, i8* %b) + %t1 = call i32 @foo(i32 %in, i8* %b) + %t2 = call i32 @foo(i32 %in, i8* %b) + br i1 %d, label %bb2, label %bb3 +bb2: + call void @llvm.lifetime.start(i64 -1, i8* %b2) + %t3 = call i32 @foo(i32 %in, i8* %b2) + %t4 = call i32 @foo(i32 %in, i8* %b2) + call void @llvm.lifetime.end(i64 -1, i8* %b2) + %t5 = add i32 %t1, %t2 + %t6 = add i32 %t3, %t4 + %t7 = add i32 %t5, %t6 + call void @llvm.lifetime.end(i64 -1, i8* %b) + ret i32 %t7 +bb3: + call void @llvm.lifetime.end(i64 -1, i8* %b) + ret i32 0 +} + +;YESCOLOR: subq $144, %rsp +;NOCOLOR: subq $272, %rsp + +define i32 @myCall2_w2(i32 %in, i1 %d) { +entry: + %a = alloca [17 x i8*], align 8 + %a2 = alloca [16 x i8*], align 8 + %b = bitcast [17 x i8*]* %a to i8* + %b2 = bitcast [16 x i8*]* %a2 to i8* + call void @llvm.lifetime.start(i64 -1, i8* %b) + %t1 = call i32 @foo(i32 %in, i8* %b) + %t2 = call i32 @foo(i32 
%in, i8* %b) + call void @llvm.lifetime.end(i64 -1, i8* %b) + br i1 %d, label %bb2, label %bb3 +bb2: + call void @llvm.lifetime.start(i64 -1, i8* %b2) + %t3 = call i32 @foo(i32 %in, i8* %b2) + %t4 = call i32 @foo(i32 %in, i8* %b2) + call void @llvm.lifetime.end(i64 -1, i8* %b2) + %t5 = add i32 %t1, %t2 + %t6 = add i32 %t3, %t4 + %t7 = add i32 %t5, %t6 + ret i32 %t7 +bb3: + ret i32 0 +} +;YESCOLOR: subq $208, %rsp +;NOCOLOR: subq $400, %rsp + + + + +define i32 @myCall_w4(i32 %in) { +entry: + %a1 = alloca [14 x i8*], align 8 + %a2 = alloca [13 x i8*], align 8 + %a3 = alloca [12 x i8*], align 8 + %a4 = alloca [11 x i8*], align 8 + %b1 = bitcast [14 x i8*]* %a1 to i8* + %b2 = bitcast [13 x i8*]* %a2 to i8* + %b3 = bitcast [12 x i8*]* %a3 to i8* + %b4 = bitcast [11 x i8*]* %a4 to i8* + call void @llvm.lifetime.start(i64 -1, i8* %b4) + call void @llvm.lifetime.start(i64 -1, i8* %b1) + %t1 = call i32 @foo(i32 %in, i8* %b1) + %t2 = call i32 @foo(i32 %in, i8* %b1) + call void @llvm.lifetime.end(i64 -1, i8* %b1) + call void @llvm.lifetime.start(i64 -1, i8* %b2) + %t9 = call i32 @foo(i32 %in, i8* %b2) + %t8 = call i32 @foo(i32 %in, i8* %b2) + call void @llvm.lifetime.end(i64 -1, i8* %b2) + call void @llvm.lifetime.start(i64 -1, i8* %b3) + %t3 = call i32 @foo(i32 %in, i8* %b3) + %t4 = call i32 @foo(i32 %in, i8* %b3) + call void @llvm.lifetime.end(i64 -1, i8* %b3) + %t11 = call i32 @foo(i32 %in, i8* %b4) + call void @llvm.lifetime.end(i64 -1, i8* %b4) + %t5 = add i32 %t1, %t2 + %t6 = add i32 %t3, %t4 + %t7 = add i32 %t5, %t6 + ret i32 %t7 +} + +;YESCOLOR: subq $112, %rsp +;NOCOLOR: subq $400, %rsp + +define i32 @myCall2_w4(i32 %in) { +entry: + %a1 = alloca [14 x i8*], align 8 + %a2 = alloca [13 x i8*], align 8 + %a3 = alloca [12 x i8*], align 8 + %a4 = alloca [11 x i8*], align 8 + %b1 = bitcast [14 x i8*]* %a1 to i8* + %b2 = bitcast [13 x i8*]* %a2 to i8* + %b3 = bitcast [12 x i8*]* %a3 to i8* + %b4 = bitcast [11 x i8*]* %a4 to i8* + call void @llvm.lifetime.start(i64 -1, i8* %b1) + %t1 = call i32 @foo(i32 %in, i8* %b1) + %t2 = call i32 @foo(i32 %in, i8* %b1) + call void @llvm.lifetime.end(i64 -1, i8* %b1) + call void @llvm.lifetime.start(i64 -1, i8* %b2) + %t9 = call i32 @foo(i32 %in, i8* %b2) + %t8 = call i32 @foo(i32 %in, i8* %b2) + call void @llvm.lifetime.end(i64 -1, i8* %b2) + call void @llvm.lifetime.start(i64 -1, i8* %b3) + %t3 = call i32 @foo(i32 %in, i8* %b3) + %t4 = call i32 @foo(i32 %in, i8* %b3) + call void @llvm.lifetime.end(i64 -1, i8* %b3) + br i1 undef, label %bb2, label %bb3 +bb2: + call void @llvm.lifetime.start(i64 -1, i8* %b4) + %t11 = call i32 @foo(i32 %in, i8* %b4) + call void @llvm.lifetime.end(i64 -1, i8* %b4) + %t5 = add i32 %t1, %t2 + %t6 = add i32 %t3, %t4 + %t7 = add i32 %t5, %t6 + ret i32 %t7 +bb3: + ret i32 0 +} + + +;YESCOLOR: subq $144, %rsp +;NOCOLOR: subq $272, %rsp + + +define i32 @myCall2_noend(i32 %in, i1 %d) { +entry: + %a = alloca [17 x i8*], align 8 + %a2 = alloca [16 x i8*], align 8 + %b = bitcast [17 x i8*]* %a to i8* + %b2 = bitcast [16 x i8*]* %a2 to i8* + call void @llvm.lifetime.start(i64 -1, i8* %b) + %t1 = call i32 @foo(i32 %in, i8* %b) + %t2 = call i32 @foo(i32 %in, i8* %b) + call void @llvm.lifetime.end(i64 -1, i8* %b) + br i1 %d, label %bb2, label %bb3 +bb2: + call void @llvm.lifetime.start(i64 -1, i8* %b2) + %t3 = call i32 @foo(i32 %in, i8* %b2) + %t4 = call i32 @foo(i32 %in, i8* %b2) + %t5 = add i32 %t1, %t2 + %t6 = add i32 %t3, %t4 + %t7 = add i32 %t5, %t6 + ret i32 %t7 +bb3: + ret i32 0 +} + +;YESCOLOR: subq $144, %rsp +;NOCOLOR: subq $272, %rsp 
+define i32 @myCall2_noend2(i32 %in, i1 %d) { +entry: + %a = alloca [17 x i8*], align 8 + %a2 = alloca [16 x i8*], align 8 + %b = bitcast [17 x i8*]* %a to i8* + %b2 = bitcast [16 x i8*]* %a2 to i8* + call void @llvm.lifetime.start(i64 -1, i8* %b) + %t1 = call i32 @foo(i32 %in, i8* %b) + %t2 = call i32 @foo(i32 %in, i8* %b) + br i1 %d, label %bb2, label %bb3 +bb2: + call void @llvm.lifetime.end(i64 -1, i8* %b) + call void @llvm.lifetime.start(i64 -1, i8* %b2) + %t3 = call i32 @foo(i32 %in, i8* %b2) + %t4 = call i32 @foo(i32 %in, i8* %b2) + %t5 = add i32 %t1, %t2 + %t6 = add i32 %t3, %t4 + %t7 = add i32 %t5, %t6 + ret i32 %t7 +bb3: + ret i32 0 +} + + +;YESCOLOR: subq $144, %rsp +;NOCOLOR: subq $272, %rsp +define i32 @myCall2_nostart(i32 %in, i1 %d) { +entry: + %a = alloca [17 x i8*], align 8 + %a2 = alloca [16 x i8*], align 8 + %b = bitcast [17 x i8*]* %a to i8* + %b2 = bitcast [16 x i8*]* %a2 to i8* + %t1 = call i32 @foo(i32 %in, i8* %b) + %t2 = call i32 @foo(i32 %in, i8* %b) + call void @llvm.lifetime.end(i64 -1, i8* %b) + br i1 %d, label %bb2, label %bb3 +bb2: + call void @llvm.lifetime.start(i64 -1, i8* %b2) + %t3 = call i32 @foo(i32 %in, i8* %b2) + %t4 = call i32 @foo(i32 %in, i8* %b2) + %t5 = add i32 %t1, %t2 + %t6 = add i32 %t3, %t4 + %t7 = add i32 %t5, %t6 + ret i32 %t7 +bb3: + ret i32 0 +} + +; Adopt the test from Transforms/Inline/array_merge.ll' +;YESCOLOR: subq $816, %rsp +;NOCOLOR: subq $1616, %rsp +define void @array_merge() nounwind ssp { +entry: + %A.i1 = alloca [100 x i32], align 4 + %B.i2 = alloca [100 x i32], align 4 + %A.i = alloca [100 x i32], align 4 + %B.i = alloca [100 x i32], align 4 + %0 = bitcast [100 x i32]* %A.i to i8* + call void @llvm.lifetime.start(i64 -1, i8* %0) nounwind + %1 = bitcast [100 x i32]* %B.i to i8* + call void @llvm.lifetime.start(i64 -1, i8* %1) nounwind + call void @bar([100 x i32]* %A.i, [100 x i32]* %B.i) nounwind + call void @llvm.lifetime.end(i64 -1, i8* %0) nounwind + call void @llvm.lifetime.end(i64 -1, i8* %1) nounwind + %2 = bitcast [100 x i32]* %A.i1 to i8* + call void @llvm.lifetime.start(i64 -1, i8* %2) nounwind + %3 = bitcast [100 x i32]* %B.i2 to i8* + call void @llvm.lifetime.start(i64 -1, i8* %3) nounwind + call void @bar([100 x i32]* %A.i1, [100 x i32]* %B.i2) nounwind + call void @llvm.lifetime.end(i64 -1, i8* %2) nounwind + call void @llvm.lifetime.end(i64 -1, i8* %3) nounwind + ret void +} + +;YESCOLOR: subq $272, %rsp +;NOCOLOR: subq $272, %rsp +define i32 @func_phi_lifetime(i32 %in, i1 %d) { +entry: + %a = alloca [17 x i8*], align 8 + %a2 = alloca [16 x i8*], align 8 + %b = bitcast [17 x i8*]* %a to i8* + %b2 = bitcast [16 x i8*]* %a2 to i8* + %t1 = call i32 @foo(i32 %in, i8* %b) + %t2 = call i32 @foo(i32 %in, i8* %b) + call void @llvm.lifetime.end(i64 -1, i8* %b) + br i1 %d, label %bb0, label %bb1 + +bb0: + %I1 = bitcast [17 x i8*]* %a to i8* + br label %bb2 + +bb1: + %I2 = bitcast [16 x i8*]* %a2 to i8* + br label %bb2 + +bb2: + %split = phi i8* [ %I1, %bb0 ], [ %I2, %bb1 ] + call void @llvm.lifetime.start(i64 -1, i8* %split) + %t3 = call i32 @foo(i32 %in, i8* %b2) + %t4 = call i32 @foo(i32 %in, i8* %b2) + %t5 = add i32 %t1, %t2 + %t6 = add i32 %t3, %t4 + %t7 = add i32 %t5, %t6 + call void @llvm.lifetime.end(i64 -1, i8* %split) + ret i32 %t7 +bb3: + ret i32 0 +} + + +;YESCOLOR: multi_region_bb +;NOCOLOR: multi_region_bb +define void @multi_region_bb() nounwind ssp { +entry: + %A.i1 = alloca [100 x i32], align 4 + %B.i2 = alloca [100 x i32], align 4 + %A.i = alloca [100 x i32], align 4 + %B.i = alloca [100 x i32], align 4 
+ %0 = bitcast [100 x i32]* %A.i to i8* + call void @llvm.lifetime.start(i64 -1, i8* %0) nounwind ; <---- start #1 + %1 = bitcast [100 x i32]* %B.i to i8* + call void @llvm.lifetime.start(i64 -1, i8* %1) nounwind + call void @bar([100 x i32]* %A.i, [100 x i32]* %B.i) nounwind + call void @llvm.lifetime.end(i64 -1, i8* %0) nounwind + call void @llvm.lifetime.end(i64 -1, i8* %1) nounwind + %2 = bitcast [100 x i32]* %A.i1 to i8* + call void @llvm.lifetime.start(i64 -1, i8* %2) nounwind + %3 = bitcast [100 x i32]* %B.i2 to i8* + call void @llvm.lifetime.start(i64 -1, i8* %3) nounwind + call void @llvm.lifetime.start(i64 -1, i8* %0) nounwind ; <---- start #2 + call void @bar([100 x i32]* %A.i1, [100 x i32]* %B.i2) nounwind + call void @llvm.lifetime.end(i64 -1, i8* %2) nounwind + call void @llvm.lifetime.end(i64 -1, i8* %0) nounwind + call void @llvm.lifetime.end(i64 -1, i8* %3) nounwind + ret void +} + + +;YESCOLOR: subq $272, %rsp +;NOCOLOR: subq $272, %rsp +define i32 @myCall_end_before_begin(i32 %in, i1 %d) { +entry: + %a = alloca [17 x i8*], align 8 + %a2 = alloca [16 x i8*], align 8 + %b = bitcast [17 x i8*]* %a to i8* + %b2 = bitcast [16 x i8*]* %a2 to i8* + %t1 = call i32 @foo(i32 %in, i8* %b) + %t2 = call i32 @foo(i32 %in, i8* %b) + call void @llvm.lifetime.end(i64 -1, i8* %b) + call void @llvm.lifetime.start(i64 -1, i8* %b) + br i1 %d, label %bb2, label %bb3 +bb2: + call void @llvm.lifetime.start(i64 -1, i8* %b2) + %t3 = call i32 @foo(i32 %in, i8* %b2) + %t4 = call i32 @foo(i32 %in, i8* %b2) + %t5 = add i32 %t1, %t2 + %t6 = add i32 %t3, %t4 + %t7 = add i32 %t5, %t6 + ret i32 %t7 +bb3: + ret i32 0 +} + +; Check that we don't assert and crash even when there are allocas +; outside the declared lifetime regions. +;YESCOLOR: bad_range +;NOCOLOR: bad_range +define void @bad_range() nounwind ssp { +entry: + %A.i1 = alloca [100 x i32], align 4 + %B.i2 = alloca [100 x i32], align 4 + %A.i = alloca [100 x i32], align 4 + %B.i = alloca [100 x i32], align 4 + %0 = bitcast [100 x i32]* %A.i to i8* + call void @llvm.lifetime.start(i64 -1, i8* %0) nounwind + %1 = bitcast [100 x i32]* %B.i to i8* + call void @llvm.lifetime.start(i64 -1, i8* %1) nounwind + call void @bar([100 x i32]* %A.i, [100 x i32]* %B.i) nounwind + call void @llvm.lifetime.end(i64 -1, i8* %0) nounwind + call void @llvm.lifetime.end(i64 -1, i8* %1) nounwind + br label %block2 + +block2: + ; I am used outside the marked lifetime. + call void @bar([100 x i32]* %A.i, [100 x i32]* %B.i) nounwind + ret void +} + + +; Check that we don't assert and crash even when there are usages +; of allocas which do not read or write outside the declared lifetime regions. 
+;YESCOLOR: shady_range +;NOCOLOR: shady_range + +%struct.Klass = type { i32, i32 } + +define i32 @shady_range(i32 %argc, i8** nocapture %argv) uwtable { + %a.i = alloca [4 x %struct.Klass], align 16 + %b.i = alloca [4 x %struct.Klass], align 16 + %a8 = bitcast [4 x %struct.Klass]* %a.i to i8* + %b8 = bitcast [4 x %struct.Klass]* %b.i to i8* + ; I am used outside the lifetime zone below: + %z2 = getelementptr inbounds [4 x %struct.Klass]* %a.i, i64 0, i64 0, i32 0 + call void @llvm.lifetime.start(i64 -1, i8* %a8) + call void @llvm.lifetime.start(i64 -1, i8* %b8) + %z3 = load i32* %z2, align 16 + %r = call i32 @foo(i32 %z3, i8* %a8) + %r2 = call i32 @foo(i32 %z3, i8* %b8) + call void @llvm.lifetime.end(i64 -1, i8* %a8) + call void @llvm.lifetime.end(i64 -1, i8* %b8) + ret i32 9 +} + +declare void @bar([100 x i32]* , [100 x i32]*) nounwind + +declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind + +declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind + +declare i32 @foo(i32, i8*) + diff --git a/test/CodeGen/X86/add-of-carry.ll b/test/CodeGen/X86/add-of-carry.ll index a4abccb..4e30f2b 100644 --- a/test/CodeGen/X86/add-of-carry.ll +++ b/test/CodeGen/X86/add-of-carry.ll @@ -30,4 +30,17 @@ entry: ret i32 %z.0 } +; <rdar://problem/12579915> +define i32 @test3(i32 %x, i32 %y, i32 %res) nounwind uwtable readnone ssp { +entry: + %cmp = icmp ugt i32 %x, %y + %dec = sext i1 %cmp to i32 + %dec.res = add nsw i32 %dec, %res + ret i32 %dec.res +; CHECK: test3: +; CHECK: cmpl +; CHECK: sbbl +; CHECK: ret +} + declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone diff --git a/test/CodeGen/X86/atom-bypass-slow-division.ll b/test/CodeGen/X86/atom-bypass-slow-division.ll new file mode 100644 index 0000000..e7c9605 --- /dev/null +++ b/test/CodeGen/X86/atom-bypass-slow-division.ll @@ -0,0 +1,112 @@ +; RUN: llc < %s -mcpu=atom -mtriple=i686-linux | FileCheck %s + +define i32 @test_get_quotient(i32 %a, i32 %b) nounwind { +; CHECK: test_get_quotient +; CHECK: orl %ecx, %edx +; CHECK-NEXT: testl $-256, %edx +; CHECK-NEXT: je +; CHECK: idivl +; CHECK: ret +; CHECK: divb +; CHECK: ret + %result = sdiv i32 %a, %b + ret i32 %result +} + +define i32 @test_get_remainder(i32 %a, i32 %b) nounwind { +; CHECK: test_get_remainder +; CHECK: orl %ecx, %edx +; CHECK-NEXT: testl $-256, %edx +; CHECK-NEXT: je +; CHECK: idivl +; CHECK: ret +; CHECK: divb +; CHECK: ret + %result = srem i32 %a, %b + ret i32 %result +} + +define i32 @test_get_quotient_and_remainder(i32 %a, i32 %b) nounwind { +; CHECK: test_get_quotient_and_remainder +; CHECK: orl %ecx, %edx +; CHECK-NEXT: testl $-256, %edx +; CHECK-NEXT: je +; CHECK: idivl +; CHECK: divb +; CHECK: addl +; CHECK: ret +; CEECK-NOT: idivl +; CHECK-NOT: divb + %resultdiv = sdiv i32 %a, %b + %resultrem = srem i32 %a, %b + %result = add i32 %resultdiv, %resultrem + ret i32 %result +} + +define i32 @test_use_div_and_idiv(i32 %a, i32 %b) nounwind { +; CHECK: test_use_div_and_idiv +; CHECK: idivl +; CHECK: divb +; CHECK: divl +; CHECK: divb +; CHECK: addl +; CHECK: ret + %resultidiv = sdiv i32 %a, %b + %resultdiv = udiv i32 %a, %b + %result = add i32 %resultidiv, %resultdiv + ret i32 %result +} + +define i32 @test_use_div_imm_imm() nounwind { +; CHECK: test_use_div_imm_imm +; CHECK: movl $64 + %resultdiv = sdiv i32 256, 4 + ret i32 %resultdiv +} + +define i32 @test_use_div_reg_imm(i32 %a) nounwind { +; CHECK: test_use_div_reg_imm +; CEHCK-NOT: test +; CHECK-NOT: idiv +; CHECK-NOT: divb + %resultdiv = sdiv i32 %a, 33 + ret i32 %resultdiv +} + +define i32 
@test_use_rem_reg_imm(i32 %a) nounwind { +; CHECK: test_use_rem_reg_imm +; CEHCK-NOT: test +; CHECK-NOT: idiv +; CHECK-NOT: divb + %resultrem = srem i32 %a, 33 + ret i32 %resultrem +} + +define i32 @test_use_divrem_reg_imm(i32 %a) nounwind { +; CHECK: test_use_divrem_reg_imm +; CEHCK-NOT: test +; CHECK-NOT: idiv +; CHECK-NOT: divb + %resultdiv = sdiv i32 %a, 33 + %resultrem = srem i32 %a, 33 + %result = add i32 %resultdiv, %resultrem + ret i32 %result +} + +define i32 @test_use_div_imm_reg(i32 %a) nounwind { +; CHECK: test_use_div_imm_reg +; CHECK: test +; CHECK: idiv +; CHECK: divb + %resultdiv = sdiv i32 4, %a + ret i32 %resultdiv +} + +define i32 @test_use_rem_imm_reg(i32 %a) nounwind { +; CHECK: test_use_rem_imm_reg +; CHECK: test +; CHECK: idiv +; CHECK: divb + %resultdiv = sdiv i32 4, %a + ret i32 %resultdiv +} diff --git a/test/CodeGen/X86/atom-shuf.ll b/test/CodeGen/X86/atom-shuf.ll new file mode 100644 index 0000000..4c3f2f6 --- /dev/null +++ b/test/CodeGen/X86/atom-shuf.ll @@ -0,0 +1,9 @@ +; RUN: llc < %s -mtriple=x86_64-linux-pc -mcpu=atom | FileCheck %s + +define <16 x i8> @foo(<16 x i8> %in) { + %r = shufflevector <16 x i8> %in, <16 x i8> undef, <16 x i32> < i32 7, i32 3, i32 2, i32 11, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> + ret <16 x i8> %r +; CHECK: foo +; CHECK: pshufb +; CHECK-NEXT: ret +} diff --git a/test/CodeGen/X86/atomic-minmax-i6432.ll b/test/CodeGen/X86/atomic-minmax-i6432.ll new file mode 100644 index 0000000..e3ef605 --- /dev/null +++ b/test/CodeGen/X86/atomic-minmax-i6432.ll @@ -0,0 +1,67 @@ +; RUN: llc -march=x86 -mattr=+cmov -mtriple=i386-pc-linux < %s | FileCheck %s -check-prefix=LINUX +; RUN: llc -march=x86 -mtriple=i386-macosx -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC + +@sc64 = external global i64 + +define void @atomic_maxmin_i6432() { +; LINUX: atomic_maxmin_i6432 + %1 = atomicrmw max i64* @sc64, i64 5 acquire +; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]] +; LINUX: cmpl +; LINUX: setl +; LINUX: cmpl +; LINUX: setl +; LINUX: cmovne +; LINUX: cmovne +; LINUX: lock +; LINUX-NEXT: cmpxchg8b +; LINUX: jne [[LABEL]] + %2 = atomicrmw min i64* @sc64, i64 6 acquire +; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]] +; LINUX: cmpl +; LINUX: setg +; LINUX: cmpl +; LINUX: setg +; LINUX: cmovne +; LINUX: cmovne +; LINUX: lock +; LINUX-NEXT: cmpxchg8b +; LINUX: jne [[LABEL]] + %3 = atomicrmw umax i64* @sc64, i64 7 acquire +; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]] +; LINUX: cmpl +; LINUX: setb +; LINUX: cmpl +; LINUX: setb +; LINUX: cmovne +; LINUX: cmovne +; LINUX: lock +; LINUX-NEXT: cmpxchg8b +; LINUX: jne [[LABEL]] + %4 = atomicrmw umin i64* @sc64, i64 8 acquire +; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]] +; LINUX: cmpl +; LINUX: seta +; LINUX: cmpl +; LINUX: seta +; LINUX: cmovne +; LINUX: cmovne +; LINUX: lock +; LINUX-NEXT: cmpxchg8b +; LINUX: jne [[LABEL]] + ret void +} + +; rdar://12453106 +@id = internal global i64 0, align 8 + +define void @tf_bug(i8* %ptr) nounwind { +; PIC: tf_bug: +; PIC: movl _id-L1$pb( +; PIC: movl (_id-L1$pb)+4( + %tmp1 = atomicrmw add i64* @id, i64 1 seq_cst + %tmp2 = add i64 %tmp1, 1 + %tmp3 = bitcast i8* %ptr to i64* + store i64 %tmp2, i64* %tmp3, align 4 + ret void +} diff --git a/test/CodeGen/X86/atomic-pointer.ll b/test/CodeGen/X86/atomic-pointer.ll new file mode 100644 index 0000000..a455277 --- /dev/null +++ b/test/CodeGen/X86/atomic-pointer.ll @@ -0,0 +1,22 @@ +; RUN: llc < %s -mtriple=i686-none-linux | FileCheck %s + +define i32* 
@test_atomic_ptr_load(i32** %a0) { +; CHECK: test_atomic_ptr_load +; CHECK: movl +; CHECK: movl +; CHECK: ret +0: + %0 = load atomic i32** %a0 seq_cst, align 4 + ret i32* %0 +} + +define void @test_atomic_ptr_store(i32* %a0, i32** %a1) { +; CHECK: test_atomic_ptr_store +; CHECK: movl +; CHECK: movl +; CHECK: xchgl +; CHECK: ret +0: + store atomic i32* %a0, i32** %a1 seq_cst, align 4 + ret void +} diff --git a/test/CodeGen/X86/atomic16.ll b/test/CodeGen/X86/atomic16.ll new file mode 100644 index 0000000..824995d --- /dev/null +++ b/test/CodeGen/X86/atomic16.ll @@ -0,0 +1,250 @@ +; RUN: llc < %s -O0 -mtriple=x86_64-unknown-unknown -mcpu=corei7 -show-mc-encoding | FileCheck %s --check-prefix X64 +; RUN: llc < %s -O0 -mtriple=i386-unknown-unknown -mcpu=corei7 | FileCheck %s --check-prefix X32 + +@sc16 = external global i16 + +define void @atomic_fetch_add16() nounwind { +; X64: atomic_fetch_add16 +; X32: atomic_fetch_add16 +entry: +; 32-bit + %t1 = atomicrmw add i16* @sc16, i16 1 acquire +; X64: lock +; X64: incw +; X32: lock +; X32: incw + %t2 = atomicrmw add i16* @sc16, i16 3 acquire +; X64: lock +; X64: addw $3, {{.*}} # encoding: [0xf0,0x66 +; X32: lock +; X32: addw $3 + %t3 = atomicrmw add i16* @sc16, i16 5 acquire +; X64: lock +; X64: xaddw {{.*}} # encoding: [0xf0,0x66 +; X32: lock +; X32: xaddw + %t4 = atomicrmw add i16* @sc16, i16 %t3 acquire +; X64: lock +; X64: addw {{.*}} # encoding: [0xf0,0x66 +; X32: lock +; X32: addw + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_sub16() nounwind { +; X64: atomic_fetch_sub16 +; X32: atomic_fetch_sub16 + %t1 = atomicrmw sub i16* @sc16, i16 1 acquire +; X64: lock +; X64: decw +; X32: lock +; X32: decw + %t2 = atomicrmw sub i16* @sc16, i16 3 acquire +; X64: lock +; X64: subw $3, {{.*}} # encoding: [0xf0,0x66 +; X32: lock +; X32: subw $3 + %t3 = atomicrmw sub i16* @sc16, i16 5 acquire +; X64: lock +; X64: xaddw {{.*}} # encoding: [0xf0,0x66 +; X32: lock +; X32: xaddw + %t4 = atomicrmw sub i16* @sc16, i16 %t3 acquire +; X64: lock +; X64: subw {{.*}} # encoding: [0xf0,0x66 +; X32: lock +; X32: subw + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_and16() nounwind { +; X64: atomic_fetch_and16 +; X32: atomic_fetch_and16 + %t1 = atomicrmw and i16* @sc16, i16 3 acquire +; X64: lock +; X64: andw $3, {{.*}} # encoding: [0xf0,0x66 +; X32: lock +; X32: andw $3 + %t2 = atomicrmw and i16* @sc16, i16 5 acquire +; X64: andw +; X64: lock +; X64: cmpxchgw +; X32: andw +; X32: lock +; X32: cmpxchgw + %t3 = atomicrmw and i16* @sc16, i16 %t2 acquire +; X64: lock +; X64: andw {{.*}} # encoding: [0xf0,0x66 +; X32: lock +; X32: andw + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_or16() nounwind { +; X64: atomic_fetch_or16 +; X32: atomic_fetch_or16 + %t1 = atomicrmw or i16* @sc16, i16 3 acquire +; X64: lock +; X64: orw $3, {{.*}} # encoding: [0xf0,0x66 +; X32: lock +; X32: orw $3 + %t2 = atomicrmw or i16* @sc16, i16 5 acquire +; X64: orw +; X64: lock +; X64: cmpxchgw +; X32: orw +; X32: lock +; X32: cmpxchgw + %t3 = atomicrmw or i16* @sc16, i16 %t2 acquire +; X64: lock +; X64: orw {{.*}} # encoding: [0xf0,0x66 +; X32: lock +; X32: orw + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_xor16() nounwind { +; X64: atomic_fetch_xor16 +; X32: atomic_fetch_xor16 + %t1 = atomicrmw xor i16* @sc16, i16 3 acquire +; X64: lock +; X64: xorw $3, {{.*}} # encoding: [0xf0,0x66 +; X32: lock +; X32: xorw $3 + %t2 = atomicrmw xor i16* @sc16, i16 5 acquire +; X64: xorw +; X64: lock +; X64: cmpxchgw +; X32: xorw +; 
X32: lock +; X32: cmpxchgw + %t3 = atomicrmw xor i16* @sc16, i16 %t2 acquire +; X64: lock +; X64: xorw {{.*}} # encoding: [0xf0,0x66 +; X32: lock +; X32: xorw + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_nand16(i16 %x) nounwind { +; X64: atomic_fetch_nand16 +; X32: atomic_fetch_nand16 + %t1 = atomicrmw nand i16* @sc16, i16 %x acquire +; X64: andw +; X64: notw +; X64: lock +; X64: cmpxchgw +; X32: andw +; X32: notw +; X32: lock +; X32: cmpxchgw + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_max16(i16 %x) nounwind { + %t1 = atomicrmw max i16* @sc16, i16 %x acquire +; X64: cmpw +; X64: cmov +; X64: lock +; X64: cmpxchgw + +; X32: cmpw +; X32: cmov +; X32: lock +; X32: cmpxchgw + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_min16(i16 %x) nounwind { + %t1 = atomicrmw min i16* @sc16, i16 %x acquire +; X64: cmpw +; X64: cmov +; X64: lock +; X64: cmpxchgw + +; X32: cmpw +; X32: cmov +; X32: lock +; X32: cmpxchgw + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_umax16(i16 %x) nounwind { + %t1 = atomicrmw umax i16* @sc16, i16 %x acquire +; X64: cmpw +; X64: cmov +; X64: lock +; X64: cmpxchgw + +; X32: cmpw +; X32: cmov +; X32: lock +; X32: cmpxchgw + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_umin16(i16 %x) nounwind { + %t1 = atomicrmw umin i16* @sc16, i16 %x acquire +; X64: cmpw +; X64: cmov +; X64: lock +; X64: cmpxchgw +; X32: cmpw +; X32: cmov +; X32: lock +; X32: cmpxchgw + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_cmpxchg16() nounwind { + %t1 = cmpxchg i16* @sc16, i16 0, i16 1 acquire +; X64: lock +; X64: cmpxchgw +; X32: lock +; X32: cmpxchgw + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_store16(i16 %x) nounwind { + store atomic i16 %x, i16* @sc16 release, align 4 +; X64-NOT: lock +; X64: movw +; X32-NOT: lock +; X32: movw + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_swap16(i16 %x) nounwind { + %t1 = atomicrmw xchg i16* @sc16, i16 %x acquire +; X64-NOT: lock +; X64: xchgw +; X32-NOT: lock +; X32: xchgw + ret void +; X64: ret +; X32: ret +} diff --git a/test/CodeGen/X86/atomic32.ll b/test/CodeGen/X86/atomic32.ll new file mode 100644 index 0000000..dc927d8 --- /dev/null +++ b/test/CodeGen/X86/atomic32.ll @@ -0,0 +1,250 @@ +; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 | FileCheck %s --check-prefix X64 +; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 | FileCheck %s --check-prefix X32 + +@sc32 = external global i32 + +define void @atomic_fetch_add32() nounwind { +; X64: atomic_fetch_add32 +; X32: atomic_fetch_add32 +entry: +; 32-bit + %t1 = atomicrmw add i32* @sc32, i32 1 acquire +; X64: lock +; X64: incl +; X32: lock +; X32: incl + %t2 = atomicrmw add i32* @sc32, i32 3 acquire +; X64: lock +; X64: addl $3 +; X32: lock +; X32: addl $3 + %t3 = atomicrmw add i32* @sc32, i32 5 acquire +; X64: lock +; X64: xaddl +; X32: lock +; X32: xaddl + %t4 = atomicrmw add i32* @sc32, i32 %t3 acquire +; X64: lock +; X64: addl +; X32: lock +; X32: addl + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_sub32() nounwind { +; X64: atomic_fetch_sub32 +; X32: atomic_fetch_sub32 + %t1 = atomicrmw sub i32* @sc32, i32 1 acquire +; X64: lock +; X64: decl +; X32: lock +; X32: decl + %t2 = atomicrmw sub i32* @sc32, i32 3 acquire +; X64: lock +; X64: subl $3 +; X32: lock +; X32: subl $3 + %t3 = atomicrmw sub i32* @sc32, i32 5 acquire +; X64: lock +; X64: xaddl +; X32: lock +; X32: xaddl + %t4 = atomicrmw sub i32* @sc32, i32 %t3 acquire +; X64: 
lock +; X64: subl +; X32: lock +; X32: subl + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_and32() nounwind { +; X64: atomic_fetch_and32 +; X32: atomic_fetch_and32 + %t1 = atomicrmw and i32* @sc32, i32 3 acquire +; X64: lock +; X64: andl $3 +; X32: lock +; X32: andl $3 + %t2 = atomicrmw and i32* @sc32, i32 5 acquire +; X64: andl +; X64: lock +; X64: cmpxchgl +; X32: andl +; X32: lock +; X32: cmpxchgl + %t3 = atomicrmw and i32* @sc32, i32 %t2 acquire +; X64: lock +; X64: andl +; X32: lock +; X32: andl + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_or32() nounwind { +; X64: atomic_fetch_or32 +; X32: atomic_fetch_or32 + %t1 = atomicrmw or i32* @sc32, i32 3 acquire +; X64: lock +; X64: orl $3 +; X32: lock +; X32: orl $3 + %t2 = atomicrmw or i32* @sc32, i32 5 acquire +; X64: orl +; X64: lock +; X64: cmpxchgl +; X32: orl +; X32: lock +; X32: cmpxchgl + %t3 = atomicrmw or i32* @sc32, i32 %t2 acquire +; X64: lock +; X64: orl +; X32: lock +; X32: orl + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_xor32() nounwind { +; X64: atomic_fetch_xor32 +; X32: atomic_fetch_xor32 + %t1 = atomicrmw xor i32* @sc32, i32 3 acquire +; X64: lock +; X64: xorl $3 +; X32: lock +; X32: xorl $3 + %t2 = atomicrmw xor i32* @sc32, i32 5 acquire +; X64: xorl +; X64: lock +; X64: cmpxchgl +; X32: xorl +; X32: lock +; X32: cmpxchgl + %t3 = atomicrmw xor i32* @sc32, i32 %t2 acquire +; X64: lock +; X64: xorl +; X32: lock +; X32: xorl + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_nand32(i32 %x) nounwind { +; X64: atomic_fetch_nand32 +; X32: atomic_fetch_nand32 + %t1 = atomicrmw nand i32* @sc32, i32 %x acquire +; X64: andl +; X64: notl +; X64: lock +; X64: cmpxchgl +; X32: andl +; X32: notl +; X32: lock +; X32: cmpxchgl + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_max32(i32 %x) nounwind { + %t1 = atomicrmw max i32* @sc32, i32 %x acquire +; X64: cmpl +; X64: cmov +; X64: lock +; X64: cmpxchgl + +; X32: cmpl +; X32: cmov +; X32: lock +; X32: cmpxchgl + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_min32(i32 %x) nounwind { + %t1 = atomicrmw min i32* @sc32, i32 %x acquire +; X64: cmpl +; X64: cmov +; X64: lock +; X64: cmpxchgl + +; X32: cmpl +; X32: cmov +; X32: lock +; X32: cmpxchgl + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_umax32(i32 %x) nounwind { + %t1 = atomicrmw umax i32* @sc32, i32 %x acquire +; X64: cmpl +; X64: cmov +; X64: lock +; X64: cmpxchgl + +; X32: cmpl +; X32: cmov +; X32: lock +; X32: cmpxchgl + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_umin32(i32 %x) nounwind { + %t1 = atomicrmw umin i32* @sc32, i32 %x acquire +; X64: cmpl +; X64: cmov +; X64: lock +; X64: cmpxchgl +; X32: cmpl +; X32: cmov +; X32: lock +; X32: cmpxchgl + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_cmpxchg32() nounwind { + %t1 = cmpxchg i32* @sc32, i32 0, i32 1 acquire +; X64: lock +; X64: cmpxchgl +; X32: lock +; X32: cmpxchgl + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_store32(i32 %x) nounwind { + store atomic i32 %x, i32* @sc32 release, align 4 +; X64-NOT: lock +; X64: movl +; X32-NOT: lock +; X32: movl + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_swap32(i32 %x) nounwind { + %t1 = atomicrmw xchg i32* @sc32, i32 %x acquire +; X64-NOT: lock +; X64: xchgl +; X32-NOT: lock +; X32: xchgl + ret void +; X64: ret +; X32: ret +} diff --git a/test/CodeGen/X86/atomic64.ll b/test/CodeGen/X86/atomic64.ll new file mode 100644 
index 0000000..45785cc --- /dev/null +++ b/test/CodeGen/X86/atomic64.ll @@ -0,0 +1,216 @@ +; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 | FileCheck %s --check-prefix X64 + +@sc64 = external global i64 + +define void @atomic_fetch_add64() nounwind { +; X64: atomic_fetch_add64 +entry: + %t1 = atomicrmw add i64* @sc64, i64 1 acquire +; X64: lock +; X64: incq + %t2 = atomicrmw add i64* @sc64, i64 3 acquire +; X64: lock +; X64: addq $3 + %t3 = atomicrmw add i64* @sc64, i64 5 acquire +; X64: lock +; X64: xaddq + %t4 = atomicrmw add i64* @sc64, i64 %t3 acquire +; X64: lock +; X64: addq + ret void +; X64: ret +} + +define void @atomic_fetch_sub64() nounwind { +; X64: atomic_fetch_sub64 + %t1 = atomicrmw sub i64* @sc64, i64 1 acquire +; X64: lock +; X64: decq + %t2 = atomicrmw sub i64* @sc64, i64 3 acquire +; X64: lock +; X64: subq $3 + %t3 = atomicrmw sub i64* @sc64, i64 5 acquire +; X64: lock +; X64: xaddq + %t4 = atomicrmw sub i64* @sc64, i64 %t3 acquire +; X64: lock +; X64: subq + ret void +; X64: ret +} + +define void @atomic_fetch_and64() nounwind { +; X64: atomic_fetch_and64 + %t1 = atomicrmw and i64* @sc64, i64 3 acquire +; X64: lock +; X64: andq $3 + %t2 = atomicrmw and i64* @sc64, i64 5 acquire +; X64: andq +; X64: lock +; X64: cmpxchgq + %t3 = atomicrmw and i64* @sc64, i64 %t2 acquire +; X64: lock +; X64: andq + ret void +; X64: ret +} + +define void @atomic_fetch_or64() nounwind { +; X64: atomic_fetch_or64 + %t1 = atomicrmw or i64* @sc64, i64 3 acquire +; X64: lock +; X64: orq $3 + %t2 = atomicrmw or i64* @sc64, i64 5 acquire +; X64: orq +; X64: lock +; X64: cmpxchgq + %t3 = atomicrmw or i64* @sc64, i64 %t2 acquire +; X64: lock +; X64: orq + ret void +; X64: ret +} + +define void @atomic_fetch_xor64() nounwind { +; X64: atomic_fetch_xor64 + %t1 = atomicrmw xor i64* @sc64, i64 3 acquire +; X64: lock +; X64: xorq $3 + %t2 = atomicrmw xor i64* @sc64, i64 5 acquire +; X64: xorq +; X64: lock +; X64: cmpxchgq + %t3 = atomicrmw xor i64* @sc64, i64 %t2 acquire +; X64: lock +; X64: xorq + ret void +; X64: ret +} + +define void @atomic_fetch_nand64(i64 %x) nounwind { +; X64: atomic_fetch_nand64 +; X32: atomic_fetch_nand64 + %t1 = atomicrmw nand i64* @sc64, i64 %x acquire +; X64: andq +; X64: notq +; X64: lock +; X64: cmpxchgq +; X32: andl +; X32: andl +; X32: notl +; X32: notl +; X32: lock +; X32: cmpxchg8b + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_max64(i64 %x) nounwind { + %t1 = atomicrmw max i64* @sc64, i64 %x acquire +; X64: cmpq +; X64: cmov +; X64: lock +; X64: cmpxchgq + +; X32: cmpl +; X32: cmpl +; X32: cmov +; X32: cmov +; X32: cmov +; X32: lock +; X32: cmpxchg8b + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_min64(i64 %x) nounwind { + %t1 = atomicrmw min i64* @sc64, i64 %x acquire +; X64: cmpq +; X64: cmov +; X64: lock +; X64: cmpxchgq + +; X32: cmpl +; X32: cmpl +; X32: cmov +; X32: cmov +; X32: cmov +; X32: lock +; X32: cmpxchg8b + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_umax64(i64 %x) nounwind { + %t1 = atomicrmw umax i64* @sc64, i64 %x acquire +; X64: cmpq +; X64: cmov +; X64: lock +; X64: cmpxchgq + +; X32: cmpl +; X32: cmpl +; X32: cmov +; X32: cmov +; X32: cmov +; X32: lock +; X32: cmpxchg8b + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_umin64(i64 %x) nounwind { + %t1 = atomicrmw umin i64* @sc64, i64 %x acquire +; X64: cmpq +; X64: cmov +; X64: lock +; X64: cmpxchgq + +; X32: cmpl +; X32: cmpl +; X32: cmov +; X32: cmov +; X32: cmov +; X32: lock +; X32: cmpxchg8b + ret void +; X64: ret +; 
X32: ret +} + +define void @atomic_fetch_cmpxchg64() nounwind { + %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire +; X64: lock +; X64: cmpxchgq +; X32: lock +; X32: cmpxchg8b + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_store64(i64 %x) nounwind { + store atomic i64 %x, i64* @sc64 release, align 8 +; X64-NOT: lock +; X64: movq +; X32: lock +; X32: cmpxchg8b + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_swap64(i64 %x) nounwind { + %t1 = atomicrmw xchg i64* @sc64, i64 %x acquire +; X64-NOT: lock +; X64: xchgq +; X32: lock +; X32: xchg8b + ret void +; X64: ret +; X32: ret +} diff --git a/test/CodeGen/X86/atomic6432.ll b/test/CodeGen/X86/atomic6432.ll new file mode 100644 index 0000000..f9b21c5 --- /dev/null +++ b/test/CodeGen/X86/atomic6432.ll @@ -0,0 +1,208 @@ +; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 | FileCheck %s --check-prefix X32 + +@sc64 = external global i64 + +define void @atomic_fetch_add64() nounwind { +; X32: atomic_fetch_add64 +entry: + %t1 = atomicrmw add i64* @sc64, i64 1 acquire +; X32: addl +; X32: adcl +; X32: lock +; X32: cmpxchg8b + %t2 = atomicrmw add i64* @sc64, i64 3 acquire +; X32: addl +; X32: adcl +; X32: lock +; X32: cmpxchg8b + %t3 = atomicrmw add i64* @sc64, i64 5 acquire +; X32: addl +; X32: adcl +; X32: lock +; X32: cmpxchg8b + %t4 = atomicrmw add i64* @sc64, i64 %t3 acquire +; X32: addl +; X32: adcl +; X32: lock +; X32: cmpxchg8b + ret void +; X32: ret +} + +define void @atomic_fetch_sub64() nounwind { +; X32: atomic_fetch_sub64 + %t1 = atomicrmw sub i64* @sc64, i64 1 acquire +; X32: subl +; X32: sbbl +; X32: lock +; X32: cmpxchg8b + %t2 = atomicrmw sub i64* @sc64, i64 3 acquire +; X32: subl +; X32: sbbl +; X32: lock +; X32: cmpxchg8b + %t3 = atomicrmw sub i64* @sc64, i64 5 acquire +; X32: subl +; X32: sbbl +; X32: lock +; X32: cmpxchg8b + %t4 = atomicrmw sub i64* @sc64, i64 %t3 acquire +; X32: subl +; X32: sbbl +; X32: lock +; X32: cmpxchg8b + ret void +; X32: ret +} + +define void @atomic_fetch_and64() nounwind { +; X32: atomic_fetch_and64 + %t1 = atomicrmw and i64* @sc64, i64 3 acquire +; X32: andl +; X32: andl +; X32: lock +; X32: cmpxchg8b + %t2 = atomicrmw and i64* @sc64, i64 5 acquire +; X32: andl +; X32: andl +; X32: lock +; X32: cmpxchg8b + %t3 = atomicrmw and i64* @sc64, i64 %t2 acquire +; X32: andl +; X32: andl +; X32: lock +; X32: cmpxchg8b + ret void +; X32: ret +} + +define void @atomic_fetch_or64() nounwind { +; X32: atomic_fetch_or64 + %t1 = atomicrmw or i64* @sc64, i64 3 acquire +; X32: orl +; X32: orl +; X32: lock +; X32: cmpxchg8b + %t2 = atomicrmw or i64* @sc64, i64 5 acquire +; X32: orl +; X32: orl +; X32: lock +; X32: cmpxchg8b + %t3 = atomicrmw or i64* @sc64, i64 %t2 acquire +; X32: orl +; X32: orl +; X32: lock +; X32: cmpxchg8b + ret void +; X32: ret +} + +define void @atomic_fetch_xor64() nounwind { +; X32: atomic_fetch_xor64 + %t1 = atomicrmw xor i64* @sc64, i64 3 acquire +; X32: xorl +; X32: xorl +; X32: lock +; X32: cmpxchg8b + %t2 = atomicrmw xor i64* @sc64, i64 5 acquire +; X32: xorl +; X32: xorl +; X32: lock +; X32: cmpxchg8b + %t3 = atomicrmw xor i64* @sc64, i64 %t2 acquire +; X32: xorl +; X32: xorl +; X32: lock +; X32: cmpxchg8b + ret void +; X32: ret +} + +define void @atomic_fetch_nand64(i64 %x) nounwind { +; X32: atomic_fetch_nand64 + %t1 = atomicrmw nand i64* @sc64, i64 %x acquire +; X32: andl +; X32: andl +; X32: notl +; X32: notl +; X32: lock +; X32: cmpxchg8b + ret void +; X32: ret +} + +define void @atomic_fetch_max64(i64 %x) nounwind { + %t1 = atomicrmw max i64* @sc64, i64 %x 
acquire +; X32: cmpl +; X32: cmpl +; X32: cmov +; X32: cmov +; X32: cmov +; X32: lock +; X32: cmpxchg8b + ret void +; X32: ret +} + +define void @atomic_fetch_min64(i64 %x) nounwind { + %t1 = atomicrmw min i64* @sc64, i64 %x acquire +; X32: cmpl +; X32: cmpl +; X32: cmov +; X32: cmov +; X32: cmov +; X32: lock +; X32: cmpxchg8b + ret void +; X32: ret +} + +define void @atomic_fetch_umax64(i64 %x) nounwind { + %t1 = atomicrmw umax i64* @sc64, i64 %x acquire +; X32: cmpl +; X32: cmpl +; X32: cmov +; X32: cmov +; X32: cmov +; X32: lock +; X32: cmpxchg8b + ret void +; X32: ret +} + +define void @atomic_fetch_umin64(i64 %x) nounwind { + %t1 = atomicrmw umin i64* @sc64, i64 %x acquire +; X32: cmpl +; X32: cmpl +; X32: cmov +; X32: cmov +; X32: cmov +; X32: lock +; X32: cmpxchg8b + ret void +; X32: ret +} + +define void @atomic_fetch_cmpxchg64() nounwind { + %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire +; X32: lock +; X32: cmpxchg8b + ret void +; X32: ret +} + +define void @atomic_fetch_store64(i64 %x) nounwind { + store atomic i64 %x, i64* @sc64 release, align 8 +; X32: lock +; X32: cmpxchg8b + ret void +; X32: ret +} + +define void @atomic_fetch_swap64(i64 %x) nounwind { + %t1 = atomicrmw xchg i64* @sc64, i64 %x acquire +; X32: lock +; X32: xchg8b + ret void +; X32: ret +} diff --git a/test/CodeGen/X86/atomic8.ll b/test/CodeGen/X86/atomic8.ll new file mode 100644 index 0000000..4124284 --- /dev/null +++ b/test/CodeGen/X86/atomic8.ll @@ -0,0 +1,250 @@ +; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 | FileCheck %s --check-prefix X64 +; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 | FileCheck %s --check-prefix X32 + +@sc8 = external global i8 + +define void @atomic_fetch_add8() nounwind { +; X64: atomic_fetch_add8 +; X32: atomic_fetch_add8 +entry: +; 32-bit + %t1 = atomicrmw add i8* @sc8, i8 1 acquire +; X64: lock +; X64: incb +; X32: lock +; X32: incb + %t2 = atomicrmw add i8* @sc8, i8 3 acquire +; X64: lock +; X64: addb $3 +; X32: lock +; X32: addb $3 + %t3 = atomicrmw add i8* @sc8, i8 5 acquire +; X64: lock +; X64: xaddb +; X32: lock +; X32: xaddb + %t4 = atomicrmw add i8* @sc8, i8 %t3 acquire +; X64: lock +; X64: addb +; X32: lock +; X32: addb + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_sub8() nounwind { +; X64: atomic_fetch_sub8 +; X32: atomic_fetch_sub8 + %t1 = atomicrmw sub i8* @sc8, i8 1 acquire +; X64: lock +; X64: decb +; X32: lock +; X32: decb + %t2 = atomicrmw sub i8* @sc8, i8 3 acquire +; X64: lock +; X64: subb $3 +; X32: lock +; X32: subb $3 + %t3 = atomicrmw sub i8* @sc8, i8 5 acquire +; X64: lock +; X64: xaddb +; X32: lock +; X32: xaddb + %t4 = atomicrmw sub i8* @sc8, i8 %t3 acquire +; X64: lock +; X64: subb +; X32: lock +; X32: subb + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_and8() nounwind { +; X64: atomic_fetch_and8 +; X32: atomic_fetch_and8 + %t1 = atomicrmw and i8* @sc8, i8 3 acquire +; X64: lock +; X64: andb $3 +; X32: lock +; X32: andb $3 + %t2 = atomicrmw and i8* @sc8, i8 5 acquire +; X64: andb +; X64: lock +; X64: cmpxchgb +; X32: andb +; X32: lock +; X32: cmpxchgb + %t3 = atomicrmw and i8* @sc8, i8 %t2 acquire +; X64: lock +; X64: andb +; X32: lock +; X32: andb + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_or8() nounwind { +; X64: atomic_fetch_or8 +; X32: atomic_fetch_or8 + %t1 = atomicrmw or i8* @sc8, i8 3 acquire +; X64: lock +; X64: orb $3 +; X32: lock +; X32: orb $3 + %t2 = atomicrmw or i8* @sc8, i8 5 acquire +; X64: orb +; X64: lock +; X64: cmpxchgb +; X32: orb +; X32: lock +; X32: cmpxchgb + %t3 = 
atomicrmw or i8* @sc8, i8 %t2 acquire +; X64: lock +; X64: orb +; X32: lock +; X32: orb + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_xor8() nounwind { +; X64: atomic_fetch_xor8 +; X32: atomic_fetch_xor8 + %t1 = atomicrmw xor i8* @sc8, i8 3 acquire +; X64: lock +; X64: xorb $3 +; X32: lock +; X32: xorb $3 + %t2 = atomicrmw xor i8* @sc8, i8 5 acquire +; X64: xorb +; X64: lock +; X64: cmpxchgb +; X32: xorb +; X32: lock +; X32: cmpxchgb + %t3 = atomicrmw xor i8* @sc8, i8 %t2 acquire +; X64: lock +; X64: xorb +; X32: lock +; X32: xorb + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_nand8(i8 %x) nounwind { +; X64: atomic_fetch_nand8 +; X32: atomic_fetch_nand8 + %t1 = atomicrmw nand i8* @sc8, i8 %x acquire +; X64: andb +; X64: notb +; X64: lock +; X64: cmpxchgb +; X32: andb +; X32: notb +; X32: lock +; X32: cmpxchgb + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_max8(i8 %x) nounwind { + %t1 = atomicrmw max i8* @sc8, i8 %x acquire +; X64: cmpb +; X64: cmov +; X64: lock +; X64: cmpxchgb + +; X32: cmpb +; X32: cmov +; X32: lock +; X32: cmpxchgb + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_min8(i8 %x) nounwind { + %t1 = atomicrmw min i8* @sc8, i8 %x acquire +; X64: cmpb +; X64: cmov +; X64: lock +; X64: cmpxchgb + +; X32: cmpb +; X32: cmov +; X32: lock +; X32: cmpxchgb + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_umax8(i8 %x) nounwind { + %t1 = atomicrmw umax i8* @sc8, i8 %x acquire +; X64: cmpb +; X64: cmov +; X64: lock +; X64: cmpxchgb + +; X32: cmpb +; X32: cmov +; X32: lock +; X32: cmpxchgb + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_umin8(i8 %x) nounwind { + %t1 = atomicrmw umin i8* @sc8, i8 %x acquire +; X64: cmpb +; X64: cmov +; X64: lock +; X64: cmpxchgb +; X32: cmpb +; X32: cmov +; X32: lock +; X32: cmpxchgb + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_cmpxchg8() nounwind { + %t1 = cmpxchg i8* @sc8, i8 0, i8 1 acquire +; X64: lock +; X64: cmpxchgb +; X32: lock +; X32: cmpxchgb + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_store8(i8 %x) nounwind { + store atomic i8 %x, i8* @sc8 release, align 4 +; X64-NOT: lock +; X64: movb +; X32-NOT: lock +; X32: movb + ret void +; X64: ret +; X32: ret +} + +define void @atomic_fetch_swap8(i8 %x) nounwind { + %t1 = atomicrmw xchg i8* @sc8, i8 %x acquire +; X64-NOT: lock +; X64: xchgb +; X32-NOT: lock +; X32: xchgb + ret void +; X64: ret +; X32: ret +} diff --git a/test/CodeGen/X86/atomic_add.ll b/test/CodeGen/X86/atomic_add.ll index 1fce256..d944998 100644 --- a/test/CodeGen/X86/atomic_add.ll +++ b/test/CodeGen/X86/atomic_add.ll @@ -178,7 +178,8 @@ entry: define void @sub2(i16* nocapture %p, i32 %v) nounwind ssp { entry: ; CHECK: sub2: -; CHECK: negl +; CHECK-NOT: negl +; CHECK: subw %0 = trunc i32 %v to i16 ; <i16> [#uses=1] %1 = atomicrmw sub i16* %p, i16 %0 monotonic ret void diff --git a/test/CodeGen/X86/atomic_op.ll b/test/CodeGen/X86/atomic_op.ll index 152bece..c5fa07d 100644 --- a/test/CodeGen/X86/atomic_op.ll +++ b/test/CodeGen/X86/atomic_op.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mcpu=generic -march=x86 | FileCheck %s +; RUN: llc < %s -mcpu=generic -march=x86 -mattr=+cmov | FileCheck %s target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128" @@ -107,13 +107,12 @@ entry: ; CHECK: cmpxchgl %17 = cmpxchg i32* %val2, i32 1976, i32 1 monotonic store i32 %17, i32* %old + ; CHECK: movl [[R17atomic:.*]], 
%eax ; CHECK: movl $1401, %[[R17mask:[a-z]*]] - ; CHECK: movl [[R17atomic:.*]], %eax - ; CHECK: movl %eax, %[[R17newval:[a-z]*]] - ; CHECK: andl %[[R17mask]], %[[R17newval]] - ; CHECK: notl %[[R17newval]] + ; CHECK: andl %eax, %[[R17mask]] + ; CHECK: notl %[[R17mask]] ; CHECK: lock - ; CHECK: cmpxchgl %[[R17newval]], [[R17atomic]] + ; CHECK: cmpxchgl %[[R17mask]], [[R17atomic]] ; CHECK: jne ; CHECK: movl %eax, %18 = atomicrmw nand i32* %val2, i32 1401 monotonic diff --git a/test/CodeGen/X86/avx-basic.ll b/test/CodeGen/X86/avx-basic.ll index 8ad0fa8..95854c7 100644 --- a/test/CodeGen/X86/avx-basic.ll +++ b/test/CodeGen/X86/avx-basic.ll @@ -109,8 +109,8 @@ allocas: ; rdar://10566486 ; CHECK: fneg ; CHECK: vxorps -define <16 x float> @fneg(<16 x float> addrspace(1)* nocapture %out) nounwind { - %1 = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00> +define <16 x float> @fneg(<16 x float> %a) nounwind { + %1 = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a ret <16 x float> %1 } diff --git a/test/CodeGen/X86/avx-intel-ocl.ll b/test/CodeGen/X86/avx-intel-ocl.ll new file mode 100644 index 0000000..1446b36 --- /dev/null +++ b/test/CodeGen/X86/avx-intel-ocl.ll @@ -0,0 +1,107 @@ +; RUN: llc < %s -mtriple=i386-pc-win32 -mcpu=corei7-avx -mattr=+avx | FileCheck -check-prefix=WIN32 %s +; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=corei7-avx -mattr=+avx | FileCheck -check-prefix=WIN64 %s +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck -check-prefix=NOT_WIN %s + +declare <16 x float> @func_float16_ptr(<16 x float>, <16 x float> *) +declare <16 x float> @func_float16(<16 x float>, <16 x float>) +; WIN64: testf16_inp +; WIN64: vaddps {{.*}}, {{%ymm[0-1]}} +; WIN64: vaddps {{.*}}, {{%ymm[0-1]}} +; WIN64: leaq {{.*}}(%rsp), %rcx +; WIN64: call +; WIN64: ret + +; WIN32: testf16_inp +; WIN32: movl %eax, (%esp) +; WIN32: vaddps {{.*}}, {{%ymm[0-1]}} +; WIN32: vaddps {{.*}}, {{%ymm[0-1]}} +; WIN32: call +; WIN32: ret + +; NOT_WIN: testf16_inp +; NOT_WIN: vaddps {{.*}}, {{%ymm[0-1]}} +; NOT_WIN: vaddps {{.*}}, {{%ymm[0-1]}} +; NOT_WIN: leaq {{.*}}(%rsp), %rdi +; NOT_WIN: call +; NOT_WIN: ret + +;test calling conventions - input parameters +define <16 x float> @testf16_inp(<16 x float> %a, <16 x float> %b) nounwind { + %y = alloca <16 x float>, align 16 + %x = fadd <16 x float> %a, %b + %1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y) + %2 = load <16 x float>* %y, align 16 + %3 = fadd <16 x float> %2, %1 + ret <16 x float> %3 +} + +;test calling conventions - preserved registers + +; preserved ymm6-ymm15 +; WIN64: 
testf16_regs +; WIN64: call +; WIN64: vaddps {{%ymm[6-7]}}, %ymm0, %ymm0 +; WIN64: vaddps {{%ymm[6-7]}}, %ymm1, %ymm1 +; WIN64: ret + +; preserved ymm8-ymm15 +; NOT_WIN: testf16_regs +; NOT_WIN: call +; NOT_WIN: vaddps {{%ymm[8-9]}}, %ymm0, %ymm0 +; NOT_WIN: vaddps {{%ymm[8-9]}}, %ymm1, %ymm1 +; NOT_WIN: ret + +define <16 x float> @testf16_regs(<16 x float> %a, <16 x float> %b) nounwind { + %y = alloca <16 x float>, align 16 + %x = fadd <16 x float> %a, %b + %1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y) + %2 = load <16 x float>* %y, align 16 + %3 = fadd <16 x float> %1, %b + %4 = fadd <16 x float> %2, %3 + ret <16 x float> %4 +} + +; test calling conventions - prolog and epilog +; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill +; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill +; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill +; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill +; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill +; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill +; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill +; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill +; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill +; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill +; WIN64: call +; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload +; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload +; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload +; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload +; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload +; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload +; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload +; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload +; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload +; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload + +; NOT_WIN: vmovaps {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rbp) ## 32-byte Spill +; NOT_WIN: vmovaps {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rbp) ## 32-byte Spill +; NOT_WIN: vmovaps {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rbp) ## 32-byte Spill +; NOT_WIN: vmovaps {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rbp) ## 32-byte Spill +; NOT_WIN: vmovaps {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rbp) ## 32-byte Spill +; NOT_WIN: vmovaps {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rbp) ## 32-byte Spill +; NOT_WIN: vmovaps {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rbp) ## 32-byte Spill +; NOT_WIN: vmovaps {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rbp) ## 32-byte Spill +; NOT_WIN: call +; NOT_WIN: vmovaps {{.*}}(%rbp), {{%ymm([8-9]|1[0-5])}} ## 32-byte Reload +; NOT_WIN: vmovaps {{.*}}(%rbp), {{%ymm([8-9]|1[0-5])}} ## 32-byte Reload +; NOT_WIN: vmovaps {{.*}}(%rbp), {{%ymm([8-9]|1[0-5])}} ## 32-byte Reload +; NOT_WIN: vmovaps {{.*}}(%rbp), {{%ymm([8-9]|1[0-5])}} ## 32-byte Reload +; NOT_WIN: vmovaps {{.*}}(%rbp), {{%ymm([8-9]|1[0-5])}} ## 32-byte Reload +; NOT_WIN: vmovaps {{.*}}(%rbp), {{%ymm([8-9]|1[0-5])}} ## 32-byte Reload +; NOT_WIN: vmovaps {{.*}}(%rbp), {{%ymm([8-9]|1[0-5])}} ## 32-byte Reload +; NOT_WIN: vmovaps {{.*}}(%rbp), {{%ymm([8-9]|1[0-5])}} ## 32-byte Reload +define intel_ocl_bicc <16 x float> @test_prolog_epilog(<16 x float> %a, <16 x float> %b) nounwind { + %c = call <16 x float> 
@func_float16(<16 x float> %a, <16 x float> %b) + ret <16 x float> %c +} diff --git a/test/CodeGen/X86/avx-intrinsics-x86.ll b/test/CodeGen/X86/avx-intrinsics-x86.ll index c44beb4..88ecd5a 100644 --- a/test/CodeGen/X86/avx-intrinsics-x86.ll +++ b/test/CodeGen/X86/avx-intrinsics-x86.ll @@ -1140,9 +1140,9 @@ declare <4 x float> @llvm.x86.sse41.round.ss(<4 x float>, <4 x float>, i32) noun define i32 @test_x86_sse42_pcmpestri128(<16 x i8> %a0, <16 x i8> %a2) { - ; CHECK: movl - ; CHECK: movl - ; CHECK: vpcmpestri + ; CHECK: movl $7 + ; CHECK: movl $7 + ; CHECK: vpcmpestri $7 ; CHECK: movl %res = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1] ret i32 %res @@ -1150,6 +1150,18 @@ define i32 @test_x86_sse42_pcmpestri128(<16 x i8> %a0, <16 x i8> %a2) { declare i32 @llvm.x86.sse42.pcmpestri128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone +define i32 @test_x86_sse42_pcmpestri128_load(<16 x i8>* %a0, <16 x i8>* %a2) { + ; CHECK: movl $7 + ; CHECK: movl $7 + ; CHECK: vpcmpestri $7, ( + ; CHECK: movl + %1 = load <16 x i8>* %a0 + %2 = load <16 x i8>* %a2 + %res = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %1, i32 7, <16 x i8> %2, i32 7, i8 7) ; <i32> [#uses=1] + ret i32 %res +} + + define i32 @test_x86_sse42_pcmpestria128(<16 x i8> %a0, <16 x i8> %a2) { ; CHECK: movl ; CHECK: movl @@ -1216,8 +1228,19 @@ define <16 x i8> @test_x86_sse42_pcmpestrm128(<16 x i8> %a0, <16 x i8> %a2) { declare <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone +define <16 x i8> @test_x86_sse42_pcmpestrm128_load(<16 x i8> %a0, <16 x i8>* %a2) { + ; CHECK: movl $7 + ; CHECK: movl $7 + ; CHECK: vpcmpestrm $7, + ; CHECK-NOT: vmov + %1 = load <16 x i8>* %a2 + %res = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %1, i32 7, i8 7) ; <<16 x i8>> [#uses=1] + ret <16 x i8> %res +} + + define i32 @test_x86_sse42_pcmpistri128(<16 x i8> %a0, <16 x i8> %a1) { - ; CHECK: vpcmpistri + ; CHECK: vpcmpistri $7 ; CHECK: movl %res = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1] ret i32 %res @@ -1225,6 +1248,16 @@ define i32 @test_x86_sse42_pcmpistri128(<16 x i8> %a0, <16 x i8> %a1) { declare i32 @llvm.x86.sse42.pcmpistri128(<16 x i8>, <16 x i8>, i8) nounwind readnone +define i32 @test_x86_sse42_pcmpistri128_load(<16 x i8>* %a0, <16 x i8>* %a1) { + ; CHECK: vpcmpistri $7, ( + ; CHECK: movl + %1 = load <16 x i8>* %a0 + %2 = load <16 x i8>* %a1 + %res = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %1, <16 x i8> %2, i8 7) ; <i32> [#uses=1] + ret i32 %res +} + + define i32 @test_x86_sse42_pcmpistria128(<16 x i8> %a0, <16 x i8> %a1) { ; CHECK: vpcmpistri ; CHECK: seta @@ -1271,7 +1304,7 @@ declare i32 @llvm.x86.sse42.pcmpistriz128(<16 x i8>, <16 x i8>, i8) nounwind rea define <16 x i8> @test_x86_sse42_pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1) { - ; CHECK: vpcmpistrm + ; CHECK: vpcmpistrm $7 ; CHECK-NOT: vmov %res = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <<16 x i8>> [#uses=1] ret <16 x i8> %res @@ -1279,6 +1312,15 @@ define <16 x i8> @test_x86_sse42_pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1) { declare <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8>, <16 x i8>, i8) nounwind readnone +define <16 x i8> @test_x86_sse42_pcmpistrm128_load(<16 x i8> %a0, <16 x i8>* %a1) { + ; CHECK: vpcmpistrm $7, ( + ; CHECK-NOT: vmov + %1 = load <16 x i8>* %a1 + %res = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %1, 
i8 7) ; <<16 x i8>> [#uses=1] + ret <16 x i8> %res +} + + define <4 x float> @test_x86_sse_add_ss(<4 x float> %a0, <4 x float> %a1) { ; CHECK: vaddss %res = call <4 x float> @llvm.x86.sse.add.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1] diff --git a/test/CodeGen/X86/avx-shuffle.ll b/test/CodeGen/X86/avx-shuffle.ll index 9b41709..ec11654 100644 --- a/test/CodeGen/X86/avx-shuffle.ll +++ b/test/CodeGen/X86/avx-shuffle.ll @@ -229,9 +229,8 @@ define <8 x float> @test17(<4 x float> %y) { } ; CHECK: test18 -; CHECK: vshufps -; CHECK: vshufps -; CHECK: vunpcklps +; CHECK: vmovshdup +; CHECK: vblendps ; CHECK: ret define <8 x float> @test18(<8 x float> %A, <8 x float>%B) nounwind { %S = shufflevector <8 x float> %A, <8 x float> %B, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15> @@ -239,9 +238,8 @@ define <8 x float> @test18(<8 x float> %A, <8 x float>%B) nounwind { } ; CHECK: test19 -; CHECK: vshufps -; CHECK: vshufps -; CHECK: vunpcklps +; CHECK: vmovsldup +; CHECK: vblendps ; CHECK: ret define <8 x float> @test19(<8 x float> %A, <8 x float>%B) nounwind { %S = shufflevector <8 x float> %A, <8 x float> %B, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14> diff --git a/test/CodeGen/X86/avx-vextractf128.ll b/test/CodeGen/X86/avx-vextractf128.ll index fe0f6ca..ff56a45 100644 --- a/test/CodeGen/X86/avx-vextractf128.ll +++ b/test/CodeGen/X86/avx-vextractf128.ll @@ -19,12 +19,12 @@ entry: } ; CHECK: @t0 -; CHECK-NOT: vextractf128 $0, %ymm0, %xmm0 +; CHECK-NOT: vextractf128 $1, %ymm0, %xmm0 ; CHECK-NOT: vmovaps %xmm0, (%rdi) -; CHECK: vextractf128 $0, %ymm0, (%rdi) +; CHECK: vextractf128 $1, %ymm0, (%rdi) define void @t0(float* nocapture %addr, <8 x float> %a) nounwind uwtable ssp { entry: - %0 = tail call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a, i8 0) + %0 = tail call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a, i8 1) %1 = bitcast float* %addr to <4 x float>* store <4 x float> %0, <4 x float>* %1, align 16 ret void @@ -32,27 +32,13 @@ entry: declare <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float>, i8) nounwind readnone -; CHECK: @t1 -; CHECK-NOT: vextractf128 $0, %ymm0, %xmm0 -; CHECK-NOT: vmovups %xmm0, (%rdi) -; CHECK: vextractf128 $0, %ymm0, (%rdi) -define void @t1(float* %addr, <8 x float> %a) nounwind uwtable ssp { -entry: - %0 = tail call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a, i8 0) - %1 = bitcast float* %addr to i8* - tail call void @llvm.x86.sse.storeu.ps(i8* %1, <4 x float> %0) - ret void -} - -declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind - ; CHECK: @t2 -; CHECK-NOT: vextractf128 $0, %ymm0, %xmm0 +; CHECK-NOT: vextractf128 $1, %ymm0, %xmm0 ; CHECK-NOT: vmovaps %xmm0, (%rdi) -; CHECK: vextractf128 $0, %ymm0, (%rdi) +; CHECK: vextractf128 $1, %ymm0, (%rdi) define void @t2(double* nocapture %addr, <4 x double> %a) nounwind uwtable ssp { entry: - %0 = tail call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a, i8 0) + %0 = tail call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a, i8 1) %1 = bitcast double* %addr to <2 x double>* store <2 x double> %0, <2 x double>* %1, align 16 ret void @@ -60,28 +46,14 @@ entry: declare <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double>, i8) nounwind readnone -; CHECK: @t3 -; CHECK-NOT: vextractf128 $0, %ymm0, %xmm0 -; CHECK-NOT: vmovups %xmm0, (%rdi) -; CHECK: vextractf128 $0, %ymm0, (%rdi) -define void @t3(double* %addr, <4 x double> %a) nounwind uwtable ssp { -entry: - %0 = 
tail call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a, i8 0) - %1 = bitcast double* %addr to i8* - tail call void @llvm.x86.sse2.storeu.pd(i8* %1, <2 x double> %0) - ret void -} - -declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind - ; CHECK: @t4 -; CHECK-NOT: vextractf128 $0, %ymm0, %xmm0 +; CHECK-NOT: vextractf128 $1, %ymm0, %xmm0 ; CHECK-NOT: vmovaps %xmm0, (%rdi) -; CHECK: vextractf128 $0, %ymm0, (%rdi) +; CHECK: vextractf128 $1, %ymm0, (%rdi) define void @t4(<2 x i64>* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp { entry: %0 = bitcast <4 x i64> %a to <8 x i32> - %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 0) + %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 1) %2 = bitcast <4 x i32> %1 to <2 x i64> store <2 x i64> %2, <2 x i64>* %addr, align 16 ret void @@ -90,17 +62,43 @@ entry: declare <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32>, i8) nounwind readnone ; CHECK: @t5 -; CHECK-NOT: vextractf128 $0, %ymm0, %xmm0 -; CHECK-NOT: vmovdqu %xmm0, (%rdi) -; CHECK: vextractf128 $0, %ymm0, (%rdi) -define void @t5(<2 x i64>* %addr, <4 x i64> %a) nounwind uwtable ssp { +; CHECK: vmovaps %xmm0, (%rdi) +define void @t5(float* nocapture %addr, <8 x float> %a) nounwind uwtable ssp { +entry: + %0 = tail call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a, i8 0) + %1 = bitcast float* %addr to <4 x float>* + store <4 x float> %0, <4 x float>* %1, align 16 + ret void +} + +; CHECK: @t6 +; CHECK: vmovaps %xmm0, (%rdi) +define void @t6(double* nocapture %addr, <4 x double> %a) nounwind uwtable ssp { +entry: + %0 = tail call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a, i8 0) + %1 = bitcast double* %addr to <2 x double>* + store <2 x double> %0, <2 x double>* %1, align 16 + ret void +} + +; CHECK: @t7 +; CHECK: vmovaps %xmm0, (%rdi) +define void @t7(<2 x i64>* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp { entry: %0 = bitcast <4 x i64> %a to <8 x i32> %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 0) - %2 = bitcast <2 x i64>* %addr to i8* - %3 = bitcast <4 x i32> %1 to <16 x i8> - tail call void @llvm.x86.sse2.storeu.dq(i8* %2, <16 x i8> %3) + %2 = bitcast <4 x i32> %1 to <2 x i64> + store <2 x i64> %2, <2 x i64>* %addr, align 16 ret void } -declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind +; CHECK: @t8 +; CHECK: vmovups %xmm0, (%rdi) +define void @t8(<2 x i64>* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp { +entry: + %0 = bitcast <4 x i64> %a to <8 x i32> + %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 0) + %2 = bitcast <4 x i32> %1 to <2 x i64> + store <2 x i64> %2, <2 x i64>* %addr, align 1 + ret void +} diff --git a/test/CodeGen/X86/avx2-shuffle.ll b/test/CodeGen/X86/avx2-shuffle.ll index c5899fa..a414e68 100644 --- a/test/CodeGen/X86/avx2-shuffle.ll +++ b/test/CodeGen/X86/avx2-shuffle.ll @@ -26,3 +26,37 @@ entry: %shuffle.i = shufflevector <16 x i16> %src1, <16 x i16> %src1, <16 x i32> <i32 3, i32 undef, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7, i32 11, i32 10, i32 9, i32 8, i32 12, i32 13, i32 14, i32 15> ret <16 x i16> %shuffle.i } + +; CHECK: vpshufb_test +; CHECK: vpshufb {{.*\(%r.*}}, %ymm +; CHECK: ret +define <32 x i8> @vpshufb_test(<32 x i8> %a) nounwind { + %S = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15, + i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15, + i32 18, i32 19, i32 30, i32 16, 
i32 25, i32 23, i32 17, i32 25, + i32 20, i32 19, i32 31, i32 17, i32 23, i32 undef, i32 29, i32 18> + ret <32 x i8>%S +} + +; CHECK: vpshufb1_test +; CHECK: vpshufb {{.*\(%r.*}}, %ymm +; CHECK: ret +define <32 x i8> @vpshufb1_test(<32 x i8> %a) nounwind { + %S = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15, + i32 1, i32 9, i32 36, i32 11, i32 5, i32 13, i32 7, i32 15, + i32 18, i32 49, i32 30, i32 16, i32 25, i32 23, i32 17, i32 25, + i32 20, i32 19, i32 31, i32 17, i32 23, i32 undef, i32 29, i32 18> + ret <32 x i8>%S +} + + +; CHECK: vpshufb2_test +; CHECK: vpshufb {{.*\(%r.*}}, %ymm +; CHECK: ret +define <32 x i8> @vpshufb2_test(<32 x i8> %a) nounwind { + %S = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a, <32 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15, + i32 1, i32 9, i32 36, i32 11, i32 5, i32 13, i32 7, i32 15, + i32 18, i32 49, i32 30, i32 16, i32 25, i32 23, i32 17, i32 25, + i32 20, i32 19, i32 31, i32 17, i32 23, i32 undef, i32 29, i32 18> + ret <32 x i8>%S +} diff --git a/test/CodeGen/X86/bitcast-i256.ll b/test/CodeGen/X86/bitcast-i256.ll new file mode 100644 index 0000000..85ac2fe --- /dev/null +++ b/test/CodeGen/X86/bitcast-i256.ll @@ -0,0 +1,11 @@ +; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=core-avx-i < %s | FileCheck %s --check-prefix CHECK + +define i256 @foo(<8 x i32> %a) { + %r = bitcast <8 x i32> %a to i256 + ret i256 %r +; CHECK: foo +; CHECK: vextractf128 +; CHECK: vpextrq +; CHECK: vpextrq +; CHECK: ret +} diff --git a/test/CodeGen/X86/bool-simplify.ll b/test/CodeGen/X86/bool-simplify.ll index 0cb9fd9..09eb5d1 100644 --- a/test/CodeGen/X86/bool-simplify.ll +++ b/test/CodeGen/X86/bool-simplify.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=x86-64 -mattr=+sse41,-avx | FileCheck %s +; RUN: llc < %s -march=x86-64 -mattr=+sse41,-avx,+rdrand | FileCheck %s define i32 @foo(<2 x i64> %c, i32 %a, i32 %b) { %t1 = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %c, <2 x i64> %c) @@ -39,4 +39,20 @@ define i32 @bax(<2 x i64> %c) { ; CHECK: ret } +define i32 @rnd(i32 %arg) nounwind uwtable { + %1 = tail call { i32, i32 } @llvm.x86.rdrand.32() nounwind + %2 = extractvalue { i32, i32 } %1, 0 + %3 = extractvalue { i32, i32 } %1, 1 + %4 = icmp eq i32 %3, 0 + %5 = select i1 %4, i32 0, i32 %arg + %6 = add i32 %5, %2 + ret i32 %6 +; CHECK: rnd +; CHECK: rdrand +; CHECK: cmov +; CHECK-NOT: cmov +; CHECK: ret +} + declare i32 @llvm.x86.sse41.ptestz(<2 x i64>, <2 x i64>) nounwind readnone +declare { i32, i32 } @llvm.x86.rdrand.32() nounwind diff --git a/test/CodeGen/X86/buildvec-insertvec.ll b/test/CodeGen/X86/buildvec-insertvec.ll new file mode 100644 index 0000000..3fb69a4 --- /dev/null +++ b/test/CodeGen/X86/buildvec-insertvec.ll @@ -0,0 +1,15 @@ +; RUN: llc < %s -mcpu=corei7 -mtriple=x86_64-unknown-linux-gnu | FileCheck %s + +define void @foo(<3 x float> %in, <4 x i8>* nocapture %out) nounwind { + %t0 = fptoui <3 x float> %in to <3 x i8> + %t1 = shufflevector <3 x i8> %t0, <3 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef> + %t2 = insertelement <4 x i8> %t1, i8 -1, i32 3 + store <4 x i8> %t2, <4 x i8>* %out, align 4 + ret void +; CHECK: foo +; CHECK: cvttps2dq +; CHECK-NOT: pextrd +; CHECK: pinsrd +; CHECK-NEXT: pshufb +; CHECK: ret +} diff --git a/test/CodeGen/X86/cmov-fp.ll b/test/CodeGen/X86/cmov-fp.ll new file mode 100644 index 0000000..ca91f9e --- /dev/null +++ b/test/CodeGen/X86/cmov-fp.ll @@ -0,0 +1,451 @@ +; RUN: llc -march x86 -mcpu pentium4 < %s | FileCheck %s 
-check-prefix=SSE +; RUN: llc -march x86 -mcpu pentium3 < %s | FileCheck %s -check-prefix=NOSSE2 +; RUN: llc -march x86 -mcpu pentium2 < %s | FileCheck %s -check-prefix=NOSSE1 +; RUN: llc -march x86 -mcpu pentium < %s | FileCheck %s -check-prefix=NOCMOV +; PR14035 + +define double @test1(i32 %a, i32 %b, double %x) nounwind { + %cmp = icmp ugt i32 %a, %b + %sel = select i1 %cmp, double 99.0, double %x + ret double %sel + +; SSE: test1: +; SSE: movsd + +; NOSSE2: test1: +; NOSSE2: fcmovnbe + +; NOSSE1: test1: +; NOSSE1: fcmovnbe + +; NOCMOV: test1: +; NOCMOV: fstp + +} + +define double @test2(i32 %a, i32 %b, double %x) nounwind { + %cmp = icmp uge i32 %a, %b + %sel = select i1 %cmp, double 99.0, double %x + ret double %sel + +; SSE: test2: +; SSE: movsd + +; NOSSE2: test2: +; NOSSE2: fcmovnb + +; NOSSE1: test2: +; NOSSE1: fcmovnb + +; NOCMOV: test2: +; NOCMOV: fstp +} + +define double @test3(i32 %a, i32 %b, double %x) nounwind { + %cmp = icmp ult i32 %a, %b + %sel = select i1 %cmp, double 99.0, double %x + ret double %sel + +; SSE: test3: +; SSE: movsd + +; NOSSE2: test3: +; NOSSE2: fcmovb + +; NOSSE1: test3: +; NOSSE1: fcmovb + +; NOCMOV: test3: +; NOCMOV: fstp +} + +define double @test4(i32 %a, i32 %b, double %x) nounwind { + %cmp = icmp ule i32 %a, %b + %sel = select i1 %cmp, double 99.0, double %x + ret double %sel + +; SSE: test4: +; SSE: movsd + +; NOSSE2: test4: +; NOSSE2: fcmovbe + +; NOSSE1: test4: +; NOSSE1: fcmovbe + +; NOCMOV: test4: +; NOCMOV: fstp +} + +define double @test5(i32 %a, i32 %b, double %x) nounwind { + %cmp = icmp sgt i32 %a, %b + %sel = select i1 %cmp, double 99.0, double %x + ret double %sel + +; SSE: test5: +; SSE: movsd + +; NOSSE2: test5: +; NOSSE2: fstp + +; NOSSE1: test5: +; NOSSE1: fstp + +; NOCMOV: test5: +; NOCMOV: fstp +} + +define double @test6(i32 %a, i32 %b, double %x) nounwind { + %cmp = icmp sge i32 %a, %b + %sel = select i1 %cmp, double 99.0, double %x + ret double %sel + +; SSE: test6: +; SSE: movsd + +; NOSSE2: test6: +; NOSSE2: fstp + +; NOSSE1: test6: +; NOSSE1: fstp + +; NOCMOV: test6: +; NOCMOV: fstp +} + +define double @test7(i32 %a, i32 %b, double %x) nounwind { + %cmp = icmp slt i32 %a, %b + %sel = select i1 %cmp, double 99.0, double %x + ret double %sel + +; SSE: test7: +; SSE: movsd + +; NOSSE2: test7: +; NOSSE2: fstp + +; NOSSE1: test7: +; NOSSE1: fstp + +; NOCMOV: test7: +; NOCMOV: fstp +} + +define double @test8(i32 %a, i32 %b, double %x) nounwind { + %cmp = icmp sle i32 %a, %b + %sel = select i1 %cmp, double 99.0, double %x + ret double %sel + +; SSE: test8: +; SSE: movsd + +; NOSSE2: test8: +; NOSSE2: fstp + +; NOSSE1: test8: +; NOSSE1: fstp + +; NOCMOV: test8: +; NOCMOV: fstp +} + +define float @test9(i32 %a, i32 %b, float %x) nounwind { + %cmp = icmp ugt i32 %a, %b + %sel = select i1 %cmp, float 99.0, float %x + ret float %sel + +; SSE: test9: +; SSE: movss + +; NOSSE2: test9: +; NOSSE2: movss + +; NOSSE1: test9: +; NOSSE1: fcmovnbe + +; NOCMOV: test9: +; NOCMOV: fstp +} + +define float @test10(i32 %a, i32 %b, float %x) nounwind { + %cmp = icmp uge i32 %a, %b + %sel = select i1 %cmp, float 99.0, float %x + ret float %sel + +; SSE: test10: +; SSE: movss + +; NOSSE2: test10: +; NOSSE2: movss + +; NOSSE1: test10: +; NOSSE1: fcmovnb + +; NOCMOV: test10: +; NOCMOV: fstp +} + +define float @test11(i32 %a, i32 %b, float %x) nounwind { + %cmp = icmp ult i32 %a, %b + %sel = select i1 %cmp, float 99.0, float %x + ret float %sel + +; SSE: test11: +; SSE: movss + +; NOSSE2: test11: +; NOSSE2: movss + +; NOSSE1: test11: +; NOSSE1: fcmovb + +; 
NOCMOV: test11: +; NOCMOV: fstp +} + +define float @test12(i32 %a, i32 %b, float %x) nounwind { + %cmp = icmp ule i32 %a, %b + %sel = select i1 %cmp, float 99.0, float %x + ret float %sel + +; SSE: test12: +; SSE: movss + +; NOSSE2: test12: +; NOSSE2: movss + +; NOSSE1: test12: +; NOSSE1: fcmovbe + +; NOCMOV: test12: +; NOCMOV: fstp +} + +define float @test13(i32 %a, i32 %b, float %x) nounwind { + %cmp = icmp sgt i32 %a, %b + %sel = select i1 %cmp, float 99.0, float %x + ret float %sel + +; SSE: test13: +; SSE: movss + +; NOSSE2: test13: +; NOSSE2: movss + +; NOSSE1: test13: +; NOSSE1: fstp + +; NOCMOV: test13: +; NOCMOV: fstp +} + +define float @test14(i32 %a, i32 %b, float %x) nounwind { + %cmp = icmp sge i32 %a, %b + %sel = select i1 %cmp, float 99.0, float %x + ret float %sel + +; SSE: test14: +; SSE: movss + +; NOSSE2: test14: +; NOSSE2: movss + +; NOSSE1: test14: +; NOSSE1: fstp + +; NOCMOV: test14: +; NOCMOV: fstp +} + +define float @test15(i32 %a, i32 %b, float %x) nounwind { + %cmp = icmp slt i32 %a, %b + %sel = select i1 %cmp, float 99.0, float %x + ret float %sel + +; SSE: test15: +; SSE: movss + +; NOSSE2: test15: +; NOSSE2: movss + +; NOSSE1: test15: +; NOSSE1: fstp + +; NOCMOV: test15: +; NOCMOV: fstp +} + +define float @test16(i32 %a, i32 %b, float %x) nounwind { + %cmp = icmp sle i32 %a, %b + %sel = select i1 %cmp, float 99.0, float %x + ret float %sel + +; SSE: test16: +; SSE: movss + +; NOSSE2: test16: +; NOSSE2: movss + +; NOSSE1: test16: +; NOSSE1: fstp + +; NOCMOV: test16: +; NOCMOV: fstp +} + +define x86_fp80 @test17(i32 %a, i32 %b, x86_fp80 %x) nounwind { + %cmp = icmp ugt i32 %a, %b + %sel = select i1 %cmp, x86_fp80 0xK4005C600000000000000, x86_fp80 %x + ret x86_fp80 %sel + +; SSE: test17: +; SSE: fcmovnbe + +; NOSSE2: test17: +; NOSSE2: fcmovnbe + +; NOSSE1: test17: +; NOSSE1: fcmovnbe + +; NOCMOV: test17: +; NOCMOV: fstp +} + +define x86_fp80 @test18(i32 %a, i32 %b, x86_fp80 %x) nounwind { + %cmp = icmp uge i32 %a, %b + %sel = select i1 %cmp, x86_fp80 0xK4005C600000000000000, x86_fp80 %x + ret x86_fp80 %sel + +; SSE: test18: +; SSE: fcmovnb + +; NOSSE2: test18: +; NOSSE2: fcmovnb + +; NOSSE1: test18: +; NOSSE1: fcmovnb + +; NOCMOV: test18: +; NOCMOV: fstp +} + +define x86_fp80 @test19(i32 %a, i32 %b, x86_fp80 %x) nounwind { + %cmp = icmp ult i32 %a, %b + %sel = select i1 %cmp, x86_fp80 0xK4005C600000000000000, x86_fp80 %x + ret x86_fp80 %sel + +; SSE: test19: +; SSE: fcmovb + +; NOSSE2: test19: +; NOSSE2: fcmovb + +; NOSSE1: test19: +; NOSSE1: fcmovb + +; NOCMOV: test19: +; NOCMOV: fstp +} + +define x86_fp80 @test20(i32 %a, i32 %b, x86_fp80 %x) nounwind { + %cmp = icmp ule i32 %a, %b + %sel = select i1 %cmp, x86_fp80 0xK4005C600000000000000, x86_fp80 %x + ret x86_fp80 %sel + +; SSE: test20: +; SSE: fcmovbe + +; NOSSE2: test20: +; NOSSE2: fcmovbe + +; NOSSE1: test20: +; NOSSE1: fcmovbe + +; NOCMOV: test20: +; NOCMOV: fstp +} + +define x86_fp80 @test21(i32 %a, i32 %b, x86_fp80 %x) nounwind { + %cmp = icmp sgt i32 %a, %b + %sel = select i1 %cmp, x86_fp80 0xK4005C600000000000000, x86_fp80 %x + ret x86_fp80 %sel + +; We don't emit a branch for fp80, why? 
+; SSE: test21: +; SSE: testb +; SSE: fcmovne + +; NOSSE2: test21: +; NOSSE2: testb +; NOSSE2: fcmovne + +; NOSSE1: test21: +; NOSSE1: testb +; NOSSE1: fcmovne + +; NOCMOV: test21: +; NOCMOV: fstp +} + +define x86_fp80 @test22(i32 %a, i32 %b, x86_fp80 %x) nounwind { + %cmp = icmp sge i32 %a, %b + %sel = select i1 %cmp, x86_fp80 0xK4005C600000000000000, x86_fp80 %x + ret x86_fp80 %sel + +; SSE: test22: +; SSE: testb +; SSE: fcmovne + +; NOSSE2: test22: +; NOSSE2: testb +; NOSSE2: fcmovne + +; NOSSE1: test22: +; NOSSE1: testb +; NOSSE1: fcmovne + +; NOCMOV: test22: +; NOCMOV: fstp +} + +define x86_fp80 @test23(i32 %a, i32 %b, x86_fp80 %x) nounwind { + %cmp = icmp slt i32 %a, %b + %sel = select i1 %cmp, x86_fp80 0xK4005C600000000000000, x86_fp80 %x + ret x86_fp80 %sel + +; SSE: test23: +; SSE: testb +; SSE: fcmovne + +; NOSSE2: test23: +; NOSSE2: testb +; NOSSE2: fcmovne + +; NOSSE1: test23: +; NOSSE1: testb +; NOSSE1: fcmovne + +; NOCMOV: test23: +; NOCMOV: fstp +} + +define x86_fp80 @test24(i32 %a, i32 %b, x86_fp80 %x) nounwind { + %cmp = icmp sle i32 %a, %b + %sel = select i1 %cmp, x86_fp80 0xK4005C600000000000000, x86_fp80 %x + ret x86_fp80 %sel + +; SSE: test24: +; SSE: testb +; SSE: fcmovne + +; NOSSE2: test24: +; NOSSE2: testb +; NOSSE2: fcmovne + +; NOSSE1: test24: +; NOSSE1: testb +; NOSSE1: fcmovne + +; NOCMOV: test24: +; NOCMOV: fstp +} diff --git a/test/CodeGen/X86/crash.ll b/test/CodeGen/X86/crash.ll index 9badfc8..276d0db 100644 --- a/test/CodeGen/X86/crash.ll +++ b/test/CodeGen/X86/crash.ll @@ -442,3 +442,150 @@ entry: ret void } declare void @_Z6PrintFz(...) + +@a = external global i32, align 4 +@fn1.g = private unnamed_addr constant [9 x i32*] [i32* null, i32* @a, i32* null, i32* null, i32* null, i32* null, i32* null, i32* null, i32* null], align 16 +@e = external global i32, align 4 + +define void @pr13943() nounwind uwtable ssp { +entry: + %srcval = load i576* bitcast ([9 x i32*]* @fn1.g to i576*), align 16 + br label %for.cond + +for.cond: ; preds = %for.inc, %entry + %g.0 = phi i576 [ %srcval, %entry ], [ %ins, %for.inc ] + %0 = load i32* @e, align 4 + %1 = lshr i576 %g.0, 64 + %2 = trunc i576 %1 to i64 + %3 = inttoptr i64 %2 to i32* + %cmp = icmp eq i32* undef, %3 + %conv2 = zext i1 %cmp to i32 + %and = and i32 %conv2, %0 + tail call void (...)* @fn3(i32 %and) nounwind + %tobool = icmp eq i32 undef, 0 + br i1 %tobool, label %for.inc, label %if.then + +if.then: ; preds = %for.cond + ret void + +for.inc: ; preds = %for.cond + %4 = shl i576 %1, 384 + %mask = and i576 %g.0, -726838724295606890509921801691610055141362320587174446476410459910173841445449629921945328942266354949348255351381262292727973638307841 + %5 = and i576 %4, 726838724295606890509921801691610055141362320587174446476410459910173841445449629921945328942266354949348255351381262292727973638307840 + %ins = or i576 %5, %mask + br label %for.cond +} + +declare void @fn3(...) + +; Check coalescing of IMPLICIT_DEF instructions: +; +; %vreg1 = IMPLICIT_DEF +; %vreg2 = MOV32r0 +; +; When coalescing %vreg1 and %vreg2, the IMPLICIT_DEF instruction should be +; erased along with its value number. 
+; +define void @rdar12474033() nounwind ssp { +bb: + br i1 undef, label %bb21, label %bb1 + +bb1: ; preds = %bb + switch i32 undef, label %bb10 [ + i32 4, label %bb2 + i32 1, label %bb9 + i32 5, label %bb3 + i32 6, label %bb3 + i32 2, label %bb9 + ] + +bb2: ; preds = %bb1 + unreachable + +bb3: ; preds = %bb1, %bb1 + br i1 undef, label %bb4, label %bb5 + +bb4: ; preds = %bb3 + unreachable + +bb5: ; preds = %bb3 + %tmp = load <4 x float>* undef, align 1 + %tmp6 = bitcast <4 x float> %tmp to i128 + %tmp7 = load <4 x float>* undef, align 1 + %tmp8 = bitcast <4 x float> %tmp7 to i128 + br label %bb10 + +bb9: ; preds = %bb1, %bb1 + unreachable + +bb10: ; preds = %bb5, %bb1 + %tmp11 = phi i128 [ undef, %bb1 ], [ %tmp6, %bb5 ] + %tmp12 = phi i128 [ 0, %bb1 ], [ %tmp8, %bb5 ] + switch i32 undef, label %bb21 [ + i32 2, label %bb18 + i32 3, label %bb13 + i32 5, label %bb16 + i32 6, label %bb17 + i32 1, label %bb18 + ] + +bb13: ; preds = %bb10 + br i1 undef, label %bb15, label %bb14 + +bb14: ; preds = %bb13 + br label %bb21 + +bb15: ; preds = %bb13 + unreachable + +bb16: ; preds = %bb10 + unreachable + +bb17: ; preds = %bb10 + unreachable + +bb18: ; preds = %bb10, %bb10 + %tmp19 = bitcast i128 %tmp11 to <4 x float> + %tmp20 = bitcast i128 %tmp12 to <4 x float> + br label %bb21 + +bb21: ; preds = %bb18, %bb14, %bb10, %bb + %tmp22 = phi <4 x float> [ undef, %bb ], [ undef, %bb10 ], [ undef, %bb14 ], [ %tmp20, %bb18 ] + %tmp23 = phi <4 x float> [ undef, %bb ], [ undef, %bb10 ], [ undef, %bb14 ], [ %tmp19, %bb18 ] + store <4 x float> %tmp23, <4 x float>* undef, align 16 + store <4 x float> %tmp22, <4 x float>* undef, align 16 + switch i32 undef, label %bb29 [ + i32 5, label %bb27 + i32 1, label %bb24 + i32 2, label %bb25 + i32 14, label %bb28 + i32 4, label %bb26 + ] + +bb24: ; preds = %bb21 + unreachable + +bb25: ; preds = %bb21 + br label %bb29 + +bb26: ; preds = %bb21 + br label %bb29 + +bb27: ; preds = %bb21 + unreachable + +bb28: ; preds = %bb21 + br label %bb29 + +bb29: ; preds = %bb28, %bb26, %bb25, %bb21 + unreachable +} + +define void @pr14194() nounwind uwtable { + %tmp = load i64* undef, align 16 + %tmp1 = trunc i64 %tmp to i32 + %tmp2 = lshr i64 %tmp, 32 + %tmp3 = trunc i64 %tmp2 to i32 + %tmp4 = call { i32, i32 } asm sideeffect "", "=&r,=&r,r,r,0,1,~{dirflag},~{fpsr},~{flags}"(i32 %tmp3, i32 undef, i32 %tmp3, i32 %tmp1) nounwind + ret void +} diff --git a/test/CodeGen/X86/cvtv2f32.ll b/test/CodeGen/X86/cvtv2f32.ll new file mode 100644 index 0000000..466b096 --- /dev/null +++ b/test/CodeGen/X86/cvtv2f32.ll @@ -0,0 +1,25 @@ +; RUN: llc < %s -mtriple=i686-linux-pc -mcpu=corei7 | FileCheck %s + +define <2 x float> @foo(i32 %x, i32 %y, <2 x float> %v) { + %t1 = uitofp i32 %x to float + %t2 = insertelement <2 x float> undef, float %t1, i32 0 + %t3 = uitofp i32 %y to float + %t4 = insertelement <2 x float> %t2, float %t3, i32 1 + %t5 = fmul <2 x float> %v, %t4 + ret <2 x float> %t5 +; CHECK: foo +; CHECK: or +; CHECK: subpd +; CHECK: cvtpd2ps +; CHECK: ret +} + +define <2 x float> @bar(<2 x i32> %in) { + %r = uitofp <2 x i32> %in to <2 x float> + ret <2 x float> %r +; CHECK: bar +; CHECK: or +; CHECK: subpd +; CHECK: cvtpd2ps +; CHECK: ret +} diff --git a/test/CodeGen/X86/early-ifcvt-crash.ll b/test/CodeGen/X86/early-ifcvt-crash.ll new file mode 100644 index 0000000..c828026 --- /dev/null +++ b/test/CodeGen/X86/early-ifcvt-crash.ll @@ -0,0 +1,32 @@ +; RUN: llc < %s -x86-early-ifcvt -verify-machineinstrs +; RUN: llc < %s -x86-early-ifcvt -stress-early-ifcvt -verify-machineinstrs +; +; Run these 
tests with and without -stress-early-ifcvt to exercise heuristics. +; +target triple = "x86_64-apple-macosx10.8.0" + +; MachineTraceMetrics::Ensemble::addLiveIns crashes because the first operand +; on an inline asm instruction is not a vreg def. +; <rdar://problem/12472811> +define void @f1() nounwind { +entry: + br i1 undef, label %if.then6.i, label %if.end.i + +if.then6.i: + br label %if.end.i + +if.end.i: + br i1 undef, label %if.end25.i, label %if.else17.i + +if.else17.i: + %shl24.i = shl i32 undef, undef + br label %if.end25.i + +if.end25.i: + %storemerge31.i = phi i32 [ %shl24.i, %if.else17.i ], [ 0, %if.end.i ] + store i32 %storemerge31.i, i32* undef, align 4 + %0 = tail call i32 asm sideeffect "", "=r,r,i,i"(i32 undef, i32 15, i32 1) nounwind + %conv = trunc i32 %0 to i8 + store i8 %conv, i8* undef, align 1 + unreachable +} diff --git a/test/CodeGen/X86/early-ifcvt.ll b/test/CodeGen/X86/early-ifcvt.ll index 7883ffa..2e1852d 100644 --- a/test/CodeGen/X86/early-ifcvt.ll +++ b/test/CodeGen/X86/early-ifcvt.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -enable-early-ifcvt -stress-early-ifcvt | FileCheck %s +; RUN: llc < %s -x86-early-ifcvt -stress-early-ifcvt | FileCheck %s target triple = "x86_64-apple-macosx10.8.0" ; CHECK: mm2 @@ -67,3 +67,78 @@ if.end41: } declare void @fprintf(...) nounwind + +; CHECK: BZ2_decompress +; This test case contains irreducible control flow, so MachineLoopInfo doesn't +; recognize the cycle in the CFG. This would confuse MachineTraceMetrics. +define void @BZ2_decompress(i8* %s) nounwind ssp { +entry: + switch i32 undef, label %sw.default [ + i32 39, label %if.end.sw.bb2050_crit_edge + i32 36, label %sw.bb1788 + i32 37, label %if.end.sw.bb1855_crit_edge + i32 40, label %sw.bb2409 + i32 38, label %sw.bb1983 + i32 44, label %if.end.sw.bb3058_crit_edge + ] + +if.end.sw.bb3058_crit_edge: ; preds = %entry + br label %save_state_and_return + +if.end.sw.bb1855_crit_edge: ; preds = %entry + br label %save_state_and_return + +if.end.sw.bb2050_crit_edge: ; preds = %entry + br label %sw.bb2050 + +sw.bb1788: ; preds = %entry + br label %save_state_and_return + +sw.bb1983: ; preds = %entry + br i1 undef, label %save_state_and_return, label %if.then1990 + +if.then1990: ; preds = %sw.bb1983 + br label %while.body2038 + +while.body2038: ; preds = %sw.bb2050, %if.then1990 + %groupPos.8 = phi i32 [ 0, %if.then1990 ], [ %groupPos.9, %sw.bb2050 ] + br i1 undef, label %save_state_and_return, label %if.end2042 + +if.end2042: ; preds = %while.body2038 + br i1 undef, label %if.end2048, label %while.end2104 + +if.end2048: ; preds = %if.end2042 + %bsLive2054.pre = getelementptr inbounds i8* %s, i32 8 + br label %sw.bb2050 + +sw.bb2050: ; preds = %if.end2048, %if.end.sw.bb2050_crit_edge + %groupPos.9 = phi i32 [ 0, %if.end.sw.bb2050_crit_edge ], [ %groupPos.8, %if.end2048 ] + %and2064 = and i32 undef, 1 + br label %while.body2038 + +while.end2104: ; preds = %if.end2042 + br i1 undef, label %save_state_and_return, label %if.end2117 + +if.end2117: ; preds = %while.end2104 + br i1 undef, label %while.body2161.lr.ph, label %while.body2145.lr.ph + +while.body2145.lr.ph: ; preds = %if.end2117 + br label %save_state_and_return + +while.body2161.lr.ph: ; preds = %if.end2117 + br label %save_state_and_return + +sw.bb2409: ; preds = %entry + br label %save_state_and_return + +sw.default: ; preds = %entry + call void @BZ2_bz__AssertH__fail() nounwind + br label %save_state_and_return + +save_state_and_return: + %groupPos.14 = phi i32 [ 0, %sw.default ], [ %groupPos.8, %while.body2038 ], [ %groupPos.8, 
%while.end2104 ], [ 0, %if.end.sw.bb3058_crit_edge ], [ 0, %if.end.sw.bb1855_crit_edge ], [ %groupPos.8, %while.body2161.lr.ph ], [ %groupPos.8, %while.body2145.lr.ph ], [ 0, %sw.bb2409 ], [ 0, %sw.bb1788 ], [ 0, %sw.bb1983 ] + store i32 %groupPos.14, i32* undef, align 4 + ret void +} + +declare void @BZ2_bz__AssertH__fail() diff --git a/test/CodeGen/X86/extract-concat.ll b/test/CodeGen/X86/extract-concat.ll new file mode 100644 index 0000000..704309e --- /dev/null +++ b/test/CodeGen/X86/extract-concat.ll @@ -0,0 +1,17 @@ +; RUN: llc < %s -mcpu=corei7 -mtriple=x86_64-unknown-linux-gnu | FileCheck %s + +define void @foo(<4 x float> %in, <4 x i8>* %out) { + %t0 = fptosi <4 x float> %in to <4 x i32> + %t1 = trunc <4 x i32> %t0 to <4 x i16> + %t2 = shufflevector <4 x i16> %t1, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + %t3 = trunc <8 x i16> %t2 to <8 x i8> + %t4 = shufflevector <8 x i8> %t3, <8 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %t5 = insertelement <4 x i8> %t4, i8 -1, i32 3 + store <4 x i8> %t5, <4 x i8>* %out + ret void +; CHECK: foo +; CHECK: cvttps2dq +; CHECK-NOT: pextrd +; CHECK: pshufb +; CHECK: ret +} diff --git a/test/CodeGen/X86/fast-cc-callee-pops.ll b/test/CodeGen/X86/fast-cc-callee-pops.ll index ea10897..2c5b80a 100644 --- a/test/CodeGen/X86/fast-cc-callee-pops.ll +++ b/test/CodeGen/X86/fast-cc-callee-pops.ll @@ -2,12 +2,12 @@ ; Check that a fastcc function pops its stack variables before returning. -define x86_fastcallcc void @func(i64 %X, i64 %Y, float %G, double %Z) nounwind { +define x86_fastcallcc void @func(i64 inreg %X, i64 %Y, float %G, double %Z) nounwind { ret void ; CHECK: ret{{.*}}20 } -define x86_thiscallcc void @func2(i32 %X, i64 %Y, float %G, double %Z) nounwind { +define x86_thiscallcc void @func2(i32 inreg %X, i64 %Y, float %G, double %Z) nounwind { ret void ; CHECK: ret{{.*}}20 } diff --git a/test/CodeGen/X86/fast-cc-merge-stack-adj.ll b/test/CodeGen/X86/fast-cc-merge-stack-adj.ll index 14cb136..d591f94 100644 --- a/test/CodeGen/X86/fast-cc-merge-stack-adj.ll +++ b/test/CodeGen/X86/fast-cc-merge-stack-adj.ll @@ -3,7 +3,7 @@ target triple = "i686-pc-linux-gnu" -declare x86_fastcallcc void @func(i32*, i64) +declare x86_fastcallcc void @func(i32*, i64 inreg) define x86_fastcallcc void @caller(i32, i64) { %X = alloca i32 ; <i32*> [#uses=1] diff --git a/test/CodeGen/X86/fast-cc-pass-in-regs.ll b/test/CodeGen/X86/fast-cc-pass-in-regs.ll index a96e504..b60b68b 100644 --- a/test/CodeGen/X86/fast-cc-pass-in-regs.ll +++ b/test/CodeGen/X86/fast-cc-pass-in-regs.ll @@ -1,7 +1,7 @@ ; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | FileCheck %s ; check that fastcc is passing stuff in regs. 
-declare x86_fastcallcc i64 @callee(i64) +declare x86_fastcallcc i64 @callee(i64 inreg) define i64 @caller() { %X = call x86_fastcallcc i64 @callee( i64 4294967299 ) ; <i64> [#uses=1] @@ -9,7 +9,7 @@ define i64 @caller() { ret i64 %X } -define x86_fastcallcc i64 @caller2(i64 %X) { +define x86_fastcallcc i64 @caller2(i64 inreg %X) { ret i64 %X ; CHECK: mov{{.*}}EAX, ECX } diff --git a/test/CodeGen/X86/fast-isel-x86-64.ll b/test/CodeGen/X86/fast-isel-x86-64.ll index d8f4663..cdfaf7f 100644 --- a/test/CodeGen/X86/fast-isel-x86-64.ll +++ b/test/CodeGen/X86/fast-isel-x86-64.ll @@ -1,4 +1,5 @@ -; RUN: llc < %s -fast-isel -O0 -regalloc=fast -asm-verbose=0 -fast-isel-abort | FileCheck %s +; RUN: llc < %s -mattr=-avx -fast-isel -O0 -regalloc=fast -asm-verbose=0 -fast-isel-abort | FileCheck %s +; RUN: llc < %s -mattr=+avx -fast-isel -O0 -regalloc=fast -asm-verbose=0 -fast-isel-abort | FileCheck %s --check-prefix=AVX target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" target triple = "x86_64-apple-darwin10.0.0" @@ -197,6 +198,11 @@ block2: ; CHECK: cvtsi2sdq {{.*}} %xmm0 ; CHECK: movb $1, %al ; CHECK: callq _test16callee + +; AVX: movabsq $1 +; AVX: vmovsd LCP{{.*}}_{{.*}}(%rip), %xmm0 +; AVX: movb $1, %al +; AVX: callq _test16callee call void (...)* @test16callee(double 1.000000e+00) ret void } @@ -285,3 +291,16 @@ entry: } declare void @foo22(i32) + +; PR13563 +define void @test23(i8* noalias sret %result) { + %a = alloca i8 + %b = call i8* @foo23() + ret void +; CHECK: test23: +; CHECK: call +; CHECK: movq %rdi, %rax +; CHECK: ret +} + +declare i8* @foo23() diff --git a/test/CodeGen/X86/fma.ll b/test/CodeGen/X86/fma.ll index b0c1d0a..bd3514c 100644 --- a/test/CodeGen/X86/fma.ll +++ b/test/CodeGen/X86/fma.ll @@ -1,11 +1,13 @@ -; RUN: llc < %s -mtriple=i386-apple-darwin10 -mattr=+fma | FileCheck %s --check-prefix=CHECK-FMA-INST -; RUN: llc < %s -mtriple=i386-apple-darwin10 | FileCheck %s --check-prefix=CHECK-FMA-CALL -; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=+fma | FileCheck %s --check-prefix=CHECK-FMA-INST -; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=CHECK-FMA-CALL +; RUN: llc < %s -mtriple=i386-apple-darwin10 -mattr=+fma,-fma4 | FileCheck %s --check-prefix=CHECK-FMA-INST +; RUN: llc < %s -mtriple=i386-apple-darwin10 -mattr=-fma,-fma4 | FileCheck %s --check-prefix=CHECK-FMA-CALL +; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=+fma,-fma4 | FileCheck %s --check-prefix=CHECK-FMA-INST +; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=-fma,-fma4 | FileCheck %s --check-prefix=CHECK-FMA-CALL +; RUN: llc < %s -march=x86 -mcpu=bdver2 -mattr=-fma4 | FileCheck %s --check-prefix=CHECK-FMA-INST +; RUN: llc < %s -march=x86 -mcpu=bdver2 -mattr=-fma,-fma4 | FileCheck %s --check-prefix=CHECK-FMA-CALL ; CHECK: test_f32 ; CHECK-FMA-INST: vfmadd213ss -; CHECK-FMA-CALL: _fmaf +; CHECK-FMA-CALL: fmaf define float @test_f32(float %a, float %b, float %c) nounwind readnone ssp { entry: @@ -15,7 +17,7 @@ entry: ; CHECK: test_f64 ; CHECK-FMA-INST: vfmadd213sd -; CHECK-FMA-CALL: _fma +; CHECK-FMA-CALL: fma define double @test_f64(double %a, double %b, double %c) nounwind readnone ssp { entry: @@ -24,7 +26,7 @@ entry: } ; CHECK: test_f80 -; CHECK: _fmal +; CHECK: fmal define x86_fp80 @test_f80(x86_fp80 %a, x86_fp80 %b, x86_fp80 %c) nounwind readnone ssp { entry: diff --git a/test/CodeGen/X86/fma3-intrinsics.ll b/test/CodeGen/X86/fma3-intrinsics.ll index 
90529e0..e3910a6 100755 --- a/test/CodeGen/X86/fma3-intrinsics.ll +++ b/test/CodeGen/X86/fma3-intrinsics.ll @@ -1,4 +1,6 @@ -; RUN: llc < %s -mtriple=x86_64-pc-win32 -mcpu=core-avx2 -mattr=avx2,+fma | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-pc-win32 -mcpu=core-avx2 | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-pc-win32 -mattr=+fma,+fma4 | FileCheck %s +; RUN: llc < %s -mcpu=bdver2 -mtriple=x86_64-pc-win32 -mattr=-fma4 | FileCheck %s define <4 x float> @test_x86_fmadd_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) { ; CHECK: fmadd213ss %xmm diff --git a/test/CodeGen/X86/fma4-intrinsics-x86_64.ll b/test/CodeGen/X86/fma4-intrinsics-x86_64.ll index fd414b3..2fe1ecd 100644 --- a/test/CodeGen/X86/fma4-intrinsics-x86_64.ll +++ b/test/CodeGen/X86/fma4-intrinsics-x86_64.ll @@ -1,4 +1,5 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -march=x86-64 -mattr=+avx,+fma4 | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=bdver2 -mattr=+avx,-fma | FileCheck %s ; VFMADD define < 4 x float > @test_x86_fma_vfmadd_ss(< 4 x float > %a0, < 4 x float > %a1, < 4 x float > %a2) { diff --git a/test/CodeGen/X86/fma_patterns.ll b/test/CodeGen/X86/fma_patterns.ll index 5d97a87..6d98d59 100644 --- a/test/CodeGen/X86/fma_patterns.ll +++ b/test/CodeGen/X86/fma_patterns.ll @@ -1,8 +1,13 @@ ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=avx2,+fma -fp-contract=fast | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=bdver2 -mattr=-fma4 -fp-contract=fast | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=bdver1 -fp-contract=fast | FileCheck %s --check-prefix=CHECK_FMA4 ; CHECK: test_x86_fmadd_ps -; CHECK: vfmadd213ps %xmm2, %xmm0, %xmm1 +; CHECK: vfmadd213ps %xmm2, %xmm1, %xmm0 ; CHECK: ret +; CHECK_FMA4: test_x86_fmadd_ps +; CHECK_FMA4: vfmaddps %xmm2, %xmm1, %xmm0, %xmm0 +; CHECK_FMA4: ret define <4 x float> @test_x86_fmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) { %x = fmul <4 x float> %a0, %a1 %res = fadd <4 x float> %x, %a2 @@ -10,8 +15,11 @@ define <4 x float> @test_x86_fmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x flo } ; CHECK: test_x86_fmsub_ps -; CHECK: fmsub213ps %xmm2, %xmm0, %xmm1 +; CHECK: fmsub213ps %xmm2, %xmm1, %xmm0 ; CHECK: ret +; CHECK_FMA4: test_x86_fmsub_ps +; CHECK_FMA4: vfmsubps %xmm2, %xmm1, %xmm0, %xmm0 +; CHECK_FMA4: ret define <4 x float> @test_x86_fmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) { %x = fmul <4 x float> %a0, %a1 %res = fsub <4 x float> %x, %a2 @@ -19,8 +27,11 @@ define <4 x float> @test_x86_fmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x flo } ; CHECK: test_x86_fnmadd_ps -; CHECK: fnmadd213ps %xmm2, %xmm0, %xmm1 +; CHECK: fnmadd213ps %xmm2, %xmm1, %xmm0 ; CHECK: ret +; CHECK_FMA4: test_x86_fnmadd_ps +; CHECK_FMA4: vfnmaddps %xmm2, %xmm1, %xmm0, %xmm0 +; CHECK_FMA4: ret define <4 x float> @test_x86_fnmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) { %x = fmul <4 x float> %a0, %a1 %res = fsub <4 x float> %a2, %x @@ -28,8 +39,11 @@ define <4 x float> @test_x86_fnmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x fl } ; CHECK: test_x86_fnmsub_ps -; CHECK: fnmsub213ps %xmm2, %xmm0, %xmm1 +; CHECK: fnmsub213ps %xmm2, %xmm1, %xmm0 ; CHECK: ret +; CHECK_FMA4: test_x86_fnmsub_ps +; CHECK_FMA4: fnmsubps %xmm2, %xmm1, %xmm0, %xmm0 +; CHECK_FMA4: ret define <4 x float> @test_x86_fnmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) { %x = fmul <4 x float> %a0, %a1 %y = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float 
-0.000000e+00>, %x @@ -38,8 +52,11 @@ define <4 x float> @test_x86_fnmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x fl } ; CHECK: test_x86_fmadd_ps_y -; CHECK: vfmadd213ps %ymm2, %ymm0, %ymm1 +; CHECK: vfmadd213ps %ymm2, %ymm1, %ymm0 ; CHECK: ret +; CHECK_FMA4: test_x86_fmadd_ps_y +; CHECK_FMA4: vfmaddps %ymm2, %ymm1, %ymm0, %ymm0 +; CHECK_FMA4: ret define <8 x float> @test_x86_fmadd_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) { %x = fmul <8 x float> %a0, %a1 %res = fadd <8 x float> %x, %a2 @@ -47,8 +64,11 @@ define <8 x float> @test_x86_fmadd_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x f } ; CHECK: test_x86_fmsub_ps_y -; CHECK: vfmsub213ps %ymm2, %ymm0, %ymm1 +; CHECK: vfmsub213ps %ymm2, %ymm1, %ymm0 ; CHECK: ret +; CHECK_FMA4: test_x86_fmsub_ps_y +; CHECK_FMA4: vfmsubps %ymm2, %ymm1, %ymm0, %ymm0 +; CHECK_FMA4: ret define <8 x float> @test_x86_fmsub_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) { %x = fmul <8 x float> %a0, %a1 %res = fsub <8 x float> %x, %a2 @@ -56,8 +76,11 @@ define <8 x float> @test_x86_fmsub_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x f } ; CHECK: test_x86_fnmadd_ps_y -; CHECK: vfnmadd213ps %ymm2, %ymm0, %ymm1 +; CHECK: vfnmadd213ps %ymm2, %ymm1, %ymm0 ; CHECK: ret +; CHECK_FMA4: test_x86_fnmadd_ps_y +; CHECK_FMA4: vfnmaddps %ymm2, %ymm1, %ymm0, %ymm0 +; CHECK_FMA4: ret define <8 x float> @test_x86_fnmadd_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) { %x = fmul <8 x float> %a0, %a1 %res = fsub <8 x float> %a2, %x @@ -65,7 +88,7 @@ define <8 x float> @test_x86_fnmadd_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x } ; CHECK: test_x86_fnmsub_ps_y -; CHECK: vfnmsub213ps %ymm2, %ymm0, %ymm1 +; CHECK: vfnmsub213ps %ymm2, %ymm1, %ymm0 ; CHECK: ret define <8 x float> @test_x86_fnmsub_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) { %x = fmul <8 x float> %a0, %a1 @@ -75,8 +98,11 @@ define <8 x float> @test_x86_fnmsub_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x } ; CHECK: test_x86_fmadd_pd_y -; CHECK: vfmadd213pd %ymm2, %ymm0, %ymm1 +; CHECK: vfmadd213pd %ymm2, %ymm1, %ymm0 ; CHECK: ret +; CHECK_FMA4: test_x86_fmadd_pd_y +; CHECK_FMA4: vfmaddpd %ymm2, %ymm1, %ymm0, %ymm0 +; CHECK_FMA4: ret define <4 x double> @test_x86_fmadd_pd_y(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) { %x = fmul <4 x double> %a0, %a1 %res = fadd <4 x double> %x, %a2 @@ -84,8 +110,11 @@ define <4 x double> @test_x86_fmadd_pd_y(<4 x double> %a0, <4 x double> %a1, <4 } ; CHECK: test_x86_fmsub_pd_y -; CHECK: vfmsub213pd %ymm2, %ymm0, %ymm1 +; CHECK: vfmsub213pd %ymm2, %ymm1, %ymm0 ; CHECK: ret +; CHECK_FMA4: test_x86_fmsub_pd_y +; CHECK_FMA4: vfmsubpd %ymm2, %ymm1, %ymm0, %ymm0 +; CHECK_FMA4: ret define <4 x double> @test_x86_fmsub_pd_y(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) { %x = fmul <4 x double> %a0, %a1 %res = fsub <4 x double> %x, %a2 @@ -93,8 +122,11 @@ define <4 x double> @test_x86_fmsub_pd_y(<4 x double> %a0, <4 x double> %a1, <4 } ; CHECK: test_x86_fmsub_pd -; CHECK: vfmsub213pd %xmm2, %xmm0, %xmm1 +; CHECK: vfmsub213pd %xmm2, %xmm1, %xmm0 ; CHECK: ret +; CHECK_FMA4: test_x86_fmsub_pd +; CHECK_FMA4: vfmsubpd %xmm2, %xmm1, %xmm0, %xmm0 +; CHECK_FMA4: ret define <2 x double> @test_x86_fmsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) { %x = fmul <2 x double> %a0, %a1 %res = fsub <2 x double> %x, %a2 @@ -102,8 +134,11 @@ define <2 x double> @test_x86_fmsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x } ; CHECK: test_x86_fnmadd_ss -; CHECK: vfnmadd213ss %xmm2, %xmm0, %xmm1 +; CHECK: vfnmadd213ss %xmm2, %xmm1, %xmm0 ; CHECK: ret 
+; CHECK_FMA4: test_x86_fnmadd_ss +; CHECK_FMA4: vfnmaddss %xmm2, %xmm1, %xmm0, %xmm0 +; CHECK_FMA4: ret define float @test_x86_fnmadd_ss(float %a0, float %a1, float %a2) { %x = fmul float %a0, %a1 %res = fsub float %a2, %x @@ -111,8 +146,11 @@ define float @test_x86_fnmadd_ss(float %a0, float %a1, float %a2) { } ; CHECK: test_x86_fnmadd_sd -; CHECK: vfnmadd213sd %xmm2, %xmm0, %xmm1 +; CHECK: vfnmadd213sd %xmm2, %xmm1, %xmm0 ; CHECK: ret +; CHECK_FMA4: test_x86_fnmadd_sd +; CHECK_FMA4: vfnmaddsd %xmm2, %xmm1, %xmm0, %xmm0 +; CHECK_FMA4: ret define double @test_x86_fnmadd_sd(double %a0, double %a1, double %a2) { %x = fmul double %a0, %a1 %res = fsub double %a2, %x @@ -120,8 +158,11 @@ define double @test_x86_fnmadd_sd(double %a0, double %a1, double %a2) { } ; CHECK: test_x86_fmsub_sd -; CHECK: vfmsub213sd %xmm2, %xmm0, %xmm1 +; CHECK: vfmsub213sd %xmm2, %xmm1, %xmm0 ; CHECK: ret +; CHECK_FMA4: test_x86_fmsub_sd +; CHECK_FMA4: vfmsubsd %xmm2, %xmm1, %xmm0, %xmm0 +; CHECK_FMA4: ret define double @test_x86_fmsub_sd(double %a0, double %a1, double %a2) { %x = fmul double %a0, %a1 %res = fsub double %x, %a2 @@ -129,11 +170,43 @@ define double @test_x86_fmsub_sd(double %a0, double %a1, double %a2) { } ; CHECK: test_x86_fnmsub_ss -; CHECK: vfnmsub213ss %xmm2, %xmm0, %xmm1 +; CHECK: vfnmsub213ss %xmm2, %xmm1, %xmm0 ; CHECK: ret +; CHECK_FMA4: test_x86_fnmsub_ss +; CHECK_FMA4: vfnmsubss %xmm2, %xmm1, %xmm0, %xmm0 +; CHECK_FMA4: ret define float @test_x86_fnmsub_ss(float %a0, float %a1, float %a2) { %x = fsub float -0.000000e+00, %a0 %y = fmul float %x, %a1 %res = fsub float %y, %a2 ret float %res } + +; CHECK: test_x86_fmadd_ps +; CHECK: vmovaps (%rdi), %xmm2 +; CHECK: vfmadd213ps %xmm1, %xmm0, %xmm2 +; CHECK: ret +; CHECK_FMA4: test_x86_fmadd_ps +; CHECK_FMA4: vfmaddps %xmm1, (%rdi), %xmm0, %xmm0 +; CHECK_FMA4: ret +define <4 x float> @test_x86_fmadd_ps_load(<4 x float>* %a0, <4 x float> %a1, <4 x float> %a2) { + %x = load <4 x float>* %a0 + %y = fmul <4 x float> %x, %a1 + %res = fadd <4 x float> %y, %a2 + ret <4 x float> %res +} + +; CHECK: test_x86_fmsub_ps +; CHECK: vmovaps (%rdi), %xmm2 +; CHECK: fmsub213ps %xmm1, %xmm0, %xmm2 +; CHECK: ret +; CHECK_FMA4: test_x86_fmsub_ps +; CHECK_FMA4: vfmsubps %xmm1, (%rdi), %xmm0, %xmm0 +; CHECK_FMA4: ret +define <4 x float> @test_x86_fmsub_ps_load(<4 x float>* %a0, <4 x float> %a1, <4 x float> %a2) { + %x = load <4 x float>* %a0 + %y = fmul <4 x float> %x, %a1 + %res = fsub <4 x float> %y, %a2 + ret <4 x float> %res +} + diff --git a/test/CodeGen/X86/fold-load.ll b/test/CodeGen/X86/fold-load.ll index c961f75..d836665 100644 --- a/test/CodeGen/X86/fold-load.ll +++ b/test/CodeGen/X86/fold-load.ll @@ -57,13 +57,13 @@ entry: %0 = load i32* %P, align 4 %1 = load i32* %Q, align 4 %2 = xor i32 %0, %1 - %3 = and i32 %2, 65535 + %3 = and i32 %2, 89947 %4 = icmp eq i32 %3, 0 br i1 %4, label %exit, label %land.end exit: %shr.i.i19 = xor i32 %1, %0 - %5 = and i32 %shr.i.i19, 2147418112 + %5 = and i32 %shr.i.i19, 3456789123 %6 = icmp eq i32 %5, 0 br label %land.end diff --git a/test/CodeGen/X86/fp-fast.ll b/test/CodeGen/X86/fp-fast.ll new file mode 100644 index 0000000..d70aa7d --- /dev/null +++ b/test/CodeGen/X86/fp-fast.ll @@ -0,0 +1,57 @@ +; RUN: llc -march=x86-64 -mattr=+avx,-fma4 -mtriple=x86_64-apple-darwin -enable-unsafe-fp-math < %s | FileCheck %s + +; CHECK: test1 +define float @test1(float %a) { +; CHECK-NOT: addss +; CHECK: mulss +; CHECK-NOT: addss +; CHECK: ret + %t1 = fadd float %a, %a + %r = fadd float %t1, %t1 + ret float %r +} + +; CHECK: test2 +define 
float @test2(float %a) { +; CHECK-NOT: addss +; CHECK: mulss +; CHECK-NOT: addss +; CHECK: ret + %t1 = fmul float 4.0, %a + %t2 = fadd float %a, %a + %r = fadd float %t1, %t2 + ret float %r +} + +; CHECK: test3 +define float @test3(float %a) { +; CHECK-NOT: addss +; CHECK: xorps +; CHECK-NOT: addss +; CHECK: ret + %t1 = fmul float 2.0, %a + %t2 = fadd float %a, %a + %r = fsub float %t1, %t2 + ret float %r +} + +; CHECK: test4 +define float @test4(float %a) { +; CHECK-NOT: fma +; CHECK-NOT mul +; CHECK-NOT: add +; CHECK: ret + %t1 = fmul float %a, 0.0 + %t2 = fadd float %a, %t1 + ret float %t2 +} + +; CHECK: test5 +define float @test5(float %a) { +; CHECK-NOT: add +; CHECK: vxorps +; CHECK: ret + %t1 = fsub float -0.0, %a + %t2 = fadd float %a, %t1 + ret float %t2 +} diff --git a/test/CodeGen/X86/fp-load-trunc.ll b/test/CodeGen/X86/fp-load-trunc.ll new file mode 100644 index 0000000..2ae65c9 --- /dev/null +++ b/test/CodeGen/X86/fp-load-trunc.ll @@ -0,0 +1,61 @@ +; RUN: llc < %s -march=x86 -mcpu=corei7 | FileCheck %s +; RUN: llc < %s -march=x86 -mcpu=core-avx-i | FileCheck %s --check-prefix=AVX + +define <1 x float> @test1(<1 x double>* %p) nounwind { +; CHECK: test1 +; CHECK: cvtsd2ss +; CHECK: ret +; AVX: test1 +; AVX: vcvtsd2ss +; AVX: ret + %x = load <1 x double>* %p + %y = fptrunc <1 x double> %x to <1 x float> + ret <1 x float> %y +} + +define <2 x float> @test2(<2 x double>* %p) nounwind { +; CHECK: test2 +; CHECK: cvtpd2ps {{[0-9]*}}(%{{.*}}) +; CHECK: ret +; AVX: test2 +; AVX: vcvtpd2psx {{[0-9]*}}(%{{.*}}) +; AVX: ret + %x = load <2 x double>* %p + %y = fptrunc <2 x double> %x to <2 x float> + ret <2 x float> %y +} + +define <4 x float> @test3(<4 x double>* %p) nounwind { +; CHECK: test3 +; CHECK: cvtpd2ps {{[0-9]*}}(%{{.*}}) +; CHECK: cvtpd2ps {{[0-9]*}}(%{{.*}}) +; CHECK: movlhps +; CHECK: ret +; AVX: test3 +; AVX: vcvtpd2psy {{[0-9]*}}(%{{.*}}) +; AVX: ret + %x = load <4 x double>* %p + %y = fptrunc <4 x double> %x to <4 x float> + ret <4 x float> %y +} + +define <8 x float> @test4(<8 x double>* %p) nounwind { +; CHECK: test4 +; CHECK: cvtpd2ps {{[0-9]*}}(%{{.*}}) +; CHECK: cvtpd2ps {{[0-9]*}}(%{{.*}}) +; CHECK: movlhps +; CHECK: cvtpd2ps {{[0-9]*}}(%{{.*}}) +; CHECK: cvtpd2ps {{[0-9]*}}(%{{.*}}) +; CHECK: movlhps +; CHECK: ret +; AVX: test4 +; AVX: vcvtpd2psy {{[0-9]*}}(%{{.*}}) +; AVX: vcvtpd2psy {{[0-9]*}}(%{{.*}}) +; AVX: vinsertf128 +; AVX: ret + %x = load <8 x double>* %p + %y = fptrunc <8 x double> %x to <8 x float> + ret <8 x float> %y +} + + diff --git a/test/CodeGen/X86/fp-trunc.ll b/test/CodeGen/X86/fp-trunc.ll index 170637a..25442fc 100644 --- a/test/CodeGen/X86/fp-trunc.ll +++ b/test/CodeGen/X86/fp-trunc.ll @@ -1,33 +1,56 @@ -; RUN: llc < %s -march=x86 -mattr=+sse2,-avx | FileCheck %s +; RUN: llc < %s -march=x86 -mcpu=corei7 | FileCheck %s +; RUN: llc < %s -march=x86 -mcpu=core-avx-i | FileCheck %s --check-prefix=AVX define <1 x float> @test1(<1 x double> %x) nounwind { +; CHECK: test1 ; CHECK: cvtsd2ss ; CHECK: ret +; AVX: test1 +; AVX: vcvtsd2ss +; AVX: ret %y = fptrunc <1 x double> %x to <1 x float> ret <1 x float> %y } - define <2 x float> @test2(<2 x double> %x) nounwind { -; FIXME: It would be nice if this compiled down to a cvtpd2ps -; CHECK: cvtsd2ss -; CHECK: cvtsd2ss +; CHECK: test2 +; CHECK: cvtpd2ps ; CHECK: ret +; AVX: test2 +; AVX-NOT: vcvtpd2psy +; AVX: vcvtpd2ps +; AVX: ret %y = fptrunc <2 x double> %x to <2 x float> ret <2 x float> %y } -define <8 x float> @test3(<8 x double> %x) nounwind { -; FIXME: It would be nice if this compiled down to a 
series of cvtpd2ps -; CHECK: cvtsd2ss -; CHECK: cvtsd2ss -; CHECK: cvtsd2ss -; CHECK: cvtsd2ss -; CHECK: cvtsd2ss -; CHECK: cvtsd2ss -; CHECK: cvtsd2ss -; CHECK: cvtsd2ss +define <4 x float> @test3(<4 x double> %x) nounwind { +; CHECK: test3 +; CHECK: cvtpd2ps +; CHECK: cvtpd2ps +; CHECK: movlhps +; CHECK: ret +; AVX: test3 +; AVX: vcvtpd2psy +; AVX: ret + %y = fptrunc <4 x double> %x to <4 x float> + ret <4 x float> %y +} + +define <8 x float> @test4(<8 x double> %x) nounwind { +; CHECK: test4 +; CHECK: cvtpd2ps +; CHECK: cvtpd2ps +; CHECK: movlhps +; CHECK: cvtpd2ps +; CHECK: cvtpd2ps +; CHECK: movlhps ; CHECK: ret +; AVX: test4 +; AVX: vcvtpd2psy +; AVX: vcvtpd2psy +; AVX: vinsertf128 +; AVX: ret %y = fptrunc <8 x double> %x to <8 x float> ret <8 x float> %y } diff --git a/test/CodeGen/X86/handle-move.ll b/test/CodeGen/X86/handle-move.ll new file mode 100644 index 0000000..e9f7a96 --- /dev/null +++ b/test/CodeGen/X86/handle-move.ll @@ -0,0 +1,74 @@ +; RUN: llc -march=x86-64 -mcpu=core2 -fast-isel -enable-misched -misched=shuffle -misched-bottomup -verify-machineinstrs < %s +; RUN: llc -march=x86-64 -mcpu=core2 -fast-isel -enable-misched -misched=shuffle -misched-topdown -verify-machineinstrs < %s +; REQUIRES: asserts +; +; Test the LiveIntervals::handleMove() function. +; +; Moving the DIV32r instruction exercises the regunit update code because +; %EDX has a live range into the function and is used by the DIV32r. +; +; Here sinking a kill + dead def: +; 144B -> 180B: DIV32r %vreg4, %EAX<imp-def>, %EDX<imp-def,dead>, %EFLAGS<imp-def,dead>, %EAX<imp-use,kill>, %EDX<imp-use> +; %vreg4: [48r,144r:0) 0@48r +; --> [48r,180r:0) 0@48r +; DH: [0B,16r:0)[128r,144r:2)[144r,144d:1) 0@0B-phi 1@144r 2@128r +; --> [0B,16r:0)[128r,180r:2)[180r,180d:1) 0@0B-phi 1@180r 2@128r +; DL: [0B,16r:0)[128r,144r:2)[144r,144d:1) 0@0B-phi 1@144r 2@128r +; --> [0B,16r:0)[128r,180r:2)[180r,180d:1) 0@0B-phi 1@180r 2@128r +; +define i32 @f1(i32 %a, i32 %b, i32 %c, i32 %d) nounwind uwtable readnone ssp { +entry: + %y = add i32 %c, 1 + %x = udiv i32 %b, %a + %add = add nsw i32 %y, %x + ret i32 %add +} + +; Same as above, but moving a kill + live def: +; 144B -> 180B: DIV32r %vreg4, %EAX<imp-def,dead>, %EDX<imp-def>, %EFLAGS<imp-def,dead>, %EAX<imp-use,kill>, %EDX<imp-use> +; %vreg4: [48r,144r:0) 0@48r +; --> [48r,180r:0) 0@48r +; DH: [0B,16r:0)[128r,144r:2)[144r,184r:1) 0@0B-phi 1@144r 2@128r +; --> [0B,16r:0)[128r,180r:2)[180r,184r:1) 0@0B-phi 1@180r 2@128r +; DL: [0B,16r:0)[128r,144r:2)[144r,184r:1) 0@0B-phi 1@144r 2@128r +; --> [0B,16r:0)[128r,180r:2)[180r,184r:1) 0@0B-phi 1@180r 2@128r +; +define i32 @f2(i32 %a, i32 %b, i32 %c, i32 %d) nounwind uwtable readnone ssp { +entry: + %y = sub i32 %c, %d + %x = urem i32 %b, %a + %add = add nsw i32 %x, %y + ret i32 %add +} + +; Moving a use below the existing kill (%vreg5): +; Moving a tied virtual register def (%vreg11): +; +; 96B -> 120B: %vreg11<def,tied1> = SUB32rr %vreg11<tied0>, %vreg5 +; %vreg11: [80r,96r:1)[96r,144r:0) 0@96r 1@80r +; --> [80r,120r:1)[120r,144r:0) 0@120r 1@80r +; %vreg5: [16r,112r:0) 0@16r +; --> [16r,120r:0) 0@16r +; +define i32 @f3(i32 %a, i32 %b, i32 %c, i32 %d) nounwind uwtable readnone ssp { +entry: + %y = sub i32 %a, %b + %x = add i32 %a, %b + %r = mul i32 %x, %y + ret i32 %r +} + +; Move EFLAGS dead def across another def: +; handleMove 208B -> 36B: %EDX<def> = MOV32r0 %EFLAGS<imp-def,dead> +; EFLAGS: [20r,20d:4)[160r,160d:3)[208r,208d:0)[224r,224d:1)[272r,272d:2)[304r,304d:5) 0@208r 1@224r 2@272r 3@160r 4@20r 5@304r +; --> 
[20r,20d:4)[36r,36d:0)[160r,160d:3)[224r,224d:1)[272r,272d:2)[304r,304d:5) 0@36r 1@224r 2@272r 3@160r 4@20r 5@304r +; +define i32 @f4(i32 %a, i32 %b, i32 %c, i32 %d) nounwind uwtable readnone ssp { +entry: + %x = sub i32 %a, %b + %y = sub i32 %b, %c + %z = sub i32 %c, %d + %r1 = udiv i32 %x, %y + %r2 = mul i32 %z, %r1 + ret i32 %r2 +} diff --git a/test/CodeGen/X86/inline-asm-tied.ll b/test/CodeGen/X86/inline-asm-tied.ll index 91576fb..597236e 100644 --- a/test/CodeGen/X86/inline-asm-tied.ll +++ b/test/CodeGen/X86/inline-asm-tied.ll @@ -19,3 +19,12 @@ entry: %1 = load i64* %retval ; <i64> [#uses=1] ret i64 %1 } + +; The tied operands are not necessarily in the same order as the defs. +; PR13742 +define i64 @swapped(i64 %x, i64 %y) nounwind { +entry: + %x0 = call { i64, i64 } asm "foo", "=r,=r,1,0,~{dirflag},~{fpsr},~{flags}"(i64 %x, i64 %y) nounwind + %x1 = extractvalue { i64, i64 } %x0, 0 + ret i64 %x1 +} diff --git a/test/CodeGen/X86/inline-asm.ll b/test/CodeGen/X86/inline-asm.ll index e6eb9ef..d201ebd 100644 --- a/test/CodeGen/X86/inline-asm.ll +++ b/test/CodeGen/X86/inline-asm.ll @@ -52,3 +52,10 @@ entry: %0 = call { i32, i32, i32, i32, i32 } asm sideeffect "", "=&r,=&r,=&r,=&r,=&q,r,~{ecx},~{memory},~{dirflag},~{fpsr},~{flags}"(i8* %h) nounwind ret void } + +; Mix normal and EC defs of the same register. +define i32 @pr14376() nounwind noinline { +entry: + %asm = tail call i32 asm sideeffect "", "={ax},i,~{eax},~{flags},~{rax}"(i64 61) nounwind + ret i32 %asm +} diff --git a/test/CodeGen/X86/inlineasm-sched-bug.ll b/test/CodeGen/X86/inlineasm-sched-bug.ll new file mode 100644 index 0000000..08de0c0 --- /dev/null +++ b/test/CodeGen/X86/inlineasm-sched-bug.ll @@ -0,0 +1,13 @@ +; PR13504 +; RUN: llc -march=x86 -mcpu=atom <%s | FileCheck %s +; CHECK: bsfl +; CHECK-NOT: movl + +define i32 @foo(i32 %treemap) nounwind uwtable { +entry: + %sub = sub i32 0, %treemap + %and = and i32 %treemap, %sub + %0 = tail call i32 asm "bsfl $1,$0\0A\09", "=r,rm,~{dirflag},~{fpsr},~{flags}"(i32 %and) nounwind + ret i32 %0 +} + diff --git a/test/CodeGen/X86/jump_sign.ll b/test/CodeGen/X86/jump_sign.ll index 48e2106..0e34222 100644 --- a/test/CodeGen/X86/jump_sign.ll +++ b/test/CodeGen/X86/jump_sign.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=x86 -mcpu=pentiumpro | FileCheck %s +; RUN: llc < %s -march=x86 -mcpu=pentiumpro -verify-machineinstrs | FileCheck %s define i32 @f(i32 %X) { entry: @@ -219,7 +219,6 @@ entry: ; by sbb, we should not optimize cmp away. define i32 @q(i32 %j.4, i32 %w, i32 %el) { ; CHECK: q: -; CHECK: sub ; CHECK: cmp ; CHECK-NEXT: sbb %tmp532 = add i32 %j.4, %w @@ -253,3 +252,56 @@ return: %retval.0 = phi i8* [ %add.ptr, %if.end ], [ null, %entry ] ret i8* %retval.0 } + +; Test optimizations of dec/inc. 
+define i32 @dec(i32 %a) nounwind { +entry: +; CHECK: dec: +; CHECK: decl +; CHECK-NOT: test +; CHECK: cmovsl + %sub = sub nsw i32 %a, 1 + %cmp = icmp sgt i32 %sub, 0 + %cond = select i1 %cmp, i32 %sub, i32 0 + ret i32 %cond +} + +define i32 @inc(i32 %a) nounwind { +entry: +; CHECK: inc: +; CHECK: incl +; CHECK-NOT: test +; CHECK: cmovsl + %add = add nsw i32 %a, 1 + %cmp = icmp sgt i32 %add, 0 + %cond = select i1 %cmp, i32 %add, i32 0 + ret i32 %cond +} + +; PR13966 +@b = common global i32 0, align 4 +@a = common global i32 0, align 4 +define i32 @test1(i32 %p1) nounwind uwtable { +entry: +; CHECK: test1: +; CHECK: testb +; CHECK: j +; CHECK: ret + %0 = load i32* @b, align 4 + %cmp = icmp ult i32 %0, %p1 + %conv = zext i1 %cmp to i32 + %1 = load i32* @a, align 4 + %and = and i32 %conv, %1 + %conv1 = trunc i32 %and to i8 + %2 = urem i8 %conv1, 3 + %tobool = icmp eq i8 %2, 0 + br i1 %tobool, label %if.end, label %if.then + +if.then: + %dec = add nsw i32 %1, -1 + store i32 %dec, i32* @a, align 4 + br label %if.end + +if.end: + ret i32 undef +} diff --git a/test/CodeGen/X86/misched-balance.ll b/test/CodeGen/X86/misched-balance.ll new file mode 100644 index 0000000..2184d9e --- /dev/null +++ b/test/CodeGen/X86/misched-balance.ll @@ -0,0 +1,230 @@ +; RUN: llc < %s -march=x86-64 -mcpu=core2 -pre-RA-sched=source -enable-misched \ +; RUN: -verify-machineinstrs | FileCheck %s +; +; Verify that misched resource/latency balance heuristics are sane. + +define void @unrolled_mmult1(i32* %tmp55, i32* %tmp56, i32* %pre, i32* %pre94, + i32* %pre95, i32* %pre96, i32* %pre97, i32* %pre98, i32* %pre99, + i32* %pre100, i32* %pre101, i32* %pre102, i32* %pre103, i32* %pre104) + nounwind uwtable ssp { +entry: + br label %for.body + +; imull folded loads should be in order and interleaved with addl, never +; adjacent. Also check that we have no spilling. +; +; Since mmult1 IR is already in good order, this effectively ensures +; the scheduler maintains source order.
+; +; CHECK: %for.body +; CHECK-NOT: %rsp +; CHECK: imull 4 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK: imull 8 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK: imull 12 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK: imull 16 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK: imull 20 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK: imull 24 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK: imull 28 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK: imull 32 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK: imull 36 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK-NOT: {{imull|rsp}} +; CHECK: %end +for.body: + %indvars.iv42.i = phi i64 [ %indvars.iv.next43.i, %for.body ], [ 0, %entry ] + %tmp57 = load i32* %tmp56, align 4 + %arrayidx12.us.i61 = getelementptr inbounds i32* %pre, i64 %indvars.iv42.i + %tmp58 = load i32* %arrayidx12.us.i61, align 4 + %mul.us.i = mul nsw i32 %tmp58, %tmp57 + %arrayidx8.us.i.1 = getelementptr inbounds i32* %tmp56, i64 1 + %tmp59 = load i32* %arrayidx8.us.i.1, align 4 + %arrayidx12.us.i61.1 = getelementptr inbounds i32* %pre94, i64 %indvars.iv42.i + %tmp60 = load i32* %arrayidx12.us.i61.1, align 4 + %mul.us.i.1 = mul nsw i32 %tmp60, %tmp59 + %add.us.i.1 = add nsw i32 %mul.us.i.1, %mul.us.i + %arrayidx8.us.i.2 = getelementptr inbounds i32* %tmp56, i64 2 + %tmp61 = load i32* %arrayidx8.us.i.2, align 4 + %arrayidx12.us.i61.2 = getelementptr inbounds i32* %pre95, i64 %indvars.iv42.i + %tmp62 = load i32* %arrayidx12.us.i61.2, align 4 + %mul.us.i.2 = mul nsw i32 %tmp62, %tmp61 + %add.us.i.2 = add nsw i32 %mul.us.i.2, %add.us.i.1 + %arrayidx8.us.i.3 = getelementptr inbounds i32* %tmp56, i64 3 + %tmp63 = load i32* %arrayidx8.us.i.3, align 4 + %arrayidx12.us.i61.3 = getelementptr inbounds i32* %pre96, i64 %indvars.iv42.i + %tmp64 = load i32* %arrayidx12.us.i61.3, align 4 + %mul.us.i.3 = mul nsw i32 %tmp64, %tmp63 + %add.us.i.3 = add nsw i32 %mul.us.i.3, %add.us.i.2 + %arrayidx8.us.i.4 = getelementptr inbounds i32* %tmp56, i64 4 + %tmp65 = load i32* %arrayidx8.us.i.4, align 4 + %arrayidx12.us.i61.4 = getelementptr inbounds i32* %pre97, i64 %indvars.iv42.i + %tmp66 = load i32* %arrayidx12.us.i61.4, align 4 + %mul.us.i.4 = mul nsw i32 %tmp66, %tmp65 + %add.us.i.4 = add nsw i32 %mul.us.i.4, %add.us.i.3 + %arrayidx8.us.i.5 = getelementptr inbounds i32* %tmp56, i64 5 + %tmp67 = load i32* %arrayidx8.us.i.5, align 4 + %arrayidx12.us.i61.5 = getelementptr inbounds i32* %pre98, i64 %indvars.iv42.i + %tmp68 = load i32* %arrayidx12.us.i61.5, align 4 + %mul.us.i.5 = mul nsw i32 %tmp68, %tmp67 + %add.us.i.5 = add nsw i32 %mul.us.i.5, %add.us.i.4 + %arrayidx8.us.i.6 = getelementptr inbounds i32* %tmp56, i64 6 + %tmp69 = load i32* %arrayidx8.us.i.6, align 4 + %arrayidx12.us.i61.6 = getelementptr inbounds i32* %pre99, i64 %indvars.iv42.i + %tmp70 = load i32* %arrayidx12.us.i61.6, align 4 + %mul.us.i.6 = mul nsw i32 %tmp70, %tmp69 + %add.us.i.6 = add nsw i32 %mul.us.i.6, %add.us.i.5 + %arrayidx8.us.i.7 = getelementptr inbounds i32* %tmp56, i64 7 + %tmp71 = load i32* %arrayidx8.us.i.7, align 4 + %arrayidx12.us.i61.7 = getelementptr inbounds i32* %pre100, i64 %indvars.iv42.i + %tmp72 = load i32* %arrayidx12.us.i61.7, align 4 + %mul.us.i.7 = mul nsw i32 %tmp72, %tmp71 + %add.us.i.7 = add nsw i32 %mul.us.i.7, %add.us.i.6 + %arrayidx8.us.i.8 = getelementptr inbounds i32* %tmp56, i64 8 + %tmp73 = load i32* %arrayidx8.us.i.8, align 4 + %arrayidx12.us.i61.8 = getelementptr inbounds i32* %pre101, i64 %indvars.iv42.i + %tmp74 = load i32* %arrayidx12.us.i61.8, 
align 4 + %mul.us.i.8 = mul nsw i32 %tmp74, %tmp73 + %add.us.i.8 = add nsw i32 %mul.us.i.8, %add.us.i.7 + %arrayidx8.us.i.9 = getelementptr inbounds i32* %tmp56, i64 9 + %tmp75 = load i32* %arrayidx8.us.i.9, align 4 + %arrayidx12.us.i61.9 = getelementptr inbounds i32* %pre102, i64 %indvars.iv42.i + %tmp76 = load i32* %arrayidx12.us.i61.9, align 4 + %mul.us.i.9 = mul nsw i32 %tmp76, %tmp75 + %add.us.i.9 = add nsw i32 %mul.us.i.9, %add.us.i.8 + %arrayidx16.us.i = getelementptr inbounds i32* %tmp55, i64 %indvars.iv42.i + store i32 %add.us.i.9, i32* %arrayidx16.us.i, align 4 + %indvars.iv.next43.i = add i64 %indvars.iv42.i, 1 + %lftr.wideiv = trunc i64 %indvars.iv.next43.i to i32 + %exitcond = icmp eq i32 %lftr.wideiv, 10 + br i1 %exitcond, label %end, label %for.body + +end: + ret void +} + +; Unlike the above loop, this IR starts out bad and must be +; rescheduled. +; +; CHECK: %for.body +; CHECK-NOT: %rsp +; CHECK: imull 4 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK: imull 8 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK: imull 12 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK: imull 16 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK: imull 20 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK: imull 24 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK: imull 28 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK: imull 32 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK: imull 36 +; CHECK-NOT: {{imull|rsp}} +; CHECK: addl +; CHECK-NOT: {{imull|rsp}} +; CHECK: %end +define void @unrolled_mmult2(i32* %tmp55, i32* %tmp56, i32* %pre, i32* %pre94, + i32* %pre95, i32* %pre96, i32* %pre97, i32* %pre98, i32* %pre99, + i32* %pre100, i32* %pre101, i32* %pre102, i32* %pre103, i32* %pre104) + nounwind uwtable ssp { +entry: + br label %for.body +for.body: + %indvars.iv42.i = phi i64 [ %indvars.iv.next43.i, %for.body ], [ 0, %entry ] + %tmp57 = load i32* %tmp56, align 4 + %arrayidx12.us.i61 = getelementptr inbounds i32* %pre, i64 %indvars.iv42.i + %tmp58 = load i32* %arrayidx12.us.i61, align 4 + %arrayidx8.us.i.1 = getelementptr inbounds i32* %tmp56, i64 1 + %tmp59 = load i32* %arrayidx8.us.i.1, align 4 + %arrayidx12.us.i61.1 = getelementptr inbounds i32* %pre94, i64 %indvars.iv42.i + %tmp60 = load i32* %arrayidx12.us.i61.1, align 4 + %arrayidx8.us.i.2 = getelementptr inbounds i32* %tmp56, i64 2 + %tmp61 = load i32* %arrayidx8.us.i.2, align 4 + %arrayidx12.us.i61.2 = getelementptr inbounds i32* %pre95, i64 %indvars.iv42.i + %tmp62 = load i32* %arrayidx12.us.i61.2, align 4 + %arrayidx8.us.i.3 = getelementptr inbounds i32* %tmp56, i64 3 + %tmp63 = load i32* %arrayidx8.us.i.3, align 4 + %arrayidx12.us.i61.3 = getelementptr inbounds i32* %pre96, i64 %indvars.iv42.i + %tmp64 = load i32* %arrayidx12.us.i61.3, align 4 + %arrayidx8.us.i.4 = getelementptr inbounds i32* %tmp56, i64 4 + %tmp65 = load i32* %arrayidx8.us.i.4, align 4 + %arrayidx12.us.i61.4 = getelementptr inbounds i32* %pre97, i64 %indvars.iv42.i + %tmp66 = load i32* %arrayidx12.us.i61.4, align 4 + %arrayidx8.us.i.5 = getelementptr inbounds i32* %tmp56, i64 5 + %tmp67 = load i32* %arrayidx8.us.i.5, align 4 + %arrayidx12.us.i61.5 = getelementptr inbounds i32* %pre98, i64 %indvars.iv42.i + %tmp68 = load i32* %arrayidx12.us.i61.5, align 4 + %arrayidx8.us.i.6 = getelementptr inbounds i32* %tmp56, i64 6 + %tmp69 = load i32* %arrayidx8.us.i.6, align 4 + %arrayidx12.us.i61.6 = getelementptr inbounds i32* %pre99, i64 %indvars.iv42.i + %tmp70 = load i32* %arrayidx12.us.i61.6, align 4 + %mul.us.i = mul nsw i32 %tmp58, %tmp57 + 
%arrayidx8.us.i.7 = getelementptr inbounds i32* %tmp56, i64 7 + %tmp71 = load i32* %arrayidx8.us.i.7, align 4 + %arrayidx12.us.i61.7 = getelementptr inbounds i32* %pre100, i64 %indvars.iv42.i + %tmp72 = load i32* %arrayidx12.us.i61.7, align 4 + %arrayidx8.us.i.8 = getelementptr inbounds i32* %tmp56, i64 8 + %tmp73 = load i32* %arrayidx8.us.i.8, align 4 + %arrayidx12.us.i61.8 = getelementptr inbounds i32* %pre101, i64 %indvars.iv42.i + %tmp74 = load i32* %arrayidx12.us.i61.8, align 4 + %arrayidx8.us.i.9 = getelementptr inbounds i32* %tmp56, i64 9 + %tmp75 = load i32* %arrayidx8.us.i.9, align 4 + %arrayidx12.us.i61.9 = getelementptr inbounds i32* %pre102, i64 %indvars.iv42.i + %tmp76 = load i32* %arrayidx12.us.i61.9, align 4 + %mul.us.i.1 = mul nsw i32 %tmp60, %tmp59 + %add.us.i.1 = add nsw i32 %mul.us.i.1, %mul.us.i + %mul.us.i.2 = mul nsw i32 %tmp62, %tmp61 + %add.us.i.2 = add nsw i32 %mul.us.i.2, %add.us.i.1 + %mul.us.i.3 = mul nsw i32 %tmp64, %tmp63 + %add.us.i.3 = add nsw i32 %mul.us.i.3, %add.us.i.2 + %mul.us.i.4 = mul nsw i32 %tmp66, %tmp65 + %add.us.i.4 = add nsw i32 %mul.us.i.4, %add.us.i.3 + %mul.us.i.5 = mul nsw i32 %tmp68, %tmp67 + %add.us.i.5 = add nsw i32 %mul.us.i.5, %add.us.i.4 + %mul.us.i.6 = mul nsw i32 %tmp70, %tmp69 + %add.us.i.6 = add nsw i32 %mul.us.i.6, %add.us.i.5 + %mul.us.i.7 = mul nsw i32 %tmp72, %tmp71 + %add.us.i.7 = add nsw i32 %mul.us.i.7, %add.us.i.6 + %mul.us.i.8 = mul nsw i32 %tmp74, %tmp73 + %add.us.i.8 = add nsw i32 %mul.us.i.8, %add.us.i.7 + %mul.us.i.9 = mul nsw i32 %tmp76, %tmp75 + %add.us.i.9 = add nsw i32 %mul.us.i.9, %add.us.i.8 + %arrayidx16.us.i = getelementptr inbounds i32* %tmp55, i64 %indvars.iv42.i + store i32 %add.us.i.9, i32* %arrayidx16.us.i, align 4 + %indvars.iv.next43.i = add i64 %indvars.iv42.i, 1 + %lftr.wideiv = trunc i64 %indvars.iv.next43.i to i32 + %exitcond = icmp eq i32 %lftr.wideiv, 10 + br i1 %exitcond, label %end, label %for.body + +end: + ret void +} diff --git a/test/CodeGen/X86/misched-ilp.ll b/test/CodeGen/X86/misched-ilp.ll new file mode 100644 index 0000000..c6cedb7 --- /dev/null +++ b/test/CodeGen/X86/misched-ilp.ll @@ -0,0 +1,25 @@ +; RUN: llc < %s -mtriple=x86_64-apple-macosx -mcpu=core2 -enable-misched -misched=ilpmax | FileCheck -check-prefix=MAX %s +; RUN: llc < %s -mtriple=x86_64-apple-macosx -mcpu=core2 -enable-misched -misched=ilpmin | FileCheck -check-prefix=MIN %s +; +; Basic verification of the ScheduleDAGILP metric. +; +; MAX: addss +; MAX: addss +; MAX: addss +; MAX: subss +; MAX: addss +; +; MIN: addss +; MIN: addss +; MIN: subss +; MIN: addss +; MIN: addss +define float @ilpsched(float %a, float %b, float %c, float %d, float %e, float %f) nounwind uwtable readnone ssp { +entry: + %add = fadd float %a, %b + %add1 = fadd float %c, %d + %add2 = fadd float %e, %f + %add3 = fsub float %add1, %add2 + %add4 = fadd float %add, %add3 + ret float %add4 +} diff --git a/test/CodeGen/X86/misched-new.ll b/test/CodeGen/X86/misched-new.ll index 8f2f6f7..cec04b5 100644 --- a/test/CodeGen/X86/misched-new.ll +++ b/test/CodeGen/X86/misched-new.ll @@ -1,4 +1,6 @@ -; RUN: llc -march=x86-64 -mcpu=core2 -enable-misched -misched=shuffle -misched-bottomup < %s +; RUN: llc < %s -march=x86-64 -mcpu=core2 -x86-early-ifcvt -enable-misched \ +; RUN: -misched=shuffle -misched-bottomup -verify-machineinstrs \ +; RUN: | FileCheck %s ; REQUIRES: asserts ; ; Interesting MachineScheduler cases. 
@@ -25,3 +27,27 @@ for.cond.preheader: ; preds = %entry if.end: ; preds = %entry ret void } + +; The machine verifier checks that EFLAGS kill flags are updated when +; the scheduler reorders cmovel instructions. +; +; CHECK: test +; CHECK: cmovel +; CHECK: cmovel +; CHECK: call +define void @foo(i32 %b) nounwind uwtable ssp { +entry: + %tobool = icmp ne i32 %b, 0 + br i1 %tobool, label %if.then, label %if.end + +if.then: ; preds = %entry + br label %if.end + +if.end: ; preds = %if.then, %entry + %v1 = phi i32 [1, %entry], [2, %if.then] + %v2 = phi i32 [3, %entry], [4, %if.then] + call void @bar(i32 %v1, i32 %v2) + ret void +} + +declare void @bar(i32,i32) diff --git a/test/CodeGen/X86/mmx-builtins.ll b/test/CodeGen/X86/mmx-builtins.ll index 8b7200d..a8d33f4 100644 --- a/test/CodeGen/X86/mmx-builtins.ll +++ b/test/CodeGen/X86/mmx-builtins.ll @@ -1043,6 +1043,20 @@ entry: ret i64 %5 } +define i32 @test21_2(<1 x i64> %a) nounwind readnone optsize ssp { +; CHECK: test21_2 +; CHECK: pshufw +; CHECK: movd +entry: + %0 = bitcast <1 x i64> %a to <4 x i16> + %1 = bitcast <4 x i16> %0 to x86_mmx + %2 = tail call x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx %1, i8 3) nounwind readnone + %3 = bitcast x86_mmx %2 to <4 x i16> + %4 = bitcast <4 x i16> %3 to <2 x i32> + %5 = extractelement <2 x i32> %4, i32 0 + ret i32 %5 +} + declare x86_mmx @llvm.x86.mmx.pmulu.dq(x86_mmx, x86_mmx) nounwind readnone define i64 @test20(<1 x i64> %a, <1 x i64> %b) nounwind readnone optsize ssp { diff --git a/test/CodeGen/X86/ms-inline-asm.ll b/test/CodeGen/X86/ms-inline-asm.ll new file mode 100644 index 0000000..24d28ad --- /dev/null +++ b/test/CodeGen/X86/ms-inline-asm.ll @@ -0,0 +1,63 @@ +; RUN: llc < %s -march=x86 | FileCheck %s + +define i32 @t1() nounwind { +entry: + %0 = tail call i32 asm sideeffect inteldialect "mov eax, $1\0A\09mov $0, eax", "=r,r,~{eax},~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind + ret i32 %0 +; CHECK: t1 +; CHECK: {{## InlineAsm Start|#APP}} +; CHECK: .intel_syntax +; CHECK: mov eax, ecx +; CHECK: mov ecx, eax +; CHECK: .att_syntax +; CHECK: {{## InlineAsm End|#NO_APP}} +} + +define void @t2() nounwind { +entry: + call void asm sideeffect inteldialect "mov eax, $$1", "~{eax},~{dirflag},~{fpsr},~{flags}"() nounwind + ret void +; CHECK: t2 +; CHECK: {{## InlineAsm Start|#APP}} +; CHECK: .intel_syntax +; CHECK: mov eax, 1 +; CHECK: .att_syntax +; CHECK: {{## InlineAsm End|#NO_APP}} +} + +define void @t3(i32 %V) nounwind { +entry: + %V.addr = alloca i32, align 4 + store i32 %V, i32* %V.addr, align 4 + call void asm sideeffect inteldialect "mov eax, DWORD PTR [$0]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %V.addr) nounwind + ret void +; CHECK: t3 +; CHECK: {{## InlineAsm Start|#APP}} +; CHECK: .intel_syntax +; CHECK: mov eax, DWORD PTR {{[[esp]}} +; CHECK: .att_syntax +; CHECK: {{## InlineAsm End|#NO_APP}} +} + +%struct.t18_type = type { i32, i32 } + +define i32 @t18() nounwind { +entry: + %foo = alloca %struct.t18_type, align 4 + %a = getelementptr inbounds %struct.t18_type* %foo, i32 0, i32 0 + store i32 1, i32* %a, align 4 + %b = getelementptr inbounds %struct.t18_type* %foo, i32 0, i32 1 + store i32 2, i32* %b, align 4 + call void asm sideeffect inteldialect "lea ebx, foo\0A\09mov eax, [ebx].0\0A\09mov [ebx].4, ecx", "~{eax},~{dirflag},~{fpsr},~{flags}"() nounwind + %b1 = getelementptr inbounds %struct.t18_type* %foo, i32 0, i32 1 + %0 = load i32* %b1, align 4 + ret i32 %0 +; CHECK: t18 +; CHECK: {{## InlineAsm Start|#APP}} +; CHECK: .intel_syntax +; CHECK: lea ebx, foo +; CHECK: mov eax, [ebx].0 
+; CHECK: mov [ebx].4, ecx +; CHECK: .att_syntax +; CHECK: {{## InlineAsm End|#NO_APP}} +} diff --git a/test/CodeGen/X86/mulx32.ll b/test/CodeGen/X86/mulx32.ll new file mode 100644 index 0000000..b75ac00 --- /dev/null +++ b/test/CodeGen/X86/mulx32.ll @@ -0,0 +1,22 @@ +; RUN: llc -mcpu=core-avx2 -march=x86 < %s | FileCheck %s + +define i64 @f1(i32 %a, i32 %b) { + %x = zext i32 %a to i64 + %y = zext i32 %b to i64 + %r = mul i64 %x, %y +; CHECK: f1 +; CHECK: mulxl +; CHECK: ret + ret i64 %r +} + +define i64 @f2(i32 %a, i32* %p) { + %b = load i32* %p + %x = zext i32 %a to i64 + %y = zext i32 %b to i64 + %r = mul i64 %x, %y +; CHECK: f2 +; CHECK: mulxl ({{.+}}), %{{.+}}, %{{.+}} +; CHECK: ret + ret i64 %r +} diff --git a/test/CodeGen/X86/mulx64.ll b/test/CodeGen/X86/mulx64.ll new file mode 100644 index 0000000..d573028 --- /dev/null +++ b/test/CodeGen/X86/mulx64.ll @@ -0,0 +1,22 @@ +; RUN: llc -mcpu=core-avx2 -march=x86-64 < %s | FileCheck %s + +define i128 @f1(i64 %a, i64 %b) { + %x = zext i64 %a to i128 + %y = zext i64 %b to i128 + %r = mul i128 %x, %y +; CHECK: f1 +; CHECK: mulxq +; CHECK: ret + ret i128 %r +} + +define i128 @f2(i64 %a, i64* %p) { + %b = load i64* %p + %x = zext i64 %a to i128 + %y = zext i64 %b to i128 + %r = mul i128 %x, %y +; CHECK: f2 +; CHECK: mulxq ({{.+}}), %{{.+}}, %{{.+}} +; CHECK: ret + ret i128 %r +} diff --git a/test/CodeGen/X86/phys_subreg_coalesce-3.ll b/test/CodeGen/X86/phys_subreg_coalesce-3.ll index 984d7e5..2a20e7a 100644 --- a/test/CodeGen/X86/phys_subreg_coalesce-3.ll +++ b/test/CodeGen/X86/phys_subreg_coalesce-3.ll @@ -1,14 +1,10 @@ -; RUN: llc < %s -mtriple=i386-apple-darwin | FileCheck %s -; XFAIL: * +; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=corei7 | FileCheck %s ; rdar://5571034 ; This requires physreg joining, %vreg13 is live everywhere: ; 304L %CL<def> = COPY %vreg13:sub_8bit; GR32_ABCD:%vreg13 ; 320L %vreg15<def> = COPY %vreg19; GR32:%vreg15 GR32_NOSP:%vreg19 ; 336L %vreg15<def> = SAR32rCL %vreg15, %EFLAGS<imp-def,dead>, %CL<imp-use,kill>; GR32:%vreg15 -; -; This test is XFAIL until the register allocator understands trivial physreg -; interference. <rdar://9802098> define void @foo(i32* nocapture %quadrant, i32* nocapture %ptr, i32 %bbSize, i32 %bbStart, i32 %shifts) nounwind ssp { ; CHECK: foo: diff --git a/test/CodeGen/X86/pic_jumptable.ll b/test/CodeGen/X86/pic_jumptable.ll index 8c16dc6..bdd8859 100644 --- a/test/CodeGen/X86/pic_jumptable.ll +++ b/test/CodeGen/X86/pic_jumptable.ll @@ -1,5 +1,7 @@ ; RUN: llc < %s -relocation-model=pic -mtriple=i386-linux-gnu -asm-verbose=false \ ; RUN: | FileCheck %s --check-prefix=CHECK-LINUX +; RUN: llc < %s -relocation-model=pic -mark-data-regions -mtriple=i686-apple-darwin -asm-verbose=false \ +; RUN: | FileCheck %s --check-prefix=CHECK-DATA ; RUN: llc < %s -relocation-model=pic -mtriple=i686-apple-darwin -asm-verbose=false \ ; RUN: | FileCheck %s ; RUN: llc < %s -mtriple=x86_64-apple-darwin | not grep 'lJTI' @@ -16,6 +18,16 @@ entry: ; CHECK: Ltmp0 = LJTI0_0-L0$pb ; CHECK-NEXT: addl Ltmp0(%eax,%ecx,4) ; CHECK-NEXT: jmpl *%eax + +;; When data-in-code markers are enabled, we should see them around the jump +;; table. +; CHECK-DATA: .data_region jt32 +; CHECK-DATA: LJTI0_0 +; CHECK-DATA: .end_data_region + +;; When they're not enabled, make sure we don't see them at all. 
+; CHECK-NOT: .data_region +; CHECK-LINUX-NOT: .data_region %Y_addr = alloca i32 ; <i32*> [#uses=2] %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0] store i32 %Y, i32* %Y_addr diff --git a/test/CodeGen/X86/pmovext.ll b/test/CodeGen/X86/pmovext.ll new file mode 100644 index 0000000..16e9c28 --- /dev/null +++ b/test/CodeGen/X86/pmovext.ll @@ -0,0 +1,22 @@ +; RUN: llc < %s -march=x86-64 -mcpu=corei7 | FileCheck %s + +; rdar://11897677 + +;CHECK: intrin_pmov +;CHECK: pmovzxbw (%{{.*}}), %xmm0 +;CHECK-NEXT: movdqu +;CHECK-NEXT: ret +define void @intrin_pmov(i16* noalias %dest, i8* noalias %src) nounwind uwtable ssp { + %1 = bitcast i8* %src to <2 x i64>* + %2 = load <2 x i64>* %1, align 16 + %3 = bitcast <2 x i64> %2 to <16 x i8> + %4 = tail call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %3) nounwind + %5 = bitcast i16* %dest to i8* + %6 = bitcast <8 x i16> %4 to <16 x i8> + tail call void @llvm.x86.sse2.storeu.dq(i8* %5, <16 x i8> %6) nounwind + ret void +} + +declare <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8>) nounwind readnone + +declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind diff --git a/test/CodeGen/X86/pointer-vector.ll b/test/CodeGen/X86/pointer-vector.ll index 800fbed..58423d1 100644 --- a/test/CodeGen/X86/pointer-vector.ll +++ b/test/CodeGen/X86/pointer-vector.ll @@ -81,8 +81,7 @@ define <4 x i32*> @INT2PTR1(<4 x i8>* %p) nounwind { entry: %G = load <4 x i8>* %p ;CHECK: movl -;CHECK: movd -;CHECK: pshufb +;CHECK: pmovzxbd ;CHECK: pand %K = inttoptr <4 x i8> %G to <4 x i32*> ;CHECK: ret @@ -105,7 +104,7 @@ define <2 x i32*> @BITCAST1(<2 x i8*>* %p) nounwind { entry: %G = load <2 x i8*>* %p ;CHECK: movl -;CHECK: movsd +;CHECK: pmovzxdq %T = bitcast <2 x i8*> %G to <2 x i32*> ;CHECK: ret ret <2 x i32*> %T diff --git a/test/CodeGen/X86/pr11334.ll b/test/CodeGen/X86/pr11334.ll index 5b7b5ea..e7e29e0 100644 --- a/test/CodeGen/X86/pr11334.ll +++ b/test/CodeGen/X86/pr11334.ll @@ -54,3 +54,11 @@ entry: %f1 = fpext <8 x float> %v1 to <8 x double> ret <8 x double> %f1 } + +define void @test_vector_creation() nounwind { + %1 = insertelement <4 x double> undef, double 0.000000e+00, i32 2 + %2 = load double addrspace(1)* null + %3 = insertelement <4 x double> %1, double %2, i32 3 + store <4 x double> %3, <4 x double>* undef + ret void +} diff --git a/test/CodeGen/X86/pr11985.ll b/test/CodeGen/X86/pr11985.ll new file mode 100644 index 0000000..fa37850 --- /dev/null +++ b/test/CodeGen/X86/pr11985.ll @@ -0,0 +1,19 @@ +; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=prescott | FileCheck %s + +define float @foo(i8* nocapture %buf, float %a, float %b) nounwind uwtable { +entry: + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %buf, i8* blockaddress(@foo, %out), i64 22, i32 1, i1 false) + br label %out + +out: ; preds = %entry + %add = fadd float %a, %b + ret float %add +; CHECK: foo +; CHECK: movw .L{{.*}}+20(%rip), %{{.*}} +; CHECK: movl .L{{.*}}+16(%rip), %{{.*}} +; CHECK: movq .L{{.*}}+8(%rip), %{{.*}} +; CHECK: movq .L{{.*}}(%rip), %{{.*}} +; CHECK: ret +} + +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind diff --git a/test/CodeGen/X86/pr12312.ll b/test/CodeGen/X86/pr12312.ll new file mode 100644 index 0000000..087b8d7 --- /dev/null +++ b/test/CodeGen/X86/pr12312.ll @@ -0,0 +1,155 @@ +; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse41,-avx < %s | FileCheck %s --check-prefix SSE41 +; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx,-avx2 < %s | FileCheck %s --check-prefix AVX + +define i32 @veccond128(<4 x i32> %input) { 
+entry: + %0 = bitcast <4 x i32> %input to i128 + %1 = icmp ne i128 %0, 0 + br i1 %1, label %if-true-block, label %endif-block + +if-true-block: ; preds = %entry + ret i32 0 +endif-block: ; preds = %entry, + ret i32 1 +; SSE41: veccond128 +; SSE41: ptest +; SSE41: ret +; AVX: veccond128 +; AVX: vptest %xmm{{.*}}, %xmm{{.*}} +; AVX: ret +} + +define i32 @veccond256(<8 x i32> %input) { +entry: + %0 = bitcast <8 x i32> %input to i256 + %1 = icmp ne i256 %0, 0 + br i1 %1, label %if-true-block, label %endif-block + +if-true-block: ; preds = %entry + ret i32 0 +endif-block: ; preds = %entry, + ret i32 1 +; SSE41: veccond256 +; SSE41: por +; SSE41: ptest +; SSE41: ret +; AVX: veccond256 +; AVX: vptest %ymm{{.*}}, %ymm{{.*}} +; AVX: ret +} + +define i32 @veccond512(<16 x i32> %input) { +entry: + %0 = bitcast <16 x i32> %input to i512 + %1 = icmp ne i512 %0, 0 + br i1 %1, label %if-true-block, label %endif-block + +if-true-block: ; preds = %entry + ret i32 0 +endif-block: ; preds = %entry, + ret i32 1 +; SSE41: veccond512 +; SSE41: por +; SSE41: por +; SSE41: por +; SSE41: ptest +; SSE41: ret +; AVX: veccond512 +; AVX: vorps +; AVX: vptest %ymm{{.*}}, %ymm{{.*}} +; AVX: ret +} + +define i32 @vectest128(<4 x i32> %input) { +entry: + %0 = bitcast <4 x i32> %input to i128 + %1 = icmp ne i128 %0, 0 + %2 = zext i1 %1 to i32 + ret i32 %2 +; SSE41: vectest128 +; SSE41: ptest +; SSE41: ret +; AVX: vectest128 +; AVX: vptest %xmm{{.*}}, %xmm{{.*}} +; AVX: ret +} + +define i32 @vectest256(<8 x i32> %input) { +entry: + %0 = bitcast <8 x i32> %input to i256 + %1 = icmp ne i256 %0, 0 + %2 = zext i1 %1 to i32 + ret i32 %2 +; SSE41: vectest256 +; SSE41: por +; SSE41: ptest +; SSE41: ret +; AVX: vectest256 +; AVX: vptest %ymm{{.*}}, %ymm{{.*}} +; AVX: ret +} + +define i32 @vectest512(<16 x i32> %input) { +entry: + %0 = bitcast <16 x i32> %input to i512 + %1 = icmp ne i512 %0, 0 + %2 = zext i1 %1 to i32 + ret i32 %2 +; SSE41: vectest512 +; SSE41: por +; SSE41: por +; SSE41: por +; SSE41: ptest +; SSE41: ret +; AVX: vectest512 +; AVX: vorps +; AVX: vptest %ymm{{.*}}, %ymm{{.*}} +; AVX: ret +} + +define i32 @vecsel128(<4 x i32> %input, i32 %a, i32 %b) { +entry: + %0 = bitcast <4 x i32> %input to i128 + %1 = icmp ne i128 %0, 0 + %2 = select i1 %1, i32 %a, i32 %b + ret i32 %2 +; SSE41: vecsel128 +; SSE41: ptest +; SSE41: ret +; AVX: vecsel128 +; AVX: vptest %xmm{{.*}}, %xmm{{.*}} +; AVX: ret +} + +define i32 @vecsel256(<8 x i32> %input, i32 %a, i32 %b) { +entry: + %0 = bitcast <8 x i32> %input to i256 + %1 = icmp ne i256 %0, 0 + %2 = select i1 %1, i32 %a, i32 %b + ret i32 %2 +; SSE41: vecsel256 +; SSE41: por +; SSE41: ptest +; SSE41: ret +; AVX: vecsel256 +; AVX: vptest %ymm{{.*}}, %ymm{{.*}} +; AVX: ret +} + +define i32 @vecsel512(<16 x i32> %input, i32 %a, i32 %b) { +entry: + %0 = bitcast <16 x i32> %input to i512 + %1 = icmp ne i512 %0, 0 + %2 = select i1 %1, i32 %a, i32 %b + ret i32 %2 +; SSE41: vecsel512 +; SSE41: por +; SSE41: por +; SSE41: por +; SSE41: ptest +; SSE41: ret +; AVX: vecsel512 +; AVX: vorps +; AVX: vptest %ymm{{.*}}, %ymm{{.*}} +; AVX: ret +} diff --git a/test/CodeGen/X86/pr12359.ll b/test/CodeGen/X86/pr12359.ll new file mode 100644 index 0000000..024b163 --- /dev/null +++ b/test/CodeGen/X86/pr12359.ll @@ -0,0 +1,10 @@ +; RUN: llc -asm-verbose -mtriple=x86_64-unknown-unknown -mcpu=corei7 < %s | FileCheck %s +define <16 x i8> @shuf(<16 x i8> %inval1) { +entry: + %0 = shufflevector <16 x i8> %inval1, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 4, i32 3, i32 2, i32 16, i32 16, i32 3, i32 4, i32 
0, i32 4, i32 3, i32 2, i32 16, i32 16, i32 3, i32 4> + ret <16 x i8> %0 +; CHECK: shuf +; CHECK: # BB#0: # %entry +; CHECK-NEXT: pshufb +; CHECK-NEXT: ret +} diff --git a/test/CodeGen/X86/pr13458.ll b/test/CodeGen/X86/pr13458.ll new file mode 100644 index 0000000..55548b3 --- /dev/null +++ b/test/CodeGen/X86/pr13458.ll @@ -0,0 +1,14 @@ +; RUN: llc < %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-darwin11.4.2" + +%v8_uniform_Stats.0.2.4.10 = type { i64, i64, i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i64, i32, i32, i32, i32, i32, i32, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i32, i64, [7 x i32], [7 x i64] } + +@globalStats = external global %v8_uniform_Stats.0.2.4.10 + +define void @MergeStats() nounwind { +allocas: + %r.i.i720 = atomicrmw max i64* getelementptr inbounds (%v8_uniform_Stats.0.2.4.10* @globalStats, i64 0, i32 30), i64 0 seq_cst + ret void +} diff --git a/test/CodeGen/X86/pr13859.ll b/test/CodeGen/X86/pr13859.ll new file mode 100644 index 0000000..719721d --- /dev/null +++ b/test/CodeGen/X86/pr13859.ll @@ -0,0 +1,28 @@ +; RUN: llc < %s +target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128" +target triple = "i386-apple-macosx10.7.0" + +define void @_Z17FilterYUVRows_MMXi(i32 %af) nounwind ssp { +entry: + %aMyAlloca = alloca i32, align 32 + %dest = alloca <1 x i64>, align 32 + + %a32 = load i32* %aMyAlloca, align 4 + %aconv = trunc i32 %a32 to i16 + %a36 = insertelement <4 x i16> undef, i16 %aconv, i32 0 + %a37 = insertelement <4 x i16> %a36, i16 %aconv, i32 1 + %a38 = insertelement <4 x i16> %a37, i16 %aconv, i32 2 + %a39 = insertelement <4 x i16> %a38, i16 %aconv, i32 3 + %a40 = bitcast <4 x i16> %a39 to x86_mmx + %a41 = bitcast x86_mmx %a40 to <1 x i64> + + %a47 = trunc i32 %a32 to i1 + br i1 %a47, label %a48, label %a49 + +a48: + unreachable + +a49: + store <1 x i64> %a41, <1 x i64>* %dest, align 8 ; !!! 
+ ret void +} diff --git a/test/CodeGen/X86/pr13899.ll b/test/CodeGen/X86/pr13899.ll new file mode 100644 index 0000000..bc81e34 --- /dev/null +++ b/test/CodeGen/X86/pr13899.ll @@ -0,0 +1,58 @@ +; RUN: llc < %s -mtriple=i386-pc-win32 -mcpu=corei7 | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 | FileCheck %s --check-prefix=X64 + +; ModuleID = 'a.bc' +target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:128:128-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S32" +target triple = "i386-pc-win32" + +%v4_varying_big_struct = type { [4 x <4 x i32>] } + +declare <4 x i32> @"foo"(%v4_varying_big_struct, <4 x i32>) nounwind + +define <4 x i32> @"bar"(%v4_varying_big_struct %s, <4 x i32> %__mask) nounwind { +allocas: + %calltmp = call <4 x i32> @"foo"(%v4_varying_big_struct %s, <4 x i32> %__mask) + ret <4 x i32> %calltmp +; CHECK: bar +; CHECK: andl +; CHECK: call +; CHECK: ret +} + +declare <8 x float> @bar64(<8 x float> %i0, <8 x float> %i1, + <8 x float> %i2, <8 x float> %i3, + <8 x float> %i4, <8 x float> %i5, + <8 x float> %i6, <8 x float> %i7, + <8 x float> %i8, <8 x float> %i9) + +define <8 x float> @foo64(<8 x float>* %p) { + %1 = load <8 x float>* %p + %idx1 = getelementptr inbounds <8 x float>* %p, i64 1 + %2 = load <8 x float>* %idx1 + %idx2 = getelementptr inbounds <8 x float>* %p, i64 2 + %3 = load <8 x float>* %idx2 + %idx3 = getelementptr inbounds <8 x float>* %p, i64 3 + %4 = load <8 x float>* %idx3 + %idx4 = getelementptr inbounds <8 x float>* %p, i64 4 + %5 = load <8 x float>* %idx4 + %idx5 = getelementptr inbounds <8 x float>* %p, i64 5 + %6 = load <8 x float>* %idx5 + %idx6 = getelementptr inbounds <8 x float>* %p, i64 6 + %7 = load <8 x float>* %idx6 + %idx7 = getelementptr inbounds <8 x float>* %p, i64 7 + %8 = load <8 x float>* %idx7 + %idx8 = getelementptr inbounds <8 x float>* %p, i64 8 + %9 = load <8 x float>* %idx8 + %idx9 = getelementptr inbounds <8 x float>* %p, i64 9 + %10 = load <8 x float>* %idx9 + %r = tail call <8 x float> @bar64(<8 x float> %1, <8 x float> %2, + <8 x float> %3, <8 x float> %4, + <8 x float> %5, <8 x float> %6, + <8 x float> %7, <8 x float> %8, + <8 x float> %9, <8 x float> %10) + ret <8 x float> %r +; X64: foo +; X64: and +; X64: call +; X64: ret +} diff --git a/test/CodeGen/X86/pr14088.ll b/test/CodeGen/X86/pr14088.ll new file mode 100644 index 0000000..505e3b5 --- /dev/null +++ b/test/CodeGen/X86/pr14088.ll @@ -0,0 +1,25 @@ +; RUN: llc -mtriple x86_64-linux -mcpu core2 -verify-machineinstrs %s -o - | FileCheck %s +define i32 @f(i1 %foo, i16* %tm_year2, i8* %bar, i16 %zed, i32 %zed2) { +entry: + br i1 %foo, label %return, label %if.end + +if.end: + %rem = srem i32 %zed2, 100 + %conv3 = trunc i32 %rem to i16 + store i16 %conv3, i16* %tm_year2 + %sext = shl i32 %rem, 16 + %conv5 = ashr exact i32 %sext, 16 + %div = sdiv i32 %conv5, 10 + %conv6 = trunc i32 %div to i8 + store i8 %conv6, i8* %bar + br label %return + +return: + %retval.0 = phi i32 [ 0, %if.end ], [ -1, %entry ] + ret i32 %retval.0 +} + +; We were miscompiling this and using %ax instead of %cx in the movw. 
+; CHECK: movswl %cx, %ecx +; CHECK: movw %cx, (%rsi) +; CHECK: movslq %ecx, %rcx diff --git a/test/CodeGen/X86/pr14090.ll b/test/CodeGen/X86/pr14090.ll new file mode 100644 index 0000000..d76b912 --- /dev/null +++ b/test/CodeGen/X86/pr14090.ll @@ -0,0 +1,76 @@ +; RUN: llc < %s -march=x86-64 -print-before=stack-coloring -print-after=stack-coloring >%t 2>&1 && FileCheck <%t %s + +define void @foo(i64* %retval.i, i32 %call, i32* %.ph.i80, i32 %fourteen, i32* %out.lo, i32* %out.hi) nounwind align 2 { +entry: + %_Tmp.i39 = alloca i64, align 8 + %retval.i33 = alloca i64, align 8 + %_Tmp.i = alloca i64, align 8 + %retval.i.i = alloca i64, align 8 + %_First.i = alloca i64, align 8 + + %0 = load i64* %retval.i, align 8 + + %1 = load i64* %retval.i, align 8 + + %_Tmp.i39.0.cast73 = bitcast i64* %_Tmp.i39 to i8* + call void @llvm.lifetime.start(i64 8, i8* %_Tmp.i39.0.cast73) + store i64 %1, i64* %_Tmp.i39, align 8 + %cmp.i.i.i40 = icmp slt i32 %call, 0 + %2 = lshr i64 %1, 32 + %3 = trunc i64 %2 to i32 + %sub.i.i.i44 = sub i32 0, %call + %cmp2.i.i.i45 = icmp ult i32 %3, %sub.i.i.i44 + %or.cond.i.i.i46 = and i1 %cmp.i.i.i40, %cmp2.i.i.i45 + %add.i.i.i47 = add i32 %3, %call + %sub5.i.i.i48 = lshr i32 %add.i.i.i47, 5 + %trunc.i50 = trunc i64 %1 to i32 + %inttoptr.i51 = inttoptr i32 %trunc.i50 to i32* + %add61617.i.i.i52 = or i32 %sub5.i.i.i48, -134217728 + %add61617.i.sub5.i.i.i53 = select i1 %or.cond.i.i.i46, i32 %add61617.i.i.i52, i32 %sub5.i.i.i48 + %storemerge2.i.i54 = getelementptr inbounds i32* %inttoptr.i51, i32 %add61617.i.sub5.i.i.i53 + %_Tmp.i39.0.cast74 = bitcast i64* %_Tmp.i39 to i32** + store i32* %storemerge2.i.i54, i32** %_Tmp.i39.0.cast74, align 8 + %storemerge.i.i55 = and i32 %add.i.i.i47, 31 + %_Tmp.i39.4.raw_idx = getelementptr inbounds i8* %_Tmp.i39.0.cast73, i32 4 + %_Tmp.i39.4.cast = bitcast i8* %_Tmp.i39.4.raw_idx to i32* + store i32 %storemerge.i.i55, i32* %_Tmp.i39.4.cast, align 4 + %srcval.i56 = load i64* %_Tmp.i39, align 8 + call void @llvm.lifetime.end(i64 8, i8* %_Tmp.i39.0.cast73) + +; CHECK: Before Merge disjoint stack slots +; CHECK: [[PREFIX15:MOV64mr.*<fi#]]{{[0-9]}}[[SUFFIX15:.*;]] mem:ST8[%fifteen] +; CHECK: [[PREFIX87:MOV32mr.*;]] mem:ST4[%sunkaddr87] + +; CHECK: After Merge disjoint stack slots +; CHECK: [[PREFIX15]]{{[0-9]}}[[SUFFIX15]] mem:ST8[%_Tmp.i39] +; CHECK: [[PREFIX87]] mem:ST4[<unknown>] + + %fifteen = bitcast i64* %retval.i.i to i32** + %sixteen = bitcast i64* %retval.i.i to i8* + call void @llvm.lifetime.start(i64 8, i8* %sixteen) + store i32* %.ph.i80, i32** %fifteen, align 8, !tbaa !0 + %sunkaddr = ptrtoint i64* %retval.i.i to i32 + %sunkaddr86 = add i32 %sunkaddr, 4 + %sunkaddr87 = inttoptr i32 %sunkaddr86 to i32* + store i32 %fourteen, i32* %sunkaddr87, align 4, !tbaa !3 + %seventeen = load i64* %retval.i.i, align 8 + call void @llvm.lifetime.end(i64 8, i8* %sixteen) + %eighteen = lshr i64 %seventeen, 32 + %nineteen = trunc i64 %eighteen to i32 + %shl.i.i.i = shl i32 1, %nineteen + + store i32 %shl.i.i.i, i32* %out.lo, align 8 + store i32 %nineteen, i32* %out.hi, align 8 + + ret void +} + +declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind + +declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind + +!0 = metadata !{metadata !"int", metadata !1} +!1 = metadata !{metadata !"omnipotent char", metadata !2} +!2 = metadata !{metadata !"Simple C/C++ TBAA"} +!3 = metadata !{metadata !"any pointer", metadata !1} +!4 = metadata !{metadata !"vtable pointer", metadata !2} diff --git a/test/CodeGen/X86/pr14098.ll b/test/CodeGen/X86/pr14098.ll 
new file mode 100644 index 0000000..6ce2449 --- /dev/null +++ b/test/CodeGen/X86/pr14098.ll @@ -0,0 +1,23 @@ +; RUN: llc -mtriple i386-unknown-linux-gnu -relocation-model=pic -verify-machineinstrs < %s +; We used to crash on this. + +declare void @foo() +declare void @foo3(i1 %x) +define void @bar(i1 %a1, i16 %a2) nounwind align 2 { +bb0: + %a3 = trunc i16 %a2 to i8 + %a4 = lshr i16 %a2, 8 + %a5 = trunc i16 %a4 to i8 + br i1 %a1, label %bb1, label %bb2 +bb1: + br label %bb2 +bb2: + %a6 = phi i8 [ 3, %bb0 ], [ %a5, %bb1 ] + %a7 = phi i8 [ 9, %bb0 ], [ %a3, %bb1 ] + %a8 = icmp eq i8 %a6, 1 + call void @foo() + %a9 = icmp eq i8 %a7, 0 + call void @foo3(i1 %a9) + call void @foo3(i1 %a8) + ret void +} diff --git a/test/CodeGen/X86/pr14161.ll b/test/CodeGen/X86/pr14161.ll new file mode 100644 index 0000000..ff4532e --- /dev/null +++ b/test/CodeGen/X86/pr14161.ll @@ -0,0 +1,38 @@ +; RUN: llc < %s -mtriple=x86_64-linux-pc -mcpu=corei7 | FileCheck %s + +declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) + +define <2 x i16> @good(<4 x i32>*, <4 x i8>*) { +entry: + %2 = load <4 x i32>* %0, align 16 + %3 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %2, <4 x i32> <i32 127, i32 127, i32 127, i32 127>) + %4 = extractelement <4 x i32> %3, i32 0 + %5 = extractelement <4 x i32> %3, i32 1 + %6 = extractelement <4 x i32> %3, i32 2 + %7 = extractelement <4 x i32> %3, i32 3 + %8 = bitcast i32 %4 to <2 x i16> + %9 = bitcast i32 %5 to <2 x i16> + ret <2 x i16> %8 +; CHECK: good +; CHECK: pminud +; CHECK-NEXT: pmovzxwq +; CHECK: ret +} + +define <2 x i16> @bad(<4 x i32>*, <4 x i8>*) { +entry: + %2 = load <4 x i32>* %0, align 16 + %3 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %2, <4 x i32> <i32 127, i32 127, i32 127, i32 127>) + %4 = extractelement <4 x i32> %3, i32 0 + %5 = extractelement <4 x i32> %3, i32 1 + %6 = extractelement <4 x i32> %3, i32 2 + %7 = extractelement <4 x i32> %3, i32 3 + %8 = bitcast i32 %4 to <2 x i16> + %9 = bitcast i32 %5 to <2 x i16> + ret <2 x i16> %9 +; CHECK: bad +; CHECK: pminud +; CHECK: pextrd +; CHECK: pmovzxwq +; CHECK: ret +} diff --git a/test/CodeGen/X86/pr14204.ll b/test/CodeGen/X86/pr14204.ll new file mode 100644 index 0000000..42e362b --- /dev/null +++ b/test/CodeGen/X86/pr14204.ll @@ -0,0 +1,15 @@ +; RUN: llc < %s -mtriple=x86_64-linux-pc -mcpu=core-avx2 | FileCheck %s + +; FIXME: vpmovsxwd should be generated instead of vpmovzxwd followed by +; SLL/SRA. 
+ +define <8 x i32> @foo(<8 x i1> %bar) nounwind readnone { +entry: + %s = sext <8 x i1> %bar to <8 x i32> + ret <8 x i32> %s +; CHECK: foo +; CHECK: vpmovzxwd +; CHECK: vpslld +; CHECK: vpsrad +; CHECK: ret +} diff --git a/test/CodeGen/X86/pr14314.ll b/test/CodeGen/X86/pr14314.ll new file mode 100644 index 0000000..5388a4b --- /dev/null +++ b/test/CodeGen/X86/pr14314.ll @@ -0,0 +1,13 @@ +; RUN: llc < %s -mtriple=i386-pc-linux -mcpu=corei7 | FileCheck %s + +define i64 @atomicSub(i64* %a, i64 %b) nounwind { +entry: + %0 = atomicrmw sub i64* %a, i64 %b seq_cst + ret i64 %0 +; CHECK: atomicSub +; movl %eax, %ebx +; subl {{%[a-z]+}}, %ebx +; movl %edx, %ecx +; sbbl {{%[a-z]+}}, %ecx +; CHECK: ret +} diff --git a/test/CodeGen/X86/pr14333.ll b/test/CodeGen/X86/pr14333.ll new file mode 100644 index 0000000..86c12ef --- /dev/null +++ b/test/CodeGen/X86/pr14333.ll @@ -0,0 +1,12 @@ +; RUN: llc -mtriple=x86_64-unknown-unknown < %s +%foo = type { i64, i64 } +define void @bar(%foo* %zed) { + %tmp = getelementptr inbounds %foo* %zed, i64 0, i32 0 + store i64 0, i64* %tmp, align 8 + %tmp2 = getelementptr inbounds %foo* %zed, i64 0, i32 1 + store i64 0, i64* %tmp2, align 8 + %tmp3 = bitcast %foo* %zed to i8* + call void @llvm.memset.p0i8.i64(i8* %tmp3, i8 0, i64 16, i32 8, i1 false) + ret void +} +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind diff --git a/test/CodeGen/X86/pr5145.ll b/test/CodeGen/X86/pr5145.ll new file mode 100644 index 0000000..d048db8 --- /dev/null +++ b/test/CodeGen/X86/pr5145.ll @@ -0,0 +1,35 @@ +; RUN: llc -march=x86-64 < %s | FileCheck %s +@sc8 = external global i8 + +define void @atomic_maxmin_i8() { +; CHECK: atomic_maxmin_i8 + %1 = atomicrmw max i8* @sc8, i8 5 acquire +; CHECK: [[LABEL1:\.?LBB[0-9]+_[0-9]+]]: +; CHECK: cmpb +; CHECK: cmovl +; CHECK: lock +; CHECK-NEXT: cmpxchgb +; CHECK: jne [[LABEL1]] + %2 = atomicrmw min i8* @sc8, i8 6 acquire +; CHECK: [[LABEL3:\.?LBB[0-9]+_[0-9]+]]: +; CHECK: cmpb +; CHECK: cmovg +; CHECK: lock +; CHECK-NEXT: cmpxchgb +; CHECK: jne [[LABEL3]] + %3 = atomicrmw umax i8* @sc8, i8 7 acquire +; CHECK: [[LABEL5:\.?LBB[0-9]+_[0-9]+]]: +; CHECK: cmpb +; CHECK: cmovb +; CHECK: lock +; CHECK-NEXT: cmpxchgb +; CHECK: jne [[LABEL5]] + %4 = atomicrmw umin i8* @sc8, i8 8 acquire +; CHECK: [[LABEL7:\.?LBB[0-9]+_[0-9]+]]: +; CHECK: cmpb +; CHECK: cmova +; CHECK: lock +; CHECK-NEXT: cmpxchgb +; CHECK: jne [[LABEL7]] + ret void +} diff --git a/test/CodeGen/X86/promote.ll b/test/CodeGen/X86/promote.ll index 8b30dc7..283f48c 100644 --- a/test/CodeGen/X86/promote.ll +++ b/test/CodeGen/X86/promote.ll @@ -20,7 +20,7 @@ entry: ; CHECK: shuff_f define i32 @shuff_f(<4 x i8>* %A) { entry: -; CHECK: pshufb +; CHECK: pmovzxbd ; CHECK: paddd ; CHECK: pshufb %0 = load <4 x i8>* %A, align 8 diff --git a/test/CodeGen/X86/ptr-rotate.ll b/test/CodeGen/X86/ptr-rotate.ll index 6debd16..fbd13b5 100644 --- a/test/CodeGen/X86/ptr-rotate.ll +++ b/test/CodeGen/X86/ptr-rotate.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=i386-apple-darwin -o - < %s | FileCheck %s +; RUN: llc -mtriple=i386-apple-darwin -mcpu=corei7 -o - < %s | FileCheck %s define i32 @func(i8* %A) nounwind readnone { entry: diff --git a/test/CodeGen/X86/red-zone2.ll b/test/CodeGen/X86/red-zone2.ll index f092163..3e9c790 100644 --- a/test/CodeGen/X86/red-zone2.ll +++ b/test/CodeGen/X86/red-zone2.ll @@ -1,6 +1,7 @@ -; RUN: llc < %s -mcpu=generic -march=x86-64 > %t -; RUN: grep subq %t | count 1 -; RUN: grep addq %t | count 1 +; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux | FileCheck %s +; 
CHECK: f0: +; CHECK: subq +; CHECK: addq define x86_fp80 @f0(float %f) nounwind readnone noredzone { entry: diff --git a/test/CodeGen/X86/rot32.ll b/test/CodeGen/X86/rot32.ll index 99602fd..e95a734 100644 --- a/test/CodeGen/X86/rot32.ll +++ b/test/CodeGen/X86/rot32.ll @@ -1,4 +1,5 @@ -; RUN: llc < %s -march=x86 | FileCheck %s +; RUN: llc < %s -march=x86 -mcpu=corei7 | FileCheck %s +; RUN: llc < %s -march=x86 -mcpu=core-avx2 | FileCheck %s --check-prefix=BMI2 define i32 @foo(i32 %x, i32 %y, i32 %z) nounwind readnone { entry: @@ -48,12 +49,25 @@ define i32 @xfoo(i32 %x, i32 %y, i32 %z) nounwind readnone { entry: ; CHECK: xfoo: ; CHECK: roll $7 +; BMI2: xfoo: +; BMI2: rorxl $25 %0 = lshr i32 %x, 25 %1 = shl i32 %x, 7 %2 = or i32 %0, %1 ret i32 %2 } +define i32 @xfoop(i32* %p) nounwind readnone { +entry: +; BMI2: xfoop: +; BMI2: rorxl $25, ({{.+}}), %{{.+}} + %x = load i32* %p + %a = lshr i32 %x, 25 + %b = shl i32 %x, 7 + %c = or i32 %a, %b + ret i32 %c +} + define i32 @xbar(i32 %x, i32 %y, i32 %z) nounwind readnone { entry: ; CHECK: xbar: @@ -68,12 +82,25 @@ define i32 @xun(i32 %x, i32 %y, i32 %z) nounwind readnone { entry: ; CHECK: xun: ; CHECK: roll $25 +; BMI2: xun: +; BMI2: rorxl $7 %0 = lshr i32 %x, 7 %1 = shl i32 %x, 25 %2 = or i32 %0, %1 ret i32 %2 } +define i32 @xunp(i32* %p) nounwind readnone { +entry: +; BMI2: xunp: +; BMI2: rorxl $7, ({{.+}}), %{{.+}} + %x = load i32* %p + %a = lshr i32 %x, 7 + %b = shl i32 %x, 25 + %c = or i32 %a, %b + ret i32 %c +} + define i32 @xbu(i32 %x, i32 %y, i32 %z) nounwind readnone { entry: ; CHECK: xbu: diff --git a/test/CodeGen/X86/rot64.ll b/test/CodeGen/X86/rot64.ll index 4e082bb..7fa982d 100644 --- a/test/CodeGen/X86/rot64.ll +++ b/test/CodeGen/X86/rot64.ll @@ -1,8 +1,9 @@ -; RUN: llc < %s -march=x86-64 > %t -; RUN: grep rol %t | count 3 +; RUN: llc < %s -march=x86-64 -mcpu=corei7 > %t +; RUN: grep rol %t | count 5 ; RUN: grep ror %t | count 1 ; RUN: grep shld %t | count 2 ; RUN: grep shrd %t | count 2 +; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck %s --check-prefix=BMI2 define i64 @foo(i64 %x, i64 %y, i64 %z) nounwind readnone { entry: @@ -42,12 +43,25 @@ entry: define i64 @xfoo(i64 %x, i64 %y, i64 %z) nounwind readnone { entry: +; BMI2: xfoo: +; BMI2: rorxq $57 %0 = lshr i64 %x, 57 %1 = shl i64 %x, 7 %2 = or i64 %0, %1 ret i64 %2 } +define i64 @xfoop(i64* %p) nounwind readnone { +entry: +; BMI2: xfoop: +; BMI2: rorxq $57, ({{.+}}), %{{.+}} + %x = load i64* %p + %a = lshr i64 %x, 57 + %b = shl i64 %x, 7 + %c = or i64 %a, %b + ret i64 %c +} + define i64 @xbar(i64 %x, i64 %y, i64 %z) nounwind readnone { entry: %0 = shl i64 %y, 7 @@ -58,12 +72,25 @@ entry: define i64 @xun(i64 %x, i64 %y, i64 %z) nounwind readnone { entry: +; BMI2: xun: +; BMI2: rorxq $7 %0 = lshr i64 %x, 7 %1 = shl i64 %x, 57 %2 = or i64 %0, %1 ret i64 %2 } +define i64 @xunp(i64* %p) nounwind readnone { +entry: +; BMI2: xunp: +; BMI2: rorxq $7, ({{.+}}), %{{.+}} + %x = load i64* %p + %a = lshr i64 %x, 7 + %b = shl i64 %x, 57 + %c = or i64 %a, %b + ret i64 %c +} + define i64 @xbu(i64 %x, i64 %y, i64 %z) nounwind readnone { entry: %0 = lshr i64 %y, 7 diff --git a/test/CodeGen/X86/rotate2.ll b/test/CodeGen/X86/rotate2.ll index 2eea399..2316c70 100644 --- a/test/CodeGen/X86/rotate2.ll +++ b/test/CodeGen/X86/rotate2.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=x86-64 | grep rol | count 2 +; RUN: llc < %s -march=x86-64 -mcpu=corei7 | grep rol | count 2 define i64 @test1(i64 %x) nounwind { entry: diff --git a/test/CodeGen/X86/rtm.ll b/test/CodeGen/X86/rtm.ll new file mode 100644 
index 0000000..76eb951 --- /dev/null +++ b/test/CodeGen/X86/rtm.ll @@ -0,0 +1,30 @@ +; RUN: llc < %s -mattr=+rtm -mtriple=x86_64-unknown-unknown | FileCheck %s + +declare i32 @llvm.x86.xbegin() nounwind +declare void @llvm.x86.xend() nounwind +declare void @llvm.x86.xabort(i8) noreturn nounwind + +define i32 @test_xbegin() nounwind uwtable { +entry: + %0 = tail call i32 @llvm.x86.xbegin() nounwind + ret i32 %0 +; CHECK: test_xbegin +; CHECK: xbegin [[LABEL:.*BB.*]] +; CHECK: [[LABEL]]: +} + +define void @test_xend() nounwind uwtable { +entry: + tail call void @llvm.x86.xend() nounwind + ret void +; CHECK: test_xend +; CHECK: xend +} + +define void @test_xabort() nounwind uwtable { +entry: + tail call void @llvm.x86.xabort(i8 2) + unreachable +; CHECK: test_xabort +; CHECK: xabort $2 +} diff --git a/test/CodeGen/X86/select.ll b/test/CodeGen/X86/select.ll index 2e39473..3bec3ac 100644 --- a/test/CodeGen/X86/select.ll +++ b/test/CodeGen/X86/select.ll @@ -344,3 +344,16 @@ entry: ; ATOM: negw ; ATOM: sbbw } + +define i8 @test18(i32 %x, i8 zeroext %a, i8 zeroext %b) nounwind { + %cmp = icmp slt i32 %x, 15 + %sel = select i1 %cmp, i8 %a, i8 %b + ret i8 %sel +; CHECK: test18: +; CHECK: cmpl $15, %edi +; CHECK: cmovgel %edx + +; ATOM: test18: +; ATOM: cmpl $15, %edi +; ATOM: cmovgel %edx +} diff --git a/test/CodeGen/X86/select_const.ll b/test/CodeGen/X86/select_const.ll new file mode 100644 index 0000000..5b2409d --- /dev/null +++ b/test/CodeGen/X86/select_const.ll @@ -0,0 +1,16 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=corei7 | FileCheck %s + +define i64 @test1(i64 %x) nounwind { +entry: + %cmp = icmp eq i64 %x, 2 + %add = add i64 %x, 1 + %retval.0 = select i1 %cmp, i64 2, i64 %add + ret i64 %retval.0 + +; CHECK: test1: +; CHECK: leaq 1(%rdi), %rax +; CHECK: cmpq $2, %rdi +; CHECK: cmoveq %rdi, %rax +; CHECK: ret + +} diff --git a/test/CodeGen/X86/shift-bmi2.ll b/test/CodeGen/X86/shift-bmi2.ll new file mode 100644 index 0000000..d1f321f --- /dev/null +++ b/test/CodeGen/X86/shift-bmi2.ll @@ -0,0 +1,178 @@ +; RUN: llc -mtriple=i386-unknown-unknown -mcpu=core-avx2 < %s | FileCheck --check-prefix=BMI2 %s +; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=core-avx2 < %s | FileCheck --check-prefix=BMI264 %s + +define i32 @shl32(i32 %x, i32 %shamt) nounwind uwtable readnone { +entry: + %shl = shl i32 %x, %shamt +; BMI2: shl32 +; BMI2: shlxl +; BMI2: ret +; BMI264: shl32 +; BMI264: shlxl +; BMI264: ret + ret i32 %shl +} + +define i32 @shl32i(i32 %x) nounwind uwtable readnone { +entry: + %shl = shl i32 %x, 5 +; BMI2: shl32i +; BMI2-NOT: shlxl +; BMI2: ret +; BMI264: shl32i +; BMI264-NOT: shlxl +; BMI264: ret + ret i32 %shl +} + +define i32 @shl32p(i32* %p, i32 %shamt) nounwind uwtable readnone { +entry: + %x = load i32* %p + %shl = shl i32 %x, %shamt +; BMI2: shl32p +; BMI2: shlxl %{{.+}}, ({{.+}}), %{{.+}} +; BMI2: ret +; BMI264: shl32p +; BMI264: shlxl %{{.+}}, ({{.+}}), %{{.+}} +; BMI264: ret + ret i32 %shl +} + +define i32 @shl32pi(i32* %p) nounwind uwtable readnone { +entry: + %x = load i32* %p + %shl = shl i32 %x, 5 +; BMI2: shl32pi +; BMI2-NOT: shlxl +; BMI2: ret +; BMI264: shl32pi +; BMI264-NOT: shlxl +; BMI264: ret + ret i32 %shl +} + +define i64 @shl64(i64 %x, i64 %shamt) nounwind uwtable readnone { +entry: + %shl = shl i64 %x, %shamt +; BMI264: shl64 +; BMI264: shlxq +; BMI264: ret + ret i64 %shl +} + +define i64 @shl64i(i64 %x) nounwind uwtable readnone { +entry: + %shl = shl i64 %x, 7 +; BMI264: shl64i +; BMI264-NOT: shlxq +; BMI264: ret + ret i64 %shl +} + +define i64 
@shl64p(i64* %p, i64 %shamt) nounwind uwtable readnone { +entry: + %x = load i64* %p + %shl = shl i64 %x, %shamt +; BMI264: shl64p +; BMI264: shlxq %{{.+}}, ({{.+}}), %{{.+}} +; BMI264: ret + ret i64 %shl +} + +define i64 @shl64pi(i64* %p) nounwind uwtable readnone { +entry: + %x = load i64* %p + %shl = shl i64 %x, 7 +; BMI264: shl64p +; BMI264-NOT: shlxq +; BMI264: ret + ret i64 %shl +} + +define i32 @lshr32(i32 %x, i32 %shamt) nounwind uwtable readnone { +entry: + %shl = lshr i32 %x, %shamt +; BMI2: lshr32 +; BMI2: shrxl +; BMI2: ret +; BMI264: lshr32 +; BMI264: shrxl +; BMI264: ret + ret i32 %shl +} + +define i32 @lshr32p(i32* %p, i32 %shamt) nounwind uwtable readnone { +entry: + %x = load i32* %p + %shl = lshr i32 %x, %shamt +; BMI2: lshr32p +; BMI2: shrxl %{{.+}}, ({{.+}}), %{{.+}} +; BMI2: ret +; BMI264: lshr32 +; BMI264: shrxl %{{.+}}, ({{.+}}), %{{.+}} +; BMI264: ret + ret i32 %shl +} + +define i64 @lshr64(i64 %x, i64 %shamt) nounwind uwtable readnone { +entry: + %shl = lshr i64 %x, %shamt +; BMI264: lshr64 +; BMI264: shrxq +; BMI264: ret + ret i64 %shl +} + +define i64 @lshr64p(i64* %p, i64 %shamt) nounwind uwtable readnone { +entry: + %x = load i64* %p + %shl = lshr i64 %x, %shamt +; BMI264: lshr64p +; BMI264: shrxq %{{.+}}, ({{.+}}), %{{.+}} +; BMI264: ret + ret i64 %shl +} + +define i32 @ashr32(i32 %x, i32 %shamt) nounwind uwtable readnone { +entry: + %shl = ashr i32 %x, %shamt +; BMI2: ashr32 +; BMI2: sarxl +; BMI2: ret +; BMI264: ashr32 +; BMI264: sarxl +; BMI264: ret + ret i32 %shl +} + +define i32 @ashr32p(i32* %p, i32 %shamt) nounwind uwtable readnone { +entry: + %x = load i32* %p + %shl = ashr i32 %x, %shamt +; BMI2: ashr32p +; BMI2: sarxl %{{.+}}, ({{.+}}), %{{.+}} +; BMI2: ret +; BMI264: ashr32 +; BMI264: sarxl %{{.+}}, ({{.+}}), %{{.+}} +; BMI264: ret + ret i32 %shl +} + +define i64 @ashr64(i64 %x, i64 %shamt) nounwind uwtable readnone { +entry: + %shl = ashr i64 %x, %shamt +; BMI264: ashr64 +; BMI264: sarxq +; BMI264: ret + ret i64 %shl +} + +define i64 @ashr64p(i64* %p, i64 %shamt) nounwind uwtable readnone { +entry: + %x = load i64* %p + %shl = ashr i64 %x, %shamt +; BMI264: ashr64p +; BMI264: sarxq %{{.+}}, ({{.+}}), %{{.+}} +; BMI264: ret + ret i64 %shl +} diff --git a/test/CodeGen/X86/sincos.ll b/test/CodeGen/X86/sincos.ll index 1479be1..734f48a 100644 --- a/test/CodeGen/X86/sincos.ll +++ b/test/CodeGen/X86/sincos.ll @@ -1,6 +1,7 @@ ; Make sure this testcase codegens to the sin and cos instructions, not calls ; RUN: llc < %s -mtriple=i686-apple-macosx -mattr=-sse,-sse2,-sse3 -enable-unsafe-fp-math | FileCheck %s --check-prefix=SIN ; RUN: llc < %s -mtriple=i686-apple-macosx -mattr=-sse,-sse2,-sse3 -enable-unsafe-fp-math | FileCheck %s --check-prefix=COS +; RUN: llc < %s -mtriple=i686-apple-macosx -mattr=-sse,-sse2,-sse3 | FileCheck %s --check-prefix=SAFE declare float @sinf(float) readonly @@ -17,6 +18,9 @@ define float @test1(float %X) { ; SIN-NOT: fsin +; SAFE: test1 +; SAFE-NOT: fsin + ; SIN: test2: define double @test2(double %X) { %Y = call double @sin(double %X) readonly @@ -26,6 +30,9 @@ define double @test2(double %X) { ; SIN-NOT: fsin +; SAFE: test2 +; SAFE-NOT: fsin + ; SIN: test3: define x86_fp80 @test3(x86_fp80 %X) { %Y = call x86_fp80 @sinl(x86_fp80 %X) readonly @@ -50,12 +57,18 @@ define float @test4(float %X) { } ; COS: {{^[ \t]*fcos}} +; SAFE: test4 +; SAFE-NOT: fcos + define double @test5(double %X) { %Y = call double @cos(double %X) readonly ret double %Y } ; COS: {{^[ \t]*fcos}} +; SAFE: test5 +; SAFE-NOT: fcos + define x86_fp80 @test6(x86_fp80 
%X) { %Y = call x86_fp80 @cosl(x86_fp80 %X) readonly ret x86_fp80 %Y diff --git a/test/CodeGen/X86/sjlj.ll b/test/CodeGen/X86/sjlj.ll new file mode 100644 index 0000000..681db00 --- /dev/null +++ b/test/CodeGen/X86/sjlj.ll @@ -0,0 +1,60 @@ +; RUN: llc < %s -mtriple=i386-pc-linux -mcpu=corei7 -relocation-model=static | FileCheck --check-prefix=X86 %s +; RUN: llc < %s -mtriple=i386-pc-linux -mcpu=corei7 -relocation-model=pic | FileCheck --check-prefix=PIC86 %s +; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=corei7 -relocation-model=static | FileCheck --check-prefix=X64 %s +; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=corei7 -relocation-model=pic | FileCheck --check-prefix=PIC64 %s + +@buf = internal global [5 x i8*] zeroinitializer + +declare i8* @llvm.frameaddress(i32) nounwind readnone + +declare i8* @llvm.stacksave() nounwind + +declare i32 @llvm.eh.sjlj.setjmp(i8*) nounwind + +declare void @llvm.eh.sjlj.longjmp(i8*) nounwind + +define i32 @sj0() nounwind { + %fp = tail call i8* @llvm.frameaddress(i32 0) + store i8* %fp, i8** getelementptr inbounds ([5 x i8*]* @buf, i64 0, i64 0), align 16 + %sp = tail call i8* @llvm.stacksave() + store i8* %sp, i8** getelementptr inbounds ([5 x i8*]* @buf, i64 0, i64 2), align 16 + %r = tail call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([5 x i8*]* @buf to i8*)) + ret i32 %r +; X86: sj0 +; x86: movl %ebp, buf +; X86: movl %esp, buf+8 +; x86: movl ${{.*LBB.*}}, buf+4 +; X86: ret +; PIC86: sj0 +; PIC86: movl %ebp, buf@GOTOFF(%[[GOT:.*]]) +; PIC86: movl %esp, buf@GOTOFF+8(%[[GOT]]) +; PIC86: leal {{.*LBB.*}}@GOTOFF(%[[GOT]]), %[[LREG:.*]] +; PIC86: movl %[[LREG]], buf@GOTOFF+4 +; PIC86: ret +; X64: sj0 +; x64: movq %rbp, buf(%rip) +; x64: movq ${{.*LBB.*}}, buf+8(%rip) +; X64: movq %rsp, buf+16(%rip) +; X64: ret +; PIC64: sj0 +; PIC64: movq %rbp, buf(%rip) +; PIC64: movq %rsp, buf+16(%rip) +; PIC64: leaq {{.*LBB.*}}(%rip), %[[LREG:.*]] +; PIC64: movq %[[LREG]], buf+8(%rip) +; PIC64: ret +} + +define void @lj0() nounwind { + tail call void @llvm.eh.sjlj.longjmp(i8* bitcast ([5 x i8*]* @buf to i8*)) + unreachable +; X86: lj0 +; X86: movl buf, %ebp +; X86: movl buf+4, %[[REG32:.*]] +; X86: movl buf+8, %esp +; X86: jmpl *%[[REG32]] +; X64: lj0 +; X64: movq buf(%rip), %rbp +; X64: movq buf+8(%rip), %[[REG64:.*]] +; X64: movq buf+16(%rip), %rsp +; X64: jmpq *%[[REG64]] +} diff --git a/test/CodeGen/X86/smul-with-overflow.ll b/test/CodeGen/X86/smul-with-overflow.ll index 7ac3840..2d0b2f7 100644 --- a/test/CodeGen/X86/smul-with-overflow.ll +++ b/test/CodeGen/X86/smul-with-overflow.ll @@ -67,3 +67,17 @@ entry: ; CHECK: mull ; CHECK-NEXT: ret } + +declare { i63, i1 } @llvm.smul.with.overflow.i63(i63, i63) nounwind readnone + +define i1 @test5() nounwind { +entry: + %res = call { i63, i1 } @llvm.smul.with.overflow.i63(i63 4, i63 4611686018427387903) + %sum = extractvalue { i63, i1 } %res, 0 + %overflow = extractvalue { i63, i1 } %res, 1 + ret i1 %overflow +; Was returning false, should return true (not constant folded yet though). 
+; PR13991 +; CHECK: test5: +; CHECK-NOT: xorb +} diff --git a/test/CodeGen/X86/sse-intel-ocl.ll b/test/CodeGen/X86/sse-intel-ocl.ll new file mode 100644 index 0000000..1885050 --- /dev/null +++ b/test/CodeGen/X86/sse-intel-ocl.ll @@ -0,0 +1,93 @@ +; RUN: llc < %s -mtriple=i386-pc-win32 -mcpu=nehalem | FileCheck -check-prefix=WIN32 %s +; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=nehalem | FileCheck -check-prefix=WIN64 %s +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=nehalem | FileCheck -check-prefix=NOT_WIN %s + +declare <16 x float> @func_float16_ptr(<16 x float>, <16 x float> *) +declare <16 x float> @func_float16(<16 x float>, <16 x float>) +; WIN64: testf16_inp +; WIN64: addps {{.*}}, {{%xmm[0-3]}} +; WIN64: addps {{.*}}, {{%xmm[0-3]}} +; WIN64: addps {{.*}}, {{%xmm[0-3]}} +; WIN64: addps {{.*}}, {{%xmm[0-3]}} +; WIN64: leaq {{.*}}(%rsp), %rcx +; WIN64: call +; WIN64: ret + +; WIN32: testf16_inp +; WIN32: movl %eax, (%esp) +; WIN32: addps {{.*}}, {{%xmm[0-3]}} +; WIN32: addps {{.*}}, {{%xmm[0-3]}} +; WIN32: addps {{.*}}, {{%xmm[0-3]}} +; WIN32: addps {{.*}}, {{%xmm[0-3]}} +; WIN32: call +; WIN32: ret + +; NOT_WIN: testf16_inp +; NOT_WIN: addps {{.*}}, {{%xmm[0-3]}} +; NOT_WIN: addps {{.*}}, {{%xmm[0-3]}} +; NOT_WIN: addps {{.*}}, {{%xmm[0-3]}} +; NOT_WIN: addps {{.*}}, {{%xmm[0-3]}} +; NOT_WIN: leaq {{.*}}(%rsp), %rdi +; NOT_WIN: call +; NOT_WIN: ret + +;test calling conventions - input parameters +define <16 x float> @testf16_inp(<16 x float> %a, <16 x float> %b) nounwind { + %y = alloca <16 x float>, align 16 + %x = fadd <16 x float> %a, %b + %1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y) + %2 = load <16 x float>* %y, align 16 + %3 = fadd <16 x float> %2, %1 + ret <16 x float> %3 +} + +;test calling conventions - preserved registers + +; preserved xmm6-xmm15 +; WIN64: testf16_regs +; WIN64: call +; WIN64: addps {{%xmm[6-9]}}, {{.*}} +; WIN64: addps {{%xmm[6-9]}}, {{.*}} +; WIN64: ret + +; preserved xmm8-xmm15 +; NOT_WIN: testf16_regs +; NOT_WIN: call +; NOT_WIN: addps {{%xmm([8-9]|1[0-1])}}, {{.*}} +; NOT_WIN: addps {{%xmm([8-9]|1[0-1])}}, {{.*}} +; NOT_WIN: addps {{%xmm([8-9]|1[0-1])}}, {{.*}} +; NOT_WIN: addps {{%xmm([8-9]|1[0-1])}}, {{.*}} +; NOT_WIN: ret + +define <16 x float> @testf16_regs(<16 x float> %a, <16 x float> %b) nounwind { + %y = alloca <16 x float>, align 16 + %x = fadd <16 x float> %a, %b + %1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y) + %2 = load <16 x float>* %y, align 16 + %3 = fadd <16 x float> %1, %b + %4 = fadd <16 x float> %2, %3 + ret <16 x float> %4 +} + +; test calling conventions - prolog and epilog +; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill +; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill +; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill +; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill +; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill +; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill +; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill +; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill +; NOT_WIN: call +; NOT_WIN: movaps {{.*(%rsp).*}}, {{%xmm([8-9]|1[0-5])}} ## 16-byte Reload +; NOT_WIN: movaps {{.*(%rsp).*}}, {{%xmm([8-9]|1[0-5])}} ## 16-byte Reload +; NOT_WIN: movaps {{.*(%rsp).*}}, {{%xmm([8-9]|1[0-5])}} ## 16-byte Reload +; NOT_WIN: movaps {{.*(%rsp).*}}, 
{{%xmm([8-9]|1[0-5])}} ## 16-byte Reload +; NOT_WIN: movaps {{.*(%rsp).*}}, {{%xmm([8-9]|1[0-5])}} ## 16-byte Reload +; NOT_WIN: movaps {{.*(%rsp).*}}, {{%xmm([8-9]|1[0-5])}} ## 16-byte Reload +; NOT_WIN: movaps {{.*(%rsp).*}}, {{%xmm([8-9]|1[0-5])}} ## 16-byte Reload +; NOT_WIN: movaps {{.*(%rsp).*}}, {{%xmm([8-9]|1[0-5])}} ## 16-byte Reload +define intel_ocl_bicc <16 x float> @test_prolog_epilog(<16 x float> %a, <16 x float> %b) nounwind { + %c = call <16 x float> @func_float16(<16 x float> %a, <16 x float> %b) + ret <16 x float> %c +} diff --git a/test/CodeGen/X86/sse-minmax.ll b/test/CodeGen/X86/sse-minmax.ll index 3839e87..0ba0215 100644 --- a/test/CodeGen/X86/sse-minmax.ll +++ b/test/CodeGen/X86/sse-minmax.ll @@ -47,8 +47,7 @@ define double @olt(double %x, double %y) nounwind { ; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0 ; CHECK-NEXT: ret ; UNSAFE: ogt_inverse: -; UNSAFE-NEXT: minsd %xmm0, %xmm1 -; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 +; UNSAFE-NEXT: minsd %xmm1, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: ogt_inverse: ; FINITE-NEXT: minsd %xmm0, %xmm1 @@ -65,8 +64,7 @@ define double @ogt_inverse(double %x, double %y) nounwind { ; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0 ; CHECK-NEXT: ret ; UNSAFE: olt_inverse: -; UNSAFE-NEXT: maxsd %xmm0, %xmm1 -; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 +; UNSAFE-NEXT: maxsd %xmm1, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: olt_inverse: ; FINITE-NEXT: maxsd %xmm0, %xmm1 @@ -107,8 +105,7 @@ define double @ole(double %x, double %y) nounwind { ; CHECK: oge_inverse: ; CHECK-NEXT: ucomisd %xmm1, %xmm0 ; UNSAFE: oge_inverse: -; UNSAFE-NEXT: minsd %xmm0, %xmm1 -; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 +; UNSAFE-NEXT: minsd %xmm1, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: oge_inverse: ; FINITE-NEXT: minsd %xmm0, %xmm1 @@ -123,8 +120,7 @@ define double @oge_inverse(double %x, double %y) nounwind { ; CHECK: ole_inverse: ; CHECK-NEXT: ucomisd %xmm0, %xmm1 ; UNSAFE: ole_inverse: -; UNSAFE-NEXT: maxsd %xmm0, %xmm1 -; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 +; UNSAFE-NEXT: maxsd %xmm1, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: ole_inverse: ; FINITE-NEXT: maxsd %xmm0, %xmm1 @@ -142,7 +138,8 @@ define double @ole_inverse(double %x, double %y) nounwind { ; CHECK-NEXT: ret ; UNSAFE: ogt_x: ; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1 -; UNSAFE-NEXT: maxsd %xmm1, %xmm0 +; UNSAFE-NEXT: maxsd %xmm0, %xmm1 +; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: ogt_x: ; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1 @@ -160,7 +157,8 @@ define double @ogt_x(double %x) nounwind { ; CHECK-NEXT: ret ; UNSAFE: olt_x: ; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1 -; UNSAFE-NEXT: minsd %xmm1, %xmm0 +; UNSAFE-NEXT: minsd %xmm0, %xmm1 +; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: olt_x: ; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1 @@ -218,7 +216,8 @@ define double @olt_inverse_x(double %x) nounwind { ; CHECK: ucomisd %xmm1, %xmm0 ; UNSAFE: oge_x: ; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1 -; UNSAFE-NEXT: maxsd %xmm1, %xmm0 +; UNSAFE-NEXT: maxsd %xmm0, %xmm1 +; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: oge_x: ; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1 @@ -234,7 +233,8 @@ define double @oge_x(double %x) nounwind { ; CHECK: ucomisd %xmm0, %xmm1 ; UNSAFE: ole_x: ; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1 -; UNSAFE-NEXT: minsd %xmm1, %xmm0 +; UNSAFE-NEXT: minsd %xmm0, %xmm1 +; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: ole_x: ; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1 @@ -313,8 +313,7 @@ define double @ult(double %x, double %y) nounwind { ; CHECK: 
ugt_inverse: ; CHECK: ucomisd %xmm0, %xmm1 ; UNSAFE: ugt_inverse: -; UNSAFE-NEXT: minsd %xmm0, %xmm1 -; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 +; UNSAFE-NEXT: minsd %xmm1, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: ugt_inverse: ; FINITE-NEXT: minsd %xmm0, %xmm1 @@ -329,8 +328,7 @@ define double @ugt_inverse(double %x, double %y) nounwind { ; CHECK: ult_inverse: ; CHECK: ucomisd %xmm1, %xmm0 ; UNSAFE: ult_inverse: -; UNSAFE-NEXT: maxsd %xmm0, %xmm1 -; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 +; UNSAFE-NEXT: maxsd %xmm1, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: ult_inverse: ; FINITE-NEXT: maxsd %xmm0, %xmm1 @@ -378,8 +376,7 @@ define double @ule(double %x, double %y) nounwind { ; CHECK-NEXT: minsd %xmm1, %xmm0 ; CHECK-NEXT: ret ; UNSAFE: uge_inverse: -; UNSAFE-NEXT: minsd %xmm0, %xmm1 -; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 +; UNSAFE-NEXT: minsd %xmm1, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: uge_inverse: ; FINITE-NEXT: minsd %xmm0, %xmm1 @@ -395,8 +392,7 @@ define double @uge_inverse(double %x, double %y) nounwind { ; CHECK-NEXT: maxsd %xmm1, %xmm0 ; CHECK-NEXT: ret ; UNSAFE: ule_inverse: -; UNSAFE-NEXT: maxsd %xmm0, %xmm1 -; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 +; UNSAFE-NEXT: maxsd %xmm1, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: ule_inverse: ; FINITE-NEXT: maxsd %xmm0, %xmm1 @@ -412,7 +408,8 @@ define double @ule_inverse(double %x, double %y) nounwind { ; CHECK: ucomisd %xmm0, %xmm1 ; UNSAFE: ugt_x: ; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1 -; UNSAFE-NEXT: maxsd %xmm1, %xmm0 +; UNSAFE-NEXT: maxsd %xmm0, %xmm1 +; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: ugt_x: ; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1 @@ -428,7 +425,8 @@ define double @ugt_x(double %x) nounwind { ; CHECK: ucomisd %xmm1, %xmm0 ; UNSAFE: ult_x: ; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1 -; UNSAFE-NEXT: minsd %xmm1, %xmm0 +; UNSAFE-NEXT: minsd %xmm0, %xmm1 +; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: ult_x: ; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1 @@ -483,7 +481,8 @@ define double @ult_inverse_x(double %x) nounwind { ; CHECK-NEXT: ret ; UNSAFE: uge_x: ; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1 -; UNSAFE-NEXT: maxsd %xmm1, %xmm0 +; UNSAFE-NEXT: maxsd %xmm0, %xmm1 +; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: uge_x: ; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1 @@ -502,7 +501,8 @@ define double @uge_x(double %x) nounwind { ; CHECK-NEXT: ret ; UNSAFE: ule_x: ; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1 -; UNSAFE-NEXT: minsd %xmm1, %xmm0 +; UNSAFE-NEXT: minsd %xmm0, %xmm1 +; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: ule_x: ; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1 @@ -590,9 +590,7 @@ define double @olt_y(double %x) nounwind { ; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0 ; CHECK-NEXT: ret ; UNSAFE: ogt_inverse_y: -; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1 -; UNSAFE-NEXT: minsd %xmm0, %xmm1 -; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 +; UNSAFE-NEXT: minsd {{[^,]*}}, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: ogt_inverse_y: ; FINITE-NEXT: movsd {{[^,]*}}, %xmm1 @@ -611,9 +609,7 @@ define double @ogt_inverse_y(double %x) nounwind { ; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0 ; CHECK-NEXT: ret ; UNSAFE: olt_inverse_y: -; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1 -; UNSAFE-NEXT: maxsd %xmm0, %xmm1 -; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 +; UNSAFE-NEXT: maxsd {{[^,]*}}, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: olt_inverse_y: ; FINITE-NEXT: movsd {{[^,]*}}, %xmm1 @@ -657,9 +653,7 @@ define double @ole_y(double %x) nounwind { ; CHECK: oge_inverse_y: ; CHECK: ucomisd %xmm ; UNSAFE: oge_inverse_y: 
-; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1 -; UNSAFE-NEXT: minsd %xmm0, %xmm1 -; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 +; UNSAFE-NEXT: minsd {{[^,]*}}, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: oge_inverse_y: ; FINITE-NEXT: movsd {{[^,]*}}, %xmm1 @@ -675,9 +669,7 @@ define double @oge_inverse_y(double %x) nounwind { ; CHECK: ole_inverse_y: ; CHECK: ucomisd %xmm ; UNSAFE: ole_inverse_y: -; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1 -; UNSAFE-NEXT: maxsd %xmm0, %xmm1 -; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 +; UNSAFE-NEXT: maxsd {{[^,]*}}, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: ole_inverse_y: ; FINITE-NEXT: movsd {{[^,]*}}, %xmm1 @@ -721,9 +713,7 @@ define double @ult_y(double %x) nounwind { ; CHECK: ugt_inverse_y: ; CHECK: ucomisd %xmm ; UNSAFE: ugt_inverse_y: -; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1 -; UNSAFE-NEXT: minsd %xmm0, %xmm1 -; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 +; UNSAFE-NEXT: minsd {{[^,]*}}, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: ugt_inverse_y: ; FINITE-NEXT: movsd {{[^,]*}}, %xmm1 @@ -739,9 +729,7 @@ define double @ugt_inverse_y(double %x) nounwind { ; CHECK: ult_inverse_y: ; CHECK: ucomisd %xmm ; UNSAFE: ult_inverse_y: -; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1 -; UNSAFE-NEXT: maxsd %xmm0, %xmm1 -; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 +; UNSAFE-NEXT: maxsd {{[^,]*}}, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: ult_inverse_y: ; FINITE-NEXT: movsd {{[^,]*}}, %xmm1 @@ -792,9 +780,7 @@ define double @ule_y(double %x) nounwind { ; CHECK-NEXT: minsd {{[^,]*}}, %xmm0 ; CHECK-NEXT: ret ; UNSAFE: uge_inverse_y: -; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1 -; UNSAFE-NEXT: minsd %xmm0, %xmm1 -; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 +; UNSAFE-NEXT: minsd {{[^,]*}}, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: uge_inverse_y: ; FINITE-NEXT: movsd {{[^,]*}}, %xmm1 @@ -811,9 +797,7 @@ define double @uge_inverse_y(double %x) nounwind { ; CHECK-NEXT: maxsd {{[^,]*}}, %xmm0 ; CHECK-NEXT: ret ; UNSAFE: ule_inverse_y: -; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1 -; UNSAFE-NEXT: maxsd %xmm0, %xmm1 -; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 +; UNSAFE-NEXT: maxsd {{[^,]*}}, %xmm0 ; UNSAFE-NEXT: ret ; FINITE: ule_inverse_y: ; FINITE-NEXT: movsd {{[^,]*}}, %xmm1 diff --git a/test/CodeGen/X86/sse_partial_update.ll b/test/CodeGen/X86/sse_partial_update.ll new file mode 100644 index 0000000..655f758 --- /dev/null +++ b/test/CodeGen/X86/sse_partial_update.ll @@ -0,0 +1,36 @@ +; RUN: llc < %s -mtriple=x86_64-apple-macosx -mattr=+sse2 -mcpu=nehalem | FileCheck %s + +; rdar: 12558838 +; PR14221 +; There is a mismatch between the intrinsic and the actual instruction. +; The actual instruction has a partial update of dest, while the intrinsic +; passes through the upper FP values. Here, we make sure the source and +; destination of rsqrtss are the same. 
+define void @t1(<4 x float> %a) nounwind uwtable ssp { +entry: +; CHECK: t1: +; CHECK: rsqrtss %xmm0, %xmm0 + %0 = tail call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %a) nounwind + %a.addr.0.extract = extractelement <4 x float> %0, i32 0 + %conv = fpext float %a.addr.0.extract to double + %a.addr.4.extract = extractelement <4 x float> %0, i32 1 + %conv3 = fpext float %a.addr.4.extract to double + tail call void @callee(double %conv, double %conv3) nounwind + ret void +} +declare void @callee(double, double) +declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone + +define void @t2(<4 x float> %a) nounwind uwtable ssp { +entry: +; CHECK: t2: +; CHECK: rcpss %xmm0, %xmm0 + %0 = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %a) nounwind + %a.addr.0.extract = extractelement <4 x float> %0, i32 0 + %conv = fpext float %a.addr.0.extract to double + %a.addr.4.extract = extractelement <4 x float> %0, i32 1 + %conv3 = fpext float %a.addr.4.extract to double + tail call void @callee(double %conv, double %conv3) nounwind + ret void +} +declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone diff --git a/test/CodeGen/X86/tailcall-64.ll b/test/CodeGen/X86/tailcall-64.ll index 7030753..ecc253b 100644 --- a/test/CodeGen/X86/tailcall-64.ll +++ b/test/CodeGen/X86/tailcall-64.ll @@ -1,6 +1,4 @@ -; RUN: llc < %s | FileCheck %s -target datalayout = "e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-f128:128:128-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" -target triple = "x86_64-apple-darwin11.4.0" +; RUN: llc -mtriple=x86_64-apple-macosx -mcpu=core2 < %s | FileCheck %s declare i64 @testi() @@ -93,4 +91,67 @@ define { i64, i64 } @crash(i8* %this) { ret { i64, i64 } %mrv7 } +; Check that we can fold an indexed load into a tail call instruction. +; CHECK: fold_indexed_load +; CHECK: leaq (%rsi,%rsi,4), %[[RAX:r..]] +; CHECK: jmpq *16(%{{r..}},%[[RAX]],8) # TAILCALL +%struct.funcs = type { i32 (i8*, i32*, i32)*, i32 (i8*)*, i32 (i8*)*, i32 (i8*, i32)*, i32 } +@func_table = external global [0 x %struct.funcs] +define void @fold_indexed_load(i8* %mbstr, i64 %idxprom) nounwind uwtable ssp { +entry: + %dsplen = getelementptr inbounds [0 x %struct.funcs]* @func_table, i64 0, i64 %idxprom, i32 2 + %x1 = load i32 (i8*)** %dsplen, align 8 + %call = tail call i32 %x1(i8* %mbstr) nounwind + ret void +} + +; <rdar://problem/12282281> Fold an indexed load into the tail call instruction. +; Calling a varargs function with 6 arguments requires 7 registers (%al is the +; vector count for varargs functions). This leaves %r11 as the only available +; scratch register. +; +; It is not possible to fold an indexed load into TCRETURNmi64 in that case. 
+; +; typedef int (*funcptr)(void*, ...); +; extern const funcptr funcs[]; +; int f(int n) { +; return funcs[n](0, 0, 0, 0, 0, 0); +; } +; +; CHECK: rdar12282281 +; CHECK: jmpq *%r11 # TAILCALL +@funcs = external constant [0 x i32 (i8*, ...)*] + +define i32 @rdar12282281(i32 %n) nounwind uwtable ssp { +entry: + %idxprom = sext i32 %n to i64 + %arrayidx = getelementptr inbounds [0 x i32 (i8*, ...)*]* @funcs, i64 0, i64 %idxprom + %0 = load i32 (i8*, ...)** %arrayidx, align 8 + %call = tail call i32 (i8*, ...)* %0(i8* null, i32 0, i32 0, i32 0, i32 0, i32 0) nounwind + ret i32 %call +} + +define x86_fp80 @fp80_call(x86_fp80 %x) nounwind { +entry: +; CHECK: fp80_call: +; CHECK: jmp _fp80_callee + %call = tail call x86_fp80 @fp80_callee(x86_fp80 %x) nounwind + ret x86_fp80 %call +} + +declare x86_fp80 @fp80_callee(x86_fp80) + +; rdar://12229511 +define x86_fp80 @trunc_fp80(x86_fp80 %x) nounwind { +entry: +; CHECK: trunc_fp80 +; CHECK: callq _trunc +; CHECK-NOT: jmp _trunc +; CHECK: ret + %conv = fptrunc x86_fp80 %x to double + %call = tail call double @trunc(double %conv) nounwind readnone + %conv1 = fpext double %call to x86_fp80 + ret x86_fp80 %conv1 +} +declare double @trunc(double) nounwind readnone diff --git a/test/CodeGen/X86/targetLoweringGeneric.ll b/test/CodeGen/X86/targetLoweringGeneric.ll index ba5f8f8..a773e9d 100644 --- a/test/CodeGen/X86/targetLoweringGeneric.ll +++ b/test/CodeGen/X86/targetLoweringGeneric.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=i386-apple-darwin9 -fast-isel=false -O0 < %s | FileCheck %s +; RUN: llc -mtriple=i386-apple-darwin9 -mcpu=corei7 -fast-isel=false -O0 < %s | FileCheck %s ; Gather non-machine specific tests for the transformations in ; CodeGen/SelectionDAG/TargetLowering. Currently, these diff --git a/test/CodeGen/X86/tls-pic.ll b/test/CodeGen/X86/tls-pic.ll index 51c3d23..b823f0a 100644 --- a/test/CodeGen/X86/tls-pic.ll +++ b/test/CodeGen/X86/tls-pic.ll @@ -76,12 +76,12 @@ entry: ; X32: f5: ; X32: leal {{[jk]}}@TLSLDM(%ebx) -; X32-NEXT: calll ___tls_get_addr@PLT -; X32-NEXT: movl {{[jk]}}@DTPOFF(%eax) -; X32-NEXT: addl {{[jk]}}@DTPOFF(%eax) +; X32: calll ___tls_get_addr@PLT +; X32: movl {{[jk]}}@DTPOFF(%e +; X32: addl {{[jk]}}@DTPOFF(%e ; X64: f5: ; X64: leaq {{[jk]}}@TLSLD(%rip), %rdi -; X64-NEXT: callq __tls_get_addr@PLT -; X64-NEXT: movl {{[jk]}}@DTPOFF(%rax) -; X64-NEXT: addl {{[jk]}}@DTPOFF(%rax) +; X64: callq __tls_get_addr@PLT +; X64: movl {{[jk]}}@DTPOFF(%r +; X64: addl {{[jk]}}@DTPOFF(%r diff --git a/test/CodeGen/X86/trunc-ext-ld-st.ll b/test/CodeGen/X86/trunc-ext-ld-st.ll index 9877d7b..1d22a18 100644 --- a/test/CodeGen/X86/trunc-ext-ld-st.ll +++ b/test/CodeGen/X86/trunc-ext-ld-st.ll @@ -2,8 +2,7 @@ ;CHECK: load_2_i8 ; A single 16-bit load -;CHECK: movzwl -;CHECK: pshufb +;CHECK: pmovzxbq ;CHECK: paddq ;CHECK: pshufb ; A single 16-bit store @@ -19,8 +18,7 @@ define void @load_2_i8(<2 x i8>* %A) { ;CHECK: load_2_i16 ; Read 32-bits -;CHECK: movd -;CHECK: pshufb +;CHECK: pmovzxwq ;CHECK: paddq ;CHECK: pshufb ;CHECK: movd @@ -33,7 +31,7 @@ define void @load_2_i16(<2 x i16>* %A) { } ;CHECK: load_2_i32 -;CHECK: pshufd +;CHECK: pmovzxdq ;CHECK: paddq ;CHECK: pshufd ;CHECK: ret @@ -45,8 +43,7 @@ define void @load_2_i32(<2 x i32>* %A) { } ;CHECK: load_4_i8 -;CHECK: movd -;CHECK: pshufb +;CHECK: pmovzxbd ;CHECK: paddd ;CHECK: pshufb ;CHECK: ret @@ -58,7 +55,7 @@ define void @load_4_i8(<4 x i8>* %A) { } ;CHECK: load_4_i16 -;CHECK: punpcklwd +;CHECK: pmovzxwd ;CHECK: paddd ;CHECK: pshufb ;CHECK: ret @@ -70,7 +67,7 @@ define void @load_4_i16(<4 x i16>* %A) 
{ } ;CHECK: load_8_i8 -;CHECK: punpcklbw +;CHECK: pmovzxbw ;CHECK: paddw ;CHECK: pshufb ;CHECK: ret diff --git a/test/CodeGen/X86/vec_compare-2.ll b/test/CodeGen/X86/vec_compare-2.ll index 46d6a23..4da7953 100644 --- a/test/CodeGen/X86/vec_compare-2.ll +++ b/test/CodeGen/X86/vec_compare-2.ll @@ -10,8 +10,7 @@ define void @blackDespeckle_wrapper(i8** %args_list, i64* %gtid, i64 %xend) { entry: ; CHECK: cfi_def_cfa_offset ; CHECK-NOT: set -; CHECK: punpcklwd -; CHECK: pshufd +; CHECK: pmovzxwq ; CHECK: pshufb %shr.i = ashr <4 x i32> zeroinitializer, <i32 3, i32 3, i32 3, i32 3> ; <<4 x i32>> [#uses=1] %cmp318.i = sext <4 x i1> zeroinitializer to <4 x i32> ; <<4 x i32>> [#uses=1] diff --git a/test/CodeGen/X86/vec_fabs.ll b/test/CodeGen/X86/vec_fabs.ll new file mode 100644 index 0000000..82517cb --- /dev/null +++ b/test/CodeGen/X86/vec_fabs.ll @@ -0,0 +1,38 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86 -mcpu=corei7-avx | FileCheck %s + + +define <2 x double> @fabs_v2f64(<2 x double> %p) +{ + ; CHECK: fabs_v2f64 + ; CHECK: vandps + %t = call <2 x double> @llvm.fabs.v2f64(<2 x double> %p) + ret <2 x double> %t +} +declare <2 x double> @llvm.fabs.v2f64(<2 x double> %p) + +define <4 x float> @fabs_v4f32(<4 x float> %p) +{ + ; CHECK: fabs_v4f32 + ; CHECK: vandps + %t = call <4 x float> @llvm.fabs.v4f32(<4 x float> %p) + ret <4 x float> %t +} +declare <4 x float> @llvm.fabs.v4f32(<4 x float> %p) + +define <4 x double> @fabs_v4f64(<4 x double> %p) +{ + ; CHECK: fabs_v4f64 + ; CHECK: vandps + %t = call <4 x double> @llvm.fabs.v4f64(<4 x double> %p) + ret <4 x double> %t +} +declare <4 x double> @llvm.fabs.v4f64(<4 x double> %p) + +define <8 x float> @fabs_v8f32(<8 x float> %p) +{ + ; CHECK: fabs_v8f32 + ; CHECK: vandps + %t = call <8 x float> @llvm.fabs.v8f32(<8 x float> %p) + ret <8 x float> %t +} +declare <8 x float> @llvm.fabs.v8f32(<8 x float> %p) diff --git a/test/CodeGen/X86/vec_floor.ll b/test/CodeGen/X86/vec_floor.ll new file mode 100644 index 0000000..5e0160b --- /dev/null +++ b/test/CodeGen/X86/vec_floor.ll @@ -0,0 +1,38 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86 -mcpu=corei7-avx | FileCheck %s + + +define <2 x double> @floor_v2f64(<2 x double> %p) +{ + ; CHECK: floor_v2f64 + ; CHECK: vroundpd + %t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p) + ret <2 x double> %t +} +declare <2 x double> @llvm.floor.v2f64(<2 x double> %p) + +define <4 x float> @floor_v4f32(<4 x float> %p) +{ + ; CHECK: floor_v4f32 + ; CHECK: vroundps + %t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p) + ret <4 x float> %t +} +declare <4 x float> @llvm.floor.v4f32(<4 x float> %p) + +define <4 x double> @floor_v4f64(<4 x double> %p) +{ + ; CHECK: floor_v4f64 + ; CHECK: vroundpd + %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p) + ret <4 x double> %t +} +declare <4 x double> @llvm.floor.v4f64(<4 x double> %p) + +define <8 x float> @floor_v8f32(<8 x float> %p) +{ + ; CHECK: floor_v8f32 + ; CHECK: vroundps + %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p) + ret <8 x float> %t +} +declare <8 x float> @llvm.floor.v8f32(<8 x float> %p) diff --git a/test/CodeGen/X86/vec_fpext.ll b/test/CodeGen/X86/vec_fpext.ll index 05b263e..dc0464f 100644 --- a/test/CodeGen/X86/vec_fpext.ll +++ b/test/CodeGen/X86/vec_fpext.ll @@ -1,14 +1,38 @@ ; RUN: llc < %s -march=x86 -mattr=+sse41,-avx | FileCheck %s +; RUN: llc < %s -march=x86 -mattr=+avx | FileCheck --check-prefix=AVX %s ; PR11674 define void @fpext_frommem(<2 x float>* %in, <2 x double>* %out) { entry: -; TODO: We should be able 
to generate cvtps2pd for the load. -; For now, just check that we generate something sane. -; CHECK: cvtss2sd -; CHECK: cvtss2sd +; CHECK: cvtps2pd (%{{.+}}), %xmm{{[0-9]+}} +; AVX: vcvtps2pd (%{{.+}}), %xmm{{[0-9]+}} %0 = load <2 x float>* %in, align 8 %1 = fpext <2 x float> %0 to <2 x double> store <2 x double> %1, <2 x double>* %out, align 1 ret void } + +define void @fpext_frommem4(<4 x float>* %in, <4 x double>* %out) { +entry: +; CHECK: cvtps2pd (%{{.+}}), %xmm{{[0-9]+}} +; CHECK: cvtps2pd 8(%{{.+}}), %xmm{{[0-9]+}} +; AVX: vcvtps2pd (%{{.+}}), %ymm{{[0-9]+}} + %0 = load <4 x float>* %in + %1 = fpext <4 x float> %0 to <4 x double> + store <4 x double> %1, <4 x double>* %out, align 1 + ret void +} + +define void @fpext_frommem8(<8 x float>* %in, <8 x double>* %out) { +entry: +; CHECK: cvtps2pd (%{{.+}}), %xmm{{[0-9]+}} +; CHECK: cvtps2pd 8(%{{.+}}), %xmm{{[0-9]+}} +; CHECK: cvtps2pd 16(%{{.+}}), %xmm{{[0-9]+}} +; CHECK: cvtps2pd 24(%{{.+}}), %xmm{{[0-9]+}} +; AVX: vcvtps2pd (%{{.+}}), %ymm{{[0-9]+}} +; AVX: vcvtps2pd 16(%{{.+}}), %ymm{{[0-9]+}} + %0 = load <8 x float>* %in + %1 = fpext <8 x float> %0 to <8 x double> + store <8 x double> %1, <8 x double>* %out, align 1 + ret void +} diff --git a/test/CodeGen/X86/vec_shuffle-26.ll b/test/CodeGen/X86/vec_shuffle-26.ll index 086af6b..4c56f84 100644 --- a/test/CodeGen/X86/vec_shuffle-26.ll +++ b/test/CodeGen/X86/vec_shuffle-26.ll @@ -1,6 +1,5 @@ -; RUN: llc < %s -march=x86 -mattr=sse41 -o %t -; RUN: grep unpcklps %t | count 1 -; RUN: grep unpckhps %t | count 3 +; RUN: llc < %s -march=x86 -mcpu=generic -mattr=sse41 | FileCheck %s +; RUN: llc < %s -march=x86 -mcpu=atom | FileCheck -check-prefix=ATOM %s ; Transpose example using the more generic vector shuffle. Return float8 ; instead of float16 @@ -14,6 +13,17 @@ target triple = "i386-apple-cl.1.0" define <8 x float> @__transpose2(<4 x float> %p0, <4 x float> %p1, <4 x float> %p2, <4 x float> %p3) nounwind { entry: +; CHECK: transpose2 +; CHECK: unpckhps +; CHECK: unpckhps +; CHECK: unpcklps +; CHECK: unpckhps +; Different instruction order for Atom. +; ATOM: transpose2 +; ATOM: unpckhps +; ATOM: unpckhps +; ATOM: unpckhps +; ATOM: unpcklps %unpcklps = shufflevector <4 x float> %p0, <4 x float> %p2, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>> [#uses=2] %unpckhps = shufflevector <4 x float> %p0, <4 x float> %p2, <4 x i32> < i32 2, i32 6, i32 3, i32 7 > ; <<4 x float>> [#uses=2] %unpcklps8 = shufflevector <4 x float> %p1, <4 x float> %p3, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>> [#uses=2] @@ -27,3 +37,32 @@ entry: ; %r3 = shufflevector <8 x float> %r1, <8 x float> %r2, <16 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15 >; ret <8 x float> %r2 } + +define <2 x i64> @lo_hi_shift(float* nocapture %x, float* nocapture %y) nounwind { +entry: +; movhps should happen before extractps to assure it gets the correct value. 
+; CHECK: lo_hi_shift +; CHECK: movhps ([[BASEREG:%[a-z]+]]), +; CHECK: extractps ${{[0-9]+}}, %xmm{{[0-9]+}}, {{[0-9]*}}([[BASEREG]]) +; CHECK: extractps ${{[0-9]+}}, %xmm{{[0-9]+}}, {{[0-9]*}}([[BASEREG]]) +; ATOM: lo_hi_shift +; ATOM: movhps ([[BASEREG:%[a-z]+]]), +; ATOM: movd %xmm{{[0-9]+}}, {{[0-9]*}}([[BASEREG]]) +; ATOM: movd %xmm{{[0-9]+}}, {{[0-9]*}}([[BASEREG]]) + %v.i = bitcast float* %y to <4 x float>* + %0 = load <4 x float>* %v.i, align 1 + %1 = bitcast float* %x to <1 x i64>* + %.val = load <1 x i64>* %1, align 1 + %2 = bitcast <1 x i64> %.val to <2 x float> + %shuffle.i = shufflevector <2 x float> %2, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef> + %shuffle1.i = shufflevector <4 x float> %0, <4 x float> %shuffle.i, <4 x i32> <i32 0, i32 1, i32 4, i32 5> + %cast.i = bitcast <4 x float> %0 to <2 x i64> + %extract.i = extractelement <2 x i64> %cast.i, i32 1 + %3 = bitcast float* %x to i64* + store i64 %extract.i, i64* %3, align 4 + %4 = bitcast <4 x float> %0 to <16 x i8> + %5 = bitcast <4 x float> %shuffle1.i to <16 x i8> + %palignr = shufflevector <16 x i8> %5, <16 x i8> %4, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23> + %6 = bitcast <16 x i8> %palignr to <2 x i64> + ret <2 x i64> %6 +} diff --git a/test/CodeGen/X86/vec_shuffle-30.ll b/test/CodeGen/X86/vec_shuffle-30.ll index 1651c4c..f5f8842 100644 --- a/test/CodeGen/X86/vec_shuffle-30.ll +++ b/test/CodeGen/X86/vec_shuffle-30.ll @@ -1,21 +1,25 @@ -; RUN: llc < %s -march=x86 -mattr=sse41 -o %t -; RUN: grep pshufhw %t | grep -- -95 | count 1 -; RUN: grep shufps %t | count 1 -; RUN: not grep pslldq %t +; RUN: llc < %s -march=x86 -mattr=+avx | FileCheck %s +; CHECK: test ; Test case when creating pshufhw, we incorrectly set the higher order bit ; for an undef, define void @test(<8 x i16>* %dest, <8 x i16> %in) nounwind { entry: +; CHECK-NOT: vmovaps +; CHECK: vmovlpd +; CHECK: vpshufhw $-95 %0 = load <8 x i16>* %dest %1 = shufflevector <8 x i16> %0, <8 x i16> %in, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 13, i32 undef, i32 14, i32 14> store <8 x i16> %1, <8 x i16>* %dest ret void -} +} +; CHECK: test2 ; A test case where we shouldn't generate a punpckldq but a pshufd and a pslldq define void @test2(<4 x i32>* %dest, <4 x i32> %in) nounwind { entry: +; CHECK-NOT: pslldq +; CHECK: shufps %0 = shufflevector <4 x i32> %in, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> < i32 undef, i32 5, i32 undef, i32 2> store <4 x i32> %0, <4 x i32>* %dest ret void diff --git a/test/CodeGen/X86/widen_cast-1.ll b/test/CodeGen/X86/widen_cast-1.ll index ebdfea9..56c6364 100644 --- a/test/CodeGen/X86/widen_cast-1.ll +++ b/test/CodeGen/X86/widen_cast-1.ll @@ -1,5 +1,5 @@ ; RUN: llc -march=x86 -mcpu=generic -mattr=+sse42 < %s | FileCheck %s -; RUN: llc -march=x86 -mcpu=atom -mattr=+sse42 < %s | FileCheck -check-prefix=ATOM %s +; RUN: llc -march=x86 -mcpu=atom < %s | FileCheck -check-prefix=ATOM %s ; CHECK: paddd ; CHECK: movl diff --git a/test/CodeGen/X86/widen_load-1.ll b/test/CodeGen/X86/widen_load-1.ll index 9705d14..dfaa3d6 100644 --- a/test/CodeGen/X86/widen_load-1.ll +++ b/test/CodeGen/X86/widen_load-1.ll @@ -1,12 +1,17 @@ -; RUN: llc %s -o - -march=x86-64 -mtriple=x86_64-unknown-linux-gnu | FileCheck %s +; RUN: llc %s -o - -march=x86-64 -mattr=-avx -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=SSE +; RUN: llc %s -o - -march=x86-64 -mattr=+avx -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=AVX 
; PR4891 ; PR5626 ; This load should be before the call, not after. -; CHECK: movaps compl+128(%rip), %xmm0 -; CHECK: movaps %xmm0, (%rsp) -; CHECK: callq killcommon +; SSE: movaps compl+128(%rip), %xmm0 +; SSE: movaps %xmm0, (%rsp) +; SSE: callq killcommon + +; AVX: vmovapd compl+128(%rip), %xmm0 +; AVX: vmovapd %xmm0, (%rsp) +; AVX: callq killcommon @compl = linkonce global [20 x i64] zeroinitializer, align 64 ; <[20 x i64]*> [#uses=1] diff --git a/test/CodeGen/X86/widen_load-2.ll b/test/CodeGen/X86/widen_load-2.ll index 79aa000..224898c 100644 --- a/test/CodeGen/X86/widen_load-2.ll +++ b/test/CodeGen/X86/widen_load-2.ll @@ -170,7 +170,7 @@ define void @add31i8(%i8vec31* nocapture sret %ret, %i8vec31* %ap, %i8vec31* %bp ; CHECK: rot %i8vec3pack = type { <3 x i8>, i8 } define %i8vec3pack @rot() nounwind { -; CHECK: movd {{-?[0-9]+}}(%rsp), {{%xmm[0-9]}} +; CHECK: pmovzxbd {{-?[0-9]+}}(%rsp), {{%xmm[0-9]}} entry: %X = alloca %i8vec3pack, align 4 %rot = alloca %i8vec3pack, align 4 diff --git a/test/CodeGen/X86/xmulo.ll b/test/CodeGen/X86/xmulo.ll new file mode 100644 index 0000000..486dafe --- /dev/null +++ b/test/CodeGen/X86/xmulo.ll @@ -0,0 +1,50 @@ +; RUN: llc %s -o - | FileCheck %s +target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128" +target triple = "i386-apple-macosx10.8.0" + +declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone +declare i32 @printf(i8*, ...) + +@.str = private unnamed_addr constant [10 x i8] c"%llx, %d\0A\00", align 1 + +define i32 @t1() nounwind { +; CHECK: t1: +; CHECK: movl $0, 12(%esp) +; CHECK: movl $0, 8(%esp) +; CHECK: movl $72, 4(%esp) + + %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 8) + %2 = extractvalue {i64, i1} %1, 0 + %3 = extractvalue {i64, i1} %1, 1 + %4 = zext i1 %3 to i32 + %5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i32 0, i32 0), i64 %2, i32 %4) + ret i32 0 +} + +define i32 @t2() nounwind { +; CHECK: t2: +; CHECK: movl $0, 12(%esp) +; CHECK: movl $0, 8(%esp) +; CHECK: movl $0, 4(%esp) + + %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 0) + %2 = extractvalue {i64, i1} %1, 0 + %3 = extractvalue {i64, i1} %1, 1 + %4 = zext i1 %3 to i32 + %5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i32 0, i32 0), i64 %2, i32 %4) + ret i32 0 +} + +define i32 @t3() nounwind { +; CHECK: t3: +; CHECK: movl $1, 12(%esp) +; CHECK: movl $-1, 8(%esp) +; CHECK: movl $-9, 4(%esp) + + %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 -1) + %2 = extractvalue {i64, i1} %1, 0 + %3 = extractvalue {i64, i1} %1, 1 + %4 = zext i1 %3 to i32 + %5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i32 0, i32 0), i64 %2, i32 %4) + ret i32 0 +} |
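For reference, the constants expected by the new xmulo.ll CHECK lines follow directly from constant-folding llvm.umul.with.overflow.i64; this is a sketch of the arithmetic, assuming the usual i386 cdecl layout implied by the checks (i64 result in 4(%esp)/8(%esp), overflow flag in 12(%esp)). In t1, 9 * 8 = 72 with no unsigned overflow, giving $72 (low), $0 (high), $0 (flag). In t3, the i64 operand -1 is 2^64 - 1 when treated as unsigned, so 9 * (2^64 - 1) = 9*2^64 - 9, which wraps modulo 2^64 to 2^64 - 9 = 0xFFFFFFFFFFFFFFF7 with the overflow bit set, giving a low dword of $-9, a high dword of $-1, and $1 for the flag.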