Diffstat (limited to 'test/Transforms/InstCombine')
-rw-r--r--  test/Transforms/InstCombine/2008-01-21-MulTrunc.ll      |   2
-rw-r--r--  test/Transforms/InstCombine/apint-cast.ll               |   2
-rw-r--r--  test/Transforms/InstCombine/cast-mul-select.ll          |   2
-rw-r--r--  test/Transforms/InstCombine/cast-set.ll                 |   2
-rw-r--r--  test/Transforms/InstCombine/cast.ll                     |   2
-rw-r--r--  test/Transforms/InstCombine/compare-signs.ll            |  29
-rw-r--r--  test/Transforms/InstCombine/intrinsics.ll               |  12
-rw-r--r--  test/Transforms/InstCombine/invariant.ll                |  16
-rw-r--r--  test/Transforms/InstCombine/phi.ll                      | 140
-rw-r--r--  test/Transforms/InstCombine/sext-misc.ll                |   2
-rw-r--r--  test/Transforms/InstCombine/udivrem-change-width.ll     |   2
11 files changed, 210 insertions(+), 1 deletion(-)
diff --git a/test/Transforms/InstCombine/2008-01-21-MulTrunc.ll b/test/Transforms/InstCombine/2008-01-21-MulTrunc.ll
index a49829a..87c2b75 100644
--- a/test/Transforms/InstCombine/2008-01-21-MulTrunc.ll
+++ b/test/Transforms/InstCombine/2008-01-21-MulTrunc.ll
@@ -1,5 +1,7 @@
; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
define i16 @test1(i16 %a) {
%tmp = zext i16 %a to i32 ; <i32> [#uses=2]
%tmp21 = lshr i32 %tmp, 8 ; <i32> [#uses=1]
diff --git a/test/Transforms/InstCombine/apint-cast.ll b/test/Transforms/InstCombine/apint-cast.ll
index 9bc539e..85e7a4f 100644
--- a/test/Transforms/InstCombine/apint-cast.ll
+++ b/test/Transforms/InstCombine/apint-cast.ll
@@ -1,6 +1,8 @@
; Tests to make sure elimination of casts is working correctly
; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
define i17 @test1(i17 %a) {
%tmp = zext i17 %a to i37 ; <i37> [#uses=2]
%tmp21 = lshr i37 %tmp, 8 ; <i37> [#uses=1]
diff --git a/test/Transforms/InstCombine/cast-mul-select.ll b/test/Transforms/InstCombine/cast-mul-select.ll
index fcb7e23..f55423c 100644
--- a/test/Transforms/InstCombine/cast-mul-select.ll
+++ b/test/Transforms/InstCombine/cast-mul-select.ll
@@ -1,5 +1,7 @@
; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32"
+
define i32 @mul(i32 %x, i32 %y) {
%A = trunc i32 %x to i8
%B = trunc i32 %y to i8
diff --git a/test/Transforms/InstCombine/cast-set.ll b/test/Transforms/InstCombine/cast-set.ll
index 611ded4..8934404 100644
--- a/test/Transforms/InstCombine/cast-set.ll
+++ b/test/Transforms/InstCombine/cast-set.ll
@@ -1,6 +1,8 @@
; This tests for various complex cast elimination cases instcombine should
; handle.
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
; RUN: opt < %s -instcombine -S | FileCheck %s
define i1 @test1(i32 %X) {
diff --git a/test/Transforms/InstCombine/cast.ll b/test/Transforms/InstCombine/cast.ll
index 79f86e9..e7695b7 100644
--- a/test/Transforms/InstCombine/cast.ll
+++ b/test/Transforms/InstCombine/cast.ll
@@ -1,6 +1,6 @@
; Tests to make sure elimination of casts is working correctly
; RUN: opt < %s -instcombine -S | FileCheck %s
-target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128-n8:16:32:64"
@inbuf = external global [32832 x i8] ; <[32832 x i8]*> [#uses=1]
diff --git a/test/Transforms/InstCombine/compare-signs.ll b/test/Transforms/InstCombine/compare-signs.ll
new file mode 100644
index 0000000..2f98641
--- /dev/null
+++ b/test/Transforms/InstCombine/compare-signs.ll
@@ -0,0 +1,29 @@
+; RUN: opt %s -instcombine -S | FileCheck %s
+; XFAIL: *
+; PR5438
+
+; TODO: This should also optimize down.
+;define i32 @bar(i32 %a, i32 %b) nounwind readnone {
+;entry:
+; %0 = icmp sgt i32 %a, -1 ; <i1> [#uses=1]
+; %1 = icmp slt i32 %b, 0 ; <i1> [#uses=1]
+; %2 = xor i1 %1, %0 ; <i1> [#uses=1]
+; %3 = zext i1 %2 to i32 ; <i32> [#uses=1]
+; ret i32 %3
+;}
+
+define i32 @qaz(i32 %a, i32 %b) nounwind readnone {
+; CHECK: @qaz
+entry:
+; CHECK: xor i32 %a, %b
+; CHECK: lshr i32 %0, 31
+; CHECK: xor i32 %1, 1
+ %0 = lshr i32 %a, 31 ; <i32> [#uses=1]
+ %1 = lshr i32 %b, 31 ; <i32> [#uses=1]
+ %2 = icmp eq i32 %0, %1 ; <i1> [#uses=1]
+ %3 = zext i1 %2 to i32 ; <i32> [#uses=1]
+ ret i32 %3
+; CHECK-NOT: icmp
+; CHECK-NOT: zext
+; CHECK: ret i32 %2
+}
diff --git a/test/Transforms/InstCombine/intrinsics.ll b/test/Transforms/InstCombine/intrinsics.ll
new file mode 100644
index 0000000..7abd380
--- /dev/null
+++ b/test/Transforms/InstCombine/intrinsics.ll
@@ -0,0 +1,12 @@
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+declare {i8, i1} @llvm.uadd.with.overflow.i8(i8, i8)
+
+define i8 @test1(i8 %A, i8 %B) {
+ %x = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
+ %y = extractvalue {i8, i1} %x, 0
+ ret i8 %y
+; CHECK: @test1
+; CHECK-NEXT: %y = add i8 %A, %B
+; CHECK-NEXT: ret i8 %y
+}
diff --git a/test/Transforms/InstCombine/invariant.ll b/test/Transforms/InstCombine/invariant.ll
new file mode 100644
index 0000000..c67ad33
--- /dev/null
+++ b/test/Transforms/InstCombine/invariant.ll
@@ -0,0 +1,16 @@
+; Test to make sure unused llvm.invariant.start calls are not trivially eliminated
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+declare void @g(i8*)
+
+declare { }* @llvm.invariant.start(i64, i8* nocapture) nounwind readonly
+
+define i8 @f() {
+ %a = alloca i8 ; <i8*> [#uses=4]
+ store i8 0, i8* %a
+ %i = call { }* @llvm.invariant.start(i64 1, i8* %a) ; <{ }*> [#uses=0]
+ ; CHECK: call { }* @llvm.invariant.start
+ call void @g(i8* %a)
+ %r = load i8* %a ; <i8> [#uses=1]
+ ret i8 %r
+}
diff --git a/test/Transforms/InstCombine/phi.ll b/test/Transforms/InstCombine/phi.ll
index b73ce3f..f0343e4 100644
--- a/test/Transforms/InstCombine/phi.ll
+++ b/test/Transforms/InstCombine/phi.ll
@@ -2,6 +2,8 @@
;
; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+
define i32 @test1(i32 %A, i1 %b) {
BB0:
br i1 %b, label %BB1, label %BB2
@@ -222,3 +224,141 @@ end:
; CHECK: ret i1 %z
}
+
+define i64 @test12(i1 %cond, i8* %Ptr, i64 %Val) {
+entry:
+ %tmp41 = ptrtoint i8* %Ptr to i64
+ %tmp42 = zext i64 %tmp41 to i128
+ br i1 %cond, label %end, label %two
+
+two:
+ %tmp36 = zext i64 %Val to i128 ; <i128> [#uses=1]
+ %tmp37 = shl i128 %tmp36, 64 ; <i128> [#uses=1]
+ %ins39 = or i128 %tmp42, %tmp37 ; <i128> [#uses=1]
+ br label %end
+
+end:
+ %tmp869.0 = phi i128 [ %tmp42, %entry ], [ %ins39, %two ]
+ %tmp32 = trunc i128 %tmp869.0 to i64 ; <i64> [#uses=1]
+ %tmp29 = lshr i128 %tmp869.0, 64 ; <i128> [#uses=1]
+ %tmp30 = trunc i128 %tmp29 to i64 ; <i64> [#uses=1]
+
+ %tmp2 = add i64 %tmp32, %tmp30
+ ret i64 %tmp2
+; CHECK: @test12
+; CHECK-NOT: zext
+; CHECK: end:
+; CHECK-NEXT: phi i64 [ 0, %entry ], [ %Val, %two ]
+; CHECK-NOT: phi
+; CHECK: ret i64
+}
+
+declare void @test13f(double, i32)
+
+define void @test13(i1 %cond, i32 %V1, double %Vald) {
+entry:
+ %tmp42 = zext i32 %V1 to i128
+ br i1 %cond, label %end, label %two
+
+two:
+ %Val = bitcast double %Vald to i64
+ %tmp36 = zext i64 %Val to i128 ; <i128> [#uses=1]
+ %tmp37 = shl i128 %tmp36, 64 ; <i128> [#uses=1]
+ %ins39 = or i128 %tmp42, %tmp37 ; <i128> [#uses=1]
+ br label %end
+
+end:
+ %tmp869.0 = phi i128 [ %tmp42, %entry ], [ %ins39, %two ]
+ %tmp32 = trunc i128 %tmp869.0 to i32
+ %tmp29 = lshr i128 %tmp869.0, 64 ; <i128> [#uses=1]
+ %tmp30 = trunc i128 %tmp29 to i64 ; <i64> [#uses=1]
+ %tmp31 = bitcast i64 %tmp30 to double
+
+ call void @test13f(double %tmp31, i32 %tmp32)
+ ret void
+; CHECK: @test13
+; CHECK-NOT: zext
+; CHECK: end:
+; CHECK-NEXT: phi double [ 0.000000e+00, %entry ], [ %Vald, %two ]
+; CHECK-NEXT: call void @test13f(double {{[^,]*}}, i32 %V1)
+; CHECK: ret void
+}
+
+define i640 @test14a(i320 %A, i320 %B, i1 %b1) {
+BB0:
+ %a = zext i320 %A to i640
+ %b = zext i320 %B to i640
+ br label %Loop
+
+Loop:
+ %C = phi i640 [ %a, %BB0 ], [ %b, %Loop ]
+ br i1 %b1, label %Loop, label %Exit
+
+Exit: ; preds = %Loop
+ ret i640 %C
+; CHECK: @test14a
+; CHECK: Loop:
+; CHECK-NEXT: phi i320
+}
+
+define i160 @test14b(i320 %A, i320 %B, i1 %b1) {
+BB0:
+ %a = trunc i320 %A to i160
+ %b = trunc i320 %B to i160
+ br label %Loop
+
+Loop:
+ %C = phi i160 [ %a, %BB0 ], [ %b, %Loop ]
+ br i1 %b1, label %Loop, label %Exit
+
+Exit: ; preds = %Loop
+ ret i160 %C
+; CHECK: @test14b
+; CHECK: Loop:
+; CHECK-NEXT: phi i160
+}
+
+declare i64 @test15a(i64)
+
+define i64 @test15b(i64 %A, i1 %b) {
+; CHECK: @test15b
+entry:
+ %i0 = zext i64 %A to i128
+ %i1 = shl i128 %i0, 64
+ %i = or i128 %i1, %i0
+ br i1 %b, label %one, label %two
+; CHECK: entry:
+; CHECK-NEXT: br i1 %b
+
+one:
+ %x = phi i128 [%i, %entry], [%y, %two]
+ %x1 = lshr i128 %x, 64
+ %x2 = trunc i128 %x1 to i64
+ %c = call i64 @test15a(i64 %x2)
+ %c1 = zext i64 %c to i128
+ br label %two
+
+; CHECK: one:
+; CHECK-NEXT: phi i64
+; CHECK-NEXT: %c = call i64 @test15a
+
+two:
+ %y = phi i128 [%i, %entry], [%c1, %one]
+ %y1 = lshr i128 %y, 64
+ %y2 = trunc i128 %y1 to i64
+ %d = call i64 @test15a(i64 %y2)
+ %d1 = trunc i64 %d to i1
+ br i1 %d1, label %one, label %end
+
+; CHECK: two:
+; CHECK-NEXT: phi i64
+; CHECK-NEXT: phi i64
+; CHECK-NEXT: %d = call i64 @test15a
+
+end:
+ %g = trunc i128 %y to i64
+ ret i64 %g
+; CHECK: end:
+; CHECK-NEXT: ret i64
+}
+
diff --git a/test/Transforms/InstCombine/sext-misc.ll b/test/Transforms/InstCombine/sext-misc.ll
index 107bba6..3346ff8 100644
--- a/test/Transforms/InstCombine/sext-misc.ll
+++ b/test/Transforms/InstCombine/sext-misc.ll
@@ -1,5 +1,7 @@
; RUN: opt < %s -instcombine -S | not grep sext
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
declare i32 @llvm.ctpop.i32(i32)
declare i32 @llvm.ctlz.i32(i32)
declare i32 @llvm.cttz.i32(i32)
diff --git a/test/Transforms/InstCombine/udivrem-change-width.ll b/test/Transforms/InstCombine/udivrem-change-width.ll
index 56877e3..9983944 100644
--- a/test/Transforms/InstCombine/udivrem-change-width.ll
+++ b/test/Transforms/InstCombine/udivrem-change-width.ll
@@ -1,6 +1,8 @@
; RUN: opt < %s -instcombine -S | not grep zext
; PR4548
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
define i8 @udiv_i8(i8 %a, i8 %b) nounwind {
%conv = zext i8 %a to i32
%conv2 = zext i8 %b to i32