Diffstat (limited to 'test/Transforms/InstCombine')
-rw-r--r--  test/Transforms/InstCombine/apint-shift.ll         |   7
-rw-r--r--  test/Transforms/InstCombine/bswap-fold.ll          |  31
-rw-r--r--  test/Transforms/InstCombine/bswap.ll               |   2
-rw-r--r--  test/Transforms/InstCombine/cast-and-cast.ll       |  17
-rw-r--r--  test/Transforms/InstCombine/cast-cast-to-and.ll    |   9
-rw-r--r--  test/Transforms/InstCombine/cast-load-gep.ll       |  21
-rw-r--r--  test/Transforms/InstCombine/cast-propagate.ll      |  11
-rw-r--r--  test/Transforms/InstCombine/cast-sext-zext.ll      |  12
-rw-r--r--  test/Transforms/InstCombine/cast.ll                | 280
-rw-r--r--  test/Transforms/InstCombine/cast2.ll               |  37
-rw-r--r--  test/Transforms/InstCombine/cast3.ll               |  35
-rw-r--r--  test/Transforms/InstCombine/cast_ld_addr_space.ll  |  19
-rw-r--r--  test/Transforms/InstCombine/cast_ptr.ll            |  41
-rw-r--r--  test/Transforms/InstCombine/fsub-fadd.ll           |  39
-rw-r--r--  test/Transforms/InstCombine/intrinsics.ll          |  47
-rw-r--r--  test/Transforms/InstCombine/load-cmp.ll            | 112
-rw-r--r--  test/Transforms/InstCombine/load.ll                |   9
-rw-r--r--  test/Transforms/InstCombine/loadstore-alignment.ll |   4
-rw-r--r--  test/Transforms/InstCombine/or.ll                  |  63
-rw-r--r--  test/Transforms/InstCombine/setcc-cast-cast.ll     |  46
-rw-r--r--  test/Transforms/InstCombine/shift-sra.ll           |  45
-rw-r--r--  test/Transforms/InstCombine/sub.ll                 |  29
22 files changed, 670 insertions, 246 deletions
diff --git a/test/Transforms/InstCombine/apint-shift.ll b/test/Transforms/InstCombine/apint-shift.ll
index 6573b5b..55243a6 100644
--- a/test/Transforms/InstCombine/apint-shift.ll
+++ b/test/Transforms/InstCombine/apint-shift.ll
@@ -168,13 +168,6 @@ define i11 @test23(i44 %A) {
ret i11 %D
}
-define i17 @test24(i17 %X) {
- %Y = and i17 %X, -5 ; <i17> [#uses=1]
- %Z = shl i17 %Y, 9 ; <i17> [#uses=1]
- %Q = ashr i17 %Z, 9 ; <i17> [#uses=1]
- ret i17 %Q
-}
-
define i37 @test25(i37 %tmp.2, i37 %AA) {
%x = lshr i37 %AA, 17 ; <i37> [#uses=1]
%tmp.3 = lshr i37 %tmp.2, 17 ; <i37> [#uses=1]
diff --git a/test/Transforms/InstCombine/bswap-fold.ll b/test/Transforms/InstCombine/bswap-fold.ll
index 3e56951..034c70e 100644
--- a/test/Transforms/InstCombine/bswap-fold.ll
+++ b/test/Transforms/InstCombine/bswap-fold.ll
@@ -1,23 +1,22 @@
-; RUN: opt < %s -instcombine -S | grep ret | count 6
; RUN: opt < %s -instcombine -S | not grep call.*bswap
define i1 @test1(i16 %tmp2) {
- %tmp10 = call i16 @llvm.bswap.i16( i16 %tmp2 ) ; <i16> [#uses=1]
- %tmp = icmp eq i16 %tmp10, 1 ; <i1> [#uses=1]
+ %tmp10 = call i16 @llvm.bswap.i16( i16 %tmp2 )
+ %tmp = icmp eq i16 %tmp10, 1
ret i1 %tmp
}
define i1 @test2(i32 %tmp) {
- %tmp34 = tail call i32 @llvm.bswap.i32( i32 %tmp ) ; <i32> [#uses=1]
- %tmp.upgrd.1 = icmp eq i32 %tmp34, 1 ; <i1> [#uses=1]
+ %tmp34 = tail call i32 @llvm.bswap.i32( i32 %tmp )
+ %tmp.upgrd.1 = icmp eq i32 %tmp34, 1
ret i1 %tmp.upgrd.1
}
declare i32 @llvm.bswap.i32(i32)
define i1 @test3(i64 %tmp) {
- %tmp34 = tail call i64 @llvm.bswap.i64( i64 %tmp ) ; <i64> [#uses=1]
- %tmp.upgrd.2 = icmp eq i64 %tmp34, 1 ; <i1> [#uses=1]
+ %tmp34 = tail call i64 @llvm.bswap.i64( i64 %tmp )
+ %tmp.upgrd.2 = icmp eq i64 %tmp34, 1
ret i1 %tmp.upgrd.2
}
@@ -50,3 +49,21 @@ entry:
ret i32 %tmp4
}
+; PR5284
+declare i64 @llvm.bswap.i64(i64)
+declare i32 @llvm.bswap.i32(i32)
+declare i16 @llvm.bswap.i16(i16)
+
+define i16 @test7(i32 %A) {
+ %B = tail call i32 @llvm.bswap.i32(i32 %A) nounwind
+ %C = trunc i32 %B to i16
+ %D = tail call i16 @llvm.bswap.i16(i16 %C) nounwind
+ ret i16 %D
+}
+
+define i16 @test8(i64 %A) {
+ %B = tail call i64 @llvm.bswap.i64(i64 %A) nounwind
+ %C = trunc i64 %B to i16
+ %D = tail call i16 @llvm.bswap.i16(i16 %C) nounwind
+ ret i16 %D
+}
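
The PR5284 tests above compose bswap/trunc/bswap: the wide bswap reverses all
bytes, the trunc keeps the low two (originally the operand's top two, now in
reversed order), and bswap.i16 restores their order, so the chain reduces to
extracting the high 16 bits. A hand-folded equivalent of test7 (a sketch of
the expected combine, not CHECK output from the test; test8 is the same with
a shift of 48):

    define i16 @test7.folded(i32 %A) {
      %hi = lshr i32 %A, 16       ; move the top two bytes down
      %D = trunc i32 %hi to i16   ; keep them, in their original order
      ret i16 %D
    }
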
diff --git a/test/Transforms/InstCombine/bswap.ll b/test/Transforms/InstCombine/bswap.ll
index c5aa8be..168b3e8 100644
--- a/test/Transforms/InstCombine/bswap.ll
+++ b/test/Transforms/InstCombine/bswap.ll
@@ -1,3 +1,5 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
+
; RUN: opt < %s -instcombine -S | \
; RUN: grep {call.*llvm.bswap} | count 6
diff --git a/test/Transforms/InstCombine/cast-and-cast.ll b/test/Transforms/InstCombine/cast-and-cast.ll
deleted file mode 100644
index eda9d99..0000000
--- a/test/Transforms/InstCombine/cast-and-cast.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: opt < %s -instcombine -S | \
-; RUN: not grep bitcast
-
-define i1 @test1(i32 %val) {
- %t1 = bitcast i32 %val to i32 ; <i32> [#uses=1]
- %t2 = and i32 %t1, 1 ; <i32> [#uses=1]
- %t3 = trunc i32 %t2 to i1 ; <i1> [#uses=1]
- ret i1 %t3
-}
-
-define i16 @test1.upgrd.1(i32 %val) {
- %t1 = bitcast i32 %val to i32 ; <i32> [#uses=1]
- %t2 = and i32 %t1, 1 ; <i32> [#uses=1]
- %t3 = trunc i32 %t2 to i16 ; <i16> [#uses=1]
- ret i16 %t3
-}
-
diff --git a/test/Transforms/InstCombine/cast-cast-to-and.ll b/test/Transforms/InstCombine/cast-cast-to-and.ll
deleted file mode 100644
index 1e591cc..0000000
--- a/test/Transforms/InstCombine/cast-cast-to-and.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: opt < %s -instcombine -S | \
-; RUN: not grep i8
-
-define i32 @test1(i32 %X) {
- %Y = trunc i32 %X to i8 ; <i8> [#uses=1]
- %Z = zext i8 %Y to i32 ; <i32> [#uses=1]
- ret i32 %Z
-}
-
diff --git a/test/Transforms/InstCombine/cast-load-gep.ll b/test/Transforms/InstCombine/cast-load-gep.ll
deleted file mode 100644
index 271c737..0000000
--- a/test/Transforms/InstCombine/cast-load-gep.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: opt < %s -instcombine -globaldce -S | \
-; RUN: not grep Array
-target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
-
-; Pulling the cast out of the load allows us to eliminate the load, and then
-; the whole array.
-
- %op = type { float }
- %unop = type { i32 }
-@Array = internal constant [1 x %op* (%op*)*] [ %op* (%op*)* @foo ] ; <[1 x %op* (%op*)*]*> [#uses=1]
-
-define %op* @foo(%op* %X) {
- ret %op* %X
-}
-
-define %unop* @caller(%op* %O) {
- %tmp = load %unop* (%op*)** bitcast ([1 x %op* (%op*)*]* @Array to %unop* (%op*)**); <%unop* (%op*)*> [#uses=1]
- %tmp.2 = call %unop* %tmp( %op* %O ) ; <%unop*> [#uses=1]
- ret %unop* %tmp.2
-}
-
diff --git a/test/Transforms/InstCombine/cast-propagate.ll b/test/Transforms/InstCombine/cast-propagate.ll
deleted file mode 100644
index 95c040b..0000000
--- a/test/Transforms/InstCombine/cast-propagate.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: opt < %s -instcombine -mem2reg -S | \
-; RUN: not grep load
-
-define i32 @test1(i32* %P) {
- %A = alloca i32 ; <i32*> [#uses=2]
- store i32 123, i32* %A
- ; Cast the result of the load not the source
- %Q = bitcast i32* %A to i32* ; <i32*> [#uses=1]
- %V = load i32* %Q ; <i32> [#uses=1]
- ret i32 %V
-}
diff --git a/test/Transforms/InstCombine/cast-sext-zext.ll b/test/Transforms/InstCombine/cast-sext-zext.ll
deleted file mode 100644
index 0fecc1c..0000000
--- a/test/Transforms/InstCombine/cast-sext-zext.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: opt < %s -instcombine -S | not grep sext
-; XFAIL: *
-
-define zeroext i16 @t(i8 zeroext %on_off, i16* nocapture %puls) nounwind readonly {
-entry:
- %0 = zext i8 %on_off to i32
- %1 = add i32 %0, -1
- %2 = sext i32 %1 to i64
- %3 = getelementptr i16* %puls, i64 %2
- %4 = load i16* %3, align 2
- ret i16 %4
-}
diff --git a/test/Transforms/InstCombine/cast.ll b/test/Transforms/InstCombine/cast.ll
index 5f75cd0..878da68 100644
--- a/test/Transforms/InstCombine/cast.ll
+++ b/test/Transforms/InstCombine/cast.ll
@@ -55,8 +55,8 @@ define i32 @test6(i64 %A) {
%c1 = trunc i64 %A to i32 ; <i32> [#uses=1]
%res = bitcast i32 %c1 to i32 ; <i32> [#uses=1]
ret i32 %res
-; CHECK: %res = trunc i64 %A to i32
-; CHECK: ret i32 %res
+; CHECK: trunc i64 %A to i32
+; CHECK-NEXT: ret i32
}
define i64 @test7(i1 %A) {
@@ -71,8 +71,8 @@ define i64 @test8(i8 %A) {
%c1 = sext i8 %A to i64 ; <i64> [#uses=1]
%res = bitcast i64 %c1 to i64 ; <i64> [#uses=1]
ret i64 %res
-; CHECK: %res = sext i8 %A to i64
-; CHECK: ret i64 %res
+; CHECK: = sext i8 %A to i64
+; CHECK-NEXT: ret i64
}
define i16 @test9(i16 %A) {
@@ -185,8 +185,8 @@ define i32 @test22(i32 %X) {
%c2 = sext i8 %c1 to i32 ; <i32> [#uses=1]
%RV = shl i32 %c2, 24 ; <i32> [#uses=1]
ret i32 %RV
-; CHECK: %RV = shl i32 %X, 24
-; CHECK: ret i32 %RV
+; CHECK: shl i32 %X, 24
+; CHECK-NEXT: ret i32
}
define i32 @test23(i32 %X) {
@@ -337,3 +337,271 @@ define i64 @test38(i32 %a) {
; CHECK: %2 = zext i1 %1 to i64
; CHECK: ret i64 %2
}
+
+define i16 @test39(i16 %a) {
+ %tmp = zext i16 %a to i32
+ %tmp21 = lshr i32 %tmp, 8
+ %tmp5 = shl i32 %tmp, 8
+ %tmp.upgrd.32 = or i32 %tmp21, %tmp5
+ %tmp.upgrd.3 = trunc i32 %tmp.upgrd.32 to i16
+ ret i16 %tmp.upgrd.3
+; CHECK: @test39
+; CHECK: %tmp.upgrd.32 = call i16 @llvm.bswap.i16(i16 %a)
+; CHECK: ret i16 %tmp.upgrd.32
+}
+
+define i16 @test40(i16 %a) {
+ %tmp = zext i16 %a to i32
+ %tmp21 = lshr i32 %tmp, 9
+ %tmp5 = shl i32 %tmp, 8
+ %tmp.upgrd.32 = or i32 %tmp21, %tmp5
+ %tmp.upgrd.3 = trunc i32 %tmp.upgrd.32 to i16
+ ret i16 %tmp.upgrd.3
+; CHECK: @test40
+; CHECK: %tmp21 = lshr i16 %a, 9
+; CHECK: %tmp5 = shl i16 %a, 8
+; CHECK: %tmp.upgrd.32 = or i16 %tmp21, %tmp5
+; CHECK: ret i16 %tmp.upgrd.32
+}
+
+; PR1263
+define i32* @test41(i32* %tmp1) {
+ %tmp64 = bitcast i32* %tmp1 to { i32 }*
+ %tmp65 = getelementptr { i32 }* %tmp64, i32 0, i32 0
+ ret i32* %tmp65
+; CHECK: @test41
+; CHECK: ret i32* %tmp1
+}
+
+define i32 @test42(i32 %X) {
+ %Y = trunc i32 %X to i8 ; <i8> [#uses=1]
+ %Z = zext i8 %Y to i32 ; <i32> [#uses=1]
+ ret i32 %Z
+; CHECK: @test42
+; CHECK: %Z = and i32 %X, 255
+}
+
+; rdar://6598839
+define zeroext i64 @test43(i8 zeroext %on_off) nounwind readonly {
+ %A = zext i8 %on_off to i32
+ %B = add i32 %A, -1
+ %C = sext i32 %B to i64
+ ret i64 %C ;; Should be (add (zext i8 -> i64), -1)
+; CHECK: @test43
+; CHECK-NEXT: %A = zext i8 %on_off to i64
+; CHECK-NEXT: %B = add i64 %A, -1
+; CHECK-NEXT: ret i64 %B
+}
+
+define i64 @test44(i8 %T) {
+ %A = zext i8 %T to i16
+ %B = or i16 %A, 1234
+ %C = zext i16 %B to i64
+ ret i64 %C
+; CHECK: @test44
+; CHECK-NEXT: %A = zext i8 %T to i64
+; CHECK-NEXT: %B = or i64 %A, 1234
+; CHECK-NEXT: ret i64 %B
+}
+
+define i64 @test45(i8 %A, i64 %Q) {
+ %D = trunc i64 %Q to i32 ;; should be removed
+ %B = sext i8 %A to i32
+ %C = or i32 %B, %D
+ %E = zext i32 %C to i64
+ ret i64 %E
+; CHECK: @test45
+; CHECK-NEXT: %B = sext i8 %A to i64
+; CHECK-NEXT: %C = or i64 %B, %Q
+; CHECK-NEXT: %E = and i64 %C, 4294967295
+; CHECK-NEXT: ret i64 %E
+}
+
+
+define i64 @test46(i64 %A) {
+ %B = trunc i64 %A to i32
+ %C = and i32 %B, 42
+ %D = shl i32 %C, 8
+ %E = zext i32 %D to i64
+ ret i64 %E
+; CHECK: @test46
+; CHECK-NEXT: %C = shl i64 %A, 8
+; CHECK-NEXT: %D = and i64 %C, 10752
+; CHECK-NEXT: ret i64 %D
+}
+
+define i64 @test47(i8 %A) {
+ %B = sext i8 %A to i32
+ %C = or i32 %B, 42
+ %E = zext i32 %C to i64
+ ret i64 %E
+; CHECK: @test47
+; CHECK-NEXT: %B = sext i8 %A to i64
+; CHECK-NEXT: %C = or i64 %B, 42
+; CHECK-NEXT: %E = and i64 %C, 4294967295
+; CHECK-NEXT: ret i64 %E
+}
+
+define i64 @test48(i8 %A, i8 %a) {
+ %b = zext i8 %a to i32
+ %B = zext i8 %A to i32
+ %C = shl i32 %B, 8
+ %D = or i32 %C, %b
+ %E = zext i32 %D to i64
+ ret i64 %E
+; CHECK: @test48
+; CHECK-NEXT: %b = zext i8 %a to i64
+; CHECK-NEXT: %B = zext i8 %A to i64
+; CHECK-NEXT: %C = shl i64 %B, 8
+; CHECK-NEXT: %D = or i64 %C, %b
+; CHECK-NEXT: ret i64 %D
+}
+
+define i64 @test49(i64 %A) {
+ %B = trunc i64 %A to i32
+ %C = or i32 %B, 1
+ %D = sext i32 %C to i64
+ ret i64 %D
+; CHECK: @test49
+; CHECK-NEXT: %C = shl i64 %A, 32
+; CHECK-NEXT: ashr i64 %C, 32
+; CHECK-NEXT: %D = or i64 {{.*}}, 1
+; CHECK-NEXT: ret i64 %D
+}
+
+define i64 @test50(i64 %A) {
+ %a = lshr i64 %A, 2
+ %B = trunc i64 %a to i32
+ %D = add i32 %B, -1
+ %E = sext i32 %D to i64
+ ret i64 %E
+; CHECK: @test50
+; CHECK-NEXT: shl i64 %A, 30
+; CHECK-NEXT: add i64 {{.*}}, -4294967296
+; CHECK-NEXT: %E = ashr i64 {{.*}}, 32
+; CHECK-NEXT: ret i64 %E
+}
+
+define i64 @test51(i64 %A, i1 %cond) {
+ %B = trunc i64 %A to i32
+ %C = and i32 %B, -2
+ %D = or i32 %B, 1
+ %E = select i1 %cond, i32 %C, i32 %D
+ %F = sext i32 %E to i64
+ ret i64 %F
+; CHECK: @test51
+
+; FIXME: disabled, see PR5997
+; HECK-NEXT: %C = and i64 %A, 4294967294
+; HECK-NEXT: %D = or i64 %A, 1
+; HECK-NEXT: %E = select i1 %cond, i64 %C, i64 %D
+; HECK-NEXT: %sext = shl i64 %E, 32
+; HECK-NEXT: %F = ashr i64 %sext, 32
+; HECK-NEXT: ret i64 %F
+}
+
+define i32 @test52(i64 %A) {
+ %B = trunc i64 %A to i16
+ %C = or i16 %B, -32574
+ %D = and i16 %C, -25350
+ %E = zext i16 %D to i32
+ ret i32 %E
+; CHECK: @test52
+; CHECK-NEXT: %B = trunc i64 %A to i32
+; CHECK-NEXT: %C = or i32 %B, 32962
+; CHECK-NEXT: %D = and i32 %C, 40186
+; CHECK-NEXT: ret i32 %D
+}
+
+define i64 @test53(i32 %A) {
+ %B = trunc i32 %A to i16
+ %C = or i16 %B, -32574
+ %D = and i16 %C, -25350
+ %E = zext i16 %D to i64
+ ret i64 %E
+; CHECK: @test53
+; CHECK-NEXT: %B = zext i32 %A to i64
+; CHECK-NEXT: %C = or i64 %B, 32962
+; CHECK-NEXT: %D = and i64 %C, 40186
+; CHECK-NEXT: ret i64 %D
+}
+
+define i32 @test54(i64 %A) {
+ %B = trunc i64 %A to i16
+ %C = or i16 %B, -32574
+ %D = and i16 %C, -25350
+ %E = sext i16 %D to i32
+ ret i32 %E
+; CHECK: @test54
+; CHECK-NEXT: %B = trunc i64 %A to i32
+; CHECK-NEXT: %C = or i32 %B, -32574
+; CHECK-NEXT: %D = and i32 %C, -25350
+; CHECK-NEXT: ret i32 %D
+}
+
+define i64 @test55(i32 %A) {
+ %B = trunc i32 %A to i16
+ %C = or i16 %B, -32574
+ %D = and i16 %C, -25350
+ %E = sext i16 %D to i64
+ ret i64 %E
+; CHECK: @test55
+; CHECK-NEXT: %B = zext i32 %A to i64
+; CHECK-NEXT: %C = or i64 %B, -32574
+; CHECK-NEXT: %D = and i64 %C, -25350
+; CHECK-NEXT: ret i64 %D
+}
+
+define i64 @test56(i16 %A) nounwind {
+ %tmp353 = sext i16 %A to i32
+ %tmp354 = lshr i32 %tmp353, 5
+ %tmp355 = zext i32 %tmp354 to i64
+ ret i64 %tmp355
+; CHECK: @test56
+; CHECK-NEXT: %tmp353 = sext i16 %A to i64
+; CHECK-NEXT: %tmp354 = lshr i64 %tmp353, 5
+; CHECK-NEXT: %tmp355 = and i64 %tmp354, 134217727
+; CHECK-NEXT: ret i64 %tmp355
+}
+
+define i64 @test57(i64 %A) nounwind {
+ %B = trunc i64 %A to i32
+ %C = lshr i32 %B, 8
+ %E = zext i32 %C to i64
+ ret i64 %E
+; CHECK: @test57
+; CHECK-NEXT: %C = lshr i64 %A, 8
+; CHECK-NEXT: %E = and i64 %C, 16777215
+; CHECK-NEXT: ret i64 %E
+}
+
+define i64 @test58(i64 %A) nounwind {
+ %B = trunc i64 %A to i32
+ %C = lshr i32 %B, 8
+ %D = or i32 %C, 128
+ %E = zext i32 %D to i64
+ ret i64 %E
+
+; CHECK: @test58
+; CHECK-NEXT: %C = lshr i64 %A, 8
+; CHECK-NEXT: %D = or i64 %C, 128
+; CHECK-NEXT: %E = and i64 %D, 16777215
+; CHECK-NEXT: ret i64 %E
+}
+
+define i64 @test59(i8 %A, i8 %B) nounwind {
+ %C = zext i8 %A to i32
+ %D = shl i32 %C, 4
+ %E = and i32 %D, 48
+ %F = zext i8 %B to i32
+ %G = lshr i32 %F, 4
+ %H = or i32 %G, %E
+ %I = zext i32 %H to i64
+ ret i64 %I
+; CHECK: @test59
+; CHECK-NEXT: %C = zext i8 %A to i64
+; CHECK-NOT: i32
+; CHECK: %F = zext i8 %B to i64
+; CHECK-NOT: i32
+; CHECK: ret i64 %H
+}
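
The new tests 43-59 all pin the "evaluate in a wider type" transform: when
every operation between a trunc/ext pair can be performed in the destination
type, InstCombine rewrites the expression there and drops the intermediate
width, leaving an `and` mask where a zext's zero-fill must be reproduced
(tests 45-47, 57, 58) or a shl/ashr pair where a sext's sign-fill must be
(tests 49-51). A hand-derivation of test46 (a sketch mirroring its CHECK
lines): zext(shl(and(trunc %A, 42), 8)) becomes and(shl %A, 8), 42 << 8:

    define i64 @test46.folded(i64 %A) {
      %C = shl i64 %A, 8
      %D = and i64 %C, 10752   ; 42 << 8; also subsumes the zext's mask
      ret i64 %D
    }
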
diff --git a/test/Transforms/InstCombine/cast2.ll b/test/Transforms/InstCombine/cast2.ll
deleted file mode 100644
index 2941ee0..0000000
--- a/test/Transforms/InstCombine/cast2.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; Tests to make sure elimination of casts is working correctly
-; RUN: opt < %s -instcombine -S | FileCheck %s
-target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
-
-define i16 @test1(i16 %a) {
- %tmp = zext i16 %a to i32 ; <i32> [#uses=2]
- %tmp21 = lshr i32 %tmp, 8 ; <i32> [#uses=1]
- %tmp5 = shl i32 %tmp, 8 ; <i32> [#uses=1]
- %tmp.upgrd.32 = or i32 %tmp21, %tmp5 ; <i32> [#uses=1]
- %tmp.upgrd.3 = trunc i32 %tmp.upgrd.32 to i16 ; <i16> [#uses=1]
- ret i16 %tmp.upgrd.3
-; CHECK: %tmp.upgrd.32 = call i16 @llvm.bswap.i16(i16 %a)
-; CHECK: ret i16 %tmp.upgrd.32
-}
-
-define i16 @test2(i16 %a) {
- %tmp = zext i16 %a to i32 ; <i32> [#uses=2]
- %tmp21 = lshr i32 %tmp, 9 ; <i32> [#uses=1]
- %tmp5 = shl i32 %tmp, 8 ; <i32> [#uses=1]
- %tmp.upgrd.32 = or i32 %tmp21, %tmp5 ; <i32> [#uses=1]
- %tmp.upgrd.3 = trunc i32 %tmp.upgrd.32 to i16 ; <i16> [#uses=1]
- ret i16 %tmp.upgrd.3
-; CHECK: %tmp21 = lshr i16 %a, 9
-; CHECK: %tmp5 = shl i16 %a, 8
-; CHECK: %tmp.upgrd.32 = or i16 %tmp21, %tmp5
-; CHECK: ret i16 %tmp.upgrd.32
-}
-
-; PR1263
-define i32* @test3(i32* %tmp1) {
- %tmp64 = bitcast i32* %tmp1 to { i32 }* ; <{ i32 }*> [#uses=1]
- %tmp65 = getelementptr { i32 }* %tmp64, i32 0, i32 0 ; <i32*> [#uses=1]
- ret i32* %tmp65
-; CHECK: ret i32* %tmp1
-}
-
-
diff --git a/test/Transforms/InstCombine/cast3.ll b/test/Transforms/InstCombine/cast3.ll
deleted file mode 100644
index bc60f55..0000000
--- a/test/Transforms/InstCombine/cast3.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: opt < %s -instcombine -S | not grep getelementptr
-; PR2831
-
-; Don't raise arbitrary inttoptr+arithmetic+ptrtoint to getelementptr.
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-
-define i32 @main(i32 %argc, i8** %argv) nounwind {
-entry:
- %0 = ptrtoint i8** %argv to i32 ; <i32> [#uses=1]
- %1 = add i32 %0, 1 ; <i32> [#uses=1]
- ret i32 %1
-}
-
-; This testcase could theoretically be optimized down to return zero,
-; but for now being conservative with ptrtoint/inttoptr is fine.
-define i32 @a() nounwind {
-entry:
- %b = alloca i32 ; <i32*> [#uses=3]
- %a = alloca i32 ; <i32*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 1, i32* %b, align 4
- %a1 = ptrtoint i32* %a to i32 ; <i32> [#uses=1]
- %b4 = ptrtoint i32* %b to i32 ; <i32> [#uses=1]
- %a7 = ptrtoint i32* %a to i32 ; <i32> [#uses=1]
- %0 = sub i32 %b4, %a7 ; <i32> [#uses=1]
- %1 = add i32 %a1, %0 ; <i32> [#uses=1]
- %2 = inttoptr i32 %1 to i32* ; <i32*> [#uses=1]
- store i32 0, i32* %2, align 4
- %3 = load i32* %b, align 4 ; <i32> [#uses=1]
- br label %return
-
-return: ; preds = %entry
- ret i32 %3
-}
diff --git a/test/Transforms/InstCombine/cast_ld_addr_space.ll b/test/Transforms/InstCombine/cast_ld_addr_space.ll
deleted file mode 100644
index e94dce7..0000000
--- a/test/Transforms/InstCombine/cast_ld_addr_space.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: opt < %s -instcombine -S | grep bitcast | count 1
-
-; InstCombine can not 'load (cast P)' -> cast (load P)' if the cast changes
-; the address space.
-
-
-define void @test2(i8 addrspace(1)* %source, <2 x i8> addrspace(1)* %dest) {
-entry:
- %arrayidx1 = bitcast <2 x i8> addrspace(1)* %dest to <2 x i8> addrspace(1)*
- %conv = bitcast i8 addrspace(1)* %source to <16 x i8>*
- %arrayidx22 = bitcast <16 x i8>* %conv to <16 x i8>*
- %tmp3 = load <16 x i8>* %arrayidx22
- %arrayidx223 = bitcast i8 addrspace(1)* %source to i8*
- %tmp4 = load i8* %arrayidx223
- %tmp5 = insertelement <2 x i8> undef, i8 %tmp4, i32 0
- %splat = shufflevector <2 x i8> %tmp5, <2 x i8> undef, <2 x i32> zeroinitializer
- store <2 x i8> %splat, <2 x i8> addrspace(1)* %arrayidx1
- ret void
-}
\ No newline at end of file
diff --git a/test/Transforms/InstCombine/cast_ptr.ll b/test/Transforms/InstCombine/cast_ptr.ll
index 6a00e83..09910fb 100644
--- a/test/Transforms/InstCombine/cast_ptr.ll
+++ b/test/Transforms/InstCombine/cast_ptr.ll
@@ -36,3 +36,44 @@ define i1 @test3(i8* %a) {
%r = icmp eq i32 %tmpa, ptrtoint (i8* @global to i32)
ret i1 %r
}
+
+define i1 @test4(i32 %A) {
+ %B = inttoptr i32 %A to i8*
+ %C = icmp eq i8* %B, null
+ ret i1 %C
+; CHECK: @test4
+; CHECK-NEXT: %C = icmp eq i32 %A, 0
+; CHECK-NEXT: ret i1 %C
+}
+
+
+; Pulling the cast out of the load allows us to eliminate the load, and then
+; the whole array.
+
+ %op = type { float }
+ %unop = type { i32 }
+@Array = internal constant [1 x %op* (%op*)*] [ %op* (%op*)* @foo ] ; <[1 x %op* (%op*)*]*> [#uses=1]
+
+declare %op* @foo(%op* %X)
+
+define %unop* @test5(%op* %O) {
+ %tmp = load %unop* (%op*)** bitcast ([1 x %op* (%op*)*]* @Array to %unop* (%op*)**); <%unop* (%op*)*> [#uses=1]
+ %tmp.2 = call %unop* %tmp( %op* %O ) ; <%unop*> [#uses=1]
+ ret %unop* %tmp.2
+; CHECK: @test5
+; CHECK: call %op* @foo(%op* %O)
+}
+
+
+
+; InstCombine can not 'load (cast P)' -> 'cast (load P)' if the cast changes
+; the address space.
+
+define i8 @test6(i8 addrspace(1)* %source) {
+entry:
+ %arrayidx223 = bitcast i8 addrspace(1)* %source to i8*
+ %tmp4 = load i8* %arrayidx223
+ ret i8 %tmp4
+; CHECK: @test6
+; CHECK: load i8* %arrayidx223
+}
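
test6 pins the address-space restriction stated in the comment: pointers in
different address spaces may differ in size or representation, so the load
must stay in addrspace(1) and the bitcast cannot be hoisted past it. Within
a single address space the hoist is fine; a minimal sketch of the allowed
case (an assumed illustration, not part of the test):

    define i32 @same_space(float* %P) {
      %Q = bitcast float* %P to i32*
      %V = load i32* %Q   ; can become a load of float* %P plus a bitcast
      ret i32 %V
    }
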
diff --git a/test/Transforms/InstCombine/fsub-fadd.ll b/test/Transforms/InstCombine/fsub-fadd.ll
new file mode 100644
index 0000000..f4cff88
--- /dev/null
+++ b/test/Transforms/InstCombine/fsub-fadd.ll
@@ -0,0 +1,39 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; <rdar://problem/7530098>
+
+define void @func(double* %rhi, double* %rlo, double %xh, double %xl, double %yh, double %yl) nounwind ssp {
+entry:
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %tmp = fmul double %xh, 0x41A0000002000000 ; <double> [#uses=2]
+ %tmp1 = fsub double %xh, %tmp ; <double> [#uses=1]
+ %tmp2 = fadd double %tmp1, %tmp ; <double> [#uses=3]
+ %tmp3 = fsub double %xh, %tmp2 ; <double> [#uses=2]
+ %tmp4 = fmul double %yh, 0x41A0000002000000 ; <double> [#uses=2]
+ %tmp5 = fsub double %yh, %tmp4 ; <double> [#uses=1]
+ %tmp6 = fadd double %tmp5, %tmp4 ; <double> [#uses=3]
+ %tmp7 = fsub double %yh, %tmp6 ; <double> [#uses=2]
+ %tmp8 = fmul double %xh, %yh ; <double> [#uses=3]
+ %tmp9 = fmul double %tmp2, %tmp6 ; <double> [#uses=1]
+ %tmp10 = fsub double %tmp9, %tmp8 ; <double> [#uses=1]
+ %tmp11 = fmul double %tmp2, %tmp7 ; <double> [#uses=1]
+ %tmp12 = fadd double %tmp10, %tmp11 ; <double> [#uses=1]
+ %tmp13 = fmul double %tmp3, %tmp6 ; <double> [#uses=1]
+ %tmp14 = fadd double %tmp12, %tmp13 ; <double> [#uses=1]
+ %tmp15 = fmul double %tmp3, %tmp7 ; <double> [#uses=1]
+ %tmp16 = fadd double %tmp14, %tmp15 ; <double> [#uses=1]
+ %tmp17 = fmul double %xh, %yl ; <double> [#uses=1]
+ %tmp18 = fmul double %xl, %yh ; <double> [#uses=1]
+ %tmp19 = fadd double %tmp17, %tmp18 ; <double> [#uses=1]
+ %tmp20 = fadd double %tmp19, %tmp16 ; <double> [#uses=2]
+ %tmp21 = fadd double %tmp8, %tmp20 ; <double> [#uses=1]
+ store double %tmp21, double* %rhi, align 8
+ %tmp22 = load double* %rhi, align 8 ; <double> [#uses=1]
+ %tmp23 = fsub double %tmp8, %tmp22 ; <double> [#uses=1]
+ %tmp24 = fadd double %tmp23, %tmp20 ; <double> [#uses=1]
+
+; CHECK: %tmp23 = fsub double %tmp8, %tmp21
+; CHECK: %tmp24 = fadd double %tmp23, %tmp20
+
+ store double %tmp24, double* %rlo, align 8
+ ret void
+}
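
This test guards compensated (double-double) arithmetic: %tmp22 reloads the
value just stored to %rhi, so the load is forwarded and %tmp23 becomes
fsub double %tmp8, %tmp21 -- but the subtraction itself must survive, since
%tmp21 = fadd %tmp8, %tmp20 rounds, and x - (x + y) != -y in floating point.
A minimal distillation of the property (a sketch, not part of the test):

    define double @residual(double %x, double %y) {
      %s = fadd double %x, %y
      %r = fsub double %x, %s   ; the fadd's rounding error; not simply -%y
      ret double %r
    }
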
diff --git a/test/Transforms/InstCombine/intrinsics.ll b/test/Transforms/InstCombine/intrinsics.ll
index 135e777..c63475c 100644
--- a/test/Transforms/InstCombine/intrinsics.ll
+++ b/test/Transforms/InstCombine/intrinsics.ll
@@ -5,6 +5,10 @@
declare %overflow.result @llvm.uadd.with.overflow.i8(i8, i8)
declare %overflow.result @llvm.umul.with.overflow.i8(i8, i8)
declare double @llvm.powi.f64(double, i32) nounwind readonly
+declare i32 @llvm.cttz.i32(i32) nounwind readnone
+declare i32 @llvm.ctlz.i32(i32) nounwind readnone
+declare i32 @llvm.ctpop.i32(i32) nounwind readnone
+declare i8 @llvm.ctlz.i8(i8) nounwind readnone
define i8 @test1(i8 %A, i8 %B) {
%x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
@@ -79,7 +83,6 @@ define i8 @test6(i8 %A, i1* %overflowPtr) {
; CHECK-NEXT: ret i8 %A
}
-
define void @powi(double %V, double *%P) {
entry:
%A = tail call double @llvm.powi.f64(double %V, i32 -1) nounwind
@@ -98,4 +101,46 @@ entry:
; CHECK: volatile store double %V
}
+define i32 @cttz(i32 %a) {
+entry:
+ %or = or i32 %a, 8
+ %and = and i32 %or, -8
+ %count = tail call i32 @llvm.cttz.i32(i32 %and) nounwind readnone
+ ret i32 %count
+; CHECK: @cttz
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret i32 3
+}
+define i8 @ctlz(i8 %a) {
+entry:
+ %or = or i8 %a, 32
+ %and = and i8 %or, 63
+ %count = tail call i8 @llvm.ctlz.i8(i8 %and) nounwind readnone
+ ret i8 %count
+; CHECK: @ctlz
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret i8 2
+}
+
+define void @cmp.simplify(i32 %a, i32 %b, i1* %c) {
+entry:
+ %lz = tail call i32 @llvm.ctlz.i32(i32 %a) nounwind readnone
+ %lz.cmp = icmp eq i32 %lz, 32
+ volatile store i1 %lz.cmp, i1* %c
+ %tz = tail call i32 @llvm.cttz.i32(i32 %a) nounwind readnone
+ %tz.cmp = icmp ne i32 %tz, 32
+ volatile store i1 %tz.cmp, i1* %c
+ %pop = tail call i32 @llvm.ctpop.i32(i32 %b) nounwind readnone
+ %pop.cmp = icmp eq i32 %pop, 0
+ volatile store i1 %pop.cmp, i1* %c
+ ret void
+; CHECK: @cmp.simplify
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %lz.cmp = icmp eq i32 %a, 0
+; CHECK-NEXT: volatile store i1 %lz.cmp, i1* %c
+; CHECK-NEXT: %tz.cmp = icmp ne i32 %a, 0
+; CHECK-NEXT: volatile store i1 %tz.cmp, i1* %c
+; CHECK-NEXT: %pop.cmp = icmp eq i32 %b, 0
+; CHECK-NEXT: volatile store i1 %pop.cmp, i1* %c
+}
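
The new tests fold via known bits: in @cttz the `or` forces bit 3 on and the
`and` clears bits 0-2, pinning the lowest set bit at position 3; in @ctlz
bits 6 and 7 are cleared and bit 5 forced on, giving exactly two leading
zeros. @cmp.simplify relies on ctlz/cttz equalling the bit width, and ctpop
equalling zero, exactly when the operand is zero. When the count is not
pinned to a single value the call must survive; a sketch of such a case
(self-contained, assuming the single-argument cttz signature used in this
file):

    declare i32 @llvm.cttz.i32(i32) nounwind readnone

    define i32 @cttz_unknown(i32 %a) {
      ; bits 0-2 are unconstrained, so the count ranges over 0-3
      %or = or i32 %a, 8
      %count = tail call i32 @llvm.cttz.i32(i32 %or) nounwind readnone
      ret i32 %count
    }
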
diff --git a/test/Transforms/InstCombine/load-cmp.ll b/test/Transforms/InstCombine/load-cmp.ll
new file mode 100644
index 0000000..fe5df92
--- /dev/null
+++ b/test/Transforms/InstCombine/load-cmp.ll
@@ -0,0 +1,112 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+@G16 = internal constant [10 x i16] [i16 35, i16 82, i16 69, i16 81, i16 85,
+ i16 73, i16 82, i16 69, i16 68, i16 0]
+@GD = internal constant [6 x double]
+ [double -10.0, double 1.0, double 4.0, double 2.0, double -20.0, double -40.0]
+
+define i1 @test1(i32 %X) {
+ %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %Q = load i16* %P
+ %R = icmp eq i16 %Q, 0
+ ret i1 %R
+; CHECK: @test1
+; CHECK-NEXT: %R = icmp eq i32 %X, 9
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test2(i32 %X) {
+ %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %Q = load i16* %P
+ %R = icmp slt i16 %Q, 85
+ ret i1 %R
+; CHECK: @test2
+; CHECK-NEXT: %R = icmp ne i32 %X, 4
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test3(i32 %X) {
+ %P = getelementptr inbounds [6 x double]* @GD, i32 0, i32 %X
+ %Q = load double* %P
+ %R = fcmp oeq double %Q, 1.0
+ ret i1 %R
+; CHECK: @test3
+; CHECK-NEXT: %R = icmp eq i32 %X, 1
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test4(i32 %X) {
+ %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %Q = load i16* %P
+ %R = icmp sle i16 %Q, 73
+ ret i1 %R
+; CHECK: @test4
+; CHECK-NEXT: lshr i32 933, %X
+; CHECK-NEXT: and i32 {{.*}}, 1
+; CHECK-NEXT: %R = icmp ne i32 {{.*}}, 0
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test5(i32 %X) {
+ %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %Q = load i16* %P
+ %R = icmp eq i16 %Q, 69
+ ret i1 %R
+; CHECK: @test5
+; CHECK-NEXT: icmp eq i32 %X, 2
+; CHECK-NEXT: icmp eq i32 %X, 7
+; CHECK-NEXT: %R = or i1
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test6(i32 %X) {
+ %P = getelementptr inbounds [6 x double]* @GD, i32 0, i32 %X
+ %Q = load double* %P
+ %R = fcmp ogt double %Q, 0.0
+ ret i1 %R
+; CHECK: @test6
+; CHECK-NEXT: add i32 %X, -1
+; CHECK-NEXT: %R = icmp ult i32 {{.*}}, 3
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test7(i32 %X) {
+ %P = getelementptr inbounds [6 x double]* @GD, i32 0, i32 %X
+ %Q = load double* %P
+ %R = fcmp olt double %Q, 0.0
+ ret i1 %R
+; CHECK: @test7
+; CHECK-NEXT: add i32 %X, -1
+; CHECK-NEXT: %R = icmp ugt i32 {{.*}}, 2
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test8(i32 %X) {
+ %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %Q = load i16* %P
+ %R = and i16 %Q, 3
+ %S = icmp eq i16 %R, 0
+ ret i1 %S
+; CHECK: @test8
+; CHECK-NEXT: add i32 %X, -8
+; CHECK-NEXT: %S = icmp ult i32 {{.*}}, 2
+; CHECK-NEXT: ret i1 %S
+}
+
+@GA = internal constant [4 x { i32, i32 } ] [
+ { i32, i32 } { i32 1, i32 0 },
+ { i32, i32 } { i32 2, i32 1 },
+ { i32, i32 } { i32 3, i32 1 },
+ { i32, i32 } { i32 4, i32 0 }
+]
+
+define i1 @test9(i32 %X) {
+ %P = getelementptr inbounds [4 x { i32, i32 } ]* @GA, i32 0, i32 %X, i32 1
+ %Q = load i32* %P
+ %R = icmp eq i32 %Q, 1
+ ret i1 %R
+; CHECK: @test9
+; CHECK-NEXT: add i32 %X, -1
+; CHECK-NEXT: %R = icmp ult i32 {{.*}}, 2
+; CHECK-NEXT: ret i1 %R
+}
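
These tests evaluate a compare of a load from an internal constant array
element-by-element and rewrite it as arithmetic on the index: an equality or
inequality against a single index (test1-test3), an or of equalities
(test5), a range check when the matching indices are contiguous
(test6-test9), or a bitmask probe otherwise (test4). The magic 933 in
test4's CHECK lines encodes the indices of @G16 whose element satisfies
`sle 73` -- {0, 2, 5, 7, 8, 9} -- since 933 = 2^0 + 2^2 + 2^5 + 2^7 + 2^8 +
2^9. A hand-folded form of test4 (a sketch mirroring its CHECK lines):

    define i1 @test4.folded(i32 %X) {
      %m = lshr i32 933, %X    ; select bit %X of the index mask
      %b = and i32 %m, 1
      %R = icmp ne i32 %b, 0
      ret i1 %R
    }
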
diff --git a/test/Transforms/InstCombine/load.ll b/test/Transforms/InstCombine/load.ll
index 6d068f5..75c62a8 100644
--- a/test/Transforms/InstCombine/load.ll
+++ b/test/Transforms/InstCombine/load.ll
@@ -76,3 +76,12 @@ define double @test11(double* %p) {
%x = load double* %t1
ret double %x
}
+
+define i32 @test12(i32* %P) {
+ %A = alloca i32
+ store i32 123, i32* %A
+  ; Cast the result of the load, not the source
+ %Q = bitcast i32* %A to i32*
+ %V = load i32* %Q
+ ret i32 %V
+}
diff --git a/test/Transforms/InstCombine/loadstore-alignment.ll b/test/Transforms/InstCombine/loadstore-alignment.ll
index 9fbe683..1d932d2 100644
--- a/test/Transforms/InstCombine/loadstore-alignment.ll
+++ b/test/Transforms/InstCombine/loadstore-alignment.ll
@@ -29,7 +29,7 @@ define <2 x i64> @foo() {
define <2 x i64> @bar() {
%t = alloca <2 x i64>
- call void @kip(<2 x i64>* %t);
+ call void @kip(<2 x i64>* %t)
%tmp1 = load <2 x i64>* %t, align 1
ret <2 x i64> %tmp1
}
@@ -59,7 +59,7 @@ define void @foo_store(<2 x i64> %y) {
define void @bar_store(<2 x i64> %y) {
%t = alloca <2 x i64>
- call void @kip(<2 x i64>* %t);
+ call void @kip(<2 x i64>* %t)
store <2 x i64> %y, <2 x i64>* %t, align 1
ret void
}
diff --git a/test/Transforms/InstCombine/or.ll b/test/Transforms/InstCombine/or.ll
index 4a14081..2040f3d 100644
--- a/test/Transforms/InstCombine/or.ll
+++ b/test/Transforms/InstCombine/or.ll
@@ -268,19 +268,17 @@ define i1 @test26(i32 %A, i32 %B) {
; CHECK: ret i1
}
-; PR5634
define i1 @test27(i32* %A, i32* %B) {
- %C1 = icmp eq i32* %A, null
- %C2 = icmp eq i32* %B, null
- ; (A == 0) & (A == 0) --> (A|B) == 0
- %D = and i1 %C1, %C2
- ret i1 %D
+ %C1 = ptrtoint i32* %A to i32
+ %C2 = ptrtoint i32* %B to i32
+ %D = or i32 %C1, %C2
+ %E = icmp eq i32 %D, 0
+ ret i1 %E
; CHECK: @test27
-; CHECK: ptrtoint i32* %A
-; CHECK: ptrtoint i32* %B
-; CHECK: or i32
-; CHECK: icmp eq i32 {{.*}}, 0
-; CHECK: ret i1
+; CHECK: icmp eq i32* %A, null
+; CHECK: icmp eq i32* %B, null
+; CHECK: and i1
+; CHECK: ret i1
}
; PR5634
@@ -295,3 +293,46 @@ define i1 @test28(i32 %A, i32 %B) {
; CHECK: icmp ne i32 {{.*}}, 0
; CHECK: ret i1
}
+
+define i1 @test29(i32* %A, i32* %B) {
+ %C1 = ptrtoint i32* %A to i32
+ %C2 = ptrtoint i32* %B to i32
+ %D = or i32 %C1, %C2
+ %E = icmp ne i32 %D, 0
+ ret i1 %E
+; CHECK: @test29
+; CHECK: icmp ne i32* %A, null
+; CHECK: icmp ne i32* %B, null
+; CHECK: or i1
+; CHECK: ret i1
+}
+
+; PR4216
+define i32 @test30(i32 %A) {
+entry:
+ %B = or i32 %A, 32962
+ %C = and i32 %A, -65536
+ %D = and i32 %B, 40186
+ %E = or i32 %D, %C
+ ret i32 %E
+; CHECK: @test30
+; CHECK: %B = or i32 %A, 32962
+; CHECK: %E = and i32 %B, -25350
+; CHECK: ret i32 %E
+}
+
+; PR4216
+define i64 @test31(i64 %A) nounwind readnone ssp noredzone {
+ %B = or i64 %A, 194
+ %D = and i64 %B, 250
+
+ %C = or i64 %A, 32768
+ %E = and i64 %C, 4294941696
+
+ %F = or i64 %D, %E
+ ret i64 %F
+; CHECK: @test31
+; CHECK-NEXT: %bitfield = or i64 %A, 32962
+; CHECK-NEXT: %F = and i64 %bitfield, 4294941946
+; CHECK-NEXT: ret i64 %F
+}
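
test27 and test29 now pin the canonical direction for null tests: InstCombine
prefers explicit pointer compares joined with and/or over or-ing ptrtoint
results together. The PR4216 tests exercise or/and mask merging; the
derivation for test30, written out (32962 = 0x80C2, 40186 = 0x9CFA,
-65536 = 0xFFFF0000, and B = A | 0x80C2):

    E = ((A | 0x80C2) & 0x9CFA) | (A & 0xFFFF0000)
      = (B & 0x9CFA) | (B & 0xFFFF0000)   ; the or sets only low-half bits
      = B & 0xFFFF9CFA
      = B & -25350
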
diff --git a/test/Transforms/InstCombine/setcc-cast-cast.ll b/test/Transforms/InstCombine/setcc-cast-cast.ll
deleted file mode 100644
index b2681ea..0000000
--- a/test/Transforms/InstCombine/setcc-cast-cast.ll
+++ /dev/null
@@ -1,46 +0,0 @@
-; This test case was reduced from MultiSource/Applications/hbd. It makes sure
-; that folding doesn't happen in case a zext is applied where a sext should have
-; been when a setcc is used with two casts.
-; RUN: opt < %s -instcombine -S | \
-; RUN: not grep {br i1 false}
-; END.
-
-define i32 @bug(i8 %inbuff) {
-entry:
- %tmp = bitcast i8 %inbuff to i8 ; <i8> [#uses=1]
- %tmp.upgrd.1 = sext i8 %tmp to i32 ; <i32> [#uses=3]
- %tmp.upgrd.2 = icmp eq i32 %tmp.upgrd.1, 1 ; <i1> [#uses=1]
- br i1 %tmp.upgrd.2, label %cond_true, label %cond_next
-
-cond_true: ; preds = %entry
- br label %bb
-
-cond_next: ; preds = %entry
- %tmp3 = icmp eq i32 %tmp.upgrd.1, -1 ; <i1> [#uses=1]
- br i1 %tmp3, label %cond_true4, label %cond_next5
-
-cond_true4: ; preds = %cond_next
- br label %bb
-
-cond_next5: ; preds = %cond_next
- %tmp7 = icmp sgt i32 %tmp.upgrd.1, 1 ; <i1> [#uses=1]
- br i1 %tmp7, label %cond_true8, label %cond_false
-
-cond_true8: ; preds = %cond_next5
- br label %cond_next9
-
-cond_false: ; preds = %cond_next5
- br label %cond_next9
-
-cond_next9: ; preds = %cond_false, %cond_true8
- %iftmp.1.0 = phi i32 [ 42, %cond_true8 ], [ 23, %cond_false ] ; <i32> [#uses=1]
- br label %return
-
-bb: ; preds = %cond_true4, %cond_true
- br label %return
-
-return: ; preds = %bb, %cond_next9
- %retval.0 = phi i32 [ 17, %bb ], [ %iftmp.1.0, %cond_next9 ] ; <i32> [#uses=1]
- ret i32 %retval.0
-}
-
diff --git a/test/Transforms/InstCombine/shift-sra.ll b/test/Transforms/InstCombine/shift-sra.ll
index 4492785..58f3226 100644
--- a/test/Transforms/InstCombine/shift-sra.ll
+++ b/test/Transforms/InstCombine/shift-sra.ll
@@ -1,6 +1,4 @@
-; RUN: opt < %s -instcombine -S | \
-; RUN: grep {lshr i32} | count 2
-; RUN: opt < %s -instcombine -S | not grep ashr
+; RUN: opt < %s -instcombine -S | FileCheck %s
define i32 @test1(i32 %X, i8 %A) {
@@ -9,6 +7,8 @@ define i32 @test1(i32 %X, i8 %A) {
%Y = ashr i32 %X, %shift.upgrd.1 ; <i32> [#uses=1]
%Z = and i32 %Y, 1 ; <i32> [#uses=1]
ret i32 %Z
+; CHECK: @test1
+; CHECK: lshr i32 %X, %shift.upgrd.1
}
define i32 @test2(i8 %tmp) {
@@ -16,4 +16,43 @@ define i32 @test2(i8 %tmp) {
%tmp4 = add i32 %tmp3, 7 ; <i32> [#uses=1]
%tmp5 = ashr i32 %tmp4, 3 ; <i32> [#uses=1]
ret i32 %tmp5
+; CHECK: @test2
+; CHECK: lshr i32 %tmp4, 3
+}
+
+define i64 @test3(i1 %X, i64 %Y, i1 %Cond) {
+ br i1 %Cond, label %T, label %F
+T:
+ %X2 = sext i1 %X to i64
+ br label %C
+F:
+ %Y2 = ashr i64 %Y, 63
+ br label %C
+C:
+ %P = phi i64 [%X2, %T], [%Y2, %F]
+ %S = ashr i64 %P, 12
+ ret i64 %S
+
+; CHECK: @test3
+; CHECK: %P = phi i64
+; CHECK-NEXT: ret i64 %P
+}
+
+define i64 @test4(i1 %X, i64 %Y, i1 %Cond) {
+ br i1 %Cond, label %T, label %F
+T:
+ %X2 = sext i1 %X to i64
+ br label %C
+F:
+ %Y2 = ashr i64 %Y, 63
+ br label %C
+C:
+ %P = phi i64 [%X2, %T], [%Y2, %F]
+ %R = shl i64 %P, 12
+ %S = ashr i64 %R, 12
+ ret i64 %S
+
+; CHECK: @test4
+; CHECK: %P = phi i64
+; CHECK-NEXT: ret i64 %P
}
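
In test3 and test4 every incoming value of %P is 0 or -1: sext of an i1 and
an ashr by 63 both replicate the sign bit across the whole word. Every bit
of %P therefore equals its sign bit, so for %P in {0, -1} both ashr(%P, 12)
and ashr(shl(%P, 12), 12) are %P itself, and the shifts fold away.
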
diff --git a/test/Transforms/InstCombine/sub.ll b/test/Transforms/InstCombine/sub.ll
index ba28910..fa0322a 100644
--- a/test/Transforms/InstCombine/sub.ll
+++ b/test/Transforms/InstCombine/sub.ll
@@ -223,8 +223,8 @@ define i32 @test23(i8* %P, i64 %A){
%G = sub i32 %D, %F
ret i32 %G
; CHECK: @test23
-; CHECK: %A1 = trunc i64 %A to i32
-; CHECK: ret i32 %A1
+; CHECK-NEXT: = trunc i64 %A to i32
+; CHECK-NEXT: ret i32
}
define i64 @test24(i8* %P, i64 %A){
@@ -248,3 +248,28 @@ define i64 @test24a(i8* %P, i64 %A){
; CHECK-NEXT: ret i64
}
+@Arr = external global [42 x i16]
+
+define i64 @test24b(i8* %P, i64 %A){
+ %B = getelementptr inbounds [42 x i16]* @Arr, i64 0, i64 %A
+ %C = ptrtoint i16* %B to i64
+ %G = sub i64 %C, ptrtoint ([42 x i16]* @Arr to i64)
+ ret i64 %G
+; CHECK: @test24b
+; CHECK-NEXT: shl i64 %A, 1
+; CHECK-NEXT: ret i64
+}
+
+
+define i64 @test25(i8* %P, i64 %A){
+ %B = getelementptr inbounds [42 x i16]* @Arr, i64 0, i64 %A
+ %C = ptrtoint i16* %B to i64
+ %G = sub i64 %C, ptrtoint (i16* getelementptr ([42 x i16]* @Arr, i64 1, i64 0) to i64)
+ ret i64 %G
+; CHECK: @test25
+; CHECK-NEXT: shl i64 %A, 1
+; CHECK-NEXT: add i64 {{.*}}, -84
+; CHECK-NEXT: ret i64
+}
+
+
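
test24b and test25 fold pointer differences into index arithmetic: with an
inbounds gep, ptrtoint(&@Arr[0][%A]) - ptrtoint(@Arr) is just the byte
offset %A * sizeof(i16), i.e. shl %A, 1. test25 subtracts the
one-past-the-end address instead, which sits 42 * 2 = 84 bytes beyond the
base, hence the trailing add:

    &Arr[0][%A] - &Arr[1][0]  =  2*%A - 2*42  =  (%A << 1) - 84
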