author     dim <dim@FreeBSD.org>  2012-04-14 13:54:10 +0000
committer  dim <dim@FreeBSD.org>  2012-04-14 13:54:10 +0000
commit     1fc08f5e9ef733ef1ce6f363fecedc2260e78974 (patch)
tree       19c69a04768629f2d440944b71cbe90adae0b615 /test/Transforms/LoopStrengthReduce
parent     07637c87f826cdf411f0673595e9bc92ebd793f2 (diff)
Vendor import of llvm trunk r154661:
http://llvm.org/svn/llvm-project/llvm/trunk@r154661
Diffstat (limited to 'test/Transforms/LoopStrengthReduce')
-rw-r--r--  test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll            4
-rw-r--r--  test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll    39
-rw-r--r--  test/Transforms/LoopStrengthReduce/2012-01-02-nopreheader.ll         88
-rw-r--r--  test/Transforms/LoopStrengthReduce/2012-01-16-nopreheader.ll        113
-rw-r--r--  test/Transforms/LoopStrengthReduce/2012-03-15-nopreheader.ll        155
-rw-r--r--  test/Transforms/LoopStrengthReduce/2012-03-26-constexpr.ll           49
-rw-r--r--  test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll               292
-rw-r--r--  test/Transforms/LoopStrengthReduce/ARM/lit.local.cfg                  6
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/2011-11-29-postincphi.ll      36
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/2011-12-04-loserreg.ll        96
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll         92
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/dg.exp                         5
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll               300
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/ivchain-stress-X86.ll         96
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/lit.local.cfg                  6
-rw-r--r--  test/Transforms/LoopStrengthReduce/addrec-gep.ll                     82
-rw-r--r--  test/Transforms/LoopStrengthReduce/dg.exp                             3
-rw-r--r--  test/Transforms/LoopStrengthReduce/dominate-assert.ll                70
-rw-r--r--  test/Transforms/LoopStrengthReduce/exit_compare_live_range.ll         2
-rw-r--r--  test/Transforms/LoopStrengthReduce/ivchain.ll                        43
-rw-r--r--  test/Transforms/LoopStrengthReduce/lit.local.cfg                      1
-rw-r--r--  test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll              10
-rw-r--r--  test/Transforms/LoopStrengthReduce/pr12018.ll                        38
-rw-r--r--  test/Transforms/LoopStrengthReduce/pr12048.ll                        38
-rw-r--r--  test/Transforms/LoopStrengthReduce/pr3399.ll                          2
-rw-r--r--  test/Transforms/LoopStrengthReduce/preserve-gep-loop-variant.ll      42
26 files changed, 1691 insertions, 17 deletions
diff --git a/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll b/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll
index 90477d1..ce56bd3 100644
--- a/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll
+++ b/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll
@@ -10,7 +10,7 @@ entry:
bb: ; preds = %bb, %entry
%l_2.0.reg2mem.0 = phi i16 [ 0, %entry ], [ %t1, %bb ] ; <i16> [#uses=2]
%t0 = shl i16 %l_2.0.reg2mem.0, 1 ; <i16>:0 [#uses=1]
- volatile store i16 %t0, i16* @g_3, align 2
+ store volatile i16 %t0, i16* @g_3, align 2
%t1 = add i16 %l_2.0.reg2mem.0, -3 ; <i16>:1 [#uses=2]
%t2 = icmp slt i16 %t1, 1 ; <i1>:2 [#uses=1]
br i1 %t2, label %bb, label %return
@@ -22,7 +22,7 @@ return: ; preds = %bb
define i32 @main() nounwind {
entry:
tail call void @func_1( ) nounwind
- volatile load i16* @g_3, align 2 ; <i16>:0 [#uses=1]
+ load volatile i16* @g_3, align 2 ; <i16>:0 [#uses=1]
zext i16 %0 to i32 ; <i32>:1 [#uses=1]
tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), i32 %1 ) nounwind ; <i32>:2 [#uses=0]
ret i32 0
diff --git a/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll b/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll
new file mode 100644
index 0000000..392a8bc
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll
@@ -0,0 +1,39 @@
+; RUN: opt -loop-reduce -S < %s | FileCheck %s
+;
+; PR11571: handle a postinc user outside of for.body7 that requires
+; recursive expansion of a quadratic recurrence within for.body7. LSR
+; needs to forget that for.body7 is a postinc loop during expansion.
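+;
+; Roughly, as a hypothetical C sketch of the IR below (not part of the
+; original test):
+;   int bf = 0;
+;   for (int i = 1; cond; ++i) bf += i;     // for.body7: bf = i*(i+1)/2
+;   for (int j = bf; ; ++j) use(b[j]);      // for.body43: postinc user of bf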
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
+target triple = "i386-unknown-freebsd10.0"
+
+@b = external global [121 x i32]
+
+; CHECK: @vb
+; Outer recurrence:
+; CHECK: %lsr.iv1 = phi [121 x i32]*
+; Inner recurrence:
+; CHECK: %lsr.iv = phi i32
+; Outer step (relative to inner recurrence):
+; CHECK: %scevgep = getelementptr i1* %{{.*}}, i32 %lsr.iv
+; Outer use:
+; CHECK: %lsr.iv3 = phi [121 x i32]* [ %lsr.iv1, %for.body43.preheader ]
+define void @vb() nounwind {
+for.cond.preheader:
+ br label %for.body7
+
+for.body7:
+ %indvars.iv77 = phi i32 [ %indvars.iv.next78, %for.body7 ], [ 1, %for.cond.preheader ]
+ %bf.072 = phi i32 [ %t1, %for.body7 ], [ 0, %for.cond.preheader ]
+ %t1 = add i32 %bf.072, %indvars.iv77
+ %indvars.iv.next78 = add i32 %indvars.iv77, 1
+ br i1 undef, label %for.body43, label %for.body7
+
+for.body43:
+ %bf.459 = phi i32 [ %inc44, %for.body43 ], [ %t1, %for.body7 ]
+ %inc44 = add nsw i32 %bf.459, 1
+ %arrayidx45 = getelementptr inbounds [121 x i32]* @b, i32 0, i32 %bf.459
+ %t2 = load i32* %arrayidx45, align 4
+ br label %for.body43
+}
+
diff --git a/test/Transforms/LoopStrengthReduce/2012-01-02-nopreheader.ll b/test/Transforms/LoopStrengthReduce/2012-01-02-nopreheader.ll
new file mode 100644
index 0000000..d7f5723
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/2012-01-02-nopreheader.ll
@@ -0,0 +1,88 @@
+; RUN: opt -loop-reduce -S < %s | FileCheck %s
+;
+; <rdar://10619599> "SelectionDAGBuilder shouldn't visit PHI nodes!" assert.
+; <rdar://10655343> SCEVExpander segfault on simple test case
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-f128:128:128-n8:16:32"
+target triple = "i386-apple-darwin"
+
+; LSR should convert the inner loop (bb7.us) IV (j.01.us) into float*.
+; This involves a nested AddRec; the outer AddRec's loop-invariant
+; components cannot find a preheader, so they should be expanded in the loop header
+; (bb7.lr.ph.us) below the existing phi i.12.us.
+; Currently, LSR won't kick in on such loops.
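+;
+; Roughly, as a hypothetical C sketch (q stands for the sdiv result %0):
+;   for (int i = 0; ; ++i)
+;     for (int j = 0; ; ++j)
+;       a[q*i + j] = v;        // the j-loop IV is the float* candidate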
+; CHECK: @nopreheader
+; CHECK: bb7.us:
+; CHECK-NOT: phi float*
+; CHECK: %j.01.us = phi i32
+; CHECK-NOT: phi float*
+define void @nopreheader(float* nocapture %a, i32 %n) nounwind {
+entry:
+ %0 = sdiv i32 %n, undef
+ indirectbr i8* undef, [label %bb10.preheader]
+
+bb10.preheader: ; preds = %bb4
+ indirectbr i8* undef, [label %bb8.preheader.lr.ph, label %return]
+
+bb8.preheader.lr.ph: ; preds = %bb10.preheader
+ indirectbr i8* null, [label %bb7.lr.ph.us, label %bb9]
+
+bb7.lr.ph.us: ; preds = %bb9.us, %bb8.preheader.lr.ph
+ %i.12.us = phi i32 [ %2, %bb9.us ], [ 0, %bb8.preheader.lr.ph ]
+ %tmp30 = mul i32 %0, %i.12.us
+ indirectbr i8* undef, [label %bb7.us]
+
+bb7.us: ; preds = %bb7.lr.ph.us, %bb7.us
+ %j.01.us = phi i32 [ 0, %bb7.lr.ph.us ], [ %1, %bb7.us ]
+ %tmp31 = add i32 %tmp30, %j.01.us
+ %scevgep9 = getelementptr float* %a, i32 %tmp31
+ store float undef, float* %scevgep9, align 1
+ %1 = add nsw i32 %j.01.us, 1
+ indirectbr i8* undef, [label %bb9.us, label %bb7.us]
+
+bb9.us: ; preds = %bb7.us
+ %2 = add nsw i32 %i.12.us, 1
+ indirectbr i8* undef, [label %bb7.lr.ph.us, label %return]
+
+bb9: ; preds = %bb9, %bb8.preheader.lr.ph
+ indirectbr i8* undef, [label %bb9, label %return]
+
+return: ; preds = %bb9, %bb9.us, %bb10.preheader
+ ret void
+}
+
+; In this case, SCEVExpander simply cannot materialize the AddRecExpr
+; that LSR picks. We must detect that %bb8.preheader does not have a
+; preheader and avoid performing LSR on %bb7.
+; CHECK: @nopreheader2
+; CHECK: bb7:
+; CHECK: %indvar = phi i32
+define fastcc void @nopreheader2([200 x i32]* nocapture %Array2) nounwind {
+entry:
+ indirectbr i8* undef, [label %bb]
+
+bb: ; preds = %bb, %entry
+ indirectbr i8* undef, [label %bb3, label %bb]
+
+bb3: ; preds = %bb3, %bb
+ indirectbr i8* undef, [label %bb8.preheader, label %bb3]
+
+bb8.preheader: ; preds = %bb9, %bb3
+ %indvar5 = phi i32 [ %indvar.next6, %bb9 ], [ 0, %bb3 ]
+ %tmp26 = add i32 %indvar5, 13
+ indirectbr i8* null, [label %bb7]
+
+bb7: ; preds = %bb8.preheader, %bb7
+ %indvar = phi i32 [ 0, %bb8.preheader ], [ %indvar.next, %bb7 ]
+ %scevgep = getelementptr [200 x i32]* %Array2, i32 %tmp26, i32 %indvar
+ store i32 undef, i32* %scevgep, align 4
+ %indvar.next = add i32 %indvar, 1
+ indirectbr i8* undef, [label %bb9, label %bb7]
+
+bb9: ; preds = %bb7
+ %indvar.next6 = add i32 %indvar5, 1
+ indirectbr i8* undef, [label %return, label %bb8.preheader]
+
+return: ; preds = %bb9
+ ret void
+}
diff --git a/test/Transforms/LoopStrengthReduce/2012-01-16-nopreheader.ll b/test/Transforms/LoopStrengthReduce/2012-01-16-nopreheader.ll
new file mode 100644
index 0000000..3036a7e
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/2012-01-16-nopreheader.ll
@@ -0,0 +1,113 @@
+; RUN: opt -loop-reduce -S < %s | FileCheck %s
+;
+; <rdar://10701050> "Cannot split an edge from an IndirectBrInst" assert.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-darwin10.0.0"
+
+; while.cond197 dominates the simplified loop while.cond238 but
+; has no preheader.
+;
+; CHECK: @nopreheader
+; CHECK: %while.cond238
+; CHECK: phi i64
+; CHECK-NOT: phi
+; CHECK: indirectbr
+define void @nopreheader(i8* %end) nounwind {
+entry:
+ br label %while.cond179
+
+while.cond179: ; preds = %if.end434, %if.end369, %if.end277, %if.end165
+ %s.1 = phi i8* [ undef, %if.end434 ], [ %incdec.ptr356, %if.end348 ], [ undef, %entry ]
+ indirectbr i8* undef, [label %land.rhs184, label %while.end453]
+
+land.rhs184: ; preds = %while.cond179
+ indirectbr i8* undef, [label %while.end453, label %while.cond197]
+
+while.cond197: ; preds = %land.rhs202, %land.rhs184
+ %0 = phi i64 [ %indvar.next11, %land.rhs202 ], [ 0, %land.rhs184 ]
+ indirectbr i8* undef, [label %land.rhs202, label %while.end215]
+
+land.rhs202: ; preds = %while.cond197
+ %indvar.next11 = add i64 %0, 1
+ indirectbr i8* undef, [label %while.end215, label %while.cond197]
+
+while.end215: ; preds = %land.rhs202, %while.cond197
+ indirectbr i8* undef, [label %PREMATURE, label %if.end221]
+
+if.end221: ; preds = %while.end215
+ indirectbr i8* undef, [label %while.cond238.preheader, label %lor.lhs.false227]
+
+lor.lhs.false227: ; preds = %if.end221
+ indirectbr i8* undef, [label %while.cond238.preheader, label %if.else]
+
+while.cond238.preheader: ; preds = %lor.lhs.false227, %if.end221
+ %tmp16 = add i64 %0, 2
+ indirectbr i8* undef, [label %while.cond238]
+
+while.cond238: ; preds = %land.rhs243, %while.cond238.preheader
+ %1 = phi i64 [ %indvar.next15, %land.rhs243 ], [ 0, %while.cond238.preheader ]
+ %tmp36 = add i64 %tmp16, %1
+ %s.3 = getelementptr i8* %s.1, i64 %tmp36
+ %cmp241 = icmp ult i8* %s.3, %end
+ indirectbr i8* undef, [label %land.rhs243, label %while.end256]
+
+land.rhs243: ; preds = %while.cond238
+ %indvar.next15 = add i64 %1, 1
+ indirectbr i8* undef, [label %while.end256, label %while.cond238]
+
+while.end256: ; preds = %land.rhs243, %while.cond238
+ indirectbr i8* undef, [label %PREMATURE]
+
+if.else: ; preds = %lor.lhs.false227
+ indirectbr i8* undef, [label %if.then297, label %if.else386]
+
+if.then297: ; preds = %if.else
+ indirectbr i8* undef, [label %PREMATURE, label %if.end307]
+
+if.end307: ; preds = %if.then297
+ indirectbr i8* undef, [label %if.end314, label %FAIL]
+
+if.end314: ; preds = %if.end307
+ indirectbr i8* undef, [label %if.end340]
+
+if.end340: ; preds = %while.end334
+ indirectbr i8* undef, [label %PREMATURE, label %if.end348]
+
+if.end348: ; preds = %if.end340
+ %incdec.ptr356 = getelementptr inbounds i8* undef, i64 2
+ indirectbr i8* undef, [label %while.cond179]
+
+if.else386: ; preds = %if.else
+ indirectbr i8* undef, [label %while.end453, label %if.end434]
+
+if.end434: ; preds = %if.then428, %if.end421
+ indirectbr i8* undef, [label %while.cond179]
+
+while.end453: ; preds = %if.else386, %land.rhs184, %while.cond179
+ indirectbr i8* undef, [label %PREMATURE, label %if.end459]
+
+if.end459: ; preds = %while.end453
+ indirectbr i8* undef, [label %if.then465, label %FAIL]
+
+if.then465: ; preds = %if.end459
+ indirectbr i8* undef, [label %return, label %if.then479]
+
+if.then479: ; preds = %if.then465
+ indirectbr i8* undef, [label %return]
+
+FAIL: ; preds = %if.end459, %if.end307, %land.lhs.true142, %land.lhs.true131, %while.end
+ indirectbr i8* undef, [label %DECL_FAIL]
+
+PREMATURE: ; preds = %while.end453, %while.end415, %if.end340, %while.end334, %if.then297, %while.end256, %while.end215
+ indirectbr i8* undef, [label %return, label %if.then495]
+
+if.then495: ; preds = %PREMATURE
+ indirectbr i8* undef, [label %return]
+
+DECL_FAIL: ; preds = %if.then488, %FAIL, %land.lhs.true99, %lor.lhs.false, %if.end83, %if.then39, %if.end
+ indirectbr i8* undef, [label %return]
+
+return: ; preds = %if.then512, %if.end504, %DECL_FAIL, %if.then495, %PREMATURE, %if.then479, %if.then465, %if.then69, %if.end52, %if.end19, %if.then
+ ret void
+}
diff --git a/test/Transforms/LoopStrengthReduce/2012-03-15-nopreheader.ll b/test/Transforms/LoopStrengthReduce/2012-03-15-nopreheader.ll
new file mode 100644
index 0000000..0172492
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/2012-03-15-nopreheader.ll
@@ -0,0 +1,155 @@
+; RUN: opt -loop-reduce -S < %s | FileCheck %s
+;
+; <rdar://problem/11049788> Segmentation fault: 11 in LoopStrengthReduce
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-darwin10.0.0"
+
+; IVUsers should not consider tmp128 a valid user because it is not in a
+; simplified loop nest.
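+;
+; Roughly, as a hypothetical C sketch:
+;   for (n0 = 0; c; ++n0) ;                  // while.cond / while.body
+;   for (i = 0; ; ++i) t = cmd[n0 + i];      // for.cond: tmp128 = n0 + i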
+; CHECK: @nopreheader
+; CHECK: for.cond:
+; CHECK: %tmp128 = add i64 %0, %indvar65
+define void @nopreheader(i8* %cmd) nounwind ssp {
+entry:
+ indirectbr i8* undef, [label %while.cond]
+
+while.cond: ; preds = %while.body, %entry
+ %0 = phi i64 [ %indvar.next48, %while.body ], [ 0, %entry ]
+ indirectbr i8* undef, [label %while.end, label %while.body]
+
+while.body: ; preds = %lor.rhs, %lor.lhs.false17, %lor.lhs.false11, %lor.lhs.false, %land.rhs
+ %indvar.next48 = add i64 %0, 1
+ indirectbr i8* undef, [label %while.cond]
+
+while.end: ; preds = %lor.rhs, %while.cond
+ indirectbr i8* undef, [label %if.end152]
+
+if.end152: ; preds = %lor.lhs.false144, %if.end110
+ indirectbr i8* undef, [label %lor.lhs.false184, label %for.cond]
+
+lor.lhs.false184: ; preds = %lor.lhs.false177
+ indirectbr i8* undef, [label %return, label %for.cond]
+
+for.cond: ; preds = %for.inc, %lor.lhs.false184, %if.end152
+ %indvar65 = phi i64 [ %indvar.next66, %for.inc ], [ 0, %lor.lhs.false184 ], [ 0, %if.end152 ]
+ %tmp128 = add i64 %0, %indvar65
+ %s.4 = getelementptr i8* %cmd, i64 %tmp128
+ %tmp195 = load i8* %s.4, align 1
+ indirectbr i8* undef, [label %return, label %land.rhs198]
+
+land.rhs198: ; preds = %for.cond
+ indirectbr i8* undef, [label %return, label %for.inc]
+
+for.inc: ; preds = %lor.rhs234, %land.lhs.true228, %land.lhs.true216, %land.lhs.true204
+ %indvar.next66 = add i64 %indvar65, 1
+ indirectbr i8* undef, [label %for.cond]
+
+return: ; preds = %if.end677, %doshell, %if.then96
+ ret void
+}
+
+; Another case with a dominating loop that does not contain the IV
+; User. Just make sure it doesn't assert.
+define void @nopreheader2() nounwind ssp {
+entry:
+ indirectbr i8* undef, [label %while.cond, label %return]
+
+while.cond: ; preds = %while.cond.backedge, %entry
+ indirectbr i8* undef, [label %while.cond.backedge, label %lor.rhs]
+
+lor.rhs: ; preds = %while.cond
+ indirectbr i8* undef, [label %while.cond.backedge, label %while.end]
+
+while.cond.backedge: ; preds = %lor.rhs, %while.cond
+ indirectbr i8* undef, [label %while.cond]
+
+while.end: ; preds = %lor.rhs
+ indirectbr i8* undef, [label %if.then18, label %return]
+
+if.then18: ; preds = %while.end
+ indirectbr i8* undef, [label %if.end35, label %lor.lhs.false]
+
+lor.lhs.false: ; preds = %if.then18
+ indirectbr i8* undef, [label %if.end35, label %return]
+
+if.end35: ; preds = %lor.lhs.false, %if.then18
+ indirectbr i8* undef, [label %while.cond36]
+
+while.cond36: ; preds = %while.body49, %if.end35
+ %0 = phi i64 [ %indvar.next13, %while.body49 ], [ 0, %if.end35 ]
+ indirectbr i8* undef, [label %while.body49, label %lor.rhs42]
+
+lor.rhs42: ; preds = %while.cond36
+ indirectbr i8* undef, [label %while.body49, label %while.end52]
+
+while.body49: ; preds = %lor.rhs42, %while.cond36
+ %indvar.next13 = add i64 %0, 1
+ indirectbr i8* undef, [label %while.cond36]
+
+while.end52: ; preds = %lor.rhs42
+ indirectbr i8* undef, [label %land.lhs.true, label %return]
+
+land.lhs.true: ; preds = %while.end52
+ indirectbr i8* undef, [label %while.cond66.preheader, label %return]
+
+while.cond66.preheader: ; preds = %land.lhs.true
+ indirectbr i8* undef, [label %while.cond66]
+
+while.cond66: ; preds = %while.body77, %while.cond66.preheader
+ indirectbr i8* undef, [label %land.rhs, label %while.cond81.preheader]
+
+land.rhs: ; preds = %while.cond66
+ indirectbr i8* undef, [label %while.body77, label %while.cond81.preheader]
+
+while.cond81.preheader: ; preds = %land.rhs, %while.cond66
+ %tmp45 = add i64 undef, %0
+ %tmp46 = add i64 %tmp45, undef
+ indirectbr i8* undef, [label %while.cond81]
+
+while.body77: ; preds = %land.rhs
+ indirectbr i8* undef, [label %while.cond66]
+
+while.cond81: ; preds = %while.body94, %while.cond81.preheader
+ %tmp25 = add i64 %tmp46, undef
+ indirectbr i8* undef, [label %while.body94, label %lor.rhs87]
+
+lor.rhs87: ; preds = %while.cond81
+ indirectbr i8* undef, [label %while.body94, label %return]
+
+while.body94: ; preds = %lor.rhs87, %while.cond81
+ indirectbr i8* undef, [label %while.cond81]
+
+return: ; preds = %if.end216, %land.lhs.true183, %land.lhs.true, %while.end52, %lor.lhs.false, %while.end, %entry
+ ret void
+}
+
+; Test a phi operand IV User dominated by a no-preheader loop.
+define void @nopreheader3() nounwind uwtable ssp align 2 {
+entry:
+ indirectbr i8* blockaddress(@nopreheader3, %if.end10), [label %if.end22, label %if.end10]
+
+if.end10: ; preds = %entry
+ indirectbr i8* blockaddress(@nopreheader3, %if.end6.i), [label %if.end22, label %if.end6.i]
+
+if.end6.i: ; preds = %if.end10
+ indirectbr i8* blockaddress(@nopreheader3, %while.cond2.preheader.i.i), [label %if.then12, label %while.cond2.preheader.i.i]
+
+while.cond2.preheader.i.i: ; preds = %while.end.i18.i, %if.end6.i
+ indirectbr i8* blockaddress(@nopreheader3, %while.cond2.i.i), [label %while.cond2.i.i]
+
+while.cond2.i.i: ; preds = %while.cond2.i.i, %while.cond2.preheader.i.i
+ %i1.1.i14.i = phi i32 [ %add.i15.i, %while.cond2.i.i ], [ undef, %while.cond2.preheader.i.i ]
+ %add.i15.i = add nsw i32 %i1.1.i14.i, undef
+ indirectbr i8* blockaddress(@nopreheader3, %while.end.i18.i), [label %while.cond2.i.i, label %while.end.i18.i]
+
+while.end.i18.i: ; preds = %while.cond2.i.i
+ indirectbr i8* blockaddress(@nopreheader3, %while.cond2.preheader.i.i), [label %if.then12, label %while.cond2.preheader.i.i]
+
+if.then12: ; preds = %while.end.i18.i, %if.end6.i
+ %i1.0.lcssa.i.i = phi i32 [ undef, %if.end6.i ], [ %i1.1.i14.i, %while.end.i18.i ]
+ indirectbr i8* blockaddress(@nopreheader3, %if.end22), [label %if.end22]
+
+if.end22: ; preds = %if.then12, %if.end10, %entry
+ ret void
+}
diff --git a/test/Transforms/LoopStrengthReduce/2012-03-26-constexpr.ll b/test/Transforms/LoopStrengthReduce/2012-03-26-constexpr.ll
new file mode 100644
index 0000000..c9b11a9
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/2012-03-26-constexpr.ll
@@ -0,0 +1,49 @@
+; RUN: opt < %s -loop-reduce -S
+; PR11950: isHighCostExpansion crashes on ConstExpr
+;
+; The crash happened during IVChain analysis (CollectChains). We don't
+; really care how LSR decides to transform this loop, so we don't
+; check it. As long as the analysis doesn't crash we're ok.
+target datalayout = "e-p:64:64:64-n32:64"
+
+%struct.this_structure_s.0.5 = type { [6144 x [8 x i32]], [6144 x [8 x i32]], [6147 x [4 x i32]], [8 x i32], [2 x i8*], [2 x i8*], [6144 x i8], [6144 x i32], [6144 x i32], [4 x [4 x i8]] }
+
+define internal fastcc void @someFunction(%struct.this_structure_s.0.5* nocapture %scratch, i32 %stage, i32 %cbSize) nounwind {
+entry:
+ %0 = getelementptr inbounds %struct.this_structure_s.0.5* %scratch, i32 0, i32 4, i32 %stage
+ %1 = load i8** %0, align 4
+ %2 = getelementptr inbounds %struct.this_structure_s.0.5* %scratch, i32 0, i32 5, i32 %stage
+ %3 = load i8** %2, align 4
+ %4 = getelementptr inbounds %struct.this_structure_s.0.5* %scratch, i32 0, i32 2, i32 0, i32 0
+ %tmp11 = shl i32 %stage, 1
+ %tmp1325 = or i32 %tmp11, 1
+ br label %__label_D_1608
+
+__label_D_1608: ; preds = %__label_D_1608, %entry
+ %i.12 = phi i32 [ 0, %entry ], [ %10, %__label_D_1608 ]
+ %tmp = shl i32 %i.12, 2
+ %lvar_g.13 = getelementptr i32* %4, i32 %tmp
+ %tmp626 = or i32 %tmp, 1
+ %scevgep = getelementptr i32* %4, i32 %tmp626
+ %tmp727 = or i32 %tmp, 2
+ %scevgep8 = getelementptr i32* %4, i32 %tmp727
+ %tmp928 = or i32 %tmp, 3
+ %scevgep10 = getelementptr i32* %4, i32 %tmp928
+ %scevgep12 = getelementptr %struct.this_structure_s.0.5* %scratch, i32 0, i32 9, i32 %tmp11, i32 %i.12
+ %scevgep14 = getelementptr %struct.this_structure_s.0.5* %scratch, i32 0, i32 9, i32 %tmp1325, i32 %i.12
+ %5 = load i8* %scevgep12, align 1
+ %6 = sext i8 %5 to i32
+ %7 = load i8* %scevgep14, align 1
+ %8 = sext i8 %7 to i32
+ store i32 0, i32* %lvar_g.13, align 4
+ store i32 %8, i32* %scevgep, align 4
+ store i32 %6, i32* %scevgep8, align 4
+ %9 = add nsw i32 %8, %6
+ store i32 %9, i32* %scevgep10, align 4
+ %10 = add nsw i32 %i.12, 1
+ %exitcond = icmp eq i32 %10, 3
+ br i1 %exitcond, label %return, label %__label_D_1608
+
+return: ; preds = %__label_D_1608
+ ret void
+}
diff --git a/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll b/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
new file mode 100644
index 0000000..9189d79
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
@@ -0,0 +1,292 @@
+; RUN: llc < %s -O3 -march=thumb -mcpu=cortex-a9 | FileCheck %s -check-prefix=A9
+
+; @simple is the most basic chain of address induction variables. Chaining
+; saves at least one register and avoids complex addressing and setup
+; code.
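+;
+; Roughly, as a hypothetical C sketch of the loop below:
+;   while (a != b) { s += a[0] + a[x] + a[2*x] + a[3*x]; a += 4*x; }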
+;
+; A9: @simple
+; no expensive address computation in the preheader
+; A9: lsl
+; A9-NOT: lsl
+; A9: %loop
+; no complex address modes
+; A9-NOT: lsl
+define i32 @simple(i32* %a, i32* %b, i32 %x) nounwind {
+entry:
+ br label %loop
+loop:
+ %iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
+ %s = phi i32 [ 0, %entry ], [ %s4, %loop ]
+ %v = load i32* %iv
+ %iv1 = getelementptr inbounds i32* %iv, i32 %x
+ %v1 = load i32* %iv1
+ %iv2 = getelementptr inbounds i32* %iv1, i32 %x
+ %v2 = load i32* %iv2
+ %iv3 = getelementptr inbounds i32* %iv2, i32 %x
+ %v3 = load i32* %iv3
+ %s1 = add i32 %s, %v
+ %s2 = add i32 %s1, %v1
+ %s3 = add i32 %s2, %v2
+ %s4 = add i32 %s3, %v3
+ %iv4 = getelementptr inbounds i32* %iv3, i32 %x
+ %cmp = icmp eq i32* %iv4, %b
+ br i1 %cmp, label %exit, label %loop
+exit:
+ ret i32 %s4
+}
+
+; @user is not currently chained because the IV is live across memory ops.
+;
+; A9: @user
+; stride multiples computed in the preheader
+; A9: lsl
+; A9: lsl
+; A9: %loop
+; complex address modes
+; A9: lsl
+; A9: lsl
+define i32 @user(i32* %a, i32* %b, i32 %x) nounwind {
+entry:
+ br label %loop
+loop:
+ %iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
+ %s = phi i32 [ 0, %entry ], [ %s4, %loop ]
+ %v = load i32* %iv
+ %iv1 = getelementptr inbounds i32* %iv, i32 %x
+ %v1 = load i32* %iv1
+ %iv2 = getelementptr inbounds i32* %iv1, i32 %x
+ %v2 = load i32* %iv2
+ %iv3 = getelementptr inbounds i32* %iv2, i32 %x
+ %v3 = load i32* %iv3
+ %s1 = add i32 %s, %v
+ %s2 = add i32 %s1, %v1
+ %s3 = add i32 %s2, %v2
+ %s4 = add i32 %s3, %v3
+ %iv4 = getelementptr inbounds i32* %iv3, i32 %x
+ store i32 %s4, i32* %iv
+ %cmp = icmp eq i32* %iv4, %b
+ br i1 %cmp, label %exit, label %loop
+exit:
+ ret i32 %s4
+}
+
+; @extrastride is a slightly more interesting case of a single
+; complete chain with multiple strides. The test case IR is what LSR
+; used to do, and exactly what we don't want to do. LSR's new IV
+; chaining feature should now undo the damage.
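+;
+; Roughly, as a hypothetical C sketch (s = %main_stride, p = %main, where
+; LD(q) stands for an i32 load from byte pointer q):
+;   for (i = 0; i < z; ++i) {
+;     *res = LD(p) + LD(p+s) + LD(p+2*s) + LD(p+3*s) + LD(p+4*s);
+;     p += 5*s + x;  res += y;               // two strides: 5*s+x and y
+;   }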
+;
+; A9: extrastride:
+; no spills
+; A9-NOT: str
+; only one stride multiple in the preheader
+; A9: lsl
+; A9-NOT: {{str r|lsl}}
+; A9: %for.body{{$}}
+; no complex address modes or reloads
+; A9-NOT: {{ldr .*[sp]|lsl}}
+define void @extrastride(i8* nocapture %main, i32 %main_stride, i32* nocapture %res, i32 %x, i32 %y, i32 %z) nounwind {
+entry:
+ %cmp8 = icmp eq i32 %z, 0
+ br i1 %cmp8, label %for.end, label %for.body.lr.ph
+
+for.body.lr.ph: ; preds = %entry
+ %add.ptr.sum = shl i32 %main_stride, 1 ; s*2
+ %add.ptr1.sum = add i32 %add.ptr.sum, %main_stride ; s*3
+ %add.ptr2.sum = add i32 %x, %main_stride ; s + x
+ %add.ptr4.sum = shl i32 %main_stride, 2 ; s*4
+ %add.ptr3.sum = add i32 %add.ptr2.sum, %add.ptr4.sum ; total IV stride = s*5+x
+ br label %for.body
+
+for.body: ; preds = %for.body.lr.ph, %for.body
+ %main.addr.011 = phi i8* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
+ %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
+ %res.addr.09 = phi i32* [ %res, %for.body.lr.ph ], [ %add.ptr7, %for.body ]
+ %0 = bitcast i8* %main.addr.011 to i32*
+ %1 = load i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8* %main.addr.011, i32 %main_stride
+ %2 = bitcast i8* %add.ptr to i32*
+ %3 = load i32* %2, align 4
+ %add.ptr1 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr.sum
+ %4 = bitcast i8* %add.ptr1 to i32*
+ %5 = load i32* %4, align 4
+ %add.ptr2 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr1.sum
+ %6 = bitcast i8* %add.ptr2 to i32*
+ %7 = load i32* %6, align 4
+ %add.ptr3 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr4.sum
+ %8 = bitcast i8* %add.ptr3 to i32*
+ %9 = load i32* %8, align 4
+ %add = add i32 %3, %1
+ %add4 = add i32 %add, %5
+ %add5 = add i32 %add4, %7
+ %add6 = add i32 %add5, %9
+ store i32 %add6, i32* %res.addr.09, align 4
+ %add.ptr6 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr3.sum
+ %add.ptr7 = getelementptr inbounds i32* %res.addr.09, i32 %y
+ %inc = add i32 %i.010, 1
+ %cmp = icmp eq i32 %inc, %z
+ br i1 %cmp, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+; @foldedidx is an unrolled variant of this loop:
+; for (unsigned long i = 0; i < len; i += s) {
+; c[i] = a[i] + b[i];
+; }
+; where 's' can be folded into the addressing mode.
+; Consequently, we should *not* form any chains.
+;
+; A9: foldedidx:
+; A9: ldrb.w {{r[0-9]|lr}}, [{{r[0-9]|lr}}, #3]
+define void @foldedidx(i8* nocapture %a, i8* nocapture %b, i8* nocapture %c) nounwind ssp {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.07 = phi i32 [ 0, %entry ], [ %inc.3, %for.body ]
+ %arrayidx = getelementptr inbounds i8* %a, i32 %i.07
+ %0 = load i8* %arrayidx, align 1
+ %conv5 = zext i8 %0 to i32
+ %arrayidx1 = getelementptr inbounds i8* %b, i32 %i.07
+ %1 = load i8* %arrayidx1, align 1
+ %conv26 = zext i8 %1 to i32
+ %add = add nsw i32 %conv26, %conv5
+ %conv3 = trunc i32 %add to i8
+ %arrayidx4 = getelementptr inbounds i8* %c, i32 %i.07
+ store i8 %conv3, i8* %arrayidx4, align 1
+ %inc1 = or i32 %i.07, 1
+ %arrayidx.1 = getelementptr inbounds i8* %a, i32 %inc1
+ %2 = load i8* %arrayidx.1, align 1
+ %conv5.1 = zext i8 %2 to i32
+ %arrayidx1.1 = getelementptr inbounds i8* %b, i32 %inc1
+ %3 = load i8* %arrayidx1.1, align 1
+ %conv26.1 = zext i8 %3 to i32
+ %add.1 = add nsw i32 %conv26.1, %conv5.1
+ %conv3.1 = trunc i32 %add.1 to i8
+ %arrayidx4.1 = getelementptr inbounds i8* %c, i32 %inc1
+ store i8 %conv3.1, i8* %arrayidx4.1, align 1
+ %inc.12 = or i32 %i.07, 2
+ %arrayidx.2 = getelementptr inbounds i8* %a, i32 %inc.12
+ %4 = load i8* %arrayidx.2, align 1
+ %conv5.2 = zext i8 %4 to i32
+ %arrayidx1.2 = getelementptr inbounds i8* %b, i32 %inc.12
+ %5 = load i8* %arrayidx1.2, align 1
+ %conv26.2 = zext i8 %5 to i32
+ %add.2 = add nsw i32 %conv26.2, %conv5.2
+ %conv3.2 = trunc i32 %add.2 to i8
+ %arrayidx4.2 = getelementptr inbounds i8* %c, i32 %inc.12
+ store i8 %conv3.2, i8* %arrayidx4.2, align 1
+ %inc.23 = or i32 %i.07, 3
+ %arrayidx.3 = getelementptr inbounds i8* %a, i32 %inc.23
+ %6 = load i8* %arrayidx.3, align 1
+ %conv5.3 = zext i8 %6 to i32
+ %arrayidx1.3 = getelementptr inbounds i8* %b, i32 %inc.23
+ %7 = load i8* %arrayidx1.3, align 1
+ %conv26.3 = zext i8 %7 to i32
+ %add.3 = add nsw i32 %conv26.3, %conv5.3
+ %conv3.3 = trunc i32 %add.3 to i8
+ %arrayidx4.3 = getelementptr inbounds i8* %c, i32 %inc.23
+ store i8 %conv3.3, i8* %arrayidx4.3, align 1
+ %inc.3 = add nsw i32 %i.07, 4
+ %exitcond.3 = icmp eq i32 %inc.3, 400
+ br i1 %exitcond.3, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+; @testNeon is an important example of the need for ivchains.
+;
+; Currently we have three extra add.w's that keep the store address
+; live past the next increment because ISEL is unfortunately undoing
+; the store chain. ISEL also fails to convert the stores to
+; post-increment addressing. However, the loads should use
+; post-increment addressing, with no adds or add.w's beyond the three
+; mentioned. Most importantly, there should be no spills or reloads!
+;
+; CHECK: testNeon:
+; CHECK: %.lr.ph
+; CHECK-NOT: lsl.w
+; CHECK-NOT: {{ldr|str|adds|add r}}
+; CHECK: add.w r
+; CHECK-NOT: {{ldr|str|adds|add r}}
+; CHECK: add.w r
+; CHECK-NOT: {{ldr|str|adds|add r}}
+; CHECK: add.w r
+; CHECK-NOT: {{ldr|str|adds|add r}}
+; CHECK-NOT: add.w r
+; CHECK: bne
+define hidden void @testNeon(i8* %ref_data, i32 %ref_stride, i32 %limit, <16 x i8>* nocapture %data) nounwind optsize {
+ %1 = icmp sgt i32 %limit, 0
+ br i1 %1, label %.lr.ph, label %45
+
+.lr.ph: ; preds = %0
+ %2 = shl nsw i32 %ref_stride, 1
+ %3 = mul nsw i32 %ref_stride, 3
+ %4 = shl nsw i32 %ref_stride, 2
+ %5 = mul nsw i32 %ref_stride, 5
+ %6 = mul nsw i32 %ref_stride, 6
+ %7 = mul nsw i32 %ref_stride, 7
+ %8 = shl nsw i32 %ref_stride, 3
+ %9 = sub i32 0, %8
+ %10 = mul i32 %limit, -64
+ br label %11
+
+; <label>:11 ; preds = %11, %.lr.ph
+ %.05 = phi i8* [ %ref_data, %.lr.ph ], [ %42, %11 ]
+ %counter.04 = phi i32 [ 0, %.lr.ph ], [ %44, %11 ]
+ %result.03 = phi <16 x i8> [ zeroinitializer, %.lr.ph ], [ %41, %11 ]
+ %.012 = phi <16 x i8>* [ %data, %.lr.ph ], [ %43, %11 ]
+ %12 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %.05, i32 1) nounwind
+ %13 = getelementptr inbounds i8* %.05, i32 %ref_stride
+ %14 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %13, i32 1) nounwind
+ %15 = shufflevector <1 x i64> %12, <1 x i64> %14, <2 x i32> <i32 0, i32 1>
+ %16 = bitcast <2 x i64> %15 to <16 x i8>
+ %17 = getelementptr inbounds <16 x i8>* %.012, i32 1
+ store <16 x i8> %16, <16 x i8>* %.012, align 4
+ %18 = getelementptr inbounds i8* %.05, i32 %2
+ %19 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %18, i32 1) nounwind
+ %20 = getelementptr inbounds i8* %.05, i32 %3
+ %21 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %20, i32 1) nounwind
+ %22 = shufflevector <1 x i64> %19, <1 x i64> %21, <2 x i32> <i32 0, i32 1>
+ %23 = bitcast <2 x i64> %22 to <16 x i8>
+ %24 = getelementptr inbounds <16 x i8>* %.012, i32 2
+ store <16 x i8> %23, <16 x i8>* %17, align 4
+ %25 = getelementptr inbounds i8* %.05, i32 %4
+ %26 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %25, i32 1) nounwind
+ %27 = getelementptr inbounds i8* %.05, i32 %5
+ %28 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %27, i32 1) nounwind
+ %29 = shufflevector <1 x i64> %26, <1 x i64> %28, <2 x i32> <i32 0, i32 1>
+ %30 = bitcast <2 x i64> %29 to <16 x i8>
+ %31 = getelementptr inbounds <16 x i8>* %.012, i32 3
+ store <16 x i8> %30, <16 x i8>* %24, align 4
+ %32 = getelementptr inbounds i8* %.05, i32 %6
+ %33 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %32, i32 1) nounwind
+ %34 = getelementptr inbounds i8* %.05, i32 %7
+ %35 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %34, i32 1) nounwind
+ %36 = shufflevector <1 x i64> %33, <1 x i64> %35, <2 x i32> <i32 0, i32 1>
+ %37 = bitcast <2 x i64> %36 to <16 x i8>
+ store <16 x i8> %37, <16 x i8>* %31, align 4
+ %38 = add <16 x i8> %16, %23
+ %39 = add <16 x i8> %38, %30
+ %40 = add <16 x i8> %39, %37
+ %41 = add <16 x i8> %result.03, %40
+ %42 = getelementptr i8* %.05, i32 %9
+ %43 = getelementptr inbounds <16 x i8>* %.012, i32 -64
+ %44 = add nsw i32 %counter.04, 1
+ %exitcond = icmp eq i32 %44, %limit
+ br i1 %exitcond, label %._crit_edge, label %11
+
+._crit_edge: ; preds = %11
+ %scevgep = getelementptr <16 x i8>* %data, i32 %10
+ br label %45
+
+; <label>:45 ; preds = %._crit_edge, %0
+ %result.0.lcssa = phi <16 x i8> [ %41, %._crit_edge ], [ zeroinitializer, %0 ]
+ %.01.lcssa = phi <16 x i8>* [ %scevgep, %._crit_edge ], [ %data, %0 ]
+ store <16 x i8> %result.0.lcssa, <16 x i8>* %.01.lcssa, align 4
+ ret void
+}
+
+declare <1 x i64> @llvm.arm.neon.vld1.v1i64(i8*, i32) nounwind readonly
diff --git a/test/Transforms/LoopStrengthReduce/ARM/lit.local.cfg b/test/Transforms/LoopStrengthReduce/ARM/lit.local.cfg
new file mode 100644
index 0000000..bac2ffa
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/ARM/lit.local.cfg
@@ -0,0 +1,6 @@
+config.suffixes = ['.ll']
+
+targets = set(config.root.targets_to_build.split())
+if 'ARM' not in targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/LoopStrengthReduce/X86/2011-11-29-postincphi.ll b/test/Transforms/LoopStrengthReduce/X86/2011-11-29-postincphi.ll
new file mode 100644
index 0000000..cb23ad0
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/2011-11-29-postincphi.ll
@@ -0,0 +1,36 @@
+; RUN: llc < %s | FileCheck %s
+;
+; PR11431: handle a phi operand that is replaced by a postinc user.
+; LSR first expands %t3 to %t2 in %phi.
+; LSR then expands %t2 in %phi into two decrements, one on each loop exit.
+
+target datalayout = "e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-f128:128:128-n8:16:32:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare i1 @check() nounwind
+
+; Check that LSR did something close to the behavior at the time of the bug.
+; CHECK: @sqlite3DropTriggerPtr
+; CHECK: incq %rax
+; CHECK: jne
+; CHECK: decq %rax
+; CHECK: ret
+define i64 @sqlite3DropTriggerPtr() nounwind {
+bb:
+ %cmp = call zeroext i1 @check()
+ br label %bb1
+
+bb1: ; preds = %bb4, %bb
+ %t0 = phi i64 [ 0, %bb ], [ %t3, %bb4 ]
+ %t2 = phi i64 [ 1, %bb ], [ %t5, %bb4 ]
+ %t3 = add nsw i64 %t0, 1
+ br i1 %cmp, label %bb4, label %bb8
+
+bb4: ; preds = %bb1
+ %t5 = add nsw i64 %t2, 1
+ br i1 %cmp, label %bb1, label %bb8
+
+bb8: ; preds = %bb8, %bb4
+ %phi = phi i64 [ %t3, %bb1 ], [ %t2, %bb4 ]
+ ret i64 %phi
+}
diff --git a/test/Transforms/LoopStrengthReduce/X86/2011-12-04-loserreg.ll b/test/Transforms/LoopStrengthReduce/X86/2011-12-04-loserreg.ll
new file mode 100644
index 0000000..5108650
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/2011-12-04-loserreg.ll
@@ -0,0 +1,96 @@
+; RUN: llc < %s | FileCheck %s
+;
+; Test LSR's ability to prune formulae that refer to nonexistent
+; AddRecs in other loops.
+;
+; Unable to reduce this case further because it requires LSR to exceed
+; ComplexityLimit.
+;
+; We really just want to ensure that LSR can process this loop without
+; finding an unsatisfactory solution and bailing out. I've added
+; dummyout, an obvious candidate for postinc replacement so we can
+; verify that LSR removes it.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-darwin"
+
+; CHECK: @test
+; CHECK: # %for.body{{$}}
+; dummyiv copy should be removed
+; CHECK-NOT: movq
+; CHECK: # %for.cond19.preheader
+; dummycnt should be removed
+; CHECK-NOT: incq
+; CHECK: # %for.body23{{$}}
+define i64 @test(i64 %count, float* nocapture %srcrow, i32* nocapture %destrow) nounwind uwtable ssp {
+entry:
+ %cmp34 = icmp eq i64 %count, 0
+ br i1 %cmp34, label %for.end29, label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %dummyiv = phi i64 [ %dummycnt, %for.body ], [ 0, %entry ]
+ %indvars.iv39 = phi i64 [ %indvars.iv.next40, %for.body ], [ 0, %entry ]
+ %dp.036 = phi i32* [ %add.ptr, %for.body ], [ %destrow, %entry ]
+ %p.035 = phi float* [ %incdec.ptr4, %for.body ], [ %srcrow, %entry ]
+ %incdec.ptr = getelementptr inbounds float* %p.035, i64 1
+ %0 = load float* %incdec.ptr, align 4
+ %incdec.ptr2 = getelementptr inbounds float* %p.035, i64 2
+ %1 = load float* %incdec.ptr2, align 4
+ %incdec.ptr3 = getelementptr inbounds float* %p.035, i64 3
+ %2 = load float* %incdec.ptr3, align 4
+ %incdec.ptr4 = getelementptr inbounds float* %p.035, i64 4
+ %3 = load float* %incdec.ptr4, align 4
+ %4 = load i32* %dp.036, align 4
+ %conv5 = fptoui float %0 to i32
+ %or = or i32 %4, %conv5
+ %arrayidx6 = getelementptr inbounds i32* %dp.036, i64 1
+ %5 = load i32* %arrayidx6, align 4
+ %conv7 = fptoui float %1 to i32
+ %or8 = or i32 %5, %conv7
+ %arrayidx9 = getelementptr inbounds i32* %dp.036, i64 2
+ %6 = load i32* %arrayidx9, align 4
+ %conv10 = fptoui float %2 to i32
+ %or11 = or i32 %6, %conv10
+ %arrayidx12 = getelementptr inbounds i32* %dp.036, i64 3
+ %7 = load i32* %arrayidx12, align 4
+ %conv13 = fptoui float %3 to i32
+ %or14 = or i32 %7, %conv13
+ store i32 %or, i32* %dp.036, align 4
+ store i32 %or8, i32* %arrayidx6, align 4
+ store i32 %or11, i32* %arrayidx9, align 4
+ store i32 %or14, i32* %arrayidx12, align 4
+ %add.ptr = getelementptr inbounds i32* %dp.036, i64 4
+ %indvars.iv.next40 = add i64 %indvars.iv39, 4
+ %dummycnt = add i64 %dummyiv, 1
+ %cmp = icmp ult i64 %indvars.iv.next40, %count
+ br i1 %cmp, label %for.body, label %for.cond19.preheader
+
+for.cond19.preheader: ; preds = %for.body
+ %dummyout = add i64 %dummyiv, 1
+ %rem = and i64 %count, 3
+ %cmp2130 = icmp eq i64 %rem, 0
+ br i1 %cmp2130, label %for.end29, label %for.body23.lr.ph
+
+for.body23.lr.ph: ; preds = %for.cond19.preheader
+ %8 = and i64 %count, 3
+ br label %for.body23
+
+for.body23: ; preds = %for.body23, %for.body23.lr.ph
+ %indvars.iv = phi i64 [ 0, %for.body23.lr.ph ], [ %indvars.iv.next, %for.body23 ]
+ %dp.132 = phi i32* [ %add.ptr, %for.body23.lr.ph ], [ %incdec.ptr28, %for.body23 ]
+ %p.131 = phi float* [ %incdec.ptr4, %for.body23.lr.ph ], [ %incdec.ptr24, %for.body23 ]
+ %incdec.ptr24 = getelementptr inbounds float* %p.131, i64 1
+ %9 = load float* %incdec.ptr24, align 4
+ %10 = load i32* %dp.132, align 4
+ %conv25 = fptoui float %9 to i32
+ %or26 = or i32 %10, %conv25
+ store i32 %or26, i32* %dp.132, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %incdec.ptr28 = getelementptr inbounds i32* %dp.132, i64 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %8
+ br i1 %exitcond, label %for.end29, label %for.body23
+
+for.end29: ; preds = %entry, %for.body23, %for.cond19.preheader
+ %result = phi i64 [ 0, %entry ], [ %dummyout, %for.body23 ], [ %dummyout, %for.cond19.preheader ]
+ ret i64 %result
+}
diff --git a/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll b/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll
new file mode 100644
index 0000000..2dcaab8
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll
@@ -0,0 +1,92 @@
+; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 | FileCheck %s
+
+declare i1 @check() nounwind
+declare i1 @foo(i8*, i8*, i8*) nounwind
+
+; Check that redundant phi elimination ran
+; CHECK: @test
+; CHECK: %while.body.i
+; CHECK: movs
+; CHECK-NOT: movs
+; CHECK: %for.end.i
+define i32 @test(i8* %base) nounwind uwtable ssp {
+entry:
+ br label %while.body.lr.ph.i
+
+while.body.lr.ph.i: ; preds = %cond.true.i
+ br label %while.body.i
+
+while.body.i: ; preds = %cond.true29.i, %while.body.lr.ph.i
+ %indvars.iv7.i = phi i64 [ 16, %while.body.lr.ph.i ], [ %indvars.iv.next8.i, %cond.true29.i ]
+ %i.05.i = phi i64 [ 0, %while.body.lr.ph.i ], [ %indvars.iv7.i, %cond.true29.i ]
+ %sext.i = shl i64 %i.05.i, 32
+ %idx.ext.i = ashr exact i64 %sext.i, 32
+ %add.ptr.sum.i = add i64 %idx.ext.i, 16
+ br label %for.body.i
+
+for.body.i: ; preds = %for.body.i, %while.body.i
+ %indvars.iv.i = phi i64 [ 0, %while.body.i ], [ %indvars.iv.next.i, %for.body.i ]
+ %add.ptr.sum = add i64 %add.ptr.sum.i, %indvars.iv.i
+ %arrayidx22.i = getelementptr inbounds i8* %base, i64 %add.ptr.sum
+ %0 = load i8* %arrayidx22.i, align 1
+ %indvars.iv.next.i = add i64 %indvars.iv.i, 1
+ %cmp = call i1 @check() nounwind
+ br i1 %cmp, label %for.end.i, label %for.body.i
+
+for.end.i: ; preds = %for.body.i
+ %add.ptr.i144 = getelementptr inbounds i8* %base, i64 %add.ptr.sum.i
+ %cmp2 = tail call i1 @foo(i8* %add.ptr.i144, i8* %add.ptr.i144, i8* undef) nounwind
+ br i1 %cmp2, label %cond.true29.i, label %cond.false35.i
+
+cond.true29.i: ; preds = %for.end.i
+ %indvars.iv.next8.i = add i64 %indvars.iv7.i, 16
+ br i1 false, label %exit, label %while.body.i
+
+cond.false35.i: ; preds = %for.end.i
+ unreachable
+
+exit: ; preds = %cond.true29.i, %cond.true.i
+ ret i32 0
+}
+
+%struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771 = type { i32, i32, i32 }
+
+@tags = external global [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771], align 16
+
+; PR11782: SCEVExpander assert
+;
+; Test phi reuse after LSR that requires SCEVExpander to hoist an
+; interesting GEP.
+;
+; CHECK: @test2
+; CHECK: %entry
+; CHECK-NOT: mov
+; CHECK: jne
+define void @test2(i32 %n) nounwind uwtable {
+entry:
+ br i1 undef, label %while.end, label %for.cond468
+
+for.cond468: ; preds = %if.then477, %entry
+ %indvars.iv1163 = phi i64 [ %indvars.iv.next1164, %if.then477 ], [ 1, %entry ]
+ %k.0.in = phi i32* [ %last, %if.then477 ], [ getelementptr inbounds ([5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771]* @tags, i64 0, i64 0, i32 2), %entry ]
+ %k.0 = load i32* %k.0.in, align 4
+ %0 = trunc i64 %indvars.iv1163 to i32
+ %cmp469 = icmp slt i32 %0, %n
+ br i1 %cmp469, label %for.body471, label %for.inc498
+
+for.body471: ; preds = %for.cond468
+ %first = getelementptr inbounds [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771]* @tags, i64 0, i64 %indvars.iv1163, i32 1
+ %1 = load i32* %first, align 4
+ br i1 undef, label %if.then477, label %for.inc498
+
+if.then477: ; preds = %for.body471
+ %last = getelementptr inbounds [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771]* @tags, i64 0, i64 %indvars.iv1163, i32 2
+ %indvars.iv.next1164 = add i64 %indvars.iv1163, 1
+ br label %for.cond468
+
+for.inc498: ; preds = %for.inc498, %for.body471, %for.cond468
+ br label %for.inc498
+
+while.end: ; preds = %entry
+ ret void
+}
diff --git a/test/Transforms/LoopStrengthReduce/X86/dg.exp b/test/Transforms/LoopStrengthReduce/X86/dg.exp
deleted file mode 100644
index 7b7bd4e..0000000
--- a/test/Transforms/LoopStrengthReduce/X86/dg.exp
+++ /dev/null
@@ -1,5 +0,0 @@
-load_lib llvm.exp
-
-if { [llvm_supports_target X86] } {
- RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll}]]
-}
diff --git a/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll b/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
new file mode 100644
index 0000000..e42b67f
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
@@ -0,0 +1,300 @@
+; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -O3 -march=x86 -mcpu=core2 | FileCheck %s -check-prefix=X32
+
+; @simple is the most basic chain of address induction variables. Chaining
+; saves at least one register and avoids complex addressing and setup
+; code.
+;
+; X64: @simple
+; %x * 4
+; X64: shlq $2
+; no other address computation in the preheader
+; X64-NEXT: xorl
+; X64-NEXT: .align
+; X64: %loop
+; no complex address modes
+; X64-NOT: (%{{[^)]+}},%{{[^)]+}},
+;
+; X32: @simple
+; no expensive address computation in the preheader
+; X32-NOT: imul
+; X32: %loop
+; no complex address modes
+; X32-NOT: (%{{[^)]+}},%{{[^)]+}},
+define i32 @simple(i32* %a, i32* %b, i32 %x) nounwind {
+entry:
+ br label %loop
+loop:
+ %iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
+ %s = phi i32 [ 0, %entry ], [ %s4, %loop ]
+ %v = load i32* %iv
+ %iv1 = getelementptr inbounds i32* %iv, i32 %x
+ %v1 = load i32* %iv1
+ %iv2 = getelementptr inbounds i32* %iv1, i32 %x
+ %v2 = load i32* %iv2
+ %iv3 = getelementptr inbounds i32* %iv2, i32 %x
+ %v3 = load i32* %iv3
+ %s1 = add i32 %s, %v
+ %s2 = add i32 %s1, %v1
+ %s3 = add i32 %s2, %v2
+ %s4 = add i32 %s3, %v3
+ %iv4 = getelementptr inbounds i32* %iv3, i32 %x
+ %cmp = icmp eq i32* %iv4, %b
+ br i1 %cmp, label %exit, label %loop
+exit:
+ ret i32 %s4
+}
+
+; @user is not currently chained because the IV is live across memory ops.
+;
+; X64: @user
+; X64: shlq $4
+; X64: lea
+; X64: lea
+; X64: %loop
+; complex address modes
+; X64: (%{{[^)]+}},%{{[^)]+}},
+;
+; X32: @user
+; expensive address computation in the preheader
+; X32: imul
+; X32: %loop
+; complex address modes
+; X32: (%{{[^)]+}},%{{[^)]+}},
+define i32 @user(i32* %a, i32* %b, i32 %x) nounwind {
+entry:
+ br label %loop
+loop:
+ %iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
+ %s = phi i32 [ 0, %entry ], [ %s4, %loop ]
+ %v = load i32* %iv
+ %iv1 = getelementptr inbounds i32* %iv, i32 %x
+ %v1 = load i32* %iv1
+ %iv2 = getelementptr inbounds i32* %iv1, i32 %x
+ %v2 = load i32* %iv2
+ %iv3 = getelementptr inbounds i32* %iv2, i32 %x
+ %v3 = load i32* %iv3
+ %s1 = add i32 %s, %v
+ %s2 = add i32 %s1, %v1
+ %s3 = add i32 %s2, %v2
+ %s4 = add i32 %s3, %v3
+ %iv4 = getelementptr inbounds i32* %iv3, i32 %x
+ store i32 %s4, i32* %iv
+ %cmp = icmp eq i32* %iv4, %b
+ br i1 %cmp, label %exit, label %loop
+exit:
+ ret i32 %s4
+}
+
+; @extrastride is a slightly more interesting case of a single
+; complete chain with multiple strides. The test case IR is what LSR
+; used to do, and exactly what we don't want to do. LSR's new IV
+; chaining feature should now undo the damage.
+;
+; X64: extrastride:
+; We currently don't handle this on X64 because the sexts cause
+; strange increment expressions like this:
+; IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64)))
+;
+; X32: extrastride:
+; no spills in the preheader
+; X32-NOT: mov{{.*}}(%esp){{$}}
+; X32: %for.body{{$}}
+; no complex address modes
+; X32-NOT: (%{{[^)]+}},%{{[^)]+}},
+; no reloads
+; X32-NOT: (%esp)
+define void @extrastride(i8* nocapture %main, i32 %main_stride, i32* nocapture %res, i32 %x, i32 %y, i32 %z) nounwind {
+entry:
+ %cmp8 = icmp eq i32 %z, 0
+ br i1 %cmp8, label %for.end, label %for.body.lr.ph
+
+for.body.lr.ph: ; preds = %entry
+ %add.ptr.sum = shl i32 %main_stride, 1 ; s*2
+ %add.ptr1.sum = add i32 %add.ptr.sum, %main_stride ; s*3
+ %add.ptr2.sum = add i32 %x, %main_stride ; s + x
+ %add.ptr4.sum = shl i32 %main_stride, 2 ; s*4
+ %add.ptr3.sum = add i32 %add.ptr2.sum, %add.ptr4.sum ; total IV stride = s*5+x
+ br label %for.body
+
+for.body: ; preds = %for.body.lr.ph, %for.body
+ %main.addr.011 = phi i8* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
+ %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
+ %res.addr.09 = phi i32* [ %res, %for.body.lr.ph ], [ %add.ptr7, %for.body ]
+ %0 = bitcast i8* %main.addr.011 to i32*
+ %1 = load i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8* %main.addr.011, i32 %main_stride
+ %2 = bitcast i8* %add.ptr to i32*
+ %3 = load i32* %2, align 4
+ %add.ptr1 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr.sum
+ %4 = bitcast i8* %add.ptr1 to i32*
+ %5 = load i32* %4, align 4
+ %add.ptr2 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr1.sum
+ %6 = bitcast i8* %add.ptr2 to i32*
+ %7 = load i32* %6, align 4
+ %add.ptr3 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr4.sum
+ %8 = bitcast i8* %add.ptr3 to i32*
+ %9 = load i32* %8, align 4
+ %add = add i32 %3, %1
+ %add4 = add i32 %add, %5
+ %add5 = add i32 %add4, %7
+ %add6 = add i32 %add5, %9
+ store i32 %add6, i32* %res.addr.09, align 4
+ %add.ptr6 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr3.sum
+ %add.ptr7 = getelementptr inbounds i32* %res.addr.09, i32 %y
+ %inc = add i32 %i.010, 1
+ %cmp = icmp eq i32 %inc, %z
+ br i1 %cmp, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+; @foldedidx is an unrolled variant of this loop:
+; for (unsigned long i = 0; i < len; i += s) {
+; c[i] = a[i] + b[i];
+; }
+; where 's' can be folded into the addressing mode.
+; Consequently, we should *not* form any chains.
+;
+; X64: foldedidx:
+; X64: movzbl -3(
+;
+; X32: foldedidx:
+; X32: movzbl -3(
+define void @foldedidx(i8* nocapture %a, i8* nocapture %b, i8* nocapture %c) nounwind ssp {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.07 = phi i32 [ 0, %entry ], [ %inc.3, %for.body ]
+ %arrayidx = getelementptr inbounds i8* %a, i32 %i.07
+ %0 = load i8* %arrayidx, align 1
+ %conv5 = zext i8 %0 to i32
+ %arrayidx1 = getelementptr inbounds i8* %b, i32 %i.07
+ %1 = load i8* %arrayidx1, align 1
+ %conv26 = zext i8 %1 to i32
+ %add = add nsw i32 %conv26, %conv5
+ %conv3 = trunc i32 %add to i8
+ %arrayidx4 = getelementptr inbounds i8* %c, i32 %i.07
+ store i8 %conv3, i8* %arrayidx4, align 1
+ %inc1 = or i32 %i.07, 1
+ %arrayidx.1 = getelementptr inbounds i8* %a, i32 %inc1
+ %2 = load i8* %arrayidx.1, align 1
+ %conv5.1 = zext i8 %2 to i32
+ %arrayidx1.1 = getelementptr inbounds i8* %b, i32 %inc1
+ %3 = load i8* %arrayidx1.1, align 1
+ %conv26.1 = zext i8 %3 to i32
+ %add.1 = add nsw i32 %conv26.1, %conv5.1
+ %conv3.1 = trunc i32 %add.1 to i8
+ %arrayidx4.1 = getelementptr inbounds i8* %c, i32 %inc1
+ store i8 %conv3.1, i8* %arrayidx4.1, align 1
+ %inc.12 = or i32 %i.07, 2
+ %arrayidx.2 = getelementptr inbounds i8* %a, i32 %inc.12
+ %4 = load i8* %arrayidx.2, align 1
+ %conv5.2 = zext i8 %4 to i32
+ %arrayidx1.2 = getelementptr inbounds i8* %b, i32 %inc.12
+ %5 = load i8* %arrayidx1.2, align 1
+ %conv26.2 = zext i8 %5 to i32
+ %add.2 = add nsw i32 %conv26.2, %conv5.2
+ %conv3.2 = trunc i32 %add.2 to i8
+ %arrayidx4.2 = getelementptr inbounds i8* %c, i32 %inc.12
+ store i8 %conv3.2, i8* %arrayidx4.2, align 1
+ %inc.23 = or i32 %i.07, 3
+ %arrayidx.3 = getelementptr inbounds i8* %a, i32 %inc.23
+ %6 = load i8* %arrayidx.3, align 1
+ %conv5.3 = zext i8 %6 to i32
+ %arrayidx1.3 = getelementptr inbounds i8* %b, i32 %inc.23
+ %7 = load i8* %arrayidx1.3, align 1
+ %conv26.3 = zext i8 %7 to i32
+ %add.3 = add nsw i32 %conv26.3, %conv5.3
+ %conv3.3 = trunc i32 %add.3 to i8
+ %arrayidx4.3 = getelementptr inbounds i8* %c, i32 %inc.23
+ store i8 %conv3.3, i8* %arrayidx4.3, align 1
+ %inc.3 = add nsw i32 %i.07, 4
+ %exitcond.3 = icmp eq i32 %inc.3, 400
+ br i1 %exitcond.3, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+; @multioper tests instructions with multiple IV user operands. We
+; should be able to chain them independently of each other.
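+;
+; Roughly, as a hypothetical C sketch of the loop below:
+;   for (i = 0; i < n; i += 4, p += 4)
+;     { p[0] = i; p[1] = i+1; p[2] = i+2; p[3] = i+3; }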
+;
+; X64: @multioper
+; X64: %for.body
+; X64: movl %{{.*}},4)
+; X64-NEXT: leal 1(
+; X64-NEXT: movl %{{.*}},4)
+; X64-NEXT: leal 2(
+; X64-NEXT: movl %{{.*}},4)
+; X64-NEXT: leal 3(
+; X64-NEXT: movl %{{.*}},4)
+;
+; X32: @multioper
+; X32: %for.body
+; X32: movl %{{.*}},4)
+; X32-NEXT: leal 1(
+; X32-NEXT: movl %{{.*}},4)
+; X32-NEXT: leal 2(
+; X32-NEXT: movl %{{.*}},4)
+; X32-NEXT: leal 3(
+; X32-NEXT: movl %{{.*}},4)
+define void @multioper(i32* %a, i32 %n) nounwind {
+entry:
+ br label %for.body
+
+for.body:
+ %p = phi i32* [ %p.next, %for.body ], [ %a, %entry ]
+ %i = phi i32 [ %inc4, %for.body ], [ 0, %entry ]
+ store i32 %i, i32* %p, align 4
+ %inc1 = or i32 %i, 1
+ %add.ptr.i1 = getelementptr inbounds i32* %p, i32 1
+ store i32 %inc1, i32* %add.ptr.i1, align 4
+ %inc2 = add nsw i32 %i, 2
+ %add.ptr.i2 = getelementptr inbounds i32* %p, i32 2
+ store i32 %inc2, i32* %add.ptr.i2, align 4
+ %inc3 = add nsw i32 %i, 3
+ %add.ptr.i3 = getelementptr inbounds i32* %p, i32 3
+ store i32 %inc3, i32* %add.ptr.i3, align 4
+ %p.next = getelementptr inbounds i32* %p, i32 4
+ %inc4 = add nsw i32 %i, 4
+ %cmp = icmp slt i32 %inc4, %n
+ br i1 %cmp, label %for.body, label %exit
+
+exit:
+ ret void
+}
+
+; @testCmpZero has a ICmpZero LSR use that should not be hidden from
+; LSR. Profitable chains should have more than one nonzero increment
+; anyway.
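+;
+; Roughly, as a hypothetical C sketch of the loop below:
+;   while (d != limit) { *d++ = (char)*(int *)s; s += 4; }
+; The d != limit exit test is the ICmpZero use that must stay visible.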
+;
+; X32: @testCmpZero
+; X32: %for.body82.us
+; X32: dec
+; X32: jne
+define void @testCmpZero(i8* %src, i8* %dst, i32 %srcidx, i32 %dstidx, i32 %len) nounwind ssp {
+entry:
+ %dest0 = getelementptr inbounds i8* %src, i32 %srcidx
+ %source0 = getelementptr inbounds i8* %dst, i32 %dstidx
+ %add.ptr79.us.sum = add i32 %srcidx, %len
+ %lftr.limit = getelementptr i8* %src, i32 %add.ptr79.us.sum
+ br label %for.body82.us
+
+for.body82.us:
+ %dest = phi i8* [ %dest0, %entry ], [ %incdec.ptr91.us, %for.body82.us ]
+ %source = phi i8* [ %source0, %entry ], [ %add.ptr83.us, %for.body82.us ]
+ %0 = bitcast i8* %source to i32*
+ %1 = load i32* %0, align 4
+ %trunc = trunc i32 %1 to i8
+ %add.ptr83.us = getelementptr inbounds i8* %source, i32 4
+ %incdec.ptr91.us = getelementptr inbounds i8* %dest, i32 1
+ store i8 %trunc, i8* %dest, align 1
+ %exitcond = icmp eq i8* %incdec.ptr91.us, %lftr.limit
+ br i1 %exitcond, label %return, label %for.body82.us
+
+return:
+ ret void
+}
diff --git a/test/Transforms/LoopStrengthReduce/X86/ivchain-stress-X86.ll b/test/Transforms/LoopStrengthReduce/X86/ivchain-stress-X86.ll
new file mode 100644
index 0000000..d8e0aa9
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/ivchain-stress-X86.ll
@@ -0,0 +1,96 @@
+; REQUIRES: asserts
+; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 -stress-ivchain | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -O3 -march=x86 -mcpu=core2 -stress-ivchain | FileCheck %s -check-prefix=X32
+
+; @sharedidx is an unrolled variant of this loop:
+; for (unsigned long i = 0; i < len; i += s) {
+; c[i] = a[i] + b[i];
+; }
+; where 's' cannot be folded into the addressing mode.
+;
+; This is not quite profitable to chain. But with -stress-ivchain, we
+; can form three address chains in place of the shared induction
+; variable.
+
+; X64: sharedidx:
+; X64: %for.body.preheader
+; X64-NOT: leal ({{.*}},4)
+; X64: %for.body.1
+
+; X32: sharedidx:
+; X32: %for.body.2
+; X32: add
+; X32: add
+; X32: add
+; X32: add
+; X32: add
+; X32: %for.body.3
+define void @sharedidx(i8* nocapture %a, i8* nocapture %b, i8* nocapture %c, i32 %s, i32 %len) nounwind ssp {
+entry:
+ %cmp8 = icmp eq i32 %len, 0
+ br i1 %cmp8, label %for.end, label %for.body
+
+for.body: ; preds = %entry, %for.body.3
+ %i.09 = phi i32 [ %add5.3, %for.body.3 ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i8* %a, i32 %i.09
+ %0 = load i8* %arrayidx, align 1
+ %conv6 = zext i8 %0 to i32
+ %arrayidx1 = getelementptr inbounds i8* %b, i32 %i.09
+ %1 = load i8* %arrayidx1, align 1
+ %conv27 = zext i8 %1 to i32
+ %add = add nsw i32 %conv27, %conv6
+ %conv3 = trunc i32 %add to i8
+ %arrayidx4 = getelementptr inbounds i8* %c, i32 %i.09
+ store i8 %conv3, i8* %arrayidx4, align 1
+ %add5 = add i32 %i.09, %s
+ %cmp = icmp ult i32 %add5, %len
+ br i1 %cmp, label %for.body.1, label %for.end
+
+for.end: ; preds = %for.body, %for.body.1, %for.body.2, %for.body.3, %entry
+ ret void
+
+for.body.1: ; preds = %for.body
+ %arrayidx.1 = getelementptr inbounds i8* %a, i32 %add5
+ %2 = load i8* %arrayidx.1, align 1
+ %conv6.1 = zext i8 %2 to i32
+ %arrayidx1.1 = getelementptr inbounds i8* %b, i32 %add5
+ %3 = load i8* %arrayidx1.1, align 1
+ %conv27.1 = zext i8 %3 to i32
+ %add.1 = add nsw i32 %conv27.1, %conv6.1
+ %conv3.1 = trunc i32 %add.1 to i8
+ %arrayidx4.1 = getelementptr inbounds i8* %c, i32 %add5
+ store i8 %conv3.1, i8* %arrayidx4.1, align 1
+ %add5.1 = add i32 %add5, %s
+ %cmp.1 = icmp ult i32 %add5.1, %len
+ br i1 %cmp.1, label %for.body.2, label %for.end
+
+for.body.2: ; preds = %for.body.1
+ %arrayidx.2 = getelementptr inbounds i8* %a, i32 %add5.1
+ %4 = load i8* %arrayidx.2, align 1
+ %conv6.2 = zext i8 %4 to i32
+ %arrayidx1.2 = getelementptr inbounds i8* %b, i32 %add5.1
+ %5 = load i8* %arrayidx1.2, align 1
+ %conv27.2 = zext i8 %5 to i32
+ %add.2 = add nsw i32 %conv27.2, %conv6.2
+ %conv3.2 = trunc i32 %add.2 to i8
+ %arrayidx4.2 = getelementptr inbounds i8* %c, i32 %add5.1
+ store i8 %conv3.2, i8* %arrayidx4.2, align 1
+ %add5.2 = add i32 %add5.1, %s
+ %cmp.2 = icmp ult i32 %add5.2, %len
+ br i1 %cmp.2, label %for.body.3, label %for.end
+
+for.body.3: ; preds = %for.body.2
+ %arrayidx.3 = getelementptr inbounds i8* %a, i32 %add5.2
+ %6 = load i8* %arrayidx.3, align 1
+ %conv6.3 = zext i8 %6 to i32
+ %arrayidx1.3 = getelementptr inbounds i8* %b, i32 %add5.2
+ %7 = load i8* %arrayidx1.3, align 1
+ %conv27.3 = zext i8 %7 to i32
+ %add.3 = add nsw i32 %conv27.3, %conv6.3
+ %conv3.3 = trunc i32 %add.3 to i8
+ %arrayidx4.3 = getelementptr inbounds i8* %c, i32 %add5.2
+ store i8 %conv3.3, i8* %arrayidx4.3, align 1
+ %add5.3 = add i32 %add5.2, %s
+ %cmp.3 = icmp ult i32 %add5.3, %len
+ br i1 %cmp.3, label %for.body, label %for.end
+}
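For orientation, the IR above is (roughly) the comment's loop unrolled by four. The sketch below is illustrative only, assuming the same 32-bit index type the test uses, with names invented for readability:

  void sharedidx(unsigned char *a, unsigned char *b, unsigned char *c,
                 unsigned s, unsigned len) {
    /* One induction variable i is shared by a[i], b[i], and c[i].
       With -stress-ivchain, LSR instead grows a separate pointer
       chain for each array, since the non-constant stride s cannot
       be folded into an x86 addressing mode. */
    for (unsigned i = 0; i < len; i += s)
      c[i] = (unsigned char)(a[i] + b[i]);
  }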
diff --git a/test/Transforms/LoopStrengthReduce/X86/lit.local.cfg b/test/Transforms/LoopStrengthReduce/X86/lit.local.cfg
new file mode 100644
index 0000000..da2db5a
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/lit.local.cfg
@@ -0,0 +1,6 @@
+config.suffixes = ['.ll']
+
+targets = set(config.root.targets_to_build.split())
+if 'X86' not in targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/LoopStrengthReduce/addrec-gep.ll b/test/Transforms/LoopStrengthReduce/addrec-gep.ll
new file mode 100644
index 0000000..3e4e369
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/addrec-gep.ll
@@ -0,0 +1,82 @@
+; RUN: opt < %s -loop-reduce -S | FileCheck %s
+; CHECK: bb1:
+; CHECK: load double* [[IV:%[^,]+]]
+; CHECK: store double {{.*}}, double* [[IV]]
+; CHECK: getelementptr double*
+; CHECK-NOT: cast
+; CHECK: br {{.*}} label %bb1
+
+; This test checks several things. The load and store should use the
+; same address instead of having it computed twice, and SCEVExpander should
+; be able to reconstruct the full getelementptr, despite it having a few
+; obstacles set in its way.
+; We only check that the inner loop (bb1-bb2) is "reduced" because LSR
+; currently only operates on inner loops.
+
+target datalayout = "e-p:64:64:64-n32:64"
+
+define void @foo(i64 %n, i64 %m, i64 %o, i64 %q, double* nocapture %p) nounwind {
+entry:
+ %tmp = icmp sgt i64 %n, 0 ; <i1> [#uses=1]
+ br i1 %tmp, label %bb.nph3, label %return
+
+bb.nph: ; preds = %bb2.preheader
+ %tmp1 = mul i64 %tmp16, %i.02 ; <i64> [#uses=1]
+ %tmp2 = mul i64 %tmp19, %i.02 ; <i64> [#uses=1]
+ br label %bb1
+
+bb1: ; preds = %bb2, %bb.nph
+ %j.01 = phi i64 [ %tmp9, %bb2 ], [ 0, %bb.nph ] ; <i64> [#uses=3]
+ %tmp3 = add i64 %j.01, %tmp1 ; <i64> [#uses=1]
+ %tmp4 = add i64 %j.01, %tmp2 ; <i64> [#uses=1]
+ %z0 = add i64 %tmp3, 5203
+ %tmp5 = getelementptr double* %p, i64 %z0 ; <double*> [#uses=1]
+ %tmp6 = load double* %tmp5, align 8 ; <double> [#uses=1]
+ %tmp7 = fdiv double %tmp6, 2.100000e+00 ; <double> [#uses=1]
+ %z1 = add i64 %tmp4, 5203
+ %tmp8 = getelementptr double* %p, i64 %z1 ; <double*> [#uses=1]
+ store double %tmp7, double* %tmp8, align 8
+ %tmp9 = add i64 %j.01, 1 ; <i64> [#uses=2]
+ br label %bb2
+
+bb2: ; preds = %bb1
+ %tmp10 = icmp slt i64 %tmp9, %m ; <i1> [#uses=1]
+ br i1 %tmp10, label %bb1, label %bb2.bb3_crit_edge
+
+bb2.bb3_crit_edge: ; preds = %bb2
+ br label %bb3
+
+bb3: ; preds = %bb2.preheader, %bb2.bb3_crit_edge
+ %tmp11 = add i64 %i.02, 1 ; <i64> [#uses=2]
+ br label %bb4
+
+bb4: ; preds = %bb3
+ %tmp12 = icmp slt i64 %tmp11, %n ; <i1> [#uses=1]
+ br i1 %tmp12, label %bb2.preheader, label %bb4.return_crit_edge
+
+bb4.return_crit_edge: ; preds = %bb4
+ br label %bb4.return_crit_edge.split
+
+bb4.return_crit_edge.split: ; preds = %bb.nph3, %bb4.return_crit_edge
+ br label %return
+
+bb.nph3: ; preds = %entry
+ %tmp13 = icmp sgt i64 %m, 0 ; <i1> [#uses=1]
+ %tmp14 = mul i64 %n, 37 ; <i64> [#uses=1]
+ %tmp15 = mul i64 %tmp14, %o ; <i64> [#uses=1]
+ %tmp16 = mul i64 %tmp15, %q ; <i64> [#uses=1]
+ %tmp17 = mul i64 %n, 37 ; <i64> [#uses=1]
+ %tmp18 = mul i64 %tmp17, %o ; <i64> [#uses=1]
+ %tmp19 = mul i64 %tmp18, %q ; <i64> [#uses=1]
+ br i1 %tmp13, label %bb.nph3.split, label %bb4.return_crit_edge.split
+
+bb.nph3.split: ; preds = %bb.nph3
+ br label %bb2.preheader
+
+bb2.preheader: ; preds = %bb.nph3.split, %bb4
+ %i.02 = phi i64 [ %tmp11, %bb4 ], [ 0, %bb.nph3.split ] ; <i64> [#uses=3]
+ br i1 true, label %bb.nph, label %bb3
+
+return: ; preds = %bb4.return_crit_edge.split, %entry
+ ret void
+}
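Stripped of its critical-edge and preheader blocks, the function above is a sketch of this C nest (assuming, as the IR suggests, that the two identical multiply chains %tmp16 and %tmp19 are meant to compute the same row stride):

  void foo(long n, long m, long o, long q, double *p) {
    long stride = n * 37 * o * q;   /* %tmp16 == %tmp19 */
    for (long i = 0; i < n; ++i)
      for (long j = 0; j < m; ++j)
        /* The load and the store address the same element, so LSR
           should emit a single getelementptr for both. */
        p[stride * i + j + 5203] /= 2.1;
  }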
diff --git a/test/Transforms/LoopStrengthReduce/dg.exp b/test/Transforms/LoopStrengthReduce/dg.exp
deleted file mode 100644
index f200589..0000000
--- a/test/Transforms/LoopStrengthReduce/dg.exp
+++ /dev/null
@@ -1,3 +0,0 @@
-load_lib llvm.exp
-
-RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
diff --git a/test/Transforms/LoopStrengthReduce/dominate-assert.ll b/test/Transforms/LoopStrengthReduce/dominate-assert.ll
new file mode 100644
index 0000000..b87bf62
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/dominate-assert.ll
@@ -0,0 +1,70 @@
+; RUN: opt -loop-reduce %s
+; We used to crash on this one.
+
+declare i8* @_Znwm()
+declare i32 @__gxx_personality_v0(...)
+declare void @g()
+define void @f() {
+bb0:
+ br label %bb1
+bb1:
+ %v0 = phi i64 [ 0, %bb0 ], [ %v1, %bb1 ]
+ %v1 = add nsw i64 %v0, 1
+ br i1 undef, label %bb2, label %bb1
+bb2:
+ %v2 = icmp eq i64 %v0, 0
+ br i1 %v2, label %bb6, label %bb3
+bb3:
+ %v3 = invoke noalias i8* @_Znwm()
+ to label %bb5 unwind label %bb4
+bb4:
+ %v4 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ br label %bb9
+bb5:
+ %v5 = bitcast i8* %v3 to i32**
+ %add.ptr.i = getelementptr inbounds i32** %v5, i64 %v0
+ br label %bb6
+bb6:
+ %v6 = phi i32** [ null, %bb2 ], [ %add.ptr.i, %bb5 ]
+ invoke void @g()
+ to label %bb7 unwind label %bb8
+bb7:
+ unreachable
+bb8:
+ %v7 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ br label %bb9
+bb9:
+ resume { i8*, i32 } zeroinitializer
+}
+
+
+define void @h() {
+bb1:
+ invoke void @g() optsize
+ to label %bb2 unwind label %bb5
+bb2:
+ %arrayctor.cur = phi i8* [ undef, %bb1 ], [ %arrayctor.next, %bb3 ]
+ invoke void @g() optsize
+ to label %bb3 unwind label %bb6
+bb3:
+ %arrayctor.next = getelementptr inbounds i8* %arrayctor.cur, i64 1
+ br label %bb2
+bb4:
+ ret void
+bb5:
+ %tmp = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ invoke void @g() optsize
+ to label %bb4 unwind label %bb7
+bb6:
+ %tmp1 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ %arraydestroy.isempty = icmp eq i8* undef, %arrayctor.cur
+ ret void
+bb7:
+ %lpad.nonloopexit = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* null
+ ret void
+}
diff --git a/test/Transforms/LoopStrengthReduce/exit_compare_live_range.ll b/test/Transforms/LoopStrengthReduce/exit_compare_live_range.ll
index abbfda6..ad4959b 100644
--- a/test/Transforms/LoopStrengthReduce/exit_compare_live_range.ll
+++ b/test/Transforms/LoopStrengthReduce/exit_compare_live_range.ll
@@ -9,7 +9,7 @@ entry:
br label %no_exit
no_exit: ; preds = %no_exit, %entry
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %no_exit ] ; <i32> [#uses=1]
- volatile store float 0.000000e+00, float* %D
+ store volatile float 0.000000e+00, float* %D
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
; CHECK: icmp
; CHECK-NEXT: br i1
diff --git a/test/Transforms/LoopStrengthReduce/ivchain.ll b/test/Transforms/LoopStrengthReduce/ivchain.ll
new file mode 100644
index 0000000..ce7ad19
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/ivchain.ll
@@ -0,0 +1,43 @@
+; RUN: opt < %s -loop-reduce -S | FileCheck %s
+;
+; PR11782: bad cast to AddRecExpr.
+; A sign extend feeds an IVUser and cannot be hoisted into the AddRec.
+; CollectIVChains should bail out on this case.
+
+%struct = type { i8*, i8*, i16, i64, i16, i16, i16, i64, i64, i16, i8*, i64, i64, i64 }
+
+; CHECK: @test
+; CHECK: for.body:
+; CHECK: lsr.iv = phi %struct
+; CHECK: br
+define i32 @test(i8* %h, i32 %more) nounwind uwtable {
+entry:
+ br i1 undef, label %land.end238, label %return
+
+land.end238: ; preds = %if.end229
+ br label %for.body
+
+for.body: ; preds = %sw.epilog, %land.end238
+ %fbh.0 = phi %struct* [ undef, %land.end238 ], [ %incdec.ptr, %sw.epilog ]
+ %column_n.0 = phi i16 [ 0, %land.end238 ], [ %inc601, %sw.epilog ]
+ %conv250 = sext i16 %column_n.0 to i32
+ %add257 = add nsw i32 %conv250, 1
+ %conv258 = trunc i32 %add257 to i16
+ %cmp263 = icmp ult i16 undef, 2
+ br label %if.end388
+
+if.end388: ; preds = %if.then380, %if.else356
+ %ColLength = getelementptr inbounds %struct* %fbh.0, i64 0, i32 7
+ %call405 = call signext i16 @SQLColAttribute(i8* undef, i16 zeroext %conv258, i16 zeroext 1003, i8* null, i16 signext 0, i16* null, i64* %ColLength) nounwind
+ br label %sw.epilog
+
+sw.epilog: ; preds = %sw.bb542, %sw.bb523, %if.end475
+ %inc601 = add i16 %column_n.0, 1
+ %incdec.ptr = getelementptr inbounds %struct* %fbh.0, i64 1
+ br label %for.body
+
+return: ; preds = %entry
+ ret i32 1
+}
+
+declare signext i16 @SQLColAttribute(i8*, i16 zeroext, i16 zeroext, i8*, i16 signext, i16*, i64*)
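In C terms the reduced loop has roughly this shape (a sketch only: the test leaves most operands undef, and the struct layout below is invented). The key point is that the extended-and-truncated column counter feeds a call argument, an IV user that cannot be hoisted into the AddRec for %fbh.0:

  /* Declaration mirroring the IR's @SQLColAttribute on an LP64 target. */
  short SQLColAttribute(char *, unsigned short, unsigned short,
                        char *, short, short *, long *);

  struct fb { char *p, *q; long ColLength; /* other fields elided */ };

  void test(struct fb *fbh) {
    for (short column_n = 0;; ++column_n, ++fbh)
      /* (unsigned short)(column_n + 1) is the sext/trunc chain that
         LSR must not absorb into the pointer chain for fbh. */
      SQLColAttribute(0, (unsigned short)(column_n + 1), 1003,
                      0, 0, 0, &fbh->ColLength);
  }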
diff --git a/test/Transforms/LoopStrengthReduce/lit.local.cfg b/test/Transforms/LoopStrengthReduce/lit.local.cfg
new file mode 100644
index 0000000..19eebc0
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/lit.local.cfg
@@ -0,0 +1 @@
+config.suffixes = ['.ll', '.c', '.cpp']
diff --git a/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll b/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
index 2760915..96904c6 100644
--- a/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
+++ b/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
@@ -1,15 +1,15 @@
; RUN: opt -loop-reduce -S < %s | FileCheck %s
; PR9939
-; LSR should property handle the post-inc offset when folding the
+; LSR should properly handle the post-inc offset when folding the
; non-IV operand of an icmp into the IV.
-; CHECK: %5 = sub i64 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
-; CHECK: %6 = lshr i64 %5, 1
-; CHECK: %7 = mul i64 %6, 2
+; CHECK: %4 = sub i64 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
+; CHECK: %5 = lshr i64 %4, 1
+; CHECK: %6 = mul i64 %5, 2
; CHECK: br label %for.body
; CHECK: for.body:
-; CHECK: %lsr.iv2 = phi i64 [ %lsr.iv.next, %for.body ], [ %7, %for.body.lr.ph ]
+; CHECK: %lsr.iv2 = phi i64 [ %lsr.iv.next, %for.body ], [ %6, %for.body.lr.ph ]
; CHECK: %lsr.iv.next = add i64 %lsr.iv2, -2
; CHECK: %lsr.iv.next3 = inttoptr i64 %lsr.iv.next to i16*
; CHECK: %cmp27 = icmp eq i16* %lsr.iv.next3, null
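Conceptually (a sketch, not the exact PR9939 source), the fold being tested rewrites a compare against a loop-invariant bound into a compare of the post-incremented IV against zero, folding the bound into the IV's start value:

  /* Before LSR: the exit test compares the IV against the non-IV
     operand n. */
  void zero(short *p, long n) {
    for (long i = 0; i != n; ++i)
      p[i] = 0;
  }
  /* After LSR, per the CHECK lines above, the remaining byte count
     starts at n * sizeof(short), steps by -2, and the post-inc value
     is tested against zero; PR9939 tracked LSR mishandling that
     post-inc offset when rewriting the compare. */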
diff --git a/test/Transforms/LoopStrengthReduce/pr12018.ll b/test/Transforms/LoopStrengthReduce/pr12018.ll
new file mode 100644
index 0000000..ee7b1e8
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/pr12018.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -loop-reduce
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
+
+%struct.nsTArray = type { i8 }
+%struct.nsTArrayHeader = type { i32 }
+
+define void @_Z6foobarR8nsTArray(%struct.nsTArray* %aValues, i32 %foo, %struct.nsTArrayHeader* %bar) nounwind {
+entry:
+ br label %for.body
+
+for.body: ; preds = %_ZN8nsTArray9ElementAtEi.exit, %entry
+ %i.06 = phi i32 [ %add, %_ZN8nsTArray9ElementAtEi.exit ], [ 0, %entry ]
+ %call.i = call %struct.nsTArrayHeader* @_ZN8nsTArray4Hdr2Ev() nounwind
+ %add.ptr.i = getelementptr inbounds %struct.nsTArrayHeader* %call.i, i32 1
+ %tmp = bitcast %struct.nsTArrayHeader* %add.ptr.i to %struct.nsTArray*
+ %arrayidx = getelementptr inbounds %struct.nsTArray* %tmp, i32 %i.06
+ %add = add nsw i32 %i.06, 1
+ call void @llvm.dbg.value(metadata !{%struct.nsTArray* %aValues}, i64 0, metadata !0) nounwind
+ br label %_ZN8nsTArray9ElementAtEi.exit
+
+_ZN8nsTArray9ElementAtEi.exit: ; preds = %for.body
+ %arrayidx.i = getelementptr inbounds %struct.nsTArray* %tmp, i32 %add
+ call void @_ZN11nsTArray15ComputeDistanceERKS_Rd(%struct.nsTArray* %arrayidx, %struct.nsTArray* %arrayidx.i) nounwind
+ %cmp = icmp slt i32 %add, %foo
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end: ; preds = %_ZN8nsTArray9ElementAtEi.exit
+ ret void
+}
+
+declare void @_ZN11nsTArray15ComputeDistanceERKS_Rd(%struct.nsTArray*, %struct.nsTArray*)
+
+declare %struct.nsTArrayHeader* @_ZN8nsTArray4Hdr2Ev()
+
+declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
+
+!0 = metadata !{i32 786689} ; [ DW_TAG_arg_variable ]
diff --git a/test/Transforms/LoopStrengthReduce/pr12048.ll b/test/Transforms/LoopStrengthReduce/pr12048.ll
new file mode 100644
index 0000000..7e0f2ad
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/pr12048.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -loop-reduce
+
+define void @resolve_name() nounwind uwtable ssp {
+ br label %while.cond40.preheader
+while.cond132.while.cond.loopexit_crit_edge:
+ br label %while.cond40.preheader
+while.cond40.preheader:
+ br label %while.cond40
+while.cond40:
+ %indvars.iv194 = phi i8* [ null, %while.cond40.preheader ], [ %scevgep, %while.body51 ]
+ %tmp.1 = phi i8* [ undef, %while.cond40.preheader ], [ %incdec.ptr, %while.body51 ]
+ switch i8 undef, label %while.body51 [
+ i8 0, label %if.then59
+ ]
+while.body51: ; preds = %land.end50
+ %incdec.ptr = getelementptr inbounds i8* %tmp.1, i64 1
+ %scevgep = getelementptr i8* %indvars.iv194, i64 1
+ br label %while.cond40
+if.then59: ; preds = %while.end
+ br i1 undef, label %if.then64, label %if.end113
+if.then64: ; preds = %if.then59
+ %incdec.ptr88.tmp.2 = select i1 undef, i8* undef, i8* undef
+ br label %if.end113
+if.end113: ; preds = %if.then64, %if.then59
+ %tmp.4 = phi i8* [ %incdec.ptr88.tmp.2, %if.then64 ], [ undef, %if.then59 ]
+ %tmp.4195 = ptrtoint i8* %tmp.4 to i64
+ br label %while.cond132.preheader
+while.cond132.preheader: ; preds = %if.end113
+ %cmp133173 = icmp eq i8* %tmp.1, %tmp.4
+ br i1 %cmp133173, label %while.cond40.preheader, label %while.body139.lr.ph
+while.body139.lr.ph: ; preds = %while.cond132.preheader
+ %scevgep198 = getelementptr i8* %indvars.iv194, i64 0
+ %scevgep198199 = ptrtoint i8* %scevgep198 to i64
+ br label %while.body139
+while.body139: ; preds = %while.body139, %while.body139.lr.ph
+ %start_of_var.0177 = phi i8* [ %tmp.1, %while.body139.lr.ph ], [ null, %while.body139 ]
+ br i1 undef, label %while.cond132.while.cond.loopexit_crit_edge, label %while.body139
+}
diff --git a/test/Transforms/LoopStrengthReduce/pr3399.ll b/test/Transforms/LoopStrengthReduce/pr3399.ll
index b809007..26c5002 100644
--- a/test/Transforms/LoopStrengthReduce/pr3399.ll
+++ b/test/Transforms/LoopStrengthReduce/pr3399.ll
@@ -13,7 +13,7 @@ bb: ; preds = %bb5, %bb5.thread
bb1: ; preds = %bb
%l_2.0.reg2mem.0 = sub i32 0, %indvar ; <i32> [#uses=1]
- %0 = volatile load i32* @g_53, align 4 ; <i32> [#uses=1]
+ %0 = load volatile i32* @g_53, align 4 ; <i32> [#uses=1]
%1 = trunc i32 %l_2.0.reg2mem.0 to i16 ; <i16> [#uses=1]
%2 = trunc i32 %0 to i16 ; <i16> [#uses=1]
%3 = mul i16 %2, %1 ; <i16> [#uses=1]
diff --git a/test/Transforms/LoopStrengthReduce/preserve-gep-loop-variant.ll b/test/Transforms/LoopStrengthReduce/preserve-gep-loop-variant.ll
new file mode 100644
index 0000000..f90d030
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/preserve-gep-loop-variant.ll
@@ -0,0 +1,42 @@
+; RUN: opt < %s -loop-reduce -S | FileCheck %s
+; CHECK-NOT: {{inttoptr|ptrtoint}}
+; CHECK: scevgep
+; CHECK-NOT: {{inttoptr|ptrtoint}}
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128-n32:64"
+
+; SCEVExpander (used here by LSR) shouldn't need inttoptr/ptrtoint to expand an address.
+
+define void @foo(i8* %p) nounwind {
+entry:
+ br i1 true, label %bb.nph, label %for.end
+
+for.cond:
+ %phitmp = icmp slt i64 %inc, 20
+ br i1 %phitmp, label %for.body, label %for.cond.for.end_crit_edge
+
+for.cond.for.end_crit_edge:
+ br label %for.end
+
+bb.nph:
+ br label %for.body
+
+for.body:
+ %storemerge1 = phi i64 [ %inc, %for.cond ], [ 0, %bb.nph ]
+ %call = tail call i64 @bar() nounwind
+ %call2 = tail call i64 @car() nounwind
+ %conv = trunc i64 %call2 to i8
+ %conv3 = sext i8 %conv to i64
+ %add = add nsw i64 %call, %storemerge1
+ %add4 = add nsw i64 %add, %conv3
+ %arrayidx = getelementptr inbounds i8* %p, i64 %add4
+ store i8 0, i8* %arrayidx
+ %inc = add nsw i64 %storemerge1, 1
+ br label %for.cond
+
+for.end:
+ ret void
+}
+
+declare i64 @bar()
+
+declare i64 @car()
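As a C sketch (illustrative; signed char is used to make the sext of the truncated call result explicit), the function above is:

  long bar(void);
  long car(void);

  void foo(char *p) {
    /* The GEP index mixes the IV with two opaque call results; LSR
       should still expand the address as a scevgep rather than
       falling back to ptrtoint/inttoptr arithmetic. */
    for (long i = 0; i < 20; ++i)
      p[bar() + i + (long)(signed char)car()] = 0;
  }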