Diffstat (limited to 'test/Transforms/LoopVectorize')
 test/Transforms/LoopVectorize/12-12-11-if-conv.ll | 44
 test/Transforms/LoopVectorize/2012-10-20-infloop.ll | 46
 test/Transforms/LoopVectorize/2012-10-22-isconsec.ll | 2
 test/Transforms/LoopVectorize/ARM/arm-unroll.ll | 32
 test/Transforms/LoopVectorize/ARM/gcc-examples.ll | 60
 test/Transforms/LoopVectorize/ARM/lit.local.cfg | 6
 test/Transforms/LoopVectorize/ARM/mul-cast-vect.ll | 114
 test/Transforms/LoopVectorize/ARM/width-detect.ll | 52
 test/Transforms/LoopVectorize/X86/avx1.ll | 4
 test/Transforms/LoopVectorize/X86/constant-vector-operand.ll | 28
 test/Transforms/LoopVectorize/X86/conversion-cost.ll | 11
 test/Transforms/LoopVectorize/X86/cost-model.ll | 5
 test/Transforms/LoopVectorize/X86/gcc-examples.ll | 27
 test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll | 28
 test/Transforms/LoopVectorize/X86/no-vector.ll | 22
 test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll | 52
 test/Transforms/LoopVectorize/X86/parallel-loops.ll | 114
 test/Transforms/LoopVectorize/X86/reduction-crash.ll | 35
 test/Transforms/LoopVectorize/X86/small-size.ll | 170
 test/Transforms/LoopVectorize/X86/struct-store.ll | 27
 test/Transforms/LoopVectorize/X86/unroll-small-loops.ll | 50
 test/Transforms/LoopVectorize/X86/unroll_selection.ll | 71
 test/Transforms/LoopVectorize/X86/vector-scalar-select-cost.ll | 66
 test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll | 150
 test/Transforms/LoopVectorize/bzip_reverse_loops.ll | 71
 test/Transforms/LoopVectorize/calloc.ll | 53
 test/Transforms/LoopVectorize/cast-induction.ll | 30
 test/Transforms/LoopVectorize/cpp-new-array.ll | 4
 test/Transforms/LoopVectorize/dbg.value.ll | 70
 test/Transforms/LoopVectorize/flags.ll | 2
 test/Transforms/LoopVectorize/float-reduction.ll | 29
 test/Transforms/LoopVectorize/gcc-examples.ll | 57
 test/Transforms/LoopVectorize/global_alias.ll | 1078
 test/Transforms/LoopVectorize/i8-induction.ll | 35
 test/Transforms/LoopVectorize/if-conv-crash.ll | 39
 test/Transforms/LoopVectorize/if-conversion-reduction.ll | 38
 test/Transforms/LoopVectorize/if-conversion.ll | 108
 test/Transforms/LoopVectorize/increment.ll | 2
 test/Transforms/LoopVectorize/induction_plus.ll | 5
 test/Transforms/LoopVectorize/intrinsic.ll | 935
 test/Transforms/LoopVectorize/lcssa-crash.ll | 29
 test/Transforms/LoopVectorize/no_int_induction.ll | 33
 test/Transforms/LoopVectorize/nofloat.ll | 29
 test/Transforms/LoopVectorize/non-const-n.ll | 2
 test/Transforms/LoopVectorize/nsw-crash.ll | 25
 test/Transforms/LoopVectorize/phi-hang.ll | 29
 test/Transforms/LoopVectorize/ptr_loops.ll | 74
 test/Transforms/LoopVectorize/read-only.ll | 2
 test/Transforms/LoopVectorize/reduction.ll | 95
 test/Transforms/LoopVectorize/runtime-check.ll | 6
 test/Transforms/LoopVectorize/same-base-access.ll | 110
 test/Transforms/LoopVectorize/scalar-select.ll | 2
 test/Transforms/LoopVectorize/simple-unroll.ll | 39
 test/Transforms/LoopVectorize/small-loop.ll | 2
 test/Transforms/LoopVectorize/start-non-zero.ll | 2
 test/Transforms/LoopVectorize/struct_access.ll | 50
 test/Transforms/LoopVectorize/vectorize-once.ll | 75
 test/Transforms/LoopVectorize/write-only.ll | 2
 58 files changed, 4336 insertions(+), 42 deletions(-)
diff --git a/test/Transforms/LoopVectorize/12-12-11-if-conv.ll b/test/Transforms/LoopVectorize/12-12-11-if-conv.ll
new file mode 100644
index 0000000..2dd7fe3
--- /dev/null
+++ b/test/Transforms/LoopVectorize/12-12-11-if-conv.ll
@@ -0,0 +1,44 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -enable-if-conversion -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+;CHECK: @foo
+;CHECK: icmp eq <4 x i32>
+;CHECK: select <4 x i1>
+;CHECK: ret i32
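+; Roughly, in C (a sketch reconstructed from the IR below, not from the
+; original source):
+;   for (int i = 0; i < x; ++i)
+;     A[i] = A[i] ? (i + 45) + i * t : 9;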
+define i32 @foo(i32 %x, i32 %t, i32* nocapture %A) nounwind uwtable ssp {
+entry:
+ %cmp10 = icmp sgt i32 %x, 0
+ br i1 %cmp10, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %if.end
+ %indvars.iv = phi i64 [ %indvars.iv.next, %if.end ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4, !tbaa !0
+ %tobool = icmp eq i32 %0, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then: ; preds = %for.body
+ %1 = add nsw i64 %indvars.iv, 45
+ %2 = trunc i64 %indvars.iv to i32
+ %mul = mul nsw i32 %2, %t
+ %3 = trunc i64 %1 to i32
+ %add1 = add nsw i32 %3, %mul
+ br label %if.end
+
+if.end: ; preds = %for.body, %if.then
+ %z.0 = phi i32 [ %add1, %if.then ], [ 9, %for.body ]
+ store i32 %z.0, i32* %arrayidx, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %x
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %if.end, %entry
+ ret i32 undef
+}
+
+!0 = metadata !{metadata !"int", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/Transforms/LoopVectorize/2012-10-20-infloop.ll b/test/Transforms/LoopVectorize/2012-10-20-infloop.ll
index 0176c9a..aa7cc0e 100644
--- a/test/Transforms/LoopVectorize/2012-10-20-infloop.ll
+++ b/test/Transforms/LoopVectorize/2012-10-20-infloop.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -dce
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce
; Check that we don't fall into an infinite loop.
define void @test() nounwind {
@@ -25,3 +25,47 @@ for.body: ; preds = %for.body, %entry
for.end: ; preds = %for.body
unreachable
}
+
+;PR14701
+define void @start_model_rare() nounwind uwtable ssp {
+entry:
+ br i1 undef, label %return, label %if.end
+
+if.end: ; preds = %entry
+ br i1 undef, label %cond.false, label %cond.true
+
+cond.true: ; preds = %if.end
+ unreachable
+
+cond.false: ; preds = %if.end
+ br i1 undef, label %cond.false28, label %cond.true20
+
+cond.true20: ; preds = %cond.false
+ unreachable
+
+cond.false28: ; preds = %cond.false
+ br label %for.body40
+
+for.body40: ; preds = %for.inc50, %cond.false28
+ %indvars.iv123 = phi i64 [ 3, %cond.false28 ], [ %indvars.iv.next124, %for.inc50 ]
+ %step.0121 = phi i32 [ 1, %cond.false28 ], [ %step.1, %for.inc50 ]
+ br i1 undef, label %if.then46, label %for.inc50
+
+if.then46: ; preds = %for.body40
+ %inc47 = add nsw i32 %step.0121, 1
+ br label %for.inc50
+
+for.inc50: ; preds = %if.then46, %for.body40
+ %k.1 = phi i32 [ undef, %for.body40 ], [ %inc47, %if.then46 ]
+ %step.1 = phi i32 [ %step.0121, %for.body40 ], [ %inc47, %if.then46 ]
+ %indvars.iv.next124 = add i64 %indvars.iv123, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next124 to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 256
+ br i1 %exitcond, label %for.end52, label %for.body40
+
+for.end52: ; preds = %for.inc50
+ unreachable
+
+return: ; preds = %entry
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/2012-10-22-isconsec.ll b/test/Transforms/LoopVectorize/2012-10-22-isconsec.ll
index 2516e24..405582c 100644
--- a/test/Transforms/LoopVectorize/2012-10-22-isconsec.ll
+++ b/test/Transforms/LoopVectorize/2012-10-22-isconsec.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -dce -force-vector-width=4
+; RUN: opt < %s -loop-vectorize -dce -force-vector-unroll=1 -force-vector-width=4
; Check that we don't crash.
diff --git a/test/Transforms/LoopVectorize/ARM/arm-unroll.ll b/test/Transforms/LoopVectorize/ARM/arm-unroll.ll
new file mode 100644
index 0000000..c8d307f
--- /dev/null
+++ b/test/Transforms/LoopVectorize/ARM/arm-unroll.ll
@@ -0,0 +1,32 @@
+; RUN: opt < %s -loop-vectorize -mtriple=thumbv7-apple-ios3.0.0 -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -mtriple=thumbv7-apple-ios3.0.0 -mcpu=swift -S | FileCheck %s --check-prefix=SWIFT
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+target triple = "thumbv7-apple-ios3.0.0"
+
+;CHECK: @foo
+;CHECK: load <4 x i32>
+;CHECK-NOT: load <4 x i32>
+;CHECK: ret
+;SWIFT: @foo
+;SWIFT: load <4 x i32>
+;SWIFT: load <4 x i32>
+;SWIFT: ret
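+; Roughly, in C (reconstructed from the IR below):
+;   int sum = 0;
+;   for (int i = 0; i < n; ++i) sum += A[i];
+;   return sum;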
+define i32 @foo(i32* nocapture %A, i32 %n) nounwind readonly ssp {
+ %1 = icmp sgt i32 %n, 0
+ br i1 %1, label %.lr.ph, label %._crit_edge
+
+.lr.ph: ; preds = %0, %.lr.ph
+ %i.02 = phi i32 [ %5, %.lr.ph ], [ 0, %0 ]
+ %sum.01 = phi i32 [ %4, %.lr.ph ], [ 0, %0 ]
+ %2 = getelementptr inbounds i32* %A, i32 %i.02
+ %3 = load i32* %2, align 4
+ %4 = add nsw i32 %3, %sum.01
+ %5 = add nsw i32 %i.02, 1
+ %exitcond = icmp eq i32 %5, %n
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+ %sum.0.lcssa = phi i32 [ 0, %0 ], [ %4, %.lr.ph ]
+ ret i32 %sum.0.lcssa
+}
diff --git a/test/Transforms/LoopVectorize/ARM/gcc-examples.ll b/test/Transforms/LoopVectorize/ARM/gcc-examples.ll
new file mode 100644
index 0000000..6a68e81
--- /dev/null
+++ b/test/Transforms/LoopVectorize/ARM/gcc-examples.ll
@@ -0,0 +1,60 @@
+; RUN: opt < %s -loop-vectorize -mtriple=thumbv7-apple-ios3.0.0 -mcpu=swift -S -dce | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+target triple = "thumbv7-apple-ios3.0.0"
+
+@b = common global [2048 x i32] zeroinitializer, align 16
+@c = common global [2048 x i32] zeroinitializer, align 16
+@a = common global [2048 x i32] zeroinitializer, align 16
+
+; Select VF = 4.
+;CHECK: @example1
+;CHECK: load <4 x i32>
+;CHECK: add nsw <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret void
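+; Roughly, in C (reconstructed from the IR below):
+;   for (int i = 0; i < 256; ++i) a[i] = b[i] + c[i];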
+define void @example1() nounwind uwtable ssp {
+ br label %1
+
+; <label>:1 ; preds = %1, %0
+ %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
+ %2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %3 = load i32* %2, align 4
+ %4 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %5 = load i32* %4, align 4
+ %6 = add nsw i32 %5, %3
+ %7 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ store i32 %6, i32* %7, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 256
+ br i1 %exitcond, label %8, label %1
+
+; <label>:8 ; preds = %1
+ ret void
+}
+
+;CHECK: @example10b
+;CHECK: load <4 x i16>
+;CHECK: sext <4 x i16>
+;CHECK: store <4 x i32>
+;CHECK: ret void
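+; Roughly, in C (reconstructed from the IR below; the sext comes from the
+; short-to-int conversion):
+;   for (int i = 0; i < 1024; ++i) ia[i] = sb[i];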
+define void @example10b(i16* noalias nocapture %sa, i16* noalias nocapture %sb, i16* noalias nocapture %sc, i32* noalias nocapture %ia, i32* noalias nocapture %ib, i32* noalias nocapture %ic) nounwind uwtable ssp {
+ br label %1
+
+; <label>:1 ; preds = %1, %0
+ %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
+ %2 = getelementptr inbounds i16* %sb, i64 %indvars.iv
+ %3 = load i16* %2, align 2
+ %4 = sext i16 %3 to i32
+ %5 = getelementptr inbounds i32* %ia, i64 %indvars.iv
+ store i32 %4, i32* %5, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %6, label %1
+
+; <label>:6 ; preds = %1
+ ret void
+}
+
diff --git a/test/Transforms/LoopVectorize/ARM/lit.local.cfg b/test/Transforms/LoopVectorize/ARM/lit.local.cfg
new file mode 100644
index 0000000..cb77b09
--- /dev/null
+++ b/test/Transforms/LoopVectorize/ARM/lit.local.cfg
@@ -0,0 +1,6 @@
+config.suffixes = ['.ll', '.c', '.cpp']
+
+targets = set(config.root.targets_to_build.split())
+if 'ARM' not in targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/LoopVectorize/ARM/mul-cast-vect.ll b/test/Transforms/LoopVectorize/ARM/mul-cast-vect.ll
new file mode 100644
index 0000000..d2e3de2
--- /dev/null
+++ b/test/Transforms/LoopVectorize/ARM/mul-cast-vect.ll
@@ -0,0 +1,114 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=armv7-linux-gnueabihf -mcpu=cortex-a9 | FileCheck --check-prefix=COST %s
+; To see the assembly output: llc -mcpu=cortex-a9 < %s | FileCheck --check-prefix=ASM %s
+; The ASM lines below are for reference only; tests in that direction should
+; go to test/CodeGen/ARM.
+
+; ModuleID = 'arm.ll'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7--linux-gnueabihf"
+
+%T216 = type <2 x i16>
+%T232 = type <2 x i32>
+%T264 = type <2 x i64>
+
+%T416 = type <4 x i16>
+%T432 = type <4 x i32>
+%T464 = type <4 x i64>
+
+define void @direct(%T432* %loadaddr, %T432* %loadaddr2, %T432* %storeaddr) {
+; COST: function 'direct':
+ %v0 = load %T432* %loadaddr
+; ASM: vld1.64
+ %v1 = load %T432* %loadaddr2
+; ASM: vld1.64
+ %r3 = mul %T432 %v0, %v1
+; COST: cost of 2 for instruction: {{.*}} mul <4 x i32>
+; ASM: vmul.i32
+ store %T432 %r3, %T432* %storeaddr
+; ASM: vst1.64
+ ret void
+}
+
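+; In ups1632 and upu1632 below, both operands are extended and then
+; multiplied; NEON implements that pattern as a single vmull instruction,
+; which is why those extensions are expected to be free (cost 0).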
+define void @ups1632(%T416* %loadaddr, %T416* %loadaddr2, %T432* %storeaddr) {
+; COST: function 'ups1632':
+ %v0 = load %T416* %loadaddr
+; ASM: vldr
+ %v1 = load %T416* %loadaddr2
+; ASM: vldr
+ %r1 = sext %T416 %v0 to %T432
+ %r2 = sext %T416 %v1 to %T432
+; COST: cost of 0 for instruction: {{.*}} sext <4 x i16> {{.*}} to <4 x i32>
+ %r3 = mul %T432 %r1, %r2
+; COST: cost of 2 for instruction: {{.*}} mul <4 x i32>
+; ASM: vmull.s16
+ store %T432 %r3, %T432* %storeaddr
+; ASM: vst1.64
+ ret void
+}
+
+define void @upu1632(%T416* %loadaddr, %T416* %loadaddr2, %T432* %storeaddr) {
+; COST: function 'upu1632':
+ %v0 = load %T416* %loadaddr
+; ASM: vldr
+ %v1 = load %T416* %loadaddr2
+; ASM: vldr
+ %r1 = zext %T416 %v0 to %T432
+ %r2 = zext %T416 %v1 to %T432
+; COST: cost of 0 for instruction: {{.*}} zext <4 x i16> {{.*}} to <4 x i32>
+ %r3 = mul %T432 %r1, %r2
+; COST: cost of 2 for instruction: {{.*}} mul <4 x i32>
+; ASM: vmull.u16
+ store %T432 %r3, %T432* %storeaddr
+; ASM: vst1.64
+ ret void
+}
+
+define void @ups3264(%T232* %loadaddr, %T232* %loadaddr2, %T264* %storeaddr) {
+; COST: function 'ups3264':
+ %v0 = load %T232* %loadaddr
+; ASM: vldr
+ %v1 = load %T232* %loadaddr2
+; ASM: vldr
+ %r3 = mul %T232 %v0, %v1
+; ASM: vmul.i32
+; COST: cost of 1 for instruction: {{.*}} mul <2 x i32>
+ %st = sext %T232 %r3 to %T264
+; ASM: vmovl.s32
+; COST: cost of 1 for instruction: {{.*}} sext <2 x i32> {{.*}} to <2 x i64>
+ store %T264 %st, %T264* %storeaddr
+; ASM: vst1.64
+ ret void
+}
+
+define void @upu3264(%T232* %loadaddr, %T232* %loadaddr2, %T264* %storeaddr) {
+; COST: function 'upu3264':
+ %v0 = load %T232* %loadaddr
+; ASM: vldr
+ %v1 = load %T232* %loadaddr2
+; ASM: vldr
+ %r3 = mul %T232 %v0, %v1
+; ASM: vmul.i32
+; COST: cost of 1 for instruction: {{.*}} mul <2 x i32>
+ %st = zext %T232 %r3 to %T264
+; ASM: vmovl.u32
+; COST: cost of 1 for instruction: {{.*}} zext <2 x i32> {{.*}} to <2 x i64>
+ store %T264 %st, %T264* %storeaddr
+; ASM: vst1.64
+ ret void
+}
+
+define void @dn3216(%T432* %loadaddr, %T432* %loadaddr2, %T416* %storeaddr) {
+; COST: function 'dn3216':
+ %v0 = load %T432* %loadaddr
+; ASM: vld1.64
+ %v1 = load %T432* %loadaddr2
+; ASM: vld1.64
+ %r3 = mul %T432 %v0, %v1
+; ASM: vmul.i32
+; COST: cost of 2 for instruction: {{.*}} mul <4 x i32>
+ %st = trunc %T432 %r3 to %T416
+; ASM: vmovn.i32
+; COST: cost of 1 for instruction: {{.*}} trunc <4 x i32> {{.*}} to <4 x i16>
+ store %T416 %st, %T416* %storeaddr
+; ASM: vstr
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/ARM/width-detect.ll b/test/Transforms/LoopVectorize/ARM/width-detect.ll
new file mode 100644
index 0000000..c0795b6
--- /dev/null
+++ b/test/Transforms/LoopVectorize/ARM/width-detect.ll
@@ -0,0 +1,52 @@
+; RUN: opt < %s -loop-vectorize -mtriple=thumbv7-apple-ios3.0.0 -S | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+target triple = "thumbv7-apple-ios3.0.0"
+
+;CHECK:foo_F64
+;CHECK: <2 x double>
+;CHECK:ret
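+; Roughly, in C (reconstructed from the IR below):
+;   double prod = 0.0;
+;   for (int i = 0; i < n; ++i) prod *= A[i];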
+define double @foo_F64(double* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
+ %1 = icmp sgt i32 %n, 0
+ br i1 %1, label %.lr.ph, label %._crit_edge
+
+.lr.ph: ; preds = %0, %.lr.ph
+ %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
+ %prod.01 = phi double [ %4, %.lr.ph ], [ 0.000000e+00, %0 ]
+ %2 = getelementptr inbounds double* %A, i64 %indvars.iv
+ %3 = load double* %2, align 8
+ %4 = fmul fast double %prod.01, %3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+ %prod.0.lcssa = phi double [ 0.000000e+00, %0 ], [ %4, %.lr.ph ]
+ ret double %prod.0.lcssa
+}
+
+;CHECK:foo_I8
+;CHECK: xor <16 x i8>
+;CHECK:ret
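+; Roughly, in C (reconstructed from the IR below):
+;   char red = 0;
+;   for (int i = 0; i < n; ++i) red ^= A[i];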
+define signext i8 @foo_I8(i8* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
+ %1 = icmp sgt i32 %n, 0
+ br i1 %1, label %.lr.ph, label %._crit_edge
+
+.lr.ph: ; preds = %0, %.lr.ph
+ %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
+ %red.01 = phi i8 [ %4, %.lr.ph ], [ 0, %0 ]
+ %2 = getelementptr inbounds i8* %A, i64 %indvars.iv
+ %3 = load i8* %2, align 1
+ %4 = xor i8 %3, %red.01
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+ %red.0.lcssa = phi i8 [ 0, %0 ], [ %4, %.lr.ph ]
+ ret i8 %red.0.lcssa
+}
+
+
diff --git a/test/Transforms/LoopVectorize/X86/avx1.ll b/test/Transforms/LoopVectorize/X86/avx1.ll
index a2d176a..6c0366e 100644
--- a/test/Transforms/LoopVectorize/X86/avx1.ll
+++ b/test/Transforms/LoopVectorize/X86/avx1.ll
@@ -27,7 +27,7 @@ define i32 @read_mod_write_single_ptr(float* nocapture %a, i32 %n) nounwind uwta
;CHECK: @read_mod_i64
-;CHECK: load <8 x i64>
+;CHECK: load <2 x i64>
;CHECK: ret i32
define i32 @read_mod_i64(i64* nocapture %a, i32 %n) nounwind uwtable ssp {
%1 = icmp sgt i32 %n, 0
@@ -37,7 +37,7 @@ define i32 @read_mod_i64(i64* nocapture %a, i32 %n) nounwind uwtable ssp {
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds i64* %a, i64 %indvars.iv
%3 = load i64* %2, align 4
- %4 = mul i64 %3, 3
+ %4 = add i64 %3, 3
store i64 %4, i64* %2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
diff --git a/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll b/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll
new file mode 100644
index 0000000..6c92440
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll
@@ -0,0 +1,28 @@
+; RUN: opt -mtriple=x86_64-apple-darwin -mcpu=core2 -loop-vectorize -dce -instcombine -S < %s | FileCheck %s
+
+@B = common global [1024 x i32] zeroinitializer, align 16
+@A = common global [1024 x i32] zeroinitializer, align 16
+
+; We used to not vectorize this loop because the shift was deemed too
+; expensive. Now that we differentiate shift costs based on the operand's
+; value kind, we will vectorize this loop.
+; CHECK: ashr <4 x i32>
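+; Roughly, in C (reconstructed from the IR below):
+;   for (int i = 0; i < 1024; ++i) A[i] = B[i] >> 3;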
+define void @f() {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @B, i64 0, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %shl = ashr i32 %0, 3
+ %arrayidx2 = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ store i32 %shl, i32* %arrayidx2, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/X86/conversion-cost.ll b/test/Transforms/LoopVectorize/X86/conversion-cost.ll
index 8f1bb54..760d28d 100644
--- a/test/Transforms/LoopVectorize/X86/conversion-cost.ll
+++ b/test/Transforms/LoopVectorize/X86/conversion-cost.ll
@@ -4,7 +4,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
target triple = "x86_64-apple-macosx10.8.0"
;CHECK: @conversion_cost1
-;CHECK: store <2 x i8>
+;CHECK: store <32 x i8>
;CHECK: ret
define i32 @conversion_cost1(i32 %n, i8* nocapture %A, float* nocapture %B) nounwind uwtable ssp {
%1 = icmp sgt i32 %n, 3
@@ -33,11 +33,10 @@ define i32 @conversion_cost2(i32 %n, i8* nocapture %A, float* nocapture %B) noun
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 9, %0 ]
- %2 = add nsw i64 %indvars.iv, 3
- %3 = trunc i64 %2 to i32
- %4 = sitofp i32 %3 to float
- %5 = getelementptr inbounds float* %B, i64 %indvars.iv
- store float %4, float* %5, align 4
+ %add = add nsw i64 %indvars.iv, 3
+ %tofp = sitofp i64 %add to float
+ %gep = getelementptr inbounds float* %B, i64 %indvars.iv
+ store float %tofp, float* %gep, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
diff --git a/test/Transforms/LoopVectorize/X86/cost-model.ll b/test/Transforms/LoopVectorize/X86/cost-model.ll
index 628f991..b7f479a 100644
--- a/test/Transforms/LoopVectorize/X86/cost-model.ll
+++ b/test/Transforms/LoopVectorize/X86/cost-model.ll
@@ -8,8 +8,11 @@ target triple = "x86_64-apple-macosx10.8.0"
@d = common global [2048 x i32] zeroinitializer, align 16
@a = common global [2048 x i32] zeroinitializer, align 16
+; The program below gathers and scatters data, so we had better not
+; vectorize it.
;CHECK: cost_model_1
-;CHECK: <4 x i32>
+;CHECK-NOT: <2 x i32>
+;CHECK-NOT: <4 x i32>
+;CHECK-NOT: <8 x i32>
;CHECK: ret void
define void @cost_model_1() nounwind uwtable noinline ssp {
entry:
diff --git a/test/Transforms/LoopVectorize/X86/gcc-examples.ll b/test/Transforms/LoopVectorize/X86/gcc-examples.ll
index 574c529..d2d0eac 100644
--- a/test/Transforms/LoopVectorize/X86/gcc-examples.ll
+++ b/test/Transforms/LoopVectorize/X86/gcc-examples.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 -dce -instcombine -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 -force-vector-unroll=0 -dce -instcombine -S | FileCheck %s -check-prefix=UNROLL
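+; (-force-vector-unroll=0 leaves the unroll factor to the cost model rather
+; than forcing a particular value.)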
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
@@ -9,10 +10,19 @@ target triple = "x86_64-apple-macosx10.8.0"
; Select VF = 8;
;CHECK: @example1
-;CHECK: load <8 x i32>
-;CHECK: add nsw <8 x i32>
-;CHECK: store <8 x i32>
+;CHECK: load <4 x i32>
+;CHECK: add nsw <4 x i32>
+;CHECK: store <4 x i32>
;CHECK: ret void
+
+;UNROLL: @example1
+;UNROLL: load <4 x i32>
+;UNROLL: load <4 x i32>
+;UNROLL: add nsw <4 x i32>
+;UNROLL: add nsw <4 x i32>
+;UNROLL: store <4 x i32>
+;UNROLL: store <4 x i32>
+;UNROLL: ret void
define void @example1() nounwind uwtable ssp {
br label %1
@@ -34,13 +44,18 @@ define void @example1() nounwind uwtable ssp {
ret void
}
-
-; Select VF=4 because sext <8 x i1> to <8 x i32> is expensive.
+; Select VF=4 because sext <8 x i1> to <8 x i32> is expensive.
;CHECK: @example10b
;CHECK: load <4 x i16>
;CHECK: sext <4 x i16>
;CHECK: store <4 x i32>
;CHECK: ret void
+;UNROLL: @example10b
+;UNROLL: load <4 x i16>
+;UNROLL: load <4 x i16>
+;UNROLL: store <4 x i32>
+;UNROLL: store <4 x i32>
+;UNROLL: ret void
define void @example10b(i16* noalias nocapture %sa, i16* noalias nocapture %sb, i16* noalias nocapture %sc, i32* noalias nocapture %ia, i32* noalias nocapture %ib, i32* noalias nocapture %ic) nounwind uwtable ssp {
br label %1
diff --git a/test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll b/test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll
new file mode 100644
index 0000000..186fba8
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll
@@ -0,0 +1,28 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -vectorizer-min-trip-count=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; CHECK: <4 x float>
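+; Roughly: for (int i = 0; i < 8; ++i) a[i] += 1.0f;
+; Only 8 iterations, so the -vectorizer-min-trip-count=1 flag above is what
+; permits vectorizing such a short loop.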
+define void @trivial_loop(float* nocapture %a) nounwind uwtable optsize {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float* %a, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %add = fadd float %0, 1.000000e+00
+ store float %add, float* %arrayidx, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 8
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!0 = metadata !{metadata !"float", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/Transforms/LoopVectorize/X86/no-vector.ll b/test/Transforms/LoopVectorize/X86/no-vector.ll
new file mode 100644
index 0000000..692eec9
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/no-vector.ll
@@ -0,0 +1,22 @@
+; RUN: opt -S -mtriple=i386-unknown-freebsd -mcpu=i486 -loop-vectorize < %s
+
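+; An i486 has no SIMD units, so this xor reduction must stay scalar; the
+; test only checks that the vectorizer does not crash. Roughly, in C:
+;   int r = 0;
+;   for (int i = 0; i < len; ++i) r ^= s[i];
+;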
+define i32 @PR14639(i8* nocapture %s, i32 %len) nounwind {
+entry:
+ %cmp4 = icmp sgt i32 %len, 0
+ br i1 %cmp4, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %i.06 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %r.05 = phi i32 [ %xor, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i8* %s, i32 %i.06
+ %0 = load i8* %arrayidx, align 1
+ %conv = sext i8 %0 to i32
+ %xor = xor i32 %conv, %r.05
+ %inc = add nsw i32 %i.06, 1
+ %exitcond = icmp eq i32 %inc, %len
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %r.0.lcssa = phi i32 [ 0, %entry ], [ %xor, %for.body ]
+ ret i32 %r.0.lcssa
+}
diff --git a/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll b/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll
new file mode 100644
index 0000000..452d0df
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll
@@ -0,0 +1,52 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; The parallel loop has been invalidated by the new memory accesses
+; introduced by reg2mem (Loop::isParallel() now returns false). Ensure the
+; loop is no longer vectorized.
+
+;CHECK-NOT: <4 x i32>
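+; The underlying loop is the same as in parallel-loops.ll:
+;   for (int i = 0; i < 512; ++i) { a[a[i]] = b[i]; a[i] = b[i+1]; }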
+define void @parallel_loop(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
+entry:
+ %indvars.iv.next.reg2mem = alloca i64
+ %indvars.iv.reg2mem = alloca i64
+ %"reg2mem alloca point" = bitcast i32 0 to i32
+ store i64 0, i64* %indvars.iv.reg2mem
+ br label %for.body
+
+for.body: ; preds = %for.body.for.body_crit_edge, %entry
+ %indvars.iv.reload = load i64* %indvars.iv.reg2mem
+ %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv.reload
+ %0 = load i32* %arrayidx, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv.reload
+ %1 = load i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %idxprom3 = sext i32 %1 to i64
+ %arrayidx4 = getelementptr inbounds i32* %a, i64 %idxprom3
+ store i32 %0, i32* %arrayidx4, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %indvars.iv.next = add i64 %indvars.iv.reload, 1
+ ; A new store without the parallel metadata here:
+ store i64 %indvars.iv.next, i64* %indvars.iv.next.reg2mem
+ %indvars.iv.next.reload1 = load i64* %indvars.iv.next.reg2mem
+ %arrayidx6 = getelementptr inbounds i32* %b, i64 %indvars.iv.next.reload1
+ %2 = load i32* %arrayidx6, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ store i32 %2, i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %indvars.iv.next.reload = load i64* %indvars.iv.next.reg2mem
+ %lftr.wideiv = trunc i64 %indvars.iv.next.reload to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 512
+ br i1 %exitcond, label %for.end, label %for.body.for.body_crit_edge, !llvm.loop.parallel !3
+
+for.body.for.body_crit_edge: ; preds = %for.body
+ %indvars.iv.next.reload2 = load i64* %indvars.iv.next.reg2mem
+ store i64 %indvars.iv.next.reload2, i64* %indvars.iv.reg2mem
+ br label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!0 = metadata !{metadata !"int", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!3 = metadata !{metadata !3}
diff --git a/test/Transforms/LoopVectorize/X86/parallel-loops.ll b/test/Transforms/LoopVectorize/X86/parallel-loops.ll
new file mode 100644
index 0000000..f648722
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/parallel-loops.ll
@@ -0,0 +1,114 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; A tricky loop:
+;
+; void loop(int *a, int *b) {
+; for (int i = 0; i < 512; ++i) {
+; a[a[i]] = b[i];
+; a[i] = b[i+1];
+; }
+;}
+
+;CHECK: @loop
+;CHECK-NOT: <4 x i32>
+define void @loop(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4, !tbaa !0
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %1 = load i32* %arrayidx2, align 4, !tbaa !0
+ %idxprom3 = sext i32 %1 to i64
+ %arrayidx4 = getelementptr inbounds i32* %a, i64 %idxprom3
+ store i32 %0, i32* %arrayidx4, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %arrayidx6 = getelementptr inbounds i32* %b, i64 %indvars.iv.next
+ %2 = load i32* %arrayidx6, align 4, !tbaa !0
+ store i32 %2, i32* %arrayidx2, align 4, !tbaa !0
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 512
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+; The same loop with parallel loop metadata added to the loop branch
+; and the memory instructions.
+
+;CHECK: @parallel_loop
+;CHECK: <4 x i32>
+define void @parallel_loop(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %1 = load i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %idxprom3 = sext i32 %1 to i64
+ %arrayidx4 = getelementptr inbounds i32* %a, i64 %idxprom3
+ ; This store might have originated from inlining a function with a parallel
+ ; loop. Its metadata refers to a list that also includes the original
+ ; loop's reference (!4).
+ store i32 %0, i32* %arrayidx4, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !5
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %arrayidx6 = getelementptr inbounds i32* %b, i64 %indvars.iv.next
+ %2 = load i32* %arrayidx6, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ store i32 %2, i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 512
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop.parallel !3
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+; The same loop with illegal parallel loop metadata: the memory
+; accesses refer to a different loop's identifier.
+
+;CHECK: @mixed_metadata
+;CHECK-NOT: <4 x i32>
+
+define void @mixed_metadata(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !6
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %1 = load i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !6
+ %idxprom3 = sext i32 %1 to i64
+ %arrayidx4 = getelementptr inbounds i32* %a, i64 %idxprom3
+ ; This refers to the loop marked with !7, which is not the loop we are
+ ; currently in. It should prevent the loop from being detected as parallel.
+ store i32 %0, i32* %arrayidx4, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !7
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %arrayidx6 = getelementptr inbounds i32* %b, i64 %indvars.iv.next
+ %2 = load i32* %arrayidx6, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !6
+ store i32 %2, i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !6
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 512
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop.parallel !6
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!0 = metadata !{metadata !"int", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
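+; Each loop identifier below is a self-referential metadata node (for
+; example, !3 refers to itself), which keeps the identifiers distinct.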
+!3 = metadata !{metadata !3}
+!4 = metadata !{metadata !4}
+!5 = metadata !{metadata !3, metadata !4}
+!6 = metadata !{metadata !6}
+!7 = metadata !{metadata !7}
diff --git a/test/Transforms/LoopVectorize/X86/reduction-crash.ll b/test/Transforms/LoopVectorize/X86/reduction-crash.ll
new file mode 100644
index 0000000..f580846
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/reduction-crash.ll
@@ -0,0 +1,35 @@
+; RUN: opt -S -loop-vectorize -mcpu=prescott < %s | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
+target triple = "i386-apple-darwin"
+
+; PR15344
+define void @test1(float* nocapture %arg, i32 %arg1) nounwind {
+; CHECK: @test1
+; CHECK: preheader
+; CHECK: insertelement <2 x double> zeroinitializer, double %tmp, i32 0
+; CHECK: vector.memcheck
+
+bb:
+ br label %bb2
+
+bb2: ; preds = %bb
+ %tmp = load double* null, align 8
+ br i1 undef, label %bb3, label %bb12
+
+bb3: ; preds = %bb3, %bb2
+ %tmp4 = phi double [ %tmp9, %bb3 ], [ %tmp, %bb2 ]
+ %tmp5 = phi i32 [ %tmp8, %bb3 ], [ 0, %bb2 ]
+ %tmp6 = getelementptr inbounds [16 x double]* undef, i32 0, i32 %tmp5
+ %tmp7 = load double* %tmp6, align 4
+ %tmp8 = add nsw i32 %tmp5, 1
+ %tmp9 = fadd fast double %tmp4, undef
+ %tmp10 = getelementptr inbounds float* %arg, i32 %tmp5
+ store float undef, float* %tmp10, align 4
+ %tmp11 = icmp eq i32 %tmp8, %arg1
+ br i1 %tmp11, label %bb12, label %bb3
+
+bb12: ; preds = %bb3, %bb2
+ %tmp13 = phi double [ %tmp, %bb2 ], [ %tmp9, %bb3 ]
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/X86/small-size.ll b/test/Transforms/LoopVectorize/X86/small-size.ll
new file mode 100644
index 0000000..f390b33
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/small-size.ll
@@ -0,0 +1,170 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+@b = common global [2048 x i32] zeroinitializer, align 16
+@c = common global [2048 x i32] zeroinitializer, align 16
+@a = common global [2048 x i32] zeroinitializer, align 16
+@G = common global [32 x [1024 x i32]] zeroinitializer, align 16
+@ub = common global [1024 x i32] zeroinitializer, align 16
+@uc = common global [1024 x i32] zeroinitializer, align 16
+@d = common global [2048 x i32] zeroinitializer, align 16
+@fa = common global [1024 x float] zeroinitializer, align 16
+@fb = common global [1024 x float] zeroinitializer, align 16
+@ic = common global [1024 x i32] zeroinitializer, align 16
+@da = common global [1024 x float] zeroinitializer, align 16
+@db = common global [1024 x float] zeroinitializer, align 16
+@dc = common global [1024 x float] zeroinitializer, align 16
+@dd = common global [1024 x float] zeroinitializer, align 16
+@dj = common global [1024 x i32] zeroinitializer, align 16
+
+; We can vectorize this loop without a scalar tail: the trip count (256) is
+; a multiple of the vectorization factor.
+;CHECK: @example1
+;CHECK: load <4 x i32>
+;CHECK: add nsw <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret void
+define void @example1() optsize {
+ br label %1
+
+; <label>:1 ; preds = %1, %0
+ %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
+ %2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %3 = load i32* %2, align 4
+ %4 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %5 = load i32* %4, align 4
+ %6 = add nsw i32 %5, %3
+ %7 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ store i32 %6, i32* %7, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 256
+ br i1 %exitcond, label %8, label %1
+
+; <label>:8 ; preds = %1
+ ret void
+}
+
+; Can't vectorize in 'optsize' mode because we need a tail.
+;CHECK: @example2
+;CHECK-NOT: store <4 x i32>
+;CHECK: ret void
+define void @example2(i32 %n, i32 %x) optsize {
+ %1 = icmp sgt i32 %n, 0
+ br i1 %1, label %.lr.ph5, label %.preheader
+
+..preheader_crit_edge: ; preds = %.lr.ph5
+ %phitmp = sext i32 %n to i64
+ br label %.preheader
+
+.preheader: ; preds = %..preheader_crit_edge, %0
+ %i.0.lcssa = phi i64 [ %phitmp, %..preheader_crit_edge ], [ 0, %0 ]
+ %2 = icmp eq i32 %n, 0
+ br i1 %2, label %._crit_edge, label %.lr.ph
+
+.lr.ph5: ; preds = %0, %.lr.ph5
+ %indvars.iv6 = phi i64 [ %indvars.iv.next7, %.lr.ph5 ], [ 0, %0 ]
+ %3 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv6
+ store i32 %x, i32* %3, align 4
+ %indvars.iv.next7 = add i64 %indvars.iv6, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next7 to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %..preheader_crit_edge, label %.lr.ph5
+
+.lr.ph: ; preds = %.preheader, %.lr.ph
+ %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ %i.0.lcssa, %.preheader ]
+ %.02 = phi i32 [ %4, %.lr.ph ], [ %n, %.preheader ]
+ %4 = add nsw i32 %.02, -1
+ %5 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %6 = load i32* %5, align 4
+ %7 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %8 = load i32* %7, align 4
+ %9 = and i32 %8, %6
+ %10 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ store i32 %9, i32* %10, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %11 = icmp eq i32 %4, 0
+ br i1 %11, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %.preheader
+ ret void
+}
+
+; N is unknown, we need a tail. Can't vectorize.
+;CHECK: @example3
+;CHECK-NOT: <4 x i32>
+;CHECK: ret void
+define void @example3(i32 %n, i32* noalias nocapture %p, i32* noalias nocapture %q) optsize {
+ %1 = icmp eq i32 %n, 0
+ br i1 %1, label %._crit_edge, label %.lr.ph
+
+.lr.ph: ; preds = %0, %.lr.ph
+ %.05 = phi i32 [ %2, %.lr.ph ], [ %n, %0 ]
+ %.014 = phi i32* [ %5, %.lr.ph ], [ %p, %0 ]
+ %.023 = phi i32* [ %3, %.lr.ph ], [ %q, %0 ]
+ %2 = add nsw i32 %.05, -1
+ %3 = getelementptr inbounds i32* %.023, i64 1
+ %4 = load i32* %.023, align 16
+ %5 = getelementptr inbounds i32* %.014, i64 1
+ store i32 %4, i32* %.014, align 16
+ %6 = icmp eq i32 %2, 0
+ br i1 %6, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+ ret void
+}
+
+
+; We can't vectorize this one because we need a runtime ptr check.
+;CHECK: @example23
+;CHECK-NOT: <4 x i32>
+;CHECK: ret void
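+; Roughly, in C (reconstructed from the IR below):
+;   unsigned short *s = src; int *d = dst;
+;   for (int i = 0; i < 256; ++i) *d++ = *s++ << 7;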
+define void @example23(i16* nocapture %src, i32* nocapture %dst) optsize {
+ br label %1
+
+; <label>:1 ; preds = %1, %0
+ %.04 = phi i16* [ %src, %0 ], [ %2, %1 ]
+ %.013 = phi i32* [ %dst, %0 ], [ %6, %1 ]
+ %i.02 = phi i32 [ 0, %0 ], [ %7, %1 ]
+ %2 = getelementptr inbounds i16* %.04, i64 1
+ %3 = load i16* %.04, align 2
+ %4 = zext i16 %3 to i32
+ %5 = shl nuw nsw i32 %4, 7
+ %6 = getelementptr inbounds i32* %.013, i64 1
+ store i32 %5, i32* %.013, align 4
+ %7 = add nsw i32 %i.02, 1
+ %exitcond = icmp eq i32 %7, 256
+ br i1 %exitcond, label %8, label %1
+
+; <label>:8 ; preds = %1
+ ret void
+}
+
+
+; We CAN vectorize this example because the pointers are marked as noalias.
+;CHECK: @example23b
+;CHECK: <4 x i32>
+;CHECK: ret void
+define void @example23b(i16* noalias nocapture %src, i32* noalias nocapture %dst) optsize {
+ br label %1
+
+; <label>:1 ; preds = %1, %0
+ %.04 = phi i16* [ %src, %0 ], [ %2, %1 ]
+ %.013 = phi i32* [ %dst, %0 ], [ %6, %1 ]
+ %i.02 = phi i32 [ 0, %0 ], [ %7, %1 ]
+ %2 = getelementptr inbounds i16* %.04, i64 1
+ %3 = load i16* %.04, align 2
+ %4 = zext i16 %3 to i32
+ %5 = shl nuw nsw i32 %4, 7
+ %6 = getelementptr inbounds i32* %.013, i64 1
+ store i32 %5, i32* %.013, align 4
+ %7 = add nsw i32 %i.02, 1
+ %exitcond = icmp eq i32 %7, 256
+ br i1 %exitcond, label %8, label %1
+
+; <label>:8 ; preds = %1
+ ret void
+}
+
+
diff --git a/test/Transforms/LoopVectorize/X86/struct-store.ll b/test/Transforms/LoopVectorize/X86/struct-store.ll
new file mode 100644
index 0000000..a995e43
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/struct-store.ll
@@ -0,0 +1,27 @@
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-unknown-linux-gnu -S
+
+; Make sure we are not crashing on this one.
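+; The loop stores a first-class struct value; the vectorizer cannot widen
+; that, so it must bail out instead of crashing.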
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@glbl = external global [16 x { i64, i64 }], align 16
+
+declare void @fn()
+
+define void @test() {
+entry:
+ br label %loop
+
+loop:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %loop ], [ 0, %entry ]
+ %tmp = getelementptr inbounds [16 x { i64, i64 }]* @glbl, i64 0, i64 %indvars.iv
+ store { i64, i64 } { i64 ptrtoint (void ()* @fn to i64), i64 0 }, { i64, i64 }* %tmp, align 16
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp ne i32 %lftr.wideiv, 16
+ br i1 %exitcond, label %loop, label %exit
+
+exit:
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll b/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll
new file mode 100644
index 0000000..ef63a14
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll
@@ -0,0 +1,50 @@
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -force-vector-width=4 -force-vector-unroll=0 -dce -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+;CHECK: @foo
+;CHECK: load <4 x i32>
+;CHECK-NOT: load <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK-NOT: store <4 x i32>
+;CHECK: ret
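+; @foo has a known trip count of only 100, so unrolling is presumably not
+; considered worthwhile, hence the CHECK-NOTs; @bar, whose trip count is
+; unknown, does get unrolled by two.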
+define i32 @foo(i32* nocapture %A) nounwind uwtable ssp {
+ br label %1
+
+; <label>:1 ; preds = %1, %0
+ %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
+ %2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %3 = load i32* %2, align 4
+ %4 = add nsw i32 %3, 6
+ store i32 %4, i32* %2, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 100
+ br i1 %exitcond, label %5, label %1
+
+; <label>:5 ; preds = %1
+ ret i32 undef
+}
+
+;CHECK: @bar
+;CHECK: store <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+define i32 @bar(i32* nocapture %A, i32 %n) nounwind uwtable ssp {
+ %1 = icmp sgt i32 %n, 0
+ br i1 %1, label %.lr.ph, label %._crit_edge
+
+.lr.ph: ; preds = %0, %.lr.ph
+ %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
+ %2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %3 = load i32* %2, align 4
+ %4 = add nsw i32 %3, 6
+ store i32 %4, i32* %2, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+ ret i32 undef
+}
diff --git a/test/Transforms/LoopVectorize/X86/unroll_selection.ll b/test/Transforms/LoopVectorize/X86/unroll_selection.ll
new file mode 100644
index 0000000..2d7b663
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/unroll_selection.ll
@@ -0,0 +1,71 @@
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -force-vector-width=4 -force-vector-unroll=0 -dce -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; Don't unroll when we have register pressure.
+;CHECK: reg_pressure
+;CHECK: load <4 x double>
+;CHECK-NOT: load <4 x double>
+;CHECK: store <4 x double>
+;CHECK-NOT: store <4 x double>
+;CHECK: ret
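+; The body below is one long floating-point expression with many values live
+; at once; unrolling would roughly double the register demand.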
+define void @reg_pressure(double* nocapture %A, i32 %n) nounwind uwtable ssp {
+ %1 = sext i32 %n to i64
+ br label %2
+
+; <label>:2 ; preds = %2, %0
+ %indvars.iv = phi i64 [ %indvars.iv.next, %2 ], [ %1, %0 ]
+ %3 = getelementptr inbounds double* %A, i64 %indvars.iv
+ %4 = load double* %3, align 8
+ %5 = fadd double %4, 3.000000e+00
+ %6 = fmul double %4, 2.000000e+00
+ %7 = fadd double %5, %6
+ %8 = fadd double %7, 2.000000e+00
+ %9 = fmul double %8, 5.000000e-01
+ %10 = fadd double %6, %9
+ %11 = fsub double %10, %5
+ %12 = fadd double %4, %11
+ %13 = fdiv double %8, %12
+ %14 = fmul double %13, %8
+ %15 = fmul double %6, %14
+ %16 = fmul double %5, %15
+ %17 = fadd double %16, -3.000000e+00
+ %18 = fsub double %4, %5
+ %19 = fadd double %6, %18
+ %20 = fadd double %13, %19
+ %21 = fadd double %20, %17
+ %22 = fadd double %21, 3.000000e+00
+ %23 = fmul double %4, %22
+ store double %23, double* %3, align 8
+ %indvars.iv.next = add i64 %indvars.iv, -1
+ %24 = trunc i64 %indvars.iv to i32
+ %25 = icmp eq i32 %24, 0
+ br i1 %25, label %26, label %2
+
+; <label>:26 ; preds = %2
+ ret void
+}
+
+; This is a small loop. Unroll it twice.
+;CHECK: small_loop
+;CHECK: xor
+;CHECK: xor
+;CHECK: ret
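+; Roughly, in C (reconstructed from the IR below, with A a short*):
+;   for (unsigned long i = 0; i < n; ++i) A[i] ^= 3;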
+define void @small_loop(i16* nocapture %A, i64 %n) nounwind uwtable ssp {
+ %1 = icmp eq i64 %n, 0
+ br i1 %1, label %._crit_edge, label %.lr.ph
+
+.lr.ph: ; preds = %0, %.lr.ph
+ %i.01 = phi i64 [ %5, %.lr.ph ], [ 0, %0 ]
+ %2 = getelementptr inbounds i16* %A, i64 %i.01
+ %3 = load i16* %2, align 2
+ %4 = xor i16 %3, 3
+ store i16 %4, i16* %2, align 2
+ %5 = add i64 %i.01, 1
+ %exitcond = icmp eq i64 %5, %n
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/X86/vector-scalar-select-cost.ll b/test/Transforms/LoopVectorize/X86/vector-scalar-select-cost.ll
new file mode 100644
index 0000000..3b3a787
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/vector-scalar-select-cost.ll
@@ -0,0 +1,66 @@
+; RUN: opt < %s -loop-vectorize -mcpu=core2 -debug-only=loop-vectorize 2>&1 -S | FileCheck %s
+; REQUIRES: asserts
+; Make sure we use the right select kind when querying select costs.
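+; A select on a loop-invariant scalar i1 condition stays uniform after
+; vectorization, while a select on a per-lane condition becomes a vector
+; blend, so the two kinds must be costed differently.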
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+@a = common global [2048 x i32] zeroinitializer, align 16
+@b = common global [2048 x i32] zeroinitializer, align 16
+@c = common global [2048 x i32] zeroinitializer, align 16
+
+; CHECK: Checking a loop in "scalarselect"
+define void @scalarselect(i1 %cond) {
+ br label %1
+
+; <label>:1
+ %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
+ %2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %3 = load i32* %2, align 4
+ %4 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %5 = load i32* %4, align 4
+ %6 = add nsw i32 %5, %3
+ %7 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+
+; A scalar select has a cost of 1 on core2
+; CHECK: cost of 1 for VF 2 {{.*}} select i1 %cond, i32 %6, i32 0
+
+ %sel = select i1 %cond, i32 %6, i32 zeroinitializer
+ store i32 %sel, i32* %7, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 256
+ br i1 %exitcond, label %8, label %1
+
+; <label>:8
+ ret void
+}
+
+; CHECK: Checking a loop in "vectorselect"
+define void @vectorselect(i1 %cond) {
+ br label %1
+
+; <label>:1
+ %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
+ %2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %3 = load i32* %2, align 4
+ %4 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %5 = load i32* %4, align 4
+ %6 = add nsw i32 %5, %3
+ %7 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %8 = icmp ult i64 %indvars.iv, 8
+
+; A vector select has a cost of 4 on core2
+; CHECK: cost of 4 for VF 2 {{.*}} select i1 %8, i32 %6, i32 0
+
+ %sel = select i1 %8, i32 %6, i32 zeroinitializer
+ store i32 %sel, i32* %7, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 256
+ br i1 %exitcond, label %9, label %1
+
+; <label>:9
+ ret void
+}
+
diff --git a/test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll b/test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll
new file mode 100644
index 0000000..59bb8d0
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll
@@ -0,0 +1,150 @@
+; RUN: opt -loop-vectorize -mcpu=corei7-avx -debug -S < %s 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+%0 = type { %0*, %1 }
+%1 = type { i8*, i32 }
+
+@p = global [2048 x [8 x i32*]] zeroinitializer, align 16
+@q = global [2048 x i16] zeroinitializer, align 16
+@r = global [2048 x i16] zeroinitializer, align 16
+
+; Tests for widest type
+; Ensure that we count the pointer store in the first test case: it is a
+; consecutive store of a vector of pointers, so it should count towards the
+; widest vector type.
+;
+; CHECK: test_consecutive_store
+; CHECK: The Widest type: 64 bits
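+; Roughly: T *v = *p2; while (p0 != p1) *p0++ = v;
+; (a consecutive store of 64-bit pointers, hence the 64-bit widest type)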
+define void @test_consecutive_store(%0**, %0**, %0** nocapture) nounwind ssp uwtable align 2 {
+ %4 = load %0** %2, align 8
+ %5 = icmp eq %0** %0, %1
+ br i1 %5, label %12, label %6
+
+; <label>:6 ; preds = %3
+ br label %7
+
+; <label>:7 ; preds = %7, %6
+ %8 = phi %0** [ %0, %6 ], [ %9, %7 ]
+ store %0* %4, %0** %8, align 8
+ %9 = getelementptr inbounds %0** %8, i64 1
+ %10 = icmp eq %0** %9, %1
+ br i1 %10, label %11, label %7
+
+; <label>:11 ; preds = %7
+ br label %12
+
+; <label>:12 ; preds = %11, %3
+ ret void
+}
+
+; However, if a set of pointers is not stored to consecutive memory, we do
+; NOT count the store towards the widest vector type.
+; In the test case below, the stored pointers are computed from i16 values,
+; so the widest type should be i16.
+; int* p[2048][8];
+; short q[2048];
+; for (int y = 0; y < 8; ++y)
+; for (int i = 0; i < 1024; ++i) {
+; p[i][y] = (int*) (1 + q[i]);
+; }
+; CHECK: test_nonconsecutive_store
+; CHECK: The Widest type: 16 bits
+define void @test_nonconsecutive_store() nounwind ssp uwtable {
+ br label %1
+
+; <label>:1 ; preds = %14, %0
+ %2 = phi i64 [ 0, %0 ], [ %15, %14 ]
+ br label %3
+
+; <label>:3 ; preds = %3, %1
+ %4 = phi i64 [ 0, %1 ], [ %11, %3 ]
+ %5 = getelementptr inbounds [2048 x i16]* @q, i64 0, i64 %4
+ %6 = load i16* %5, align 2
+ %7 = sext i16 %6 to i64
+ %8 = add i64 %7, 1
+ %9 = inttoptr i64 %8 to i32*
+ %10 = getelementptr inbounds [2048 x [8 x i32*]]* @p, i64 0, i64 %4, i64 %2
+ store i32* %9, i32** %10, align 8
+ %11 = add i64 %4, 1
+ %12 = trunc i64 %11 to i32
+ %13 = icmp ne i32 %12, 1024
+ br i1 %13, label %3, label %14
+
+; <label>:14 ; preds = %3
+ %15 = add i64 %2, 1
+ %16 = trunc i64 %15 to i32
+ %17 = icmp ne i32 %16, 8
+ br i1 %17, label %1, label %18
+
+; <label>:18 ; preds = %14
+ ret void
+}
+
+
+@ia = global [1024 x i32*] zeroinitializer, align 16
+@ib = global [1024 x i32] zeroinitializer, align 16
+@ic = global [1024 x i8] zeroinitializer, align 16
+@p2 = global [2048 x [8 x i32*]] zeroinitializer, align 16
+@q2 = global [2048 x i16] zeroinitializer, align 16
+
+;; Now we check the same rules for loads. We should take consecutive loads of
+;; pointer types into account.
+; CHECK: test_consecutive_ptr_load
+; CHECK: The Widest type: 64 bits
+define i8 @test_consecutive_ptr_load() nounwind readonly ssp uwtable {
+ br label %1
+
+; <label>:1 ; preds = %1, %0
+ %2 = phi i64 [ 0, %0 ], [ %10, %1 ]
+ %3 = phi i8 [ 0, %0 ], [ %9, %1 ]
+ %4 = getelementptr inbounds [1024 x i32*]* @ia, i32 0, i64 %2
+ %5 = load i32** %4, align 4
+ %6 = ptrtoint i32* %5 to i64
+ %7 = trunc i64 %6 to i8
+ %8 = add i8 %3, 1
+ %9 = add i8 %7, %8
+ %10 = add i64 %2, 1
+ %11 = icmp ne i64 %10, 1024
+ br i1 %11, label %1, label %12
+
+; <label>:12 ; preds = %1
+ %13 = phi i8 [ %9, %1 ]
+ ret i8 %13
+}
+
+;; However, we should not take non-consecutive loads of pointers into account.
+; CHECK: test_nonconsecutive_ptr_load
+; CHECK: The Widest type: 16 bits
+define void @test_nonconsecutive_ptr_load() nounwind ssp uwtable {
+ br label %1
+
+; <label>:1 ; preds = %13, %0
+ %2 = phi i64 [ 0, %0 ], [ %14, %13 ]
+ br label %3
+
+; <label>:3 ; preds = %3, %1
+ %4 = phi i64 [ 0, %1 ], [ %10, %3 ]
+ %5 = getelementptr inbounds [2048 x [8 x i32*]]* @p2, i64 0, i64 %4, i64 %2
+ %6 = getelementptr inbounds [2048 x i16]* @q2, i64 0, i64 %4
+ %7 = load i32** %5, align 2
+ %8 = ptrtoint i32* %7 to i64
+ %9 = trunc i64 %8 to i16
+ store i16 %9, i16* %6, align 8
+ %10 = add i64 %4, 1
+ %11 = trunc i64 %10 to i32
+ %12 = icmp ne i32 %11, 1024
+ br i1 %12, label %3, label %13
+
+; <label>:13 ; preds = %3
+ %14 = add i64 %2, 1
+ %15 = trunc i64 %14 to i32
+ %16 = icmp ne i32 %15, 8
+ br i1 %16, label %1, label %17
+
+; <label>:17 ; preds = %13
+ ret void
+}
+
diff --git a/test/Transforms/LoopVectorize/bzip_reverse_loops.ll b/test/Transforms/LoopVectorize/bzip_reverse_loops.ll
new file mode 100644
index 0000000..431e422
--- /dev/null
+++ b/test/Transforms/LoopVectorize/bzip_reverse_loops.ll
@@ -0,0 +1,71 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S -enable-if-conversion | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+;CHECK: fc
+;CHECK: load <4 x i16>
+;CHECK-NEXT: shufflevector <4 x i16>
+;CHECK: select <4 x i1>
+;CHECK: store <4 x i16>
+;CHECK: ret
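+; Walks the buffer backwards; roughly, in C (reconstructed from the IR):
+;   do { unsigned v = *--p; *p = v < size ? 0 : v - size; } while (--n);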
+define void @fc(i16* nocapture %p, i32 %n, i32 %size) nounwind uwtable ssp {
+entry:
+ br label %do.body
+
+do.body: ; preds = %cond.end, %entry
+ %n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %cond.end ]
+ %p.addr.0 = phi i16* [ %p, %entry ], [ %incdec.ptr, %cond.end ]
+ %incdec.ptr = getelementptr inbounds i16* %p.addr.0, i64 -1
+ %0 = load i16* %incdec.ptr, align 2, !tbaa !0
+ %conv = zext i16 %0 to i32
+ %cmp = icmp ult i32 %conv, %size
+ br i1 %cmp, label %cond.end, label %cond.true
+
+cond.true: ; preds = %do.body
+ %sub = sub i32 %conv, %size
+ %phitmp = trunc i32 %sub to i16
+ br label %cond.end
+
+cond.end: ; preds = %do.body, %cond.true
+ %cond = phi i16 [ %phitmp, %cond.true ], [ 0, %do.body ]
+ store i16 %cond, i16* %incdec.ptr, align 2, !tbaa !0
+ %dec = add i32 %n.addr.0, -1
+ %tobool = icmp eq i32 %dec, 0
+ br i1 %tobool, label %do.end, label %do.body
+
+do.end: ; preds = %cond.end
+ ret void
+}
+
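+; Roughly, in C (sketch; parameter names follow the IR):
+;
+;   void example1(int *a, int n, int wsize) {
+;     do {
+;       a--;
+;       *a = (*a < wsize) ? 0 : (*a - wsize);
+;     } while (--n);
+;   }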
+;CHECK: example1
+;CHECK: load <4 x i32>
+;CHECK-NEXT: shufflevector <4 x i32>
+;CHECK: select <4 x i1>
+;CHECK: store <4 x i32>
+;CHECK: ret
+define void @example1(i32* nocapture %a, i32 %n, i32 %wsize) nounwind uwtable ssp {
+entry:
+ br label %do.body
+
+do.body: ; preds = %do.body, %entry
+ %n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %do.body ]
+ %p.0 = phi i32* [ %a, %entry ], [ %incdec.ptr, %do.body ]
+ %incdec.ptr = getelementptr inbounds i32* %p.0, i64 -1
+ %0 = load i32* %incdec.ptr, align 4, !tbaa !3
+ %cmp = icmp slt i32 %0, %wsize
+ %sub = sub nsw i32 %0, %wsize
+ %cond = select i1 %cmp, i32 0, i32 %sub
+ store i32 %cond, i32* %incdec.ptr, align 4, !tbaa !3
+ %dec = add nsw i32 %n.addr.0, -1
+ %tobool = icmp eq i32 %dec, 0
+ br i1 %tobool, label %do.end, label %do.body
+
+do.end: ; preds = %do.body
+ ret void
+}
+
+!0 = metadata !{metadata !"short", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!3 = metadata !{metadata !"int", metadata !1}
diff --git a/test/Transforms/LoopVectorize/calloc.ll b/test/Transforms/LoopVectorize/calloc.ll
new file mode 100644
index 0000000..08c84ef
--- /dev/null
+++ b/test/Transforms/LoopVectorize/calloc.ll
@@ -0,0 +1,53 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
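+; A rough C equivalent (sketch reconstructed from the IR, so details are
+; approximate):
+;
+;   char *hexit(const char *bytes, long length) {
+;     char *out = calloc(1, 2 * length + 1);
+;     for (long i = 0; i < 2 * length; i++) {
+;       int digit = (bytes[i / 2] >> ((i & 1) ? 0 : 4)) & 0xf;
+;       out[i] = digit + (digit > 9 ? 'a' - 10 : '0');
+;     }
+;     return out;
+;   }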
+;CHECK: hexit
+;CHECK: zext <4 x i8>
+;CHECK: ret
+
+define noalias i8* @hexit(i8* nocapture %bytes, i64 %length) nounwind uwtable ssp {
+entry:
+ %shl = shl i64 %length, 1
+ %add28 = or i64 %shl, 1
+ %call = tail call i8* @calloc(i64 1, i64 %add28) nounwind
+ %cmp29 = icmp eq i64 %shl, 0
+ br i1 %cmp29, label %for.end, label %for.body.lr.ph
+
+for.body.lr.ph: ; preds = %entry
+ %0 = shl i64 %length, 1
+ br label %for.body
+
+for.body: ; preds = %for.body, %for.body.lr.ph
+ %i.030 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
+ %shr = lshr i64 %i.030, 1
+ %arrayidx = getelementptr inbounds i8* %bytes, i64 %shr
+ %1 = load i8* %arrayidx, align 1, !tbaa !0
+ %conv = zext i8 %1 to i32
+ %and = shl i64 %i.030, 2
+ %neg = and i64 %and, 4
+ %and3 = xor i64 %neg, 4
+ %sh_prom = trunc i64 %and3 to i32
+ %shl4 = shl i32 15, %sh_prom
+ %and5 = and i32 %conv, %shl4
+ %shr11 = lshr i32 %and5, %sh_prom
+ %conv13 = and i32 %shr11, 254
+ %cmp15 = icmp ugt i32 %conv13, 9
+ %cond = select i1 %cmp15, i32 87, i32 48
+ %add17 = add nsw i32 %cond, %shr11
+ %conv18 = trunc i32 %add17 to i8
+ %arrayidx19 = getelementptr inbounds i8* %call, i64 %i.030
+ store i8 %conv18, i8* %arrayidx19, align 1, !tbaa !0
+ %inc = add i64 %i.030, 1
+ %exitcond = icmp eq i64 %inc, %0
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret i8* %call
+}
+
+declare noalias i8* @calloc(i64, i64) nounwind
+
+!0 = metadata !{metadata !"omnipotent char", metadata !1}
+!1 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/Transforms/LoopVectorize/cast-induction.ll b/test/Transforms/LoopVectorize/cast-induction.ll
new file mode 100644
index 0000000..2aa29ed
--- /dev/null
+++ b/test/Transforms/LoopVectorize/cast-induction.ll
@@ -0,0 +1,30 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+; rdar://problem/12848162
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+@a = common global [2048 x i32] zeroinitializer, align 16
+
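+; The loop stores the truncated induction variable itself; roughly, in C
+; (sketch): for (int i = 0; i < 1024; i++) a[i] = i;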
+;CHECK: @example12
+;CHECK: trunc i64
+;CHECK: store <4 x i32>
+;CHECK: ret void
+define void @example12() nounwind uwtable ssp {
+ br label %1
+
+; <label>:1 ; preds = %1, %0
+ %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
+ %2 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %3 = trunc i64 %indvars.iv to i32
+ store i32 %3, i32* %2, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %4, label %1
+
+; <label>:4 ; preds = %1
+ ret void
+}
+
diff --git a/test/Transforms/LoopVectorize/cpp-new-array.ll b/test/Transforms/LoopVectorize/cpp-new-array.ll
index 26902eb..da0fb05 100644
--- a/test/Transforms/LoopVectorize/cpp-new-array.ll
+++ b/test/Transforms/LoopVectorize/cpp-new-array.ll
@@ -1,10 +1,10 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
;CHECK: @cpp_new_arrays
-;CHECK: insertelement <4 x i32>
+;CHECK: sext i32
;CHECK: load <4 x float>
;CHECK: fadd <4 x float>
;CHECK: ret i32
diff --git a/test/Transforms/LoopVectorize/dbg.value.ll b/test/Transforms/LoopVectorize/dbg.value.ll
new file mode 100644
index 0000000..a2ea951
--- /dev/null
+++ b/test/Transforms/LoopVectorize/dbg.value.ll
@@ -0,0 +1,70 @@
+; RUN: opt < %s -S -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine | FileCheck %s
+; Make sure we still vectorize when debug info (llvm.dbg intrinsics and
+; location metadata) is present.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+@A = global [1024 x i32] zeroinitializer, align 16
+@B = global [1024 x i32] zeroinitializer, align 16
+@C = global [1024 x i32] zeroinitializer, align 16
+
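+; The loop being vectorized is, roughly (sketch):
+;
+;   for (int i = 0; i < 1024; i++)
+;     A[i] = B[i] + C[i];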
+; CHECK: @test
+define i32 @test() #0 {
+entry:
+ tail call void @llvm.dbg.value(metadata !1, i64 0, metadata !9), !dbg !18
+ br label %for.body, !dbg !18
+
+for.body:
+ ;CHECK: load <4 x i32>
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @B, i64 0, i64 %indvars.iv, !dbg !19
+ %0 = load i32* %arrayidx, align 4, !dbg !19, !tbaa !21
+ %arrayidx2 = getelementptr inbounds [1024 x i32]* @C, i64 0, i64 %indvars.iv, !dbg !19
+ %1 = load i32* %arrayidx2, align 4, !dbg !19, !tbaa !21
+ %add = add nsw i32 %1, %0, !dbg !19
+ %arrayidx4 = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv, !dbg !19
+ store i32 %add, i32* %arrayidx4, align 4, !dbg !19, !tbaa !21
+ %indvars.iv.next = add i64 %indvars.iv, 1, !dbg !18
+ tail call void @llvm.dbg.value(metadata !{null}, i64 0, metadata !9), !dbg !18
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !18
+ %exitcond = icmp ne i32 %lftr.wideiv, 1024, !dbg !18
+ br i1 %exitcond, label %for.body, label %for.end, !dbg !18
+
+for.end:
+ ret i32 0, !dbg !24
+}
+
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+declare void @llvm.dbg.value(metadata, i64, metadata) #1
+
+attributes #0 = { nounwind ssp uwtable "fp-contract-model"="standard" "no-frame-pointer-elim" "no-frame-pointer-elim-non-leaf" "realign-stack" "relocation-model"="pic" "ssp-buffers-size"="8" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+
+!0 = metadata !{i32 786449, i32 0, i32 4, metadata !"test", metadata !"/path/to/somewhere", metadata !"clang", i1 true, i1 true, metadata !"", i32 0, metadata !1, metadata !1, metadata !2, metadata !11, metadata !""}
+!1 = metadata !{i32 0}
+!2 = metadata !{metadata !3}
+!3 = metadata !{i32 786478, i32 0, metadata !4, metadata !"test", metadata !"test", metadata !"test", metadata !4, i32 5, metadata !5, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 ()* @test, null, null, metadata !8, i32 5}
+!4 = metadata !{i32 786473, metadata !"test", metadata !"/path/to/somewhere", null}
+!5 = metadata !{i32 786453, i32 0, metadata !"", i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !6, i32 0, i32 0}
+!6 = metadata !{metadata !7}
+!7 = metadata !{i32 786468, null, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5}
+!8 = metadata !{metadata !9}
+!9 = metadata !{i32 786688, metadata !10, metadata !"i", metadata !4, i32 6, metadata !7, i32 0, i32 0}
+!10 = metadata !{i32 786443, metadata !3, i32 6, i32 0, metadata !4, i32 0}
+!11 = metadata !{metadata !12, metadata !16, metadata !17}
+!12 = metadata !{i32 786484, i32 0, null, metadata !"A", metadata !"A", metadata !"", metadata !4, i32 1, metadata !13, i32 0, i32 1, [1024 x i32]* @A, null}
+!13 = metadata !{i32 786433, null, metadata !"", null, i32 0, i64 32768, i64 32, i32 0, i32 0, metadata !7, metadata !14, i32 0, i32 0}
+!14 = metadata !{metadata !15}
+!15 = metadata !{i32 786465, i64 0, i64 1024}
+!16 = metadata !{i32 786484, i32 0, null, metadata !"B", metadata !"B", metadata !"", metadata !4, i32 2, metadata !13, i32 0, i32 1, [1024 x i32]* @B, null}
+!17 = metadata !{i32 786484, i32 0, null, metadata !"C", metadata !"C", metadata !"", metadata !4, i32 3, metadata !13, i32 0, i32 1, [1024 x i32]* @C, null}
+!18 = metadata !{i32 6, i32 0, metadata !10, null}
+!19 = metadata !{i32 7, i32 0, metadata !20, null}
+!20 = metadata !{i32 786443, metadata !10, i32 6, i32 0, metadata !4, i32 1}
+!21 = metadata !{metadata !"int", metadata !22}
+!22 = metadata !{metadata !"omnipotent char", metadata !23}
+!23 = metadata !{metadata !"Simple C/C++ TBAA"}
+!24 = metadata !{i32 9, i32 0, metadata !3, null}
diff --git a/test/Transforms/LoopVectorize/flags.ll b/test/Transforms/LoopVectorize/flags.ll
index 2f22a76..656912e 100644
--- a/test/Transforms/LoopVectorize/flags.ll
+++ b/test/Transforms/LoopVectorize/flags.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/float-reduction.ll b/test/Transforms/LoopVectorize/float-reduction.ll
new file mode 100644
index 0000000..565684c
--- /dev/null
+++ b/test/Transforms/LoopVectorize/float-reduction.ll
@@ -0,0 +1,29 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
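+
+; Roughly, in C (sketch; the unused i32* %n parameter is dropped, and the
+; 'fast' flag on the fadd stands in for -ffast-math):
+;
+;   float foo(float *A) {
+;     float sum = 0.0f;
+;     for (int i = 0; i < 200; i++)
+;       sum += A[i];
+;     return sum;
+;   }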
+;CHECK: @foo
+;CHECK: fadd <4 x float>
+;CHECK: ret
+define float @foo(float* nocapture %A, i32* nocapture %n) nounwind uwtable readonly ssp {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %sum.04 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
+ %arrayidx = getelementptr inbounds float* %A, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %add = fadd fast float %sum.04, %0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 200
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret float %add
+}
+
+!0 = metadata !{metadata !"float", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/Transforms/LoopVectorize/gcc-examples.ll b/test/Transforms/LoopVectorize/gcc-examples.ll
index fce29d2..f335557 100644
--- a/test/Transforms/LoopVectorize/gcc-examples.ll
+++ b/test/Transforms/LoopVectorize/gcc-examples.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-unroll=1 -dce -instcombine -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-unroll=4 -dce -instcombine -S | FileCheck %s -check-prefix=UNROLL
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
@@ -24,6 +25,20 @@ target triple = "x86_64-apple-macosx10.8.0"
;CHECK: add nsw <4 x i32>
;CHECK: store <4 x i32>
;CHECK: ret void
+;UNROLL: @example1
+;UNROLL: load <4 x i32>
+;UNROLL: load <4 x i32>
+;UNROLL: load <4 x i32>
+;UNROLL: load <4 x i32>
+;UNROLL: add nsw <4 x i32>
+;UNROLL: add nsw <4 x i32>
+;UNROLL: add nsw <4 x i32>
+;UNROLL: add nsw <4 x i32>
+;UNROLL: store <4 x i32>
+;UNROLL: store <4 x i32>
+;UNROLL: store <4 x i32>
+;UNROLL: store <4 x i32>
+;UNROLL: ret void
define void @example1() nounwind uwtable ssp {
br label %1
@@ -48,6 +63,12 @@ define void @example1() nounwind uwtable ssp {
;CHECK: @example2
;CHECK: store <4 x i32>
;CHECK: ret void
+;UNROLL: @example2
+;UNROLL: store <4 x i32>
+;UNROLL: store <4 x i32>
+;UNROLL: store <4 x i32>
+;UNROLL: store <4 x i32>
+;UNROLL: ret void
define void @example2(i32 %n, i32 %x) nounwind uwtable ssp {
%1 = icmp sgt i32 %n, 0
br i1 %1, label %.lr.ph5, label %.preheader
@@ -89,10 +110,15 @@ define void @example2(i32 %n, i32 %x) nounwind uwtable ssp {
ret void
}
-; We can't vectorize this loop because it has non constant loop bounds.
;CHECK: @example3
-;CHECK-NOT: <4 x i32>
+;CHECK: <4 x i32>
;CHECK: ret void
+;UNROLL: @example3
+;UNROLL: <4 x i32>
+;UNROLL: <4 x i32>
+;UNROLL: <4 x i32>
+;UNROLL: <4 x i32>
+;UNROLL: ret void
define void @example3(i32 %n, i32* noalias nocapture %p, i32* noalias nocapture %q) nounwind uwtable ssp {
%1 = icmp eq i32 %n, 0
br i1 %1, label %._crit_edge, label %.lr.ph
@@ -116,6 +142,12 @@ define void @example3(i32 %n, i32* noalias nocapture %p, i32* noalias nocapture
;CHECK: @example4
;CHECK: load <4 x i32>
;CHECK: ret void
+;UNROLL: @example4
+;UNROLL: load <4 x i32>
+;UNROLL: load <4 x i32>
+;UNROLL: load <4 x i32>
+;UNROLL: load <4 x i32>
+;UNROLL: ret void
define void @example4(i32 %n, i32* noalias nocapture %p, i32* noalias nocapture %q) nounwind uwtable ssp {
%1 = add nsw i32 %n, -1
%2 = icmp eq i32 %n, 0
@@ -176,6 +208,12 @@ define void @example4(i32 %n, i32* noalias nocapture %p, i32* noalias nocapture
;CHECK: @example8
;CHECK: store <4 x i32>
;CHECK: ret void
+;UNROLL: @example8
+;UNROLL: store <4 x i32>
+;UNROLL: store <4 x i32>
+;UNROLL: store <4 x i32>
+;UNROLL: store <4 x i32>
+;UNROLL: ret void
define void @example8(i32 %x) nounwind uwtable ssp {
br label %.preheader
@@ -330,7 +368,7 @@ define void @example11() nounwind uwtable ssp {
}
;CHECK: @example12
-;CHECK: trunc <4 x i64>
+;CHECK: trunc i64
;CHECK: store <4 x i32>
;CHECK: ret void
define void @example12() nounwind uwtable ssp {
@@ -391,9 +429,9 @@ define void @example13(i32** nocapture %A, i32** nocapture %B, i32* nocapture %o
ret void
}
-; Can't vectorize because of reductions.
+; Can vectorize.
;CHECK: @example14
-;CHECK-NOT: <4 x i32>
+;CHECK: <4 x i32>
;CHECK: ret void
define void @example14(i32** nocapture %in, i32** nocapture %coeff, i32* nocapture %out) nounwind uwtable ssp {
.preheader3:
@@ -537,9 +575,9 @@ define void @example14(i32** nocapture %in, i32** nocapture %coeff, i32* nocaptu
ret void
}
-; Can't vectorize because the src and dst pointers are not disjoint.
;CHECK: @example21
-;CHECK-NOT: <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: shufflevector {{.*}} <i32 3, i32 2, i32 1, i32 0>
;CHECK: ret i32
define i32 @example21(i32* nocapture %b, i32 %n) nounwind uwtable readonly ssp {
%1 = icmp sgt i32 %n, 0
@@ -565,9 +603,8 @@ define i32 @example21(i32* nocapture %b, i32 %n) nounwind uwtable readonly ssp {
ret i32 %a.0.lcssa
}
-; Can't vectorize because there are multiple PHIs.
;CHECK: @example23
-;CHECK-NOT: <4 x i32>
+;CHECK: <4 x i32>
;CHECK: ret void
define void @example23(i16* nocapture %src, i32* nocapture %dst) nounwind uwtable ssp {
br label %1
diff --git a/test/Transforms/LoopVectorize/global_alias.ll b/test/Transforms/LoopVectorize/global_alias.ll
new file mode 100644
index 0000000..121da8b
--- /dev/null
+++ b/test/Transforms/LoopVectorize/global_alias.ll
@@ -0,0 +1,1078 @@
+; RUN: opt < %s -O3 -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+
+%struct.anon = type { [100 x i32], i32, [100 x i32] }
+%struct.anon.0 = type { [100 x [100 x i32]], i32, [100 x [100 x i32]] }
+
+@Foo = common global %struct.anon zeroinitializer, align 4
+@Bar = common global %struct.anon.0 zeroinitializer, align 4
+
+@PB = external global i32*
+@PA = external global i32*
+
+
+;; === First, the tests that should always vectorize, either statically or by adding run-time checks ===
+
+
+; /// Different objects, positive induction, constant distance
+; int noAlias01 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[i] = Foo.B[i] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @noAlias01
+; CHECK: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @noAlias01(i32 %a) nounwind {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %arrayidx1 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
+ store i32 %add, i32* %arrayidx1, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx2, align 4
+ ret i32 %7
+}
+
+; /// Different objects, positive induction with widening slide
+; int noAlias02 (int a) {
+; int i;
+; for (i=0; i<SIZE-10; i++)
+; Foo.A[i] = Foo.B[i+10] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @noAlias02
+; CHECK: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @noAlias02(i32 %a) {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 90
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %add = add nsw i32 %1, 10
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %add
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add1 = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
+ store i32 %add1, i32* %arrayidx2, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx3, align 4
+ ret i32 %7
+}
+
+; /// Different objects, positive induction with shortening slide
+; int noAlias03 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[i+10] = Foo.B[i] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @noAlias03
+; CHECK: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @noAlias03(i32 %a) {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %add1 = add nsw i32 %4, 10
+ %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add1
+ store i32 %add, i32* %arrayidx2, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx3, align 4
+ ret i32 %7
+}
+
+; /// Pointer access, positive stride, run-time check added
+; int noAlias04 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; *(PA+i) = *(PB+i) + a;
+; return *(PA+a);
+; }
+; CHECK: define i32 @noAlias04
+; CHECK-NOT: add nsw <4 x i32>
+; CHECK: ret
+;
+; TODO: This test vectorizes (with a run-time check) on real targets with -O3.
+; Check why it is not vectorized here even when vectorization is forced.
+
+define i32 @noAlias04(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32** @PB, align 4
+ %2 = load i32* %i, align 4
+ %add.ptr = getelementptr inbounds i32* %1, i32 %2
+ %3 = load i32* %add.ptr, align 4
+ %4 = load i32* %a.addr, align 4
+ %add = add nsw i32 %3, %4
+ %5 = load i32** @PA, align 4
+ %6 = load i32* %i, align 4
+ %add.ptr1 = getelementptr inbounds i32* %5, i32 %6
+ store i32 %add, i32* %add.ptr1, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %7 = load i32* %i, align 4
+ %inc = add nsw i32 %7, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %8 = load i32** @PA, align 4
+ %9 = load i32* %a.addr, align 4
+ %add.ptr2 = getelementptr inbounds i32* %8, i32 %9
+ %10 = load i32* %add.ptr2, align 4
+ ret i32 %10
+}
+
+; /// Different objects, positive induction, multi-array
+; int noAlias05 (int a) {
+; int i, N=10;
+; for (i=0; i<SIZE; i++)
+; Bar.A[N][i] = Bar.B[N][i] + a;
+; return Bar.A[N][a];
+; }
+; CHECK: define i32 @noAlias05
+; CHECK: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @noAlias05(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ %N = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 10, i32* %N, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %2 = load i32* %N, align 4
+ %arrayidx = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 2), i32 0, i32 %2
+ %arrayidx1 = getelementptr inbounds [100 x i32]* %arrayidx, i32 0, i32 %1
+ %3 = load i32* %arrayidx1, align 4
+ %4 = load i32* %a.addr, align 4
+ %add = add nsw i32 %3, %4
+ %5 = load i32* %i, align 4
+ %6 = load i32* %N, align 4
+ %arrayidx2 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx3 = getelementptr inbounds [100 x i32]* %arrayidx2, i32 0, i32 %5
+ store i32 %add, i32* %arrayidx3, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %7 = load i32* %i, align 4
+ %inc = add nsw i32 %7, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %8 = load i32* %a.addr, align 4
+ %9 = load i32* %N, align 4
+ %arrayidx4 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
+ %arrayidx5 = getelementptr inbounds [100 x i32]* %arrayidx4, i32 0, i32 %8
+ %10 = load i32* %arrayidx5, align 4
+ ret i32 %10
+}
+
+; /// Same objects, positive induction, multi-array, different sub-elements
+; int noAlias06 (int a) {
+; int i, N=10;
+; for (i=0; i<SIZE; i++)
+; Bar.A[N][i] = Bar.A[N+1][i] + a;
+; return Bar.A[N][a];
+; }
+; CHECK: define i32 @noAlias06
+; CHECK: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @noAlias06(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ %N = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 10, i32* %N, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %2 = load i32* %N, align 4
+ %add = add nsw i32 %2, 1
+ %arrayidx = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %add
+ %arrayidx1 = getelementptr inbounds [100 x i32]* %arrayidx, i32 0, i32 %1
+ %3 = load i32* %arrayidx1, align 4
+ %4 = load i32* %a.addr, align 4
+ %add2 = add nsw i32 %3, %4
+ %5 = load i32* %i, align 4
+ %6 = load i32* %N, align 4
+ %arrayidx3 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx4 = getelementptr inbounds [100 x i32]* %arrayidx3, i32 0, i32 %5
+ store i32 %add2, i32* %arrayidx4, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %7 = load i32* %i, align 4
+ %inc = add nsw i32 %7, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %8 = load i32* %a.addr, align 4
+ %9 = load i32* %N, align 4
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
+ %arrayidx6 = getelementptr inbounds [100 x i32]* %arrayidx5, i32 0, i32 %8
+ %10 = load i32* %arrayidx6, align 4
+ ret i32 %10
+}
+
+; /// Different objects, negative induction, constant distance
+; int noAlias07 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[SIZE-i-1] = Foo.B[SIZE-i-1] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @noAlias07
+; CHECK: sub nsw <4 x i32>
+; CHECK: ret
+
+define i32 @noAlias07(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 1
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %sub2 = sub nsw i32 100, %4
+ %sub3 = sub nsw i32 %sub2, 1
+ %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
+ store i32 %add, i32* %arrayidx4, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx5 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx5, align 4
+ ret i32 %7
+}
+
+; /// Different objects, negative induction, shortening slide
+; int noAlias08 (int a) {
+; int i;
+; for (i=0; i<SIZE-10; i++)
+; Foo.A[SIZE-i-1] = Foo.B[SIZE-i-10] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @noAlias08
+; CHECK: sub nsw <4 x i32>
+; CHECK: ret
+
+define i32 @noAlias08(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 90
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 10
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %sub2 = sub nsw i32 100, %4
+ %sub3 = sub nsw i32 %sub2, 1
+ %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
+ store i32 %add, i32* %arrayidx4, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx5 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx5, align 4
+ ret i32 %7
+}
+
+; /// Different objects, negative induction, widening slide
+; int noAlias09 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[SIZE-i-10] = Foo.B[SIZE-i-1] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @noAlias09
+; CHECK: sub nsw <4 x i32>
+; CHECK: ret
+
+define i32 @noAlias09(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 1
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %sub2 = sub nsw i32 100, %4
+ %sub3 = sub nsw i32 %sub2, 10
+ %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
+ store i32 %add, i32* %arrayidx4, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx5 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx5, align 4
+ ret i32 %7
+}
+
+; /// Pointer access, negative stride, run-time check added
+; int noAlias10 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; *(PA+SIZE-i-1) = *(PB+SIZE-i-1) + a;
+; return *(PA+a);
+; }
+; CHECK: define i32 @noAlias10
+; CHECK-NOT: sub nsw <4 x i32>
+; CHECK: ret
+;
+; TODO: This test vectorizes (with a run-time check) on real targets with -O3.
+; Check why it is not vectorized here even when vectorization is forced.
+
+define i32 @noAlias10(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32** @PB, align 4
+ %add.ptr = getelementptr inbounds i32* %1, i32 100
+ %2 = load i32* %i, align 4
+ %idx.neg = sub i32 0, %2
+ %add.ptr1 = getelementptr inbounds i32* %add.ptr, i32 %idx.neg
+ %add.ptr2 = getelementptr inbounds i32* %add.ptr1, i32 -1
+ %3 = load i32* %add.ptr2, align 4
+ %4 = load i32* %a.addr, align 4
+ %add = add nsw i32 %3, %4
+ %5 = load i32** @PA, align 4
+ %add.ptr3 = getelementptr inbounds i32* %5, i32 100
+ %6 = load i32* %i, align 4
+ %idx.neg4 = sub i32 0, %6
+ %add.ptr5 = getelementptr inbounds i32* %add.ptr3, i32 %idx.neg4
+ %add.ptr6 = getelementptr inbounds i32* %add.ptr5, i32 -1
+ store i32 %add, i32* %add.ptr6, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %7 = load i32* %i, align 4
+ %inc = add nsw i32 %7, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %8 = load i32** @PA, align 4
+ %9 = load i32* %a.addr, align 4
+ %add.ptr7 = getelementptr inbounds i32* %8, i32 %9
+ %10 = load i32* %add.ptr7, align 4
+ ret i32 %10
+}
+
+; /// Different objects, negative induction, multi-array
+; int noAlias11 (int a) {
+; int i, N=10;
+; for (i=0; i<SIZE; i++)
+; Bar.A[N][SIZE-i-1] = Bar.B[N][SIZE-i-1] + a;
+; return Bar.A[N][a];
+; }
+; CHECK: define i32 @noAlias11
+; CHECK: sub nsw <4 x i32>
+; CHECK: ret
+
+define i32 @noAlias11(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ %N = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 10, i32* %N, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 1
+ %2 = load i32* %N, align 4
+ %arrayidx = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 2), i32 0, i32 %2
+ %arrayidx2 = getelementptr inbounds [100 x i32]* %arrayidx, i32 0, i32 %sub1
+ %3 = load i32* %arrayidx2, align 4
+ %4 = load i32* %a.addr, align 4
+ %add = add nsw i32 %3, %4
+ %5 = load i32* %i, align 4
+ %sub3 = sub nsw i32 100, %5
+ %sub4 = sub nsw i32 %sub3, 1
+ %6 = load i32* %N, align 4
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx6 = getelementptr inbounds [100 x i32]* %arrayidx5, i32 0, i32 %sub4
+ store i32 %add, i32* %arrayidx6, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %7 = load i32* %i, align 4
+ %inc = add nsw i32 %7, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %8 = load i32* %a.addr, align 4
+ %9 = load i32* %N, align 4
+ %arrayidx7 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
+ %arrayidx8 = getelementptr inbounds [100 x i32]* %arrayidx7, i32 0, i32 %8
+ %10 = load i32* %arrayidx8, align 4
+ ret i32 %10
+}
+
+; /// Same objects, negative induction, multi-array, different sub-elements
+; int noAlias12 (int a) {
+; int i, N=10;
+; for (i=0; i<SIZE; i++)
+; Bar.A[N][SIZE-i-1] = Bar.A[N+1][SIZE-i-1] + a;
+; return Bar.A[N][a];
+; }
+; CHECK: define i32 @noAlias12
+; CHECK: sub nsw <4 x i32>
+; CHECK: ret
+
+define i32 @noAlias12(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ %N = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 10, i32* %N, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 1
+ %2 = load i32* %N, align 4
+ %add = add nsw i32 %2, 1
+ %arrayidx = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %add
+ %arrayidx2 = getelementptr inbounds [100 x i32]* %arrayidx, i32 0, i32 %sub1
+ %3 = load i32* %arrayidx2, align 4
+ %4 = load i32* %a.addr, align 4
+ %add3 = add nsw i32 %3, %4
+ %5 = load i32* %i, align 4
+ %sub4 = sub nsw i32 100, %5
+ %sub5 = sub nsw i32 %sub4, 1
+ %6 = load i32* %N, align 4
+ %arrayidx6 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx7 = getelementptr inbounds [100 x i32]* %arrayidx6, i32 0, i32 %sub5
+ store i32 %add3, i32* %arrayidx7, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %7 = load i32* %i, align 4
+ %inc = add nsw i32 %7, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %8 = load i32* %a.addr, align 4
+ %9 = load i32* %N, align 4
+ %arrayidx8 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
+ %arrayidx9 = getelementptr inbounds [100 x i32]* %arrayidx8, i32 0, i32 %8
+ %10 = load i32* %arrayidx9, align 4
+ ret i32 %10
+}
+
+; /// Same objects, positive induction, constant distance, just enough for vector size
+; int noAlias13 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[i] = Foo.A[i+4] + a;
+; return Foo.A[a];
+; }
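+; With a vector width of 4, the dependence distance of 4 elements between
+; the read of Foo.A[i+4] and the write of Foo.A[i] is just wide enough to
+; be safe.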
+; CHECK: define i32 @noAlias13
+; CHECK: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @noAlias13(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %add = add nsw i32 %1, 4
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add1 = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
+ store i32 %add1, i32* %arrayidx2, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx3, align 4
+ ret i32 %7
+}
+
+; /// Same objects, negative induction, constant distance, just enough for vector size
+; int noAlias14 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[SIZE-i-1] = Foo.A[SIZE-i-5] + a;
+; return Foo.A[a];
+; }
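+; As in noAlias13, the 4-element distance (read Foo.A[SIZE-i-5], write
+; Foo.A[SIZE-i-1]) exactly matches the vector width, so it is still safe.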
+; CHECK: define i32 @noAlias14
+; CHECK: sub nsw <4 x i32>
+; CHECK: ret
+
+define i32 @noAlias14(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 5
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %sub2 = sub nsw i32 100, %4
+ %sub3 = sub nsw i32 %sub2, 1
+ %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
+ store i32 %add, i32* %arrayidx4, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx5 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx5, align 4
+ ret i32 %7
+}
+
+
+;; === Now, the tests that we could vectorize with induction changes or run-time checks ===
+
+
+; /// Different objects, swapped induction, alias at the end
+; int mayAlias01 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[i] = Foo.B[SIZE-i-1] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @mayAlias01
+; CHECK-NOT: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @mayAlias01(i32 %a) nounwind {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 1
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
+ store i32 %add, i32* %arrayidx2, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx3, align 4
+ ret i32 %7
+}
+
+; /// Different objects, swapped induction, alias at the beginning
+; int mayAlias02 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[SIZE-i-1] = Foo.B[i] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @mayAlias02
+; CHECK-NOT: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @mayAlias02(i32 %a) nounwind {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %4
+ %sub1 = sub nsw i32 %sub, 1
+ %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub1
+ store i32 %add, i32* %arrayidx2, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx3, align 4
+ ret i32 %7
+}
+
+; /// Pointer access, run-time check added
+; int mayAlias03 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; *(PA+i) = *(PB+SIZE-i-1) + a;
+; return *(PA+a);
+; }
+; CHECK: define i32 @mayAlias03
+; CHECK-NOT: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @mayAlias03(i32 %a) nounwind {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32** @PB, align 4
+ %add.ptr = getelementptr inbounds i32* %1, i32 100
+ %2 = load i32* %i, align 4
+ %idx.neg = sub i32 0, %2
+ %add.ptr1 = getelementptr inbounds i32* %add.ptr, i32 %idx.neg
+ %add.ptr2 = getelementptr inbounds i32* %add.ptr1, i32 -1
+ %3 = load i32* %add.ptr2, align 4
+ %4 = load i32* %a.addr, align 4
+ %add = add nsw i32 %3, %4
+ %5 = load i32** @PA, align 4
+ %6 = load i32* %i, align 4
+ %add.ptr3 = getelementptr inbounds i32* %5, i32 %6
+ store i32 %add, i32* %add.ptr3, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %7 = load i32* %i, align 4
+ %inc = add nsw i32 %7, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %8 = load i32** @PA, align 4
+ %9 = load i32* %a.addr, align 4
+ %add.ptr4 = getelementptr inbounds i32* %8, i32 %9
+ %10 = load i32* %add.ptr4, align 4
+ ret i32 %10
+}
+
+
+;; === Finally, the tests that should only vectorize with care (or if we ignore undefined behaviour altogether) ===
+
+
+; int mustAlias01 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[i+10] = Foo.B[SIZE-i-1] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @mustAlias01
+; CHECK-NOT: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @mustAlias01(i32 %a) nounwind {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 1
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %add2 = add nsw i32 %4, 10
+ %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add2
+ store i32 %add, i32* %arrayidx3, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx4, align 4
+ ret i32 %7
+}
+
+; int mustAlias02 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[i] = Foo.B[SIZE-i-10] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @mustAlias02
+; CHECK-NOT: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @mustAlias02(i32 %a) nounwind {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 10
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
+ store i32 %add, i32* %arrayidx2, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx3, align 4
+ ret i32 %7
+}
+
+; int mustAlias03 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[i+10] = Foo.B[SIZE-i-10] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @mustAlias03
+; CHECK-NOT: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @mustAlias03(i32 %a) nounwind {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 10
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %add2 = add nsw i32 %4, 10
+ %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add2
+ store i32 %add, i32* %arrayidx3, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx4, align 4
+ ret i32 %7
+}
diff --git a/test/Transforms/LoopVectorize/i8-induction.ll b/test/Transforms/LoopVectorize/i8-induction.ll
new file mode 100644
index 0000000..7759b70
--- /dev/null
+++ b/test/Transforms/LoopVectorize/i8-induction.ll
@@ -0,0 +1,35 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+@a = common global i8 0, align 1
+@b = common global i8 0, align 1
+
+define void @f() nounwind uwtable ssp {
+scalar.ph:
+ store i8 0, i8* inttoptr (i64 1 to i8*), align 1, !tbaa !0
+ %0 = load i8* @a, align 1, !tbaa !0
+ br label %for.body
+
+for.body:
+ %mul16 = phi i8 [ 0, %scalar.ph ], [ %mul, %for.body ] ; <------- i8 induction var.
+ %c.015 = phi i8 [ undef, %scalar.ph ], [ %conv8, %for.body ]
+ %conv2 = sext i8 %c.015 to i32
+ %tobool = icmp ne i8 %c.015, 0
+ %.sink = select i1 %tobool, i8 %c.015, i8 %0
+ %mul = mul i8 %mul16, %.sink
+ %add = add nsw i32 %conv2, 1
+ %conv8 = trunc i32 %add to i8
+ %sext = shl i32 %add, 24
+ %phitmp14 = icmp slt i32 %sext, 268435456
+ br i1 %phitmp14, label %for.body, label %for.end
+
+for.end: ; preds = %for.body
+ store i8 %mul, i8* @b, align 1, !tbaa !0
+ ret void
+}
+
+!0 = metadata !{metadata !"omnipotent char", metadata !1}
+!1 = metadata !{metadata !"Simple C/C++ TBAA"}
+
diff --git a/test/Transforms/LoopVectorize/if-conv-crash.ll b/test/Transforms/LoopVectorize/if-conv-crash.ll
new file mode 100644
index 0000000..3283456
--- /dev/null
+++ b/test/Transforms/LoopVectorize/if-conv-crash.ll
@@ -0,0 +1,39 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -enable-if-conversion
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+define fastcc void @DD_dump() nounwind uwtable ssp {
+entry:
+ br i1 undef, label %lor.lhs.false, label %if.end25
+
+lor.lhs.false: ; preds = %entry
+ br i1 undef, label %if.end21, label %if.else
+
+if.else: ; preds = %lor.lhs.false
+ br i1 undef, label %num_q.exit, label %while.body.i.preheader
+
+while.body.i.preheader: ; preds = %if.else
+ br label %while.body.i
+
+while.body.i: ; preds = %if.end.i, %while.body.i.preheader
+ switch i8 undef, label %if.end.i [
+ i8 39, label %if.then.i
+ i8 92, label %if.then.i
+ ]
+
+if.then.i: ; preds = %while.body.i, %while.body.i
+ br label %if.end.i
+
+if.end.i: ; preds = %if.then.i, %while.body.i
+ br i1 undef, label %num_q.exit, label %while.body.i
+
+num_q.exit: ; preds = %if.end.i, %if.else
+ unreachable
+
+if.end21: ; preds = %lor.lhs.false
+ unreachable
+
+if.end25: ; preds = %entry
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/if-conversion-reduction.ll b/test/Transforms/LoopVectorize/if-conversion-reduction.ll
new file mode 100644
index 0000000..3a2d82e
--- /dev/null
+++ b/test/Transforms/LoopVectorize/if-conversion-reduction.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -enable-if-conversion -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
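+; Note: unlike the otherwise-similar @reduction_func in if-conversion.ll,
+; the exit value here is the constant 4 rather than %sum.1, so the sum phi
+; is not a reduction the vectorizer can recognize and the loop should stay
+; scalar.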
+;CHECK: @reduction_func
+;CHECK-NOT: load <4 x i32>
+;CHECK: ret i32
+define i32 @reduction_func(i32* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
+entry:
+ %cmp10 = icmp sgt i32 %n, 0
+ br i1 %cmp10, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.inc
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
+ %sum.011 = phi i32 [ %sum.1, %for.inc ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp1 = icmp sgt i32 %0, 30
+ br i1 %cmp1, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %add = add i32 %sum.011, 2
+ %add4 = add i32 %add, %0
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %sum.1 = phi i32 [ %add4, %if.then ], [ %sum.011, %for.body ]
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.inc, %entry
+ %sum.0.lcssa = phi i32 [ 0, %entry ], [ 4, %for.inc ]
+ ret i32 %sum.0.lcssa
+}
+
diff --git a/test/Transforms/LoopVectorize/if-conversion.ll b/test/Transforms/LoopVectorize/if-conversion.ll
new file mode 100644
index 0000000..6e7c03a
--- /dev/null
+++ b/test/Transforms/LoopVectorize/if-conversion.ll
@@ -0,0 +1,108 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -enable-if-conversion -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; This is the loop in this example:
+;
+;int function0(int *a, int *b, int start, int end) {
+;
+; for (int i=start; i<end; ++i) {
+; unsigned k = a[i];
+;
+; if (a[i] > b[i]) <------ notice the IF inside the loop.
+; k = k * 5 + 3;
+;
+; a[i] = k; <---- k is a PHI node that becomes a vector select.
+; }
+;}
+
+;CHECK: @function0
+;CHECK: load <4 x i32>
+;CHECK: icmp sgt <4 x i32>
+;CHECK: mul <4 x i32>
+;CHECK: add <4 x i32>
+;CHECK: select <4 x i1>
+;CHECK: ret i32
+define i32 @function0(i32* nocapture %a, i32* nocapture %b, i32 %start, i32 %end) nounwind uwtable ssp {
+entry:
+ %cmp16 = icmp slt i32 %start, %end
+ br i1 %cmp16, label %for.body.lr.ph, label %for.end
+
+for.body.lr.ph:
+ %0 = sext i32 %start to i64
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ %0, %for.body.lr.ph ], [ %indvars.iv.next, %if.end ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %1 = load i32* %arrayidx, align 4
+ %arrayidx4 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %2 = load i32* %arrayidx4, align 4
+ %cmp5 = icmp sgt i32 %1, %2
+ br i1 %cmp5, label %if.then, label %if.end
+
+if.then:
+ %mul = mul i32 %1, 5
+ %add = add i32 %mul, 3
+ br label %if.end
+
+if.end:
+ %k.0 = phi i32 [ %add, %if.then ], [ %1, %for.body ]
+ store i32 %k.0, i32* %arrayidx, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %3 = trunc i64 %indvars.iv.next to i32
+ %cmp = icmp slt i32 %3, %end
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end:
+ ret i32 undef
+}
+
+
+
+; int func(int *A, int n) {
+; unsigned sum = 0;
+; for (int i = 0; i < n; ++i)
+; if (A[i] > 30)
+; sum += A[i] + 2;
+;
+; return sum;
+; }
+
+;CHECK: @reduction_func
+;CHECK: load <4 x i32>
+;CHECK: icmp sgt <4 x i32>
+;CHECK: add <4 x i32>
+;CHECK: select <4 x i1>
+;CHECK: ret i32
+define i32 @reduction_func(i32* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
+entry:
+ %cmp10 = icmp sgt i32 %n, 0
+ br i1 %cmp10, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.inc
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
+ %sum.011 = phi i32 [ %sum.1, %for.inc ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp1 = icmp sgt i32 %0, 30
+ br i1 %cmp1, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %add = add i32 %sum.011, 2
+ %add4 = add i32 %add, %0
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %sum.1 = phi i32 [ %add4, %if.then ], [ %sum.011, %for.body ]
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.inc, %entry
+ %sum.0.lcssa = phi i32 [ 0, %entry ], [ %sum.1, %for.inc ]
+ ret i32 %sum.0.lcssa
+}
+
diff --git a/test/Transforms/LoopVectorize/increment.ll b/test/Transforms/LoopVectorize/increment.ll
index 71ea768..3fa6b19 100644
--- a/test/Transforms/LoopVectorize/increment.ll
+++ b/test/Transforms/LoopVectorize/increment.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/induction_plus.ll b/test/Transforms/LoopVectorize/induction_plus.ll
index b31bceb..96595cd 100644
--- a/test/Transforms/LoopVectorize/induction_plus.ll
+++ b/test/Transforms/LoopVectorize/induction_plus.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -instcombine -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
@@ -6,8 +6,7 @@ target triple = "x86_64-apple-macosx10.8.0"
@array = common global [1024 x i32] zeroinitializer, align 16
;CHECK: @array_at_plus_one
-;CHECK: add <4 x i64>
-;CHECK: trunc <4 x i64>
+;CHECK: trunc i64
;CHECK: add i64 %index, 12
;CHECK: ret i32
define i32 @array_at_plus_one(i32 %n) nounwind uwtable ssp {
diff --git a/test/Transforms/LoopVectorize/intrinsic.ll b/test/Transforms/LoopVectorize/intrinsic.ll
new file mode 100644
index 0000000..e79d78d
--- /dev/null
+++ b/test/Transforms/LoopVectorize/intrinsic.ll
@@ -0,0 +1,935 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
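+; Every test in this file follows the same load/call/store pattern; in C
+; terms, roughly (shown for sqrt_f32; the others only swap callee and type):
+;
+; void sqrt_f32(int n, float *y, float *x) {
+;   for (int i = 0; i < n; ++i)
+;     x[i] = sqrtf(y[i]);
+; }
+;
+; The IR calls the llvm.* intrinsics directly; each test checks that the
+; call is widened to its <4 x ...> counterpart.
+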
+;CHECK: @sqrt_f32
+;CHECK: llvm.sqrt.v4f32
+;CHECK: ret void
+define void @sqrt_f32(i32 %n, float* noalias %y, float* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %call = tail call float @llvm.sqrt.f32(float %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.sqrt.f32(float) nounwind readnone
+
+;CHECK: @sqrt_f64
+;CHECK: llvm.sqrt.v4f64
+;CHECK: ret void
+define void @sqrt_f64(i32 %n, double* noalias %y, double* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %call = tail call double @llvm.sqrt.f64(double %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx2, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.sqrt.f64(double) nounwind readnone
+
+;CHECK: @sin_f32
+;CHECK: llvm.sin.v4f32
+;CHECK: ret void
+define void @sin_f32(i32 %n, float* noalias %y, float* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %call = tail call float @llvm.sin.f32(float %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.sin.f32(float) nounwind readnone
+
+;CHECK: @sin_f64
+;CHECK: llvm.sin.v4f64
+;CHECK: ret void
+define void @sin_f64(i32 %n, double* noalias %y, double* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %call = tail call double @llvm.sin.f64(double %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx2, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.sin.f64(double) nounwind readnone
+
+;CHECK: @cos_f32
+;CHECK: llvm.cos.v4f32
+;CHECK: ret void
+define void @cos_f32(i32 %n, float* noalias %y, float* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %call = tail call float @llvm.cos.f32(float %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.cos.f32(float) nounwind readnone
+
+;CHECK: @cos_f64
+;CHECK: llvm.cos.v4f64
+;CHECK: ret void
+define void @cos_f64(i32 %n, double* noalias %y, double* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %call = tail call double @llvm.cos.f64(double %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx2, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.cos.f64(double) nounwind readnone
+
+;CHECK: @exp_f32
+;CHECK: llvm.exp.v4f32
+;CHECK: ret void
+define void @exp_f32(i32 %n, float* noalias %y, float* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %call = tail call float @llvm.exp.f32(float %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.exp.f32(float) nounwind readnone
+
+;CHECK: @exp_f64
+;CHECK: llvm.exp.v4f64
+;CHECK: ret void
+define void @exp_f64(i32 %n, double* noalias %y, double* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %call = tail call double @llvm.exp.f64(double %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx2, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.exp.f64(double) nounwind readnone
+
+;CHECK: @exp2_f32
+;CHECK: llvm.exp2.v4f32
+;CHECK: ret void
+define void @exp2_f32(i32 %n, float* noalias %y, float* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %call = tail call float @llvm.exp2.f32(float %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.exp2.f32(float) nounwind readnone
+
+;CHECK: @exp2_f64
+;CHECK: llvm.exp2.v4f64
+;CHECK: ret void
+define void @exp2_f64(i32 %n, double* noalias %y, double* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %call = tail call double @llvm.exp2.f64(double %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx2, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.exp2.f64(double) nounwind readnone
+
+;CHECK: @log_f32
+;CHECK: llvm.log.v4f32
+;CHECK: ret void
+define void @log_f32(i32 %n, float* noalias %y, float* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %call = tail call float @llvm.log.f32(float %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.log.f32(float) nounwind readnone
+
+;CHECK: @log_f64
+;CHECK: llvm.log.v4f64
+;CHECK: ret void
+define void @log_f64(i32 %n, double* noalias %y, double* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %call = tail call double @llvm.log.f64(double %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx2, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.log.f64(double) nounwind readnone
+
+;CHECK: @log10_f32
+;CHECK: llvm.log10.v4f32
+;CHECK: ret void
+define void @log10_f32(i32 %n, float* noalias %y, float* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %call = tail call float @llvm.log10.f32(float %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.log10.f32(float) nounwind readnone
+
+;CHECK: @log10_f64
+;CHECK: llvm.log10.v4f64
+;CHECK: ret void
+define void @log10_f64(i32 %n, double* noalias %y, double* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %call = tail call double @llvm.log10.f64(double %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx2, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.log10.f64(double) nounwind readnone
+
+;CHECK: @log2_f32
+;CHECK: llvm.log2.v4f32
+;CHECK: ret void
+define void @log2_f32(i32 %n, float* noalias %y, float* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %call = tail call float @llvm.log2.f32(float %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.log2.f32(float) nounwind readnone
+
+;CHECK: @log2_f64
+;CHECK: llvm.log2.v4f64
+;CHECK: ret void
+define void @log2_f64(i32 %n, double* noalias %y, double* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %call = tail call double @llvm.log2.f64(double %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx2, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.log2.f64(double) nounwind readnone
+
+;CHECK: @fabs_f32
+;CHECK: llvm.fabs.v4f32
+;CHECK: ret void
+define void @fabs_f32(i32 %n, float* noalias %y, float* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %call = tail call float @llvm.fabs.f32(float %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.fabs.f32(float) nounwind readnone
+
+;CHECK: @fabs_f64
+;CHECK: llvm.fabs.v4f64
+;CHECK: ret void
+define void @fabs_f64(i32 %n, double* noalias %y, double* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %call = tail call double @llvm.fabs.f64(double %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx2, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.fabs.f64(double) nounwind readnone
+
+;CHECK: @floor_f32
+;CHECK: llvm.floor.v4f32
+;CHECK: ret void
+define void @floor_f32(i32 %n, float* noalias %y, float* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %call = tail call float @llvm.floor.f32(float %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.floor.f32(float) nounwind readnone
+
+;CHECK: @floor_f64
+;CHECK: llvm.floor.v4f64
+;CHECK: ret void
+define void @floor_f64(i32 %n, double* noalias %y, double* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %call = tail call double @llvm.floor.f64(double %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx2, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.floor.f64(double) nounwind readnone
+
+;CHECK: @ceil_f32
+;CHECK: llvm.ceil.v4f32
+;CHECK: ret void
+define void @ceil_f32(i32 %n, float* noalias %y, float* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %call = tail call float @llvm.ceil.f32(float %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.ceil.f32(float) nounwind readnone
+
+;CHECK: @ceil_f64
+;CHECK: llvm.ceil.v4f64
+;CHECK: ret void
+define void @ceil_f64(i32 %n, double* noalias %y, double* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %call = tail call double @llvm.ceil.f64(double %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx2, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.ceil.f64(double) nounwind readnone
+
+;CHECK: @trunc_f32
+;CHECK: llvm.trunc.v4f32
+;CHECK: ret void
+define void @trunc_f32(i32 %n, float* noalias %y, float* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %call = tail call float @llvm.trunc.f32(float %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.trunc.f32(float) nounwind readnone
+
+;CHECK: @trunc_f64
+;CHECK: llvm.trunc.v4f64
+;CHECK: ret void
+define void @trunc_f64(i32 %n, double* noalias %y, double* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %call = tail call double @llvm.trunc.f64(double %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx2, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.trunc.f64(double) nounwind readnone
+
+;CHECK: @rint_f32
+;CHECK: llvm.rint.v4f32
+;CHECK: ret void
+define void @rint_f32(i32 %n, float* noalias %y, float* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %call = tail call float @llvm.rint.f32(float %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.rint.f32(float) nounwind readnone
+
+;CHECK: @rint_f64
+;CHECK: llvm.rint.v4f64
+;CHECK: ret void
+define void @rint_f64(i32 %n, double* noalias %y, double* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %call = tail call double @llvm.rint.f64(double %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx2, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.rint.f64(double) nounwind readnone
+
+;CHECK: @nearbyint_f32
+;CHECK: llvm.nearbyint.v4f32
+;CHECK: ret void
+define void @nearbyint_f32(i32 %n, float* noalias %y, float* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %call = tail call float @llvm.nearbyint.f32(float %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.nearbyint.f32(float) nounwind readnone
+
+;CHECK: @nearbyint_f64
+;CHECK: llvm.nearbyint.v4f64
+;CHECK: ret void
+define void @nearbyint_f64(i32 %n, double* noalias %y, double* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %call = tail call double @llvm.nearbyint.f64(double %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx2, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.nearbyint.f64(double) nounwind readnone
+
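+; The fma and fmuladd tests below take three operands; in C terms, roughly
+; x[i] = fmaf(y[i], z[i], w[i]), i.e. x[i] = y[i] * z[i] + w[i].
+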
+;CHECK: @fma_f32
+;CHECK: llvm.fma.v4f32
+;CHECK: ret void
+define void @fma_f32(i32 %n, float* noalias %y, float* noalias %x, float* noalias %z, float* noalias %w) nounwind uwtable {
+entry:
+ %cmp12 = icmp sgt i32 %n, 0
+ br i1 %cmp12, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %arrayidx2 = getelementptr inbounds float* %w, i64 %indvars.iv
+ %1 = load float* %arrayidx2, align 4, !tbaa !0
+ %arrayidx4 = getelementptr inbounds float* %z, i64 %indvars.iv
+ %2 = load float* %arrayidx4, align 4, !tbaa !0
+ %3 = tail call float @llvm.fma.f32(float %0, float %2, float %1)
+ %arrayidx6 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %3, float* %arrayidx6, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.fma.f32(float, float, float) nounwind readnone
+
+;CHECK: @fma_f64
+;CHECK: llvm.fma.v4f64
+;CHECK: ret void
+define void @fma_f64(i32 %n, double* noalias %y, double* noalias %x, double* noalias %z, double* noalias %w) nounwind uwtable {
+entry:
+ %cmp12 = icmp sgt i32 %n, 0
+ br i1 %cmp12, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %arrayidx2 = getelementptr inbounds double* %w, i64 %indvars.iv
+ %1 = load double* %arrayidx2, align 8, !tbaa !3
+ %arrayidx4 = getelementptr inbounds double* %z, i64 %indvars.iv
+ %2 = load double* %arrayidx4, align 8, !tbaa !3
+ %3 = tail call double @llvm.fma.f64(double %0, double %2, double %1)
+ %arrayidx6 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %3, double* %arrayidx6, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.fma.f64(double, double, double) nounwind readnone
+
+;CHECK: @fmuladd_f32
+;CHECK: llvm.fmuladd.v4f32
+;CHECK: ret void
+define void @fmuladd_f32(i32 %n, float* noalias %y, float* noalias %x, float* noalias %z, float* noalias %w) nounwind uwtable {
+entry:
+ %cmp12 = icmp sgt i32 %n, 0
+ br i1 %cmp12, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %arrayidx2 = getelementptr inbounds float* %w, i64 %indvars.iv
+ %1 = load float* %arrayidx2, align 4, !tbaa !0
+ %arrayidx4 = getelementptr inbounds float* %z, i64 %indvars.iv
+ %2 = load float* %arrayidx4, align 4, !tbaa !0
+ %3 = tail call float @llvm.fmuladd.f32(float %0, float %2, float %1)
+ %arrayidx6 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %3, float* %arrayidx6, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.fmuladd.f32(float, float, float) nounwind readnone
+
+;CHECK: @fmuladd_f64
+;CHECK: llvm.fmuladd.v4f64
+;CHECK: ret void
+define void @fmuladd_f64(i32 %n, double* noalias %y, double* noalias %x, double* noalias %z, double* noalias %w) nounwind uwtable {
+entry:
+ %cmp12 = icmp sgt i32 %n, 0
+ br i1 %cmp12, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %arrayidx2 = getelementptr inbounds double* %w, i64 %indvars.iv
+ %1 = load double* %arrayidx2, align 8, !tbaa !3
+ %arrayidx4 = getelementptr inbounds double* %z, i64 %indvars.iv
+ %2 = load double* %arrayidx4, align 8, !tbaa !3
+ %3 = tail call double @llvm.fmuladd.f64(double %0, double %2, double %1)
+ %arrayidx6 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %3, double* %arrayidx6, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.fmuladd.f64(double, double, double) nounwind readnone
+
+;CHECK: @pow_f32
+;CHECK: llvm.pow.v4f32
+;CHECK: ret void
+define void @pow_f32(i32 %n, float* noalias %y, float* noalias %x, float* noalias %z) nounwind uwtable {
+entry:
+ %cmp9 = icmp sgt i32 %n, 0
+ br i1 %cmp9, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %arrayidx2 = getelementptr inbounds float* %z, i64 %indvars.iv
+ %1 = load float* %arrayidx2, align 4, !tbaa !0
+ %call = tail call float @llvm.pow.f32(float %0, float %1) nounwind readnone
+ %arrayidx4 = getelementptr inbounds float* %x, i64 %indvars.iv
+ store float %call, float* %arrayidx4, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.pow.f32(float, float) nounwind readnone
+
+;CHECK: @pow_f64
+;CHECK: llvm.pow.v4f64
+;CHECK: ret void
+define void @pow_f64(i32 %n, double* noalias %y, double* noalias %x, double* noalias %z) nounwind uwtable {
+entry:
+ %cmp9 = icmp sgt i32 %n, 0
+ br i1 %cmp9, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8, !tbaa !3
+ %arrayidx2 = getelementptr inbounds double* %z, i64 %indvars.iv
+ %1 = load double* %arrayidx2, align 8, !tbaa !3
+ %call = tail call double @llvm.pow.f64(double %0, double %1) nounwind readnone
+ %arrayidx4 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx4, align 8, !tbaa !3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
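+; A direct libm call should also be vectorized when it maps to an LLVM
+; intrinsic; in C terms, roughly: x[i] = fabsf(x[i]).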
+;CHECK: @fabs_libm
+;CHECK: call <4 x float> @llvm.fabs.v4f32
+;CHECK: ret void
+define void @fabs_libm(float* nocapture %x) nounwind {
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float* %x, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %call = tail call float @fabsf(float %0) nounwind readnone
+ store float %call, float* %arrayidx, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+declare float @fabsf(float) nounwind readnone
+
+declare double @llvm.pow.f64(double, double) nounwind readnone
+
+!0 = metadata !{metadata !"float", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!3 = metadata !{metadata !"double", metadata !1}
+!4 = metadata !{metadata !"int", metadata !1}
diff --git a/test/Transforms/LoopVectorize/lcssa-crash.ll b/test/Transforms/LoopVectorize/lcssa-crash.ll
new file mode 100644
index 0000000..06b3b08
--- /dev/null
+++ b/test/Transforms/LoopVectorize/lcssa-crash.ll
@@ -0,0 +1,29 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%type1 = type { %type2 }
+%type2 = type { [0 x i8*], i8**, i32, i32, i32 }
+
+define void @test() nounwind uwtable align 2 {
+ br label %for.body.lr.ph.i.i.i
+
+for.body.lr.ph.i.i.i:
+ br label %for.body.i.i.i
+
+for.body.i.i.i:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc.i.i.i ], [ 0, %for.body.lr.ph.i.i.i ]
+ br label %for.inc.i.i.i
+
+for.inc.i.i.i:
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp ne i32 %lftr.wideiv, undef
+ br i1 %exitcond, label %for.body.i.i.i, label %for.end.i.i.i
+
+for.end.i.i.i:
+ %lcssa = phi %type1* [ undef, %for.inc.i.i.i ]
+ unreachable
+}
+
diff --git a/test/Transforms/LoopVectorize/no_int_induction.ll b/test/Transforms/LoopVectorize/no_int_induction.ll
new file mode 100644
index 0000000..45aa8c7
--- /dev/null
+++ b/test/Transforms/LoopVectorize/no_int_induction.ll
@@ -0,0 +1,33 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+; int __attribute__((noinline)) sum_array(int *A, int n) {
+; return std::accumulate(A, A + n, 0);
+; }
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+;CHECK: @sum_array
+;CHECK: phi <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: add nsw <4 x i32>
+;CHECK: ret i32
+define i32 @sum_array(i32* %A, i32 %n) nounwind uwtable readonly noinline ssp {
+ %1 = sext i32 %n to i64
+ %2 = getelementptr inbounds i32* %A, i64 %1
+ %3 = icmp eq i32 %n, 0
+ br i1 %3, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %.lr.ph.i
+
+.lr.ph.i: ; preds = %0, %.lr.ph.i
+ %.03.i = phi i32* [ %6, %.lr.ph.i ], [ %A, %0 ]
+ %.012.i = phi i32 [ %5, %.lr.ph.i ], [ 0, %0 ]
+ %4 = load i32* %.03.i, align 4
+ %5 = add nsw i32 %4, %.012.i
+ %6 = getelementptr inbounds i32* %.03.i, i64 1
+ %7 = icmp eq i32* %6, %2
+ br i1 %7, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %.lr.ph.i
+
+_ZSt10accumulateIPiiET0_T_S2_S1_.exit: ; preds = %.lr.ph.i, %0
+ %.01.lcssa.i = phi i32 [ 0, %0 ], [ %5, %.lr.ph.i ]
+ ret i32 %.01.lcssa.i
+}
diff --git a/test/Transforms/LoopVectorize/nofloat.ll b/test/Transforms/LoopVectorize/nofloat.ll
new file mode 100644
index 0000000..de23bf0
--- /dev/null
+++ b/test/Transforms/LoopVectorize/nofloat.ll
@@ -0,0 +1,29 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+; Make sure that we don't vectorize functions with the 'noimplicitfloat' attribute.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+@a = common global [2048 x i32] zeroinitializer, align 16
+
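+; In C terms the loop below is roughly:
+;
+;   for (int i = 0; i < 1024; ++i) a[i] = i;
+;
+; but the point of the test is only the function attribute.
+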
+;CHECK: @example12
+;CHECK-NOT: store <4 x i32>
+;CHECK: ret void
+define void @example12() noimplicitfloat { ; <--------- "noimplicitfloat" attribute here!
+ br label %1
+
+; <label>:1 ; preds = %1, %0
+ %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
+ %2 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %3 = trunc i64 %indvars.iv to i32
+ store i32 %3, i32* %2, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %4, label %1
+
+; <label>:4 ; preds = %1
+ ret void
+}
+
diff --git a/test/Transforms/LoopVectorize/non-const-n.ll b/test/Transforms/LoopVectorize/non-const-n.ll
index 1a6c15e..8262a18 100644
--- a/test/Transforms/LoopVectorize/non-const-n.ll
+++ b/test/Transforms/LoopVectorize/non-const-n.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/nsw-crash.ll b/test/Transforms/LoopVectorize/nsw-crash.ll
new file mode 100644
index 0000000..e5fad14
--- /dev/null
+++ b/test/Transforms/LoopVectorize/nsw-crash.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4
+
+target datalayout =
+"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.7.0"
+
+define void @test() {
+entry:
+ br i1 undef, label %while.end, label %while.body.lr.ph
+
+while.body.lr.ph:
+ br label %while.body
+
+while.body:
+ %it.sroa.0.091 = phi i32* [ undef, %while.body.lr.ph ], [ %incdec.ptr.i, %while.body ]
+ %incdec.ptr.i = getelementptr inbounds i32* %it.sroa.0.091, i64 1
+ %inc32 = add i32 undef, 1 ; <------------- Make sure we don't set the NSW flag on this add of undef.
+ %cmp.i11 = icmp eq i32* %incdec.ptr.i, undef
+ br i1 %cmp.i11, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
+
+
diff --git a/test/Transforms/LoopVectorize/phi-hang.ll b/test/Transforms/LoopVectorize/phi-hang.ll
new file mode 100644
index 0000000..b80d459
--- /dev/null
+++ b/test/Transforms/LoopVectorize/phi-hang.ll
@@ -0,0 +1,29 @@
+; RUN: opt -S -loop-vectorize < %s
+
+; PR15384
+define void @test1(i32 %arg) {
+bb:
+ br label %bb1
+
+bb1: ; preds = %bb5, %bb
+ %tmp = phi i32 [ 1, %bb ], [ %tmp7, %bb5 ]
+ %tmp2 = phi i32 [ %arg, %bb ], [ %tmp9, %bb5 ]
+ br i1 true, label %bb5, label %bb3
+
+bb3: ; preds = %bb1
+ br label %bb4
+
+bb4: ; preds = %bb3
+ br label %bb5
+
+bb5: ; preds = %bb4, %bb1
+ %tmp6 = phi i32 [ 0, %bb4 ], [ %tmp, %bb1 ]
+ %tmp7 = phi i32 [ 0, %bb4 ], [ %tmp6, %bb1 ]
+ %tmp8 = phi i32 [ 0, %bb4 ], [ %tmp, %bb1 ]
+ %tmp9 = add nsw i32 %tmp2, 1
+ %tmp10 = icmp eq i32 %tmp9, 0
+ br i1 %tmp10, label %bb11, label %bb1
+
+bb11: ; preds = %bb5
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/ptr_loops.ll b/test/Transforms/LoopVectorize/ptr_loops.ll
new file mode 100644
index 0000000..25599f8
--- /dev/null
+++ b/test/Transforms/LoopVectorize/ptr_loops.ll
@@ -0,0 +1,74 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S -enable-if-conversion | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+@A = global [36 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35], align 16
+@B = global [36 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35], align 16
+
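+; All three tests below walk one pointer backwards through @A while walking
+; a second pointer forwards through @B; in C terms, roughly (sketch for
+; test1; start offsets, the bound, and the multiply vary per test):
+;
+;   int *p = &A[18], *b = &B[0];
+;   do { *p-- = *b++ * 2; } while (/* bound check */);
+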
+;CHECK:_Z5test1v
+;CHECK: load <4 x i32>
+;CHECK: shufflevector <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+define i32 @_Z5test1v() nounwind uwtable ssp {
+ br label %1
+
+; <label>:1 ; preds = %0, %1
+ %p.02 = phi i32* [ getelementptr inbounds ([36 x i32]* @A, i64 0, i64 18), %0 ], [ %4, %1 ]
+ %b.01 = phi i32* [ getelementptr inbounds ([36 x i32]* @B, i64 0, i64 0), %0 ], [ %5, %1 ]
+ %2 = load i32* %b.01, align 4
+ %3 = shl nsw i32 %2, 1
+ store i32 %3, i32* %p.02, align 4
+ %4 = getelementptr inbounds i32* %p.02, i64 -1
+ %5 = getelementptr inbounds i32* %b.01, i64 1
+ %6 = icmp eq i32* %4, getelementptr ([36 x i32]* @A, i64 128102389400760775, i64 3)
+ br i1 %6, label %7, label %1
+
+; <label>:7 ; preds = %1
+ ret i32 0
+}
+
+;CHECK:_Z5test2v
+;CHECK: load <4 x i32>
+;CHECK: shufflevector <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+define i32 @_Z5test2v() nounwind uwtable ssp {
+ br label %1
+
+; <label>:1 ; preds = %0, %1
+ %p.02 = phi i32* [ getelementptr inbounds ([36 x i32]* @A, i64 0, i64 25), %0 ], [ %3, %1 ]
+ %b.01 = phi i32* [ getelementptr inbounds ([36 x i32]* @B, i64 0, i64 2), %0 ], [ %4, %1 ]
+ %2 = load i32* %b.01, align 4
+ store i32 %2, i32* %p.02, align 4
+ %3 = getelementptr inbounds i32* %p.02, i64 -1
+ %4 = getelementptr inbounds i32* %b.01, i64 1
+ %5 = icmp eq i32* %4, getelementptr inbounds ([36 x i32]* @A, i64 0, i64 18)
+ br i1 %5, label %6, label %1
+
+; <label>:6 ; preds = %1
+ ret i32 0
+}
+
+;CHECK:_Z5test3v
+;CHECK: load <4 x i32>
+;CHECK: shufflevector <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+define i32 @_Z5test3v() nounwind uwtable ssp {
+ br label %1
+
+; <label>:1 ; preds = %0, %1
+ %p.02 = phi i32* [ getelementptr inbounds ([36 x i32]* @A, i64 0, i64 29), %0 ], [ %3, %1 ]
+ %b.01 = phi i32* [ getelementptr inbounds ([36 x i32]* @B, i64 0, i64 5), %0 ], [ %4, %1 ]
+ %2 = load i32* %b.01, align 4
+ store i32 %2, i32* %p.02, align 4
+ %3 = getelementptr inbounds i32* %p.02, i64 -1
+ %4 = getelementptr inbounds i32* %b.01, i64 1
+ %5 = icmp eq i32* %3, getelementptr ([36 x i32]* @A, i64 128102389400760775, i64 3)
+ br i1 %5, label %6, label %1
+
+; <label>:6 ; preds = %1
+ ret i32 0
+}
diff --git a/test/Transforms/LoopVectorize/read-only.ll b/test/Transforms/LoopVectorize/read-only.ll
index b4d1bac..bfaa6d4 100644
--- a/test/Transforms/LoopVectorize/read-only.ll
+++ b/test/Transforms/LoopVectorize/read-only.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/reduction.ll b/test/Transforms/LoopVectorize/reduction.ll
index c1848b3..08b7b27 100644
--- a/test/Transforms/LoopVectorize/reduction.ll
+++ b/test/Transforms/LoopVectorize/reduction.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
@@ -7,6 +7,11 @@ target triple = "x86_64-apple-macosx10.8.0"
;CHECK: phi <4 x i32>
;CHECK: load <4 x i32>
;CHECK: add <4 x i32>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+;CHECK: add <4 x i32>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+;CHECK: add <4 x i32>
+;CHECK: extractelement <4 x i32> %{{.*}}, i32 0
;CHECK: ret i32
define i32 @reduction_sum(i32 %n, i32* noalias nocapture %A, i32* noalias nocapture %B) nounwind uwtable readonly noinline ssp {
%1 = icmp sgt i32 %n, 0
@@ -37,6 +42,11 @@ define i32 @reduction_sum(i32 %n, i32* noalias nocapture %A, i32* noalias nocapt
;CHECK: phi <4 x i32>
;CHECK: load <4 x i32>
;CHECK: mul <4 x i32>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+;CHECK: mul <4 x i32>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+;CHECK: mul <4 x i32>
+;CHECK: extractelement <4 x i32> %{{.*}}, i32 0
;CHECK: ret i32
define i32 @reduction_prod(i32 %n, i32* noalias nocapture %A, i32* noalias nocapture %B) nounwind uwtable readonly noinline ssp {
%1 = icmp sgt i32 %n, 0
@@ -67,6 +77,11 @@ define i32 @reduction_prod(i32 %n, i32* noalias nocapture %A, i32* noalias nocap
;CHECK: phi <4 x i32>
;CHECK: load <4 x i32>
;CHECK: mul nsw <4 x i32>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+;CHECK: add <4 x i32>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+;CHECK: add <4 x i32>
+;CHECK: extractelement <4 x i32> %{{.*}}, i32 0
;CHECK: ret i32
define i32 @reduction_mix(i32 %n, i32* noalias nocapture %A, i32* noalias nocapture %B) nounwind uwtable readonly noinline ssp {
%1 = icmp sgt i32 %n, 0
@@ -95,6 +110,11 @@ define i32 @reduction_mix(i32 %n, i32* noalias nocapture %A, i32* noalias nocapt
;CHECK: @reduction_mul
;CHECK: mul <4 x i32>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+;CHECK: mul <4 x i32>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+;CHECK: mul <4 x i32>
+;CHECK: extractelement <4 x i32> %{{.*}}, i32 0
;CHECK: ret i32
define i32 @reduction_mul(i32 %n, i32* noalias nocapture %A, i32* noalias nocapture %B) nounwind uwtable readonly noinline ssp {
%1 = icmp sgt i32 %n, 0
@@ -124,6 +144,11 @@ define i32 @reduction_mul(i32 %n, i32* noalias nocapture %A, i32* noalias nocapt
;CHECK: @start_at_non_zero
;CHECK: phi <4 x i32>
;CHECK: <i32 120, i32 0, i32 0, i32 0>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+;CHECK: add <4 x i32>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+;CHECK: add <4 x i32>
+;CHECK: extractelement <4 x i32> %{{.*}}, i32 0
;CHECK: ret i32
define i32 @start_at_non_zero(i32* nocapture %in, i32* nocapture %coeff, i32* nocapture %out, i32 %n) nounwind uwtable readonly ssp {
entry:
@@ -152,6 +177,11 @@ for.end: ; preds = %for.body, %entry
;CHECK: @reduction_and
;CHECK: and <4 x i32>
;CHECK: <i32 -1, i32 -1, i32 -1, i32 -1>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+;CHECK: and <4 x i32>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+;CHECK: and <4 x i32>
+;CHECK: extractelement <4 x i32> %{{.*}}, i32 0
;CHECK: ret i32
define i32 @reduction_and(i32 %n, i32* nocapture %A, i32* nocapture %B) nounwind uwtable readonly {
entry:
@@ -179,6 +209,11 @@ for.end: ; preds = %for.body, %entry
;CHECK: @reduction_or
;CHECK: or <4 x i32>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+;CHECK: or <4 x i32>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+;CHECK: or <4 x i32>
+;CHECK: extractelement <4 x i32> %{{.*}}, i32 0
;CHECK: ret i32
define i32 @reduction_or(i32 %n, i32* nocapture %A, i32* nocapture %B) nounwind uwtable readonly {
entry:
@@ -206,6 +241,11 @@ for.end: ; preds = %for.body, %entry
;CHECK: @reduction_xor
;CHECK: xor <4 x i32>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+;CHECK: xor <4 x i32>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+;CHECK: xor <4 x i32>
+;CHECK: extractelement <4 x i32> %{{.*}}, i32 0
;CHECK: ret i32
define i32 @reduction_xor(i32 %n, i32* nocapture %A, i32* nocapture %B) nounwind uwtable readonly {
entry:
@@ -230,3 +270,56 @@ for.end: ; preds = %for.body, %entry
%result.0.lcssa = phi i32 [ 0, %entry ], [ %xor, %for.body ]
ret i32 %result.0.lcssa
}
+
+; In this code the reduction candidate appears on the RHS of the subtraction, so it is not a vectorizable reduction.
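+; In C terms, roughly:  x = A[i] - x;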
+;CHECK: @reduction_sub_rhs
+;CHECK-NOT: phi <4 x i32>
+;CHECK-NOT: sub nsw <4 x i32>
+;CHECK: ret i32
+define i32 @reduction_sub_rhs(i32 %n, i32* noalias nocapture %A) nounwind uwtable readonly {
+entry:
+ %cmp4 = icmp sgt i32 %n, 0
+ br i1 %cmp4, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %x.05 = phi i32 [ %sub, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %sub = sub nsw i32 %0, %x.05
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %x.0.lcssa = phi i32 [ 0, %entry ], [ %sub, %for.body ]
+ ret i32 %x.0.lcssa
+}
+
+
+; In this test the reduction variable is on the LHS of the subtraction, so we can vectorize it.
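+; In C terms, roughly:  x = x - A[i];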
+;CHECK: @reduction_sub_lhs
+;CHECK: phi <4 x i32>
+;CHECK: sub nsw <4 x i32>
+;CHECK: ret i32
+define i32 @reduction_sub_lhs(i32 %n, i32* noalias nocapture %A) nounwind uwtable readonly {
+entry:
+ %cmp4 = icmp sgt i32 %n, 0
+ br i1 %cmp4, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %x.05 = phi i32 [ %sub, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %sub = sub nsw i32 %x.05, %0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %x.0.lcssa = phi i32 [ 0, %entry ], [ %sub, %for.body ]
+ ret i32 %x.0.lcssa
+}
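
The contrasting C source for @reduction_sub_lhs, again reconstructed from the IR rather than taken from the commit:

// Hypothetical C source matching @reduction_sub_lhs. The accumulator
// stays on the LHS (x = x - A[i], i.e. x -= A[i]), which is a plain
// sum reduction over negated inputs, so a vector phi feeding
// 'sub nsw <4 x i32>' is legal -- exactly what the CHECK lines expect.
int reduction_sub_lhs(int n, const int *A) {
  int x = 0;
  for (int i = 0; i < n; ++i)
    x = x - A[i];
  return x;
}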
diff --git a/test/Transforms/LoopVectorize/runtime-check.ll b/test/Transforms/LoopVectorize/runtime-check.ll
index 23933cf..86098a6 100644
--- a/test/Transforms/LoopVectorize/runtime-check.ll
+++ b/test/Transforms/LoopVectorize/runtime-check.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"
@@ -9,6 +9,10 @@ target triple = "x86_64-apple-macosx10.9.0"
; a[i] = b[i] * 3;
; }
+;CHECK: for.body.preheader:
+;CHECK: br i1 %cmp.zero, label %middle.block, label %vector.memcheck
+;CHECK: vector.memcheck:
+;CHECK: br i1 %found.conflict, label %middle.block, label %vector.ph
;CHECK: load <4 x float>
define i32 @foo(float* nocapture %a, float* nocapture %b, i32 %n) nounwind uwtable ssp {
entry:
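
The new CHECK lines pin down the guard structure the vectorizer emits: first a zero-trip-count test, then a runtime memory check that falls back to the scalar loop when the arrays may overlap. A minimal sketch of the overlap test in C++ (illustrative only; found_conflict echoes the IR label, it is not an LLVM API):

// foo writes a[0..n) and reads b[0..n); the vector loop is safe only
// if the two intervals are disjoint.
bool found_conflict(const float *a, const float *b, int n) {
  const float *a_end = a + n;
  const float *b_end = b + n;
  return a < b_end && b < a_end; // half-open intervals overlap
}
// The vector body runs only when the trip count is non-zero and
// found_conflict(a, b, n) is false; otherwise control branches to
// middle.block and the scalar loop.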
diff --git a/test/Transforms/LoopVectorize/same-base-access.ll b/test/Transforms/LoopVectorize/same-base-access.ll
new file mode 100644
index 0000000..1573893
--- /dev/null
+++ b/test/Transforms/LoopVectorize/same-base-access.ll
@@ -0,0 +1,110 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S -enable-if-conversion | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; This is kernel11 from "LivermoreLoops". We can't vectorize it because each
+; iteration reads x[k-1], the value written by the previous iteration (a
+; loop-carried dependence).
+;
+; void kernel11(double *x, double *y, int n) {
+; for ( int k=1 ; k<n ; k++ )
+; x[k] = x[k-1] + y[k];
+; }
+
+; CHECK: @kernel11
+; CHECK-NOT: <4 x double>
+; CHECK: ret
+define i32 @kernel11(double* %x, double* %y, i32 %n) nounwind uwtable ssp {
+ %1 = alloca double*, align 8
+ %2 = alloca double*, align 8
+ %3 = alloca i32, align 4
+ %k = alloca i32, align 4
+ store double* %x, double** %1, align 8
+ store double* %y, double** %2, align 8
+ store i32 %n, i32* %3, align 4
+ store i32 1, i32* %k, align 4
+ br label %4
+
+; <label>:4 ; preds = %25, %0
+ %5 = load i32* %k, align 4
+ %6 = load i32* %3, align 4
+ %7 = icmp slt i32 %5, %6
+ br i1 %7, label %8, label %28
+
+; <label>:8 ; preds = %4
+ %9 = load i32* %k, align 4
+ %10 = sub nsw i32 %9, 1
+ %11 = sext i32 %10 to i64
+ %12 = load double** %1, align 8
+ %13 = getelementptr inbounds double* %12, i64 %11
+ %14 = load double* %13, align 8
+ %15 = load i32* %k, align 4
+ %16 = sext i32 %15 to i64
+ %17 = load double** %2, align 8
+ %18 = getelementptr inbounds double* %17, i64 %16
+ %19 = load double* %18, align 8
+ %20 = fadd double %14, %19
+ %21 = load i32* %k, align 4
+ %22 = sext i32 %21 to i64
+ %23 = load double** %1, align 8
+ %24 = getelementptr inbounds double* %23, i64 %22
+ store double %20, double* %24, align 8
+ br label %25
+
+; <label>:25 ; preds = %8
+ %26 = load i32* %k, align 4
+ %27 = add nsw i32 %26, 1
+ store i32 %27, i32* %k, align 4
+ br label %4
+
+; <label>:28 ; preds = %4
+ ret i32 0
+}
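
To make the dependence concrete, compare the scalar recurrence with a naive 4-wide evaluation (an illustrative sketch, not part of the test):

// Each x[k] consumes the x[k-1] produced one iteration earlier:
//   x[1] = x[0] + y[1];  x[2] = x[1] + y[2];  ...
// A 4-wide version that loaded x[0..3] up front would feed x[2] the
// stale x[1], so its results would diverge from the scalar loop.
void kernel11_scalar(double *x, const double *y, int n) {
  for (int k = 1; k < n; ++k)
    x[k] = x[k - 1] + y[k]; // loop-carried dependence, distance 1
}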
+
+
+
+; We don't vectorize this function because the a[i*7] accesses are not
+; consecutive and must be scalarized, and the scalarized accesses can in
+; theory wrap around and overwrite other elements. At the moment we only
+; allow read/write access to arrays that are consecutive.
+;
+; void foo(int *a) {
+; for (int i=0; i<256; ++i) {
+; int x = a[i*7];
+; if (x>3)
+; x = x*x+x*4;
+; a[i*7] = x+3;
+; }
+; }
+
+; CHECK: @func2
+; CHECK-NOT: <4 x i32>
+; CHECK: ret
+define i32 @func2(i32* nocapture %a) nounwind uwtable ssp {
+ br label %1
+
+; <label>:1 ; preds = %7, %0
+ %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %7 ]
+ %2 = mul nsw i64 %indvars.iv, 7
+ %3 = getelementptr inbounds i32* %a, i64 %2
+ %4 = load i32* %3, align 4
+ %5 = icmp sgt i32 %4, 3
+ br i1 %5, label %6, label %7
+
+; <label>:6 ; preds = %1
+ %tmp = add i32 %4, 4
+ %tmp1 = mul i32 %tmp, %4
+ br label %7
+
+; <label>:7 ; preds = %6, %1
+ %x.0 = phi i32 [ %tmp1, %6 ], [ %4, %1 ]
+ %8 = add nsw i32 %x.0, 3
+ store i32 %8, i32* %3, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 256
+ br i1 %exitcond, label %9, label %1
+
+; <label>:9 ; preds = %7
+ ret i32 0
+}
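
Spelled out for one vector iteration (a sketch under the VF=4 assumption; one_vector_iteration is a hypothetical name):

// With VF=4, one vector iteration of func2 touches a[7i], a[7(i+1)],
// a[7(i+2)], a[7(i+3)]: stride-7 accesses, not a consecutive block, so
// each lane's load and store is scalarized as below, and the vectorizer
// cannot prove the scalarized addresses stay disjoint.
void one_vector_iteration(int *a, long i) {
  for (int lane = 0; lane < 4; ++lane) {
    int x = a[(i + lane) * 7]; // non-consecutive load
    if (x > 3)
      x = x * x + x * 4;
    a[(i + lane) * 7] = x + 3; // non-consecutive store
  }
}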
diff --git a/test/Transforms/LoopVectorize/scalar-select.ll b/test/Transforms/LoopVectorize/scalar-select.ll
index e537bde..7a14d24 100644
--- a/test/Transforms/LoopVectorize/scalar-select.ll
+++ b/test/Transforms/LoopVectorize/scalar-select.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/simple-unroll.ll b/test/Transforms/LoopVectorize/simple-unroll.ll
new file mode 100644
index 0000000..7e2dd5f
--- /dev/null
+++ b/test/Transforms/LoopVectorize/simple-unroll.ll
@@ -0,0 +1,39 @@
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-unroll=2 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+@a = common global [2048 x i32] zeroinitializer, align 16
+
+; This is the loop:
+; for (i=0; i<n; i++){
+; a[i] += i;
+; }
+;CHECK: @inc
+;CHECK: load <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: add nsw <4 x i32>
+;CHECK: add nsw <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret void
+define void @inc(i32 %n) nounwind uwtable noinline ssp {
+ %1 = icmp sgt i32 %n, 0
+ br i1 %1, label %.lr.ph, label %._crit_edge
+
+.lr.ph: ; preds = %0, %.lr.ph
+ %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
+ %2 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %3 = load i32* %2, align 4
+ %4 = trunc i64 %indvars.iv to i32
+ %5 = add nsw i32 %3, %4
+ store i32 %5, i32* %2, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+ ret void
+}
+
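The paired CHECK lines above match two interleaved copies of the vector body. Conceptually the generated loop has this shape (a sketch, not the exact IR):

// VF=4, unroll (interleave) factor 2: each iteration performs two
// <4 x i32> loads, two adds, and two stores, advancing by 8 elements.
void inc_unrolled_by_2(int *a, int n) {
  int i = 0;
  for (; i + 8 <= n; i += 8) {
    for (int lane = 0; lane < 4; ++lane)  // first <4 x i32> part
      a[i + lane] += i + lane;
    for (int lane = 0; lane < 4; ++lane)  // second <4 x i32> part
      a[i + 4 + lane] += i + 4 + lane;
  }
  for (; i < n; ++i)                      // scalar remainder
    a[i] += i;
}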
diff --git a/test/Transforms/LoopVectorize/small-loop.ll b/test/Transforms/LoopVectorize/small-loop.ll
index 4a6e4b2..fa83dba 100644
--- a/test/Transforms/LoopVectorize/small-loop.ll
+++ b/test/Transforms/LoopVectorize/small-loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/start-non-zero.ll b/test/Transforms/LoopVectorize/start-non-zero.ll
index 5aa3bc0..998001c 100644
--- a/test/Transforms/LoopVectorize/start-non-zero.ll
+++ b/test/Transforms/LoopVectorize/start-non-zero.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -instcombine -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/struct_access.ll b/test/Transforms/LoopVectorize/struct_access.ll
new file mode 100644
index 0000000..de65d0d
--- /dev/null
+++ b/test/Transforms/LoopVectorize/struct_access.ll
@@ -0,0 +1,50 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+%struct.coordinate = type { i32, i32 }
+
+; Make sure that we don't generate a wide load when accessing the struct.
+; struct coordinate {
+; int x;
+; int y;
+; };
+;
+;
+; int foo(struct coordinate *A, int n) {
+;
+; int sum = 0;
+; for (int i = 0; i < n; ++i)
+; sum += A[i].x;
+;
+; return sum;
+; }
+
+;CHECK: @foo
+;CHECK-NOT: load <4 x i32>
+;CHECK: ret
+define i32 @foo(%struct.coordinate* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
+entry:
+ %cmp4 = icmp sgt i32 %n, 0
+ br i1 %cmp4, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %sum.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
+ %x = getelementptr inbounds %struct.coordinate* %A, i64 %indvars.iv, i32 0
+ %0 = load i32* %x, align 4, !tbaa !0
+ %add = add nsw i32 %0, %sum.05
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %sum.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ ret i32 %sum.0.lcssa
+}
+
+!0 = metadata !{metadata !"int", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
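
Why the wide load never forms: the x fields are interleaved with the y fields, so four consecutive A[i].x values are not contiguous (an illustrative note, not part of the test):

// sizeof(coordinate) == 8, so successive A[i].x loads hit byte offsets
// 0, 8, 16, 24, ... -- stride two in i32 units. A single <4 x i32> load
// from &A[0].x would also pull in the y fields, so the accesses must
// stay scalar.
struct coordinate { int x; int y; };

int sum_x_scalar(const coordinate *A, int n) {
  int sum = 0;
  for (int i = 0; i < n; ++i)
    sum += A[i].x;
  return sum;
}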
diff --git a/test/Transforms/LoopVectorize/vectorize-once.ll b/test/Transforms/LoopVectorize/vectorize-once.ll
new file mode 100644
index 0000000..ac16948
--- /dev/null
+++ b/test/Transforms/LoopVectorize/vectorize-once.ll
@@ -0,0 +1,75 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S -simplifycfg | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+;
+; We want to make sure that we vectorize the scalar loop only once,
+; even if the pass manager runs the vectorizer multiple times due to inlining.
+
+
+; This test checks that we add metadata to vectorized loops.
+; CHECK: _Z4foo1Pii
+; CHECK: <4 x i32>
+; CHECK: llvm.vectorizer.already_vectorized
+; CHECK: ret
+
+; This test comes from the loop:
+;
+;int foo (int *A, int n) {
+; return std::accumulate(A, A + n, 0);
+;}
+define i32 @_Z4foo1Pii(i32* %A, i32 %n) #0 {
+entry:
+ %idx.ext = sext i32 %n to i64
+ %add.ptr = getelementptr inbounds i32* %A, i64 %idx.ext
+ %cmp3.i = icmp eq i32 %n, 0
+ br i1 %cmp3.i, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %for.body.i
+
+for.body.i: ; preds = %entry, %for.body.i
+ %__init.addr.05.i = phi i32 [ %add.i, %for.body.i ], [ 0, %entry ]
+ %__first.addr.04.i = phi i32* [ %incdec.ptr.i, %for.body.i ], [ %A, %entry ]
+ %0 = load i32* %__first.addr.04.i, align 4, !tbaa !0
+ %add.i = add nsw i32 %0, %__init.addr.05.i
+ %incdec.ptr.i = getelementptr inbounds i32* %__first.addr.04.i, i64 1
+ %cmp.i = icmp eq i32* %incdec.ptr.i, %add.ptr
+ br i1 %cmp.i, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %for.body.i
+
+_ZSt10accumulateIPiiET0_T_S2_S1_.exit: ; preds = %for.body.i, %entry
+ %__init.addr.0.lcssa.i = phi i32 [ 0, %entry ], [ %add.i, %for.body.i ]
+ ret i32 %__init.addr.0.lcssa.i
+}
+
+; This test checks that we don't vectorize loops that are marked with the "already vectorized" metadata.
+; CHECK: _Z4foo2Pii
+; CHECK-NOT: <4 x i32>
+; CHECK: llvm.vectorizer.already_vectorized
+; CHECK: ret
+define i32 @_Z4foo2Pii(i32* %A, i32 %n) #0 {
+entry:
+ %idx.ext = sext i32 %n to i64
+ %add.ptr = getelementptr inbounds i32* %A, i64 %idx.ext
+ %cmp3.i = icmp eq i32 %n, 0
+ br i1 %cmp3.i, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %for.body.i
+
+for.body.i: ; preds = %entry, %for.body.i
+ %__init.addr.05.i = phi i32 [ %add.i, %for.body.i ], [ 0, %entry ]
+ %__first.addr.04.i = phi i32* [ %incdec.ptr.i, %for.body.i ], [ %A, %entry ]
+ %0 = load i32* %__first.addr.04.i, align 4, !tbaa !0
+ %add.i = add nsw i32 %0, %__init.addr.05.i
+ %incdec.ptr.i = getelementptr inbounds i32* %__first.addr.04.i, i64 1
+ %cmp.i = icmp eq i32* %incdec.ptr.i, %add.ptr
+ br i1 %cmp.i, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %for.body.i, !llvm.vectorizer.already_vectorized !3
+
+_ZSt10accumulateIPiiET0_T_S2_S1_.exit: ; preds = %for.body.i, %entry
+ %__init.addr.0.lcssa.i = phi i32 [ 0, %entry ], [ %add.i, %for.body.i ]
+ ret i32 %__init.addr.0.lcssa.i
+}
+
+attributes #0 = { nounwind readonly ssp uwtable "fp-contract-model"="standard" "no-frame-pointer-elim" "no-frame-pointer-elim-non-leaf" "realign-stack" "relocation-model"="pic" "ssp-buffers-size"="8" }
+
+!0 = metadata !{metadata !"int", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!3 = metadata !{}
+
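The mechanism under test: after vectorizing, the pass tags the loop's backedge branch with the empty llvm.vectorizer.already_vectorized metadata (the !3 = metadata !{} node above) and skips loops that already carry it. A minimal C++ sketch of that check-and-mark pattern; the exact calls approximate the 3.3-era API and are not the pass's actual code:

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"

static const char *const AlreadyVectorized =
    "llvm.vectorizer.already_vectorized";

// Skip loops whose latch terminator already carries the marker.
bool shouldVectorize(llvm::Loop *L) {
  llvm::BasicBlock *Latch = L->getLoopLatch();
  return Latch && !Latch->getTerminator()->getMetadata(AlreadyVectorized);
}

// Attach an empty MDNode (printed as !{}) after vectorizing.
void markVectorized(llvm::Loop *L, llvm::LLVMContext &Ctx) {
  llvm::BasicBlock *Latch = L->getLoopLatch();
  Latch->getTerminator()->setMetadata(
      AlreadyVectorized,
      llvm::MDNode::get(Ctx, llvm::ArrayRef<llvm::Value *>()));
}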
diff --git a/test/Transforms/LoopVectorize/write-only.ll b/test/Transforms/LoopVectorize/write-only.ll
index eb02760..54cbe8d 100644
--- a/test/Transforms/LoopVectorize/write-only.ll
+++ b/test/Transforms/LoopVectorize/write-only.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"