Diffstat (limited to 'test/Transforms/SLPVectorizer')
-rw-r--r-- | test/Transforms/SLPVectorizer/X86/barriercall.ll    |  32
-rw-r--r-- | test/Transforms/SLPVectorizer/X86/cast.ll           |  38
-rw-r--r-- | test/Transforms/SLPVectorizer/X86/compare-reduce.ll |  53
-rw-r--r-- | test/Transforms/SLPVectorizer/X86/diamond.ll        |  78
-rw-r--r-- | test/Transforms/SLPVectorizer/X86/flag.ll           |  51
-rw-r--r-- | test/Transforms/SLPVectorizer/X86/hoist.ll          |  59
-rw-r--r-- | test/Transforms/SLPVectorizer/X86/lit.local.cfg     |   6
-rw-r--r-- | test/Transforms/SLPVectorizer/X86/loopinvariant.ll  |  69
-rw-r--r-- | test/Transforms/SLPVectorizer/X86/multi_user.ll     |  47
-rw-r--r-- | test/Transforms/SLPVectorizer/X86/reduction.ll      |  47
-rw-r--r-- | test/Transforms/SLPVectorizer/X86/reduction2.ll     |  32
-rw-r--r-- | test/Transforms/SLPVectorizer/X86/saxpy.ll          |  45
-rw-r--r-- | test/Transforms/SLPVectorizer/X86/simple-loop.ll    | 100
-rw-r--r-- | test/Transforms/SLPVectorizer/X86/simplebb.ll       |  25
-rw-r--r-- | test/Transforms/SLPVectorizer/X86/vector.ll         |  14
-rw-r--r-- | test/Transforms/SLPVectorizer/lit.local.cfg         |   1
16 files changed, 697 insertions, 0 deletions
diff --git a/test/Transforms/SLPVectorizer/X86/barriercall.ll b/test/Transforms/SLPVectorizer/X86/barriercall.ll
new file mode 100644
index 0000000..04eb8f9
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/barriercall.ll
@@ -0,0 +1,32 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+;CHECK: @foo
+;CHECK: store <4 x i32>
+;CHECK: ret
+define i32 @foo(i32* nocapture %A, i32 %n) {
+entry:
+  %call = tail call i32 (...)* @bar() #2
+  %mul = mul nsw i32 %n, 5
+  %add = add nsw i32 %mul, 9
+  store i32 %add, i32* %A, align 4
+  %mul1 = mul nsw i32 %n, 9
+  %add2 = add nsw i32 %mul1, 9
+  %arrayidx3 = getelementptr inbounds i32* %A, i64 1
+  store i32 %add2, i32* %arrayidx3, align 4
+  %mul4 = shl i32 %n, 3
+  %add5 = add nsw i32 %mul4, 9
+  %arrayidx6 = getelementptr inbounds i32* %A, i64 2
+  store i32 %add5, i32* %arrayidx6, align 4
+  %mul7 = mul nsw i32 %n, 10
+  %add8 = add nsw i32 %mul7, 9
+  %arrayidx9 = getelementptr inbounds i32* %A, i64 3
+  store i32 %add8, i32* %arrayidx9, align 4
+  ret i32 undef
+}
+
+  ; We can still vectorize the stores below.
+
+declare i32 @bar(...)
diff --git a/test/Transforms/SLPVectorizer/X86/cast.ll b/test/Transforms/SLPVectorizer/X86/cast.ll
new file mode 100644
index 0000000..344dbbc
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/cast.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; int foo(int * restrict A, char * restrict B) {
+; A[0] = B[0];
+; A[1] = B[1];
+; A[2] = B[2];
+; A[3] = B[3];
+; }
+;CHECK: @foo
+;CHECK: load <4 x i8>
+;CHECK: sext
+;CHECK: store <4 x i32>
+define i32 @foo(i32* noalias nocapture %A, i8* noalias nocapture %B) {
+entry:
+  %0 = load i8* %B, align 1
+  %conv = sext i8 %0 to i32
+  store i32 %conv, i32* %A, align 4
+  %arrayidx2 = getelementptr inbounds i8* %B, i64 1
+  %1 = load i8* %arrayidx2, align 1
+  %conv3 = sext i8 %1 to i32
+  %arrayidx4 = getelementptr inbounds i32* %A, i64 1
+  store i32 %conv3, i32* %arrayidx4, align 4
+  %arrayidx5 = getelementptr inbounds i8* %B, i64 2
+  %2 = load i8* %arrayidx5, align 1
+  %conv6 = sext i8 %2 to i32
+  %arrayidx7 = getelementptr inbounds i32* %A, i64 2
+  store i32 %conv6, i32* %arrayidx7, align 4
+  %arrayidx8 = getelementptr inbounds i8* %B, i64 3
+  %3 = load i8* %arrayidx8, align 1
+  %conv9 = sext i8 %3 to i32
+  %arrayidx10 = getelementptr inbounds i32* %A, i64 3
+  store i32 %conv9, i32* %arrayidx10, align 4
+  ret i32 undef
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/compare-reduce.ll b/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
new file mode 100644
index 0000000..05f8e61
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
@@ -0,0 +1,53 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.7.0"
+
+@.str = private unnamed_addr constant [6 x i8] c"bingo\00", align 1
+
+;CHECK: @reduce_compare
+;CHECK: load <2 x double>
+;CHECK: fmul <2 x double>
+;CHECK: fmul <2 x double>
+;CHECK: fadd <2 x double>
+;CHECK: extractelement
+;CHECK: extractelement
+;CHECK: ret
+define void @reduce_compare(double* nocapture %A, i32 %n) {
+entry:
+  %conv = sitofp i32 %n to double
+  br label %for.body
+
+for.body:                                         ; preds = %for.inc, %entry
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
+  %0 = shl nsw i64 %indvars.iv, 1
+  %arrayidx = getelementptr inbounds double* %A, i64 %0
+  %1 = load double* %arrayidx, align 8
+  %mul1 = fmul double %conv, %1
+  %mul2 = fmul double %mul1, 7.000000e+00
+  %add = fadd double %mul2, 5.000000e+00
+  %2 = or i64 %0, 1
+  %arrayidx6 = getelementptr inbounds double* %A, i64 %2
+  %3 = load double* %arrayidx6, align 8
+  %mul8 = fmul double %conv, %3
+  %mul9 = fmul double %mul8, 4.000000e+00
+  %add10 = fadd double %mul9, 9.000000e+00
+  %cmp11 = fcmp ogt double %add, %add10
+  br i1 %cmp11, label %if.then, label %for.inc
+
+if.then:                                          ; preds = %for.body
+  %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([6 x i8]* @.str, i64 0, i64 0))
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.body, %if.then
+  %indvars.iv.next = add i64 %indvars.iv, 1
+  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+  %exitcond = icmp eq i32 %lftr.wideiv, 100
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.inc
+  ret void
+}
+
+declare i32 @printf(i8* nocapture, ...)
+
diff --git a/test/Transforms/SLPVectorizer/X86/diamond.ll b/test/Transforms/SLPVectorizer/X86/diamond.ll
new file mode 100644
index 0000000..8e85cb6
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/diamond.ll
@@ -0,0 +1,78 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; int foo(int * restrict B, int * restrict A, int n, int m) {
+; B[0] = n * A[0] + m * A[0];
+; B[1] = n * A[1] + m * A[1];
+; B[2] = n * A[2] + m * A[2];
+; B[3] = n * A[3] + m * A[3];
+; return 0;
+; }
+
+; CHECK: @foo
+; CHECK: load <4 x i32>
+; CHECK: mul <4 x i32>
+; CHECK: store <4 x i32>
+; CHECK: ret
+define i32 @foo(i32* noalias nocapture %B, i32* noalias nocapture %A, i32 %n, i32 %m) #0 {
+entry:
+  %0 = load i32* %A, align 4
+  %mul238 = add i32 %m, %n
+  %add = mul i32 %0, %mul238
+  store i32 %add, i32* %B, align 4
+  %arrayidx4 = getelementptr inbounds i32* %A, i64 1
+  %1 = load i32* %arrayidx4, align 4
+  %add8 = mul i32 %1, %mul238
+  %arrayidx9 = getelementptr inbounds i32* %B, i64 1
+  store i32 %add8, i32* %arrayidx9, align 4
+  %arrayidx10 = getelementptr inbounds i32* %A, i64 2
+  %2 = load i32* %arrayidx10, align 4
+  %add14 = mul i32 %2, %mul238
+  %arrayidx15 = getelementptr inbounds i32* %B, i64 2
+  store i32 %add14, i32* %arrayidx15, align 4
+  %arrayidx16 = getelementptr inbounds i32* %A, i64 3
+  %3 = load i32* %arrayidx16, align 4
+  %add20 = mul i32 %3, %mul238
+  %arrayidx21 = getelementptr inbounds i32* %B, i64 3
+  store i32 %add20, i32* %arrayidx21, align 4
+  ret i32 0
+}
+
+
+; int foo_fail(int * restrict B, int * restrict A, int n, int m) {
+; B[0] = n * A[0] + m * A[0];
+; B[1] = n * A[1] + m * A[1];
+; B[2] = n * A[2] + m * A[2];
+; B[3] = n * A[3] + m * A[3];
+; return A[0];
+; }
+
+; CHECK: @foo_fail
+; CHECK-NOT: load <4 x i32>
+; CHECK: ret
+define i32 @foo_fail(i32* noalias nocapture %B, i32* noalias nocapture %A, i32 %n, i32 %m) {
+entry:
+  %0 = load i32* %A, align 4
+  %mul238 = add i32 %m, %n
+  %add = mul i32 %0, %mul238
+  store i32 %add, i32* %B, align 4
+  %arrayidx4 = getelementptr inbounds i32* %A, i64 1
+  %1 = load i32* %arrayidx4, align 4
+  %add8 = mul i32 %1, %mul238
+  %arrayidx9 = getelementptr inbounds i32* %B, i64 1
+  store i32 %add8, i32* %arrayidx9, align 4
+  %arrayidx10 = getelementptr inbounds i32* %A, i64 2
+  %2 = load i32* %arrayidx10, align 4
+  %add14 = mul i32 %2, %mul238
+  %arrayidx15 = getelementptr inbounds i32* %B, i64 2
+  store i32 %add14, i32* %arrayidx15, align 4
+  %arrayidx16 = getelementptr inbounds i32* %A, i64 3
+  %3 = load i32* %arrayidx16, align 4
+  %add20 = mul i32 %3, %mul238
+  %arrayidx21 = getelementptr inbounds i32* %B, i64 3
+  store i32 %add20, i32* %arrayidx21, align 4
+  ret i32 %0  ;<--------- This value has multiple users and can't be vectorized.
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/flag.ll b/test/Transforms/SLPVectorizer/X86/flag.ll
new file mode 100644
index 0000000..3ca5407
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/flag.ll
@@ -0,0 +1,51 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -slp-threshold=1000 -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; Check that the command line flag works.
+;CHECK:rollable
+;CHECK-NOT:load <4 x i32>
+;CHECK: ret
+
+define i32 @rollable(i32* noalias nocapture %in, i32* noalias nocapture %out, i64 %n) {
+  %1 = icmp eq i64 %n, 0
+  br i1 %1, label %._crit_edge, label %.lr.ph
+
+.lr.ph:                                           ; preds = %0, %.lr.ph
+  %i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
+  %2 = shl i64 %i.019, 2
+  %3 = getelementptr inbounds i32* %in, i64 %2
+  %4 = load i32* %3, align 4
+  %5 = or i64 %2, 1
+  %6 = getelementptr inbounds i32* %in, i64 %5
+  %7 = load i32* %6, align 4
+  %8 = or i64 %2, 2
+  %9 = getelementptr inbounds i32* %in, i64 %8
+  %10 = load i32* %9, align 4
+  %11 = or i64 %2, 3
+  %12 = getelementptr inbounds i32* %in, i64 %11
+  %13 = load i32* %12, align 4
+  %14 = mul i32 %4, 7
+  %15 = add i32 %14, 7
+  %16 = mul i32 %7, 7
+  %17 = add i32 %16, 14
+  %18 = mul i32 %10, 7
+  %19 = add i32 %18, 21
+  %20 = mul i32 %13, 7
+  %21 = add i32 %20, 28
+  %22 = getelementptr inbounds i32* %out, i64 %2
+  store i32 %15, i32* %22, align 4
+  %23 = getelementptr inbounds i32* %out, i64 %5
+  store i32 %17, i32* %23, align 4
+  %24 = getelementptr inbounds i32* %out, i64 %8
+  store i32 %19, i32* %24, align 4
+  %25 = getelementptr inbounds i32* %out, i64 %11
+  store i32 %21, i32* %25, align 4
+  %26 = add i64 %i.019, 1
+  %exitcond = icmp eq i64 %26, %n
+  br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge:                                      ; preds = %.lr.ph, %0
+  ret i32 undef
+}
diff --git a/test/Transforms/SLPVectorizer/X86/hoist.ll b/test/Transforms/SLPVectorizer/X86/hoist.ll
new file mode 100644
index 0000000..5074cea
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/hoist.ll
@@ -0,0 +1,59 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
+target triple = "i386-apple-macosx10.9.0"
+
+;int foo(int *A, int n, int k) {
+; for (int i=0; i < 10000; i+=4) {
+; A[i] += n;
+; A[i+1] += k;
+; A[i+2] += n;
+; A[i+3] += k;
+; }
+;}
+
+; preheader:
+;CHECK: entry
+;CHECK-NEXT: insertelement
+;CHECK-NEXT: insertelement
+;CHECK-NEXT: insertelement
+;CHECK-NEXT: insertelement
+; loop body:
+;CHECK: phi
+;CHECK: load <4 x i32>
+;CHECK: add <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+define i32 @foo(i32* nocapture %A, i32 %n, i32 %k) {
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %i.024 = phi i32 [ 0, %entry ], [ %add10, %for.body ]
+  %arrayidx = getelementptr inbounds i32* %A, i32 %i.024
+  %0 = load i32* %arrayidx, align 4
+  %add = add nsw i32 %0, %n
+  store i32 %add, i32* %arrayidx, align 4
+  %add121 = or i32 %i.024, 1
+  %arrayidx2 = getelementptr inbounds i32* %A, i32 %add121
+  %1 = load i32* %arrayidx2, align 4
+  %add3 = add nsw i32 %1, %k
+  store i32 %add3, i32* %arrayidx2, align 4
+  %add422 = or i32 %i.024, 2
+  %arrayidx5 = getelementptr inbounds i32* %A, i32 %add422
+  %2 = load i32* %arrayidx5, align 4
+  %add6 = add nsw i32 %2, %n
+  store i32 %add6, i32* %arrayidx5, align 4
+  %add723 = or i32 %i.024, 3
+  %arrayidx8 = getelementptr inbounds i32* %A, i32 %add723
+  %3 = load i32* %arrayidx8, align 4
+  %add9 = add nsw i32 %3, %k
+  store i32 %add9, i32* %arrayidx8, align 4
+  %add10 = add nsw i32 %i.024, 4
+  %cmp = icmp slt i32 %add10, 10000
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body
+  ret i32 undef
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/lit.local.cfg b/test/Transforms/SLPVectorizer/X86/lit.local.cfg
new file mode 100644
index 0000000..a8ad0f1
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/lit.local.cfg
@@ -0,0 +1,6 @@
+config.suffixes = ['.ll', '.c', '.cpp']
+
+targets = set(config.root.targets_to_build.split())
+if not 'X86' in targets:
+    config.unsupported = True
+
diff --git a/test/Transforms/SLPVectorizer/X86/loopinvariant.ll b/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
new file mode 100644
index 0000000..4a37fce
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
@@ -0,0 +1,69 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+;CHECK: @foo
+;CHECK: load <4 x i32>
+;CHECK: add <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: add <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+define i32 @foo(i32* nocapture %A, i32 %n) #0 {
+entry:
+  %cmp62 = icmp sgt i32 %n, 0
+  br i1 %cmp62, label %for.body, label %for.end
+
+for.body:                                         ; preds = %entry, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+  %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+  %0 = load i32* %arrayidx, align 4
+  %add1 = add nsw i32 %0, %n
+  store i32 %add1, i32* %arrayidx, align 4
+  %1 = or i64 %indvars.iv, 1
+  %arrayidx4 = getelementptr inbounds i32* %A, i64 %1
+  %2 = load i32* %arrayidx4, align 4
+  %add5 = add nsw i32 %2, %n
+  store i32 %add5, i32* %arrayidx4, align 4
+  %3 = or i64 %indvars.iv, 2
+  %arrayidx8 = getelementptr inbounds i32* %A, i64 %3
+  %4 = load i32* %arrayidx8, align 4
+  %add9 = add nsw i32 %4, %n
+  store i32 %add9, i32* %arrayidx8, align 4
+  %5 = or i64 %indvars.iv, 3
+  %arrayidx12 = getelementptr inbounds i32* %A, i64 %5
+  %6 = load i32* %arrayidx12, align 4
+  %add13 = add nsw i32 %6, %n
+  store i32 %add13, i32* %arrayidx12, align 4
+  %7 = or i64 %indvars.iv, 4
+  %arrayidx16 = getelementptr inbounds i32* %A, i64 %7
+  %8 = load i32* %arrayidx16, align 4
+  %add17 = add nsw i32 %8, %n
+  store i32 %add17, i32* %arrayidx16, align 4
+  %9 = or i64 %indvars.iv, 5
+  %arrayidx20 = getelementptr inbounds i32* %A, i64 %9
+  %10 = load i32* %arrayidx20, align 4
+  %add21 = add nsw i32 %10, %n
+  store i32 %add21, i32* %arrayidx20, align 4
+  %11 = or i64 %indvars.iv, 6
+  %arrayidx24 = getelementptr inbounds i32* %A, i64 %11
+  %12 = load i32* %arrayidx24, align 4
+  %add25 = add nsw i32 %12, %n
+  store i32 %add25, i32* %arrayidx24, align 4
+  %13 = or i64 %indvars.iv, 7
+  %arrayidx28 = getelementptr inbounds i32* %A, i64 %13
+  %14 = load i32* %arrayidx28, align 4
+  %add29 = add nsw i32 %14, %n
+  store i32 %add29, i32* %arrayidx28, align 4
+  %indvars.iv.next = add i64 %indvars.iv, 8
+  %15 = trunc i64 %indvars.iv.next to i32
+  %cmp = icmp slt i32 %15, %n
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body, %entry
+  ret i32 undef
+}
+
+attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/Transforms/SLPVectorizer/X86/multi_user.ll b/test/Transforms/SLPVectorizer/X86/multi_user.ll
new file mode 100644
index 0000000..aaa6063
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/multi_user.ll
@@ -0,0 +1,47 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.7.0"
+
+;int foo (int *A, int n) {
+; A[0] += n * 5 + 7;
+; A[1] += n * 5 + 8;
+; A[2] += n * 5 + 9;
+; A[3] += n * 5 + 10;
+; A[4] += n * 5 + 11;
+;}
+
+;CHECK: @foo
+;CHECK: insertelement <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: add <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+define i32 @foo(i32* nocapture %A, i32 %n) {
+  %1 = mul nsw i32 %n, 5
+  %2 = add nsw i32 %1, 7
+  %3 = load i32* %A, align 4
+  %4 = add nsw i32 %2, %3
+  store i32 %4, i32* %A, align 4
+  %5 = add nsw i32 %1, 8
+  %6 = getelementptr inbounds i32* %A, i64 1
+  %7 = load i32* %6, align 4
+  %8 = add nsw i32 %5, %7
+  store i32 %8, i32* %6, align 4
+  %9 = add nsw i32 %1, 9
+  %10 = getelementptr inbounds i32* %A, i64 2
+  %11 = load i32* %10, align 4
+  %12 = add nsw i32 %9, %11
+  store i32 %12, i32* %10, align 4
+  %13 = add nsw i32 %1, 10
+  %14 = getelementptr inbounds i32* %A, i64 3
+  %15 = load i32* %14, align 4
+  %16 = add nsw i32 %13, %15
+  store i32 %16, i32* %14, align 4
+  %17 = add nsw i32 %1, 11
+  %18 = getelementptr inbounds i32* %A, i64 4
+  %19 = load i32* %18, align 4
+  %20 = add nsw i32 %17, %19
+  store i32 %20, i32* %18, align 4
+  ret i32 undef
+}
diff --git a/test/Transforms/SLPVectorizer/X86/reduction.ll b/test/Transforms/SLPVectorizer/X86/reduction.ll
new file mode 100644
index 0000000..70b7c3a
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/reduction.ll
@@ -0,0 +1,47 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
+target triple = "i386-apple-macosx10.8.0"
+
+; int foo(double *A, int n, int m) {
+; double sum = 0, v1 = 2, v0 = 3;
+; for (int i=0; i < n; ++i)
+; sum += 7*A[i*2] + 7*A[i*2+1];
+; return sum;
+; }
+
+;CHECK: reduce
+;CHECK: load <2 x double>
+;CHECK: fmul <2 x double>
+;CHECK: ret
+define i32 @reduce(double* nocapture %A, i32 %n, i32 %m) {
+entry:
+  %cmp13 = icmp sgt i32 %n, 0
+  br i1 %cmp13, label %for.body, label %for.end
+
+for.body:                                         ; preds = %entry, %for.body
+  %i.015 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+  %sum.014 = phi double [ %add6, %for.body ], [ 0.000000e+00, %entry ]
+  %mul = shl nsw i32 %i.015, 1
+  %arrayidx = getelementptr inbounds double* %A, i32 %mul
+  %0 = load double* %arrayidx, align 4
+  %mul1 = fmul double %0, 7.000000e+00
+  %add12 = or i32 %mul, 1
+  %arrayidx3 = getelementptr inbounds double* %A, i32 %add12
+  %1 = load double* %arrayidx3, align 4
+  %mul4 = fmul double %1, 7.000000e+00
+  %add5 = fadd double %mul1, %mul4
+  %add6 = fadd double %sum.014, %add5
+  %inc = add nsw i32 %i.015, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.for.end_crit_edge, label %for.body
+
+for.cond.for.end_crit_edge:                       ; preds = %for.body
+  %phitmp = fptosi double %add6 to i32
+  br label %for.end
+
+for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
+  %sum.0.lcssa = phi i32 [ %phitmp, %for.cond.for.end_crit_edge ], [ 0, %entry ]
+  ret i32 %sum.0.lcssa
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/reduction2.ll b/test/Transforms/SLPVectorizer/X86/reduction2.ll
new file mode 100644
index 0000000..7aa7d7e
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/reduction2.ll
@@ -0,0 +1,32 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
+target triple = "i386-apple-macosx10.8.0"
+
+;CHECK: @foo
+;CHECK: load <2 x double>
+;CHECK: ret
+define double @foo(double* nocapture %D) {
+  br label %1
+
+; <label>:1                                       ; preds = %1, %0
+  %i.02 = phi i32 [ 0, %0 ], [ %10, %1 ]
+  %sum.01 = phi double [ 0.000000e+00, %0 ], [ %9, %1 ]
+  %2 = shl nsw i32 %i.02, 1
+  %3 = getelementptr inbounds double* %D, i32 %2
+  %4 = load double* %3, align 4
+  %A4 = fmul double %4, %4
+  %5 = or i32 %2, 1
+  %6 = getelementptr inbounds double* %D, i32 %5
+  %7 = load double* %6, align 4
+  %A7 = fmul double %7, %7
+  %8 = fadd double %A4, %A7
+  %9 = fadd double %sum.01, %8
+  %10 = add nsw i32 %i.02, 1
+  %exitcond = icmp eq i32 %10, 100
+  br i1 %exitcond, label %11, label %1
+
+; <label>:11                                      ; preds = %1
+  ret double %9
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/saxpy.ll b/test/Transforms/SLPVectorizer/X86/saxpy.ll
new file mode 100644
index 0000000..b520913
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/saxpy.ll
@@ -0,0 +1,45 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; SLP vectorization example from http://cs.stanford.edu/people/eschkufz/research/asplos291-schkufza.pdf
+;CHECK: SAXPY
+;CHECK: mul <4 x i32>
+;CHECK: ret
+
+define void @SAXPY(i32* noalias nocapture %x, i32* noalias nocapture %y, i32 %a, i64 %i) {
+  %1 = getelementptr inbounds i32* %x, i64 %i
+  %2 = load i32* %1, align 4
+  %3 = mul nsw i32 %2, %a
+  %4 = getelementptr inbounds i32* %y, i64 %i
+  %5 = load i32* %4, align 4
+  %6 = add nsw i32 %3, %5
+  store i32 %6, i32* %1, align 4
+  %7 = add i64 %i, 1
+  %8 = getelementptr inbounds i32* %x, i64 %7
+  %9 = load i32* %8, align 4
+  %10 = mul nsw i32 %9, %a
+  %11 = getelementptr inbounds i32* %y, i64 %7
+  %12 = load i32* %11, align 4
+  %13 = add nsw i32 %10, %12
+  store i32 %13, i32* %8, align 4
+  %14 = add i64 %i, 2
+  %15 = getelementptr inbounds i32* %x, i64 %14
+  %16 = load i32* %15, align 4
+  %17 = mul nsw i32 %16, %a
+  %18 = getelementptr inbounds i32* %y, i64 %14
+  %19 = load i32* %18, align 4
+  %20 = add nsw i32 %17, %19
+  store i32 %20, i32* %15, align 4
+  %21 = add i64 %i, 3
+  %22 = getelementptr inbounds i32* %x, i64 %21
+  %23 = load i32* %22, align 4
+  %24 = mul nsw i32 %23, %a
+  %25 = getelementptr inbounds i32* %y, i64 %21
+  %26 = load i32* %25, align 4
+  %27 = add nsw i32 %24, %26
+  store i32 %27, i32* %22, align 4
+  ret void
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/simple-loop.ll b/test/Transforms/SLPVectorizer/X86/simple-loop.ll
new file mode 100644
index 0000000..0111b94
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/simple-loop.ll
@@ -0,0 +1,100 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+;CHECK:rollable
+define i32 @rollable(i32* noalias nocapture %in, i32* noalias nocapture %out, i64 %n) {
+  %1 = icmp eq i64 %n, 0
+  br i1 %1, label %._crit_edge, label %.lr.ph
+
+.lr.ph:                                           ; preds = %0, %.lr.ph
+  %i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
+  %2 = shl i64 %i.019, 2
+  %3 = getelementptr inbounds i32* %in, i64 %2
+;CHECK:load <4 x i32>
+  %4 = load i32* %3, align 4
+  %5 = or i64 %2, 1
+  %6 = getelementptr inbounds i32* %in, i64 %5
+  %7 = load i32* %6, align 4
+  %8 = or i64 %2, 2
+  %9 = getelementptr inbounds i32* %in, i64 %8
+  %10 = load i32* %9, align 4
+  %11 = or i64 %2, 3
+  %12 = getelementptr inbounds i32* %in, i64 %11
+  %13 = load i32* %12, align 4
+;CHECK:mul <4 x i32>
+  %14 = mul i32 %4, 7
+;CHECK:add <4 x i32>
+  %15 = add i32 %14, 7
+  %16 = mul i32 %7, 7
+  %17 = add i32 %16, 14
+  %18 = mul i32 %10, 7
+  %19 = add i32 %18, 21
+  %20 = mul i32 %13, 7
+  %21 = add i32 %20, 28
+  %22 = getelementptr inbounds i32* %out, i64 %2
+;CHECK:store <4 x i32>
+  store i32 %15, i32* %22, align 4
+  %23 = getelementptr inbounds i32* %out, i64 %5
+  store i32 %17, i32* %23, align 4
+  %24 = getelementptr inbounds i32* %out, i64 %8
+  store i32 %19, i32* %24, align 4
+  %25 = getelementptr inbounds i32* %out, i64 %11
+  store i32 %21, i32* %25, align 4
+  %26 = add i64 %i.019, 1
+  %exitcond = icmp eq i64 %26, %n
+  br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge:                                      ; preds = %.lr.ph, %0
+;CHECK: ret
+  ret i32 undef
+}
+
+;CHECK:unrollable
+;CHECK-NOT: <4 x i32>
+;CHECK: ret
+define i32 @unrollable(i32* %in, i32* %out, i64 %n) nounwind ssp uwtable {
+  %1 = icmp eq i64 %n, 0
+  br i1 %1, label %._crit_edge, label %.lr.ph
+
+.lr.ph:                                           ; preds = %0, %.lr.ph
+  %i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
+  %2 = shl i64 %i.019, 2
+  %3 = getelementptr inbounds i32* %in, i64 %2
+  %4 = load i32* %3, align 4
+  %5 = or i64 %2, 1
+  %6 = getelementptr inbounds i32* %in, i64 %5
+  %7 = load i32* %6, align 4
+  %8 = or i64 %2, 2
+  %9 = getelementptr inbounds i32* %in, i64 %8
+  %10 = load i32* %9, align 4
+  %11 = or i64 %2, 3
+  %12 = getelementptr inbounds i32* %in, i64 %11
+  %13 = load i32* %12, align 4
+  %14 = mul i32 %4, 7
+  %15 = add i32 %14, 7
+  %16 = mul i32 %7, 7
+  %17 = add i32 %16, 14
+  %18 = mul i32 %10, 7
+  %19 = add i32 %18, 21
+  %20 = mul i32 %13, 7
+  %21 = add i32 %20, 28
+  %22 = getelementptr inbounds i32* %out, i64 %2
+  store i32 %15, i32* %22, align 4
+  %23 = getelementptr inbounds i32* %out, i64 %5
+  store i32 %17, i32* %23, align 4
+  %barrier = call i32 @goo(i32 0)                 ; <---------------- memory barrier.
+  %24 = getelementptr inbounds i32* %out, i64 %8
+  store i32 %19, i32* %24, align 4
+  %25 = getelementptr inbounds i32* %out, i64 %11
+  store i32 %21, i32* %25, align 4
+  %26 = add i64 %i.019, 1
+  %exitcond = icmp eq i64 %26, %n
+  br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge:                                      ; preds = %.lr.ph, %0
+  ret i32 undef
+}
+
+declare i32 @goo(i32)
diff --git a/test/Transforms/SLPVectorizer/X86/simplebb.ll b/test/Transforms/SLPVectorizer/X86/simplebb.ll
new file mode 100644
index 0000000..cd0b99e
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/simplebb.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; Simple 3-pair chain with loads and stores
+; CHECK: test1
+; CHECK: store <2 x double>
+; CHECK: ret
+define void @test1(double* %a, double* %b, double* %c) {
+entry:
+  %i0 = load double* %a, align 8
+  %i1 = load double* %b, align 8
+  %mul = fmul double %i0, %i1
+  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %i3 = load double* %arrayidx3, align 8
+  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %i4 = load double* %arrayidx4, align 8
+  %mul5 = fmul double %i3, %i4
+  store double %mul, double* %c, align 8
+  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  store double %mul5, double* %arrayidx5, align 8
+  ret void
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/vector.ll b/test/Transforms/SLPVectorizer/X86/vector.ll
new file mode 100644
index 0000000..02a1897
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/vector.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; Make sure that we are not crashing or changing the code.
+;CHECK: test
+;CHECK: icmp
+;CHECK: ret
+define void @test(<4 x i32> %in, <4 x i32> %in2) {
+  %k = icmp eq <4 x i32> %in, %in2
+  ret void
+}
+
diff --git a/test/Transforms/SLPVectorizer/lit.local.cfg b/test/Transforms/SLPVectorizer/lit.local.cfg
new file mode 100644
index 0000000..19eebc0
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/lit.local.cfg
@@ -0,0 +1 @@
+config.suffixes = ['.ll', '.c', '.cpp']