Diffstat (limited to 'test/Transforms/ScalarRepl')
19 files changed, 262 insertions, 26 deletions
diff --git a/test/Transforms/ScalarRepl/2003-09-12-IncorrectPromote.ll b/test/Transforms/ScalarRepl/2003-09-12-IncorrectPromote.ll
index eb1c945..0b5e415 100644
--- a/test/Transforms/ScalarRepl/2003-09-12-IncorrectPromote.ll
+++ b/test/Transforms/ScalarRepl/2003-09-12-IncorrectPromote.ll
@@ -1,7 +1,7 @@
 ; Scalar replacement was incorrectly promoting this alloca!!
 ;
 ; RUN: opt < %s -scalarrepl -S | \
-; RUN: sed {s/;.*//g} | grep {\\\[}
+; RUN: sed "s/;.*//g" | grep "\["
 
 define i8* @test() {
   %A = alloca [30 x i8] ; <[30 x i8]*> [#uses=1]
diff --git a/test/Transforms/ScalarRepl/2003-10-29-ArrayProblem.ll b/test/Transforms/ScalarRepl/2003-10-29-ArrayProblem.ll
index 00e43a7..77c7b54 100644
--- a/test/Transforms/ScalarRepl/2003-10-29-ArrayProblem.ll
+++ b/test/Transforms/ScalarRepl/2003-10-29-ArrayProblem.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -scalarrepl -S | grep {alloca %%T}
+; RUN: opt < %s -scalarrepl -S | grep "alloca %%T"
 
 %T = type { [80 x i8], i32, i32 }
 declare i32 @.callback_1(i8*)
diff --git a/test/Transforms/ScalarRepl/2008-01-29-PromoteBug.ll b/test/Transforms/ScalarRepl/2008-01-29-PromoteBug.ll
index 8bc4ff0..a53f3de 100644
--- a/test/Transforms/ScalarRepl/2008-01-29-PromoteBug.ll
+++ b/test/Transforms/ScalarRepl/2008-01-29-PromoteBug.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -scalarrepl -instcombine -S | grep {ret i8 17}
+; RUN: opt < %s -scalarrepl -instcombine -S | grep "ret i8 17"
 ; rdar://5707076
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
 target triple = "i386-apple-darwin9.1.0"
diff --git a/test/Transforms/ScalarRepl/2008-06-22-LargeArray.ll b/test/Transforms/ScalarRepl/2008-06-22-LargeArray.ll
index 71ba601..f597613 100644
--- a/test/Transforms/ScalarRepl/2008-06-22-LargeArray.ll
+++ b/test/Transforms/ScalarRepl/2008-06-22-LargeArray.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -scalarrepl -S | grep {call.*mem}
+; RUN: opt < %s -scalarrepl -S | grep "call.*mem"
 ; PR2369
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 
diff --git a/test/Transforms/ScalarRepl/2008-08-22-out-of-range-array-promote.ll b/test/Transforms/ScalarRepl/2008-08-22-out-of-range-array-promote.ll
index 7cccb19..b2a9d43 100644
--- a/test/Transforms/ScalarRepl/2008-08-22-out-of-range-array-promote.ll
+++ b/test/Transforms/ScalarRepl/2008-08-22-out-of-range-array-promote.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -scalarrepl -S | grep {s = alloca .struct.x}
+; RUN: opt < %s -scalarrepl -S | grep "s = alloca .struct.x"
 ; PR2423
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 target triple = "i386-apple-darwin8"
diff --git a/test/Transforms/ScalarRepl/2009-02-02-ScalarPromoteOutOfRange.ll b/test/Transforms/ScalarRepl/2009-02-02-ScalarPromoteOutOfRange.ll
index 9c0f203..3c8a364 100644
--- a/test/Transforms/ScalarRepl/2009-02-02-ScalarPromoteOutOfRange.ll
+++ b/test/Transforms/ScalarRepl/2009-02-02-ScalarPromoteOutOfRange.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -scalarrepl -instcombine -S | grep {ret i32 %x}
+; RUN: opt < %s -scalarrepl -instcombine -S | grep "ret i32 %x"
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
 target triple = "i386-pc-linux-gnu"
 
diff --git a/test/Transforms/ScalarRepl/2009-02-05-LoadFCA.ll b/test/Transforms/ScalarRepl/2009-02-05-LoadFCA.ll
index f8ab875..67228a7 100644
--- a/test/Transforms/ScalarRepl/2009-02-05-LoadFCA.ll
+++ b/test/Transforms/ScalarRepl/2009-02-05-LoadFCA.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -scalarrepl -instcombine -inline -instcombine -S | grep {ret i32 42}
+; RUN: opt < %s -scalarrepl -instcombine -inline -instcombine -S | grep "ret i32 42"
 ; PR3489
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 target triple = "x86_64-apple-darwin10.0"
diff --git a/test/Transforms/ScalarRepl/2009-03-04-MemCpyAlign.ll b/test/Transforms/ScalarRepl/2009-03-04-MemCpyAlign.ll
index 3218d59..a4182d4 100644
--- a/test/Transforms/ScalarRepl/2009-03-04-MemCpyAlign.ll
+++ b/test/Transforms/ScalarRepl/2009-03-04-MemCpyAlign.ll
@@ -1,6 +1,6 @@
 ; The store into %p should end up with a known alignment of 1, since the memcpy
 ; is only known to access it with 1-byte alignment.
-; RUN: opt < %s -scalarrepl -S | grep {store i16 1, .*, align 1}
+; RUN: opt < %s -scalarrepl -S | grep "store i16 1, .*, align 1"
 ; PR3720
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
 
diff --git a/test/Transforms/ScalarRepl/2011-06-08-VectorExtractValue.ll b/test/Transforms/ScalarRepl/2011-06-08-VectorExtractValue.ll
index 98fa1c6..4596885 100644
--- a/test/Transforms/ScalarRepl/2011-06-08-VectorExtractValue.ll
+++ b/test/Transforms/ScalarRepl/2011-06-08-VectorExtractValue.ll
@@ -10,8 +10,7 @@ target triple = "x86_64-apple-macosx10.7.0"
 
 ; CHECK: main
 ; CHECK-NOT: alloca
-; CHECK: %[[A:[a-z0-9]*]] = and i128
-; CHECK: %[[B:[a-z0-9]*]] = trunc i128 %[[A]] to i32
+; CHECK: extractelement <2 x float> zeroinitializer, i32 0
 
 define void @main() uwtable ssp {
 entry:
@@ -28,8 +27,7 @@ entry:
 
 ; CHECK: test1
 ; CHECK-NOT: alloca
-; CHECK: %[[A:[a-z0-9]*]] = and i128
-; CHECK: %[[B:[a-z0-9]*]] = trunc i128 %[[A]] to i32
+; CHECK: extractelement <2 x float> zeroinitializer, i32 0
 
 define void @test1() uwtable ssp {
 entry:
@@ -43,9 +41,8 @@ entry:
 
 ; CHECK: test2
 ; CHECK-NOT: alloca
-; CHECK: and i128
-; CHECK: or i128
-; CHECK: trunc i128
+; CHECK: %[[A:[a-z0-9]*]] = extractelement <2 x float> zeroinitializer, i32 0
+; CHECK: fadd float %[[A]], 1.000000e+00
 ; CHECK-NOT: insertelement
 ; CHECK-NOT: extractelement
 
@@ -62,3 +59,17 @@ entry:
   %r = fadd float %r1, %r2
   ret float %r
 }
+
+; CHECK: test3
+; CHECK: %[[A:[a-z0-9]*]] = extractelement <2 x float> <float 2.000000e+00, float 3.000000e+00>, i32 1
+; CHECK: ret float %[[A]]
+
+define float @test3() {
+entry:
+  %ai = alloca { <2 x float>, <2 x float> }, align 8
+  store { <2 x float>, <2 x float> } {<2 x float> <float 0.0, float 1.0>, <2 x float> <float 2.0, float 3.0>}, { <2 x float>, <2 x float> }* %ai, align 8
+  %tmpcast = bitcast { <2 x float>, <2 x float> }* %ai to [4 x float]*
+  %arrayidx = getelementptr inbounds [4 x float]* %tmpcast, i64 0, i64 3
+  %f = load float* %arrayidx, align 4
+  ret float %f
+}
diff --git a/test/Transforms/ScalarRepl/crash.ll b/test/Transforms/ScalarRepl/crash.ll
index cd4dc32..58c5a3a 100644
--- a/test/Transforms/ScalarRepl/crash.ll
+++ b/test/Transforms/ScalarRepl/crash.ll
@@ -260,5 +260,27 @@ entry:
   ret void
 }
 
+; rdar://11861001 - The dynamic GEP here was incorrectly making all accesses
+; to the alloca think they were also dynamic. Inserts and extracts created to
+; access the vector were all being based from the dynamic access, even in BBs
+; not dominated by the GEP.
+define fastcc void @test() optsize inlinehint ssp align 2 {
+entry:
+  %alloc.0.0 = alloca <4 x float>, align 16
+  %bitcast = bitcast <4 x float>* %alloc.0.0 to [4 x float]*
+  %idx3 = getelementptr inbounds [4 x float]* %bitcast, i32 0, i32 3
+  store float 0.000000e+00, float* %idx3, align 4
+  br label %for.body10
+
+for.body10: ; preds = %for.body10, %entry
+  %loopidx = phi i32 [ 0, %entry ], [ undef, %for.body10 ]
+  %unusedidx = getelementptr inbounds <4 x float>* %alloc.0.0, i32 0, i32 %loopidx
+  br i1 undef, label %for.end, label %for.body10
+
+for.end: ; preds = %for.body10
+  store <4 x float> <float -1.000000e+00, float -1.000000e+00, float -1.000000e+00, float 0.000000e+00>, <4 x float>* %alloc.0.0, align 16
+  ret void
+}
+
 declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
diff --git a/test/Transforms/ScalarRepl/dynamic-vector-gep.ll b/test/Transforms/ScalarRepl/dynamic-vector-gep.ll
new file mode 100644
index 0000000..565cd76
--- /dev/null
+++ b/test/Transforms/ScalarRepl/dynamic-vector-gep.ll
@@ -0,0 +1,167 @@
+; RUN: opt < %s -scalarrepl -S | FileCheck %s
+
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
+target triple = "x86_64-apple-darwin10.0.0"
+
+; CHECK: @test1
+; CHECK: %[[alloc:[\.a-z0-9]*]] = alloca <4 x float>
+; CHECK: store <4 x float> zeroinitializer, <4 x float>* %[[alloc]]
+; CHECK: memset
+; CHECK: extractelement <4 x float> zeroinitializer, i32 %idx2
+
+; Split the array but don't replace the memset with an insert
+; element as its not a constant offset.
+; The load, however, can be replaced with an extract element.
+define float @test1(i32 %idx1, i32 %idx2) {
+entry:
+  %0 = alloca [4 x <4 x float>]
+  store [4 x <4 x float>] zeroinitializer, [4 x <4 x float>]* %0
+  %ptr1 = getelementptr [4 x <4 x float>]* %0, i32 0, i32 0, i32 %idx1
+  %cast = bitcast float* %ptr1 to i8*
+  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 4, i32 4, i1 false)
+  %ptr2 = getelementptr [4 x <4 x float>]* %0, i32 0, i32 1, i32 %idx2
+  %ret = load float* %ptr2
+  ret float %ret
+}
+
+; CHECK: @test2
+; CHECK: %[[ins:[\.a-z0-9]*]] = insertelement <4 x float> zeroinitializer, float 1.000000e+00, i32 %idx1
+; CHECK: extractelement <4 x float> %[[ins]], i32 %idx2
+
+; Do SROA on the array when it has dynamic vector reads and writes.
+define float @test2(i32 %idx1, i32 %idx2) {
+entry:
+  %0 = alloca [4 x <4 x float>]
+  store [4 x <4 x float>] zeroinitializer, [4 x <4 x float>]* %0
+  %ptr1 = getelementptr [4 x <4 x float>]* %0, i32 0, i32 0, i32 %idx1
+  store float 1.0, float* %ptr1
+  %ptr2 = getelementptr [4 x <4 x float>]* %0, i32 0, i32 0, i32 %idx2
+  %ret = load float* %ptr2
+  ret float %ret
+}
+
+; CHECK: test3
+; CHECK: %0 = alloca [4 x <4 x float>]
+; CHECK-NOT: alloca
+
+; Don't do SROA on a dynamically indexed vector when it spans
+; more than one array element of the alloca array it is within.
+define float @test3(i32 %idx1, i32 %idx2) {
+entry:
+  %0 = alloca [4 x <4 x float>]
+  store [4 x <4 x float>] zeroinitializer, [4 x <4 x float>]* %0
+  %bigvec = bitcast [4 x <4 x float>]* %0 to <16 x float>*
+  %ptr1 = getelementptr <16 x float>* %bigvec, i32 0, i32 %idx1
+  store float 1.0, float* %ptr1
+  %ptr2 = getelementptr <16 x float>* %bigvec, i32 0, i32 %idx2
+  %ret = load float* %ptr2
+  ret float %ret
+}
+
+; CHECK: test4
+; CHECK: insertelement <16 x float> zeroinitializer, float 1.000000e+00, i32 %idx1
+; CHECK: extractelement <16 x float> %0, i32 %idx2
+
+; Don't do SROA on a dynamically indexed vector when it spans
+; more than one array element of the alloca array it is within.
+; However, unlike test3, the store is on the vector type
+; so SROA will convert the large alloca into the large vector
+; type and do all accesses with insert/extract element
+define float @test4(i32 %idx1, i32 %idx2) {
+entry:
+  %0 = alloca [4 x <4 x float>]
+  %bigvec = bitcast [4 x <4 x float>]* %0 to <16 x float>*
+  store <16 x float> zeroinitializer, <16 x float>* %bigvec
+  %ptr1 = getelementptr <16 x float>* %bigvec, i32 0, i32 %idx1
+  store float 1.0, float* %ptr1
+  %ptr2 = getelementptr <16 x float>* %bigvec, i32 0, i32 %idx2
+  %ret = load float* %ptr2
+  ret float %ret
+}
+
+; CHECK: @test5
+; CHECK: %0 = alloca [4 x <4 x float>]
+; CHECK-NOT: alloca
+
+; Don't do SROA as the is a second dynamically indexed array
+; which may span multiple elements of the alloca.
+define float @test5(i32 %idx1, i32 %idx2) {
+entry:
+  %0 = alloca [4 x <4 x float>]
+  store [4 x <4 x float>] zeroinitializer, [4 x <4 x float>]* %0
+  %ptr1 = getelementptr [4 x <4 x float>]* %0, i32 0, i32 0, i32 %idx1
+  %ptr2 = bitcast float* %ptr1 to [1 x <2 x float>]*
+  %ptr3 = getelementptr [1 x <2 x float>]* %ptr2, i32 0, i32 0, i32 %idx1
+  store float 1.0, float* %ptr1
+  %ptr4 = getelementptr [4 x <4 x float>]* %0, i32 0, i32 0, i32 %idx2
+  %ret = load float* %ptr4
+  ret float %ret
+}
+
+; CHECK: test6
+; CHECK: insertelement <4 x float> zeroinitializer, float 1.000000e+00, i32 %idx1
+; CHECK: extractelement <4 x float> zeroinitializer, i32 %idx2
+
+%vector.pair = type { %vector.anon, %vector.anon }
+%vector.anon = type { %vector }
+%vector = type { <4 x float> }
+
+; Dynamic GEPs on vectors were crashing when the vector was inside a struct
+; as the new GEP for the new alloca might not include all the indices from
+; the original GEP, just the indices it needs to get to the correct offset of
+; some type, not necessarily the dynamic vector.
+; This test makes sure we don't have this crash.
+define float @test6(i32 %idx1, i32 %idx2) {
+entry:
+  %0 = alloca %vector.pair
+  store %vector.pair zeroinitializer, %vector.pair* %0
+  %ptr1 = getelementptr %vector.pair* %0, i32 0, i32 0, i32 0, i32 0, i32 %idx1
+  store float 1.0, float* %ptr1
+  %ptr2 = getelementptr %vector.pair* %0, i32 0, i32 1, i32 0, i32 0, i32 %idx2
+  %ret = load float* %ptr2
+  ret float %ret
+}
+
+; CHECK: test7
+; CHECK: insertelement <4 x float> zeroinitializer, float 1.000000e+00, i32 %idx1
+; CHECK: extractelement <4 x float> zeroinitializer, i32 %idx2
+
+%array.pair = type { [2 x %array.anon], %array.anon }
+%array.anon = type { [2 x %vector] }
+
+; This is the same as test6 and tests the same crash, but on arrays.
+define float @test7(i32 %idx1, i32 %idx2) {
+entry:
+  %0 = alloca %array.pair
+  store %array.pair zeroinitializer, %array.pair* %0
+  %ptr1 = getelementptr %array.pair* %0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 %idx1
+  store float 1.0, float* %ptr1
+  %ptr2 = getelementptr %array.pair* %0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 %idx2
+  %ret = load float* %ptr2
+  ret float %ret
+}
+
+; CHECK: test8
+; CHECK: %[[offset1:[\.a-z0-9]*]] = add i32 %idx1, 1
+; CHECK: %[[ins:[\.a-z0-9]*]] = insertelement <4 x float> zeroinitializer, float 1.000000e+00, i32 %[[offset1]]
+; CHECK: %[[offset2:[\.a-z0-9]*]] = add i32 %idx2, 2
+; CHECK: extractelement <4 x float> %[[ins]], i32 %[[offset2]]
+
+; Do SROA on the vector when it has dynamic vector reads and writes
+; from a non-zero offset.
+define float @test8(i32 %idx1, i32 %idx2) {
+entry:
+  %0 = alloca <4 x float>
+  store <4 x float> zeroinitializer, <4 x float>* %0
+  %ptr1 = getelementptr <4 x float>* %0, i32 0, i32 1
+  %ptr2 = bitcast float* %ptr1 to <3 x float>*
+  %ptr3 = getelementptr <3 x float>* %ptr2, i32 0, i32 %idx1
+  store float 1.0, float* %ptr3
+  %ptr4 = getelementptr <4 x float>* %0, i32 0, i32 2
+  %ptr5 = bitcast float* %ptr4 to <2 x float>*
+  %ptr6 = getelementptr <2 x float>* %ptr5, i32 0, i32 %idx2
+  %ret = load float* %ptr6
+  ret float %ret
+}
+
+declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i32, i1)
diff --git a/test/Transforms/ScalarRepl/memcpy-from-global.ll b/test/Transforms/ScalarRepl/memcpy-from-global.ll
index 59475ad..5557a8f 100644
--- a/test/Transforms/ScalarRepl/memcpy-from-global.ll
+++ b/test/Transforms/ScalarRepl/memcpy-from-global.ll
@@ -45,8 +45,10 @@ declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
 
 %T = type { i8, [123 x i8] }
+%U = type { i32, i32, i32, i32, i32 }
 
 @G = constant %T {i8 1, [123 x i8] zeroinitializer }
+@H = constant [2 x %U] zeroinitializer, align 16
 
 define void @test2() {
   %A = alloca %T
@@ -108,3 +110,37 @@ define void @test5() {
 
 
 declare void @baz(i8* byval)
+
+
+define void @test6() {
+  %A = alloca %U, align 16
+  %a = bitcast %U* %A to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast ([2 x %U]* @H to i8*), i64 20, i32 16, i1 false)
+  call void @bar(i8* %a) readonly
+; CHECK: @test6
+; CHECK-NEXT: %a = bitcast
+; CHECK-NEXT: call void @bar(i8* %a)
+  ret void
+}
+
+define void @test7() {
+  %A = alloca %U, align 16
+  %a = bitcast %U* %A to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%U* getelementptr ([2 x %U]* @H, i64 0, i32 0) to i8*), i64 20, i32 4, i1 false)
+  call void @bar(i8* %a) readonly
+; CHECK: @test7
+; CHECK-NEXT: %a = bitcast
+; CHECK-NEXT: call void @bar(i8* %a)
+  ret void
+}
+
+define void @test8() {
+  %A = alloca %U, align 16
+  %a = bitcast %U* %A to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%U* getelementptr ([2 x %U]* @H, i64 0, i32 1) to i8*), i64 20, i32 4, i1 false)
+  call void @bar(i8* %a) readonly
+; CHECK: @test8
+; CHECK: llvm.memcpy
+; CHECK: bar
+  ret void
+}
diff --git a/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll b/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll
index 0d61e5a..3510dfc 100644
--- a/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll
+++ b/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll
@@ -1,6 +1,6 @@
 ; PR1226
 ; RUN: opt < %s -scalarrepl -S | \
-; RUN: not grep {call void @llvm.memcpy.i32}
+; RUN: not grep "call void @llvm.memcpy.i32"
 ; RUN: opt < %s -scalarrepl -S | grep getelementptr
 ; END.
 
diff --git a/test/Transforms/ScalarRepl/memset-aggregate.ll b/test/Transforms/ScalarRepl/memset-aggregate.ll
index 42e7a0f..95ecf17 100644
--- a/test/Transforms/ScalarRepl/memset-aggregate.ll
+++ b/test/Transforms/ScalarRepl/memset-aggregate.ll
@@ -1,7 +1,7 @@
 ; PR1226
-; RUN: opt < %s -scalarrepl -S | grep {ret i32 16843009}
+; RUN: opt < %s -scalarrepl -S | grep "ret i32 16843009"
 ; RUN: opt < %s -scalarrepl -S | not grep alloca
-; RUN: opt < %s -scalarrepl -instcombine -S | grep {ret i16 514}
+; RUN: opt < %s -scalarrepl -instcombine -S | grep "ret i16 514"
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
 target triple = "i686-apple-darwin8"
diff --git a/test/Transforms/ScalarRepl/not-a-vector.ll b/test/Transforms/ScalarRepl/not-a-vector.ll
index f873456..67fefb4 100644
--- a/test/Transforms/ScalarRepl/not-a-vector.ll
+++ b/test/Transforms/ScalarRepl/not-a-vector.ll
@@ -1,6 +1,6 @@
 ; RUN: opt < %s -scalarrepl -S | not grep alloca
-; RUN: opt < %s -scalarrepl -S | not grep {7 x double}
-; RUN: opt < %s -scalarrepl -instcombine -S | grep {ret double %B}
+; RUN: opt < %s -scalarrepl -S | not grep "7 x double"
+; RUN: opt < %s -scalarrepl -instcombine -S | grep "ret double %B"
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
 
 define double @test(double %A, double %B) {
diff --git a/test/Transforms/ScalarRepl/union-fp-int.ll b/test/Transforms/ScalarRepl/union-fp-int.ll
index 8b7e50d..6a49918 100644
--- a/test/Transforms/ScalarRepl/union-fp-int.ll
+++ b/test/Transforms/ScalarRepl/union-fp-int.ll
@@ -1,7 +1,7 @@
 ; RUN: opt < %s -scalarrepl -S | \
 ; RUN: not grep alloca
 ; RUN: opt < %s -scalarrepl -S | \
-; RUN: grep {bitcast.*float.*i32}
+; RUN: grep "bitcast.*float.*i32"
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
 
 define i32 @test(float %X) {
diff --git a/test/Transforms/ScalarRepl/union-pointer.ll b/test/Transforms/ScalarRepl/union-pointer.ll
index ea4ec14..03d25ac 100644
--- a/test/Transforms/ScalarRepl/union-pointer.ll
+++ b/test/Transforms/ScalarRepl/union-pointer.ll
@@ -1,7 +1,7 @@
 ; PR892
 ; RUN: opt < %s -scalarrepl -S | \
 ; RUN: not grep alloca
-; RUN: opt < %s -scalarrepl -S | grep {ret i8}
+; RUN: opt < %s -scalarrepl -S | grep "ret i8"
 target datalayout = "e-p:32:32-n8:16:32"
 target triple = "i686-apple-darwin8.7.2"
 
diff --git a/test/Transforms/ScalarRepl/vector_memcpy.ll b/test/Transforms/ScalarRepl/vector_memcpy.ll
index decbd30..33e8034 100644
--- a/test/Transforms/ScalarRepl/vector_memcpy.ll
+++ b/test/Transforms/ScalarRepl/vector_memcpy.ll
@@ -1,6 +1,6 @@
 ; RUN: opt < %s -scalarrepl -S > %t
-; RUN: grep {ret <16 x float> %A} %t
-; RUN: grep {ret <16 x float> zeroinitializer} %t
+; RUN: grep "ret <16 x float> %A" %t
+; RUN: grep "ret <16 x float> zeroinitializer" %t
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
 
 define <16 x float> @foo(<16 x float> %A) nounwind {
diff --git a/test/Transforms/ScalarRepl/volatile.ll b/test/Transforms/ScalarRepl/volatile.ll
index fadf1aa..056526c 100644
--- a/test/Transforms/ScalarRepl/volatile.ll
+++ b/test/Transforms/ScalarRepl/volatile.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -scalarrepl -S | grep {load volatile}
-; RUN: opt < %s -scalarrepl -S | grep {store volatile}
+; RUN: opt < %s -scalarrepl -S | grep "load volatile"
+; RUN: opt < %s -scalarrepl -S | grep "store volatile"
 
 define i32 @voltest(i32 %T) {
   %A = alloca {i32, i32}