Diffstat (limited to 'test/Transforms/ScalarRepl/copy-aggregate.ll')
-rw-r--r--  test/Transforms/ScalarRepl/copy-aggregate.ll | 52
1 file changed, 51 insertions(+), 1 deletion(-)
diff --git a/test/Transforms/ScalarRepl/copy-aggregate.ll b/test/Transforms/ScalarRepl/copy-aggregate.ll
index 2992413..997da4b 100644
--- a/test/Transforms/ScalarRepl/copy-aggregate.ll
+++ b/test/Transforms/ScalarRepl/copy-aggregate.ll
@@ -1,9 +1,11 @@
-; RUN: opt < %s -scalarrepl -S | not grep alloca
+; RUN: opt < %s -scalarrepl -S | FileCheck %s
; PR3290
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
;; Store of integer to whole alloca struct.
define i32 @test1(i64 %V) nounwind {
+; CHECK: test1
+; CHECK-NOT: alloca
%X = alloca {{i32, i32}}
%Y = bitcast {{i32,i32}}* %X to i64*
store i64 %V, i64* %Y
@@ -18,6 +20,8 @@ define i32 @test1(i64 %V) nounwind {
;; Store of integer to whole struct/array alloca.
define float @test2(i128 %V) nounwind {
+; CHECK: test2
+; CHECK-NOT: alloca
%X = alloca {[4 x float]}
%Y = bitcast {[4 x float]}* %X to i128*
store i128 %V, i128* %Y
@@ -32,6 +36,8 @@ define float @test2(i128 %V) nounwind {
;; Load of whole alloca struct as integer
define i64 @test3(i32 %a, i32 %b) nounwind {
+; CHECK: test3
+; CHECK-NOT: alloca
%X = alloca {{i32, i32}}
%A = getelementptr {{i32,i32}}* %X, i32 0, i32 0, i32 0
@@ -46,6 +52,8 @@ define i64 @test3(i32 %a, i32 %b) nounwind {
;; load of integer from whole struct/array alloca.
define i128 @test4(float %a, float %b) nounwind {
+; CHECK: test4
+; CHECK-NOT: alloca
%X = alloca {[4 x float]}
%A = getelementptr {[4 x float]}* %X, i32 0, i32 0, i32 0
%B = getelementptr {[4 x float]}* %X, i32 0, i32 0, i32 3
@@ -56,3 +64,45 @@ define i128 @test4(float %a, float %b) nounwind {
%V = load i128* %Y
ret i128 %V
}
+
+;; If the elements of a struct or array alloca contain padding, SROA can still
+;; split up the alloca as long as there is no padding between the elements.
+%padded = type { i16, i8 }
+%arr = type [4 x %padded]
+define void @test5(%arr* %p, %arr* %q) {
+entry:
+; CHECK: test5
+; CHECK-NOT: i128
+ %var = alloca %arr, align 4
+ %vari8 = bitcast %arr* %var to i8*
+ %pi8 = bitcast %arr* %p to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %vari8, i8* %pi8, i32 16, i32 4, i1 false)
+ %qi8 = bitcast %arr* %q to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %qi8, i8* %vari8, i32 16, i32 4, i1 false)
+ ret void
+}
+
+;; Check that an array alloca can be split up when it is also accessed with
+;; a load or store as a homogeneous structure with the same element type and
+;; number of elements as the array.
+%homogeneous = type { <8 x i16>, <8 x i16>, <8 x i16> }
+%wrapped_array = type { [3 x <8 x i16>] }
+define void @test6(i8* %p, %wrapped_array* %arr) {
+entry:
+; CHECK: test6
+; CHECK: store <8 x i16>
+; CHECK: store <8 x i16>
+; CHECK: store <8 x i16>
+ %var = alloca %wrapped_array, align 16
+ %res = call %homogeneous @test6callee(i8* %p)
+ %varcast = bitcast %wrapped_array* %var to %homogeneous*
+ store %homogeneous %res, %homogeneous* %varcast
+ %tmp1 = bitcast %wrapped_array* %arr to i8*
+ %tmp2 = bitcast %wrapped_array* %var to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp1, i8* %tmp2, i32 48, i32 16, i1 false)
+ ret void
+}
+
+declare %homogeneous @test6callee(i8* nocapture) nounwind
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
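
A rough C-level analogue of the new test5 pattern may help illustrate what the padding check is about. This is an illustrative sketch only, not part of the commit; the names padded_t, arr_t, and test5_c are assumptions. Each element carries internal tail padding ({i16, i8} rounds up to 4 bytes under the test's datalayout), but there is no padding between the elements themselves, so scalarrepl can still replace the local aggregate with per-element values instead of leaving the alloca behind:

  #include <string.h>

  struct padded_t { short a; char b; };   /* 3 bytes of data, padded to 4, like %padded */
  typedef struct padded_t arr_t[4];       /* 16 bytes total, like %arr                  */

  void test5_c(arr_t *p, arr_t *q) {
      arr_t var;                          /* corresponds to the alloca %var          */
      memcpy(&var, p, sizeof var);        /* memcpy into the local aggregate         */
      memcpy(q, &var, sizeof var);        /* memcpy back out of the local aggregate  */
  }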
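
Similarly, a C-level sketch of the test6 pattern (again with assumed names, not part of the commit): a call returns a homogeneous struct of three <8 x i16> vectors, which is then stored into a local whose type wraps a three-element vector array. Because the two types agree on element type and element count, scalarrepl can split the local into the three vector stores that the CHECK lines match. The memcpy from &res stands in for the bitcast-and-store of the call result in the IR:

  #include <string.h>

  typedef short v8i16_t __attribute__((vector_size(16)));
  struct homogeneous_t   { v8i16_t a, b, c; };   /* like %homogeneous   */
  struct wrapped_array_t { v8i16_t elems[3]; };  /* like %wrapped_array */

  struct homogeneous_t test6callee_c(const char *p);  /* external, like @test6callee */

  void test6_c(const char *p, struct wrapped_array_t *arr) {
      struct wrapped_array_t var;                  /* corresponds to the alloca %var      */
      struct homogeneous_t res = test6callee_c(p);
      memcpy(&var, &res, sizeof var);              /* store of the call result into %var  */
      memcpy(arr, &var, sizeof var);               /* memcpy out to %arr (48 bytes)       */
  }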