summaryrefslogtreecommitdiffstats
path: root/test/Transforms/Inline
diff options
context:
space:
mode:
Diffstat (limited to 'test/Transforms/Inline')
-rw-r--r--test/Transforms/Inline/2007-06-06-NoInline.ll46
-rw-r--r--test/Transforms/Inline/2008-09-02-AlwaysInline.ll10
-rw-r--r--test/Transforms/Inline/2008-10-30-AlwaysInline.ll14
-rw-r--r--test/Transforms/Inline/2008-11-04-AlwaysInline.ll7
-rw-r--r--test/Transforms/Inline/alloca-bonus.ll155
-rw-r--r--test/Transforms/Inline/always-inline.ll125
-rw-r--r--test/Transforms/Inline/always_inline_dyn_alloca.ll15
-rw-r--r--test/Transforms/Inline/blockaddress.ll27
-rw-r--r--test/Transforms/Inline/dg.exp3
-rw-r--r--test/Transforms/Inline/dynamic_alloca_test.ll52
-rw-r--r--test/Transforms/Inline/inline-invoke-tail.ll8
-rw-r--r--test/Transforms/Inline/inline_cleanup.ll158
-rw-r--r--test/Transforms/Inline/inline_constprop.ll114
-rw-r--r--test/Transforms/Inline/inline_returns_twice.ll41
-rw-r--r--test/Transforms/Inline/lit.local.cfg1
-rw-r--r--test/Transforms/Inline/noinline-recursive-fn.ll43
-rw-r--r--test/Transforms/Inline/ptr-diff.ll58
17 files changed, 740 insertions, 137 deletions
diff --git a/test/Transforms/Inline/2007-06-06-NoInline.ll b/test/Transforms/Inline/2007-06-06-NoInline.ll
deleted file mode 100644
index d5a7953..0000000
--- a/test/Transforms/Inline/2007-06-06-NoInline.ll
+++ /dev/null
@@ -1,46 +0,0 @@
-; RUN: opt < %s -inline -S | grep "define internal i32 @bar"
-@llvm.noinline = appending global [1 x i8*] [ i8* bitcast (i32 (i32, i32)* @bar to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define internal i32 @bar(i32 %x, i32 %y) {
-entry:
- %x_addr = alloca i32 ; <i32*> [#uses=2]
- %y_addr = alloca i32 ; <i32*> [#uses=2]
- %retval = alloca i32, align 4 ; <i32*> [#uses=2]
- %tmp = alloca i32, align 4 ; <i32*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %x, i32* %x_addr
- store i32 %y, i32* %y_addr
- %tmp1 = load i32* %x_addr ; <i32> [#uses=1]
- %tmp2 = load i32* %y_addr ; <i32> [#uses=1]
- %tmp3 = add i32 %tmp1, %tmp2 ; <i32> [#uses=1]
- store i32 %tmp3, i32* %tmp
- %tmp4 = load i32* %tmp ; <i32> [#uses=1]
- store i32 %tmp4, i32* %retval
- br label %return
-
-return: ; preds = %entry
- %retval5 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval5
-}
-
-define i32 @foo(i32 %a, i32 %b) {
-entry:
- %a_addr = alloca i32 ; <i32*> [#uses=2]
- %b_addr = alloca i32 ; <i32*> [#uses=2]
- %retval = alloca i32, align 4 ; <i32*> [#uses=2]
- %tmp = alloca i32, align 4 ; <i32*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %a, i32* %a_addr
- store i32 %b, i32* %b_addr
- %tmp1 = load i32* %b_addr ; <i32> [#uses=1]
- %tmp2 = load i32* %a_addr ; <i32> [#uses=1]
- %tmp3 = call i32 @bar( i32 %tmp1, i32 %tmp2 ) ; <i32> [#uses=1]
- store i32 %tmp3, i32* %tmp
- %tmp4 = load i32* %tmp ; <i32> [#uses=1]
- store i32 %tmp4, i32* %retval
- br label %return
-
-return: ; preds = %entry
- %retval5 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval5
-}
diff --git a/test/Transforms/Inline/2008-09-02-AlwaysInline.ll b/test/Transforms/Inline/2008-09-02-AlwaysInline.ll
deleted file mode 100644
index 39095c4..0000000
--- a/test/Transforms/Inline/2008-09-02-AlwaysInline.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: opt < %s -inline-threshold=0 -inline -S | not grep call
-
-define i32 @fn2() alwaysinline {
- ret i32 1
-}
-
-define i32 @fn3() {
- %r = call i32 @fn2()
- ret i32 %r
-}
diff --git a/test/Transforms/Inline/2008-10-30-AlwaysInline.ll b/test/Transforms/Inline/2008-10-30-AlwaysInline.ll
deleted file mode 100644
index 11e5012..0000000
--- a/test/Transforms/Inline/2008-10-30-AlwaysInline.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: opt < %s -always-inline -S | not grep call
-
-; Ensure that threshold doesn't disrupt always inline.
-; RUN: opt < %s -inline-threshold=-2000000001 -always-inline -S | not grep call
-
-
-define internal i32 @if0() alwaysinline {
- ret i32 1
-}
-
-define i32 @f0() {
- %r = call i32 @if0()
- ret i32 %r
-}
diff --git a/test/Transforms/Inline/2008-11-04-AlwaysInline.ll b/test/Transforms/Inline/2008-11-04-AlwaysInline.ll
deleted file mode 100644
index bc9787b..0000000
--- a/test/Transforms/Inline/2008-11-04-AlwaysInline.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: opt < %s -always-inline -S | grep {@foo}
-; Ensure that foo is not removed by always inliner
-; PR 2945
-
-define internal i32 @foo() nounwind {
- ret i32 0
-}
diff --git a/test/Transforms/Inline/alloca-bonus.ll b/test/Transforms/Inline/alloca-bonus.ll
new file mode 100644
index 0000000..d04d54e
--- /dev/null
+++ b/test/Transforms/Inline/alloca-bonus.ll
@@ -0,0 +1,155 @@
+; RUN: opt -inline < %s -S -o - -inline-threshold=8 | FileCheck %s
+
+target datalayout = "p:32:32"
+
+declare void @llvm.lifetime.start(i64 %size, i8* nocapture %ptr)
+
+@glbl = external global i32
+
+define void @outer1() {
+; CHECK: @outer1
+; CHECK-NOT: call void @inner1
+ %ptr = alloca i32
+ call void @inner1(i32* %ptr)
+ ret void
+}
+
+define void @inner1(i32 *%ptr) {
+ %A = load i32* %ptr
+ store i32 0, i32* %ptr
+ %C = getelementptr inbounds i32* %ptr, i32 0
+ %D = getelementptr inbounds i32* %ptr, i32 1
+ %E = bitcast i32* %ptr to i8*
+ %F = select i1 false, i32* %ptr, i32* @glbl
+ call void @llvm.lifetime.start(i64 0, i8* %E)
+ ret void
+}
+
+define void @outer2() {
+; CHECK: @outer2
+; CHECK: call void @inner2
+ %ptr = alloca i32
+ call void @inner2(i32* %ptr)
+ ret void
+}
+
+; %D poisons this call, scalar-repl can't handle that instruction.
+define void @inner2(i32 *%ptr) {
+ %A = load i32* %ptr
+ store i32 0, i32* %ptr
+ %C = getelementptr inbounds i32* %ptr, i32 0
+ %D = getelementptr inbounds i32* %ptr, i32 %A
+ %E = bitcast i32* %ptr to i8*
+ %F = select i1 false, i32* %ptr, i32* @glbl
+ call void @llvm.lifetime.start(i64 0, i8* %E)
+ ret void
+}
+
+define void @outer3() {
+; CHECK: @outer3
+; CHECK-NOT: call void @inner3
+ %ptr = alloca i32
+ call void @inner3(i32* %ptr, i1 undef)
+ ret void
+}
+
+define void @inner3(i32 *%ptr, i1 %x) {
+ %A = icmp eq i32* %ptr, null
+ %B = and i1 %x, %A
+ br i1 %A, label %bb.true, label %bb.false
+bb.true:
+ ; This block musn't be counted in the inline cost.
+ %t1 = load i32* %ptr
+ %t2 = add i32 %t1, 1
+ %t3 = add i32 %t2, 1
+ %t4 = add i32 %t3, 1
+ %t5 = add i32 %t4, 1
+ %t6 = add i32 %t5, 1
+ %t7 = add i32 %t6, 1
+ %t8 = add i32 %t7, 1
+ %t9 = add i32 %t8, 1
+ %t10 = add i32 %t9, 1
+ %t11 = add i32 %t10, 1
+ %t12 = add i32 %t11, 1
+ %t13 = add i32 %t12, 1
+ %t14 = add i32 %t13, 1
+ %t15 = add i32 %t14, 1
+ %t16 = add i32 %t15, 1
+ %t17 = add i32 %t16, 1
+ %t18 = add i32 %t17, 1
+ %t19 = add i32 %t18, 1
+ %t20 = add i32 %t19, 1
+ ret void
+bb.false:
+ ret void
+}
+
+define void @outer4(i32 %A) {
+; CHECK: @outer4
+; CHECK-NOT: call void @inner4
+ %ptr = alloca i32
+ call void @inner4(i32* %ptr, i32 %A)
+ ret void
+}
+
+; %B poisons this call, scalar-repl can't handle that instruction. However, we
+; still want to detect that the icmp and branch *can* be handled.
+define void @inner4(i32 *%ptr, i32 %A) {
+ %B = getelementptr inbounds i32* %ptr, i32 %A
+ %C = icmp eq i32* %ptr, null
+ br i1 %C, label %bb.true, label %bb.false
+bb.true:
+ ; This block musn't be counted in the inline cost.
+ %t1 = load i32* %ptr
+ %t2 = add i32 %t1, 1
+ %t3 = add i32 %t2, 1
+ %t4 = add i32 %t3, 1
+ %t5 = add i32 %t4, 1
+ %t6 = add i32 %t5, 1
+ %t7 = add i32 %t6, 1
+ %t8 = add i32 %t7, 1
+ %t9 = add i32 %t8, 1
+ %t10 = add i32 %t9, 1
+ %t11 = add i32 %t10, 1
+ %t12 = add i32 %t11, 1
+ %t13 = add i32 %t12, 1
+ %t14 = add i32 %t13, 1
+ %t15 = add i32 %t14, 1
+ %t16 = add i32 %t15, 1
+ %t17 = add i32 %t16, 1
+ %t18 = add i32 %t17, 1
+ %t19 = add i32 %t18, 1
+ %t20 = add i32 %t19, 1
+ ret void
+bb.false:
+ ret void
+}
+
+define void @outer5() {
+; CHECK: @outer5
+; CHECK-NOT: call void @inner5
+ %ptr = alloca i32
+ call void @inner5(i1 false, i32* %ptr)
+ ret void
+}
+
+; %D poisons this call, scalar-repl can't handle that instruction. However, if
+; the flag is set appropriately, the poisoning instruction is inside of dead
+; code, and so shouldn't be counted.
+define void @inner5(i1 %flag, i32 *%ptr) {
+ %A = load i32* %ptr
+ store i32 0, i32* %ptr
+ %C = getelementptr inbounds i32* %ptr, i32 0
+ br i1 %flag, label %if.then, label %exit
+
+if.then:
+ %D = getelementptr inbounds i32* %ptr, i32 %A
+ %E = bitcast i32* %ptr to i8*
+ %F = select i1 false, i32* %ptr, i32* @glbl
+ call void @llvm.lifetime.start(i64 0, i8* %E)
+ ret void
+
+exit:
+ ret void
+}
+
diff --git a/test/Transforms/Inline/always-inline.ll b/test/Transforms/Inline/always-inline.ll
new file mode 100644
index 0000000..e0be41f
--- /dev/null
+++ b/test/Transforms/Inline/always-inline.ll
@@ -0,0 +1,125 @@
+; RUN: opt < %s -inline-threshold=0 -always-inline -S | FileCheck %s
+;
+; Ensure the threshold has no impact on these decisions.
+; RUN: opt < %s -inline-threshold=20000000 -always-inline -S | FileCheck %s
+; RUN: opt < %s -inline-threshold=-20000000 -always-inline -S | FileCheck %s
+
+define i32 @inner1() alwaysinline {
+ ret i32 1
+}
+define i32 @outer1() {
+; CHECK: @outer1
+; CHECK-NOT: call
+; CHECK: ret
+
+ %r = call i32 @inner1()
+ ret i32 %r
+}
+
+; The always inliner can't DCE internal functions. PR2945
+; CHECK: @pr2945
+define internal i32 @pr2945() nounwind {
+ ret i32 0
+}
+
+define internal void @inner2(i32 %N) alwaysinline {
+ %P = alloca i32, i32 %N
+ ret void
+}
+define void @outer2(i32 %N) {
+; The always inliner (unlike the normal one) should be willing to inline
+; a function with a dynamic alloca into one without a dynamic alloca.
+; rdar://6655932
+;
+; CHECK: @outer2
+; CHECK-NOT: call void @inner2
+; CHECK alloca i32, i32 %N
+; CHECK-NOT: call void @inner2
+; CHECK: ret void
+
+ call void @inner2( i32 %N )
+ ret void
+}
+
+declare i32 @a() returns_twice
+declare i32 @b() returns_twice
+
+define i32 @inner3() alwaysinline {
+entry:
+ %call = call i32 @a() returns_twice
+ %add = add nsw i32 1, %call
+ ret i32 %add
+}
+define i32 @outer3() {
+entry:
+; CHECK: @outer3
+; CHECK-NOT: call i32 @a
+; CHECK: ret
+
+ %call = call i32 @inner3()
+ %add = add nsw i32 1, %call
+ ret i32 %add
+}
+
+define i32 @inner4() alwaysinline returns_twice {
+entry:
+ %call = call i32 @b() returns_twice
+ %add = add nsw i32 1, %call
+ ret i32 %add
+}
+
+define i32 @outer4() {
+entry:
+; CHECK: @outer4
+; CHECK: call i32 @b()
+; CHECK: ret
+
+ %call = call i32 @inner4() returns_twice
+ %add = add nsw i32 1, %call
+ ret i32 %add
+}
+
+define i32 @inner5(i8* %addr) alwaysinline {
+entry:
+ indirectbr i8* %addr, [ label %one, label %two ]
+
+one:
+ ret i32 42
+
+two:
+ ret i32 44
+}
+define i32 @outer5(i32 %x) {
+; CHECK: @outer5
+; CHECK: call i32 @inner5
+; CHECK: ret
+
+ %cmp = icmp slt i32 %x, 42
+ %addr = select i1 %cmp, i8* blockaddress(@inner5, %one), i8* blockaddress(@inner5, %two)
+ %call = call i32 @inner5(i8* %addr)
+ ret i32 %call
+}
+
+define void @inner6(i32 %x) alwaysinline {
+entry:
+ %icmp = icmp slt i32 %x, 0
+ br i1 %icmp, label %return, label %bb
+
+bb:
+ %sub = sub nsw i32 %x, 1
+ call void @inner6(i32 %sub)
+ ret void
+
+return:
+ ret void
+}
+define void @outer6() {
+; CHECK: @outer6
+; CHECK: call void @inner6(i32 42)
+; CHECK: ret
+
+entry:
+ call void @inner6(i32 42)
+ ret void
+}
+
diff --git a/test/Transforms/Inline/always_inline_dyn_alloca.ll b/test/Transforms/Inline/always_inline_dyn_alloca.ll
deleted file mode 100644
index 25cfc49..0000000
--- a/test/Transforms/Inline/always_inline_dyn_alloca.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: opt < %s -inline -S | not grep callee
-; rdar://6655932
-
-; If callee is marked alwaysinline, inline it! Even if callee has dynamic
-; alloca and caller does not,
-
-define internal void @callee(i32 %N) alwaysinline {
- %P = alloca i32, i32 %N
- ret void
-}
-
-define void @foo(i32 %N) {
- call void @callee( i32 %N )
- ret void
-}
diff --git a/test/Transforms/Inline/blockaddress.ll b/test/Transforms/Inline/blockaddress.ll
new file mode 100644
index 0000000..4206312
--- /dev/null
+++ b/test/Transforms/Inline/blockaddress.ll
@@ -0,0 +1,27 @@
+; RUN: opt -inline -S < %s | FileCheck %s
+; PR10162
+
+; Make sure the blockaddress is mapped correctly when doit is inlined
+; CHECK: store i8* blockaddress(@f, %here.i), i8** @ptr1, align 8
+
+@i = global i32 1, align 4
+@ptr1 = common global i8* null, align 8
+
+define void @doit(i8** nocapture %pptr, i32 %cond) nounwind uwtable {
+entry:
+ %tobool = icmp eq i32 %cond, 0
+ br i1 %tobool, label %if.end, label %here
+
+here:
+ store i8* blockaddress(@doit, %here), i8** %pptr, align 8
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+define void @f(i32 %cond) nounwind uwtable {
+entry:
+ call void @doit(i8** @ptr1, i32 %cond)
+ ret void
+}
diff --git a/test/Transforms/Inline/dg.exp b/test/Transforms/Inline/dg.exp
deleted file mode 100644
index f200589..0000000
--- a/test/Transforms/Inline/dg.exp
+++ /dev/null
@@ -1,3 +0,0 @@
-load_lib llvm.exp
-
-RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
diff --git a/test/Transforms/Inline/dynamic_alloca_test.ll b/test/Transforms/Inline/dynamic_alloca_test.ll
index 0286535..15a5c66 100644
--- a/test/Transforms/Inline/dynamic_alloca_test.ll
+++ b/test/Transforms/Inline/dynamic_alloca_test.ll
@@ -3,33 +3,43 @@
; Functions with dynamic allocas can only be inlined into functions that
; already have dynamic allocas.
-; RUN: opt < %s -inline -S | \
-; RUN: grep llvm.stacksave
-; RUN: opt < %s -inline -S | not grep callee
-
+; RUN: opt < %s -inline -S | FileCheck %s
+;
+; FIXME: This test is xfailed because the inline cost rewrite disabled *all*
+; inlining of functions which contain a dynamic alloca. It should be re-enabled
+; once that functionality is restored.
+; XFAIL: *
declare void @ext(i32*)
define internal void @callee(i32 %N) {
- %P = alloca i32, i32 %N ; <i32*> [#uses=1]
- call void @ext( i32* %P )
- ret void
+ %P = alloca i32, i32 %N
+ call void @ext(i32* %P)
+ ret void
}
define void @foo(i32 %N) {
-; <label>:0
- %P = alloca i32, i32 %N ; <i32*> [#uses=1]
- call void @ext( i32* %P )
- br label %Loop
-
-Loop: ; preds = %Loop, %0
- %count = phi i32 [ 0, %0 ], [ %next, %Loop ] ; <i32> [#uses=2]
- %next = add i32 %count, 1 ; <i32> [#uses=1]
- call void @callee( i32 %N )
- %cond = icmp eq i32 %count, 100000 ; <i1> [#uses=1]
- br i1 %cond, label %out, label %Loop
-
-out: ; preds = %Loop
- ret void
+; CHECK: @foo
+; CHECK: alloca i32, i32 %{{.*}}
+; CHECK: call i8* @llvm.stacksave()
+; CHECK: alloca i32, i32 %{{.*}}
+; CHECK: call void @ext
+; CHECK: call void @llvm.stackrestore
+; CHECK: ret
+
+entry:
+ %P = alloca i32, i32 %N
+ call void @ext(i32* %P)
+ br label %loop
+
+loop:
+ %count = phi i32 [ 0, %entry ], [ %next, %loop ]
+ %next = add i32 %count, 1
+ call void @callee(i32 %N)
+ %cond = icmp eq i32 %count, 100000
+ br i1 %cond, label %out, label %loop
+
+out:
+ ret void
}
diff --git a/test/Transforms/Inline/inline-invoke-tail.ll b/test/Transforms/Inline/inline-invoke-tail.ll
index 462c29a..1f34113 100644
--- a/test/Transforms/Inline/inline-invoke-tail.ll
+++ b/test/Transforms/Inline/inline-invoke-tail.ll
@@ -23,15 +23,11 @@ invcont:
ret i32 %retval
lpad:
- %eh_ptr = call i8* @llvm.eh.exception()
- %eh_select = call i32 (i8*, i8*, ...)* @llvm.eh.selector(i8* %eh_ptr, i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*), i8* null)
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ catch i8* null
unreachable
}
-declare i8* @llvm.eh.exception() nounwind readonly
-
-declare i32 @llvm.eh.selector(i8*, i8*, ...) nounwind
-
declare i32 @__gxx_personality_v0(...)
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
diff --git a/test/Transforms/Inline/inline_cleanup.ll b/test/Transforms/Inline/inline_cleanup.ll
index 4c64721..3898aa7 100644
--- a/test/Transforms/Inline/inline_cleanup.ll
+++ b/test/Transforms/Inline/inline_cleanup.ll
@@ -1,10 +1,8 @@
; Test that the inliner doesn't leave around dead allocas, and that it folds
; uncond branches away after it is done specializing.
-; RUN: opt < %s -inline -S | \
-; RUN: not grep {alloca.*uses=0}
-; RUN: opt < %s -inline -S | \
-; RUN: not grep {br label}
+; RUN: opt < %s -inline -S | FileCheck %s
+
@A = weak global i32 0 ; <i32*> [#uses=1]
@B = weak global i32 0 ; <i32*> [#uses=1]
@C = weak global i32 0 ; <i32*> [#uses=1]
@@ -54,6 +52,18 @@ UnifiedReturnBlock: ; preds = %cond_next13
declare void @ext(i32*)
define void @test() {
+; CHECK: @test
+; CHECK-NOT: ret
+;
+; FIXME: This should be a CHECK-NOT, but currently we have a bug that causes us
+; to not nuke unused allocas.
+; CHECK: alloca
+; CHECK-NOT: ret
+;
+; No branches should survive the inliner's cleanup.
+; CHECK-NOT: br
+; CHECK: ret void
+
entry:
tail call fastcc void @foo( i32 1 )
tail call fastcc void @foo( i32 2 )
@@ -61,3 +71,143 @@ entry:
tail call fastcc void @foo( i32 8 )
ret void
}
+
+declare void @f(i32 %x)
+
+define void @inner2(i32 %x, i32 %y, i32 %z, i1 %b) {
+entry:
+ %cmp1 = icmp ne i32 %x, 0
+ br i1 %cmp1, label %then1, label %end1
+
+then1:
+ call void @f(i32 %x)
+ br label %end1
+
+end1:
+ %x2 = and i32 %x, %z
+ %cmp2 = icmp sgt i32 %x2, 1
+ br i1 %cmp2, label %then2, label %end2
+
+then2:
+ call void @f(i32 %x2)
+ br label %end2
+
+end2:
+ %y2 = or i32 %y, %z
+ %cmp3 = icmp sgt i32 %y2, 0
+ br i1 %cmp3, label %then3, label %end3
+
+then3:
+ call void @f(i32 %y2)
+ br label %end3
+
+end3:
+ br i1 %b, label %end3.1, label %end3.2
+
+end3.1:
+ %x3.1 = or i32 %x, 10
+ br label %end3.3
+
+end3.2:
+ %x3.2 = or i32 %x, 10
+ br label %end3.3
+
+end3.3:
+ %x3.3 = phi i32 [ %x3.1, %end3.1 ], [ %x3.2, %end3.2 ]
+ %cmp4 = icmp slt i32 %x3.3, 1
+ br i1 %cmp4, label %then4, label %end4
+
+then4:
+ call void @f(i32 %x3.3)
+ br label %end4
+
+end4:
+ ret void
+}
+
+define void @outer2(i32 %z, i1 %b) {
+; Ensure that after inlining, none of the blocks with a call to @f actually
+; make it through inlining.
+; CHECK: define void @outer2
+; CHECK-NOT: call
+; CHECK: ret void
+
+entry:
+ call void @inner2(i32 0, i32 -1, i32 %z, i1 %b)
+ ret void
+}
+
+define void @PR12470_inner(i16 signext %p1) nounwind uwtable {
+entry:
+ br i1 undef, label %cond.true, label %cond.false
+
+cond.true:
+ br label %cond.end
+
+cond.false:
+ %conv = sext i16 %p1 to i32
+ br label %cond.end
+
+cond.end:
+ %cond = phi i32 [ undef, %cond.true ], [ 0, %cond.false ]
+ %tobool = icmp eq i32 %cond, 0
+ br i1 %tobool, label %if.end5, label %if.then
+
+if.then:
+ ret void
+
+if.end5:
+ ret void
+}
+
+define void @PR12470_outer() {
+; This previously crashed during inliner cleanup and folding inner return
+; instructions. Check that we don't crash and we produce a function with a single
+; return instruction due to merging the returns of the inlined function.
+; CHECK: define void @PR12470_outer
+; CHECK-NOT: call
+; CHECK: ret void
+; CHECK-NOT: ret void
+; CHECK: }
+
+entry:
+ call void @PR12470_inner(i16 signext 1)
+ ret void
+}
+
+define void @crasher_inner() nounwind uwtable {
+entry:
+ br i1 false, label %for.end28, label %for.body6
+
+for.body6:
+ br i1 undef, label %for.body6, label %for.cond12.for.inc26_crit_edge
+
+for.cond12.for.inc26_crit_edge:
+ br label %for.body6.1
+
+for.end28:
+ ret void
+
+for.body6.1:
+ br i1 undef, label %for.body6.1, label %for.cond12.for.inc26_crit_edge.1
+
+for.cond12.for.inc26_crit_edge.1:
+ br label %for.body6.2
+
+for.body6.2:
+ br i1 undef, label %for.body6.2, label %for.cond12.for.inc26_crit_edge.2
+
+for.cond12.for.inc26_crit_edge.2:
+ br label %for.end28
+}
+
+define void @crasher_outer() {
+; CHECK: @crasher_outer
+; CHECK-NOT: call
+; CHECK: ret void
+; CHECK-NOT: ret
+; CHECK: }
+entry:
+ tail call void @crasher_inner()
+ ret void
+}
diff --git a/test/Transforms/Inline/inline_constprop.ll b/test/Transforms/Inline/inline_constprop.ll
index 537c69b..dc35b60 100644
--- a/test/Transforms/Inline/inline_constprop.ll
+++ b/test/Transforms/Inline/inline_constprop.ll
@@ -1,14 +1,112 @@
-; RUN: opt < %s -inline -S | not grep callee
-; RUN: opt < %s -inline -S | not grep div
+; RUN: opt < %s -inline -inline-threshold=20 -S | FileCheck %s
+define internal i32 @callee1(i32 %A, i32 %B) {
+ %C = sdiv i32 %A, %B
+ ret i32 %C
+}
+
+define i32 @caller1() {
+; CHECK: define i32 @caller1
+; CHECK-NEXT: ret i32 3
+
+ %X = call i32 @callee1( i32 10, i32 3 )
+ ret i32 %X
+}
+
+define i32 @caller2() {
+; Check that we can constant-prop through instructions after inlining callee21
+; to get constants in the inlined callsite to callee22.
+; FIXME: Currently, the threshold is fixed at 20 because we don't perform
+; *recursive* cost analysis to realize that the nested call site will definitely
+; inline and be cheap. We should eventually do that and lower the threshold here
+; to 1.
+;
+; CHECK: @caller2
+; CHECK-NOT: call void @callee2
+; CHECK: ret
+
+ %x = call i32 @callee21(i32 42, i32 48)
+ ret i32 %x
+}
+
+define i32 @callee21(i32 %x, i32 %y) {
+ %sub = sub i32 %y, %x
+ %result = call i32 @callee22(i32 %sub)
+ ret i32 %result
+}
-define internal i32 @callee(i32 %A, i32 %B) {
- %C = sdiv i32 %A, %B ; <i32> [#uses=1]
- ret i32 %C
+declare i8* @getptr()
+
+define i32 @callee22(i32 %x) {
+ %icmp = icmp ugt i32 %x, 42
+ br i1 %icmp, label %bb.true, label %bb.false
+bb.true:
+ ; This block musn't be counted in the inline cost.
+ %x1 = add i32 %x, 1
+ %x2 = add i32 %x1, 1
+ %x3 = add i32 %x2, 1
+ %x4 = add i32 %x3, 1
+ %x5 = add i32 %x4, 1
+ %x6 = add i32 %x5, 1
+ %x7 = add i32 %x6, 1
+ %x8 = add i32 %x7, 1
+
+ ret i32 %x8
+bb.false:
+ ret i32 %x
}
-define i32 @test() {
- %X = call i32 @callee( i32 10, i32 3 ) ; <i32> [#uses=1]
- ret i32 %X
+define i32 @caller3() {
+; Check that even if the expensive path is hidden behind several basic blocks,
+; it doesn't count toward the inline cost when constant-prop proves those paths
+; dead.
+;
+; CHECK: @caller3
+; CHECK-NOT: call
+; CHECK: ret i32 6
+
+entry:
+ %x = call i32 @callee3(i32 42, i32 48)
+ ret i32 %x
}
+define i32 @callee3(i32 %x, i32 %y) {
+ %sub = sub i32 %y, %x
+ %icmp = icmp ugt i32 %sub, 42
+ br i1 %icmp, label %bb.true, label %bb.false
+
+bb.true:
+ %icmp2 = icmp ult i32 %sub, 64
+ br i1 %icmp2, label %bb.true.true, label %bb.true.false
+
+bb.true.true:
+ ; This block musn't be counted in the inline cost.
+ %x1 = add i32 %x, 1
+ %x2 = add i32 %x1, 1
+ %x3 = add i32 %x2, 1
+ %x4 = add i32 %x3, 1
+ %x5 = add i32 %x4, 1
+ %x6 = add i32 %x5, 1
+ %x7 = add i32 %x6, 1
+ %x8 = add i32 %x7, 1
+ br label %bb.merge
+
+bb.true.false:
+ ; This block musn't be counted in the inline cost.
+ %y1 = add i32 %y, 1
+ %y2 = add i32 %y1, 1
+ %y3 = add i32 %y2, 1
+ %y4 = add i32 %y3, 1
+ %y5 = add i32 %y4, 1
+ %y6 = add i32 %y5, 1
+ %y7 = add i32 %y6, 1
+ %y8 = add i32 %y7, 1
+ br label %bb.merge
+
+bb.merge:
+ %result = phi i32 [ %x8, %bb.true.true ], [ %y8, %bb.true.false ]
+ ret i32 %result
+
+bb.false:
+ ret i32 %sub
+}
diff --git a/test/Transforms/Inline/inline_returns_twice.ll b/test/Transforms/Inline/inline_returns_twice.ll
new file mode 100644
index 0000000..ab2e954
--- /dev/null
+++ b/test/Transforms/Inline/inline_returns_twice.ll
@@ -0,0 +1,41 @@
+; RUN: opt < %s -inline -S | FileCheck %s
+
+; Check that functions with "returns_twice" calls are only inlined,
+; if they are themselve marked as such.
+
+declare i32 @a() returns_twice
+declare i32 @b() returns_twice
+
+define i32 @f() {
+entry:
+ %call = call i32 @a() returns_twice
+ %add = add nsw i32 1, %call
+ ret i32 %add
+}
+
+define i32 @g() {
+entry:
+; CHECK: define i32 @g
+; CHECK: call i32 @f()
+; CHECK-NOT: call i32 @a()
+ %call = call i32 @f()
+ %add = add nsw i32 1, %call
+ ret i32 %add
+}
+
+define i32 @h() returns_twice {
+entry:
+ %call = call i32 @b() returns_twice
+ %add = add nsw i32 1, %call
+ ret i32 %add
+}
+
+define i32 @i() {
+entry:
+; CHECK: define i32 @i
+; CHECK: call i32 @b()
+; CHECK-NOT: call i32 @h()
+ %call = call i32 @h() returns_twice
+ %add = add nsw i32 1, %call
+ ret i32 %add
+}
diff --git a/test/Transforms/Inline/lit.local.cfg b/test/Transforms/Inline/lit.local.cfg
new file mode 100644
index 0000000..19eebc0
--- /dev/null
+++ b/test/Transforms/Inline/lit.local.cfg
@@ -0,0 +1 @@
+config.suffixes = ['.ll', '.c', '.cpp']
diff --git a/test/Transforms/Inline/noinline-recursive-fn.ll b/test/Transforms/Inline/noinline-recursive-fn.ll
index 1d5ebbb..6cde0e2 100644
--- a/test/Transforms/Inline/noinline-recursive-fn.ll
+++ b/test/Transforms/Inline/noinline-recursive-fn.ll
@@ -17,7 +17,7 @@ entry:
bb: ; preds = %entry
%1 = sub nsw i32 %x, 1 ; <i32> [#uses=1]
call void @foo(i32 %1) nounwind ssp
- volatile store i32 1, i32* @g, align 4
+ store volatile i32 1, i32* @g, align 4
ret void
return: ; preds = %entry
@@ -42,7 +42,7 @@ entry:
%0 = bitcast i8* %Bar to void (i32, i8*, i8*)*
%1 = sub nsw i32 %x, 1
call void %0(i32 %1, i8* %Foo, i8* %Bar) nounwind
- volatile store i32 42, i32* @g, align 4
+ store volatile i32 42, i32* @g, align 4
ret void
}
@@ -54,7 +54,7 @@ entry:
bb: ; preds = %entry
%1 = bitcast i8* %Foo to void (i32, i8*, i8*)* ; <void (i32, i8*, i8*)*> [#uses=1]
call void %1(i32 %x, i8* %Foo, i8* %Bar) nounwind
- volatile store i32 13, i32* @g, align 4
+ store volatile i32 13, i32* @g, align 4
ret void
return: ; preds = %entry
@@ -71,3 +71,40 @@ entry:
call void @f2(i32 123, i8* bitcast (void (i32, i8*, i8*)* @f1 to i8*), i8* bitcast (void (i32, i8*, i8*)* @f2 to i8*)) nounwind ssp
ret void
}
+
+
+; Check that a recursive function, when called with a constant that makes the
+; recursive path dead code can actually be inlined.
+define i32 @fib(i32 %i) {
+entry:
+ %is.zero = icmp eq i32 %i, 0
+ br i1 %is.zero, label %zero.then, label %zero.else
+
+zero.then:
+ ret i32 0
+
+zero.else:
+ %is.one = icmp eq i32 %i, 1
+ br i1 %is.one, label %one.then, label %one.else
+
+one.then:
+ ret i32 1
+
+one.else:
+ %i1 = sub i32 %i, 1
+ %f1 = call i32 @fib(i32 %i1)
+ %i2 = sub i32 %i, 2
+ %f2 = call i32 @fib(i32 %i2)
+ %f = add i32 %f1, %f2
+ ret i32 %f
+}
+
+define i32 @fib_caller() {
+; CHECK: @fib_caller
+; CHECK-NOT: call
+; CHECK: ret
+ %f1 = call i32 @fib(i32 0)
+ %f2 = call i32 @fib(i32 1)
+ %result = add i32 %f1, %f2
+ ret i32 %result
+}
diff --git a/test/Transforms/Inline/ptr-diff.ll b/test/Transforms/Inline/ptr-diff.ll
new file mode 100644
index 0000000..60fc3e2
--- /dev/null
+++ b/test/Transforms/Inline/ptr-diff.ll
@@ -0,0 +1,58 @@
+; RUN: opt -inline < %s -S -o - -inline-threshold=10 | FileCheck %s
+
+target datalayout = "p:32:32"
+
+define i32 @outer1() {
+; CHECK: @outer1
+; CHECK-NOT: call
+; CHECK: ret i32
+
+ %ptr = alloca i32
+ %ptr1 = getelementptr inbounds i32* %ptr, i32 0
+ %ptr2 = getelementptr inbounds i32* %ptr, i32 42
+ %result = call i32 @inner1(i32* %ptr1, i32* %ptr2)
+ ret i32 %result
+}
+
+define i32 @inner1(i32* %begin, i32* %end) {
+ %begin.i = ptrtoint i32* %begin to i32
+ %end.i = ptrtoint i32* %end to i32
+ %distance = sub i32 %end.i, %begin.i
+ %icmp = icmp sle i32 %distance, 42
+ br i1 %icmp, label %then, label %else
+
+then:
+ ret i32 3
+
+else:
+ %t = load i32* %begin
+ ret i32 %t
+}
+
+define i32 @outer2(i32* %ptr) {
+; Test that an inbounds GEP disables this -- it isn't safe in general as
+; wrapping changes the behavior of lessthan and greaterthan comparisions.
+; CHECK: @outer2
+; CHECK: call i32 @inner2
+; CHECK: ret i32
+
+ %ptr1 = getelementptr i32* %ptr, i32 0
+ %ptr2 = getelementptr i32* %ptr, i32 42
+ %result = call i32 @inner2(i32* %ptr1, i32* %ptr2)
+ ret i32 %result
+}
+
+define i32 @inner2(i32* %begin, i32* %end) {
+ %begin.i = ptrtoint i32* %begin to i32
+ %end.i = ptrtoint i32* %end to i32
+ %distance = sub i32 %end.i, %begin.i
+ %icmp = icmp sle i32 %distance, 42
+ br i1 %icmp, label %then, label %else
+
+then:
+ ret i32 3
+
+else:
+ %t = load i32* %begin
+ ret i32 %t
+}
OpenPOWER on IntegriCloud