Diffstat (limited to 'test/CodeGen/Thumb2')
-rw-r--r--  test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll |  2
-rw-r--r--  test/CodeGen/Thumb2/constant-islands.ll               |  4
-rw-r--r--  test/CodeGen/Thumb2/inflate-regs.ll                   | 49
-rw-r--r--  test/CodeGen/Thumb2/inlineasm.ll                      |  9
-rw-r--r--  test/CodeGen/Thumb2/large-call.ll                     |  9
-rw-r--r--  test/CodeGen/Thumb2/thumb2-cmn.ll                     | 32
-rw-r--r--  test/CodeGen/Thumb2/thumb2-cmp.ll                     | 18
-rw-r--r--  test/CodeGen/Thumb2/thumb2-cmp2.ll                    | 18
-rw-r--r--  test/CodeGen/Thumb2/thumb2-jtb.ll                     |  8
-rw-r--r--  test/CodeGen/Thumb2/thumb2-ldr_post.ll                |  2
-rw-r--r--  test/CodeGen/Thumb2/thumb2-ldr_pre.ll                 |  4
-rw-r--r--  test/CodeGen/Thumb2/thumb2-rev16.ll                   |  2
-rw-r--r--  test/CodeGen/Thumb2/thumb2-ror.ll                     |  6
-rw-r--r--  test/CodeGen/Thumb2/thumb2-tbb.ll                     |  4
-rw-r--r--  test/CodeGen/Thumb2/thumb2-teq.ll                     | 16
-rw-r--r--  test/CodeGen/Thumb2/thumb2-teq2.ll                    | 18
-rw-r--r--  test/CodeGen/Thumb2/thumb2-tst.ll                     | 16
-rw-r--r--  test/CodeGen/Thumb2/thumb2-tst2.ll                    | 18
-rw-r--r--  test/CodeGen/Thumb2/thumb2-uxt_rot.ll                 | 21
-rw-r--r--  test/CodeGen/Thumb2/tls1.ll                           |  6
20 files changed, 174 insertions(+), 88 deletions(-)
diff --git a/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll b/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll
index af7d716..348e9d3 100644
--- a/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll
+++ b/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll
@@ -1,4 +1,4 @@
-; RUN: llc -relocation-model=pic < %s | grep {:$} | sort | uniq -d | count 0
+; RUN: llc -relocation-model=pic < %s | grep ":$" | sort | uniq -d | count 0
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
target triple = "thumbv7-apple-darwin10"
diff --git a/test/CodeGen/Thumb2/constant-islands.ll b/test/CodeGen/Thumb2/constant-islands.ll
index 19d2385..255b709 100644
--- a/test/CodeGen/Thumb2/constant-islands.ll
+++ b/test/CodeGen/Thumb2/constant-islands.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mcpu=cortex-a8 -O0 -filetype=obj -o %t.o
; RUN: llc < %s -march=thumb -mcpu=cortex-a8 -O0 -filetype=obj -o %t.o
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 -O2 -filetype=obj -o %t.o
-; RUN: llc < %s -march=thumb -mcpu=cortex-a8 -O2 -filetype=obj -o %t.o
+; RUN: llc < %s -march=arm -mcpu=cortex-a8 -O2 -filetype=obj -verify-machineinstrs -o %t.o
+; RUN: llc < %s -march=thumb -mcpu=cortex-a8 -O2 -filetype=obj -verify-machineinstrs -o %t.o
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
target triple = "thumbv7-apple-ios"
diff --git a/test/CodeGen/Thumb2/inflate-regs.ll b/test/CodeGen/Thumb2/inflate-regs.ll
new file mode 100644
index 0000000..d8a558c
--- /dev/null
+++ b/test/CodeGen/Thumb2/inflate-regs.ll
@@ -0,0 +1,49 @@
+; RUN: llc < %s -mcpu=cortex-a8 | FileCheck %s
+target triple = "thumbv7-apple-ios"
+
+; CHECK: local_split
+;
+; The load must go into d0-15 which are all clobbered by the asm.
+; RAGreedy should split the range and use d16-d31 to avoid a spill.
+;
+; CHECK: vldr s
+; CHECK-NOT: vstr
+; CHECK: vadd.f32
+; CHECK-NOT: vstr
+; CHECK: vorr
+; CHECK: vstr s
+define void @local_split(float* nocapture %p) nounwind ssp {
+entry:
+ %x = load float* %p, align 4
+ %a = fadd float %x, 1.0
+ tail call void asm sideeffect "", "~{d0},~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15}"() nounwind
+ store float %a, float* %p, align 4
+ ret void
+}
+
+; CHECK: global_split
+;
+; Same thing, but across basic blocks.
+;
+; CHECK: vldr s
+; CHECK-NOT: vstr
+; CHECK: vadd.f32
+; CHECK-NOT: vstr
+; CHECK: vorr
+; CHECK: vstr s
+define void @global_split(float* nocapture %p1, float* nocapture %p2) nounwind ssp {
+entry:
+ %0 = load float* %p1, align 4
+ %add = fadd float %0, 1.000000e+00
+ tail call void asm sideeffect "", "~{d0},~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15}"() nounwind
+ %cmp = fcmp ogt float %add, 0.000000e+00
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ store float %add, float* %p2, align 4
+ br label %if.end
+
+if.end:
+ store float %add, float* %p1, align 4
+ ret void
+}
diff --git a/test/CodeGen/Thumb2/inlineasm.ll b/test/CodeGen/Thumb2/inlineasm.ll
new file mode 100644
index 0000000..30f28f8
--- /dev/null
+++ b/test/CodeGen/Thumb2/inlineasm.ll
@@ -0,0 +1,9 @@
+; RUN: llc < %s -filetype=obj
+
+target triple = "thumbv7-none--eabi"
+
+define void @t1() nounwind {
+entry:
+ call void asm sideeffect "mov r0, r1", ""() nounwind
+ ret void
+}
diff --git a/test/CodeGen/Thumb2/large-call.ll b/test/CodeGen/Thumb2/large-call.ll
index aef6f85..61c477a 100644
--- a/test/CodeGen/Thumb2/large-call.ll
+++ b/test/CodeGen/Thumb2/large-call.ll
@@ -3,17 +3,18 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-
target triple = "thumbv7-apple-ios0.0.0"
; This test case would clobber the outgoing call arguments by writing to the
-; emergency spill slot at [sp, #4] without adjusting the stack pointer first.
+; emergency spill slots at [sp, #4] or [sp, #8] without adjusting the stack
+; pointer first.
; CHECK: main
; CHECK: vmov.f64
; Adjust SP for the large call
; CHECK: sub sp,
-; CHECK: mov [[FR:r[0-9]+]], sp
-; Store to call frame + #4
-; CHECK: str{{.*\[}}[[FR]], #4]
+; Store to call frame + #8
+; CHECK: vstr{{.*\[}}sp, #8]
; Don't clobber that store until the call.
; CHECK-NOT: [sp, #4]
+; CHECK-NOT: [sp, #8]
; CHECK: variadic
define i32 @main() ssp {
diff --git a/test/CodeGen/Thumb2/thumb2-cmn.ll b/test/CodeGen/Thumb2/thumb2-cmn.ll
index df221b9..67b07e6 100644
--- a/test/CodeGen/Thumb2/thumb2-cmn.ll
+++ b/test/CodeGen/Thumb2/thumb2-cmn.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -join-physregs | FileCheck %s
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; These tests implicitly depend on 'movs r0, #0' being rematerialized below the
-; test as 'mov.w r0, #0'. So far, that requires physreg joining.
+; These tests could be improved by 'movs r0, #0' being rematerialized below the
+; test as 'mov.w r0, #0'.
define i1 @f1(i32 %a, i32 %b) {
%nb = sub i32 0, %b
@@ -9,7 +9,7 @@ define i1 @f1(i32 %a, i32 %b) {
ret i1 %tmp
}
; CHECK: f1:
-; CHECK: cmn.w r0, r1
+; CHECK: cmn {{.*}}, r1
define i1 @f2(i32 %a, i32 %b) {
%nb = sub i32 0, %b
@@ -17,7 +17,7 @@ define i1 @f2(i32 %a, i32 %b) {
ret i1 %tmp
}
; CHECK: f2:
-; CHECK: cmn.w r0, r1
+; CHECK: cmn {{.*}}, r1
define i1 @f3(i32 %a, i32 %b) {
%nb = sub i32 0, %b
@@ -25,7 +25,7 @@ define i1 @f3(i32 %a, i32 %b) {
ret i1 %tmp
}
; CHECK: f3:
-; CHECK: cmn.w r0, r1
+; CHECK: cmn {{.*}}, r1
define i1 @f4(i32 %a, i32 %b) {
%nb = sub i32 0, %b
@@ -33,7 +33,7 @@ define i1 @f4(i32 %a, i32 %b) {
ret i1 %tmp
}
; CHECK: f4:
-; CHECK: cmn.w r0, r1
+; CHECK: cmn {{.*}}, r1
define i1 @f5(i32 %a, i32 %b) {
%tmp = shl i32 %b, 5
@@ -42,7 +42,7 @@ define i1 @f5(i32 %a, i32 %b) {
ret i1 %tmp1
}
; CHECK: f5:
-; CHECK: cmn.w r0, r1, lsl #5
+; CHECK: cmn.w {{.*}}, r1, lsl #5
define i1 @f6(i32 %a, i32 %b) {
%tmp = lshr i32 %b, 6
@@ -51,7 +51,7 @@ define i1 @f6(i32 %a, i32 %b) {
ret i1 %tmp1
}
; CHECK: f6:
-; CHECK: cmn.w r0, r1, lsr #6
+; CHECK: cmn.w {{.*}}, r1, lsr #6
define i1 @f7(i32 %a, i32 %b) {
%tmp = ashr i32 %b, 7
@@ -60,7 +60,7 @@ define i1 @f7(i32 %a, i32 %b) {
ret i1 %tmp1
}
; CHECK: f7:
-; CHECK: cmn.w r0, r1, asr #7
+; CHECK: cmn.w {{.*}}, r1, asr #7
define i1 @f8(i32 %a, i32 %b) {
%l8 = shl i32 %a, 24
@@ -71,5 +71,15 @@ define i1 @f8(i32 %a, i32 %b) {
ret i1 %tmp1
}
; CHECK: f8:
-; CHECK: cmn.w r0, r0, ror #8
+; CHECK: cmn.w {{.*}}, {{.*}}, ror #8
+
+define void @f9(i32 %a, i32 %b) nounwind optsize {
+ tail call void asm sideeffect "cmn.w r0, r1", ""() nounwind, !srcloc !0
+ ret void
+}
+
+!0 = metadata !{i32 81}
+
+; CHECK: f9:
+; CHECK: cmn.w r0, r1
diff --git a/test/CodeGen/Thumb2/thumb2-cmp.ll b/test/CodeGen/Thumb2/thumb2-cmp.ll
index da12114..4ce7acc 100644
--- a/test/CodeGen/Thumb2/thumb2-cmp.ll
+++ b/test/CodeGen/Thumb2/thumb2-cmp.ll
@@ -1,12 +1,12 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -join-physregs | FileCheck %s
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; These tests implicitly depend on 'movs r0, #0' being rematerialized below the
-; test as 'mov.w r0, #0'. So far, that requires physreg joining.
+; These tests would be improved by 'movs r0, #0' being rematerialized below the
+; test as 'mov.w r0, #0'.
; 0x000000bb = 187
define i1 @f1(i32 %a) {
; CHECK: f1:
-; CHECK: cmp r0, #187
+; CHECK: cmp {{.*}}, #187
%tmp = icmp ne i32 %a, 187
ret i1 %tmp
}
@@ -14,7 +14,7 @@ define i1 @f1(i32 %a) {
; 0x00aa00aa = 11141290
define i1 @f2(i32 %a) {
; CHECK: f2:
-; CHECK: cmp.w r0, #11141290
+; CHECK: cmp.w {{.*}}, #11141290
%tmp = icmp eq i32 %a, 11141290
ret i1 %tmp
}
@@ -22,7 +22,7 @@ define i1 @f2(i32 %a) {
; 0xcc00cc00 = 3422604288
define i1 @f3(i32 %a) {
; CHECK: f3:
-; CHECK: cmp.w r0, #-872363008
+; CHECK: cmp.w {{.*}}, #-872363008
%tmp = icmp ne i32 %a, 3422604288
ret i1 %tmp
}
@@ -30,7 +30,7 @@ define i1 @f3(i32 %a) {
; 0xdddddddd = 3722304989
define i1 @f4(i32 %a) {
; CHECK: f4:
-; CHECK: cmp.w r0, #-572662307
+; CHECK: cmp.w {{.*}}, #-572662307
%tmp = icmp ne i32 %a, 3722304989
ret i1 %tmp
}
@@ -38,7 +38,7 @@ define i1 @f4(i32 %a) {
; 0x00110000 = 1114112
define i1 @f5(i32 %a) {
; CHECK: f5:
-; CHECK: cmp.w r0, #1114112
+; CHECK: cmp.w {{.*}}, #1114112
%tmp = icmp eq i32 %a, 1114112
ret i1 %tmp
}
@@ -46,7 +46,7 @@ define i1 @f5(i32 %a) {
; Check that we don't do an invalid (a > b) --> !(a < b + 1) transform.
;
; CHECK: f6:
-; CHECK-NOT: cmp.w r0, #-2147483648
+; CHECK-NOT: cmp.w {{.*}}, #-2147483648
; CHECK: bx lr
define i32 @f6(i32 %a) {
%tmp = icmp sgt i32 %a, 2147483647
diff --git a/test/CodeGen/Thumb2/thumb2-cmp2.ll b/test/CodeGen/Thumb2/thumb2-cmp2.ll
index 15052e0..f6790de 100644
--- a/test/CodeGen/Thumb2/thumb2-cmp2.ll
+++ b/test/CodeGen/Thumb2/thumb2-cmp2.ll
@@ -1,25 +1,25 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -join-physregs | FileCheck %s
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; These tests implicitly depend on 'movs r0, #0' being rematerialized below the
-; test as 'mov.w r0, #0'. So far, that requires physreg joining.
+; These tests would be improved by 'movs r0, #0' being rematerialized below the
+; test as 'mov.w r0, #0'.
define i1 @f1(i32 %a, i32 %b) {
; CHECK: f1:
-; CHECK: cmp r0, r1
+; CHECK: cmp {{.*}}, r1
%tmp = icmp ne i32 %a, %b
ret i1 %tmp
}
define i1 @f2(i32 %a, i32 %b) {
; CHECK: f2:
-; CHECK: cmp r0, r1
+; CHECK: cmp {{.*}}, r1
%tmp = icmp eq i32 %a, %b
ret i1 %tmp
}
define i1 @f6(i32 %a, i32 %b) {
; CHECK: f6:
-; CHECK: cmp.w r0, r1, lsl #5
+; CHECK: cmp.w {{.*}}, r1, lsl #5
%tmp = shl i32 %b, 5
%tmp1 = icmp eq i32 %tmp, %a
ret i1 %tmp1
@@ -27,7 +27,7 @@ define i1 @f6(i32 %a, i32 %b) {
define i1 @f7(i32 %a, i32 %b) {
; CHECK: f7:
-; CHECK: cmp.w r0, r1, lsr #6
+; CHECK: cmp.w {{.*}}, r1, lsr #6
%tmp = lshr i32 %b, 6
%tmp1 = icmp ne i32 %tmp, %a
ret i1 %tmp1
@@ -35,7 +35,7 @@ define i1 @f7(i32 %a, i32 %b) {
define i1 @f8(i32 %a, i32 %b) {
; CHECK: f8:
-; CHECK: cmp.w r0, r1, asr #7
+; CHECK: cmp.w {{.*}}, r1, asr #7
%tmp = ashr i32 %b, 7
%tmp1 = icmp eq i32 %a, %tmp
ret i1 %tmp1
@@ -43,7 +43,7 @@ define i1 @f8(i32 %a, i32 %b) {
define i1 @f9(i32 %a, i32 %b) {
; CHECK: f9:
-; CHECK: cmp.w r0, r0, ror #8
+; CHECK: cmp.w {{.*}}, {{.*}}, ror #8
%l8 = shl i32 %a, 24
%r8 = lshr i32 %a, 8
%tmp = or i32 %l8, %r8
diff --git a/test/CodeGen/Thumb2/thumb2-jtb.ll b/test/CodeGen/Thumb2/thumb2-jtb.ll
index 7e1655f..0748b9b3 100644
--- a/test/CodeGen/Thumb2/thumb2-jtb.ll
+++ b/test/CodeGen/Thumb2/thumb2-jtb.ll
@@ -1,9 +1,15 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -arm-adjust-jump-tables=0 | not grep tbb
+; RUN: llc < %s -march=thumb -mattr=+thumb2 -arm-adjust-jump-tables=0 | FileCheck %s
; Do not use tbb / tbh if any destination is before the jumptable.
; rdar://7102917
define i16 @main__getopt_internal_2E_exit_2E_ce(i32, i1 %b) nounwind {
+; CHECK: main__getopt_internal_2E_exit_2E_ce
+; CHECK-NOT: tbb
+; CHECK-NOT: tbh
+; 32-bit jump tables use explicit branches, not data regions, so make sure
+; we don't annotate this region.
+; CHECK-NOT: data_region
entry:
br i1 %b, label %codeRepl127.exitStub, label %newFuncRoot
diff --git a/test/CodeGen/Thumb2/thumb2-ldr_post.ll b/test/CodeGen/Thumb2/thumb2-ldr_post.ll
index d1af4ba..2178eec 100644
--- a/test/CodeGen/Thumb2/thumb2-ldr_post.ll
+++ b/test/CodeGen/Thumb2/thumb2-ldr_post.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -march=thumb -mattr=+thumb2 | \
-; RUN: grep {ldr.*\\\[.*\],} | count 1
+; RUN: grep "ldr.*\[.*\]," | count 1
define i32 @test(i32 %a, i32 %b, i32 %c) {
%tmp1 = mul i32 %a, %b ; <i32> [#uses=2]
diff --git a/test/CodeGen/Thumb2/thumb2-ldr_pre.ll b/test/CodeGen/Thumb2/thumb2-ldr_pre.ll
index 9cc3f4a..601c0b5 100644
--- a/test/CodeGen/Thumb2/thumb2-ldr_pre.ll
+++ b/test/CodeGen/Thumb2/thumb2-ldr_pre.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=thumb -mattr=+thumb2 | \
-; RUN: grep {ldr.*\\!} | count 3
+; RUN: grep "ldr.*\!" | count 3
; RUN: llc < %s -march=thumb -mattr=+thumb2 | \
-; RUN: grep {ldrsb.*\\!} | count 1
+; RUN: grep "ldrsb.*\!" | count 1
define i32* @test1(i32* %X, i32* %dest) {
%Y = getelementptr i32* %X, i32 4 ; <i32*> [#uses=2]
diff --git a/test/CodeGen/Thumb2/thumb2-rev16.ll b/test/CodeGen/Thumb2/thumb2-rev16.ll
index 39b6ac3..10cd539 100644
--- a/test/CodeGen/Thumb2/thumb2-rev16.ll
+++ b/test/CodeGen/Thumb2/thumb2-rev16.ll
@@ -1,7 +1,7 @@
; XFAIL: *
; fixme rev16 pattern is not matching
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep {rev16\\W*r\[0-9\]*,\\W*r\[0-9\]*} | count 1
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep "rev16\W*r[0-9]*,\W*r[0-9]*" | count 1
; 0xff00ff00 = 4278255360
; 0x00ff00ff = 16711935
diff --git a/test/CodeGen/Thumb2/thumb2-ror.ll b/test/CodeGen/Thumb2/thumb2-ror.ll
index 590c333..5ad92cd 100644
--- a/test/CodeGen/Thumb2/thumb2-ror.ll
+++ b/test/CodeGen/Thumb2/thumb2-ror.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
+; RUN: llc < %s -march=thumb | FileCheck %s -check-prefix=THUMB1
; CHECK: f1:
; CHECK: ror.w r0, r0, #22
@@ -13,6 +13,8 @@ define i32 @f1(i32 %a) {
; CHECK: f2:
; CHECK-NOT: and
; CHECK: ror
+; THUMB1: f2
+; THUMB1: and
define i32 @f2(i32 %v, i32 %nbits) {
entry:
%and = and i32 %nbits, 31
@@ -21,4 +23,4 @@ entry:
%shl = shl i32 %v, %sub
%or = or i32 %shl, %shr
ret i32 %or
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/Thumb2/thumb2-tbb.ll b/test/CodeGen/Thumb2/thumb2-tbb.ll
index 5dc3cc3..a9d71d6 100644
--- a/test/CodeGen/Thumb2/thumb2-tbb.ll
+++ b/test/CodeGen/Thumb2/thumb2-tbb.ll
@@ -5,7 +5,9 @@ define void @bar(i32 %n.u) {
entry:
; CHECK: bar:
; CHECK: tbb
-; CHECK: .align 1
+; CHECK: .data_region jt8
+; CHECK: .end_data_region
+; CHECK-NEXT: .align 1
switch i32 %n.u, label %bb12 [i32 1, label %bb i32 2, label %bb6 i32 4, label %bb7 i32 5, label %bb8 i32 6, label %bb10 i32 7, label %bb1 i32 8, label %bb3 i32 9, label %bb4 i32 10, label %bb9 i32 11, label %bb2 i32 12, label %bb5 i32 13, label %bb11 ]
bb:
diff --git a/test/CodeGen/Thumb2/thumb2-teq.ll b/test/CodeGen/Thumb2/thumb2-teq.ll
index 00c928f..d453f46 100644
--- a/test/CodeGen/Thumb2/thumb2-teq.ll
+++ b/test/CodeGen/Thumb2/thumb2-teq.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -join-physregs | FileCheck %s
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; These tests implicitly depend on 'movs r0, #0' being rematerialized below the
-; test as 'mov.w r0, #0'. So far, that requires physreg joining.
+; These tests would be improved by 'movs r0, #0' being rematerialized below the
+; test as 'mov.w r0, #0'.
; 0x000000bb = 187
define i1 @f2(i32 %a) {
@@ -10,7 +10,7 @@ define i1 @f2(i32 %a) {
ret i1 %tmp1
}
; CHECK: f2:
-; CHECK: teq.w r0, #187
+; CHECK: teq.w {{.*}}, #187
; 0x00aa00aa = 11141290
define i1 @f3(i32 %a) {
@@ -19,7 +19,7 @@ define i1 @f3(i32 %a) {
ret i1 %tmp1
}
; CHECK: f3:
-; CHECK: teq.w r0, #11141290
+; CHECK: teq.w {{.*}}, #11141290
; 0xcc00cc00 = 3422604288
define i1 @f6(i32 %a) {
@@ -28,7 +28,7 @@ define i1 @f6(i32 %a) {
ret i1 %tmp1
}
; CHECK: f6:
-; CHECK: teq.w r0, #-872363008
+; CHECK: teq.w {{.*}}, #-872363008
; 0xdddddddd = 3722304989
define i1 @f7(i32 %a) {
@@ -37,7 +37,7 @@ define i1 @f7(i32 %a) {
ret i1 %tmp1
}
; CHECK: f7:
-; CHECK: teq.w r0, #-572662307
+; CHECK: teq.w {{.*}}, #-572662307
; 0xdddddddd = 3722304989
define i1 @f8(i32 %a) {
@@ -53,5 +53,5 @@ define i1 @f10(i32 %a) {
ret i1 %tmp1
}
; CHECK: f10:
-; CHECK: teq.w r0, #1114112
+; CHECK: teq.w {{.*}}, #1114112
diff --git a/test/CodeGen/Thumb2/thumb2-teq2.ll b/test/CodeGen/Thumb2/thumb2-teq2.ll
index 8acae90..27ecad8 100644
--- a/test/CodeGen/Thumb2/thumb2-teq2.ll
+++ b/test/CodeGen/Thumb2/thumb2-teq2.ll
@@ -1,11 +1,11 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -join-physregs | FileCheck %s
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; These tests implicitly depend on 'movs r0, #0' being rematerialized below the
-; tst as 'mov.w r0, #0'. So far, that requires physreg joining.
+; These tests would be improved by 'movs r0, #0' being rematerialized below the
+; tst as 'mov.w r0, #0'.
define i1 @f2(i32 %a, i32 %b) {
; CHECK: f2
-; CHECK: teq.w r0, r1
+; CHECK: teq.w {{.*}}, r1
%tmp = xor i32 %a, %b
%tmp1 = icmp eq i32 %tmp, 0
ret i1 %tmp1
@@ -13,7 +13,7 @@ define i1 @f2(i32 %a, i32 %b) {
define i1 @f4(i32 %a, i32 %b) {
; CHECK: f4
-; CHECK: teq.w r0, r1
+; CHECK: teq.w {{.*}}, r1
%tmp = xor i32 %a, %b
%tmp1 = icmp eq i32 0, %tmp
ret i1 %tmp1
@@ -21,7 +21,7 @@ define i1 @f4(i32 %a, i32 %b) {
define i1 @f6(i32 %a, i32 %b) {
; CHECK: f6
-; CHECK: teq.w r0, r1, lsl #5
+; CHECK: teq.w {{.*}}, r1, lsl #5
%tmp = shl i32 %b, 5
%tmp1 = xor i32 %a, %tmp
%tmp2 = icmp eq i32 %tmp1, 0
@@ -30,7 +30,7 @@ define i1 @f6(i32 %a, i32 %b) {
define i1 @f7(i32 %a, i32 %b) {
; CHECK: f7
-; CHECK: teq.w r0, r1, lsr #6
+; CHECK: teq.w {{.*}}, r1, lsr #6
%tmp = lshr i32 %b, 6
%tmp1 = xor i32 %a, %tmp
%tmp2 = icmp eq i32 %tmp1, 0
@@ -39,7 +39,7 @@ define i1 @f7(i32 %a, i32 %b) {
define i1 @f8(i32 %a, i32 %b) {
; CHECK: f8
-; CHECK: teq.w r0, r1, asr #7
+; CHECK: teq.w {{.*}}, r1, asr #7
%tmp = ashr i32 %b, 7
%tmp1 = xor i32 %a, %tmp
%tmp2 = icmp eq i32 %tmp1, 0
@@ -48,7 +48,7 @@ define i1 @f8(i32 %a, i32 %b) {
define i1 @f9(i32 %a, i32 %b) {
; CHECK: f9
-; CHECK: teq.w r0, r0, ror #8
+; CHECK: teq.w {{.*}}, {{.*}}, ror #8
%l8 = shl i32 %a, 24
%r8 = lshr i32 %a, 8
%tmp = or i32 %l8, %r8
diff --git a/test/CodeGen/Thumb2/thumb2-tst.ll b/test/CodeGen/Thumb2/thumb2-tst.ll
index 43e208c..67fe82e 100644
--- a/test/CodeGen/Thumb2/thumb2-tst.ll
+++ b/test/CodeGen/Thumb2/thumb2-tst.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -join-physregs | FileCheck %s
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; These tests implicitly depend on 'movs r0, #0' being rematerialized below the
-; tst as 'mov.w r0, #0'. So far, that requires physreg joining.
+; These tests would be improved by 'movs r0, #0' being rematerialized below the
+; tst as 'mov.w r0, #0'.
; 0x000000bb = 187
define i1 @f2(i32 %a) {
@@ -10,7 +10,7 @@ define i1 @f2(i32 %a) {
ret i1 %tmp1
}
; CHECK: f2:
-; CHECK: tst.w r0, #187
+; CHECK: tst.w {{.*}}, #187
; 0x00aa00aa = 11141290
define i1 @f3(i32 %a) {
@@ -19,7 +19,7 @@ define i1 @f3(i32 %a) {
ret i1 %tmp1
}
; CHECK: f3:
-; CHECK: tst.w r0, #11141290
+; CHECK: tst.w {{.*}}, #11141290
; 0xcc00cc00 = 3422604288
define i1 @f6(i32 %a) {
@@ -28,7 +28,7 @@ define i1 @f6(i32 %a) {
ret i1 %tmp1
}
; CHECK: f6:
-; CHECK: tst.w r0, #-872363008
+; CHECK: tst.w {{.*}}, #-872363008
; 0xdddddddd = 3722304989
define i1 @f7(i32 %a) {
@@ -37,7 +37,7 @@ define i1 @f7(i32 %a) {
ret i1 %tmp1
}
; CHECK: f7:
-; CHECK: tst.w r0, #-572662307
+; CHECK: tst.w {{.*}}, #-572662307
; 0x00110000 = 1114112
define i1 @f10(i32 %a) {
@@ -46,4 +46,4 @@ define i1 @f10(i32 %a) {
ret i1 %tmp1
}
; CHECK: f10:
-; CHECK: tst.w r0, #1114112
+; CHECK: tst.w {{.*}}, #1114112
diff --git a/test/CodeGen/Thumb2/thumb2-tst2.ll b/test/CodeGen/Thumb2/thumb2-tst2.ll
index bfe016f..e3fe792 100644
--- a/test/CodeGen/Thumb2/thumb2-tst2.ll
+++ b/test/CodeGen/Thumb2/thumb2-tst2.ll
@@ -1,11 +1,11 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -join-physregs | FileCheck %s
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; These tests implicitly depend on 'movs r0, #0' being rematerialized below the
-; tst as 'mov.w r0, #0'. So far, that requires physreg joining.
+; These tests would be improved by 'movs r0, #0' being rematerialized below the
+; tst as 'mov.w r0, #0'.
define i1 @f2(i32 %a, i32 %b) {
; CHECK: f2:
-; CHECK: tst r0, r1
+; CHECK: tst {{.*}}, r1
%tmp = and i32 %a, %b
%tmp1 = icmp eq i32 %tmp, 0
ret i1 %tmp1
@@ -13,7 +13,7 @@ define i1 @f2(i32 %a, i32 %b) {
define i1 @f4(i32 %a, i32 %b) {
; CHECK: f4:
-; CHECK: tst r0, r1
+; CHECK: tst {{.*}}, r1
%tmp = and i32 %a, %b
%tmp1 = icmp eq i32 0, %tmp
ret i1 %tmp1
@@ -21,7 +21,7 @@ define i1 @f4(i32 %a, i32 %b) {
define i1 @f6(i32 %a, i32 %b) {
; CHECK: f6:
-; CHECK: tst.w r0, r1, lsl #5
+; CHECK: tst.w {{.*}}, r1, lsl #5
%tmp = shl i32 %b, 5
%tmp1 = and i32 %a, %tmp
%tmp2 = icmp eq i32 %tmp1, 0
@@ -30,7 +30,7 @@ define i1 @f6(i32 %a, i32 %b) {
define i1 @f7(i32 %a, i32 %b) {
; CHECK: f7:
-; CHECK: tst.w r0, r1, lsr #6
+; CHECK: tst.w {{.*}}, r1, lsr #6
%tmp = lshr i32 %b, 6
%tmp1 = and i32 %a, %tmp
%tmp2 = icmp eq i32 %tmp1, 0
@@ -39,7 +39,7 @@ define i1 @f7(i32 %a, i32 %b) {
define i1 @f8(i32 %a, i32 %b) {
; CHECK: f8:
-; CHECK: tst.w r0, r1, asr #7
+; CHECK: tst.w {{.*}}, r1, asr #7
%tmp = ashr i32 %b, 7
%tmp1 = and i32 %a, %tmp
%tmp2 = icmp eq i32 %tmp1, 0
@@ -48,7 +48,7 @@ define i1 @f8(i32 %a, i32 %b) {
define i1 @f9(i32 %a, i32 %b) {
; CHECK: f9:
-; CHECK: tst.w r0, r0, ror #8
+; CHECK: tst.w {{.*}}, {{.*}}, ror #8
%l8 = shl i32 %a, 24
%r8 = lshr i32 %a, 8
%tmp = or i32 %l8, %r8
diff --git a/test/CodeGen/Thumb2/thumb2-uxt_rot.ll b/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
index 03189aa..61e849e 100644
--- a/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
+++ b/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
@@ -1,15 +1,22 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2,+t2xtpk | FileCheck %s
+; RUN: llc < %s -march=thumb -mcpu=cortex-a8 | FileCheck %s --check-prefix=A8
+; RUN: llc < %s -march=thumb -mcpu=cortex-m3 | FileCheck %s --check-prefix=M3
+; rdar://11318438
define zeroext i8 @test1(i32 %A.u) {
-; CHECK: test1
-; CHECK: uxtb r0, r0
+; A8: test1
+; A8: uxtb r0, r0
%B.u = trunc i32 %A.u to i8
ret i8 %B.u
}
define zeroext i32 @test2(i32 %A.u, i32 %B.u) {
-; CHECK: test2
-; CHECK: uxtab r0, r0, r1
+; A8: test2
+; A8: uxtab r0, r0, r1
+
+; M3: test2
+; M3: uxtb r1, r1
+; M3-NOT: uxtab
+; M3: add r0, r1
%C.u = trunc i32 %B.u to i8
%D.u = zext i8 %C.u to i32
%E.u = add i32 %A.u, %D.u
@@ -17,8 +24,8 @@ define zeroext i32 @test2(i32 %A.u, i32 %B.u) {
}
define zeroext i32 @test3(i32 %A.u) {
-; CHECK: test3
-; CHECK: uxth.w r0, r0, ror #8
+; A8: test3
+; A8: uxth.w r0, r0, ror #8
%B.u = lshr i32 %A.u, 8
%C.u = shl i32 %A.u, 24
%D.u = or i32 %B.u, %C.u
diff --git a/test/CodeGen/Thumb2/tls1.ll b/test/CodeGen/Thumb2/tls1.ll
index 1e55557..d91e3b3 100644
--- a/test/CodeGen/Thumb2/tls1.ll
+++ b/test/CodeGen/Thumb2/tls1.ll
@@ -1,9 +1,9 @@
; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi | \
-; RUN: grep {i(tpoff)}
+; RUN: grep "i(tpoff)"
; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi | \
-; RUN: grep {__aeabi_read_tp}
+; RUN: grep "__aeabi_read_tp"
; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi \
-; RUN: -relocation-model=pic | grep {__tls_get_addr}
+; RUN: -relocation-model=pic | grep "__tls_get_addr"
@i = thread_local global i32 15 ; <i32*> [#uses=2]