Diffstat (limited to 'test/CodeGen/mips-varargs.c')
-rw-r--r--  test/CodeGen/mips-varargs.c  115
1 file changed, 82 insertions, 33 deletions
diff --git a/test/CodeGen/mips-varargs.c b/test/CodeGen/mips-varargs.c
index ad202ff..383831f 100644
--- a/test/CodeGen/mips-varargs.c
+++ b/test/CodeGen/mips-varargs.c
@@ -28,18 +28,19 @@ int test_i32(char *fmt, ...) {
// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_start(i8* [[VA1]])
//
-// ALL: [[AP_CUR:%.+]] = load i8** %va, align [[PTRALIGN]]
-//
-// O32: [[TMP0:%.+]] = bitcast i8* [[AP_CUR]] to i32*
-// NEW: [[TMP0:%.+]] = bitcast i8* [[AP_CUR]] to i64*
+// O32: [[TMP0:%.+]] = bitcast i8** %va to i32**
+// O32: [[AP_CUR:%.+]] = load i32** [[TMP0]], align [[PTRALIGN]]
+// NEW: [[TMP0:%.+]] = bitcast i8** %va to i64**
+// NEW: [[AP_CUR:%.+]] = load i64** [[TMP0]], align [[PTRALIGN]]
//
-// O32: [[AP_NEXT:%.+]] = getelementptr i8* [[AP_CUR]], i32 4
-// NEW: [[AP_NEXT:%.+]] = getelementptr i8* [[AP_CUR]], {{i32|i64}} 8
+// O32: [[AP_NEXT:%.+]] = getelementptr i32* [[AP_CUR]], i32 1
+// NEW: [[AP_NEXT:%.+]] = getelementptr i64* [[AP_CUR]], {{i32|i64}} 1
//
-// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
+// O32: store i32* [[AP_NEXT]], i32** [[TMP0]], align [[PTRALIGN]]
+// NEW: store i64* [[AP_NEXT]], i64** [[TMP0]], align [[PTRALIGN]]
//
-// O32: [[ARG1:%.+]] = load i32* [[TMP0]], align 4
-// NEW: [[TMP2:%.+]] = load i64* [[TMP0]], align 8
+// O32: [[ARG1:%.+]] = load i32* [[AP_CUR]], align 4
+// NEW: [[TMP2:%.+]] = load i64* [[AP_CUR]], align 8
// NEW: [[ARG1:%.+]] = trunc i64 [[TMP2]] to i32
//
// ALL: call void @llvm.va_end(i8* [[VA1]])
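The hunk above shows only the updated CHECK lines; the C body of test_i32 sits outside the diff context. As a reading aid, here is a minimal sketch of what that function presumably looks like, modelled on the test_ptr body added further down (the exact body is an assumption, not part of this patch):

#include <stdarg.h>

int test_i32(char *fmt, ...) {
  va_list va;

  va_start(va, fmt);        /* expands to the llvm.va_start call checked above */
  int v = va_arg(va, int);  /* lowered to the load/getelementptr/store sequence under test */
  va_end(va);               /* expands to the llvm.va_end call */

  return v;
}

The O32/NEW split in the checks reflects slot width: O32 vararg slots are 4 bytes, so the int is loaded directly; N32/N64 slots are 8 bytes, so an i64 is loaded and truncated back to i32.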
@@ -63,32 +64,30 @@ int test_i32_2args(char *fmt, ...) {
// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_start(i8* [[VA1]])
//
-// ALL: [[AP_CUR:%.+]] = load i8** %va, align [[PTRALIGN]]
-//
-// O32: [[TMP0:%.+]] = bitcast i8* [[AP_CUR]] to i32*
-// NEW: [[TMP0:%.+]] = bitcast i8* [[AP_CUR]] to i64*
+// O32: [[TMP0:%.+]] = bitcast i8** %va to i32**
+// O32: [[AP_CUR:%.+]] = load i32** [[TMP0]], align [[PTRALIGN]]
+// NEW: [[TMP0:%.+]] = bitcast i8** %va to i64**
+// NEW: [[AP_CUR:%.+]] = load i64** [[TMP0]], align [[PTRALIGN]]
//
-// O32: [[AP_NEXT:%.+]] = getelementptr i8* [[AP_CUR]], i32 4
-// NEW: [[AP_NEXT:%.+]] = getelementptr i8* [[AP_CUR]], [[INTPTR_T:i32|i64]] 8
+// O32: [[AP_NEXT1:%.+]] = getelementptr i32* [[AP_CUR]], i32 1
+// NEW: [[AP_NEXT1:%.+]] = getelementptr i64* [[AP_CUR]], [[INTPTR_T:i32|i64]] 1
//
-// O32: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
+// O32: store i32* [[AP_NEXT1]], i32** [[TMP0]], align [[PTRALIGN]]
// FIXME: N32 optimised this store out. Why only for this ABI?
-// N64: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
+// N64: store i64* [[AP_NEXT1]], i64** [[TMP0]], align [[PTRALIGN]]
//
-// O32: [[ARG1:%.+]] = load i32* [[TMP0]], align 4
-// NEW: [[TMP3:%.+]] = load i64* [[TMP0]], align 8
+// O32: [[ARG1:%.+]] = load i32* [[AP_CUR]], align 4
+// NEW: [[TMP3:%.+]] = load i64* [[AP_CUR]], align 8
// NEW: [[ARG1:%.+]] = trunc i64 [[TMP3]] to i32
//
-// O32: [[TMP1:%.+]] = bitcast i8* [[AP_NEXT]] to i32*
-// NEW: [[TMP1:%.+]] = bitcast i8* [[AP_NEXT]] to i64*
+// O32: [[AP_NEXT2:%.+]] = getelementptr i32* [[AP_CUR]], i32 2
+// NEW: [[AP_NEXT2:%.+]] = getelementptr i64* [[AP_CUR]], [[INTPTR_T]] 2
//
-// O32: [[AP_NEXT3:%.+]] = getelementptr i8* [[AP_CUR]], i32 8
-// NEW: [[AP_NEXT3:%.+]] = getelementptr i8* [[AP_CUR]], [[INTPTR_T]] 16
+// O32: store i32* [[AP_NEXT2]], i32** [[TMP0]], align [[PTRALIGN]]
+// NEW: store i64* [[AP_NEXT2]], i64** [[TMP0]], align [[PTRALIGN]]
//
-// ALL: store i8* [[AP_NEXT3]], i8** %va, align [[PTRALIGN]]
-//
-// O32: [[ARG2:%.+]] = load i32* [[TMP1]], align 4
-// NEW: [[TMP4:%.+]] = load i64* [[TMP1]], align 8
+// O32: [[ARG2:%.+]] = load i32* [[AP_NEXT1]], align 4
+// NEW: [[TMP4:%.+]] = load i64* [[AP_NEXT1]], align 8
// NEW: [[ARG2:%.+]] = trunc i64 [[TMP4]] to i32
//
// ALL: call void @llvm.va_end(i8* [[VA1]])
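As above, the C source is outside the hunk; here is a sketch of test_i32_2args under the same assumptions (two va_arg reads, which is why the checks track two pointer bumps, AP_NEXT1 and AP_NEXT2; the exact body and return expression are assumptions):

#include <stdarg.h>

int test_i32_2args(char *fmt, ...) {
  va_list va;

  va_start(va, fmt);
  int v1 = va_arg(va, int);  /* reads slot AP_CUR, advances va to AP_NEXT1 */
  int v2 = va_arg(va, int);  /* reads slot AP_NEXT1, advances va to AP_NEXT2 */
  va_end(va);

  return v1 + v2;
}

Note that the new lowering uses typed element indices off AP_CUR (getelementptr on i32*/i64* with indices 1 and 2) where the old code used byte offsets on i8* (the deleted AP_NEXT/AP_NEXT3 lines).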
@@ -112,9 +111,9 @@ long long test_i64(char *fmt, ...) {
// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_start(i8* [[VA1]])
//
-// ALL: [[AP_CUR:%.+]] = load i8** %va, align [[PTRALIGN]]
-//
-// NEW: [[TMP0:%.+]] = bitcast i8* [[AP_CUR]] to i64*
+// O32: [[AP_CUR:%.+]] = load i8** %va, align [[PTRALIGN]]
+// NEW: [[TMP0:%.+]] = bitcast i8** %va to i64**
+// NEW: [[AP_CUR:%.+]] = load i64** [[TMP0]], align [[PTRALIGN]]
//
// i64 is 8-byte aligned; while this is within O32's stack alignment, there's no
// guarantee that the offset is still 8-byte aligned after earlier reads.
@@ -125,17 +124,67 @@ long long test_i64(char *fmt, ...) {
// O32: [[PTR4:%.+]] = inttoptr [[INTPTR_T]] [[PTR2]] to i8*
//
// O32: [[AP_NEXT:%.+]] = getelementptr i8* [[PTR4]], [[INTPTR_T]] 8
-// NEW: [[AP_NEXT:%.+]] = getelementptr i8* [[AP_CUR]], [[INTPTR_T]] 8
+// NEW: [[AP_NEXT:%.+]] = getelementptr i64* [[AP_CUR]], [[INTPTR_T:i32|i64]] 1
//
-// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
+// O32: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
+// NEW: store i64* [[AP_NEXT]], i64** [[TMP0]], align [[PTRALIGN]]
//
// O32: [[ARG1:%.+]] = load i64* [[PTR3]], align 8
-// NEW: [[ARG1:%.+]] = load i64* [[TMP0]], align 8
+// NEW: [[ARG1:%.+]] = load i64* [[AP_CUR]], align 8
//
// ALL: call void @llvm.va_end(i8* [[VA1]])
// ALL: ret i64 [[ARG1]]
// ALL: }
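The O32-only PTR1..PTR4 checks referenced above (partly outside this hunk's context) verify the usual round-up-to-8 realignment before an i64 slot is read. Below is a sketch of the test function plus, in C terms, the realignment the O32 sequence presumably performs (both are assumptions; only the inttoptr and the final load appear in the visible context):

#include <stdarg.h>
#include <stdint.h>

long long test_i64(char *fmt, ...) {
  va_list va;

  va_start(va, fmt);
  long long v = va_arg(va, long long);  /* on O32, first realigns the pointer to 8 bytes */
  va_end(va);

  return v;
}

/* The O32 realignment in C terms:
 *   ap = (char *)(((uintptr_t)ap + 7) & ~(uintptr_t)7);
 * N32/N64 skip it: their vararg slots are always 8-byte aligned, so the new
 * lowering can step the i64* directly (the getelementptr ... 1 line above). */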
+char *test_ptr(char *fmt, ...) {
+ va_list va;
+
+ va_start(va, fmt);
+ char *v = va_arg(va, char *);
+ va_end(va);
+
+ return v;
+}
+
+// ALL-LABEL: define i8* @test_ptr(i8*{{.*}} %fmt, ...)
+//
+// O32: %va = alloca i8*, align [[PTRALIGN:4]]
+// N32: %va = alloca i8*, align [[PTRALIGN:4]]
+// N64: %va = alloca i8*, align [[PTRALIGN:8]]
+//
+// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
+// ALL: call void @llvm.va_start(i8* [[VA1]])
+//
+// O32: [[TMP0:%.+]] = bitcast i8** %va to i8***
+// O32: [[AP_CUR:%.+]] = load i8*** [[TMP0]], align [[PTRALIGN]]
+// N32 differs because the vararg is not an N32 pointer. It's been promoted to 64-bit.
+// N32: [[TMP0:%.+]] = bitcast i8** %va to i64**
+// N32: [[AP_CUR:%.+]] = load i64** [[TMP0]], align [[PTRALIGN]]
+// N64: [[TMP0:%.+]] = bitcast i8** %va to i8***
+// N64: [[AP_CUR:%.+]] = load i8*** [[TMP0]], align [[PTRALIGN]]
+//
+// O32: [[AP_NEXT:%.+]] = getelementptr i8** [[AP_CUR]], i32 1
+// N32 differs because the vararg is not an N32 pointer. It's been promoted to 64-bit.
+// N32: [[AP_NEXT:%.+]] = getelementptr i64* [[AP_CUR]], {{i32|i64}} 1
+// N64: [[AP_NEXT:%.+]] = getelementptr i8** [[AP_CUR]], {{i32|i64}} 1
+//
+// O32: store i8** [[AP_NEXT]], i8*** [[TMP0]], align [[PTRALIGN]]
+// N32 differs because the vararg is not an N32 pointer. It's been promoted to 64-bit.
+// N32: store i64* [[AP_NEXT]], i64** [[TMP0]], align [[PTRALIGN]]
+// N64: store i8** [[AP_NEXT]], i8*** [[TMP0]], align [[PTRALIGN]]
+//
+// O32: [[ARG1:%.+]] = load i8** [[AP_CUR]], align 4
+// N32 differs because the vararg is not an N32 pointer. It's been promoted to
+// 64-bit, so we must truncate the excess and convert it back to an N32 pointer.
+// N32: [[TMP2:%.+]] = load i64* [[AP_CUR]], align 8
+// N32: [[TMP3:%.+]] = trunc i64 [[TMP2]] to i32
+// N32: [[ARG1:%.+]] = inttoptr i32 [[TMP3]] to i8*
+// N64: [[ARG1:%.+]] = load i8** [[AP_CUR]], align 8
+//
+// ALL: call void @llvm.va_end(i8* [[VA1]])
+// ALL: ret i8* [[ARG1]]
+// ALL: }
+
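The N32 lines above capture the one interesting ABI wrinkle in this test: pointers are 32-bit under N32, but every vararg slot is 64-bit, so the callee loads an i64 and narrows it. In C terms, the trunc-then-inttoptr pair checked above behaves like the following (read_n32_slot is a hypothetical helper for illustration, not part of the test):

#include <stdint.h>

/* Hypothetical sketch: recover an N32 (32-bit) pointer from a 64-bit
 * vararg slot, mirroring the "trunc i64 ... to i32" and
 * "inttoptr i32 ... to i8*" checks in the patch. */
static char *read_n32_slot(uint64_t slot) {
  return (char *)(uint32_t)slot;  /* drop the high 32 bits, reinterpret as pointer */
}

O32 and N64 need no narrowing step since the slot width already matches the pointer width (32 and 64 bits respectively).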
int test_v4i32(char *fmt, ...) {
va_list va;