Diffstat (limited to 'test/CodeGen/mips-varargs.c')
-rw-r--r-- | test/CodeGen/mips-varargs.c | 68
1 file changed, 34 insertions, 34 deletions
diff --git a/test/CodeGen/mips-varargs.c b/test/CodeGen/mips-varargs.c
index 383831f..8fd1df6 100644
--- a/test/CodeGen/mips-varargs.c
+++ b/test/CodeGen/mips-varargs.c
@@ -29,18 +29,18 @@ int test_i32(char *fmt, ...) {
 // ALL: call void @llvm.va_start(i8* [[VA1]])
 //
 // O32: [[TMP0:%.+]] = bitcast i8** %va to i32**
-// O32: [[AP_CUR:%.+]] = load i32** [[TMP0]], align [[PTRALIGN]]
+// O32: [[AP_CUR:%.+]] = load i32*, i32** [[TMP0]], align [[PTRALIGN]]
 // NEW: [[TMP0:%.+]] = bitcast i8** %va to i64**
-// NEW: [[AP_CUR:%.+]] = load i64** [[TMP0]], align [[PTRALIGN]]
+// NEW: [[AP_CUR:%.+]] = load i64*, i64** [[TMP0]], align [[PTRALIGN]]
 //
-// O32: [[AP_NEXT:%.+]] = getelementptr i32* [[AP_CUR]], i32 1
-// NEW: [[AP_NEXT:%.+]] = getelementptr i64* [[AP_CUR]], {{i32|i64}} 1
+// O32: [[AP_NEXT:%.+]] = getelementptr i32, i32* [[AP_CUR]], i32 1
+// NEW: [[AP_NEXT:%.+]] = getelementptr i64, i64* [[AP_CUR]], {{i32|i64}} 1
 //
 // O32: store i32* [[AP_NEXT]], i32** [[TMP0]], align [[PTRALIGN]]
 // NEW: store i64* [[AP_NEXT]], i64** [[TMP0]], align [[PTRALIGN]]
 //
-// O32: [[ARG1:%.+]] = load i32* [[AP_CUR]], align 4
-// NEW: [[TMP2:%.+]] = load i64* [[AP_CUR]], align 8
+// O32: [[ARG1:%.+]] = load i32, i32* [[AP_CUR]], align 4
+// NEW: [[TMP2:%.+]] = load i64, i64* [[AP_CUR]], align 8
 // NEW: [[ARG1:%.+]] = trunc i64 [[TMP2]] to i32
 //
 // ALL: call void @llvm.va_end(i8* [[VA1]])
@@ -65,29 +65,29 @@ int test_i32_2args(char *fmt, ...) {
 // ALL: call void @llvm.va_start(i8* [[VA1]])
 //
 // O32: [[TMP0:%.+]] = bitcast i8** %va to i32**
-// O32: [[AP_CUR:%.+]] = load i32** [[TMP0]], align [[PTRALIGN]]
+// O32: [[AP_CUR:%.+]] = load i32*, i32** [[TMP0]], align [[PTRALIGN]]
 // NEW: [[TMP0:%.+]] = bitcast i8** %va to i64**
-// NEW: [[AP_CUR:%.+]] = load i64** [[TMP0]], align [[PTRALIGN]]
+// NEW: [[AP_CUR:%.+]] = load i64*, i64** [[TMP0]], align [[PTRALIGN]]
 //
-// O32: [[AP_NEXT1:%.+]] = getelementptr i32* [[AP_CUR]], i32 1
-// NEW: [[AP_NEXT1:%.+]] = getelementptr i64* [[AP_CUR]], [[INTPTR_T:i32|i64]] 1
+// O32: [[AP_NEXT1:%.+]] = getelementptr i32, i32* [[AP_CUR]], i32 1
+// NEW: [[AP_NEXT1:%.+]] = getelementptr i64, i64* [[AP_CUR]], [[INTPTR_T:i32|i64]] 1
 //
 // O32: store i32* [[AP_NEXT1]], i32** [[TMP0]], align [[PTRALIGN]]
 // FIXME: N32 optimised this store out. Why only for this ABI?
 // N64: store i64* [[AP_NEXT1]], i64** [[TMP0]], align [[PTRALIGN]]
 //
-// O32: [[ARG1:%.+]] = load i32* [[AP_CUR]], align 4
-// NEW: [[TMP3:%.+]] = load i64* [[AP_CUR]], align 8
+// O32: [[ARG1:%.+]] = load i32, i32* [[AP_CUR]], align 4
+// NEW: [[TMP3:%.+]] = load i64, i64* [[AP_CUR]], align 8
 // NEW: [[ARG1:%.+]] = trunc i64 [[TMP3]] to i32
 //
-// O32: [[AP_NEXT2:%.+]] = getelementptr i32* [[AP_CUR]], i32 2
-// NEW: [[AP_NEXT2:%.+]] = getelementptr i64* [[AP_CUR]], [[INTPTR_T]] 2
+// O32: [[AP_NEXT2:%.+]] = getelementptr i32, i32* [[AP_CUR]], i32 2
+// NEW: [[AP_NEXT2:%.+]] = getelementptr i64, i64* [[AP_CUR]], [[INTPTR_T]] 2
 //
 // O32: store i32* [[AP_NEXT2]], i32** [[TMP0]], align [[PTRALIGN]]
 // NEW: store i64* [[AP_NEXT2]], i64** [[TMP0]], align [[PTRALIGN]]
 //
-// O32: [[ARG2:%.+]] = load i32* [[AP_NEXT1]], align 4
-// NEW: [[TMP4:%.+]] = load i64* [[AP_NEXT1]], align 8
+// O32: [[ARG2:%.+]] = load i32, i32* [[AP_NEXT1]], align 4
+// NEW: [[TMP4:%.+]] = load i64, i64* [[AP_NEXT1]], align 8
 // NEW: [[ARG2:%.+]] = trunc i64 [[TMP4]] to i32
 //
 // ALL: call void @llvm.va_end(i8* [[VA1]])
@@ -111,9 +111,9 @@ long long test_i64(char *fmt, ...) {
 // ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
 // ALL: call void @llvm.va_start(i8* [[VA1]])
 //
-// O32: [[AP_CUR:%.+]] = load i8** %va, align [[PTRALIGN]]
+// O32: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
 // NEW: [[TMP0:%.+]] = bitcast i8** %va to i64**
-// NEW: [[AP_CUR:%.+]] = load i64** [[TMP0]], align [[PTRALIGN]]
+// NEW: [[AP_CUR:%.+]] = load i64*, i64** [[TMP0]], align [[PTRALIGN]]
 //
 // i64 is 8-byte aligned, while this is within O32's stack alignment there's no
 // guarantee that the offset is still 8-byte aligned after earlier reads.
@@ -123,14 +123,14 @@ long long test_i64(char *fmt, ...) {
 // O32: [[PTR3:%.+]] = inttoptr [[INTPTR_T]] [[PTR2]] to i64*
 // O32: [[PTR4:%.+]] = inttoptr [[INTPTR_T]] [[PTR2]] to i8*
 //
-// O32: [[AP_NEXT:%.+]] = getelementptr i8* [[PTR4]], [[INTPTR_T]] 8
-// NEW: [[AP_NEXT:%.+]] = getelementptr i64* [[AP_CUR]], [[INTPTR_T:i32|i64]] 1
+// O32: [[AP_NEXT:%.+]] = getelementptr i8, i8* [[PTR4]], [[INTPTR_T]] 8
+// NEW: [[AP_NEXT:%.+]] = getelementptr i64, i64* [[AP_CUR]], [[INTPTR_T:i32|i64]] 1
 //
 // O32: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
 // NEW: store i64* [[AP_NEXT]], i64** [[TMP0]], align [[PTRALIGN]]
 //
-// O32: [[ARG1:%.+]] = load i64* [[PTR3]], align 8
-// NEW: [[ARG1:%.+]] = load i64* [[AP_CUR]], align 8
+// O32: [[ARG1:%.+]] = load i64, i64* [[PTR3]], align 8
+// NEW: [[ARG1:%.+]] = load i64, i64* [[AP_CUR]], align 8
 //
 // ALL: call void @llvm.va_end(i8* [[VA1]])
 // ALL: ret i64 [[ARG1]]
@@ -156,30 +156,30 @@ char *test_ptr(char *fmt, ...) {
 // ALL: call void @llvm.va_start(i8* [[VA1]])
 //
 // O32: [[TMP0:%.+]] = bitcast i8** %va to i8***
-// O32: [[AP_CUR:%.+]] = load i8*** [[TMP0]], align [[PTRALIGN]]
+// O32: [[AP_CUR:%.+]] = load i8**, i8*** [[TMP0]], align [[PTRALIGN]]
 // N32 differs because the vararg is not a N32 pointer. It's been promoted to 64-bit.
 // N32: [[TMP0:%.+]] = bitcast i8** %va to i64**
-// N32: [[AP_CUR:%.+]] = load i64** [[TMP0]], align [[PTRALIGN]]
+// N32: [[AP_CUR:%.+]] = load i64*, i64** [[TMP0]], align [[PTRALIGN]]
 // N64: [[TMP0:%.+]] = bitcast i8** %va to i8***
-// N64: [[AP_CUR:%.+]] = load i8*** [[TMP0]], align [[PTRALIGN]]
+// N64: [[AP_CUR:%.+]] = load i8**, i8*** [[TMP0]], align [[PTRALIGN]]
 //
-// O32: [[AP_NEXT:%.+]] = getelementptr i8** [[AP_CUR]], i32 1
+// O32: [[AP_NEXT:%.+]] = getelementptr i8*, i8** [[AP_CUR]], i32 1
 // N32 differs because the vararg is not a N32 pointer. It's been promoted to 64-bit.
-// N32: [[AP_NEXT:%.+]] = getelementptr i64* [[AP_CUR]], {{i32|i64}} 1
-// N64: [[AP_NEXT:%.+]] = getelementptr i8** [[AP_CUR]], {{i32|i64}} 1
+// N32: [[AP_NEXT:%.+]] = getelementptr i64, i64* [[AP_CUR]], {{i32|i64}} 1
+// N64: [[AP_NEXT:%.+]] = getelementptr i8*, i8** [[AP_CUR]], {{i32|i64}} 1
 //
 // O32: store i8** [[AP_NEXT]], i8*** [[TMP0]], align [[PTRALIGN]]
 // N32 differs because the vararg is not a N32 pointer. It's been promoted to 64-bit.
 // N32: store i64* [[AP_NEXT]], i64** [[TMP0]], align [[PTRALIGN]]
 // N64: store i8** [[AP_NEXT]], i8*** [[TMP0]], align [[PTRALIGN]]
 //
-// O32: [[ARG1:%.+]] = load i8** [[AP_CUR]], align 4
+// O32: [[ARG1:%.+]] = load i8*, i8** [[AP_CUR]], align 4
 // N32 differs because the vararg is not a N32 pointer. It's been promoted to
 // 64-bit so we must truncate the excess and bitcast to a N32 pointer.
-// N32: [[TMP2:%.+]] = load i64* [[AP_CUR]], align 8
+// N32: [[TMP2:%.+]] = load i64, i64* [[AP_CUR]], align 8
 // N32: [[TMP3:%.+]] = trunc i64 [[TMP2]] to i32
 // N32: [[ARG1:%.+]] = inttoptr i32 [[TMP3]] to i8*
-// N64: [[ARG1:%.+]] = load i8** [[AP_CUR]], align 8
+// N64: [[ARG1:%.+]] = load i8*, i8** [[AP_CUR]], align 8
 //
 // ALL: call void @llvm.va_end(i8* [[VA1]])
 // ALL: ret i8* [[ARG1]]
@@ -200,7 +200,7 @@ int test_v4i32(char *fmt, ...) {
 // ALL: %va = alloca i8*, align [[PTRALIGN]]
 // ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
 // ALL: call void @llvm.va_start(i8* [[VA1]])
-// ALL: [[AP_CUR:%.+]] = load i8** %va, align [[PTRALIGN]]
+// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
 //
 // O32: [[PTR0:%.+]] = ptrtoint i8* [[AP_CUR]] to [[INTPTR_T:i32]]
 // N32: [[PTR0:%.+]] = ptrtoint i8* [[AP_CUR]] to [[INTPTR_T:i32]]
@@ -216,9 +216,9 @@ int test_v4i32(char *fmt, ...) {
 //
 // ALL: [[PTR3:%.+]] = inttoptr [[INTPTR_T]] [[PTR2]] to <4 x i32>*
 // ALL: [[PTR4:%.+]] = inttoptr [[INTPTR_T]] [[PTR2]] to i8*
-// ALL: [[AP_NEXT:%.+]] = getelementptr i8* [[PTR4]], [[INTPTR_T]] 16
+// ALL: [[AP_NEXT:%.+]] = getelementptr i8, i8* [[PTR4]], [[INTPTR_T]] 16
 // ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
-// ALL: [[PTR5:%.+]] = load <4 x i32>* [[PTR3]], align 16
+// ALL: [[PTR5:%.+]] = load <4 x i32>, <4 x i32>* [[PTR3]], align 16
 // ALL: call void @llvm.va_end(i8* [[VA1]])
 // ALL: [[VECEXT:%.+]] = extractelement <4 x i32> [[PTR5]], i32 0
 // ALL: ret i32 [[VECEXT]]
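For context, every CHECK-line change above follows the same pattern: LLVM's textual IR now spells the pointee type explicitly on load and getelementptr instead of implying it from the pointer operand. A minimal before/after sketch, using hypothetical values %val, %next, and %ptr rather than lines from this test:

    ; old syntax: result/element type implied by the pointer operand
    %val  = load i32* %ptr, align 4
    %next = getelementptr i32* %ptr, i32 1

    ; new syntax: the loaded/indexed type is written before the pointer
    %val  = load i32, i32* %ptr, align 4
    %next = getelementptr i32, i32* %ptr, i32 1

Only the expected spelling of the IR changes; the MIPS varargs lowering being matched by the test is the same on both sides of the diff.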