Diffstat (limited to 'test'): 254 files changed, 2115 insertions, 1680 deletions
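The changes below are overwhelmingly one mechanical rename: LLVM IR previously spelled floating-point arithmetic with the overloaded integer opcodes add, sub, and mul, and these tests are updated to the dedicated fadd, fsub, and fmul opcodes on scalars, vectors, and constant expressions alike. A minimal sketch of the rename in IR of the same vintage as these tests (illustrative only, not copied from any one file; note that FP negation keeps its subtract-from-negative-zero idiom and simply becomes fsub):

; Before: integer opcode spellings accepted on FP types
  %p = mul double %a, %b
  %s = add double %p, 1.000000e+00
  %n = sub double -0.000000e+00, %s    ; FP negation idiom

; After: dedicated floating-point opcodes
  %p = fmul double %a, %b
  %s = fadd double %p, 1.000000e+00
  %n = fsub double -0.000000e+00, %s   ; same idiom, now fsub

A handful of hunks are not part of the rename: updated instruction-count and pattern greps in RUN lines, a removed XFAIL, and three newly added regression tests (2009-06-04-MissingLiveIn.ll, fneg.ll, 2009-06-03-UnreachableSplitPad.ll).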
diff --git a/test/Analysis/ScalarEvolution/sext-iv-0.ll b/test/Analysis/ScalarEvolution/sext-iv-0.ll
index 4b2fcea..17f2dff 100644
--- a/test/Analysis/ScalarEvolution/sext-iv-0.ll
+++ b/test/Analysis/ScalarEvolution/sext-iv-0.ll
@@ -18,7 +18,7 @@ bb1: ; preds = %bb1, %bb1.thread
 %2 = sext i9 %1 to i64 ; <i64> [#uses=1]
 %3 = getelementptr double* %x, i64 %2 ; <double*> [#uses=1]
 %4 = load double* %3, align 8 ; <double> [#uses=1]
- %5 = mul double %4, 3.900000e+00 ; <double> [#uses=1]
+ %5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
 %6 = sext i8 %0 to i64 ; <i64> [#uses=1]
 %7 = getelementptr double* %x, i64 %6 ; <double*> [#uses=1]
 store double %5, double* %7, align 8
diff --git a/test/Analysis/ScalarEvolution/sext-iv-1.ll b/test/Analysis/ScalarEvolution/sext-iv-1.ll
index a9175c3..ca6ad0a 100644
--- a/test/Analysis/ScalarEvolution/sext-iv-1.ll
+++ b/test/Analysis/ScalarEvolution/sext-iv-1.ll
@@ -18,7 +18,7 @@ bb1: ; preds = %bb1, %bb1.thread
 %2 = sext i9 %1 to i64 ; <i64> [#uses=1]
 %3 = getelementptr double* %x, i64 %2 ; <double*> [#uses=1]
 %4 = load double* %3, align 8 ; <double> [#uses=1]
- %5 = mul double %4, 3.900000e+00 ; <double> [#uses=1]
+ %5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
 %6 = sext i7 %0 to i64 ; <i64> [#uses=1]
 %7 = getelementptr double* %x, i64 %6 ; <double*> [#uses=1]
 store double %5, double* %7, align 8
@@ -41,7 +41,7 @@ bb1: ; preds = %bb1, %bb1.thread
 %2 = sext i9 %1 to i64 ; <i64> [#uses=1]
 %3 = getelementptr double* %x, i64 %2 ; <double*> [#uses=1]
 %4 = load double* %3, align 8 ; <double> [#uses=1]
- %5 = mul double %4, 3.900000e+00 ; <double> [#uses=1]
+ %5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
 %6 = sext i8 %0 to i64 ; <i64> [#uses=1]
 %7 = getelementptr double* %x, i64 %6 ; <double*> [#uses=1]
 store double %5, double* %7, align 8
@@ -64,7 +64,7 @@ bb1: ; preds = %bb1, %bb1.thread
 %2 = sext i9 %1 to i64 ; <i64> [#uses=1]
 %3 = getelementptr double* %x, i64 %2 ; <double*> [#uses=1]
 %4 = load double* %3, align 8 ; <double> [#uses=1]
- %5 = mul double %4, 3.900000e+00 ; <double> [#uses=1]
+ %5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
 %6 = sext i8 %0 to i64 ; <i64> [#uses=1]
 %7 = getelementptr double* %x, i64 %6 ; <double*> [#uses=1]
 store double %5, double* %7, align 8
@@ -87,7 +87,7 @@ bb1: ; preds = %bb1, %bb1.thread
 %2 = sext i9 %1 to i64 ; <i64> [#uses=1]
 %3 = getelementptr double* %x, i64 %2 ; <double*> [#uses=1]
 %4 = load double* %3, align 8 ; <double> [#uses=1]
- %5 = mul double %4, 3.900000e+00 ; <double> [#uses=1]
+ %5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
 %6 = sext i8 %0 to i64 ; <i64> [#uses=1]
 %7 = getelementptr double* %x, i64 %6 ; <double*> [#uses=1]
 store double %5, double* %7, align 8
diff --git a/test/Analysis/ScalarEvolution/trip-count4.ll b/test/Analysis/ScalarEvolution/trip-count4.ll
index a61d5da..49c4e13 100644
--- a/test/Analysis/ScalarEvolution/trip-count4.ll
+++ b/test/Analysis/ScalarEvolution/trip-count4.ll
@@ -13,7 +13,7 @@ loop: ; preds = %loop, %entry
 %indvar.i8 = ashr i64 %s0, 8 ; <i64> [#uses=1]
 %t0 = getelementptr double* %d, i64 %indvar.i8 ; <double*> [#uses=2]
 %t1 = load double* %t0 ; <double> [#uses=1]
- %t2 = mul double %t1, 1.000000e-01 ; <double> [#uses=1]
+ %t2 = fmul double %t1, 1.000000e-01 ; <double> [#uses=1]
 store double %t2, double* %t0
 %indvar.next = sub i64 %indvar, 1 ; <i64> [#uses=2]
 %exitcond = icmp eq i64 %indvar.next, 10 ; <i1> [#uses=1]
diff --git a/test/Assembler/2002-04-07-HexFloatConstants.ll b/test/Assembler/2002-04-07-HexFloatConstants.ll
index b9860b3..5c54b39 100644
--- a/test/Assembler/2002-04-07-HexFloatConstants.ll
+++ b/test/Assembler/2002-04-07-HexFloatConstants.ll
@@ -11,6 +11,6 @@
 ; RUN: diff %t.1 %t.2

 define double @test() {
- %tmp = mul double 7.200000e+101, 0x427F4000 ; <double> [#uses=1]
+ %tmp = fmul double 7.200000e+101, 0x427F4000 ; <double> [#uses=1]
 ret double %tmp
 }
diff --git a/test/Assembler/2002-04-07-InfConstant.ll b/test/Assembler/2002-04-07-InfConstant.ll
index 317b8f3..71837c9 100644
--- a/test/Assembler/2002-04-07-InfConstant.ll
+++ b/test/Assembler/2002-04-07-InfConstant.ll
@@ -3,7 +3,7 @@
 ; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | grep 0x7FF0000000000000

 define float @test() {
- %tmp = mul float 0x7FF0000000000000, 1.000000e+01 ; <float> [#uses=1]
+ %tmp = fmul float 0x7FF0000000000000, 1.000000e+01 ; <float> [#uses=1]
 ret float %tmp
 }
diff --git a/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll b/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
index 3661c4c..6e11b16 100644
--- a/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
+++ b/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
@@ -35,8 +35,8 @@ cond_next589: ; preds = %cond_next489
 %tmp612 = load i32* null ; <i32> [#uses=1]
 %tmp629 = load i32* null ; <i32> [#uses=1]
 %tmp629a = sitofp i32 %tmp629 to double ; <double> [#uses=1]
- %tmp631 = mul double %tmp629a, 0.000000e+00 ; <double> [#uses=1]
- %tmp632 = add double 0.000000e+00, %tmp631 ; <double> [#uses=1]
+ %tmp631 = fmul double %tmp629a, 0.000000e+00 ; <double> [#uses=1]
+ %tmp632 = fadd double 0.000000e+00, %tmp631 ; <double> [#uses=1]
 %tmp642 = call fastcc i32 @sign( i32 %tmp576, i32 %tmp561 ) ; <i32> [#uses=1]
 %tmp650 = mul i32 %tmp606, %tmp642 ; <i32> [#uses=1]
 %tmp656 = mul i32 %tmp650, %tmp612 ; <i32> [#uses=1]
@@ -46,8 +46,8 @@ cond_next589: ; preds = %cond_next489
 %tmp666 = sub i32 %tmp660, %tmp496 ; <i32> [#uses=1]
 %tmp667 = sitofp i32 %tmp666 to double ; <double> [#uses=2]
 call void @levrun_linfo_inter( i32 %tmp576, i32 0, i32* null, i32* null )
- %tmp671 = mul double %tmp667, %tmp667 ; <double> [#uses=1]
- %tmp675 = add double %tmp671, 0.000000e+00 ; <double> [#uses=1]
+ %tmp671 = fmul double %tmp667, %tmp667 ; <double> [#uses=1]
+ %tmp675 = fadd double %tmp671, 0.000000e+00 ; <double> [#uses=1]
 %tmp678 = fcmp oeq double %tmp632, %tmp675 ; <i1> [#uses=1]
 br i1 %tmp678, label %cond_true679, label %cond_false693
diff --git a/test/CodeGen/ARM/2008-11-19-ScavengerAssert.ll b/test/CodeGen/ARM/2008-11-19-ScavengerAssert.ll
index 7b7ea6b..3f17a51 100644
--- a/test/CodeGen/ARM/2008-11-19-ScavengerAssert.ll
+++ b/test/CodeGen/ARM/2008-11-19-ScavengerAssert.ll
@@ -1,4 +1,4 @@
-; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin9 -stats |& grep asm-printer | grep 184
+; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin9 -stats |& grep asm-printer | grep 164

 %"struct.Adv5::Ekin<3>" = type <{ i8 }>
 %"struct.Adv5::X::Energyflux<3>" = type { double }
diff --git a/test/CodeGen/ARM/2009-02-27-SpillerBug.ll b/test/CodeGen/ARM/2009-02-27-SpillerBug.ll
index 56e949f..bd5b719 100644
--- a/test/CodeGen/ARM/2009-02-27-SpillerBug.ll
+++ b/test/CodeGen/ARM/2009-02-27-SpillerBug.ll
@@ -11,7 +11,7 @@ bb.thread:
 br label %bb52

 bb32: ; preds = %bb52
- %0 = add double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
+ %0 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
 %1 = add i32 %j.1, 1 ; <i32> [#uses=1]
 br label %bb52
@@ -29,14 +29,14 @@ bb53: ; preds = %bb52

 bb55: ; preds = %bb53
 %4 = load double* @a, align 4 ; <double> [#uses=10]
- %5 = add double %4, 0.000000e+00 ; <double> [#uses=16]
+ %5 = fadd double %4, 0.000000e+00 ; <double> [#uses=16]
 %6 = fcmp ogt double %k.4, 0.000000e+00 ; <i1> [#uses=1]
- %.pn404 = mul double %4, %4 ; <double> [#uses=4]
- %.pn402 = mul double %5, %5 ; <double> [#uses=5]
+ %.pn404 = fmul double %4, %4 ; <double> [#uses=4]
+ %.pn402 = fmul double %5, %5 ; <double> [#uses=5]
 %.pn165.in = load double* @N ; <double> [#uses=5]
- %.pn198 = mul double 0.000000e+00, %5 ; <double> [#uses=1]
- %.pn185 = sub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %.pn147 = sub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
+ %.pn198 = fmul double 0.000000e+00, %5 ; <double> [#uses=1]
+ %.pn185 = fsub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
+ %.pn147 = fsub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
 %.pn141 = fdiv double 0.000000e+00, %4 ; <double> [#uses=1]
 %.pn142 = fdiv double 0.000000e+00, %5 ; <double> [#uses=1]
 %.pn136 = fdiv double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
@@ -47,178 +47,178 @@ bb55: ; preds = %bb53
 %.pn117 = fdiv double 0.000000e+00, %4 ; <double> [#uses=1]
 %.pn118 = fdiv double %.pn185, %5 ; <double> [#uses=1]
 %.pn88 = fdiv double %.pn147, %5 ; <double> [#uses=1]
- %.pn81 = sub double %.pn141, %.pn142 ; <double> [#uses=1]
- %.pn77 = sub double 0.000000e+00, %.pn136 ; <double> [#uses=1]
- %.pn75 = sub double 0.000000e+00, %.pn132 ; <double> [#uses=1]
- %.pn69 = sub double %.pn123, %.pn124 ; <double> [#uses=1]
- %.pn67 = sub double 0.000000e+00, %.pn120 ; <double> [#uses=1]
- %.pn56 = sub double %.pn117, %.pn118 ; <double> [#uses=1]
- %.pn42 = sub double 0.000000e+00, %.pn88 ; <double> [#uses=1]
- %.pn60 = mul double %.pn81, 0.000000e+00 ; <double> [#uses=1]
- %.pn57 = add double %.pn77, 0.000000e+00 ; <double> [#uses=1]
- %.pn58 = mul double %.pn75, %.pn165.in ; <double> [#uses=1]
- %.pn32 = add double %.pn69, 0.000000e+00 ; <double> [#uses=1]
- %.pn33 = mul double %.pn67, %.pn165.in ; <double> [#uses=1]
- %.pn17 = sub double 0.000000e+00, %.pn60 ; <double> [#uses=1]
- %.pn9 = add double %.pn57, %.pn58 ; <double> [#uses=1]
- %.pn30 = mul double 0.000000e+00, %.pn56 ; <double> [#uses=1]
- %.pn24 = mul double 0.000000e+00, %.pn42 ; <double> [#uses=1]
- %.pn1 = add double %.pn32, %.pn33 ; <double> [#uses=1]
- %.pn28 = sub double %.pn30, 0.000000e+00 ; <double> [#uses=1]
- %.pn26 = add double %.pn28, 0.000000e+00 ; <double> [#uses=1]
- %.pn22 = sub double %.pn26, 0.000000e+00 ; <double> [#uses=1]
- %.pn20 = sub double %.pn24, 0.000000e+00 ; <double> [#uses=1]
- %.pn18 = add double %.pn22, 0.000000e+00 ; <double> [#uses=1]
- %.pn16 = add double %.pn20, 0.000000e+00 ; <double> [#uses=1]
- %.pn14 = sub double %.pn18, 0.000000e+00 ; <double> [#uses=1]
- %.pn12 = sub double %.pn16, %.pn17 ; <double> [#uses=1]
- %.pn10 = add double %.pn14, 0.000000e+00 ; <double> [#uses=1]
- %.pn8 = add double %.pn12, 0.000000e+00 ; <double> [#uses=1]
- %.pn6 = sub double %.pn10, 0.000000e+00 ; <double> [#uses=1]
- %.pn4 = sub double %.pn8, %.pn9 ; <double> [#uses=1]
- %.pn2 = add double %.pn6, 0.000000e+00 ; <double> [#uses=1]
- %.pn = add double %.pn4, 0.000000e+00 ; <double> [#uses=1]
- %N1.0 = sub double %.pn2, 0.000000e+00 ; <double> [#uses=2]
- %D1.0 = sub double %.pn, %.pn1 ; <double> [#uses=2]
+ %.pn81 = fsub double %.pn141, %.pn142 ; <double> [#uses=1]
+ %.pn77 = fsub double 0.000000e+00, %.pn136 ; <double> [#uses=1]
+ %.pn75 = fsub double 0.000000e+00, %.pn132 ; <double> [#uses=1]
+ %.pn69 = fsub double %.pn123, %.pn124 ; <double> [#uses=1]
+ %.pn67 = fsub double 0.000000e+00, %.pn120 ; <double> [#uses=1]
+ %.pn56 = fsub double %.pn117, %.pn118 ; <double> [#uses=1]
+ %.pn42 = fsub double 0.000000e+00, %.pn88 ; <double> [#uses=1]
+ %.pn60 = fmul double %.pn81, 0.000000e+00 ; <double> [#uses=1]
+ %.pn57 = fadd double %.pn77, 0.000000e+00 ; <double> [#uses=1]
+ %.pn58 = fmul double %.pn75, %.pn165.in ; <double> [#uses=1]
+ %.pn32 = fadd double %.pn69, 0.000000e+00 ; <double> [#uses=1]
+ %.pn33 = fmul double %.pn67, %.pn165.in ; <double> [#uses=1]
+ %.pn17 = fsub double 0.000000e+00, %.pn60 ; <double> [#uses=1]
+ %.pn9 = fadd double %.pn57, %.pn58 ; <double> [#uses=1]
+ %.pn30 = fmul double 0.000000e+00, %.pn56 ; <double> [#uses=1]
+ %.pn24 = fmul double 0.000000e+00, %.pn42 ; <double> [#uses=1]
+ %.pn1 = fadd double %.pn32, %.pn33 ; <double> [#uses=1]
+ %.pn28 = fsub double %.pn30, 0.000000e+00 ; <double> [#uses=1]
+ %.pn26 = fadd double %.pn28, 0.000000e+00 ; <double> [#uses=1]
+ %.pn22 = fsub double %.pn26, 0.000000e+00 ; <double> [#uses=1]
+ %.pn20 = fsub double %.pn24, 0.000000e+00 ; <double> [#uses=1]
+ %.pn18 = fadd double %.pn22, 0.000000e+00 ; <double> [#uses=1]
+ %.pn16 = fadd double %.pn20, 0.000000e+00 ; <double> [#uses=1]
+ %.pn14 = fsub double %.pn18, 0.000000e+00 ; <double> [#uses=1]
+ %.pn12 = fsub double %.pn16, %.pn17 ; <double> [#uses=1]
+ %.pn10 = fadd double %.pn14, 0.000000e+00 ; <double> [#uses=1]
+ %.pn8 = fadd double %.pn12, 0.000000e+00 ; <double> [#uses=1]
+ %.pn6 = fsub double %.pn10, 0.000000e+00 ; <double> [#uses=1]
+ %.pn4 = fsub double %.pn8, %.pn9 ; <double> [#uses=1]
+ %.pn2 = fadd double %.pn6, 0.000000e+00 ; <double> [#uses=1]
+ %.pn = fadd double %.pn4, 0.000000e+00 ; <double> [#uses=1]
+ %N1.0 = fsub double %.pn2, 0.000000e+00 ; <double> [#uses=2]
+ %D1.0 = fsub double %.pn, %.pn1 ; <double> [#uses=2]
 br i1 %6, label %bb62, label %bb64

 bb62: ; preds = %bb55
- %7 = mul double 0.000000e+00, %4 ; <double> [#uses=1]
- %8 = sub double -0.000000e+00, %7 ; <double> [#uses=3]
- %9 = mul double 0.000000e+00, %5 ; <double> [#uses=1]
- %10 = sub double -0.000000e+00, %9 ; <double> [#uses=3]
- %11 = mul double %.pn404, %4 ; <double> [#uses=5]
- %12 = mul double %.pn402, %5 ; <double> [#uses=5]
- %13 = mul double 0.000000e+00, -2.000000e+00 ; <double> [#uses=1]
+ %7 = fmul double 0.000000e+00, %4 ; <double> [#uses=1]
+ %8 = fsub double -0.000000e+00, %7 ; <double> [#uses=3]
+ %9 = fmul double 0.000000e+00, %5 ; <double> [#uses=1]
+ %10 = fsub double -0.000000e+00, %9 ; <double> [#uses=3]
+ %11 = fmul double %.pn404, %4 ; <double> [#uses=5]
+ %12 = fmul double %.pn402, %5 ; <double> [#uses=5]
+ %13 = fmul double 0.000000e+00, -2.000000e+00 ; <double> [#uses=1]
 %14 = fdiv double 0.000000e+00, %.pn402 ; <double> [#uses=1]
- %15 = sub double 0.000000e+00, %14 ; <double> [#uses=1]
- %16 = mul double 0.000000e+00, %15 ; <double> [#uses=1]
- %17 = add double %13, %16 ; <double> [#uses=1]
- %18 = mul double %.pn165.in, -2.000000e+00 ; <double> [#uses=5]
- %19 = mul double %18, 0.000000e+00 ; <double> [#uses=1]
- %20 = add double %17, %19 ; <double> [#uses=1]
- %21 = mul double 0.000000e+00, %20 ; <double> [#uses=1]
- %22 = add double 0.000000e+00, %21 ; <double> [#uses=1]
+ %15 = fsub double 0.000000e+00, %14 ; <double> [#uses=1]
+ %16 = fmul double 0.000000e+00, %15 ; <double> [#uses=1]
+ %17 = fadd double %13, %16 ; <double> [#uses=1]
+ %18 = fmul double %.pn165.in, -2.000000e+00 ; <double> [#uses=5]
+ %19 = fmul double %18, 0.000000e+00 ; <double> [#uses=1]
+ %20 = fadd double %17, %19 ; <double> [#uses=1]
+ %21 = fmul double 0.000000e+00, %20 ; <double> [#uses=1]
+ %22 = fadd double 0.000000e+00, %21 ; <double> [#uses=1]
 %23 = fdiv double 0.000000e+00, %12 ; <double> [#uses=1]
- %24 = sub double 0.000000e+00, %23 ; <double> [#uses=0]
- %25 = mul double %18, 0.000000e+00 ; <double> [#uses=1]
- %26 = add double 0.000000e+00, %25 ; <double> [#uses=1]
- %27 = mul double 0.000000e+00, %26 ; <double> [#uses=1]
- %28 = sub double %22, %27 ; <double> [#uses=1]
- %29 = mul double %11, %4 ; <double> [#uses=1]
- %30 = mul double %12, %5 ; <double> [#uses=3]
- %31 = mul double %.pn165.in, -4.000000e+00 ; <double> [#uses=1]
- %32 = mul double %.pn165.in, 0x3FF5555555555555 ; <double> [#uses=1]
- %33 = mul double %32, 0.000000e+00 ; <double> [#uses=2]
- %34 = add double %28, 0.000000e+00 ; <double> [#uses=1]
- %35 = sub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
+ %24 = fsub double 0.000000e+00, %23 ; <double> [#uses=0]
+ %25 = fmul double %18, 0.000000e+00 ; <double> [#uses=1]
+ %26 = fadd double 0.000000e+00, %25 ; <double> [#uses=1]
+ %27 = fmul double 0.000000e+00, %26 ; <double> [#uses=1]
+ %28 = fsub double %22, %27 ; <double> [#uses=1]
+ %29 = fmul double %11, %4 ; <double> [#uses=1]
+ %30 = fmul double %12, %5 ; <double> [#uses=3]
+ %31 = fmul double %.pn165.in, -4.000000e+00 ; <double> [#uses=1]
+ %32 = fmul double %.pn165.in, 0x3FF5555555555555 ; <double> [#uses=1]
+ %33 = fmul double %32, 0.000000e+00 ; <double> [#uses=2]
+ %34 = fadd double %28, 0.000000e+00 ; <double> [#uses=1]
+ %35 = fsub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
 %36 = fdiv double %35, %11 ; <double> [#uses=1]
 %37 = fdiv double 0.000000e+00, %12 ; <double> [#uses=1]
- %38 = sub double %36, %37 ; <double> [#uses=1]
- %39 = mul double 0.000000e+00, %38 ; <double> [#uses=1]
- %40 = add double 0.000000e+00, %39 ; <double> [#uses=1]
- %41 = add double %40, 0.000000e+00 ; <double> [#uses=1]
- %42 = add double %41, 0.000000e+00 ; <double> [#uses=1]
- %43 = mul double %42, 0.000000e+00 ; <double> [#uses=1]
- %44 = sub double %34, %43 ; <double> [#uses=1]
+ %38 = fsub double %36, %37 ; <double> [#uses=1]
+ %39 = fmul double 0.000000e+00, %38 ; <double> [#uses=1]
+ %40 = fadd double 0.000000e+00, %39 ; <double> [#uses=1]
+ %41 = fadd double %40, 0.000000e+00 ; <double> [#uses=1]
+ %42 = fadd double %41, 0.000000e+00 ; <double> [#uses=1]
+ %43 = fmul double %42, 0.000000e+00 ; <double> [#uses=1]
+ %44 = fsub double %34, %43 ; <double> [#uses=1]
 %45 = tail call double @llvm.exp.f64(double %8) nounwind ; <double> [#uses=1]
- %46 = sub double -0.000000e+00, %45 ; <double> [#uses=2]
+ %46 = fsub double -0.000000e+00, %45 ; <double> [#uses=2]
 %47 = fdiv double %46, 0.000000e+00 ; <double> [#uses=1]
- %48 = mul double %30, %5 ; <double> [#uses=1]
+ %48 = fmul double %30, %5 ; <double> [#uses=1]
 %49 = fdiv double 0.000000e+00, %48 ; <double> [#uses=1]
- %50 = sub double %47, %49 ; <double> [#uses=1]
- %51 = mul double %50, -4.000000e+00 ; <double> [#uses=1]
- %52 = add double %51, 0.000000e+00 ; <double> [#uses=1]
+ %50 = fsub double %47, %49 ; <double> [#uses=1]
+ %51 = fmul double %50, -4.000000e+00 ; <double> [#uses=1]
+ %52 = fadd double %51, 0.000000e+00 ; <double> [#uses=1]
 %53 = fdiv double %46, %11 ; <double> [#uses=1]
- %54 = sub double %53, 0.000000e+00 ; <double> [#uses=1]
- %55 = mul double %31, %54 ; <double> [#uses=1]
- %56 = add double %52, %55 ; <double> [#uses=1]
- %57 = add double %56, 0.000000e+00 ; <double> [#uses=1]
- %58 = add double %44, %57 ; <double> [#uses=1]
- %59 = sub double %58, 0.000000e+00 ; <double> [#uses=1]
+ %54 = fsub double %53, 0.000000e+00 ; <double> [#uses=1]
+ %55 = fmul double %31, %54 ; <double> [#uses=1]
+ %56 = fadd double %52, %55 ; <double> [#uses=1]
+ %57 = fadd double %56, 0.000000e+00 ; <double> [#uses=1]
+ %58 = fadd double %44, %57 ; <double> [#uses=1]
+ %59 = fsub double %58, 0.000000e+00 ; <double> [#uses=1]
 %60 = tail call double @llvm.exp.f64(double 0.000000e+00) nounwind ; <double> [#uses=1]
- %61 = sub double -0.000000e+00, %60 ; <double> [#uses=1]
+ %61 = fsub double -0.000000e+00, %60 ; <double> [#uses=1]
 %62 = fdiv double 0.000000e+00, -6.000000e+00 ; <double> [#uses=1]
 %63 = fdiv double %61, %5 ; <double> [#uses=1]
- %64 = sub double 0.000000e+00, %63 ; <double> [#uses=1]
- %65 = mul double %62, %64 ; <double> [#uses=1]
- %66 = sub double 0.000000e+00, %65 ; <double> [#uses=1]
- %67 = sub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=2]
+ %64 = fsub double 0.000000e+00, %63 ; <double> [#uses=1]
+ %65 = fmul double %62, %64 ; <double> [#uses=1]
+ %66 = fsub double 0.000000e+00, %65 ; <double> [#uses=1]
+ %67 = fsub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=2]
 %68 = tail call double @llvm.exp.f64(double %10) nounwind ; <double> [#uses=1]
- %69 = sub double -0.000000e+00, %68 ; <double> [#uses=2]
+ %69 = fsub double -0.000000e+00, %68 ; <double> [#uses=2]
 %70 = fdiv double %67, %.pn404 ; <double> [#uses=1]
 %71 = fdiv double %69, %.pn402 ; <double> [#uses=1]
- %72 = sub double %70, %71 ; <double> [#uses=1]
- %73 = mul double %72, -5.000000e-01 ; <double> [#uses=1]
+ %72 = fsub double %70, %71 ; <double> [#uses=1]
+ %73 = fmul double %72, -5.000000e-01 ; <double> [#uses=1]
 %74 = fdiv double %67, %4 ; <double> [#uses=1]
 %75 = fdiv double %69, %5 ; <double> [#uses=1]
- %76 = sub double %74, %75 ; <double> [#uses=1]
- %77 = mul double %76, 0.000000e+00 ; <double> [#uses=1]
- %78 = add double %73, %77 ; <double> [#uses=1]
- %79 = mul double 0.000000e+00, %78 ; <double> [#uses=1]
- %80 = add double %66, %79 ; <double> [#uses=1]
+ %76 = fsub double %74, %75 ; <double> [#uses=1]
+ %77 = fmul double %76, 0.000000e+00 ; <double> [#uses=1]
+ %78 = fadd double %73, %77 ; <double> [#uses=1]
+ %79 = fmul double 0.000000e+00, %78 ; <double> [#uses=1]
+ %80 = fadd double %66, %79 ; <double> [#uses=1]
 %81 = fdiv double 0.000000e+00, %.pn404 ; <double> [#uses=1]
 %82 = fdiv double 0.000000e+00, %.pn402 ; <double> [#uses=1]
- %83 = sub double %81, %82 ; <double> [#uses=1]
- %84 = mul double %83, -5.000000e-01 ; <double> [#uses=1]
+ %83 = fsub double %81, %82 ; <double> [#uses=1]
+ %84 = fmul double %83, -5.000000e-01 ; <double> [#uses=1]
 %85 = fdiv double 0.000000e+00, %4 ; <double> [#uses=1]
 %86 = fdiv double 0.000000e+00, %5 ; <double> [#uses=1]
- %87 = sub double %85, %86 ; <double> [#uses=1]
- %88 = mul double %87, 0.000000e+00 ; <double> [#uses=1]
- %89 = add double %84, %88 ; <double> [#uses=1]
- %90 = mul double 0.000000e+00, %89 ; <double> [#uses=1]
- %91 = sub double %80, %90 ; <double> [#uses=1]
+ %87 = fsub double %85, %86 ; <double> [#uses=1]
+ %88 = fmul double %87, 0.000000e+00 ; <double> [#uses=1]
+ %89 = fadd double %84, %88 ; <double> [#uses=1]
+ %90 = fmul double 0.000000e+00, %89 ; <double> [#uses=1]
+ %91 = fsub double %80, %90 ; <double> [#uses=1]
 %92 = tail call double @llvm.exp.f64(double %8) nounwind ; <double> [#uses=1]
- %93 = sub double -0.000000e+00, %92 ; <double> [#uses=1]
+ %93 = fsub double -0.000000e+00, %92 ; <double> [#uses=1]
 %94 = tail call double @llvm.exp.f64(double %10) nounwind ; <double> [#uses=1]
- %95 = sub double -0.000000e+00, %94 ; <double> [#uses=3]
+ %95 = fsub double -0.000000e+00, %94 ; <double> [#uses=3]
 %96 = fdiv double %95, %.pn402 ; <double> [#uses=1]
- %97 = sub double 0.000000e+00, %96 ; <double> [#uses=1]
- %98 = mul double 0.000000e+00, %97 ; <double> [#uses=1]
+ %97 = fsub double 0.000000e+00, %96 ; <double> [#uses=1]
+ %98 = fmul double 0.000000e+00, %97 ; <double> [#uses=1]
 %99 = fdiv double %93, %11 ; <double> [#uses=1]
 %100 = fdiv double %95, %12 ; <double> [#uses=1]
- %101 = sub double %99, %100 ; <double> [#uses=1]
- %102 = sub double %98, %101 ; <double> [#uses=1]
+ %101 = fsub double %99, %100 ; <double> [#uses=1]
+ %102 = fsub double %98, %101 ; <double> [#uses=1]
 %103 = fdiv double %95, %5 ; <double> [#uses=1]
- %104 = sub double 0.000000e+00, %103 ; <double> [#uses=1]
- %105 = mul double %18, %104 ; <double> [#uses=1]
- %106 = add double %102, %105 ; <double> [#uses=1]
- %107 = mul double %106, %k.4 ; <double> [#uses=1]
- %108 = add double %91, %107 ; <double> [#uses=1]
- %109 = sub double %108, 0.000000e+00 ; <double> [#uses=1]
+ %104 = fsub double 0.000000e+00, %103 ; <double> [#uses=1]
+ %105 = fmul double %18, %104 ; <double> [#uses=1]
+ %106 = fadd double %102, %105 ; <double> [#uses=1]
+ %107 = fmul double %106, %k.4 ; <double> [#uses=1]
+ %108 = fadd double %91, %107 ; <double> [#uses=1]
+ %109 = fsub double %108, 0.000000e+00 ; <double> [#uses=1]
 %110 = tail call double @llvm.exp.f64(double %8) nounwind ; <double> [#uses=1]
- %111 = sub double -0.000000e+00, %110 ; <double> [#uses=2]
+ %111 = fsub double -0.000000e+00, %110 ; <double> [#uses=2]
 %112 = tail call double @llvm.exp.f64(double %10) nounwind ; <double> [#uses=1]
- %113 = sub double -0.000000e+00, %112 ; <double> [#uses=2]
+ %113 = fsub double -0.000000e+00, %112 ; <double> [#uses=2]
 %114 = fdiv double %111, %11 ; <double> [#uses=1]
 %115 = fdiv double %113, %12 ; <double> [#uses=1]
- %116 = sub double %114, %115 ; <double> [#uses=1]
- %117 = mul double 0.000000e+00, %116 ; <double> [#uses=1]
+ %116 = fsub double %114, %115 ; <double> [#uses=1]
+ %117 = fmul double 0.000000e+00, %116 ; <double> [#uses=1]
 %118 = fdiv double %111, %29 ; <double> [#uses=1]
 %119 = fdiv double %113, %30 ; <double> [#uses=1]
- %120 = sub double %118, %119 ; <double> [#uses=1]
- %121 = sub double %117, %120 ; <double> [#uses=1]
- %122 = mul double %18, 0.000000e+00 ; <double> [#uses=1]
- %123 = add double %121, %122 ; <double> [#uses=1]
- %124 = mul double %33, 0.000000e+00 ; <double> [#uses=1]
- %125 = add double %123, %124 ; <double> [#uses=1]
- %126 = add double %109, %125 ; <double> [#uses=1]
+ %120 = fsub double %118, %119 ; <double> [#uses=1]
+ %121 = fsub double %117, %120 ; <double> [#uses=1]
+ %122 = fmul double %18, 0.000000e+00 ; <double> [#uses=1]
+ %123 = fadd double %121, %122 ; <double> [#uses=1]
+ %124 = fmul double %33, 0.000000e+00 ; <double> [#uses=1]
+ %125 = fadd double %123, %124 ; <double> [#uses=1]
+ %126 = fadd double %109, %125 ; <double> [#uses=1]
 %127 = tail call double @llvm.exp.f64(double 0.000000e+00) nounwind ; <double> [#uses=1]
- %128 = sub double -0.000000e+00, %127 ; <double> [#uses=2]
+ %128 = fsub double -0.000000e+00, %127 ; <double> [#uses=2]
 %129 = fdiv double %128, %30 ; <double> [#uses=1]
- %130 = sub double 0.000000e+00, %129 ; <double> [#uses=1]
- %131 = sub double 0.000000e+00, %130 ; <double> [#uses=1]
+ %130 = fsub double 0.000000e+00, %129 ; <double> [#uses=1]
+ %131 = fsub double 0.000000e+00, %130 ; <double> [#uses=1]
 %132 = fdiv double 0.000000e+00, %.pn404 ; <double> [#uses=1]
- %133 = sub double %132, 0.000000e+00 ; <double> [#uses=1]
- %134 = mul double %18, %133 ; <double> [#uses=1]
- %135 = add double %131, %134 ; <double> [#uses=1]
+ %133 = fsub double %132, 0.000000e+00 ; <double> [#uses=1]
+ %134 = fmul double %18, %133 ; <double> [#uses=1]
+ %135 = fadd double %131, %134 ; <double> [#uses=1]
 %136 = fdiv double %128, %5 ; <double> [#uses=1]
- %137 = sub double 0.000000e+00, %136 ; <double> [#uses=1]
- %138 = mul double %33, %137 ; <double> [#uses=1]
- %139 = add double %135, %138 ; <double> [#uses=1]
- %140 = sub double %126, %139 ; <double> [#uses=1]
- %141 = add double %N1.0, %59 ; <double> [#uses=1]
- %142 = add double %D1.0, %140 ; <double> [#uses=1]
+ %137 = fsub double 0.000000e+00, %136 ; <double> [#uses=1]
+ %138 = fmul double %33, %137 ; <double> [#uses=1]
+ %139 = fadd double %135, %138 ; <double> [#uses=1]
+ %140 = fsub double %126, %139 ; <double> [#uses=1]
+ %141 = fadd double %N1.0, %59 ; <double> [#uses=1]
+ %142 = fadd double %D1.0, %140 ; <double> [#uses=1]
 br label %bb64

 bb64: ; preds = %bb62, %bb55
diff --git a/test/CodeGen/ARM/2009-03-07-SpillerBug.ll b/test/CodeGen/ARM/2009-03-07-SpillerBug.ll
index 7556616..399ed30 100644
--- a/test/CodeGen/ARM/2009-03-07-SpillerBug.ll
+++ b/test/CodeGen/ARM/2009-03-07-SpillerBug.ll
@@ -26,39 +26,39 @@ entry:

 bb3: ; preds = %entry
 %2 = fdiv double 1.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %3 = mul double 0.000000e+00, %2 ; <double> [#uses=2]
+ %3 = fmul double 0.000000e+00, %2 ; <double> [#uses=2]
 %4 = call double @llvm.sqrt.f64(double 0.000000e+00) nounwind ; <double> [#uses=1]
 %5 = fdiv double 1.000000e+00, %4 ; <double> [#uses=2]
- %6 = mul double %3, %5 ; <double> [#uses=2]
- %7 = mul double 0.000000e+00, %5 ; <double> [#uses=2]
- %8 = mul double %3, %7 ; <double> [#uses=1]
- %9 = sub double %8, 0.000000e+00 ; <double> [#uses=1]
- %10 = mul double 0.000000e+00, %6 ; <double> [#uses=1]
- %11 = sub double 0.000000e+00, %10 ; <double> [#uses=1]
- %12 = sub double -0.000000e+00, %11 ; <double> [#uses=1]
- %13 = mul double %0, %0 ; <double> [#uses=2]
- %14 = sub double %13, 0.000000e+00 ; <double> [#uses=1]
+ %6 = fmul double %3, %5 ; <double> [#uses=2]
+ %7 = fmul double 0.000000e+00, %5 ; <double> [#uses=2]
+ %8 = fmul double %3, %7 ; <double> [#uses=1]
+ %9 = fsub double %8, 0.000000e+00 ; <double> [#uses=1]
+ %10 = fmul double 0.000000e+00, %6 ; <double> [#uses=1]
+ %11 = fsub double 0.000000e+00, %10 ; <double> [#uses=1]
+ %12 = fsub double -0.000000e+00, %11 ; <double> [#uses=1]
+ %13 = fmul double %0, %0 ; <double> [#uses=2]
+ %14 = fsub double %13, 0.000000e+00 ; <double> [#uses=1]
 %15 = call double @llvm.sqrt.f64(double %14) ; <double> [#uses=1]
- %16 = mul double 0.000000e+00, %15 ; <double> [#uses=1]
+ %16 = fmul double 0.000000e+00, %15 ; <double> [#uses=1]
 %17 = fdiv double %16, %0 ; <double> [#uses=1]
- %18 = add double 0.000000e+00, %17 ; <double> [#uses=1]
+ %18 = fadd double 0.000000e+00, %17 ; <double> [#uses=1]
 %19 = call double @acos(double %18) nounwind readonly ; <double> [#uses=1]
 %20 = load double* null, align 4 ; <double> [#uses=1]
- %21 = mul double %20, 0x401921FB54442D18 ; <double> [#uses=1]
+ %21 = fmul double %20, 0x401921FB54442D18 ; <double> [#uses=1]
 %22 = call double @sin(double %19) nounwind readonly ; <double> [#uses=2]
- %23 = mul double %22, 0.000000e+00 ; <double> [#uses=2]
- %24 = mul double %6, %23 ; <double> [#uses=1]
- %25 = mul double %7, %23 ; <double> [#uses=1]
+ %23 = fmul double %22, 0.000000e+00 ; <double> [#uses=2]
+ %24 = fmul double %6, %23 ; <double> [#uses=1]
+ %25 = fmul double %7, %23 ; <double> [#uses=1]
 %26 = call double @sin(double %21) nounwind readonly ; <double> [#uses=1]
- %27 = mul double %22, %26 ; <double> [#uses=2]
- %28 = mul double %9, %27 ; <double> [#uses=1]
- %29 = mul double %27, %12 ; <double> [#uses=1]
- %30 = add double %24, %28 ; <double> [#uses=1]
- %31 = add double 0.000000e+00, %29 ; <double> [#uses=1]
- %32 = add double %25, 0.000000e+00 ; <double> [#uses=1]
- %33 = add double %30, 0.000000e+00 ; <double> [#uses=1]
- %34 = add double %31, 0.000000e+00 ; <double> [#uses=1]
- %35 = add double %32, 0.000000e+00 ; <double> [#uses=1]
+ %27 = fmul double %22, %26 ; <double> [#uses=2]
+ %28 = fmul double %9, %27 ; <double> [#uses=1]
+ %29 = fmul double %27, %12 ; <double> [#uses=1]
+ %30 = fadd double %24, %28 ; <double> [#uses=1]
+ %31 = fadd double 0.000000e+00, %29 ; <double> [#uses=1]
+ %32 = fadd double %25, 0.000000e+00 ; <double> [#uses=1]
+ %33 = fadd double %30, 0.000000e+00 ; <double> [#uses=1]
+ %34 = fadd double %31, 0.000000e+00 ; <double> [#uses=1]
+ %35 = fadd double %32, 0.000000e+00 ; <double> [#uses=1]
 %36 = bitcast %struct.ggPoint3* %x to i8* ; <i8*> [#uses=1]
 call void @llvm.memcpy.i32(i8* null, i8* %36, i32 24, i32 4) nounwind
 store double %33, double* null, align 8
@@ -68,9 +68,9 @@ bb5.i.i.i: ; preds = %bb3
 unreachable

 _Z20ggRaySphereIntersectRK6ggRay3RK8ggSphereddRd.exit: ; preds = %bb3
- %37 = sub double %13, 0.000000e+00 ; <double> [#uses=0]
- %38 = sub double -0.000000e+00, %34 ; <double> [#uses=0]
- %39 = sub double -0.000000e+00, %35 ; <double> [#uses=0]
+ %37 = fsub double %13, 0.000000e+00 ; <double> [#uses=0]
+ %38 = fsub double -0.000000e+00, %34 ; <double> [#uses=0]
+ %39 = fsub double -0.000000e+00, %35 ; <double> [#uses=0]
 ret i32 1

 bb7: ; preds = %entry
diff --git a/test/CodeGen/ARM/2009-04-08-FloatUndef.ll b/test/CodeGen/ARM/2009-04-08-FloatUndef.ll
index 9dc3b34..f394847 100644
--- a/test/CodeGen/ARM/2009-04-08-FloatUndef.ll
+++ b/test/CodeGen/ARM/2009-04-08-FloatUndef.ll
@@ -4,8 +4,8 @@ define void @execute_shader(<4 x float>* %OUT, <4 x float>* %IN, <4 x float>* %C
 entry:
 %input2 = load <4 x float>* null, align 16 ; <<4 x float>> [#uses=2]
 %shuffle7 = shufflevector <4 x float> %input2, <4 x float> <float 0.000000e+00, float 1.000000e+00, float 0.000000e+00, float 1.000000e+00>, <4 x i32> <i32 2, i32 2, i32 2, i32 2> ; <<4 x float>> [#uses=1]
- %mul1 = mul <4 x float> %shuffle7, zeroinitializer ; <<4 x float>> [#uses=1]
- %add2 = add <4 x float> %mul1, %input2 ; <<4 x float>> [#uses=1]
+ %mul1 = fmul <4 x float> %shuffle7, zeroinitializer ; <<4 x float>> [#uses=1]
+ %add2 = fadd <4 x float> %mul1, %input2 ; <<4 x float>> [#uses=1]
 store <4 x float> %add2, <4 x float>* null, align 16
 ret void
 }
diff --git a/test/CodeGen/ARM/2009-06-04-MissingLiveIn.ll b/test/CodeGen/ARM/2009-06-04-MissingLiveIn.ll
new file mode 100644
index 0000000..5eaae7a
--- /dev/null
+++ b/test/CodeGen/ARM/2009-06-04-MissingLiveIn.ll
@@ -0,0 +1,263 @@
+; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin -mattr=+v6
+
+ %struct.anon = type { i16, i16 }
+ %struct.cab_archive = type { i32, i16, i16, i16, i16, i8, %struct.cab_folder*, %struct.cab_file* }
+ %struct.cab_file = type { i32, i16, i64, i8*, i32, i32, i32, %struct.cab_folder*, %struct.cab_file*, %struct.cab_archive*, %struct.cab_state* }
+ %struct.cab_folder = type { i16, i16, %struct.cab_archive*, i64, %struct.cab_folder* }
+ %struct.cab_state = type { i8*, i8*, [38912 x i8], i16, i16, i8*, i16 }
+ %struct.qtm_model = type { i32, i32, %struct.anon* }
+ %struct.qtm_stream = type { i32, i32, i8, i8*, i32, i32, i32, i16, i16, i16, i8, i32, i8*, i8*, i8*, i8*, i8*, i32, i32, i8, [42 x i32], [42 x i8], [27 x i8], [27 x i8], %struct.qtm_model, %struct.qtm_model, %struct.qtm_model, %struct.qtm_model, %struct.qtm_model, %struct.qtm_model, %struct.qtm_model, %struct.qtm_model, %struct.qtm_model, [65 x %struct.anon], [65 x %struct.anon], [65 x %struct.anon], [65 x %struct.anon], [25 x %struct.anon], [37 x %struct.anon], [43 x %struct.anon], [28 x %struct.anon], [8 x %struct.anon], %struct.cab_file*, i32 (%struct.cab_file*, i8*, i32)* }
+
+declare fastcc i32 @qtm_read_input(%struct.qtm_stream* nocapture) nounwind
+
+define fastcc i32 @qtm_decompress(%struct.qtm_stream* %qtm, i64 %out_bytes) nounwind {
+entry:
+ br i1 undef, label %bb245, label %bb3
+
+bb3: ; preds = %entry
+ br i1 undef, label %bb5, label %bb4
+
+bb4: ; preds = %bb3
+ ret i32 undef
+
+bb5: ; preds = %bb3
+ br i1 undef, label %bb245, label %bb14
+
+bb14: ; preds = %bb5
+ br label %bb238
+
+bb28: ; preds = %bb215
+ br label %bb31
+
+bb29: ; preds = %bb31
+ br i1 undef, label %bb31, label %bb32
+
+bb31: ; preds = %bb29, %bb28
+ br i1 undef, label %bb29, label %bb32
+
+bb32: ; preds = %bb31, %bb29
+ br label %bb33
+
+bb33: ; preds = %bb33, %bb32
+ br i1 undef, label %bb34, label %bb33
+
+bb34: ; preds = %bb33
+ br i1 undef, label %bb35, label %bb36
+
+bb35: ; preds = %bb34
+ br label %bb36
+
+bb36: ; preds = %bb46, %bb35, %bb34
+ br i1 undef, label %bb40, label %bb37
+
+bb37: ; preds = %bb36
+ br i1 undef, label %bb77, label %bb60
+
+bb40: ; preds = %bb36
+ br i1 undef, label %bb46, label %bb41
+
+bb41: ; preds = %bb40
+ br i1 undef, label %bb45, label %bb42
+
+bb42: ; preds = %bb41
+ ret i32 undef
+
+bb45: ; preds = %bb41
+ br label %bb46
+
+bb46: ; preds = %bb45, %bb40
+ br label %bb36
+
+bb60: ; preds = %bb60, %bb37
+ br label %bb60
+
+bb77: ; preds = %bb37
+ switch i32 undef, label %bb197 [
+ i32 5, label %bb108
+ i32 6, label %bb138
+ ]
+
+bb108: ; preds = %bb77
+ br label %bb111
+
+bb109: ; preds = %bb111
+ br i1 undef, label %bb111, label %bb112
+
+bb111: ; preds = %bb109, %bb108
+ br i1 undef, label %bb109, label %bb112
+
+bb112: ; preds = %bb111, %bb109
+ br label %bb113
+
+bb113: ; preds = %bb113, %bb112
+ br i1 undef, label %bb114, label %bb113
+
+bb114: ; preds = %bb113
+ br i1 undef, label %bb115, label %bb116
+
+bb115: ; preds = %bb114
+ br label %bb116
+
+bb116: ; preds = %bb115, %bb114
+ br i1 undef, label %bb120, label %bb117
+
+bb117: ; preds = %bb116
+ br label %bb136
+
+bb120: ; preds = %bb116
+ ret i32 undef
+
+bb128: ; preds = %bb136
+ br i1 undef, label %bb134, label %bb129
+
+bb129: ; preds = %bb128
+ br i1 undef, label %bb133, label %bb130
+
+bb130: ; preds = %bb129
+ br i1 undef, label %bb132, label %bb131
+
+bb131: ; preds = %bb130
+ ret i32 undef
+
+bb132: ; preds = %bb130
+ br label %bb133
+
+bb133: ; preds = %bb132, %bb129
+ br label %bb134
+
+bb134: ; preds = %bb133, %bb128
+ br label %bb136
+
+bb136: ; preds = %bb134, %bb117
+ br i1 undef, label %bb198, label %bb128
+
+bb138: ; preds = %bb77
+ %0 = trunc i32 undef to i16 ; <i16> [#uses=1]
+ br label %bb141
+
+bb139: ; preds = %bb141
+ %scevgep441442881 = load i16* undef ; <i16> [#uses=1]
+ %1 = icmp ugt i16 %scevgep441442881, %0 ; <i1> [#uses=1]
+ br i1 %1, label %bb141, label %bb142
+
+bb141: ; preds = %bb139, %bb138
+ br i1 undef, label %bb139, label %bb142
+
+bb142: ; preds = %bb141, %bb139
+ br label %bb143
+
+bb143: ; preds = %bb143, %bb142
+ br i1 undef, label %bb144, label %bb143
+
+bb144: ; preds = %bb143
+ br i1 undef, label %bb145, label %bb146
+
+bb145: ; preds = %bb144
+ unreachable
+
+bb146: ; preds = %bb156, %bb144
+ br i1 undef, label %bb150, label %bb147
+
+bb147: ; preds = %bb146
+ br i1 undef, label %bb157, label %bb148
+
+bb148: ; preds = %bb147
+ br i1 undef, label %bb149, label %bb157
+
+bb149: ; preds = %bb148
+ br label %bb150
+
+bb150: ; preds = %bb149, %bb146
+ br i1 undef, label %bb156, label %bb152
+
+bb152: ; preds = %bb150
+ unreachable
+
+bb156: ; preds = %bb150
+ br label %bb146
+
+bb157: ; preds = %bb148, %bb147
+ br i1 undef, label %bb167, label %bb160
+
+bb160: ; preds = %bb157
+ ret i32 undef
+
+bb167: ; preds = %bb157
+ br label %bb170
+
+bb168: ; preds = %bb170
+ br i1 undef, label %bb170, label %bb171
+
+bb170: ; preds = %bb168, %bb167
+ br i1 undef, label %bb168, label %bb171
+
+bb171: ; preds = %bb170, %bb168
+ br label %bb172
+
+bb172: ; preds = %bb172, %bb171
+ br i1 undef, label %bb173, label %bb172
+
+bb173: ; preds = %bb172
+ br i1 undef, label %bb174, label %bb175
+
+bb174: ; preds = %bb173
+ unreachable
+
+bb175: ; preds = %bb179, %bb173
+ br i1 undef, label %bb179, label %bb176
+
+bb176: ; preds = %bb175
+ br i1 undef, label %bb186, label %bb177
+
+bb177: ; preds = %bb176
+ br i1 undef, label %bb178, label %bb186
+
+bb178: ; preds = %bb177
+ br label %bb179
+
+bb179: ; preds = %bb178, %bb175
+ br label %bb175
+
+bb186: ; preds = %bb177, %bb176
+ br label %bb195
+
+bb187: ; preds = %bb195
+ br i1 undef, label %bb193, label %bb189
+
+bb189: ; preds = %bb187
+ %2 = tail call fastcc i32 @qtm_read_input(%struct.qtm_stream* %qtm) nounwind ; <i32> [#uses=0]
+ ret i32 undef
+
+bb193: ; preds = %bb187
+ br label %bb195
+
+bb195: ; preds = %bb193, %bb186
+ br i1 undef, label %bb198, label %bb187
+
+bb197: ; preds = %bb77
+ ret i32 -124
+
+bb198: ; preds = %bb195, %bb136
+ br i1 undef, label %bb211.preheader, label %bb214
+
+bb211.preheader: ; preds = %bb198
+ br label %bb211
+
+bb211: ; preds = %bb211, %bb211.preheader
+ br i1 undef, label %bb214, label %bb211
+
+bb214: ; preds = %bb211, %bb198
+ br label %bb215
+
+bb215: ; preds = %bb238, %bb214
+ br i1 undef, label %bb28, label %bb216
+
+bb216: ; preds = %bb215
+ br label %bb238
+
+bb238: ; preds = %bb216, %bb14
+ br label %bb215
+
+bb245: ; preds = %bb5, %entry
+ ret i32 undef
+}
diff --git a/test/CodeGen/ARM/cse-libcalls.ll b/test/CodeGen/ARM/cse-libcalls.ll
index 3b499a4..4f4091a 100644
--- a/test/CodeGen/ARM/cse-libcalls.ll
+++ b/test/CodeGen/ARM/cse-libcalls.ll
@@ -16,7 +16,7 @@ bb28.i: ; preds = %bb28.i, %entry
 br i1 false, label %bb502.loopexit.i, label %bb28.i

 bb.nph53.i: ; preds = %bb502.loopexit.i
- %tmp354.i = sub double -0.000000e+00, %tmp10.i4 ; <double> [#uses=0]
+ %tmp354.i = fsub double -0.000000e+00, %tmp10.i4 ; <double> [#uses=0]
 br label %bb244.i

 bb244.i: ; preds = %bb244.i, %bb.nph53.i
diff --git a/test/CodeGen/ARM/fixunsdfdi.ll b/test/CodeGen/ARM/fixunsdfdi.ll
index d3038b9..777a3d6 100644
--- a/test/CodeGen/ARM/fixunsdfdi.ll
+++ b/test/CodeGen/ARM/fixunsdfdi.ll
@@ -13,7 +13,7 @@ bb5: ; preds = %bb3
 %u.in.mask = and i64 %x14, -4294967296 ; <i64> [#uses=1]
 %.ins = or i64 0, %u.in.mask ; <i64> [#uses=1]
 %0 = bitcast i64 %.ins to double ; <double> [#uses=1]
- %1 = sub double %x, %0 ; <double> [#uses=1]
+ %1 = fsub double %x, %0 ; <double> [#uses=1]
 %2 = fptosi double %1 to i32 ; <i32> [#uses=1]
 %3 = add i32 %2, 0 ; <i32> [#uses=1]
 %4 = zext i32 %3 to i64 ; <i64> [#uses=1]
diff --git a/test/CodeGen/ARM/fnmul.ll b/test/CodeGen/ARM/fnmul.ll
index 87a30c9..7bbda2d 100644
--- a/test/CodeGen/ARM/fnmul.ll
+++ b/test/CodeGen/ARM/fnmul.ll
@@ -4,8 +4,8 @@
 define double @t1(double %a, double %b) {
 entry:
- %tmp2 = sub double -0.000000e+00, %a ; <double> [#uses=1]
- %tmp4 = mul double %tmp2, %b ; <double> [#uses=1]
+ %tmp2 = fsub double -0.000000e+00, %a ; <double> [#uses=1]
+ %tmp4 = fmul double %tmp2, %b ; <double> [#uses=1]
 ret double %tmp4
 }
diff --git a/test/CodeGen/ARM/fparith.ll b/test/CodeGen/ARM/fparith.ll
index 11933d5..568a6c4 100644
--- a/test/CodeGen/ARM/fparith.ll
+++ b/test/CodeGen/ARM/fparith.ll
@@ -10,49 +10,49 @@
 define float @f1(float %a, float %b) {
 entry:
- %tmp = add float %a, %b ; <float> [#uses=1]
+ %tmp = fadd float %a, %b ; <float> [#uses=1]
 ret float %tmp
 }

 define double @f2(double %a, double %b) {
 entry:
- %tmp = add double %a, %b ; <double> [#uses=1]
+ %tmp = fadd double %a, %b ; <double> [#uses=1]
 ret double %tmp
 }

 define float @f3(float %a, float %b) {
 entry:
- %tmp = mul float %a, %b ; <float> [#uses=1]
+ %tmp = fmul float %a, %b ; <float> [#uses=1]
 ret float %tmp
 }

 define double @f4(double %a, double %b) {
 entry:
- %tmp = mul double %a, %b ; <double> [#uses=1]
+ %tmp = fmul double %a, %b ; <double> [#uses=1]
 ret double %tmp
 }

 define float @f5(float %a, float %b) {
 entry:
- %tmp = sub float %a, %b ; <float> [#uses=1]
+ %tmp = fsub float %a, %b ; <float> [#uses=1]
 ret float %tmp
 }

 define double @f6(double %a, double %b) {
 entry:
- %tmp = sub double %a, %b ; <double> [#uses=1]
+ %tmp = fsub double %a, %b ; <double> [#uses=1]
 ret double %tmp
 }

 define float @f7(float %a) {
 entry:
- %tmp1 = sub float -0.000000e+00, %a ; <float> [#uses=1]
+ %tmp1 = fsub float -0.000000e+00, %a ; <float> [#uses=1]
 ret float %tmp1
 }

 define double @f8(double %a) {
 entry:
- %tmp1 = sub double -0.000000e+00, %a ; <double> [#uses=1]
+ %tmp1 = fsub double -0.000000e+00, %a ; <double> [#uses=1]
 ret double %tmp1
 }
diff --git a/test/CodeGen/ARM/fpmem.ll b/test/CodeGen/ARM/fpmem.ll
index 48204ec..13653bb 100644
--- a/test/CodeGen/ARM/fpmem.ll
+++ b/test/CodeGen/ARM/fpmem.ll
@@ -11,12 +11,12 @@ define float @f1(float %a) {

 define float @f2(float* %v, float %u) {
 %tmp = load float* %v ; <float> [#uses=1]
- %tmp1 = add float %tmp, %u ; <float> [#uses=1]
+ %tmp1 = fadd float %tmp, %u ; <float> [#uses=1]
 ret float %tmp1
 }

 define void @f3(float %a, float %b, float* %v) {
- %tmp = add float %a, %b ; <float> [#uses=1]
+ %tmp = fadd float %a, %b ; <float> [#uses=1]
 store float %tmp, float* %v
 ret void
 }
diff --git a/test/CodeGen/ARM/illegal-vector-bitcast.ll b/test/CodeGen/ARM/illegal-vector-bitcast.ll
index 79f9929..ad24eb5 100644
--- a/test/CodeGen/ARM/illegal-vector-bitcast.ll
+++ b/test/CodeGen/ARM/illegal-vector-bitcast.ll
@@ -3,7 +3,7 @@
 define void @foo(<8 x float>* %f, <8 x float>* %g, <4 x i64>* %y) {
 %h = load <8 x float>* %f
- %i = mul <8 x float> %h, <float 0x3FF19999A0000000, float 0x400A666660000000, float 0x40119999A0000000, float 0x40159999A0000000, float 0.5, float 0x3FE3333340000000, float 0x3FE6666660000000, float 0x3FE99999A0000000>
+ %i = fmul <8 x float> %h, <float 0x3FF19999A0000000, float 0x400A666660000000, float 0x40119999A0000000, float 0x40159999A0000000, float 0.5, float 0x3FE3333340000000, float 0x3FE6666660000000, float 0x3FE99999A0000000>
 %m = bitcast <8 x float> %i to <4 x i64>
 %z = load <4 x i64>* %y
 %n = mul <4 x i64> %z, %m
diff --git a/test/CodeGen/ARM/lsr-scale-addr-mode.ll b/test/CodeGen/ARM/lsr-scale-addr-mode.ll
index 6db0d43..02902f2 100644
--- a/test/CodeGen/ARM/lsr-scale-addr-mode.ll
+++ b/test/CodeGen/ARM/lsr-scale-addr-mode.ll
@@ -1,4 +1,4 @@
-; RUN: llvm-as < %s | llc -march=arm | grep -F {str r2, \[r0, +r3, lsl #2\]}
+; RUN: llvm-as < %s | llc -march=arm | grep lsl | grep -F {lsl #2\]}
 ; Should use scaled addressing mode.

 define void @sintzero(i32* %a) nounwind {
diff --git a/test/CodeGen/ARM/memcpy-inline.ll b/test/CodeGen/ARM/memcpy-inline.ll
index 5d1beea..4bf0b4f 100644
--- a/test/CodeGen/ARM/memcpy-inline.ll
+++ b/test/CodeGen/ARM/memcpy-inline.ll
@@ -1,9 +1,7 @@
+; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin | grep ldmia
+; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin | grep stmia
 ; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin | grep ldrb
 ; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin | grep ldrh
-; This used to look for ldmia. But it's no longer lucky enough to
-; have the load / store instructions lined up just right after
-; scheduler change for pr3457. We'll look for a robust solution
-; later.

 %struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
 @src = external global %struct.x
diff --git a/test/CodeGen/ARM/str_pre-2.ll b/test/CodeGen/ARM/str_pre-2.ll
index 247465f..e9f1945 100644
--- a/test/CodeGen/ARM/str_pre-2.ll
+++ b/test/CodeGen/ARM/str_pre-2.ll
@@ -1,6 +1,5 @@
 ; RUN: llvm-as < %s | llc -mtriple=arm-linux-gnu | grep {str.*\\!}
 ; RUN: llvm-as < %s | llc -mtriple=arm-linux-gnu | grep {ldr.*\\\[.*\], #+4}
-; XFAIL: *

 @b = external global i64*
diff --git a/test/CodeGen/ARM/vfp.ll b/test/CodeGen/ARM/vfp.ll
index 2acb33f..f58da44 100644
--- a/test/CodeGen/ARM/vfp.ll
+++ b/test/CodeGen/ARM/vfp.ll
@@ -39,10 +39,10 @@ define void @test_abs(float* %P, double* %D) {

 define void @test_add(float* %P, double* %D) {
 %a = load float* %P ; <float> [#uses=2]
- %b = add float %a, %a ; <float> [#uses=1]
+ %b = fadd float %a, %a ; <float> [#uses=1]
 store float %b, float* %P
 %A = load double* %D ; <double> [#uses=2]
- %B = add double %A, %A ; <double> [#uses=1]
+ %B = fadd double %A, %A ; <double> [#uses=1]
 store double %B, double* %D
 ret void
 }
@@ -61,8 +61,8 @@ define void @test_fma(float* %P1, float* %P2, float* %P3) {
 %a1 = load float* %P1 ; <float> [#uses=1]
 %a2 = load float* %P2 ; <float> [#uses=1]
 %a3 = load float* %P3 ; <float> [#uses=1]
- %X = mul float %a1, %a2 ; <float> [#uses=1]
- %Y = sub float %X, %a3 ; <float> [#uses=1]
+ %X = fmul float %a1, %a2 ; <float> [#uses=1]
+ %Y = fsub float %X, %a3 ; <float> [#uses=1]
 store float %Y, float* %P1
 ret void
 }
diff --git a/test/CodeGen/CBackend/2006-12-11-Float-Bitcast.ll b/test/CodeGen/CBackend/2006-12-11-Float-Bitcast.ll
index ca4e48e..f8393a3 100644
--- a/test/CodeGen/CBackend/2006-12-11-Float-Bitcast.ll
+++ b/test/CodeGen/CBackend/2006-12-11-Float-Bitcast.ll
@@ -23,7 +23,7 @@ define double @test4(i64 %L) {

 define double @test5(double %D) {
 %X = bitcast double %D to double ; <double> [#uses=1]
- %Y = add double %X, 2.000000e+00 ; <double> [#uses=1]
+ %Y = fadd double %X, 2.000000e+00 ; <double> [#uses=1]
 %Z = bitcast double %Y to i64 ; <i64> [#uses=1]
 %res = bitcast i64 %Z to double ; <double> [#uses=1]
 ret double %res
@@ -31,7 +31,7 @@ define double @test5(double %D) {

 define float @test6(float %F) {
 %X = bitcast float %F to float ; <float> [#uses=1]
- %Y = add float %X, 2.000000e+00 ; <float> [#uses=1]
+ %Y = fadd float %X, 2.000000e+00 ; <float> [#uses=1]
 %Z = bitcast float %Y to i32 ; <i32> [#uses=1]
 %res = bitcast i32 %Z to float ; <float> [#uses=1]
 ret float %res
diff --git a/test/CodeGen/CBackend/2008-10-21-PPCLongDoubleConstant.ll b/test/CodeGen/CBackend/2008-10-21-PPCLongDoubleConstant.ll
index afcac99..32d635a 100644
--- a/test/CodeGen/CBackend/2008-10-21-PPCLongDoubleConstant.ll
+++ b/test/CodeGen/CBackend/2008-10-21-PPCLongDoubleConstant.ll
@@ -20,7 +20,7 @@ entry:
 br label %bb4

 bb4: ; preds = %bb5.split, %bb4, %entry
- %0 = fcmp ogt ppc_fp128 0xM00000000000000000000000000000000, select (i1 fcmp olt (ppc_fp128 fpext (double 0x3C447AE147AE147B to ppc_fp128), ppc_fp128 mul (ppc_fp128 0xM00000000000000010000000000000000, ppc_fp128 0xM40140000000000000000000000000000)), ppc_fp128 mul (ppc_fp128 0xM00000000000000010000000000000000, ppc_fp128 0xM40140000000000000000000000000000), ppc_fp128 fpext (double 0x3C447AE147AE147B to ppc_fp128)) ; <i1> [#uses=1]
+ %0 = fcmp ogt ppc_fp128 0xM00000000000000000000000000000000, select (i1 fcmp olt (ppc_fp128 fpext (double 0x3C447AE147AE147B to ppc_fp128), ppc_fp128 fmul (ppc_fp128 0xM00000000000000010000000000000000, ppc_fp128 0xM40140000000000000000000000000000)), ppc_fp128 fmul (ppc_fp128 0xM00000000000000010000000000000000, ppc_fp128 0xM40140000000000000000000000000000), ppc_fp128 fpext (double 0x3C447AE147AE147B to ppc_fp128)) ; <i1> [#uses=1]
 br i1 %0, label %bb4, label %bb5.split

 bb5.split: ; preds = %bb4
diff --git a/test/CodeGen/CBackend/fneg.ll b/test/CodeGen/CBackend/fneg.ll
new file mode 100644
index 0000000..68849b2
--- /dev/null
+++ b/test/CodeGen/CBackend/fneg.ll
@@ -0,0 +1,7 @@
+; RUN: llvm-as < %s | llc -march=c
+
+define void @func() nounwind {
+ entry:
+ %0 = fsub double -0.0, undef
+ ret void
+}
diff --git a/test/CodeGen/CBackend/vectors.ll b/test/CodeGen/CBackend/vectors.ll
index de78975..d01e992 100644
--- a/test/CodeGen/CBackend/vectors.ll
+++ b/test/CodeGen/CBackend/vectors.ll
@@ -14,7 +14,7 @@ define i32 @test2(<4 x i32> %a, i32 %b) {
 }

 define <4 x float> @test3(<4 x float> %Y) {
- %Z = add <4 x float> %Y, %Y
+ %Z = fadd <4 x float> %Y, %Y
 %X = shufflevector <4 x float> zeroinitializer, <4 x float> %Z, <4 x i32> < i32 0, i32 5, i32 6, i32 7 >
 ret <4 x float> %X
 }
diff --git a/test/CodeGen/CellSPU/dp_farith.ll b/test/CodeGen/CellSPU/dp_farith.ll
index 2579a40..d4802ae 100644
--- a/test/CodeGen/CellSPU/dp_farith.ll
+++ b/test/CodeGen/CellSPU/dp_farith.ll
@@ -11,88 +11,88 @@ target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i
 target triple = "spu"

 define double @fadd(double %arg1, double %arg2) {
- %A = add double %arg1, %arg2
+ %A = fadd double %arg1, %arg2
 ret double %A
 }

 define <2 x double> @fadd_vec(<2 x double> %arg1, <2 x double> %arg2) {
- %A = add <2 x double> %arg1, %arg2
+ %A = fadd <2 x double> %arg1, %arg2
 ret <2 x double> %A
 }

 define double @fsub(double %arg1, double %arg2) {
- %A = sub double %arg1, %arg2
+ %A = fsub double %arg1, %arg2
 ret double %A
 }

 define <2 x double> @fsub_vec(<2 x double> %arg1, <2 x double> %arg2) {
- %A = sub <2 x double> %arg1, %arg2
+ %A = fsub <2 x double> %arg1, %arg2
 ret <2 x double> %A
 }

 define double @fmul(double %arg1, double %arg2) {
- %A = mul double %arg1, %arg2
+ %A = fmul double %arg1, %arg2
 ret double %A
 }

 define <2 x double> @fmul_vec(<2 x double> %arg1, <2 x double> %arg2) {
- %A = mul <2 x double> %arg1, %arg2
+ %A = fmul <2 x double> %arg1, %arg2
 ret <2 x double> %A
 }

 define double @fma(double %arg1, double %arg2, double %arg3) {
- %A = mul double %arg1, %arg2
- %B = add double %A, %arg3
+ %A = fmul double %arg1, %arg2
+ %B = fadd double %A, %arg3
 ret double %B
 }

 define <2 x double> @fma_vec(<2 x double> %arg1, <2 x double> %arg2, <2 x double> %arg3) {
- %A = mul <2 x double> %arg1, %arg2
- %B = add <2 x double> %A, %arg3
+ %A = fmul <2 x double> %arg1, %arg2
+ %B = fadd <2 x double> %A, %arg3
 ret <2 x double> %B
 }

 define double @fms(double %arg1, double %arg2, double %arg3) {
- %A = mul double %arg1, %arg2
- %B = sub double %A, %arg3
+ %A = fmul double %arg1, %arg2
+ %B = fsub double %A, %arg3
 ret double %B
 }

 define <2 x double> @fms_vec(<2 x double> %arg1, <2 x double> %arg2, <2 x double> %arg3) {
- %A = mul <2 x double> %arg1, %arg2
- %B = sub <2 x double> %A, %arg3
+ %A = fmul <2 x double> %arg1, %arg2
+ %B = fsub <2 x double> %A, %arg3
 ret <2 x double> %B
 }

 ; - (a * b - c)
 define double @d_fnms_1(double %arg1, double %arg2, double %arg3) {
- %A = mul double %arg1, %arg2
- %B = sub double %A, %arg3
- %C = sub double -0.000000e+00, %B ; <double> [#uses=1]
+ %A = fmul double %arg1, %arg2
+ %B = fsub double %A, %arg3
+ %C = fsub double -0.000000e+00, %B ; <double> [#uses=1]
 ret double %C
 }

 ; Annother way of getting fnms
 ; - ( a * b ) + c => c - (a * b)
 define double @d_fnms_2(double %arg1, double %arg2, double %arg3) {
- %A = mul double %arg1, %arg2
- %B = sub double %arg3, %A
+ %A = fmul double %arg1, %arg2
+ %B = fsub double %arg3, %A
 ret double %B
 }

 ; FNMS: - (a * b - c) => c - (a * b)
 define <2 x double> @d_fnms_vec_1(<2 x double> %arg1, <2 x double> %arg2, <2 x double> %arg3) {
- %A = mul <2 x double> %arg1, %arg2
- %B = sub <2 x double> %arg3, %A ;
+ %A = fmul <2 x double> %arg1, %arg2
+ %B = fsub <2 x double> %arg3, %A ;
 ret <2 x double> %B
 }

 ; Another way to get fnms using a constant vector
 ; - ( a * b - c)
 define <2 x double> @d_fnms_vec_2(<2 x double> %arg1, <2 x double> %arg2, <2 x double> %arg3) {
- %A = mul <2 x double> %arg1, %arg2 ; <<2 x double>> [#uses=1]
- %B = sub <2 x double> %A, %arg3 ; <<2 x double>> [#uses=1]
- %C = sub <2 x double> < double -0.00000e+00, double -0.00000e+00 >, %B
+ %A = fmul <2 x double> %arg1, %arg2 ; <<2 x double>> [#uses=1]
+ %B = fsub <2 x double> %A, %arg3 ; <<2 x double>> [#uses=1]
+ %C = fsub <2 x double> < double -0.00000e+00, double -0.00000e+00 >, %B
 ret <2 x double> %C
 }
diff --git a/test/CodeGen/CellSPU/fneg-fabs.ll b/test/CodeGen/CellSPU/fneg-fabs.ll
index 4c6fbb9..5bd66f4 100644
--- a/test/CodeGen/CellSPU/fneg-fabs.ll
+++ b/test/CodeGen/CellSPU/fneg-fabs.ll
@@ -7,22 +7,22 @@ target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i
 target triple = "spu"

 define double @fneg_dp(double %X) {
- %Y = sub double -0.000000e+00, %X
+ %Y = fsub double -0.000000e+00, %X
 ret double %Y
 }

 define <2 x double> @fneg_dp_vec(<2 x double> %X) {
- %Y = sub <2 x double> < double -0.0000e+00, double -0.0000e+00 >, %X
+ %Y = fsub <2 x double> < double -0.0000e+00, double -0.0000e+00 >, %X
 ret <2 x double> %Y
 }

 define float @fneg_sp(float %X) {
- %Y = sub float -0.000000e+00, %X
+ %Y = fsub float -0.000000e+00, %X
 ret float %Y
 }

 define <4 x float> @fneg_sp_vec(<4 x float> %X) {
- %Y = sub <4 x float> <float -0.000000e+00, float -0.000000e+00,
+ %Y = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00,
 float -0.000000e+00, float -0.000000e+00>, %X
 ret <4 x float> %Y
 }
diff --git a/test/CodeGen/CellSPU/sp_farith.ll b/test/CodeGen/CellSPU/sp_farith.ll
index df3baef..d77dd92 100644
--- a/test/CodeGen/CellSPU/sp_farith.ll
+++ b/test/CodeGen/CellSPU/sp_farith.ll
@@ -12,79 +12,79 @@ target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i
 target triple = "spu"

 define float @fp_add(float %arg1, float %arg2) {
- %A = add float %arg1, %arg2 ; <float> [#uses=1]
+ %A = fadd float %arg1, %arg2 ; <float> [#uses=1]
 ret float %A
 }

 define <4 x float> @fp_add_vec(<4 x float> %arg1, <4 x float> %arg2) {
- %A = add <4 x float> %arg1, %arg2 ; <<4 x float>> [#uses=1]
+ %A = fadd <4 x float> %arg1, %arg2 ; <<4 x float>> [#uses=1]
 ret <4 x float> %A
 }

 define float @fp_sub(float %arg1, float %arg2) {
- %A = sub float %arg1, %arg2 ; <float> [#uses=1]
+ %A = fsub float %arg1, %arg2 ; <float> [#uses=1]
 ret float %A
 }

 define <4 x float> @fp_sub_vec(<4 x float> %arg1, <4 x float> %arg2) {
- %A = sub <4 x float> %arg1, %arg2 ; <<4 x float>> [#uses=1]
+ %A = fsub <4 x float> %arg1, %arg2 ; <<4 x float>> [#uses=1]
 ret <4 x float> %A
 }

 define float @fp_mul(float %arg1, float %arg2) {
- %A = mul float %arg1, %arg2 ; <float> [#uses=1]
+ %A = fmul float %arg1, %arg2 ; <float> [#uses=1]
 ret float %A
 }

 define <4 x float> @fp_mul_vec(<4 x float> %arg1, <4 x float> %arg2) {
- %A = mul <4 x float> %arg1, %arg2 ; <<4 x float>> [#uses=1]
+ %A = fmul <4 x float> %arg1, %arg2 ; <<4 x float>> [#uses=1]
 ret <4 x float> %A
 }

 define float @fp_mul_add(float %arg1, float %arg2, float %arg3) {
- %A = mul float %arg1, %arg2 ; <float> [#uses=1]
- %B = add float %A, %arg3 ; <float> [#uses=1]
+ %A = fmul float %arg1, %arg2 ; <float> [#uses=1]
+ %B = fadd float %A, %arg3 ; <float> [#uses=1]
 ret float %B
 }

 define <4 x float> @fp_mul_add_vec(<4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3) {
- %A = mul <4 x float> %arg1, %arg2 ; <<4 x float>> [#uses=1]
- %B = add <4 x float> %A, %arg3 ; <<4 x float>> [#uses=1]
+ %A = fmul <4 x float> %arg1, %arg2 ; <<4 x float>> [#uses=1]
+ %B = fadd <4 x float> %A, %arg3 ; <<4 x float>> [#uses=1]
 ret <4 x float> %B
 }

 define float @fp_mul_sub(float %arg1, float %arg2, float %arg3) {
- %A = mul float %arg1, %arg2 ; <float> [#uses=1]
- %B = sub float %A, %arg3 ; <float> [#uses=1]
+ %A = fmul float %arg1, %arg2 ; <float> [#uses=1]
+ %B = fsub float %A, %arg3 ; <float> [#uses=1]
 ret float %B
 }

 define <4 x float> @fp_mul_sub_vec(<4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3) {
- %A = mul <4 x float> %arg1, %arg2 ; <<4 x float>> [#uses=1]
- %B = sub <4 x float> %A, %arg3 ; <<4 x float>> [#uses=1]
+ %A = fmul <4 x float> %arg1, %arg2 ; <<4 x float>> [#uses=1]
+ %B = fsub <4 x float> %A, %arg3 ; <<4 x float>> [#uses=1]
 ret <4 x float> %B
 }

 ; Test the straightforward way of getting fnms
 ; c - a * b
 define float @fp_neg_mul_sub_1(float %arg1, float %arg2, float %arg3) {
- %A = mul float %arg1, %arg2
- %B = sub float %arg3, %A
+ %A = fmul float %arg1, %arg2
+ %B = fsub float %arg3, %A
 ret float %B
 }

 ; Test another way of getting fnms
 ; - ( a *b -c ) = c - a * b
 define float @fp_neg_mul_sub_2(float %arg1, float %arg2, float %arg3) {
- %A = mul float %arg1, %arg2
- %B = sub float %A, %arg3
- %C = sub float -0.0, %B
+ %A = fmul float %arg1, %arg2
+ %B = fsub float %A, %arg3
+ %C = fsub float -0.0, %B
 ret float %C
 }

 define <4 x float> @fp_neg_mul_sub_vec(<4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3) {
- %A = mul <4 x float> %arg1, %arg2
- %B = sub <4 x float> %A, %arg3
- %D = sub <4 x float> < float -0.0, float -0.0, float -0.0, float -0.0 >, %B
+ %A = fmul <4 x float> %arg1, %arg2
+ %B = fsub <4 x float> %A, %arg3
+ %D = fsub <4 x float> < float -0.0, float -0.0, float -0.0, float -0.0 >, %B
 ret <4 x float> %D
 }
diff --git a/test/CodeGen/Generic/2006-07-03-schedulers.ll b/test/CodeGen/Generic/2006-07-03-schedulers.ll
index 4c4481c..597ee56 100644
--- a/test/CodeGen/Generic/2006-07-03-schedulers.ll
+++ b/test/CodeGen/Generic/2006-07-03-schedulers.ll
@@ -12,13 +12,13 @@ define i32 @testissue(i32 %i, float %x, float %y) {
 br label %bb1

 bb1: ; preds = %bb1, %0
- %x1 = mul float %x, %y ; <float> [#uses=1]
- %y1 = mul float %y, 7.500000e-01 ; <float> [#uses=1]
- %z1 = add float %x1, %y1 ; <float> [#uses=1]
- %x2 = mul float %x, 5.000000e-01 ; <float> [#uses=1]
- %y2 = mul float %y, 0x3FECCCCCC0000000 ; <float> [#uses=1]
- %z2 = add float %x2, %y2 ; <float> [#uses=1]
- %z3 = add float %z1, %z2 ; <float> [#uses=1]
+ %x1 = fmul float %x, %y ; <float> [#uses=1]
+ %y1 = fmul float %y, 7.500000e-01 ; <float> [#uses=1]
+ %z1 = fadd float %x1, %y1 ; <float> [#uses=1]
+ %x2 = fmul float %x, 5.000000e-01 ; <float> [#uses=1]
+ %y2 = fmul float %y, 0x3FECCCCCC0000000 ; <float> [#uses=1]
+ %z2 = fadd float %x2, %y2 ; <float> [#uses=1]
+ %z3 = fadd float %z1, %z2 ; <float> [#uses=1]
 %i1 = shl i32 %i, 3 ; <i32> [#uses=1]
 %j1 = add i32 %i, 7 ; <i32> [#uses=1]
 %m1 = add i32 %i1, %j1 ; <i32> [#uses=2]
diff --git a/test/CodeGen/Generic/2007-05-15-InfiniteRecursion.ll b/test/CodeGen/Generic/2007-05-15-InfiniteRecursion.ll
index 7495795..a61108a 100644
--- a/test/CodeGen/Generic/2007-05-15-InfiniteRecursion.ll
+++ b/test/CodeGen/Generic/2007-05-15-InfiniteRecursion.ll
@@ -71,10 +71,10 @@ cond_next159.i: ; preds = %cond_true356.i.preheader
 %tmp178.i = add i32 %tmp116117.i, -128 ; <i32> [#uses=2]
 %tmp181.i = mul i32 %tmp178.i, %tmp178.i ; <i32> [#uses=1]
 %tmp181182.i = sitofp i32 %tmp181.i to float ; <float> [#uses=1]
- %tmp199200.pn.in.i = mul float %tmp181182.i, 0.000000e+00 ; <float> [#uses=1]
+ %tmp199200.pn.in.i = fmul float %tmp181182.i, 0.000000e+00 ; <float> [#uses=1]
 %tmp199200.pn.i = fpext float %tmp199200.pn.in.i to double ; <double> [#uses=1]
- %tmp201.pn.i = sub double 1.000000e+00, %tmp199200.pn.i ; <double> [#uses=1]
- %factor.2.in.i = mul double 0.000000e+00, %tmp201.pn.i ; <double> [#uses=1]
+ %tmp201.pn.i = fsub double 1.000000e+00, %tmp199200.pn.i ; <double> [#uses=1]
+ %factor.2.in.i = fmul double 0.000000e+00, %tmp201.pn.i ; <double> [#uses=1]
 %factor.2.i = fptrunc double %factor.2.in.i to float ; <float> [#uses=1]
 br i1 false, label %cond_next312.i, label %cond_false222.i
diff --git a/test/CodeGen/Generic/2008-02-04-ExtractSubvector.ll b/test/CodeGen/Generic/2008-02-04-ExtractSubvector.ll
index 1cf822b..9acb852 100644
--- a/test/CodeGen/Generic/2008-02-04-ExtractSubvector.ll
+++ b/test/CodeGen/Generic/2008-02-04-ExtractSubvector.ll
@@ -5,7 +5,7 @@ entry:
 br label %bb15

 bb15: ; preds = %bb15, %entry
- %tmp21 = add <8 x double> zeroinitializer, zeroinitializer ; <<8 x double>> [#uses=1]
+ %tmp21 = fadd <8 x double> zeroinitializer, zeroinitializer ; <<8 x double>> [#uses=1]
 br i1 false, label %bb30, label %bb15

 bb30: ; preds = %bb15
diff --git a/test/CodeGen/Generic/2008-02-25-NegateZero.ll b/test/CodeGen/Generic/2008-02-25-NegateZero.ll
index e5a5274..0169307 100644
--- a/test/CodeGen/Generic/2008-02-25-NegateZero.ll
+++ b/test/CodeGen/Generic/2008-02-25-NegateZero.ll
@@ -5,8 +5,8 @@ define void @test() {
 entry:
 %tmp98 = load float* null, align 4 ; <float> [#uses=1]
 %tmp106 = load float* null, align 4 ; <float> [#uses=1]
- %tmp113 = add float %tmp98, %tmp106 ; <float> [#uses=1]
- %tmp119 = sub float %tmp113, 0.000000e+00 ; <float> [#uses=1]
+ %tmp113 = fadd float %tmp98, %tmp106 ; <float> [#uses=1]
+ %tmp119 = fsub float %tmp113, 0.000000e+00 ; <float> [#uses=1]
 call void (i32, ...)* @foo( i32 0, float 0.000000e+00, float %tmp119 ) nounwind
 ret void
 }
diff --git a/test/CodeGen/Generic/2008-02-26-NegatableCrash.ll b/test/CodeGen/Generic/2008-02-26-NegatableCrash.ll
index 7fe19d9..b2112f3 100644
--- a/test/CodeGen/Generic/2008-02-26-NegatableCrash.ll
+++ b/test/CodeGen/Generic/2008-02-26-NegatableCrash.ll
@@ -30,16 +30,16 @@ bb.nph1770: ; preds = %bb429
 br i1 false, label %bb471, label %bb505

 bb471: ; preds = %bb471, %bb.nph1770
- %tmp487 = add double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
+ %tmp487 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
 br i1 false, label %bb505, label %bb471

 bb505: ; preds = %bb471, %bb.nph1770
 %xy.0.lcssa = phi double [ 0.000000e+00, %bb.nph1770 ], [ %tmp487, %bb471 ] ; <double> [#uses=1]
- %tmp507 = sub double -0.000000e+00, %xy.0.lcssa ; <double> [#uses=1]
+ %tmp507 = fsub double -0.000000e+00, %xy.0.lcssa ; <double> [#uses=1]
 %tmp509 = fdiv double %tmp507, 0.000000e+00 ; <double> [#uses=1]
- %tmp510 = mul double %tmp509, 1.024000e+03 ; <double> [#uses=1]
+ %tmp510 = fmul double %tmp509, 1.024000e+03 ; <double> [#uses=1]
 %tmp516 = fdiv double %tmp510, 0.000000e+00 ; <double> [#uses=1]
- %tmp517 = add double %tmp516, 5.000000e-01 ; <double> [#uses=1]
+ %tmp517 = fadd double %tmp516, 5.000000e-01 ; <double> [#uses=1]
 %tmp518 = tail call double @floor( double %tmp517 ) nounwind readnone ; <double> [#uses=0]
 ret i32 0
diff --git a/test/CodeGen/Generic/2009-06-03-UnreachableSplitPad.ll b/test/CodeGen/Generic/2009-06-03-UnreachableSplitPad.ll
new file mode 100644
index 0000000..59e7d0c
--- /dev/null
+++ b/test/CodeGen/Generic/2009-06-03-UnreachableSplitPad.ll
@@ -0,0 +1,15 @@
+; RUN: llvm-as < %s | llc
+; PR4317
+
+declare i32 @b()
+
+define void @a() {
+entry:
+ ret void
+
+dummy:
+ invoke i32 @b() to label %reg unwind label %reg
+
+reg:
+ ret void
+}
diff --git a/test/CodeGen/Generic/fneg-fabs.ll b/test/CodeGen/Generic/fneg-fabs.ll
index f9580b1..2709fa1 100644
--- a/test/CodeGen/Generic/fneg-fabs.ll
+++ b/test/CodeGen/Generic/fneg-fabs.ll
@@ -1,12 +1,12 @@
 ; RUN: llvm-as < %s | llc

 define double @fneg(double %X) {
- %Y = sub double -0.000000e+00, %X ; <double> [#uses=1]
+ %Y = fsub double -0.000000e+00, %X ; <double> [#uses=1]
 ret double %Y
 }

 define float @fnegf(float %X) {
- %Y = sub float -0.000000e+00, %X ; <float> [#uses=1]
+ %Y = fsub float -0.000000e+00, %X ; <float> [#uses=1]
 ret float %Y
 }
diff --git a/test/CodeGen/Generic/print-arith-fp.ll b/test/CodeGen/Generic/print-arith-fp.ll
index 87aa1a0..1e27061 100644
--- a/test/CodeGen/Generic/print-arith-fp.ll
+++ b/test/CodeGen/Generic/print-arith-fp.ll
@@ -24,9 +24,9 @@ define i32 @main() {
 %b_s = getelementptr [8 x i8]* @b_str, i64 0, i64 0 ; <i8*> [#uses=1]
 call i32 (i8*, ...)* @printf( i8* %a_s, double %a ) ; <i32>:1 [#uses=0]
 call i32 (i8*, ...)* @printf( i8* %b_s, double %b ) ; <i32>:2 [#uses=0]
- %add_r = add double %a, %b ; <double> [#uses=1]
- %sub_r = sub double %a, %b ; <double> [#uses=1]
- %mul_r = mul double %a, %b ; <double> [#uses=1]
+ %add_r = fadd double %a, %b ; <double> [#uses=1]
+ %sub_r = fsub double %a, %b ; <double> [#uses=1]
+ %mul_r = fmul double %a, %b ; <double> [#uses=1]
 %div_r = fdiv double %b, %a ; <double> [#uses=1]
 %rem_r = frem double %b, %a ; <double> [#uses=1]
 %add_s = getelementptr [12 x i8]* @add_str, i64 0, i64 0 ; <i8*> [#uses=1]
diff --git a/test/CodeGen/Generic/select.ll b/test/CodeGen/Generic/select.ll
index fc573f3..a532703 100644
--- a/test/CodeGen/Generic/select.ll
+++ b/test/CodeGen/Generic/select.ll
@@ -9,8 +9,8 @@ define void @testConsts(i32 %N, float %X) {
 %a = add i32 %N, 1 ; <i32> [#uses=0]
 %i = add i32 %N, 12345678 ; <i32> [#uses=0]
 %b = add i16 4, 3 ; <i16> [#uses=0]
- %c = add float %X, 0.000000e+00 ; <float> [#uses=0]
- %d = add float %X, 0x400921CAC0000000 ; <float> [#uses=0]
+
%c = fadd float %X, 0.000000e+00 ; <float> [#uses=0] + %d = fadd float %X, 0x400921CAC0000000 ; <float> [#uses=0] %f = add i32 -1, 10 ; <i32> [#uses=0] %g = add i16 20, -1 ; <i16> [#uses=0] %j = add i16 -1, 30 ; <i16> [#uses=0] @@ -126,8 +126,8 @@ define void @testfloatbool(float %x, float %y) { br label %Top Top: ; preds = %Top, %0 - %p = add float %x, %y ; <float> [#uses=1] - %z = sub float %x, %y ; <float> [#uses=1] + %p = fadd float %x, %y ; <float> [#uses=1] + %z = fsub float %x, %y ; <float> [#uses=1] %b = fcmp ole float %p, %z ; <i1> [#uses=2] %c = xor i1 %b, true ; <i1> [#uses=0] br i1 %b, label %Top, label %goon diff --git a/test/CodeGen/Generic/storetrunc-fp.ll b/test/CodeGen/Generic/storetrunc-fp.ll index 710a990..0f7bb0b 100644 --- a/test/CodeGen/Generic/storetrunc-fp.ll +++ b/test/CodeGen/Generic/storetrunc-fp.ll @@ -1,7 +1,7 @@ ; RUN: llvm-as < %s | llc define void @foo(double %a, double %b, float* %fp) { - %c = add double %a, %b + %c = fadd double %a, %b %d = fptrunc double %c to float store float %d, float* %fp ret void diff --git a/test/CodeGen/Generic/v-split.ll b/test/CodeGen/Generic/v-split.ll index a312492..44601d0 100644 --- a/test/CodeGen/Generic/v-split.ll +++ b/test/CodeGen/Generic/v-split.ll @@ -4,7 +4,7 @@ define void @test_f8(%f8 *%P, %f8* %Q, %f8 *%S) { %p = load %f8* %P %q = load %f8* %Q - %R = add %f8 %p, %q + %R = fadd %f8 %p, %q store %f8 %R, %f8 *%S ret void } diff --git a/test/CodeGen/Generic/vector.ll b/test/CodeGen/Generic/vector.ll index 9105757..f283256 100644 --- a/test/CodeGen/Generic/vector.ll +++ b/test/CodeGen/Generic/vector.ll @@ -14,7 +14,7 @@ define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) { %p = load %f1* %P ; <%f1> [#uses=1] %q = load %f1* %Q ; <%f1> [#uses=1] - %R = add %f1 %p, %q ; <%f1> [#uses=1] + %R = fadd %f1 %p, %q ; <%f1> [#uses=1] store %f1 %R, %f1* %S ret void } @@ -22,7 +22,7 @@ define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) { define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) { %p = load %f2* %P ; <%f2> [#uses=1] %q = load %f2* %Q ; <%f2> [#uses=1] - %R = add %f2 %p, %q ; <%f2> [#uses=1] + %R = fadd %f2 %p, %q ; <%f2> [#uses=1] store %f2 %R, %f2* %S ret void } @@ -30,7 +30,7 @@ define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) { define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) { %p = load %f4* %P ; <%f4> [#uses=1] %q = load %f4* %Q ; <%f4> [#uses=1] - %R = add %f4 %p, %q ; <%f4> [#uses=1] + %R = fadd %f4 %p, %q ; <%f4> [#uses=1] store %f4 %R, %f4* %S ret void } @@ -38,7 +38,7 @@ define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) { define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) { %p = load %f8* %P ; <%f8> [#uses=1] %q = load %f8* %Q ; <%f8> [#uses=1] - %R = add %f8 %p, %q ; <%f8> [#uses=1] + %R = fadd %f8 %p, %q ; <%f8> [#uses=1] store %f8 %R, %f8* %S ret void } @@ -46,7 +46,7 @@ define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) { define void @test_fmul(%f8* %P, %f8* %Q, %f8* %S) { %p = load %f8* %P ; <%f8> [#uses=1] %q = load %f8* %Q ; <%f8> [#uses=1] - %R = mul %f8 %p, %q ; <%f8> [#uses=1] + %R = fmul %f8 %p, %q ; <%f8> [#uses=1] store %f8 %R, %f8* %S ret void } @@ -64,21 +64,21 @@ define void @test_div(%f8* %P, %f8* %Q, %f8* %S) { define void @test_cst(%f4* %P, %f4* %S) { %p = load %f4* %P ; <%f4> [#uses=1] - %R = add %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float 2.000000e+00, float 4.500000e+00 > ; <%f4> [#uses=1] + %R = fadd %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float 2.000000e+00, float 4.500000e+00 > ; <%f4> [#uses=1] store %f4 %R, %f4* %S ret void } define void @test_zero(%f4* %P, %f4* %S) { %p = 
load %f4* %P ; <%f4> [#uses=1] - %R = add %f4 %p, zeroinitializer ; <%f4> [#uses=1] + %R = fadd %f4 %p, zeroinitializer ; <%f4> [#uses=1] store %f4 %R, %f4* %S ret void } define void @test_undef(%f4* %P, %f4* %S) { %p = load %f4* %P ; <%f4> [#uses=1] - %R = add %f4 %p, undef ; <%f4> [#uses=1] + %R = fadd %f4 %p, undef ; <%f4> [#uses=1] store %f4 %R, %f4* %S ret void } @@ -115,7 +115,7 @@ define double @test_extract_elt2(%d8* %P) { define void @test_cast_1(%f4* %b, %i4* %a) { %tmp = load %f4* %b ; <%f4> [#uses=1] - %tmp2 = add %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 > ; <%f4> [#uses=1] + %tmp2 = fadd %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 > ; <%f4> [#uses=1] %tmp3 = bitcast %f4 %tmp2 to %i4 ; <%i4> [#uses=1] %tmp4 = add %i4 %tmp3, < i32 1, i32 2, i32 3, i32 4 > ; <%i4> [#uses=1] store %i4 %tmp4, %i4* %a @@ -137,7 +137,7 @@ define void @splat(%f4* %P, %f4* %Q, float %X) { %tmp4 = insertelement %f4 %tmp2, float %X, i32 2 ; <%f4> [#uses=1] %tmp6 = insertelement %f4 %tmp4, float %X, i32 3 ; <%f4> [#uses=1] %q = load %f4* %Q ; <%f4> [#uses=1] - %R = add %f4 %q, %tmp6 ; <%f4> [#uses=1] + %R = fadd %f4 %q, %tmp6 ; <%f4> [#uses=1] store %f4 %R, %f4* %P ret void } diff --git a/test/CodeGen/MSP430/2009-05-19-DoubleSplit.ll b/test/CodeGen/MSP430/2009-05-19-DoubleSplit.ll index 20050e9..70f1d99 100644 --- a/test/CodeGen/MSP430/2009-05-19-DoubleSplit.ll +++ b/test/CodeGen/MSP430/2009-05-19-DoubleSplit.ll @@ -2,7 +2,7 @@ define i16 @test(double %d) nounwind { entry: - %add = add double %d, 1.000000e+00 + %add = fadd double %d, 1.000000e+00 %call = tail call i16 @funct(double %add) nounwind ret i16 %call } diff --git a/test/CodeGen/Mips/2008-07-06-fadd64.ll b/test/CodeGen/Mips/2008-07-06-fadd64.ll index 95792ff..f8eca85 100644 --- a/test/CodeGen/Mips/2008-07-06-fadd64.ll +++ b/test/CodeGen/Mips/2008-07-06-fadd64.ll @@ -5,6 +5,6 @@ target triple = "mipsallegrexel-psp-elf" define double @dofloat(double %a, double %b) nounwind { entry: - add double %a, %b ; <double>:0 [#uses=1] + fadd double %a, %b ; <double>:0 [#uses=1] ret double %0 } diff --git a/test/CodeGen/Mips/2008-07-22-Cstpool.ll b/test/CodeGen/Mips/2008-07-22-Cstpool.ll index 99eccf5..2af7ab1 100644 --- a/test/CodeGen/Mips/2008-07-22-Cstpool.ll +++ b/test/CodeGen/Mips/2008-07-22-Cstpool.ll @@ -6,7 +6,7 @@ target triple = "mipsallegrexel-psp-elf" define float @F(float %a) nounwind { entry: - add float %a, 0x4011333340000000 ; <float>:0 [#uses=1] - add float %0, 0x4010666660000000 ; <float>:1 [#uses=1] + fadd float %a, 0x4011333340000000 ; <float>:0 [#uses=1] + fadd float %0, 0x4010666660000000 ; <float>:1 [#uses=1] ret float %1 } diff --git a/test/CodeGen/Mips/2008-07-23-fpcmp.ll b/test/CodeGen/Mips/2008-07-23-fpcmp.ll index 7bc1f42..4580215 100644 --- a/test/CodeGen/Mips/2008-07-23-fpcmp.ll +++ b/test/CodeGen/Mips/2008-07-23-fpcmp.ll @@ -11,7 +11,7 @@ entry: br i1 %0, label %bb, label %bb2 bb: ; preds = %entry - add float %a, 1.000000e+00 ; <float>:1 [#uses=1] + fadd float %a, 1.000000e+00 ; <float>:1 [#uses=1] ret float %1 bb2: ; preds = %entry diff --git a/test/CodeGen/Mips/2008-08-03-fabs64.ll b/test/CodeGen/Mips/2008-08-03-fabs64.ll index 8495bfe..9d18f47 100644 --- a/test/CodeGen/Mips/2008-08-03-fabs64.ll +++ b/test/CodeGen/Mips/2008-08-03-fabs64.ll @@ -9,7 +9,7 @@ define double @A(double %c, double %d) nounwind readnone { entry: tail call double @fabs( double %c ) nounwind readnone ; <double>:0 [#uses=1] tail call double @fabs( double %d ) 
nounwind readnone ; <double>:1 [#uses=1] - add double %0, %1 + fadd double %0, %1 ret double %2 } diff --git a/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll b/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll index c9ee2cf..1f7440a 100644 --- a/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll +++ b/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll @@ -21,12 +21,12 @@ entry: load i16* %3, align 2 ; <i16>:4 [#uses=1] uitofp i16 %4 to double ; <double>:5 [#uses=1] tail call double @ldexp( double %5, i32 -32 ) nounwind ; <double>:6 [#uses=1] - add double %2, %6 ; <double>:7 [#uses=1] + fadd double %2, %6 ; <double>:7 [#uses=1] getelementptr i16* %xseed, i32 2 ; <i16*>:8 [#uses=1] load i16* %8, align 2 ; <i16>:9 [#uses=1] uitofp i16 %9 to double ; <double>:10 [#uses=1] tail call double @ldexp( double %10, i32 -16 ) nounwind ; <double>:11 [#uses=1] - add double %7, %11 ; <double>:12 [#uses=1] + fadd double %7, %11 ; <double>:12 [#uses=1] ret double %12 } @@ -45,11 +45,11 @@ entry: load i16* %4, align 2 ; <i16>:5 [#uses=1] uitofp i16 %5 to double ; <double>:6 [#uses=1] tail call double @ldexp( double %6, i32 -32 ) nounwind ; <double>:7 [#uses=1] - add double %3, %7 ; <double>:8 [#uses=1] + fadd double %3, %7 ; <double>:8 [#uses=1] getelementptr i16* %xseed, i32 2 ; <i16*>:9 [#uses=1] load i16* %9, align 2 ; <i16>:10 [#uses=1] uitofp i16 %10 to double ; <double>:11 [#uses=1] tail call double @ldexp( double %11, i32 -16 ) nounwind ; <double>:12 [#uses=1] - add double %8, %12 ; <double>:13 [#uses=1] + fadd double %8, %12 ; <double>:13 [#uses=1] ret double %13 } diff --git a/test/CodeGen/PowerPC/2006-01-11-darwin-fp-argument.ll b/test/CodeGen/PowerPC/2006-01-11-darwin-fp-argument.ll index e2f06f5..1b3bde8 100644 --- a/test/CodeGen/PowerPC/2006-01-11-darwin-fp-argument.ll +++ b/test/CodeGen/PowerPC/2006-01-11-darwin-fp-argument.ll @@ -5,6 +5,6 @@ target triple = "powerpc-apple-darwin8.2.0" ; Dead argument should reserve an FP register. 
define double @bar(double %DEAD, double %X, double %Y) { - %tmp.2 = add double %X, %Y ; <double> [#uses=1] + %tmp.2 = fadd double %X, %Y ; <double> [#uses=1] ret double %tmp.2 } diff --git a/test/CodeGen/PowerPC/2006-10-11-combiner-aa-regression.ll b/test/CodeGen/PowerPC/2006-10-11-combiner-aa-regression.ll index a58cd16..7a65c00 100644 --- a/test/CodeGen/PowerPC/2006-10-11-combiner-aa-regression.ll +++ b/test/CodeGen/PowerPC/2006-10-11-combiner-aa-regression.ll @@ -9,15 +9,15 @@ define void @offset(%struct.Point* %pt, double %x, double %y, double %z) { entry: %tmp = getelementptr %struct.Point* %pt, i32 0, i32 0 ; <double*> [#uses=2] %tmp.upgrd.1 = load double* %tmp ; <double> [#uses=1] - %tmp2 = add double %tmp.upgrd.1, %x ; <double> [#uses=1] + %tmp2 = fadd double %tmp.upgrd.1, %x ; <double> [#uses=1] store double %tmp2, double* %tmp %tmp6 = getelementptr %struct.Point* %pt, i32 0, i32 1 ; <double*> [#uses=2] %tmp7 = load double* %tmp6 ; <double> [#uses=1] - %tmp9 = add double %tmp7, %y ; <double> [#uses=1] + %tmp9 = fadd double %tmp7, %y ; <double> [#uses=1] store double %tmp9, double* %tmp6 %tmp13 = getelementptr %struct.Point* %pt, i32 0, i32 2 ; <double*> [#uses=2] %tmp14 = load double* %tmp13 ; <double> [#uses=1] - %tmp16 = add double %tmp14, %z ; <double> [#uses=1] + %tmp16 = fadd double %tmp14, %z ; <double> [#uses=1] store double %tmp16, double* %tmp13 ret void } diff --git a/test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll b/test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll index 04ca3bb..637208b 100644 --- a/test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll +++ b/test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll @@ -604,10 +604,10 @@ xPIF.exit: ; preds = %.critedge7898, %xOperationInitMasks.exit shufflevector <4 x float> %583, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:589 [#uses=1] shufflevector <4 x float> %585, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:590 [#uses=1] shufflevector <4 x float> %588, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:591 [#uses=1] - mul <4 x float> zeroinitializer, %589 ; <<4 x float>>:592 [#uses=0] - mul <4 x float> zeroinitializer, %590 ; <<4 x float>>:593 [#uses=0] - mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:594 [#uses=1] - mul <4 x float> zeroinitializer, %591 ; <<4 x float>>:595 [#uses=0] + fmul <4 x float> zeroinitializer, %589 ; <<4 x float>>:592 [#uses=0] + fmul <4 x float> zeroinitializer, %590 ; <<4 x float>>:593 [#uses=0] + fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:594 [#uses=1] + fmul <4 x float> zeroinitializer, %591 ; <<4 x float>>:595 [#uses=0] getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:596 [#uses=2] load <4 x float>* %596 ; <<4 x float>>:597 [#uses=0] store <4 x float> zeroinitializer, <4 x float>* %596 @@ -621,8 +621,8 @@ xPIF.exit: ; preds = %.critedge7898, %xOperationInitMasks.exit load <4 x float>* null ; <<4 x float>>:604 [#uses=1] getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:605 [#uses=1] load <4 x float>* %605 ; <<4 x float>>:606 [#uses=1] - sub <4 x float> zeroinitializer, %604 ; <<4 x float>>:607 [#uses=2] - sub <4 x float> zeroinitializer, %606 ; <<4 x float>>:608 [#uses=2] + fsub <4 x float> zeroinitializer, %604 ; <<4 x float>>:607 [#uses=2] + fsub <4 x float> zeroinitializer, %606 ; <<4 x float>>:608 [#uses=2] call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer 
) ; <i32>:609 [#uses=0] br i1 false, label %617, label %610 @@ -672,21 +672,21 @@ xST.exit400: ; preds = %633, %625, %610 load <4 x float>* null ; <<4 x float>>:638 [#uses=2] getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:639 [#uses=0] load <4 x float>* null ; <<4 x float>>:640 [#uses=2] - mul <4 x float> %638, %638 ; <<4 x float>>:641 [#uses=1] - mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:642 [#uses=0] - mul <4 x float> %640, %640 ; <<4 x float>>:643 [#uses=2] + fmul <4 x float> %638, %638 ; <<4 x float>>:641 [#uses=1] + fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:642 [#uses=0] + fmul <4 x float> %640, %640 ; <<4 x float>>:643 [#uses=2] shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x float>>:644 [#uses=0] shufflevector <4 x float> %643, <4 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x float>>:645 [#uses=1] - add <4 x float> %645, %643 ; <<4 x float>>:646 [#uses=0] + fadd <4 x float> %645, %643 ; <<4 x float>>:646 [#uses=0] shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>>:647 [#uses=1] shufflevector <4 x float> %641, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>>:648 [#uses=1] - add <4 x float> zeroinitializer, %647 ; <<4 x float>>:649 [#uses=2] - add <4 x float> zeroinitializer, %648 ; <<4 x float>>:650 [#uses=0] - add <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:651 [#uses=2] + fadd <4 x float> zeroinitializer, %647 ; <<4 x float>>:649 [#uses=2] + fadd <4 x float> zeroinitializer, %648 ; <<4 x float>>:650 [#uses=0] + fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:651 [#uses=2] call <4 x float> @llvm.ppc.altivec.vrsqrtefp( <4 x float> %649 ) ; <<4 x float>>:652 [#uses=1] - mul <4 x float> %652, %649 ; <<4 x float>>:653 [#uses=1] + fmul <4 x float> %652, %649 ; <<4 x float>>:653 [#uses=1] call <4 x float> @llvm.ppc.altivec.vrsqrtefp( <4 x float> %651 ) ; <<4 x float>>:654 [#uses=1] - mul <4 x float> %654, %651 ; <<4 x float>>:655 [#uses=0] + fmul <4 x float> %654, %651 ; <<4 x float>>:655 [#uses=0] icmp eq i32 0, 0 ; <i1>:656 [#uses=1] br i1 %656, label %665, label %657 @@ -721,9 +721,9 @@ xST.exit402: ; preds = %669, %657 load <4 x float>* null ; <<4 x float>>:676 [#uses=0] shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:677 [#uses=1] shufflevector <4 x float> %675, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:678 [#uses=1] - mul <4 x float> zeroinitializer, %677 ; <<4 x float>>:679 [#uses=0] - mul <4 x float> zeroinitializer, %678 ; <<4 x float>>:680 [#uses=0] - mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:681 [#uses=1] + fmul <4 x float> zeroinitializer, %677 ; <<4 x float>>:679 [#uses=0] + fmul <4 x float> zeroinitializer, %678 ; <<4 x float>>:680 [#uses=0] + fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:681 [#uses=1] icmp eq i32 0, 0 ; <i1>:682 [#uses=1] br i1 %682, label %689, label %683 @@ -750,7 +750,7 @@ xST.exit405: ; preds = %689, %683 load <4 x float>* null ; <<4 x float>>:698 [#uses=0] getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:699 [#uses=0] shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:700 [#uses=1] - add <4 x float> zeroinitializer, %700 ; <<4 x float>>:701 [#uses=0] + fadd <4 
x float> zeroinitializer, %700 ; <<4 x float>>:701 [#uses=0] load <4 x i32>* %.sub7896 ; <<4 x i32>>:702 [#uses=1] call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %702, <4 x i32> zeroinitializer ) ; <i32>:703 [#uses=0] getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:704 [#uses=2] @@ -769,7 +769,7 @@ xST.exit405: ; preds = %689, %683 getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:714 [#uses=1] load <4 x float>* %714 ; <<4 x float>>:715 [#uses=0] shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:716 [#uses=0] - mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:717 [#uses=1] + fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:717 [#uses=1] load <4 x i32>* %.sub7896 ; <<4 x i32>>:718 [#uses=0] getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 0 ; <<4 x float>*>:719 [#uses=1] store <4 x float> zeroinitializer, <4 x float>* %719 @@ -791,10 +791,10 @@ xST.exit405: ; preds = %689, %683 load <4 x float>* %732 ; <<4 x float>>:733 [#uses=0] getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:734 [#uses=0] shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:735 [#uses=1] - mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:736 [#uses=1] - mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:737 [#uses=1] - mul <4 x float> zeroinitializer, %735 ; <<4 x float>>:738 [#uses=1] - mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:739 [#uses=1] + fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:736 [#uses=1] + fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:737 [#uses=1] + fmul <4 x float> zeroinitializer, %735 ; <<4 x float>>:738 [#uses=1] + fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:739 [#uses=1] call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:740 [#uses=1] icmp eq i32 %740, 0 ; <i1>:741 [#uses=0] getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:742 [#uses=2] @@ -821,9 +821,9 @@ xST.exit405: ; preds = %689, %683 getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:761 [#uses=0] shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:762 [#uses=0] shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:763 [#uses=1] - add <4 x float> %757, zeroinitializer ; <<4 x float>>:764 [#uses=0] - add <4 x float> %758, %763 ; <<4 x float>>:765 [#uses=0] - mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:766 [#uses=1] + fadd <4 x float> %757, zeroinitializer ; <<4 x float>>:764 [#uses=0] + fadd <4 x float> %758, %763 ; <<4 x float>>:765 [#uses=0] + fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:766 [#uses=1] br i1 false, label %773, label %767 ; <label>:767 ; preds = %xST.exit405 @@ -841,7 +841,7 @@ xST.exit405: ; preds = %689, %683 xST.exit422: ; preds = %773, %767 %.07267 = phi <4 x float> [ %766, %767 ], [ undef, %773 ] ; <<4 x float>> [#uses=0] getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:774 [#uses=0] - mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:775 [#uses=0] + fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:775 [#uses=0] icmp 
eq i32 0, 0 ; <i1>:776 [#uses=1] br i1 %776, label %780, label %777 @@ -1295,7 +1295,7 @@ xST.exit469: ; preds = %1027, %1025, %1005 %.07489 = phi <4 x float> [ %1002, %1005 ], [ %.17490, %1027 ], [ %.17490, %1025 ] ; <<4 x float>> [#uses=1] load <4 x float>* null ; <<4 x float>>:1029 [#uses=0] load <4 x float>* null ; <<4 x float>>:1030 [#uses=0] - sub <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1031 [#uses=1] + fsub <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1031 [#uses=1] br i1 false, label %1037, label %1032 ; <label>:1032 ; preds = %xST.exit469 @@ -1368,8 +1368,8 @@ xST.exit472: ; preds = %1050, %1048, %1032 xST.exit474: ; preds = %1059, %1058, %1051 load <4 x float>* null ; <<4 x float>>:1060 [#uses=1] - mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1061 [#uses=1] - mul <4 x float> %1060, zeroinitializer ; <<4 x float>>:1062 [#uses=2] + fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1061 [#uses=1] + fmul <4 x float> %1060, zeroinitializer ; <<4 x float>>:1062 [#uses=2] br i1 false, label %1065, label %1063 ; <label>:1063 ; preds = %xST.exit474 @@ -1556,8 +1556,8 @@ xST.exit489: ; preds = %1109, %1108, %1101 xST.exit492: ; preds = %1118, %1117, %1110 load <4 x float>* null ; <<4 x float>>:1119 [#uses=1] - mul <4 x float> %1119, zeroinitializer ; <<4 x float>>:1120 [#uses=1] - mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1121 [#uses=1] + fmul <4 x float> %1119, zeroinitializer ; <<4 x float>>:1120 [#uses=1] + fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1121 [#uses=1] br i1 false, label %1123, label %1122 ; <label>:1122 ; preds = %xST.exit492 @@ -1591,8 +1591,8 @@ xST.exit495: ; preds = %1130, %1129, %1122 %.07582 = phi <4 x float> [ %1121, %1122 ], [ %.17583, %1130 ], [ %.17583, %1129 ] ; <<4 x float>> [#uses=1] %.07590 = phi <4 x float> [ %1120, %1122 ], [ %.17591, %1130 ], [ %.17591, %1129 ] ; <<4 x float>> [#uses=1] load <4 x float>* null ; <<4 x float>>:1131 [#uses=1] - add <4 x float> %1131, zeroinitializer ; <<4 x float>>:1132 [#uses=1] - add <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1133 [#uses=1] + fadd <4 x float> %1131, zeroinitializer ; <<4 x float>>:1132 [#uses=1] + fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1133 [#uses=1] br i1 false, label %1135, label %1134 ; <label>:1134 ; preds = %xST.exit495 @@ -1633,10 +1633,10 @@ xST.exit498: ; preds = %1142, %1141, %1134 shufflevector <4 x float> %1143, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1148 [#uses=1] shufflevector <4 x float> %1145, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1149 [#uses=1] shufflevector <4 x float> %1147, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1150 [#uses=1] - mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1151 [#uses=1] - mul <4 x float> zeroinitializer, %1148 ; <<4 x float>>:1152 [#uses=1] - mul <4 x float> zeroinitializer, %1149 ; <<4 x float>>:1153 [#uses=1] - mul <4 x float> zeroinitializer, %1150 ; <<4 x float>>:1154 [#uses=1] + fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1151 [#uses=1] + fmul <4 x float> zeroinitializer, %1148 ; <<4 x float>>:1152 [#uses=1] + fmul <4 x float> zeroinitializer, %1149 ; <<4 x float>>:1153 [#uses=1] + fmul <4 x float> zeroinitializer, %1150 ; <<4 x float>>:1154 [#uses=1] br i1 false, label %1156, label %1155 ; <label>:1155 ; preds = %xST.exit498 @@ -1676,10 +1676,10 @@ xST.exit501: ; preds = %1163, %1162, %1155 load <4 
x float>* %1165 ; <<4 x float>>:1166 [#uses=1] getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1167 [#uses=1] load <4 x float>* %1167 ; <<4 x float>>:1168 [#uses=1] - add <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1169 [#uses=1] - add <4 x float> zeroinitializer, %1164 ; <<4 x float>>:1170 [#uses=1] - add <4 x float> zeroinitializer, %1166 ; <<4 x float>>:1171 [#uses=1] - add <4 x float> zeroinitializer, %1168 ; <<4 x float>>:1172 [#uses=1] + fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1169 [#uses=1] + fadd <4 x float> zeroinitializer, %1164 ; <<4 x float>>:1170 [#uses=1] + fadd <4 x float> zeroinitializer, %1166 ; <<4 x float>>:1171 [#uses=1] + fadd <4 x float> zeroinitializer, %1168 ; <<4 x float>>:1172 [#uses=1] br i1 false, label %1174, label %1173 ; <label>:1173 ; preds = %xST.exit501 @@ -1714,7 +1714,7 @@ xST.exit504: ; preds = %1181, %1180, %1173 %.07726 = phi <4 x float> [ %1171, %1173 ], [ %.17727, %1181 ], [ %.17727, %1180 ] ; <<4 x float>> [#uses=1] %.07730 = phi <4 x float> [ %1170, %1173 ], [ %.17731, %1181 ], [ %.17731, %1180 ] ; <<4 x float>> [#uses=1] %.07734 = phi <4 x float> [ %1169, %1173 ], [ %.17735, %1181 ], [ %.17735, %1180 ] ; <<4 x float>> [#uses=1] - add <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1182 [#uses=1] + fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1182 [#uses=1] br i1 false, label %1184, label %1183 ; <label>:1183 ; preds = %xST.exit504 diff --git a/test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll b/test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll index 5cccd31..aca0faa 100644 --- a/test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll +++ b/test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll @@ -9,8 +9,8 @@ entry: %input2 = load <4 x float>* null, align 16 ; <<4 x float>> %shuffle7 = shufflevector <4 x float> %input2, <4 x float> < float 0.000000e+00, float 1.000000e+00, float 0.000000e+00, float 1.000000e+00 >, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>> [#uses=1] - %mul1 = mul <4 x float> %shuffle7, zeroinitializer ; <<4 x - %add2 = add <4 x float> %mul1, %input2 ; <<4 x float>> + %mul1 = fmul <4 x float> %shuffle7, zeroinitializer ; <<4 x + %add2 = fadd <4 x float> %mul1, %input2 ; <<4 x float>> store <4 x float> %add2, <4 x float>* null, align 16 ret void } diff --git a/test/CodeGen/PowerPC/2008-07-15-Fabs.ll b/test/CodeGen/PowerPC/2008-07-15-Fabs.ll index 7d86434..f55ffac 100644 --- a/test/CodeGen/PowerPC/2008-07-15-Fabs.ll +++ b/test/CodeGen/PowerPC/2008-07-15-Fabs.ll @@ -7,11 +7,11 @@ entry: call ppc_fp128 @fabsl( ppc_fp128 %d ) nounwind readnone ; <ppc_fp128>:0 [#uses=1] fcmp olt ppc_fp128 0xM00000000000000000000000000000000, %0 ; <i1>:1 [#uses=1] %.pn106 = select i1 %1, ppc_fp128 %a, ppc_fp128 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1] - %.pn = sub ppc_fp128 0xM00000000000000000000000000000000, %.pn106 ; <ppc_fp128> [#uses=1] + %.pn = fsub ppc_fp128 0xM00000000000000000000000000000000, %.pn106 ; <ppc_fp128> [#uses=1] %y.0 = fdiv ppc_fp128 %.pn, 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1] - mul ppc_fp128 %y.0, 0xM3FF00000000000000000000000000000 ; <ppc_fp128>:2 [#uses=1] - add ppc_fp128 %2, mul (ppc_fp128 0xM00000000000000000000000000000000, ppc_fp128 0xM00000000000000000000000000000000) ; <ppc_fp128>:3 [#uses=1] - %tmpi = add ppc_fp128 %3, 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1] + fmul ppc_fp128 %y.0, 0xM3FF00000000000000000000000000000 ; <ppc_fp128>:2 [#uses=1] + 
fadd ppc_fp128 %2, fmul (ppc_fp128 0xM00000000000000000000000000000000, ppc_fp128 0xM00000000000000000000000000000000) ; <ppc_fp128>:3 [#uses=1] + %tmpi = fadd ppc_fp128 %3, 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1] store ppc_fp128 %tmpi, ppc_fp128* null, align 16 ret i256 0 } diff --git a/test/CodeGen/PowerPC/2008-07-17-Fneg.ll b/test/CodeGen/PowerPC/2008-07-17-Fneg.ll index 54bb4b3..a7f8181 100644 --- a/test/CodeGen/PowerPC/2008-07-17-Fneg.ll +++ b/test/CodeGen/PowerPC/2008-07-17-Fneg.ll @@ -7,7 +7,7 @@ entry: br i1 false, label %bb3, label %bb4 bb3: ; preds = %entry - sub ppc_fp128 0xM80000000000000000000000000000000, 0xM00000000000000000000000000000000 ; <ppc_fp128>:0 [#uses=1] + fsub ppc_fp128 0xM80000000000000000000000000000000, 0xM00000000000000000000000000000000 ; <ppc_fp128>:0 [#uses=1] fptoui ppc_fp128 %0 to i32 ; <i32>:1 [#uses=1] zext i32 %1 to i64 ; <i64>:2 [#uses=1] sub i64 0, %2 ; <i64>:3 [#uses=1] diff --git a/test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll b/test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll index c181b1c..b625ceb 100644 --- a/test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll +++ b/test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll @@ -29,10 +29,10 @@ bb2217: ; preds = %bb2326 %10 = load float* %9, align 4 ; <float> [#uses=1] %11 = getelementptr float* null, i32 3 ; <float*> [#uses=1] %12 = load float* %11, align 4 ; <float> [#uses=1] - %13 = mul float %10, 6.553500e+04 ; <float> [#uses=1] - %14 = add float %13, 5.000000e-01 ; <float> [#uses=1] - %15 = mul float %12, 6.553500e+04 ; <float> [#uses=1] - %16 = add float %15, 5.000000e-01 ; <float> [#uses=3] + %13 = fmul float %10, 6.553500e+04 ; <float> [#uses=1] + %14 = fadd float %13, 5.000000e-01 ; <float> [#uses=1] + %15 = fmul float %12, 6.553500e+04 ; <float> [#uses=1] + %16 = fadd float %15, 5.000000e-01 ; <float> [#uses=3] %17 = fcmp olt float %14, 0.000000e+00 ; <i1> [#uses=0] %18 = fcmp olt float %16, 0.000000e+00 ; <i1> [#uses=1] br i1 %18, label %bb2265, label %bb2262 @@ -68,10 +68,10 @@ bb2265: ; preds = %bb2264, %bb2262, %bb2217 %37 = load float* %36, align 4 ; <float> [#uses=1] %38 = getelementptr float* %36, i32 1 ; <float*> [#uses=1] %39 = load float* %38, align 4 ; <float> [#uses=1] - %40 = mul float %37, 6.553500e+04 ; <float> [#uses=1] - %41 = add float %40, 5.000000e-01 ; <float> [#uses=1] - %42 = mul float %39, 6.553500e+04 ; <float> [#uses=1] - %43 = add float %42, 5.000000e-01 ; <float> [#uses=3] + %40 = fmul float %37, 6.553500e+04 ; <float> [#uses=1] + %41 = fadd float %40, 5.000000e-01 ; <float> [#uses=1] + %42 = fmul float %39, 6.553500e+04 ; <float> [#uses=1] + %43 = fadd float %42, 5.000000e-01 ; <float> [#uses=3] %44 = fcmp olt float %41, 0.000000e+00 ; <i1> [#uses=0] %45 = fcmp olt float %43, 0.000000e+00 ; <i1> [#uses=1] br i1 %45, label %bb2277, label %bb2274 @@ -88,10 +88,10 @@ bb2277: ; preds = %bb2274, %bb2265 %50 = load float* %49, align 4 ; <float> [#uses=1] %51 = getelementptr float* %36, i32 3 ; <float*> [#uses=1] %52 = load float* %51, align 4 ; <float> [#uses=1] - %53 = mul float %50, 6.553500e+04 ; <float> [#uses=1] - %54 = add float %53, 5.000000e-01 ; <float> [#uses=1] - %55 = mul float %52, 6.553500e+04 ; <float> [#uses=1] - %56 = add float %55, 5.000000e-01 ; <float> [#uses=1] + %53 = fmul float %50, 6.553500e+04 ; <float> [#uses=1] + %54 = fadd float %53, 5.000000e-01 ; <float> [#uses=1] + %55 = fmul float %52, 6.553500e+04 ; <float> [#uses=1] + %56 = fadd float %55, 5.000000e-01 ; <float> [#uses=1] %57 = fcmp olt float %54, 0.000000e+00 ; <i1> 
[#uses=0] %58 = fcmp olt float %56, 0.000000e+00 ; <i1> [#uses=0] %59 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1] @@ -111,10 +111,10 @@ bb2277: ; preds = %bb2274, %bb2265 %73 = load float* %72, align 4 ; <float> [#uses=1] %74 = getelementptr float* %72, i32 1 ; <float*> [#uses=1] %75 = load float* %74, align 4 ; <float> [#uses=1] - %76 = mul float %73, 6.553500e+04 ; <float> [#uses=1] - %77 = add float %76, 5.000000e-01 ; <float> [#uses=3] - %78 = mul float %75, 6.553500e+04 ; <float> [#uses=1] - %79 = add float %78, 5.000000e-01 ; <float> [#uses=1] + %76 = fmul float %73, 6.553500e+04 ; <float> [#uses=1] + %77 = fadd float %76, 5.000000e-01 ; <float> [#uses=3] + %78 = fmul float %75, 6.553500e+04 ; <float> [#uses=1] + %79 = fadd float %78, 5.000000e-01 ; <float> [#uses=1] %80 = fcmp olt float %77, 0.000000e+00 ; <i1> [#uses=1] br i1 %80, label %bb2295, label %bb2292 @@ -134,10 +134,10 @@ bb2295: ; preds = %bb2294, %bb2292, %bb2277 %86 = load float* %85, align 4 ; <float> [#uses=1] %87 = getelementptr float* %72, i32 3 ; <float*> [#uses=1] %88 = load float* %87, align 4 ; <float> [#uses=1] - %89 = mul float %86, 6.553500e+04 ; <float> [#uses=1] - %90 = add float %89, 5.000000e-01 ; <float> [#uses=1] - %91 = mul float %88, 6.553500e+04 ; <float> [#uses=1] - %92 = add float %91, 5.000000e-01 ; <float> [#uses=1] + %89 = fmul float %86, 6.553500e+04 ; <float> [#uses=1] + %90 = fadd float %89, 5.000000e-01 ; <float> [#uses=1] + %91 = fmul float %88, 6.553500e+04 ; <float> [#uses=1] + %92 = fadd float %91, 5.000000e-01 ; <float> [#uses=1] %93 = fcmp olt float %90, 0.000000e+00 ; <i1> [#uses=0] %94 = fcmp olt float %92, 0.000000e+00 ; <i1> [#uses=0] %95 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1] diff --git a/test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll b/test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll index 0283082..c760b41 100644 --- a/test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll +++ b/test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll @@ -3,9 +3,9 @@ define void @__divtc3({ ppc_fp128, ppc_fp128 }* noalias sret %agg.result, ppc_fp128 %a, ppc_fp128 %b, ppc_fp128 %c, ppc_fp128 %d) nounwind { entry: %imag59 = load ppc_fp128* null, align 8 ; <ppc_fp128> [#uses=1] - %0 = mul ppc_fp128 0xM00000000000000000000000000000000, %imag59 ; <ppc_fp128> [#uses=1] - %1 = mul ppc_fp128 0xM00000000000000000000000000000000, 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1] - %2 = add ppc_fp128 %0, %1 ; <ppc_fp128> [#uses=1] + %0 = fmul ppc_fp128 0xM00000000000000000000000000000000, %imag59 ; <ppc_fp128> [#uses=1] + %1 = fmul ppc_fp128 0xM00000000000000000000000000000000, 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1] + %2 = fadd ppc_fp128 %0, %1 ; <ppc_fp128> [#uses=1] store ppc_fp128 %2, ppc_fp128* null, align 16 unreachable } diff --git a/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll b/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll index 4db5773..071c788 100644 --- a/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll +++ b/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll @@ -6,17 +6,17 @@ entry: br i1 %0, label %bb5, label %bb1 bb1: ; preds = %entry - %1 = mul ppc_fp128 %a, 0xM3DF00000000000000000000000000000 ; <ppc_fp128> [#uses=1] + %1 = fmul ppc_fp128 %a, 0xM3DF00000000000000000000000000000 ; <ppc_fp128> [#uses=1] %2 = fptoui ppc_fp128 %1 to i32 ; <i32> [#uses=1] %3 = zext i32 %2 to i64 ; <i64> [#uses=1] %4 = shl i64 %3, 32 ; <i64> [#uses=3] %5 = uitofp i64 %4 to ppc_fp128 ; <ppc_fp128> [#uses=1] - %6 = sub ppc_fp128 %a, %5 ; <ppc_fp128> [#uses=3] + %6 = fsub ppc_fp128 
%a, %5 ; <ppc_fp128> [#uses=3] %7 = fcmp olt ppc_fp128 %6, 0xM00000000000000000000000000000000 ; <i1> [#uses=1] br i1 %7, label %bb2, label %bb3 bb2: ; preds = %bb1 - %8 = sub ppc_fp128 0xM80000000000000000000000000000000, %6 ; <ppc_fp128> [#uses=1] + %8 = fsub ppc_fp128 0xM80000000000000000000000000000000, %6 ; <ppc_fp128> [#uses=1] %9 = fptoui ppc_fp128 %8 to i32 ; <i32> [#uses=1] %10 = zext i32 %9 to i64 ; <i64> [#uses=1] %11 = sub i64 %4, %10 ; <i64> [#uses=1] diff --git a/test/CodeGen/PowerPC/buildvec_canonicalize.ll b/test/CodeGen/PowerPC/buildvec_canonicalize.ll index 66428c7..20ff3db 100644 --- a/test/CodeGen/PowerPC/buildvec_canonicalize.ll +++ b/test/CodeGen/PowerPC/buildvec_canonicalize.ll @@ -11,7 +11,7 @@ define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) { %tmp = load <4 x float>* %P3 ; <<4 x float>> [#uses=1] %tmp3 = load <4 x float>* %P1 ; <<4 x float>> [#uses=1] - %tmp4 = mul <4 x float> %tmp, %tmp3 ; <<4 x float>> [#uses=1] + %tmp4 = fmul <4 x float> %tmp, %tmp3 ; <<4 x float>> [#uses=1] store <4 x float> %tmp4, <4 x float>* %P3 store <4 x float> zeroinitializer, <4 x float>* %P1 store <4 x i32> zeroinitializer, <4 x i32>* %P2 diff --git a/test/CodeGen/PowerPC/fma.ll b/test/CodeGen/PowerPC/fma.ll index fd9bd74..4a6fe70 100644 --- a/test/CodeGen/PowerPC/fma.ll +++ b/test/CodeGen/PowerPC/fma.ll @@ -2,53 +2,53 @@ ; RUN: egrep {fn?madd|fn?msub} | count 8 define double @test_FMADD1(double %A, double %B, double %C) { - %D = mul double %A, %B ; <double> [#uses=1] - %E = add double %D, %C ; <double> [#uses=1] + %D = fmul double %A, %B ; <double> [#uses=1] + %E = fadd double %D, %C ; <double> [#uses=1] ret double %E } define double @test_FMADD2(double %A, double %B, double %C) { - %D = mul double %A, %B ; <double> [#uses=1] - %E = add double %D, %C ; <double> [#uses=1] + %D = fmul double %A, %B ; <double> [#uses=1] + %E = fadd double %D, %C ; <double> [#uses=1] ret double %E } define double @test_FMSUB(double %A, double %B, double %C) { - %D = mul double %A, %B ; <double> [#uses=1] - %E = sub double %D, %C ; <double> [#uses=1] + %D = fmul double %A, %B ; <double> [#uses=1] + %E = fsub double %D, %C ; <double> [#uses=1] ret double %E } define double @test_FNMADD1(double %A, double %B, double %C) { - %D = mul double %A, %B ; <double> [#uses=1] - %E = add double %D, %C ; <double> [#uses=1] - %F = sub double -0.000000e+00, %E ; <double> [#uses=1] + %D = fmul double %A, %B ; <double> [#uses=1] + %E = fadd double %D, %C ; <double> [#uses=1] + %F = fsub double -0.000000e+00, %E ; <double> [#uses=1] ret double %F } define double @test_FNMADD2(double %A, double %B, double %C) { - %D = mul double %A, %B ; <double> [#uses=1] - %E = add double %C, %D ; <double> [#uses=1] - %F = sub double -0.000000e+00, %E ; <double> [#uses=1] + %D = fmul double %A, %B ; <double> [#uses=1] + %E = fadd double %C, %D ; <double> [#uses=1] + %F = fsub double -0.000000e+00, %E ; <double> [#uses=1] ret double %F } define double @test_FNMSUB1(double %A, double %B, double %C) { - %D = mul double %A, %B ; <double> [#uses=1] - %E = sub double %C, %D ; <double> [#uses=1] + %D = fmul double %A, %B ; <double> [#uses=1] + %E = fsub double %C, %D ; <double> [#uses=1] ret double %E } define double @test_FNMSUB2(double %A, double %B, double %C) { - %D = mul double %A, %B ; <double> [#uses=1] - %E = sub double %D, %C ; <double> [#uses=1] - %F = sub double -0.000000e+00, %E ; <double> [#uses=1] + %D = fmul double %A, %B ; <double> [#uses=1] + %E = fsub double %D, %C ; <double> [#uses=1] + %F = fsub double 
-0.000000e+00, %E ; <double> [#uses=1] ret double %F } define float @test_FNMSUBS(float %A, float %B, float %C) { - %D = mul float %A, %B ; <float> [#uses=1] - %E = sub float %D, %C ; <float> [#uses=1] - %F = sub float -0.000000e+00, %E ; <float> [#uses=1] + %D = fmul float %A, %B ; <float> [#uses=1] + %E = fsub float %D, %C ; <float> [#uses=1] + %F = fsub float -0.000000e+00, %E ; <float> [#uses=1] ret float %F } diff --git a/test/CodeGen/PowerPC/fnabs.ll b/test/CodeGen/PowerPC/fnabs.ll index b9517de..6c10dfb 100644 --- a/test/CodeGen/PowerPC/fnabs.ll +++ b/test/CodeGen/PowerPC/fnabs.ll @@ -4,7 +4,7 @@ declare double @fabs(double) define double @test(double %X) { %Y = call double @fabs( double %X ) ; <double> [#uses=1] - %Z = sub double -0.000000e+00, %Y ; <double> [#uses=1] + %Z = fsub double -0.000000e+00, %Y ; <double> [#uses=1] ret double %Z } diff --git a/test/CodeGen/PowerPC/fneg.ll b/test/CodeGen/PowerPC/fneg.ll index a4f49f7..9579a74 100644 --- a/test/CodeGen/PowerPC/fneg.ll +++ b/test/CodeGen/PowerPC/fneg.ll @@ -2,10 +2,10 @@ define double @test1(double %a, double %b, double %c, double %d) { entry: - %tmp2 = sub double -0.000000e+00, %c ; <double> [#uses=1] - %tmp4 = mul double %tmp2, %d ; <double> [#uses=1] - %tmp7 = mul double %a, %b ; <double> [#uses=1] - %tmp9 = sub double %tmp7, %tmp4 ; <double> [#uses=1] + %tmp2 = fsub double -0.000000e+00, %c ; <double> [#uses=1] + %tmp4 = fmul double %tmp2, %d ; <double> [#uses=1] + %tmp7 = fmul double %a, %b ; <double> [#uses=1] + %tmp9 = fsub double %tmp7, %tmp4 ; <double> [#uses=1] ret double %tmp9 } diff --git a/test/CodeGen/PowerPC/int-fp-conv-1.ll b/test/CodeGen/PowerPC/int-fp-conv-1.ll index 3d66675..583408c 100644 --- a/test/CodeGen/PowerPC/int-fp-conv-1.ll +++ b/test/CodeGen/PowerPC/int-fp-conv-1.ll @@ -3,7 +3,7 @@ define i64 @__fixunstfdi(ppc_fp128 %a) nounwind { entry: %tmp1213 = uitofp i64 0 to ppc_fp128 ; <ppc_fp128> [#uses=1] - %tmp15 = sub ppc_fp128 %a, %tmp1213 ; <ppc_fp128> [#uses=1] + %tmp15 = fsub ppc_fp128 %a, %tmp1213 ; <ppc_fp128> [#uses=1] %tmp2829 = fptoui ppc_fp128 %tmp15 to i32 ; <i32> [#uses=1] %tmp282930 = zext i32 %tmp2829 to i64 ; <i64> [#uses=1] %tmp32 = add i64 %tmp282930, 0 ; <i64> [#uses=1] diff --git a/test/CodeGen/PowerPC/itofp128.ll b/test/CodeGen/PowerPC/itofp128.ll index 91119e9..4d74511 100644 --- a/test/CodeGen/PowerPC/itofp128.ll +++ b/test/CodeGen/PowerPC/itofp128.ll @@ -6,7 +6,7 @@ target triple = "powerpc64-apple-darwin9.2.0" define i128 @__fixunstfti(ppc_fp128 %a) nounwind { entry: %tmp1213 = uitofp i128 0 to ppc_fp128 ; <ppc_fp128> [#uses=1] - %tmp15 = sub ppc_fp128 %a, %tmp1213 ; <ppc_fp128> [#uses=1] + %tmp15 = fsub ppc_fp128 %a, %tmp1213 ; <ppc_fp128> [#uses=1] %tmp2829 = fptoui ppc_fp128 %tmp15 to i64 ; <i64> [#uses=1] %tmp282930 = zext i64 %tmp2829 to i128 ; <i128> [#uses=1] %tmp32 = add i128 %tmp282930, 0 ; <i128> [#uses=1] diff --git a/test/CodeGen/PowerPC/mem-rr-addr-mode.ll b/test/CodeGen/PowerPC/mem-rr-addr-mode.ll index d5484bd..fd0e1d4 100644 --- a/test/CodeGen/PowerPC/mem-rr-addr-mode.ll +++ b/test/CodeGen/PowerPC/mem-rr-addr-mode.ll @@ -9,9 +9,9 @@ define void @func(<4 x float>* %a, <4 x float>* %b) { %tmp = load <4 x float>* %tmp1 ; <<4 x float>> [#uses=1] %tmp3 = getelementptr <4 x float>* %a, i32 1 ; <<4 x float>*> [#uses=1] %tmp4 = load <4 x float>* %tmp3 ; <<4 x float>> [#uses=1] - %tmp5 = mul <4 x float> %tmp, %tmp4 ; <<4 x float>> [#uses=1] + %tmp5 = fmul <4 x float> %tmp, %tmp4 ; <<4 x float>> [#uses=1] %tmp8 = load <4 x float>* %b ; <<4 x float>> [#uses=1] - %tmp9 = add 
<4 x float> %tmp5, %tmp8 ; <<4 x float>> [#uses=1] + %tmp9 = fadd <4 x float> %tmp5, %tmp8 ; <<4 x float>> [#uses=1] store <4 x float> %tmp9, <4 x float>* %a ret void } diff --git a/test/CodeGen/PowerPC/multiple-return-values.ll b/test/CodeGen/PowerPC/multiple-return-values.ll index b72b148..3f75f7d 100644 --- a/test/CodeGen/PowerPC/multiple-return-values.ll +++ b/test/CodeGen/PowerPC/multiple-return-values.ll @@ -3,7 +3,7 @@ define {i64, float} @bar(i64 %a, float %b) { %y = add i64 %a, 7 - %z = add float %b, 7.0 + %z = fadd float %b, 7.0 ret i64 %y, float %z } diff --git a/test/CodeGen/PowerPC/ppcf128-1-opt.ll b/test/CodeGen/PowerPC/ppcf128-1-opt.ll index 5c059b4..e3c5ab1 100644 --- a/test/CodeGen/PowerPC/ppcf128-1-opt.ll +++ b/test/CodeGen/PowerPC/ppcf128-1-opt.ll @@ -5,19 +5,19 @@ target triple = "powerpc-apple-darwin8" define ppc_fp128 @plus(ppc_fp128 %x, ppc_fp128 %y) { entry: - %tmp3 = add ppc_fp128 %x, %y ; <ppc_fp128> [#uses=1] + %tmp3 = fadd ppc_fp128 %x, %y ; <ppc_fp128> [#uses=1] ret ppc_fp128 %tmp3 } define ppc_fp128 @minus(ppc_fp128 %x, ppc_fp128 %y) { entry: - %tmp3 = sub ppc_fp128 %x, %y ; <ppc_fp128> [#uses=1] + %tmp3 = fsub ppc_fp128 %x, %y ; <ppc_fp128> [#uses=1] ret ppc_fp128 %tmp3 } define ppc_fp128 @times(ppc_fp128 %x, ppc_fp128 %y) { entry: - %tmp3 = mul ppc_fp128 %x, %y ; <ppc_fp128> [#uses=1] + %tmp3 = fmul ppc_fp128 %x, %y ; <ppc_fp128> [#uses=1] ret ppc_fp128 %tmp3 } diff --git a/test/CodeGen/PowerPC/ppcf128-1.ll b/test/CodeGen/PowerPC/ppcf128-1.ll index ea8dd37..a487de7 100644 --- a/test/CodeGen/PowerPC/ppcf128-1.ll +++ b/test/CodeGen/PowerPC/ppcf128-1.ll @@ -14,7 +14,7 @@ entry: store ppc_fp128 %y, ppc_fp128* %y_addr %tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1] %tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1] - %tmp3 = add ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1] + %tmp3 = fadd ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1] store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16 %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1] store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16 @@ -36,7 +36,7 @@ entry: store ppc_fp128 %y, ppc_fp128* %y_addr %tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1] %tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1] - %tmp3 = sub ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1] + %tmp3 = fsub ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1] store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16 %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1] store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16 @@ -58,7 +58,7 @@ entry: store ppc_fp128 %y, ppc_fp128* %y_addr %tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1] %tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1] - %tmp3 = mul ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1] + %tmp3 = fmul ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1] store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16 %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1] store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16 diff --git a/test/CodeGen/PowerPC/ppcf128-2.ll b/test/CodeGen/PowerPC/ppcf128-2.ll index b4f61f8..4318226 100644 --- a/test/CodeGen/PowerPC/ppcf128-2.ll +++ b/test/CodeGen/PowerPC/ppcf128-2.ll @@ -4,7 +4,7 @@ define i64 @__fixtfdi(ppc_fp128 %a) nounwind { entry: br i1 false, label %bb, label %bb8 bb: ; preds = %entry - %tmp5 = sub ppc_fp128 0xM80000000000000000000000000000000, %a ; <ppc_fp128> [#uses=1] + %tmp5 = fsub ppc_fp128 0xM80000000000000000000000000000000, %a ; <ppc_fp128> [#uses=1] 
%tmp6 = tail call i64 @__fixunstfdi( ppc_fp128 %tmp5 ) nounwind ; <i64> [#uses=0] ret i64 0 bb8: ; preds = %entry diff --git a/test/CodeGen/PowerPC/ppcf128-4.ll b/test/CodeGen/PowerPC/ppcf128-4.ll index 8921dfc..16d6178 100644 --- a/test/CodeGen/PowerPC/ppcf128-4.ll +++ b/test/CodeGen/PowerPC/ppcf128-4.ll @@ -2,9 +2,9 @@ define ppc_fp128 @__floatditf(i64 %u) nounwind { entry: - %tmp6 = mul ppc_fp128 0xM00000000000000000000000000000000, 0xM41F00000000000000000000000000000 + %tmp6 = fmul ppc_fp128 0xM00000000000000000000000000000000, 0xM41F00000000000000000000000000000 %tmp78 = trunc i64 %u to i32 %tmp789 = uitofp i32 %tmp78 to ppc_fp128 - %tmp11 = add ppc_fp128 %tmp789, %tmp6 + %tmp11 = fadd ppc_fp128 %tmp789, %tmp6 ret ppc_fp128 %tmp11 } diff --git a/test/CodeGen/PowerPC/return-val-i128.ll b/test/CodeGen/PowerPC/return-val-i128.ll index 6e68ee3..27a5004 100644 --- a/test/CodeGen/PowerPC/return-val-i128.ll +++ b/test/CodeGen/PowerPC/return-val-i128.ll @@ -14,7 +14,7 @@ entry: br i1 %toBool, label %bb, label %bb8 bb: ; preds = %entry %tmp4 = load float* %a_addr, align 4 ; <float> [#uses=1] - %tmp5 = sub float -0.000000e+00, %tmp4 ; <float> [#uses=1] + %tmp5 = fsub float -0.000000e+00, %tmp4 ; <float> [#uses=1] %tmp6 = call i128 @__fixunssfDI( float %tmp5 ) nounwind ; <i128> [#uses=1] %tmp7 = sub i128 0, %tmp6 ; <i128> [#uses=1] store i128 %tmp7, i128* %tmp, align 16 diff --git a/test/CodeGen/PowerPC/unsafe-math.ll b/test/CodeGen/PowerPC/unsafe-math.ll index 3d52d0c..d211b3b 100644 --- a/test/CodeGen/PowerPC/unsafe-math.ll +++ b/test/CodeGen/PowerPC/unsafe-math.ll @@ -3,8 +3,8 @@ ; RUN: grep fmul | count 1 define double @foo(double %X) { - %tmp1 = mul double %X, 1.23 - %tmp2 = mul double %tmp1, 4.124 + %tmp1 = fmul double %X, 1.23 + %tmp2 = fmul double %tmp1, 4.124 ret double %tmp2 } diff --git a/test/CodeGen/PowerPC/vec_fneg.ll b/test/CodeGen/PowerPC/vec_fneg.ll index 2ef2099..9fdbffd 100644 --- a/test/CodeGen/PowerPC/vec_fneg.ll +++ b/test/CodeGen/PowerPC/vec_fneg.ll @@ -2,7 +2,7 @@ define void @t(<4 x float>* %A) { %tmp2 = load <4 x float>* %A - %tmp3 = sub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp2 + %tmp3 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp2 store <4 x float> %tmp3, <4 x float>* %A ret void } diff --git a/test/CodeGen/PowerPC/vec_splat.ll b/test/CodeGen/PowerPC/vec_splat.ll index a631137..7b7e4fe 100644 --- a/test/CodeGen/PowerPC/vec_splat.ll +++ b/test/CodeGen/PowerPC/vec_splat.ll @@ -15,7 +15,7 @@ define void @splat(%f4* %P, %f4* %Q, float %X) nounwind { %tmp4 = insertelement %f4 %tmp2, float %X, i32 2 ; <%f4> [#uses=1] %tmp6 = insertelement %f4 %tmp4, float %X, i32 3 ; <%f4> [#uses=1] %q = load %f4* %Q ; <%f4> [#uses=1] - %R = add %f4 %q, %tmp6 ; <%f4> [#uses=1] + %R = fadd %f4 %q, %tmp6 ; <%f4> [#uses=1] store %f4 %R, %f4* %P ret void } diff --git a/test/CodeGen/PowerPC/vec_zero.ll b/test/CodeGen/PowerPC/vec_zero.ll index 8d06a7d..7350e91 100644 --- a/test/CodeGen/PowerPC/vec_zero.ll +++ b/test/CodeGen/PowerPC/vec_zero.ll @@ -2,7 +2,7 @@ define void @foo(<4 x float>* %P) { %T = load <4 x float>* %P ; <<4 x float>> [#uses=1] - %S = add <4 x float> zeroinitializer, %T ; <<4 x float>> [#uses=1] + %S = fadd <4 x float> zeroinitializer, %T ; <<4 x float>> [#uses=1] store <4 x float> %S, <4 x float>* %P ret void } diff --git a/test/CodeGen/PowerPC/vector.ll b/test/CodeGen/PowerPC/vector.ll index 679e69e..a6c17b4 100644 --- 
a/test/CodeGen/PowerPC/vector.ll +++ b/test/CodeGen/PowerPC/vector.ll @@ -14,7 +14,7 @@ define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) { %p = load %f1* %P ; <%f1> [#uses=1] %q = load %f1* %Q ; <%f1> [#uses=1] - %R = add %f1 %p, %q ; <%f1> [#uses=1] + %R = fadd %f1 %p, %q ; <%f1> [#uses=1] store %f1 %R, %f1* %S ret void } @@ -22,7 +22,7 @@ define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) { define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) { %p = load %f2* %P ; <%f2> [#uses=1] %q = load %f2* %Q ; <%f2> [#uses=1] - %R = add %f2 %p, %q ; <%f2> [#uses=1] + %R = fadd %f2 %p, %q ; <%f2> [#uses=1] store %f2 %R, %f2* %S ret void } @@ -30,7 +30,7 @@ define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) { define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) { %p = load %f4* %P ; <%f4> [#uses=1] %q = load %f4* %Q ; <%f4> [#uses=1] - %R = add %f4 %p, %q ; <%f4> [#uses=1] + %R = fadd %f4 %p, %q ; <%f4> [#uses=1] store %f4 %R, %f4* %S ret void } @@ -38,7 +38,7 @@ define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) { define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) { %p = load %f8* %P ; <%f8> [#uses=1] %q = load %f8* %Q ; <%f8> [#uses=1] - %R = add %f8 %p, %q ; <%f8> [#uses=1] + %R = fadd %f8 %p, %q ; <%f8> [#uses=1] store %f8 %R, %f8* %S ret void } @@ -46,7 +46,7 @@ define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) { define void @test_fmul(%f8* %P, %f8* %Q, %f8* %S) { %p = load %f8* %P ; <%f8> [#uses=1] %q = load %f8* %Q ; <%f8> [#uses=1] - %R = mul %f8 %p, %q ; <%f8> [#uses=1] + %R = fmul %f8 %p, %q ; <%f8> [#uses=1] store %f8 %R, %f8* %S ret void } @@ -63,7 +63,7 @@ define void @test_div(%f8* %P, %f8* %Q, %f8* %S) { define void @test_cst(%f4* %P, %f4* %S) { %p = load %f4* %P ; <%f4> [#uses=1] - %R = add %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float + %R = fadd %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float 2.000000e+00, float 4.500000e+00 > ; <%f4> [#uses=1] store %f4 %R, %f4* %S ret void @@ -71,14 +71,14 @@ define void @test_cst(%f4* %P, %f4* %S) { define void @test_zero(%f4* %P, %f4* %S) { %p = load %f4* %P ; <%f4> [#uses=1] - %R = add %f4 %p, zeroinitializer ; <%f4> [#uses=1] + %R = fadd %f4 %p, zeroinitializer ; <%f4> [#uses=1] store %f4 %R, %f4* %S ret void } define void @test_undef(%f4* %P, %f4* %S) { %p = load %f4* %P ; <%f4> [#uses=1] - %R = add %f4 %p, undef ; <%f4> [#uses=1] + %R = fadd %f4 %p, undef ; <%f4> [#uses=1] store %f4 %R, %f4* %S ret void } @@ -116,7 +116,7 @@ define double @test_extract_elt2(%d8* %P) { define void @test_cast_1(%f4* %b, %i4* %a) { %tmp = load %f4* %b ; <%f4> [#uses=1] - %tmp2 = add %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float + %tmp2 = fadd %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 > ; <%f4> [#uses=1] %tmp3 = bitcast %f4 %tmp2 to %i4 ; <%i4> [#uses=1] %tmp4 = add %i4 %tmp3, < i32 1, i32 2, i32 3, i32 4 > @@ -140,7 +140,7 @@ define void @splat(%f4* %P, %f4* %Q, float %X) { %tmp4 = insertelement %f4 %tmp2, float %X, i32 2 %tmp6 = insertelement %f4 %tmp4, float %X, i32 3 %q = load %f4* %Q ; <%f4> [#uses=1] - %R = add %f4 %q, %tmp6 ; <%f4> [#uses=1] + %R = fadd %f4 %q, %tmp6 ; <%f4> [#uses=1] store %f4 %R, %f4* %P ret void } diff --git a/test/CodeGen/SPARC/2006-01-22-BitConvertLegalize.ll b/test/CodeGen/SPARC/2006-01-22-BitConvertLegalize.ll index 15af046..76f140c 100644 --- a/test/CodeGen/SPARC/2006-01-22-BitConvertLegalize.ll +++ b/test/CodeGen/SPARC/2006-01-22-BitConvertLegalize.ll @@ -2,8 +2,8 @@ define void @execute_list() { %tmp.33.i = fdiv float 0.000000e+00, 0.000000e+00 ; <float> [#uses=1] 
- %tmp.37.i = mul float 0.000000e+00, %tmp.33.i ; <float> [#uses=1] - %tmp.42.i = add float %tmp.37.i, 0.000000e+00 ; <float> [#uses=1] + %tmp.37.i = fmul float 0.000000e+00, %tmp.33.i ; <float> [#uses=1] + %tmp.42.i = fadd float %tmp.37.i, 0.000000e+00 ; <float> [#uses=1] call void @gl_EvalCoord1f( float %tmp.42.i ) ret void } diff --git a/test/CodeGen/X86/2005-05-08-FPStackifierPHI.ll b/test/CodeGen/X86/2005-05-08-FPStackifierPHI.ll index b5d215b..04035ac 100644 --- a/test/CodeGen/X86/2005-05-08-FPStackifierPHI.ll +++ b/test/CodeGen/X86/2005-05-08-FPStackifierPHI.ll @@ -13,7 +13,7 @@ no_exit.16: ; preds = %no_exit.16, %no_exit.16.preheader loopexit.16.loopexit: ; preds = %no_exit.16 br label %no_exit.18 no_exit.18: ; preds = %loopexit.20, %loopexit.16.loopexit - %tmp.882 = add float 0.000000e+00, 0.000000e+00 ; <float> [#uses=2] + %tmp.882 = fadd float 0.000000e+00, 0.000000e+00 ; <float> [#uses=2] br i1 false, label %loopexit.19, label %no_exit.19.preheader no_exit.19.preheader: ; preds = %no_exit.18 ret void @@ -21,9 +21,9 @@ loopexit.19: ; preds = %no_exit.18 br i1 false, label %loopexit.20, label %no_exit.20 no_exit.20: ; preds = %loopexit.21, %loopexit.19 %ai2.1122.tmp.3 = phi float [ %tmp.958, %loopexit.21 ], [ %tmp.882, %loopexit.19 ] ; <float> [#uses=1] - %tmp.950 = mul float %tmp.882, %ai2.1122.tmp.3 ; <float> [#uses=1] - %tmp.951 = sub float 0.000000e+00, %tmp.950 ; <float> [#uses=1] - %tmp.958 = add float 0.000000e+00, 0.000000e+00 ; <float> [#uses=1] + %tmp.950 = fmul float %tmp.882, %ai2.1122.tmp.3 ; <float> [#uses=1] + %tmp.951 = fsub float 0.000000e+00, %tmp.950 ; <float> [#uses=1] + %tmp.958 = fadd float 0.000000e+00, 0.000000e+00 ; <float> [#uses=1] br i1 false, label %loopexit.21, label %no_exit.21.preheader no_exit.21.preheader: ; preds = %no_exit.20 ret void diff --git a/test/CodeGen/X86/2006-05-25-CycleInDAG.ll b/test/CodeGen/X86/2006-05-25-CycleInDAG.ll index c9a0049..78838d1 100644 --- a/test/CodeGen/X86/2006-05-25-CycleInDAG.ll +++ b/test/CodeGen/X86/2006-05-25-CycleInDAG.ll @@ -11,7 +11,7 @@ cond_next33: ; preds = %0 %tmp58.i = or i32 0, %tmp61.i.upgrd.1 ; <i32> [#uses=1] %tmp62.i = or i32 %tmp58.i, 0 ; <i32> [#uses=1] %tmp62.i.upgrd.2 = sitofp i32 %tmp62.i to double ; <double> [#uses=1] - %tmp64.i = add double %tmp62.i.upgrd.2, %tmp44.i ; <double> [#uses=1] + %tmp64.i = fadd double %tmp62.i.upgrd.2, %tmp44.i ; <double> [#uses=1] %tmp68.i = call double @foo( double %tmp64.i, i32 0 ) ; <double> [#uses=0] ret i32 0 } diff --git a/test/CodeGen/X86/2007-01-08-InstrSched.ll b/test/CodeGen/X86/2007-01-08-InstrSched.ll index 811e9ac..3b365f3 100644 --- a/test/CodeGen/X86/2007-01-08-InstrSched.ll +++ b/test/CodeGen/X86/2007-01-08-InstrSched.ll @@ -3,12 +3,12 @@ ; RUN: %prcontext {mulss LCPI1_3} 1 | grep mulss | count 1 define float @foo(float %x) { - %tmp1 = mul float %x, 3.000000e+00 - %tmp3 = mul float %x, 5.000000e+00 - %tmp5 = mul float %x, 7.000000e+00 - %tmp7 = mul float %x, 1.100000e+01 - %tmp10 = add float %tmp1, %tmp3 - %tmp12 = add float %tmp10, %tmp5 - %tmp14 = add float %tmp12, %tmp7 + %tmp1 = fmul float %x, 3.000000e+00 + %tmp3 = fmul float %x, 5.000000e+00 + %tmp5 = fmul float %x, 7.000000e+00 + %tmp7 = fmul float %x, 1.100000e+01 + %tmp10 = fadd float %tmp1, %tmp3 + %tmp12 = fadd float %tmp10, %tmp5 + %tmp14 = fadd float %tmp12, %tmp7 ret float %tmp14 } diff --git a/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll b/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll index d1d0ea8..c03d982 100644 --- a/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll +++ 
b/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll @@ -139,7 +139,7 @@ b341: %r353 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r352 %r354 = load float* %r353 %r362 = load float* bitcast ([128 x i64]* @i6000 to float*) - %r363 = add float 0.000000e+00, %r362 + %r363 = fadd float 0.000000e+00, %r362 %r370 = load float* bitcast ([128 x i64]* @i6000 to float*) %r376 = icmp slt i64 %r16, 0 br i1 %r376, label %b377, label %a35b @@ -155,11 +155,11 @@ a35b: %e785 = shl i64 %w1865, 0 %b1877 = mul i64 %w1865, 0 %s795 = add i64 %b1877, 0 - %r399 = add float %r354, 0.000000e+00 - %r402 = add float %r370, 0.000000e+00 - %r403 = add float %r348, 0.000000e+00 + %r399 = fadd float %r354, 0.000000e+00 + %r402 = fadd float %r370, 0.000000e+00 + %r403 = fadd float %r348, 0.000000e+00 %r411 = add i64 %s795, 0 - %r431 = add float %r362, 0.000000e+00 + %r431 = fadd float %r362, 0.000000e+00 %r454 = add i64 %e785, 0 %r457 = add i64 %e785, 0 %r459 = icmp slt i64 %r457, 0 @@ -230,21 +230,21 @@ a45b714: %r750 = add i64 %r717, 0 %r751 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r750 %r752 = load float* %r751 - %r753 = add float %r752, %r746 - %r754 = add float %r728, %r722 - %r755 = add float %r734, %r754 - %r756 = add float %r755, %r740 - %r757 = add float %r753, %r756 - %r759 = add float %r757, %r540 + %r753 = fadd float %r752, %r746 + %r754 = fadd float %r728, %r722 + %r755 = fadd float %r734, %r754 + %r756 = fadd float %r755, %r740 + %r757 = fadd float %r753, %r756 + %r759 = fadd float %r757, %r540 %r770 = add i64 %r717, 0 %r771 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r770 %r772 = load float* %r771 %r776 = add i64 %r717, 0 %r777 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r776 %r778 = load float* %r777 - %r781 = add float %r363, %r772 - %r782 = add float %r781, %r778 - %r783 = add float %r551, %r782 + %r781 = fadd float %r363, %r772 + %r782 = fadd float %r781, %r778 + %r783 = fadd float %r551, %r782 br label %b712 a57b: br i1 %r335, label %a66b, label %b1086 @@ -310,10 +310,10 @@ a53b1019: %r1035 = load float* %r1034 %r1037 = bitcast i8* %c22010 to float* %r1040 = getelementptr float* %r1037, i64 %r1025 - %r1044 = add float %r864, %r1035 - %r1046 = add float %r870, %r1027 - %r1047 = add float %r1044, %r1046 - %r1048 = add float %r851, %r1047 + %r1044 = fadd float %r864, %r1035 + %r1046 = fadd float %r870, %r1027 + %r1047 = fadd float %r1044, %r1046 + %r1048 = fadd float %r851, %r1047 %v1886 = add i64 %w1885, 0 %u1890 = icmp slt i64 %v1886, %b1889 br i1 %u1890, label %b1016, label %a53b1019 @@ -341,7 +341,7 @@ b1117: %r1132 = bitcast i8* %c22012 to float* %r1134 = getelementptr float* %r1132, i64 %w1915 %r1135 = load float* %r1134 - %r1136 = add float %r1123, %r1135 + %r1136 = fadd float %r1123, %r1135 %r1138 = icmp slt i64 %r1114, 0 br i1 %r1138, label %b1139, label %a63b b1139: @@ -387,7 +387,7 @@ b1263: a63b1266: %w1944 = phi i64 [ 0, %a63b1266q ], [ %v1945, %a63b1266 ] %s1377 = phi i64 [ %s1374, %a63b1266q ], [ %r1297, %a63b1266 ] - %r1282 = add float %r1136, 0.000000e+00 + %r1282 = fadd float %r1136, 0.000000e+00 %r1297 = add i64 %s1377, 0 %v1945 = add i64 %w1944, 0 %u1949 = icmp slt i64 %v1945, %b1948 @@ -418,7 +418,7 @@ a74b: %r1379 = add i64 %s1543, 0 %r1403 = add i64 %r1355, 0 %r1422 = add i64 %r1348, 0 - %r1526 = add float %r1372, 0.000000e+00 + %r1526 = fadd float %r1372, 0.000000e+00 %r1573 = add i64 %w1958, 0 %r1581 = icmp slt i64 %r1573, 0 %v1959 = add i64 %w1958, 0 @@ -448,10 +448,10 @@ a97b: %r1763 
= load float* %r1762 %r1767 = add i64 %r1622, 0 %r1768 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1767 - %r1772 = add float %r1763, 0.000000e+00 - %r1773 = add float %r1772, 0.000000e+00 - %r1809 = add float %r1757, 0.000000e+00 - %r1810 = add float %r1773, %r1809 + %r1772 = fadd float %r1763, 0.000000e+00 + %r1773 = fadd float %r1772, 0.000000e+00 + %r1809 = fadd float %r1757, 0.000000e+00 + %r1810 = fadd float %r1773, %r1809 store float %r1810, float* %r1768 %r1818 = add i64 %w1970, 0 %r1826 = icmp slt i64 %r1818, 0 diff --git a/test/CodeGen/X86/2007-03-01-SpillerCrash.ll b/test/CodeGen/X86/2007-03-01-SpillerCrash.ll index d4176f1..721b6e7 100644 --- a/test/CodeGen/X86/2007-03-01-SpillerCrash.ll +++ b/test/CodeGen/X86/2007-03-01-SpillerCrash.ll @@ -3,12 +3,12 @@ define void @test() nounwind { test.exit: - mul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:0 [#uses=4] + fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:0 [#uses=4] load <4 x float>* null ; <<4 x float>>:1 [#uses=1] shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:2 [#uses=1] - mul <4 x float> %0, %2 ; <<4 x float>>:3 [#uses=1] - sub <4 x float> zeroinitializer, %3 ; <<4 x float>>:4 [#uses=1] - mul <4 x float> %4, zeroinitializer ; <<4 x float>>:5 [#uses=2] + fmul <4 x float> %0, %2 ; <<4 x float>>:3 [#uses=1] + fsub <4 x float> zeroinitializer, %3 ; <<4 x float>>:4 [#uses=1] + fmul <4 x float> %4, zeroinitializer ; <<4 x float>>:5 [#uses=2] bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:6 [#uses=1] and <4 x i32> %6, < i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647 > ; <<4 x i32>>:7 [#uses=1] bitcast <4 x i32> %7 to <4 x float> ; <<4 x float>>:8 [#uses=2] @@ -23,13 +23,13 @@ test.exit: br i1 false, label %19, label %13 ; <label>:13 ; preds = %12 - sub float -0.000000e+00, 0.000000e+00 ; <float>:14 [#uses=1] + fsub float -0.000000e+00, 0.000000e+00 ; <float>:14 [#uses=1] %tmp207 = extractelement <4 x float> zeroinitializer, i32 0 ; <float> [#uses=1] %tmp208 = extractelement <4 x float> zeroinitializer, i32 2 ; <float> [#uses=1] - sub float -0.000000e+00, %tmp208 ; <float>:15 [#uses=1] + fsub float -0.000000e+00, %tmp208 ; <float>:15 [#uses=1] %tmp155 = extractelement <4 x float> zeroinitializer, i32 0 ; <float> [#uses=1] %tmp156 = extractelement <4 x float> zeroinitializer, i32 2 ; <float> [#uses=1] - sub float -0.000000e+00, %tmp156 ; <float>:16 [#uses=1] + fsub float -0.000000e+00, %tmp156 ; <float>:16 [#uses=1] br label %19 ; <label>:17 ; preds = %11 @@ -54,7 +54,7 @@ test.exit: insertelement <4 x float> %31, float %25, i32 2 ; <<4 x float>>:32 [#uses=1] insertelement <4 x float> %32, float %25, i32 3 ; <<4 x float>>:33 [#uses=1] fdiv <4 x float> %33, zeroinitializer ; <<4 x float>>:34 [#uses=1] - mul <4 x float> %34, < float 5.000000e-01, float 5.000000e-01, float 5.000000e-01, float 5.000000e-01 > ; <<4 x float>>:35 [#uses=1] + fmul <4 x float> %34, < float 5.000000e-01, float 5.000000e-01, float 5.000000e-01, float 5.000000e-01 > ; <<4 x float>>:35 [#uses=1] insertelement <4 x float> undef, float %22, i32 0 ; <<4 x float>>:36 [#uses=1] insertelement <4 x float> %36, float %21, i32 1 ; <<4 x float>>:37 [#uses=0] br i1 false, label %foo.exit, label %38 @@ -64,17 +64,17 @@ test.exit: fcmp ogt float %39, 0.000000e+00 ; <i1>:40 [#uses=1] extractelement <4 x float> %0, i32 2 ; <float>:41 [#uses=1] extractelement <4 x float> %0, i32 1 ; <float>:42 [#uses=1] - sub float 
-0.000000e+00, %42 ; <float>:43 [#uses=2] + fsub float -0.000000e+00, %42 ; <float>:43 [#uses=2] %tmp189 = extractelement <4 x float> %5, i32 2 ; <float> [#uses=1] br i1 %40, label %44, label %46 ; <label>:44 ; preds = %38 - sub float -0.000000e+00, %tmp189 ; <float>:45 [#uses=0] + fsub float -0.000000e+00, %tmp189 ; <float>:45 [#uses=0] br label %foo.exit ; <label>:46 ; preds = %38 %tmp192 = extractelement <4 x float> %5, i32 1 ; <float> [#uses=1] - sub float -0.000000e+00, %tmp192 ; <float>:47 [#uses=1] + fsub float -0.000000e+00, %tmp192 ; <float>:47 [#uses=1] br label %foo.exit foo.exit: ; preds = %46, %44, %19 diff --git a/test/CodeGen/X86/2007-04-11-InlineAsmVectorResult.ll b/test/CodeGen/X86/2007-04-11-InlineAsmVectorResult.ll index ed5a194..514d665 100644 --- a/test/CodeGen/X86/2007-04-11-InlineAsmVectorResult.ll +++ b/test/CodeGen/X86/2007-04-11-InlineAsmVectorResult.ll @@ -11,7 +11,7 @@ bb: ; preds = %bb, %cond_true10 %tmp52 = bitcast <4 x float> %tmp49 to <4 x i32> ; <<4 x i32>> [#uses=1] %tmp53 = call <4 x i32> @llvm.x86.sse2.psll.d( <4 x i32> %tmp52, <4 x i32> < i32 8, i32 undef, i32 undef, i32 undef > ) ; <<4 x i32>> [#uses=1] %tmp105 = bitcast <4 x i32> %tmp53 to <4 x float> ; <<4 x float>> [#uses=1] - %tmp108 = sub <4 x float> zeroinitializer, %tmp105 ; <<4 x float>> [#uses=0] + %tmp108 = fsub <4 x float> zeroinitializer, %tmp105 ; <<4 x float>> [#uses=0] br label %bb return: ; preds = %entry diff --git a/test/CodeGen/X86/2007-04-24-VectorCrash.ll b/test/CodeGen/X86/2007-04-24-VectorCrash.ll index ce23da0..3e08e50 100644 --- a/test/CodeGen/X86/2007-04-24-VectorCrash.ll +++ b/test/CodeGen/X86/2007-04-24-VectorCrash.ll @@ -8,8 +8,8 @@ define void @test(float* %P) { entry: or <4 x i32> zeroinitializer, and (<4 x i32> bitcast (<4 x float> shufflevector (<4 x float> undef, <4 x float> undef, <4 x i32> zeroinitializer) to <4 x i32>), <4 x i32> < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 >) ; <<4 x i32>>:0 [#uses=1] bitcast <4 x i32> %0 to <4 x float> ; <<4 x float>>:1 [#uses=1] - sub <4 x float> %1, zeroinitializer ; <<4 x float>>:2 [#uses=1] - sub <4 x float> shufflevector (<4 x float> undef, <4 x float> undef, <4 x i32> zeroinitializer), %2 ; <<4 x float>>:3 [#uses=1] + fsub <4 x float> %1, zeroinitializer ; <<4 x float>>:2 [#uses=1] + fsub <4 x float> shufflevector (<4 x float> undef, <4 x float> undef, <4 x i32> zeroinitializer), %2 ; <<4 x float>>:3 [#uses=1] shufflevector <4 x float> zeroinitializer, <4 x float> %3, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:4 [#uses=1] shufflevector <4 x float> zeroinitializer, <4 x float> %4, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:5 [#uses=1] shufflevector <4 x float> zeroinitializer, <4 x float> %5, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:6 [#uses=1] @@ -29,19 +29,19 @@ entry: shufflevector <4 x float> zeroinitializer, <4 x float> %19, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:20 [#uses=1] shufflevector <4 x float> %20, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:21 [#uses=1] shufflevector <4 x float> %21, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:22 [#uses=1] - mul <4 x float> %22, zeroinitializer ; <<4 x float>>:23 [#uses=1] + fmul <4 x float> %22, zeroinitializer ; <<4 x float>>:23 [#uses=1] shufflevector <4 x float> %23, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>>:24 [#uses=1] call <4 x float> @llvm.x86.sse.add.ss( <4 x float> 
zeroinitializer, <4 x float> %24 ) ; <<4 x float>>:25 [#uses=1] shufflevector <4 x float> %25, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:26 [#uses=1] shufflevector <4 x float> %26, <4 x float> zeroinitializer, <4 x i32> zeroinitializer ; <<4 x float>>:27 [#uses=1] shufflevector <4 x float> %27, <4 x float> zeroinitializer, <4 x i32> < i32 4, i32 1, i32 6, i32 7 > ; <<4 x float>>:28 [#uses=1] - mul <4 x float> zeroinitializer, %28 ; <<4 x float>>:29 [#uses=1] - add <4 x float> %29, zeroinitializer ; <<4 x float>>:30 [#uses=1] - mul <4 x float> zeroinitializer, %30 ; <<4 x float>>:31 [#uses=1] + fmul <4 x float> zeroinitializer, %28 ; <<4 x float>>:29 [#uses=1] + fadd <4 x float> %29, zeroinitializer ; <<4 x float>>:30 [#uses=1] + fmul <4 x float> zeroinitializer, %30 ; <<4 x float>>:31 [#uses=1] shufflevector <4 x float> zeroinitializer, <4 x float> %31, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:32 [#uses=1] - mul <4 x float> zeroinitializer, %32 ; <<4 x float>>:33 [#uses=1] + fmul <4 x float> zeroinitializer, %32 ; <<4 x float>>:33 [#uses=1] shufflevector <4 x float> %33, <4 x float> zeroinitializer, <4 x i32> zeroinitializer ; <<4 x float>>:34 [#uses=1] - mul <4 x float> zeroinitializer, %34 ; <<4 x float>>:35 [#uses=1] + fmul <4 x float> zeroinitializer, %34 ; <<4 x float>>:35 [#uses=1] shufflevector <4 x float> zeroinitializer, <4 x float> %35, <4 x i32> < i32 0, i32 1, i32 6, i32 7 > ; <<4 x float>>:36 [#uses=1] shufflevector <4 x float> zeroinitializer, <4 x float> %36, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:37 [#uses=1] shufflevector <4 x float> zeroinitializer, <4 x float> %37, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:38 [#uses=1] @@ -56,7 +56,7 @@ entry: shufflevector <4 x float> zeroinitializer, <4 x float> %46, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:47 [#uses=1] shufflevector <4 x float> zeroinitializer, <4 x float> %47, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:48 [#uses=1] shufflevector <4 x float> %48, <4 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x float>>:49 [#uses=1] - add <4 x float> %49, zeroinitializer ; <<4 x float>>:50 [#uses=1] + fadd <4 x float> %49, zeroinitializer ; <<4 x float>>:50 [#uses=1] %tmp5845 = extractelement <4 x float> %50, i32 2 ; <float> [#uses=1] store float %tmp5845, float* %P ret void diff --git a/test/CodeGen/X86/2007-06-29-VecFPConstantCSEBug.ll b/test/CodeGen/X86/2007-06-29-VecFPConstantCSEBug.ll index 11fb8e3..66a58c7 100644 --- a/test/CodeGen/X86/2007-06-29-VecFPConstantCSEBug.ll +++ b/test/CodeGen/X86/2007-06-29-VecFPConstantCSEBug.ll @@ -2,10 +2,10 @@ define void @test(<4 x float>* %arg) { %tmp89 = getelementptr <4 x float>* %arg, i64 3 - %tmp1144 = sub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, zeroinitializer + %tmp1144 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, zeroinitializer store <4 x float> %tmp1144, <4 x float>* null %tmp1149 = load <4 x float>* %tmp89 - %tmp1150 = sub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp1149 + %tmp1150 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp1149 store <4 x float> %tmp1150, <4 x float>* %tmp89 ret void } diff --git a/test/CodeGen/X86/2007-07-10-StackerAssert.ll b/test/CodeGen/X86/2007-07-10-StackerAssert.ll index 120284f..7f09b52 100644 --- 
a/test/CodeGen/X86/2007-07-10-StackerAssert.ll +++ b/test/CodeGen/X86/2007-07-10-StackerAssert.ll @@ -27,7 +27,7 @@ bb383: ; preds = %bb164 cond_true425: ; preds = %bb383 %tmp430 = load float* null ; <float> [#uses=1] - %tmp432 = sub float %tmp430, %tmp408 ; <float> [#uses=1] + %tmp432 = fsub float %tmp430, %tmp408 ; <float> [#uses=1] %tmp432433 = fpext float %tmp432 to double ; <double> [#uses=1] %tmp434435 = fpext float %tmp408 to double ; <double> [#uses=1] call void (i8*, ...)* @PR_LogPrint( i8* getelementptr ([56 x i8]* @.str97, i32 0, i32 0), double 0.000000e+00, double %tmp434435, double %tmp432433 ) diff --git a/test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll b/test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll index 142bcd3..835e4ca 100644 --- a/test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll +++ b/test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll @@ -22,7 +22,7 @@ entry: %tmp1406.i1367.i = shufflevector <4 x float> %tmp2723.i1170.i, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>> [#uses=1] %tmp84.i1413.i = load <4 x float>* %.sub6235.i ; <<4 x float>> [#uses=1] - %tmp89.i1415.i = mul <4 x float> %tmp84.i1413.i, %tmp1406.i1367.i ; <<4 x float>> [#uses=1] + %tmp89.i1415.i = fmul <4 x float> %tmp84.i1413.i, %tmp1406.i1367.i ; <<4 x float>> [#uses=1] store <4 x float> %tmp89.i1415.i, <4 x float>* %.sub.i ret i16 0 } diff --git a/test/CodeGen/X86/2007-10-12-SpillerUnfold1.ll b/test/CodeGen/X86/2007-10-12-SpillerUnfold1.ll index 3a3c113..fd914a1 100644 --- a/test/CodeGen/X86/2007-10-12-SpillerUnfold1.ll +++ b/test/CodeGen/X86/2007-10-12-SpillerUnfold1.ll @@ -5,38 +5,38 @@ entry: br i1 true, label %bb171.preheader, label %bb431 bb171.preheader: ; preds = %entry - %tmp176 = add float 0.000000e+00, 1.000000e+00 ; <float> [#uses=2] + %tmp176 = fadd float 0.000000e+00, 1.000000e+00 ; <float> [#uses=2] %gi.1 = getelementptr float* %fz, i32 0 ; <float*> [#uses=2] %tmp240 = load float* %gi.1, align 4 ; <float> [#uses=1] - %tmp242 = sub float %tmp240, 0.000000e+00 ; <float> [#uses=2] + %tmp242 = fsub float %tmp240, 0.000000e+00 ; <float> [#uses=2] %tmp251 = getelementptr float* %fz, i32 0 ; <float*> [#uses=1] %tmp252 = load float* %tmp251, align 4 ; <float> [#uses=1] %tmp258 = getelementptr float* %fz, i32 0 ; <float*> [#uses=2] %tmp259 = load float* %tmp258, align 4 ; <float> [#uses=2] - %tmp261 = mul float %tmp259, %tmp176 ; <float> [#uses=1] - %tmp262 = sub float 0.000000e+00, %tmp261 ; <float> [#uses=2] - %tmp269 = mul float %tmp252, %tmp176 ; <float> [#uses=1] - %tmp276 = mul float %tmp259, 0.000000e+00 ; <float> [#uses=1] - %tmp277 = add float %tmp269, %tmp276 ; <float> [#uses=2] + %tmp261 = fmul float %tmp259, %tmp176 ; <float> [#uses=1] + %tmp262 = fsub float 0.000000e+00, %tmp261 ; <float> [#uses=2] + %tmp269 = fmul float %tmp252, %tmp176 ; <float> [#uses=1] + %tmp276 = fmul float %tmp259, 0.000000e+00 ; <float> [#uses=1] + %tmp277 = fadd float %tmp269, %tmp276 ; <float> [#uses=2] %tmp281 = getelementptr float* %fz, i32 0 ; <float*> [#uses=1] %tmp282 = load float* %tmp281, align 4 ; <float> [#uses=2] - %tmp284 = sub float %tmp282, %tmp277 ; <float> [#uses=1] - %tmp291 = add float %tmp282, %tmp277 ; <float> [#uses=1] - %tmp298 = sub float 0.000000e+00, %tmp262 ; <float> [#uses=1] - %tmp305 = add float 0.000000e+00, %tmp262 ; <float> [#uses=1] - %tmp315 = mul float 0.000000e+00, %tmp291 ; <float> [#uses=1] - %tmp318 = mul float 0.000000e+00, %tmp298 ; <float> [#uses=1] - %tmp319 = add float %tmp315, %tmp318 ; <float> [#uses=1] - %tmp329 = add float 0.000000e+00, 
%tmp319 ; <float> [#uses=1] + %tmp284 = fsub float %tmp282, %tmp277 ; <float> [#uses=1] + %tmp291 = fadd float %tmp282, %tmp277 ; <float> [#uses=1] + %tmp298 = fsub float 0.000000e+00, %tmp262 ; <float> [#uses=1] + %tmp305 = fadd float 0.000000e+00, %tmp262 ; <float> [#uses=1] + %tmp315 = fmul float 0.000000e+00, %tmp291 ; <float> [#uses=1] + %tmp318 = fmul float 0.000000e+00, %tmp298 ; <float> [#uses=1] + %tmp319 = fadd float %tmp315, %tmp318 ; <float> [#uses=1] + %tmp329 = fadd float 0.000000e+00, %tmp319 ; <float> [#uses=1] store float %tmp329, float* null, align 4 - %tmp336 = sub float %tmp242, 0.000000e+00 ; <float> [#uses=1] + %tmp336 = fsub float %tmp242, 0.000000e+00 ; <float> [#uses=1] store float %tmp336, float* %tmp258, align 4 - %tmp343 = add float %tmp242, 0.000000e+00 ; <float> [#uses=1] + %tmp343 = fadd float %tmp242, 0.000000e+00 ; <float> [#uses=1] store float %tmp343, float* null, align 4 - %tmp355 = mul float 0.000000e+00, %tmp305 ; <float> [#uses=1] - %tmp358 = mul float 0.000000e+00, %tmp284 ; <float> [#uses=1] - %tmp359 = add float %tmp355, %tmp358 ; <float> [#uses=1] - %tmp369 = add float 0.000000e+00, %tmp359 ; <float> [#uses=1] + %tmp355 = fmul float 0.000000e+00, %tmp305 ; <float> [#uses=1] + %tmp358 = fmul float 0.000000e+00, %tmp284 ; <float> [#uses=1] + %tmp359 = fadd float %tmp355, %tmp358 ; <float> [#uses=1] + %tmp369 = fadd float 0.000000e+00, %tmp359 ; <float> [#uses=1] store float %tmp369, float* %gi.1, align 4 ret void diff --git a/test/CodeGen/X86/2007-11-02-BadAsm.ll b/test/CodeGen/X86/2007-11-02-BadAsm.ll index 7fe8eaf..4ae4d2f 100644 --- a/test/CodeGen/X86/2007-11-02-BadAsm.ll +++ b/test/CodeGen/X86/2007-11-02-BadAsm.ll @@ -45,7 +45,7 @@ cond_true.i34.i: ; preds = %xit.i cond_next.i79.i: ; preds = %xit.i %phitmp167.i = fptosi double 0.000000e+00 to i64 ; <i64> [#uses=1] %tmp142143.i = fpext float %tmp6162.i.i to double ; <double> [#uses=1] - %tmp2.i139.i = add double %tmp142143.i, 5.000000e-01 ; <double> [#uses=1] + %tmp2.i139.i = fadd double %tmp142143.i, 5.000000e-01 ; <double> [#uses=1] %tmp23.i140.i = fptosi double %tmp2.i139.i to i64 ; <i64> [#uses=1] br i1 false, label %cond_true.i143.i, label %round_coord.exit148.i @@ -60,7 +60,7 @@ round_coord.exit148.i: ; preds = %cond_true.i143.i, %cond_next.i79.i %tmp144149.i = phi i32 [ 32767, %cond_next.i79.i ], [ -32767, %cond_true.i143.i ] ; <i32> [#uses=1] store i32 %tmp144149.i, i32* null, align 8 %tmp147148.i = fpext float %tmp67.i15.i to double ; <double> [#uses=1] - %tmp2.i128.i = add double %tmp147148.i, 5.000000e-01 ; <double> [#uses=1] + %tmp2.i128.i = fadd double %tmp147148.i, 5.000000e-01 ; <double> [#uses=1] %tmp23.i129.i = fptosi double %tmp2.i128.i to i64 ; <i64> [#uses=2] %tmp5.i130.i = icmp slt i64 %tmp23.i129.i, 32768 ; <i1> [#uses=1] br i1 %tmp5.i130.i, label %cond_true.i132.i, label %round_coord.exit137.i diff --git a/test/CodeGen/X86/2007-11-06-InstrSched.ll b/test/CodeGen/X86/2007-11-06-InstrSched.ll index 605fb55..a4e44e1 100644 --- a/test/CodeGen/X86/2007-11-06-InstrSched.ll +++ b/test/CodeGen/X86/2007-11-06-InstrSched.ll @@ -13,8 +13,8 @@ bb18: ; preds = %bb18, %entry %tmp45 = sitofp i32 %tmp4 to float ; <float> [#uses=1] %tmp8 = getelementptr float* %y, i32 %i.0.reg2mem.0 ; <float*> [#uses=1] %tmp9 = load float* %tmp8, align 4 ; <float> [#uses=1] - %tmp11 = mul float %tmp9, %tmp45 ; <float> [#uses=1] - %tmp14 = add float %tmp11, %res.0.reg2mem.0 ; <float> [#uses=2] + %tmp11 = fmul float %tmp9, %tmp45 ; <float> [#uses=1] + %tmp14 = fadd float %tmp11, %res.0.reg2mem.0 ; <float> 
[#uses=2] %tmp17 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2] %tmp21 = icmp ult i32 %tmp17, %c ; <i1> [#uses=1] br i1 %tmp21, label %bb18, label %bb23 diff --git a/test/CodeGen/X86/2007-11-30-LoadFolding-Bug.ll b/test/CodeGen/X86/2007-11-30-LoadFolding-Bug.ll index 1b36fce..46422bc 100644 --- a/test/CodeGen/X86/2007-11-30-LoadFolding-Bug.ll +++ b/test/CodeGen/X86/2007-11-30-LoadFolding-Bug.ll @@ -41,8 +41,8 @@ bb.i28.i: ; preds = %bb.i28.i, %cond_next36.i %x.0.i21.i = select i1 %tmp4.i19.i, i32 %tmp1.i18.i, i32 0 ; <i32> [#uses=1] %tmp41.sum.i = add i32 %j.0.reg2mem.0.i16.i, 2 ; <i32> [#uses=0] %tmp1213.i23.i = sitofp i32 %x.0.i21.i to double ; <double> [#uses=1] - %tmp15.i24.i = sub double 0.000000e+00, %tmp1213.i23.i ; <double> [#uses=1] - %tmp16.i25.i = mul double 0.000000e+00, %tmp15.i24.i ; <double> [#uses=1] + %tmp15.i24.i = fsub double 0.000000e+00, %tmp1213.i23.i ; <double> [#uses=1] + %tmp16.i25.i = fmul double 0.000000e+00, %tmp15.i24.i ; <double> [#uses=1] %indvar.next39.i = add i32 %j.0.reg2mem.0.i16.i, 2 ; <i32> [#uses=2] %exitcond40.i = icmp eq i32 %indvar.next39.i, %tmp8.i14.i ; <i1> [#uses=1] br i1 %exitcond40.i, label %mp_unexp_d2mp.exit29.i, label %bb.i28.i diff --git a/test/CodeGen/X86/2007-12-11-FoldImpDefSpill.ll b/test/CodeGen/X86/2007-12-11-FoldImpDefSpill.ll index 84229cf..cb7a3dc 100644 --- a/test/CodeGen/X86/2007-12-11-FoldImpDefSpill.ll +++ b/test/CodeGen/X86/2007-12-11-FoldImpDefSpill.ll @@ -288,7 +288,7 @@ invcont1640: ; preds = %tmp9.i3799.noexc to label %invcont1642 unwind label %lpad3845 ; <i8*> [#uses=0] invcont1642: ; preds = %invcont1640 - %tmp18.i3770 = sub double %tmp3.i3778, 0.000000e+00 ; <double> [#uses=0] + %tmp18.i3770 = fsub double %tmp3.i3778, 0.000000e+00 ; <double> [#uses=0] invoke fastcc void @_ZN7mrScene9AddObjectEP9mrSurfaceRK8ggStringS4_i( %struct.mrScene* %this, %struct.ggBRDF* null, %struct.ggString* null, %struct.ggString* null, i32 0 ) to label %bb3743 unwind label %lpad3845 diff --git a/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll b/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll index 83ca3e3..38020c1 100644 --- a/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll +++ b/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll @@ -4,29 +4,29 @@ define void @SolveCubic(double %a, double %b, double %c, double %d, i32* %soluti entry: %tmp71 = load x86_fp80* null, align 16 ; <x86_fp80> [#uses=1] %tmp72 = fdiv x86_fp80 %tmp71, 0xKC000C000000000000000 ; <x86_fp80> [#uses=1] - %tmp73 = add x86_fp80 0xK00000000000000000000, %tmp72 ; <x86_fp80> [#uses=1] + %tmp73 = fadd x86_fp80 0xK00000000000000000000, %tmp72 ; <x86_fp80> [#uses=1] %tmp7374 = fptrunc x86_fp80 %tmp73 to double ; <double> [#uses=1] store double %tmp7374, double* null, align 8 %tmp81 = load double* null, align 8 ; <double> [#uses=1] - %tmp82 = add double %tmp81, 0x401921FB54442D18 ; <double> [#uses=1] + %tmp82 = fadd double %tmp81, 0x401921FB54442D18 ; <double> [#uses=1] %tmp83 = fdiv double %tmp82, 3.000000e+00 ; <double> [#uses=1] %tmp84 = call double @cos( double %tmp83 ) ; <double> [#uses=1] - %tmp85 = mul double 0.000000e+00, %tmp84 ; <double> [#uses=1] + %tmp85 = fmul double 0.000000e+00, %tmp84 ; <double> [#uses=1] %tmp8586 = fpext double %tmp85 to x86_fp80 ; <x86_fp80> [#uses=1] %tmp87 = load x86_fp80* null, align 16 ; <x86_fp80> [#uses=1] %tmp88 = fdiv x86_fp80 %tmp87, 0xKC000C000000000000000 ; <x86_fp80> [#uses=1] - %tmp89 = add x86_fp80 %tmp8586, %tmp88 ; <x86_fp80> [#uses=1] + %tmp89 = fadd x86_fp80 %tmp8586, %tmp88 ; <x86_fp80> [#uses=1] %tmp8990 = fptrunc x86_fp80 
%tmp89 to double ; <double> [#uses=1] store double %tmp8990, double* null, align 8 %tmp97 = load double* null, align 8 ; <double> [#uses=1] - %tmp98 = add double %tmp97, 0x402921FB54442D18 ; <double> [#uses=1] + %tmp98 = fadd double %tmp97, 0x402921FB54442D18 ; <double> [#uses=1] %tmp99 = fdiv double %tmp98, 3.000000e+00 ; <double> [#uses=1] %tmp100 = call double @cos( double %tmp99 ) ; <double> [#uses=1] - %tmp101 = mul double 0.000000e+00, %tmp100 ; <double> [#uses=1] + %tmp101 = fmul double 0.000000e+00, %tmp100 ; <double> [#uses=1] %tmp101102 = fpext double %tmp101 to x86_fp80 ; <x86_fp80> [#uses=1] %tmp103 = load x86_fp80* null, align 16 ; <x86_fp80> [#uses=1] %tmp104 = fdiv x86_fp80 %tmp103, 0xKC000C000000000000000 ; <x86_fp80> [#uses=1] - %tmp105 = add x86_fp80 %tmp101102, %tmp104 ; <x86_fp80> [#uses=1] + %tmp105 = fadd x86_fp80 %tmp101102, %tmp104 ; <x86_fp80> [#uses=1] %tmp105106 = fptrunc x86_fp80 %tmp105 to double ; <double> [#uses=1] store double %tmp105106, double* null, align 8 ret void diff --git a/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll b/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll index f1300fa..6db6537 100644 --- a/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll +++ b/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll @@ -3,13 +3,13 @@ define void @casin({ double, double }* sret %agg.result, double %z.0, double %z.1) nounwind { entry: %memtmp = alloca { double, double }, align 8 ; <{ double, double }*> [#uses=3] - %tmp4 = sub double -0.000000e+00, %z.1 ; <double> [#uses=1] + %tmp4 = fsub double -0.000000e+00, %z.1 ; <double> [#uses=1] call void @casinh( { double, double }* sret %memtmp, double %tmp4, double %z.0 ) nounwind %tmp19 = getelementptr { double, double }* %memtmp, i32 0, i32 0 ; <double*> [#uses=1] %tmp20 = load double* %tmp19, align 8 ; <double> [#uses=1] %tmp22 = getelementptr { double, double }* %memtmp, i32 0, i32 1 ; <double*> [#uses=1] %tmp23 = load double* %tmp22, align 8 ; <double> [#uses=1] - %tmp32 = sub double -0.000000e+00, %tmp20 ; <double> [#uses=1] + %tmp32 = fsub double -0.000000e+00, %tmp20 ; <double> [#uses=1] %tmp37 = getelementptr { double, double }* %agg.result, i32 0, i32 0 ; <double*> [#uses=1] store double %tmp23, double* %tmp37, align 8 %tmp40 = getelementptr { double, double }* %agg.result, i32 0, i32 1 ; <double*> [#uses=1] diff --git a/test/CodeGen/X86/2008-02-08-LoadFoldingBug.ll b/test/CodeGen/X86/2008-02-08-LoadFoldingBug.ll index b3fe9ab..230af57 100644 --- a/test/CodeGen/X86/2008-02-08-LoadFoldingBug.ll +++ b/test/CodeGen/X86/2008-02-08-LoadFoldingBug.ll @@ -33,23 +33,23 @@ bb5.i.i31: ; preds = %bb3.i27 br i1 %tmp10.i.i30, label %bb13.i.i37, label %bb30.i.i43 bb13.i.i37: ; preds = %bb5.i.i31 - %tmp15.i.i32 = sub double -0.000000e+00, %tmp22.i25 ; <double> [#uses=1] + %tmp15.i.i32 = fsub double -0.000000e+00, %tmp22.i25 ; <double> [#uses=1] %tmp17.i.i33 = fdiv double %tmp15.i.i32, %tmp12.i23 ; <double> [#uses=3] - %tmp20.i4.i = mul double %tmp17.i.i33, %tmp17.i.i33 ; <double> [#uses=1] - %tmp21.i.i34 = add double %tmp20.i4.i, 1.000000e+00 ; <double> [#uses=1] + %tmp20.i4.i = fmul double %tmp17.i.i33, %tmp17.i.i33 ; <double> [#uses=1] + %tmp21.i.i34 = fadd double %tmp20.i4.i, 1.000000e+00 ; <double> [#uses=1] %tmp22.i.i35 = call double @llvm.sqrt.f64( double %tmp21.i.i34 ) nounwind ; <double> [#uses=1] %tmp23.i5.i = fdiv double 1.000000e+00, %tmp22.i.i35 ; <double> [#uses=2] - %tmp28.i.i36 = mul double %tmp23.i5.i, %tmp17.i.i33 ; <double> [#uses=1] + %tmp28.i.i36 = fmul double %tmp23.i5.i, %tmp17.i.i33 ; <double> [#uses=1] br 
label %Givens.exit.i49 bb30.i.i43: ; preds = %bb5.i.i31 - %tmp32.i.i38 = sub double -0.000000e+00, %tmp12.i23 ; <double> [#uses=1] + %tmp32.i.i38 = fsub double -0.000000e+00, %tmp12.i23 ; <double> [#uses=1] %tmp34.i.i39 = fdiv double %tmp32.i.i38, %tmp22.i25 ; <double> [#uses=3] - %tmp37.i6.i = mul double %tmp34.i.i39, %tmp34.i.i39 ; <double> [#uses=1] - %tmp38.i.i40 = add double %tmp37.i6.i, 1.000000e+00 ; <double> [#uses=1] + %tmp37.i6.i = fmul double %tmp34.i.i39, %tmp34.i.i39 ; <double> [#uses=1] + %tmp38.i.i40 = fadd double %tmp37.i6.i, 1.000000e+00 ; <double> [#uses=1] %tmp39.i7.i = call double @llvm.sqrt.f64( double %tmp38.i.i40 ) nounwind ; <double> [#uses=1] %tmp40.i.i41 = fdiv double 1.000000e+00, %tmp39.i7.i ; <double> [#uses=2] - %tmp45.i.i42 = mul double %tmp40.i.i41, %tmp34.i.i39 ; <double> [#uses=1] + %tmp45.i.i42 = fmul double %tmp40.i.i41, %tmp34.i.i39 ; <double> [#uses=1] br label %Givens.exit.i49 Givens.exit.i49: ; preds = %bb3.i27.Givens.exit.i49_crit_edge, %bb30.i.i43, %bb13.i.i37 diff --git a/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll b/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll index 96ac7a7..fe0ee8a 100644 --- a/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll +++ b/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll @@ -24,20 +24,20 @@ bb35: ; preds = %bb24, %entry %tmp42 = sdiv i32 %i, 9 ; <i32> [#uses=1] %tmp43 = add i32 %tmp42, -1 ; <i32> [#uses=1] %tmp4344 = sitofp i32 %tmp43 to double ; <double> [#uses=1] - %tmp17.i76 = mul double %tmp4344, 0.000000e+00 ; <double> [#uses=1] + %tmp17.i76 = fmul double %tmp4344, 0.000000e+00 ; <double> [#uses=1] %tmp48 = sdiv i32 %i, 3 ; <i32> [#uses=1] %tmp49 = srem i32 %tmp48, 3 ; <i32> [#uses=1] %tmp50 = add i32 %tmp49, -1 ; <i32> [#uses=1] %tmp5051 = sitofp i32 %tmp50 to double ; <double> [#uses=1] - %tmp17.i63 = mul double %tmp5051, 0.000000e+00 ; <double> [#uses=1] + %tmp17.i63 = fmul double %tmp5051, 0.000000e+00 ; <double> [#uses=1] %tmp55 = srem i32 %i, 3 ; <i32> [#uses=1] %tmp56 = add i32 %tmp55, -1 ; <i32> [#uses=1] %tmp5657 = sitofp i32 %tmp56 to double ; <double> [#uses=1] %tmp15.i49 = getelementptr %struct.Lattice* %this, i32 0, i32 0, i32 0 ; <double*> [#uses=1] %tmp16.i50 = load double* %tmp15.i49, align 4 ; <double> [#uses=1] - %tmp17.i = mul double %tmp5657, %tmp16.i50 ; <double> [#uses=1] - %tmp20.i39 = add double %tmp17.i, %tmp17.i63 ; <double> [#uses=1] - %tmp20.i23 = add double %tmp20.i39, %tmp17.i76 ; <double> [#uses=1] + %tmp17.i = fmul double %tmp5657, %tmp16.i50 ; <double> [#uses=1] + %tmp20.i39 = fadd double %tmp17.i, %tmp17.i63 ; <double> [#uses=1] + %tmp20.i23 = fadd double %tmp20.i39, %tmp17.i76 ; <double> [#uses=1] br i1 false, label %bb58.preheader, label %bb81 bb58.preheader: ; preds = %bb35 @@ -55,7 +55,7 @@ bb58: ; preds = %bb58, %bb58.preheader %tmp95.i = and i32 %tmp88.i, -268435456 ; <i32> [#uses=1] %tmp97.i = or i32 0, %tmp95.i ; <i32> [#uses=1] store i32 %tmp97.i, i32* %tmp25.i, align 1 - %tmp6.i = add double 0.000000e+00, %tmp20.i23 ; <double> [#uses=0] + %tmp6.i = fadd double 0.000000e+00, %tmp20.i23 ; <double> [#uses=0] %exitcond96 = icmp eq i32 0, %smax ; <i1> [#uses=1] br i1 %exitcond96, label %bb81, label %bb58 diff --git a/test/CodeGen/X86/2008-02-27-PEICrash.ll b/test/CodeGen/X86/2008-02-27-PEICrash.ll index b644d8f..055eabb 100644 --- a/test/CodeGen/X86/2008-02-27-PEICrash.ll +++ b/test/CodeGen/X86/2008-02-27-PEICrash.ll @@ -13,21 +13,21 @@ bb56: ; preds = %bb33, %entry %a.pn = phi float [ %a, %bb33 ], [ %b, %entry ] ; <float> [#uses=1] %tmp41.pn508 = phi float [ 0.000000e+00, 
%bb33 ], [ 0.000000e+00, %entry ] ; <float> [#uses=1] %tmp51.pn = phi float [ 0.000000e+00, %bb33 ], [ %a, %entry ] ; <float> [#uses=1] - %tmp44.pn = mul float %tmp36.pn, %b.pn509 ; <float> [#uses=1] - %tmp46.pn = add float %tmp44.pn, %a.pn ; <float> [#uses=1] - %tmp53.pn = sub float 0.000000e+00, %tmp51.pn ; <float> [#uses=1] + %tmp44.pn = fmul float %tmp36.pn, %b.pn509 ; <float> [#uses=1] + %tmp46.pn = fadd float %tmp44.pn, %a.pn ; <float> [#uses=1] + %tmp53.pn = fsub float 0.000000e+00, %tmp51.pn ; <float> [#uses=1] %x.0 = fdiv float %tmp46.pn, %tmp41.pn508 ; <float> [#uses=1] %y.0 = fdiv float %tmp53.pn, 0.000000e+00 ; <float> [#uses=1] br i1 false, label %bb433, label %bb98 bb98: ; preds = %bb56 - %tmp102 = mul float 0.000000e+00, %a ; <float> [#uses=1] - %tmp106 = mul float 0.000000e+00, %b ; <float> [#uses=1] + %tmp102 = fmul float 0.000000e+00, %a ; <float> [#uses=1] + %tmp106 = fmul float 0.000000e+00, %b ; <float> [#uses=1] br label %bb433 bb433: ; preds = %bb98, %bb56 %x.1 = phi float [ %tmp102, %bb98 ], [ %x.0, %bb56 ] ; <float> [#uses=0] %y.1 = phi float [ %tmp106, %bb98 ], [ %y.0, %bb56 ] ; <float> [#uses=1] - %tmp460 = add float %y.1, 0.000000e+00 ; <float> [#uses=0] + %tmp460 = fadd float %y.1, 0.000000e+00 ; <float> [#uses=0] ret i64 0 } diff --git a/test/CodeGen/X86/2008-03-18-CoalescerBug.ll b/test/CodeGen/X86/2008-03-18-CoalescerBug.ll index c3b4a25..4b6758d 100644 --- a/test/CodeGen/X86/2008-03-18-CoalescerBug.ll +++ b/test/CodeGen/X86/2008-03-18-CoalescerBug.ll @@ -14,7 +14,7 @@ entry: %tmp30 = icmp sgt i32 %delta, 0 ; <i1> [#uses=1] br i1 %tmp30, label %bb33, label %bb87.preheader bb33: ; preds = %entry - %tmp28 = add float 0.000000e+00, %tmp24 ; <float> [#uses=1] + %tmp28 = fadd float 0.000000e+00, %tmp24 ; <float> [#uses=1] %tmp35 = fcmp ogt float %tmp28, 1.800000e+01 ; <i1> [#uses=1] br i1 %tmp35, label %bb38, label %bb87.preheader bb38: ; preds = %bb33 @@ -24,7 +24,7 @@ bb43: ; preds = %bb38 store i32 %tmp53, i32* null, align 4 ret void bb50: ; preds = %bb38 - %tmp56 = sub float 1.800000e+01, %tmp24 ; <float> [#uses=1] + %tmp56 = fsub float 1.800000e+01, %tmp24 ; <float> [#uses=1] %tmp57 = fcmp ugt float 0.000000e+00, %tmp56 ; <i1> [#uses=1] br i1 %tmp57, label %bb64, label %bb87.preheader bb64: ; preds = %bb50 diff --git a/test/CodeGen/X86/2008-03-25-TwoAddrPassBug.ll b/test/CodeGen/X86/2008-03-25-TwoAddrPassBug.ll index 1e5ab99..53bb054 100644 --- a/test/CodeGen/X86/2008-03-25-TwoAddrPassBug.ll +++ b/test/CodeGen/X86/2008-03-25-TwoAddrPassBug.ll @@ -3,10 +3,10 @@ define void @t() { entry: %tmp455 = shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> < i32 1, i32 0, i32 3, i32 2 > ; <<4 x float>> [#uses=1] - %tmp457 = mul <4 x float> zeroinitializer, %tmp455 ; <<4 x float>> [#uses=2] + %tmp457 = fmul <4 x float> zeroinitializer, %tmp455 ; <<4 x float>> [#uses=2] %tmp461 = shufflevector <4 x float> %tmp457, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1] %tmp465 = shufflevector <4 x float> %tmp457, <4 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x float>> [#uses=1] - %tmp466 = sub <4 x float> %tmp461, %tmp465 ; <<4 x float>> [#uses=1] + %tmp466 = fsub <4 x float> %tmp461, %tmp465 ; <<4 x float>> [#uses=1] %tmp536 = shufflevector <4 x float> zeroinitializer, <4 x float> %tmp466, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>> [#uses=1] %tmp542 = shufflevector <4 x float> %tmp536, <4 x float> zeroinitializer, <4 x i32> < i32 6, i32 7, i32 2, i32 3 > ; <<4 x float>> [#uses=1] %tmp580 = bitcast <4 
x float> %tmp542 to <4 x i32> ; <<4 x i32>> [#uses=1] @@ -15,10 +15,10 @@ entry: %tmp592 = bitcast <4 x i32> %tmp591 to <4 x float> ; <<4 x float>> [#uses=1] %tmp609 = fdiv <4 x float> < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >, %tmp592 ; <<4 x float>> [#uses=1] %tmp652 = shufflevector <4 x float> %tmp609, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>> [#uses=1] - %tmp662 = mul <4 x float> zeroinitializer, %tmp652 ; <<4 x float>> [#uses=1] + %tmp662 = fmul <4 x float> zeroinitializer, %tmp652 ; <<4 x float>> [#uses=1] %tmp678 = shufflevector <4 x float> %tmp662, <4 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x float>> [#uses=1] - %tmp753 = mul <4 x float> zeroinitializer, %tmp678 ; <<4 x float>> [#uses=1] - %tmp754 = sub <4 x float> zeroinitializer, %tmp753 ; <<4 x float>> [#uses=1] + %tmp753 = fmul <4 x float> zeroinitializer, %tmp678 ; <<4 x float>> [#uses=1] + %tmp754 = fsub <4 x float> zeroinitializer, %tmp753 ; <<4 x float>> [#uses=1] store <4 x float> %tmp754, <4 x float>* null, align 16 unreachable } diff --git a/test/CodeGen/X86/2008-07-19-movups-spills.ll b/test/CodeGen/X86/2008-07-19-movups-spills.ll index 8800357..ae30385 100644 --- a/test/CodeGen/X86/2008-07-19-movups-spills.ll +++ b/test/CodeGen/X86/2008-07-19-movups-spills.ll @@ -70,567 +70,567 @@ define void @""() { load <4 x float>* @29, align 1 ; <<4 x float>>:30 [#uses=31] load <4 x float>* @30, align 1 ; <<4 x float>>:31 [#uses=32] load <4 x float>* @31, align 1 ; <<4 x float>>:32 [#uses=33] - mul <4 x float> %1, %1 ; <<4 x float>>:33 [#uses=1] - mul <4 x float> %33, %2 ; <<4 x float>>:34 [#uses=1] - mul <4 x float> %34, %3 ; <<4 x float>>:35 [#uses=1] - mul <4 x float> %35, %4 ; <<4 x float>>:36 [#uses=1] - mul <4 x float> %36, %5 ; <<4 x float>>:37 [#uses=1] - mul <4 x float> %37, %6 ; <<4 x float>>:38 [#uses=1] - mul <4 x float> %38, %7 ; <<4 x float>>:39 [#uses=1] - mul <4 x float> %39, %8 ; <<4 x float>>:40 [#uses=1] - mul <4 x float> %40, %9 ; <<4 x float>>:41 [#uses=1] - mul <4 x float> %41, %10 ; <<4 x float>>:42 [#uses=1] - mul <4 x float> %42, %11 ; <<4 x float>>:43 [#uses=1] - mul <4 x float> %43, %12 ; <<4 x float>>:44 [#uses=1] - mul <4 x float> %44, %13 ; <<4 x float>>:45 [#uses=1] - mul <4 x float> %45, %14 ; <<4 x float>>:46 [#uses=1] - mul <4 x float> %46, %15 ; <<4 x float>>:47 [#uses=1] - mul <4 x float> %47, %16 ; <<4 x float>>:48 [#uses=1] - mul <4 x float> %48, %17 ; <<4 x float>>:49 [#uses=1] - mul <4 x float> %49, %18 ; <<4 x float>>:50 [#uses=1] - mul <4 x float> %50, %19 ; <<4 x float>>:51 [#uses=1] - mul <4 x float> %51, %20 ; <<4 x float>>:52 [#uses=1] - mul <4 x float> %52, %21 ; <<4 x float>>:53 [#uses=1] - mul <4 x float> %53, %22 ; <<4 x float>>:54 [#uses=1] - mul <4 x float> %54, %23 ; <<4 x float>>:55 [#uses=1] - mul <4 x float> %55, %24 ; <<4 x float>>:56 [#uses=1] - mul <4 x float> %56, %25 ; <<4 x float>>:57 [#uses=1] - mul <4 x float> %57, %26 ; <<4 x float>>:58 [#uses=1] - mul <4 x float> %58, %27 ; <<4 x float>>:59 [#uses=1] - mul <4 x float> %59, %28 ; <<4 x float>>:60 [#uses=1] - mul <4 x float> %60, %29 ; <<4 x float>>:61 [#uses=1] - mul <4 x float> %61, %30 ; <<4 x float>>:62 [#uses=1] - mul <4 x float> %62, %31 ; <<4 x float>>:63 [#uses=1] - mul <4 x float> %63, %32 ; <<4 x float>>:64 [#uses=3] - mul <4 x float> %2, %2 ; <<4 x float>>:65 [#uses=1] - mul <4 x float> %65, %3 ; <<4 x float>>:66 [#uses=1] - mul <4 x float> %66, %4 ; <<4 x float>>:67 [#uses=1] - mul <4 x float> %67, %5 ; <<4 x 
float>>:68 [#uses=1] - mul <4 x float> %68, %6 ; <<4 x float>>:69 [#uses=1] - mul <4 x float> %69, %7 ; <<4 x float>>:70 [#uses=1] - mul <4 x float> %70, %8 ; <<4 x float>>:71 [#uses=1] - mul <4 x float> %71, %9 ; <<4 x float>>:72 [#uses=1] - mul <4 x float> %72, %10 ; <<4 x float>>:73 [#uses=1] - mul <4 x float> %73, %11 ; <<4 x float>>:74 [#uses=1] - mul <4 x float> %74, %12 ; <<4 x float>>:75 [#uses=1] - mul <4 x float> %75, %13 ; <<4 x float>>:76 [#uses=1] - mul <4 x float> %76, %14 ; <<4 x float>>:77 [#uses=1] - mul <4 x float> %77, %15 ; <<4 x float>>:78 [#uses=1] - mul <4 x float> %78, %16 ; <<4 x float>>:79 [#uses=1] - mul <4 x float> %79, %17 ; <<4 x float>>:80 [#uses=1] - mul <4 x float> %80, %18 ; <<4 x float>>:81 [#uses=1] - mul <4 x float> %81, %19 ; <<4 x float>>:82 [#uses=1] - mul <4 x float> %82, %20 ; <<4 x float>>:83 [#uses=1] - mul <4 x float> %83, %21 ; <<4 x float>>:84 [#uses=1] - mul <4 x float> %84, %22 ; <<4 x float>>:85 [#uses=1] - mul <4 x float> %85, %23 ; <<4 x float>>:86 [#uses=1] - mul <4 x float> %86, %24 ; <<4 x float>>:87 [#uses=1] - mul <4 x float> %87, %25 ; <<4 x float>>:88 [#uses=1] - mul <4 x float> %88, %26 ; <<4 x float>>:89 [#uses=1] - mul <4 x float> %89, %27 ; <<4 x float>>:90 [#uses=1] - mul <4 x float> %90, %28 ; <<4 x float>>:91 [#uses=1] - mul <4 x float> %91, %29 ; <<4 x float>>:92 [#uses=1] - mul <4 x float> %92, %30 ; <<4 x float>>:93 [#uses=1] - mul <4 x float> %93, %31 ; <<4 x float>>:94 [#uses=1] - mul <4 x float> %94, %32 ; <<4 x float>>:95 [#uses=1] - mul <4 x float> %3, %3 ; <<4 x float>>:96 [#uses=1] - mul <4 x float> %96, %4 ; <<4 x float>>:97 [#uses=1] - mul <4 x float> %97, %5 ; <<4 x float>>:98 [#uses=1] - mul <4 x float> %98, %6 ; <<4 x float>>:99 [#uses=1] - mul <4 x float> %99, %7 ; <<4 x float>>:100 [#uses=1] - mul <4 x float> %100, %8 ; <<4 x float>>:101 [#uses=1] - mul <4 x float> %101, %9 ; <<4 x float>>:102 [#uses=1] - mul <4 x float> %102, %10 ; <<4 x float>>:103 [#uses=1] - mul <4 x float> %103, %11 ; <<4 x float>>:104 [#uses=1] - mul <4 x float> %104, %12 ; <<4 x float>>:105 [#uses=1] - mul <4 x float> %105, %13 ; <<4 x float>>:106 [#uses=1] - mul <4 x float> %106, %14 ; <<4 x float>>:107 [#uses=1] - mul <4 x float> %107, %15 ; <<4 x float>>:108 [#uses=1] - mul <4 x float> %108, %16 ; <<4 x float>>:109 [#uses=1] - mul <4 x float> %109, %17 ; <<4 x float>>:110 [#uses=1] - mul <4 x float> %110, %18 ; <<4 x float>>:111 [#uses=1] - mul <4 x float> %111, %19 ; <<4 x float>>:112 [#uses=1] - mul <4 x float> %112, %20 ; <<4 x float>>:113 [#uses=1] - mul <4 x float> %113, %21 ; <<4 x float>>:114 [#uses=1] - mul <4 x float> %114, %22 ; <<4 x float>>:115 [#uses=1] - mul <4 x float> %115, %23 ; <<4 x float>>:116 [#uses=1] - mul <4 x float> %116, %24 ; <<4 x float>>:117 [#uses=1] - mul <4 x float> %117, %25 ; <<4 x float>>:118 [#uses=1] - mul <4 x float> %118, %26 ; <<4 x float>>:119 [#uses=1] - mul <4 x float> %119, %27 ; <<4 x float>>:120 [#uses=1] - mul <4 x float> %120, %28 ; <<4 x float>>:121 [#uses=1] - mul <4 x float> %121, %29 ; <<4 x float>>:122 [#uses=1] - mul <4 x float> %122, %30 ; <<4 x float>>:123 [#uses=1] - mul <4 x float> %123, %31 ; <<4 x float>>:124 [#uses=1] - mul <4 x float> %124, %32 ; <<4 x float>>:125 [#uses=1] - mul <4 x float> %4, %4 ; <<4 x float>>:126 [#uses=1] - mul <4 x float> %126, %5 ; <<4 x float>>:127 [#uses=1] - mul <4 x float> %127, %6 ; <<4 x float>>:128 [#uses=1] - mul <4 x float> %128, %7 ; <<4 x float>>:129 [#uses=1] - mul <4 x float> %129, %8 ; <<4 x float>>:130 [#uses=1] - mul <4 x float> 
%130, %9 ; <<4 x float>>:131 [#uses=1] - mul <4 x float> %131, %10 ; <<4 x float>>:132 [#uses=1] - mul <4 x float> %132, %11 ; <<4 x float>>:133 [#uses=1] - mul <4 x float> %133, %12 ; <<4 x float>>:134 [#uses=1] - mul <4 x float> %134, %13 ; <<4 x float>>:135 [#uses=1] - mul <4 x float> %135, %14 ; <<4 x float>>:136 [#uses=1] - mul <4 x float> %136, %15 ; <<4 x float>>:137 [#uses=1] - mul <4 x float> %137, %16 ; <<4 x float>>:138 [#uses=1] - mul <4 x float> %138, %17 ; <<4 x float>>:139 [#uses=1] - mul <4 x float> %139, %18 ; <<4 x float>>:140 [#uses=1] - mul <4 x float> %140, %19 ; <<4 x float>>:141 [#uses=1] - mul <4 x float> %141, %20 ; <<4 x float>>:142 [#uses=1] - mul <4 x float> %142, %21 ; <<4 x float>>:143 [#uses=1] - mul <4 x float> %143, %22 ; <<4 x float>>:144 [#uses=1] - mul <4 x float> %144, %23 ; <<4 x float>>:145 [#uses=1] - mul <4 x float> %145, %24 ; <<4 x float>>:146 [#uses=1] - mul <4 x float> %146, %25 ; <<4 x float>>:147 [#uses=1] - mul <4 x float> %147, %26 ; <<4 x float>>:148 [#uses=1] - mul <4 x float> %148, %27 ; <<4 x float>>:149 [#uses=1] - mul <4 x float> %149, %28 ; <<4 x float>>:150 [#uses=1] - mul <4 x float> %150, %29 ; <<4 x float>>:151 [#uses=1] - mul <4 x float> %151, %30 ; <<4 x float>>:152 [#uses=1] - mul <4 x float> %152, %31 ; <<4 x float>>:153 [#uses=1] - mul <4 x float> %153, %32 ; <<4 x float>>:154 [#uses=1] - mul <4 x float> %5, %5 ; <<4 x float>>:155 [#uses=1] - mul <4 x float> %155, %6 ; <<4 x float>>:156 [#uses=1] - mul <4 x float> %156, %7 ; <<4 x float>>:157 [#uses=1] - mul <4 x float> %157, %8 ; <<4 x float>>:158 [#uses=1] - mul <4 x float> %158, %9 ; <<4 x float>>:159 [#uses=1] - mul <4 x float> %159, %10 ; <<4 x float>>:160 [#uses=1] - mul <4 x float> %160, %11 ; <<4 x float>>:161 [#uses=1] - mul <4 x float> %161, %12 ; <<4 x float>>:162 [#uses=1] - mul <4 x float> %162, %13 ; <<4 x float>>:163 [#uses=1] - mul <4 x float> %163, %14 ; <<4 x float>>:164 [#uses=1] - mul <4 x float> %164, %15 ; <<4 x float>>:165 [#uses=1] - mul <4 x float> %165, %16 ; <<4 x float>>:166 [#uses=1] - mul <4 x float> %166, %17 ; <<4 x float>>:167 [#uses=1] - mul <4 x float> %167, %18 ; <<4 x float>>:168 [#uses=1] - mul <4 x float> %168, %19 ; <<4 x float>>:169 [#uses=1] - mul <4 x float> %169, %20 ; <<4 x float>>:170 [#uses=1] - mul <4 x float> %170, %21 ; <<4 x float>>:171 [#uses=1] - mul <4 x float> %171, %22 ; <<4 x float>>:172 [#uses=1] - mul <4 x float> %172, %23 ; <<4 x float>>:173 [#uses=1] - mul <4 x float> %173, %24 ; <<4 x float>>:174 [#uses=1] - mul <4 x float> %174, %25 ; <<4 x float>>:175 [#uses=1] - mul <4 x float> %175, %26 ; <<4 x float>>:176 [#uses=1] - mul <4 x float> %176, %27 ; <<4 x float>>:177 [#uses=1] - mul <4 x float> %177, %28 ; <<4 x float>>:178 [#uses=1] - mul <4 x float> %178, %29 ; <<4 x float>>:179 [#uses=1] - mul <4 x float> %179, %30 ; <<4 x float>>:180 [#uses=1] - mul <4 x float> %180, %31 ; <<4 x float>>:181 [#uses=1] - mul <4 x float> %181, %32 ; <<4 x float>>:182 [#uses=1] - mul <4 x float> %6, %6 ; <<4 x float>>:183 [#uses=1] - mul <4 x float> %183, %7 ; <<4 x float>>:184 [#uses=1] - mul <4 x float> %184, %8 ; <<4 x float>>:185 [#uses=1] - mul <4 x float> %185, %9 ; <<4 x float>>:186 [#uses=1] - mul <4 x float> %186, %10 ; <<4 x float>>:187 [#uses=1] - mul <4 x float> %187, %11 ; <<4 x float>>:188 [#uses=1] - mul <4 x float> %188, %12 ; <<4 x float>>:189 [#uses=1] - mul <4 x float> %189, %13 ; <<4 x float>>:190 [#uses=1] - mul <4 x float> %190, %14 ; <<4 x float>>:191 [#uses=1] - mul <4 x float> %191, %15 ; <<4 x float>>:192 
[#uses=1] - mul <4 x float> %192, %16 ; <<4 x float>>:193 [#uses=1] - mul <4 x float> %193, %17 ; <<4 x float>>:194 [#uses=1] - mul <4 x float> %194, %18 ; <<4 x float>>:195 [#uses=1] - mul <4 x float> %195, %19 ; <<4 x float>>:196 [#uses=1] - mul <4 x float> %196, %20 ; <<4 x float>>:197 [#uses=1] - mul <4 x float> %197, %21 ; <<4 x float>>:198 [#uses=1] - mul <4 x float> %198, %22 ; <<4 x float>>:199 [#uses=1] - mul <4 x float> %199, %23 ; <<4 x float>>:200 [#uses=1] - mul <4 x float> %200, %24 ; <<4 x float>>:201 [#uses=1] - mul <4 x float> %201, %25 ; <<4 x float>>:202 [#uses=1] - mul <4 x float> %202, %26 ; <<4 x float>>:203 [#uses=1] - mul <4 x float> %203, %27 ; <<4 x float>>:204 [#uses=1] - mul <4 x float> %204, %28 ; <<4 x float>>:205 [#uses=1] - mul <4 x float> %205, %29 ; <<4 x float>>:206 [#uses=1] - mul <4 x float> %206, %30 ; <<4 x float>>:207 [#uses=1] - mul <4 x float> %207, %31 ; <<4 x float>>:208 [#uses=1] - mul <4 x float> %208, %32 ; <<4 x float>>:209 [#uses=1] - mul <4 x float> %7, %7 ; <<4 x float>>:210 [#uses=1] - mul <4 x float> %210, %8 ; <<4 x float>>:211 [#uses=1] - mul <4 x float> %211, %9 ; <<4 x float>>:212 [#uses=1] - mul <4 x float> %212, %10 ; <<4 x float>>:213 [#uses=1] - mul <4 x float> %213, %11 ; <<4 x float>>:214 [#uses=1] - mul <4 x float> %214, %12 ; <<4 x float>>:215 [#uses=1] - mul <4 x float> %215, %13 ; <<4 x float>>:216 [#uses=1] - mul <4 x float> %216, %14 ; <<4 x float>>:217 [#uses=1] - mul <4 x float> %217, %15 ; <<4 x float>>:218 [#uses=1] - mul <4 x float> %218, %16 ; <<4 x float>>:219 [#uses=1] - mul <4 x float> %219, %17 ; <<4 x float>>:220 [#uses=1] - mul <4 x float> %220, %18 ; <<4 x float>>:221 [#uses=1] - mul <4 x float> %221, %19 ; <<4 x float>>:222 [#uses=1] - mul <4 x float> %222, %20 ; <<4 x float>>:223 [#uses=1] - mul <4 x float> %223, %21 ; <<4 x float>>:224 [#uses=1] - mul <4 x float> %224, %22 ; <<4 x float>>:225 [#uses=1] - mul <4 x float> %225, %23 ; <<4 x float>>:226 [#uses=1] - mul <4 x float> %226, %24 ; <<4 x float>>:227 [#uses=1] - mul <4 x float> %227, %25 ; <<4 x float>>:228 [#uses=1] - mul <4 x float> %228, %26 ; <<4 x float>>:229 [#uses=1] - mul <4 x float> %229, %27 ; <<4 x float>>:230 [#uses=1] - mul <4 x float> %230, %28 ; <<4 x float>>:231 [#uses=1] - mul <4 x float> %231, %29 ; <<4 x float>>:232 [#uses=1] - mul <4 x float> %232, %30 ; <<4 x float>>:233 [#uses=1] - mul <4 x float> %233, %31 ; <<4 x float>>:234 [#uses=1] - mul <4 x float> %234, %32 ; <<4 x float>>:235 [#uses=1] - mul <4 x float> %8, %8 ; <<4 x float>>:236 [#uses=1] - mul <4 x float> %236, %9 ; <<4 x float>>:237 [#uses=1] - mul <4 x float> %237, %10 ; <<4 x float>>:238 [#uses=1] - mul <4 x float> %238, %11 ; <<4 x float>>:239 [#uses=1] - mul <4 x float> %239, %12 ; <<4 x float>>:240 [#uses=1] - mul <4 x float> %240, %13 ; <<4 x float>>:241 [#uses=1] - mul <4 x float> %241, %14 ; <<4 x float>>:242 [#uses=1] - mul <4 x float> %242, %15 ; <<4 x float>>:243 [#uses=1] - mul <4 x float> %243, %16 ; <<4 x float>>:244 [#uses=1] - mul <4 x float> %244, %17 ; <<4 x float>>:245 [#uses=1] - mul <4 x float> %245, %18 ; <<4 x float>>:246 [#uses=1] - mul <4 x float> %246, %19 ; <<4 x float>>:247 [#uses=1] - mul <4 x float> %247, %20 ; <<4 x float>>:248 [#uses=1] - mul <4 x float> %248, %21 ; <<4 x float>>:249 [#uses=1] - mul <4 x float> %249, %22 ; <<4 x float>>:250 [#uses=1] - mul <4 x float> %250, %23 ; <<4 x float>>:251 [#uses=1] - mul <4 x float> %251, %24 ; <<4 x float>>:252 [#uses=1] - mul <4 x float> %252, %25 ; <<4 x float>>:253 [#uses=1] - mul <4 x 
float> %253, %26 ; <<4 x float>>:254 [#uses=1] - mul <4 x float> %254, %27 ; <<4 x float>>:255 [#uses=1] - mul <4 x float> %255, %28 ; <<4 x float>>:256 [#uses=1] - mul <4 x float> %256, %29 ; <<4 x float>>:257 [#uses=1] - mul <4 x float> %257, %30 ; <<4 x float>>:258 [#uses=1] - mul <4 x float> %258, %31 ; <<4 x float>>:259 [#uses=1] - mul <4 x float> %259, %32 ; <<4 x float>>:260 [#uses=1] - mul <4 x float> %9, %9 ; <<4 x float>>:261 [#uses=1] - mul <4 x float> %261, %10 ; <<4 x float>>:262 [#uses=1] - mul <4 x float> %262, %11 ; <<4 x float>>:263 [#uses=1] - mul <4 x float> %263, %12 ; <<4 x float>>:264 [#uses=1] - mul <4 x float> %264, %13 ; <<4 x float>>:265 [#uses=1] - mul <4 x float> %265, %14 ; <<4 x float>>:266 [#uses=1] - mul <4 x float> %266, %15 ; <<4 x float>>:267 [#uses=1] - mul <4 x float> %267, %16 ; <<4 x float>>:268 [#uses=1] - mul <4 x float> %268, %17 ; <<4 x float>>:269 [#uses=1] - mul <4 x float> %269, %18 ; <<4 x float>>:270 [#uses=1] - mul <4 x float> %270, %19 ; <<4 x float>>:271 [#uses=1] - mul <4 x float> %271, %20 ; <<4 x float>>:272 [#uses=1] - mul <4 x float> %272, %21 ; <<4 x float>>:273 [#uses=1] - mul <4 x float> %273, %22 ; <<4 x float>>:274 [#uses=1] - mul <4 x float> %274, %23 ; <<4 x float>>:275 [#uses=1] - mul <4 x float> %275, %24 ; <<4 x float>>:276 [#uses=1] - mul <4 x float> %276, %25 ; <<4 x float>>:277 [#uses=1] - mul <4 x float> %277, %26 ; <<4 x float>>:278 [#uses=1] - mul <4 x float> %278, %27 ; <<4 x float>>:279 [#uses=1] - mul <4 x float> %279, %28 ; <<4 x float>>:280 [#uses=1] - mul <4 x float> %280, %29 ; <<4 x float>>:281 [#uses=1] - mul <4 x float> %281, %30 ; <<4 x float>>:282 [#uses=1] - mul <4 x float> %282, %31 ; <<4 x float>>:283 [#uses=1] - mul <4 x float> %283, %32 ; <<4 x float>>:284 [#uses=1] - mul <4 x float> %10, %10 ; <<4 x float>>:285 [#uses=1] - mul <4 x float> %285, %11 ; <<4 x float>>:286 [#uses=1] - mul <4 x float> %286, %12 ; <<4 x float>>:287 [#uses=1] - mul <4 x float> %287, %13 ; <<4 x float>>:288 [#uses=1] - mul <4 x float> %288, %14 ; <<4 x float>>:289 [#uses=1] - mul <4 x float> %289, %15 ; <<4 x float>>:290 [#uses=1] - mul <4 x float> %290, %16 ; <<4 x float>>:291 [#uses=1] - mul <4 x float> %291, %17 ; <<4 x float>>:292 [#uses=1] - mul <4 x float> %292, %18 ; <<4 x float>>:293 [#uses=1] - mul <4 x float> %293, %19 ; <<4 x float>>:294 [#uses=1] - mul <4 x float> %294, %20 ; <<4 x float>>:295 [#uses=1] - mul <4 x float> %295, %21 ; <<4 x float>>:296 [#uses=1] - mul <4 x float> %296, %22 ; <<4 x float>>:297 [#uses=1] - mul <4 x float> %297, %23 ; <<4 x float>>:298 [#uses=1] - mul <4 x float> %298, %24 ; <<4 x float>>:299 [#uses=1] - mul <4 x float> %299, %25 ; <<4 x float>>:300 [#uses=1] - mul <4 x float> %300, %26 ; <<4 x float>>:301 [#uses=1] - mul <4 x float> %301, %27 ; <<4 x float>>:302 [#uses=1] - mul <4 x float> %302, %28 ; <<4 x float>>:303 [#uses=1] - mul <4 x float> %303, %29 ; <<4 x float>>:304 [#uses=1] - mul <4 x float> %304, %30 ; <<4 x float>>:305 [#uses=1] - mul <4 x float> %305, %31 ; <<4 x float>>:306 [#uses=1] - mul <4 x float> %306, %32 ; <<4 x float>>:307 [#uses=1] - mul <4 x float> %11, %11 ; <<4 x float>>:308 [#uses=1] - mul <4 x float> %308, %12 ; <<4 x float>>:309 [#uses=1] - mul <4 x float> %309, %13 ; <<4 x float>>:310 [#uses=1] - mul <4 x float> %310, %14 ; <<4 x float>>:311 [#uses=1] - mul <4 x float> %311, %15 ; <<4 x float>>:312 [#uses=1] - mul <4 x float> %312, %16 ; <<4 x float>>:313 [#uses=1] - mul <4 x float> %313, %17 ; <<4 x float>>:314 [#uses=1] - mul <4 x float> %314, %18 ; 
<<4 x float>>:315 [#uses=1] - mul <4 x float> %315, %19 ; <<4 x float>>:316 [#uses=1] - mul <4 x float> %316, %20 ; <<4 x float>>:317 [#uses=1] - mul <4 x float> %317, %21 ; <<4 x float>>:318 [#uses=1] - mul <4 x float> %318, %22 ; <<4 x float>>:319 [#uses=1] - mul <4 x float> %319, %23 ; <<4 x float>>:320 [#uses=1] - mul <4 x float> %320, %24 ; <<4 x float>>:321 [#uses=1] - mul <4 x float> %321, %25 ; <<4 x float>>:322 [#uses=1] - mul <4 x float> %322, %26 ; <<4 x float>>:323 [#uses=1] - mul <4 x float> %323, %27 ; <<4 x float>>:324 [#uses=1] - mul <4 x float> %324, %28 ; <<4 x float>>:325 [#uses=1] - mul <4 x float> %325, %29 ; <<4 x float>>:326 [#uses=1] - mul <4 x float> %326, %30 ; <<4 x float>>:327 [#uses=1] - mul <4 x float> %327, %31 ; <<4 x float>>:328 [#uses=1] - mul <4 x float> %328, %32 ; <<4 x float>>:329 [#uses=1] - mul <4 x float> %12, %12 ; <<4 x float>>:330 [#uses=1] - mul <4 x float> %330, %13 ; <<4 x float>>:331 [#uses=1] - mul <4 x float> %331, %14 ; <<4 x float>>:332 [#uses=1] - mul <4 x float> %332, %15 ; <<4 x float>>:333 [#uses=1] - mul <4 x float> %333, %16 ; <<4 x float>>:334 [#uses=1] - mul <4 x float> %334, %17 ; <<4 x float>>:335 [#uses=1] - mul <4 x float> %335, %18 ; <<4 x float>>:336 [#uses=1] - mul <4 x float> %336, %19 ; <<4 x float>>:337 [#uses=1] - mul <4 x float> %337, %20 ; <<4 x float>>:338 [#uses=1] - mul <4 x float> %338, %21 ; <<4 x float>>:339 [#uses=1] - mul <4 x float> %339, %22 ; <<4 x float>>:340 [#uses=1] - mul <4 x float> %340, %23 ; <<4 x float>>:341 [#uses=1] - mul <4 x float> %341, %24 ; <<4 x float>>:342 [#uses=1] - mul <4 x float> %342, %25 ; <<4 x float>>:343 [#uses=1] - mul <4 x float> %343, %26 ; <<4 x float>>:344 [#uses=1] - mul <4 x float> %344, %27 ; <<4 x float>>:345 [#uses=1] - mul <4 x float> %345, %28 ; <<4 x float>>:346 [#uses=1] - mul <4 x float> %346, %29 ; <<4 x float>>:347 [#uses=1] - mul <4 x float> %347, %30 ; <<4 x float>>:348 [#uses=1] - mul <4 x float> %348, %31 ; <<4 x float>>:349 [#uses=1] - mul <4 x float> %349, %32 ; <<4 x float>>:350 [#uses=1] - mul <4 x float> %13, %13 ; <<4 x float>>:351 [#uses=1] - mul <4 x float> %351, %14 ; <<4 x float>>:352 [#uses=1] - mul <4 x float> %352, %15 ; <<4 x float>>:353 [#uses=1] - mul <4 x float> %353, %16 ; <<4 x float>>:354 [#uses=1] - mul <4 x float> %354, %17 ; <<4 x float>>:355 [#uses=1] - mul <4 x float> %355, %18 ; <<4 x float>>:356 [#uses=1] - mul <4 x float> %356, %19 ; <<4 x float>>:357 [#uses=1] - mul <4 x float> %357, %20 ; <<4 x float>>:358 [#uses=1] - mul <4 x float> %358, %21 ; <<4 x float>>:359 [#uses=1] - mul <4 x float> %359, %22 ; <<4 x float>>:360 [#uses=1] - mul <4 x float> %360, %23 ; <<4 x float>>:361 [#uses=1] - mul <4 x float> %361, %24 ; <<4 x float>>:362 [#uses=1] - mul <4 x float> %362, %25 ; <<4 x float>>:363 [#uses=1] - mul <4 x float> %363, %26 ; <<4 x float>>:364 [#uses=1] - mul <4 x float> %364, %27 ; <<4 x float>>:365 [#uses=1] - mul <4 x float> %365, %28 ; <<4 x float>>:366 [#uses=1] - mul <4 x float> %366, %29 ; <<4 x float>>:367 [#uses=1] - mul <4 x float> %367, %30 ; <<4 x float>>:368 [#uses=1] - mul <4 x float> %368, %31 ; <<4 x float>>:369 [#uses=1] - mul <4 x float> %369, %32 ; <<4 x float>>:370 [#uses=1] - mul <4 x float> %14, %14 ; <<4 x float>>:371 [#uses=1] - mul <4 x float> %371, %15 ; <<4 x float>>:372 [#uses=1] - mul <4 x float> %372, %16 ; <<4 x float>>:373 [#uses=1] - mul <4 x float> %373, %17 ; <<4 x float>>:374 [#uses=1] - mul <4 x float> %374, %18 ; <<4 x float>>:375 [#uses=1] - mul <4 x float> %375, %19 ; <<4 x float>>:376 
[#uses=1] - mul <4 x float> %376, %20 ; <<4 x float>>:377 [#uses=1] - mul <4 x float> %377, %21 ; <<4 x float>>:378 [#uses=1] - mul <4 x float> %378, %22 ; <<4 x float>>:379 [#uses=1] - mul <4 x float> %379, %23 ; <<4 x float>>:380 [#uses=1] - mul <4 x float> %380, %24 ; <<4 x float>>:381 [#uses=1] - mul <4 x float> %381, %25 ; <<4 x float>>:382 [#uses=1] - mul <4 x float> %382, %26 ; <<4 x float>>:383 [#uses=1] - mul <4 x float> %383, %27 ; <<4 x float>>:384 [#uses=1] - mul <4 x float> %384, %28 ; <<4 x float>>:385 [#uses=1] - mul <4 x float> %385, %29 ; <<4 x float>>:386 [#uses=1] - mul <4 x float> %386, %30 ; <<4 x float>>:387 [#uses=1] - mul <4 x float> %387, %31 ; <<4 x float>>:388 [#uses=1] - mul <4 x float> %388, %32 ; <<4 x float>>:389 [#uses=1] - mul <4 x float> %15, %15 ; <<4 x float>>:390 [#uses=1] - mul <4 x float> %390, %16 ; <<4 x float>>:391 [#uses=1] - mul <4 x float> %391, %17 ; <<4 x float>>:392 [#uses=1] - mul <4 x float> %392, %18 ; <<4 x float>>:393 [#uses=1] - mul <4 x float> %393, %19 ; <<4 x float>>:394 [#uses=1] - mul <4 x float> %394, %20 ; <<4 x float>>:395 [#uses=1] - mul <4 x float> %395, %21 ; <<4 x float>>:396 [#uses=1] - mul <4 x float> %396, %22 ; <<4 x float>>:397 [#uses=1] - mul <4 x float> %397, %23 ; <<4 x float>>:398 [#uses=1] - mul <4 x float> %398, %24 ; <<4 x float>>:399 [#uses=1] - mul <4 x float> %399, %25 ; <<4 x float>>:400 [#uses=1] - mul <4 x float> %400, %26 ; <<4 x float>>:401 [#uses=1] - mul <4 x float> %401, %27 ; <<4 x float>>:402 [#uses=1] - mul <4 x float> %402, %28 ; <<4 x float>>:403 [#uses=1] - mul <4 x float> %403, %29 ; <<4 x float>>:404 [#uses=1] - mul <4 x float> %404, %30 ; <<4 x float>>:405 [#uses=1] - mul <4 x float> %405, %31 ; <<4 x float>>:406 [#uses=1] - mul <4 x float> %406, %32 ; <<4 x float>>:407 [#uses=1] - mul <4 x float> %16, %16 ; <<4 x float>>:408 [#uses=1] - mul <4 x float> %408, %17 ; <<4 x float>>:409 [#uses=1] - mul <4 x float> %409, %18 ; <<4 x float>>:410 [#uses=1] - mul <4 x float> %410, %19 ; <<4 x float>>:411 [#uses=1] - mul <4 x float> %411, %20 ; <<4 x float>>:412 [#uses=1] - mul <4 x float> %412, %21 ; <<4 x float>>:413 [#uses=1] - mul <4 x float> %413, %22 ; <<4 x float>>:414 [#uses=1] - mul <4 x float> %414, %23 ; <<4 x float>>:415 [#uses=1] - mul <4 x float> %415, %24 ; <<4 x float>>:416 [#uses=1] - mul <4 x float> %416, %25 ; <<4 x float>>:417 [#uses=1] - mul <4 x float> %417, %26 ; <<4 x float>>:418 [#uses=1] - mul <4 x float> %418, %27 ; <<4 x float>>:419 [#uses=1] - mul <4 x float> %419, %28 ; <<4 x float>>:420 [#uses=1] - mul <4 x float> %420, %29 ; <<4 x float>>:421 [#uses=1] - mul <4 x float> %421, %30 ; <<4 x float>>:422 [#uses=1] - mul <4 x float> %422, %31 ; <<4 x float>>:423 [#uses=1] - mul <4 x float> %423, %32 ; <<4 x float>>:424 [#uses=1] - mul <4 x float> %17, %17 ; <<4 x float>>:425 [#uses=1] - mul <4 x float> %425, %18 ; <<4 x float>>:426 [#uses=1] - mul <4 x float> %426, %19 ; <<4 x float>>:427 [#uses=1] - mul <4 x float> %427, %20 ; <<4 x float>>:428 [#uses=1] - mul <4 x float> %428, %21 ; <<4 x float>>:429 [#uses=1] - mul <4 x float> %429, %22 ; <<4 x float>>:430 [#uses=1] - mul <4 x float> %430, %23 ; <<4 x float>>:431 [#uses=1] - mul <4 x float> %431, %24 ; <<4 x float>>:432 [#uses=1] - mul <4 x float> %432, %25 ; <<4 x float>>:433 [#uses=1] - mul <4 x float> %433, %26 ; <<4 x float>>:434 [#uses=1] - mul <4 x float> %434, %27 ; <<4 x float>>:435 [#uses=1] - mul <4 x float> %435, %28 ; <<4 x float>>:436 [#uses=1] - mul <4 x float> %436, %29 ; <<4 x float>>:437 [#uses=1] - mul <4 
x float> %437, %30 ; <<4 x float>>:438 [#uses=1] - mul <4 x float> %438, %31 ; <<4 x float>>:439 [#uses=1] - mul <4 x float> %439, %32 ; <<4 x float>>:440 [#uses=1] - mul <4 x float> %18, %18 ; <<4 x float>>:441 [#uses=1] - mul <4 x float> %441, %19 ; <<4 x float>>:442 [#uses=1] - mul <4 x float> %442, %20 ; <<4 x float>>:443 [#uses=1] - mul <4 x float> %443, %21 ; <<4 x float>>:444 [#uses=1] - mul <4 x float> %444, %22 ; <<4 x float>>:445 [#uses=1] - mul <4 x float> %445, %23 ; <<4 x float>>:446 [#uses=1] - mul <4 x float> %446, %24 ; <<4 x float>>:447 [#uses=1] - mul <4 x float> %447, %25 ; <<4 x float>>:448 [#uses=1] - mul <4 x float> %448, %26 ; <<4 x float>>:449 [#uses=1] - mul <4 x float> %449, %27 ; <<4 x float>>:450 [#uses=1] - mul <4 x float> %450, %28 ; <<4 x float>>:451 [#uses=1] - mul <4 x float> %451, %29 ; <<4 x float>>:452 [#uses=1] - mul <4 x float> %452, %30 ; <<4 x float>>:453 [#uses=1] - mul <4 x float> %453, %31 ; <<4 x float>>:454 [#uses=1] - mul <4 x float> %454, %32 ; <<4 x float>>:455 [#uses=1] - mul <4 x float> %19, %19 ; <<4 x float>>:456 [#uses=1] - mul <4 x float> %456, %20 ; <<4 x float>>:457 [#uses=1] - mul <4 x float> %457, %21 ; <<4 x float>>:458 [#uses=1] - mul <4 x float> %458, %22 ; <<4 x float>>:459 [#uses=1] - mul <4 x float> %459, %23 ; <<4 x float>>:460 [#uses=1] - mul <4 x float> %460, %24 ; <<4 x float>>:461 [#uses=1] - mul <4 x float> %461, %25 ; <<4 x float>>:462 [#uses=1] - mul <4 x float> %462, %26 ; <<4 x float>>:463 [#uses=1] - mul <4 x float> %463, %27 ; <<4 x float>>:464 [#uses=1] - mul <4 x float> %464, %28 ; <<4 x float>>:465 [#uses=1] - mul <4 x float> %465, %29 ; <<4 x float>>:466 [#uses=1] - mul <4 x float> %466, %30 ; <<4 x float>>:467 [#uses=1] - mul <4 x float> %467, %31 ; <<4 x float>>:468 [#uses=1] - mul <4 x float> %468, %32 ; <<4 x float>>:469 [#uses=1] - mul <4 x float> %20, %20 ; <<4 x float>>:470 [#uses=1] - mul <4 x float> %470, %21 ; <<4 x float>>:471 [#uses=1] - mul <4 x float> %471, %22 ; <<4 x float>>:472 [#uses=1] - mul <4 x float> %472, %23 ; <<4 x float>>:473 [#uses=1] - mul <4 x float> %473, %24 ; <<4 x float>>:474 [#uses=1] - mul <4 x float> %474, %25 ; <<4 x float>>:475 [#uses=1] - mul <4 x float> %475, %26 ; <<4 x float>>:476 [#uses=1] - mul <4 x float> %476, %27 ; <<4 x float>>:477 [#uses=1] - mul <4 x float> %477, %28 ; <<4 x float>>:478 [#uses=1] - mul <4 x float> %478, %29 ; <<4 x float>>:479 [#uses=1] - mul <4 x float> %479, %30 ; <<4 x float>>:480 [#uses=1] - mul <4 x float> %480, %31 ; <<4 x float>>:481 [#uses=1] - mul <4 x float> %481, %32 ; <<4 x float>>:482 [#uses=1] - mul <4 x float> %21, %21 ; <<4 x float>>:483 [#uses=1] - mul <4 x float> %483, %22 ; <<4 x float>>:484 [#uses=1] - mul <4 x float> %484, %23 ; <<4 x float>>:485 [#uses=1] - mul <4 x float> %485, %24 ; <<4 x float>>:486 [#uses=1] - mul <4 x float> %486, %25 ; <<4 x float>>:487 [#uses=1] - mul <4 x float> %487, %26 ; <<4 x float>>:488 [#uses=1] - mul <4 x float> %488, %27 ; <<4 x float>>:489 [#uses=1] - mul <4 x float> %489, %28 ; <<4 x float>>:490 [#uses=1] - mul <4 x float> %490, %29 ; <<4 x float>>:491 [#uses=1] - mul <4 x float> %491, %30 ; <<4 x float>>:492 [#uses=1] - mul <4 x float> %492, %31 ; <<4 x float>>:493 [#uses=1] - mul <4 x float> %493, %32 ; <<4 x float>>:494 [#uses=1] - mul <4 x float> %22, %22 ; <<4 x float>>:495 [#uses=1] - mul <4 x float> %495, %23 ; <<4 x float>>:496 [#uses=1] - mul <4 x float> %496, %24 ; <<4 x float>>:497 [#uses=1] - mul <4 x float> %497, %25 ; <<4 x float>>:498 [#uses=1] - mul <4 x float> %498, %26 ; 
<<4 x float>>:499 [#uses=1] - mul <4 x float> %499, %27 ; <<4 x float>>:500 [#uses=1] - mul <4 x float> %500, %28 ; <<4 x float>>:501 [#uses=1] - mul <4 x float> %501, %29 ; <<4 x float>>:502 [#uses=1] - mul <4 x float> %502, %30 ; <<4 x float>>:503 [#uses=1] - mul <4 x float> %503, %31 ; <<4 x float>>:504 [#uses=1] - mul <4 x float> %504, %32 ; <<4 x float>>:505 [#uses=1] - mul <4 x float> %23, %23 ; <<4 x float>>:506 [#uses=1] - mul <4 x float> %506, %24 ; <<4 x float>>:507 [#uses=1] - mul <4 x float> %507, %25 ; <<4 x float>>:508 [#uses=1] - mul <4 x float> %508, %26 ; <<4 x float>>:509 [#uses=1] - mul <4 x float> %509, %27 ; <<4 x float>>:510 [#uses=1] - mul <4 x float> %510, %28 ; <<4 x float>>:511 [#uses=1] - mul <4 x float> %511, %29 ; <<4 x float>>:512 [#uses=1] - mul <4 x float> %512, %30 ; <<4 x float>>:513 [#uses=1] - mul <4 x float> %513, %31 ; <<4 x float>>:514 [#uses=1] - mul <4 x float> %514, %32 ; <<4 x float>>:515 [#uses=1] - mul <4 x float> %24, %24 ; <<4 x float>>:516 [#uses=1] - mul <4 x float> %516, %25 ; <<4 x float>>:517 [#uses=1] - mul <4 x float> %517, %26 ; <<4 x float>>:518 [#uses=1] - mul <4 x float> %518, %27 ; <<4 x float>>:519 [#uses=1] - mul <4 x float> %519, %28 ; <<4 x float>>:520 [#uses=1] - mul <4 x float> %520, %29 ; <<4 x float>>:521 [#uses=1] - mul <4 x float> %521, %30 ; <<4 x float>>:522 [#uses=1] - mul <4 x float> %522, %31 ; <<4 x float>>:523 [#uses=1] - mul <4 x float> %523, %32 ; <<4 x float>>:524 [#uses=1] - mul <4 x float> %25, %25 ; <<4 x float>>:525 [#uses=1] - mul <4 x float> %525, %26 ; <<4 x float>>:526 [#uses=1] - mul <4 x float> %526, %27 ; <<4 x float>>:527 [#uses=1] - mul <4 x float> %527, %28 ; <<4 x float>>:528 [#uses=1] - mul <4 x float> %528, %29 ; <<4 x float>>:529 [#uses=1] - mul <4 x float> %529, %30 ; <<4 x float>>:530 [#uses=1] - mul <4 x float> %530, %31 ; <<4 x float>>:531 [#uses=1] - mul <4 x float> %531, %32 ; <<4 x float>>:532 [#uses=1] - mul <4 x float> %26, %26 ; <<4 x float>>:533 [#uses=1] - mul <4 x float> %533, %27 ; <<4 x float>>:534 [#uses=1] - mul <4 x float> %534, %28 ; <<4 x float>>:535 [#uses=1] - mul <4 x float> %535, %29 ; <<4 x float>>:536 [#uses=1] - mul <4 x float> %536, %30 ; <<4 x float>>:537 [#uses=1] - mul <4 x float> %537, %31 ; <<4 x float>>:538 [#uses=1] - mul <4 x float> %538, %32 ; <<4 x float>>:539 [#uses=1] - mul <4 x float> %27, %27 ; <<4 x float>>:540 [#uses=1] - mul <4 x float> %540, %28 ; <<4 x float>>:541 [#uses=1] - mul <4 x float> %541, %29 ; <<4 x float>>:542 [#uses=1] - mul <4 x float> %542, %30 ; <<4 x float>>:543 [#uses=1] - mul <4 x float> %543, %31 ; <<4 x float>>:544 [#uses=1] - mul <4 x float> %544, %32 ; <<4 x float>>:545 [#uses=1] - mul <4 x float> %28, %28 ; <<4 x float>>:546 [#uses=1] - mul <4 x float> %546, %29 ; <<4 x float>>:547 [#uses=1] - mul <4 x float> %547, %30 ; <<4 x float>>:548 [#uses=1] - mul <4 x float> %548, %31 ; <<4 x float>>:549 [#uses=1] - mul <4 x float> %549, %32 ; <<4 x float>>:550 [#uses=1] - mul <4 x float> %29, %29 ; <<4 x float>>:551 [#uses=1] - mul <4 x float> %551, %30 ; <<4 x float>>:552 [#uses=1] - mul <4 x float> %552, %31 ; <<4 x float>>:553 [#uses=1] - mul <4 x float> %553, %32 ; <<4 x float>>:554 [#uses=1] - mul <4 x float> %30, %30 ; <<4 x float>>:555 [#uses=1] - mul <4 x float> %555, %31 ; <<4 x float>>:556 [#uses=1] - mul <4 x float> %556, %32 ; <<4 x float>>:557 [#uses=1] - mul <4 x float> %31, %31 ; <<4 x float>>:558 [#uses=1] - mul <4 x float> %558, %32 ; <<4 x float>>:559 [#uses=1] - mul <4 x float> %32, %32 ; <<4 x float>>:560 
[#uses=1] - add <4 x float> %64, %64 ; <<4 x float>>:561 [#uses=1] - add <4 x float> %561, %64 ; <<4 x float>>:562 [#uses=1] - add <4 x float> %562, %95 ; <<4 x float>>:563 [#uses=1] - add <4 x float> %563, %125 ; <<4 x float>>:564 [#uses=1] - add <4 x float> %564, %154 ; <<4 x float>>:565 [#uses=1] - add <4 x float> %565, %182 ; <<4 x float>>:566 [#uses=1] - add <4 x float> %566, %209 ; <<4 x float>>:567 [#uses=1] - add <4 x float> %567, %235 ; <<4 x float>>:568 [#uses=1] - add <4 x float> %568, %260 ; <<4 x float>>:569 [#uses=1] - add <4 x float> %569, %284 ; <<4 x float>>:570 [#uses=1] - add <4 x float> %570, %307 ; <<4 x float>>:571 [#uses=1] - add <4 x float> %571, %329 ; <<4 x float>>:572 [#uses=1] - add <4 x float> %572, %350 ; <<4 x float>>:573 [#uses=1] - add <4 x float> %573, %370 ; <<4 x float>>:574 [#uses=1] - add <4 x float> %574, %389 ; <<4 x float>>:575 [#uses=1] - add <4 x float> %575, %407 ; <<4 x float>>:576 [#uses=1] - add <4 x float> %576, %424 ; <<4 x float>>:577 [#uses=1] - add <4 x float> %577, %440 ; <<4 x float>>:578 [#uses=1] - add <4 x float> %578, %455 ; <<4 x float>>:579 [#uses=1] - add <4 x float> %579, %469 ; <<4 x float>>:580 [#uses=1] - add <4 x float> %580, %482 ; <<4 x float>>:581 [#uses=1] - add <4 x float> %581, %494 ; <<4 x float>>:582 [#uses=1] - add <4 x float> %582, %505 ; <<4 x float>>:583 [#uses=1] - add <4 x float> %583, %515 ; <<4 x float>>:584 [#uses=1] - add <4 x float> %584, %524 ; <<4 x float>>:585 [#uses=1] - add <4 x float> %585, %532 ; <<4 x float>>:586 [#uses=1] - add <4 x float> %586, %539 ; <<4 x float>>:587 [#uses=1] - add <4 x float> %587, %545 ; <<4 x float>>:588 [#uses=1] - add <4 x float> %588, %550 ; <<4 x float>>:589 [#uses=1] - add <4 x float> %589, %554 ; <<4 x float>>:590 [#uses=1] - add <4 x float> %590, %557 ; <<4 x float>>:591 [#uses=1] - add <4 x float> %591, %559 ; <<4 x float>>:592 [#uses=1] - add <4 x float> %592, %560 ; <<4 x float>>:593 [#uses=1] + fmul <4 x float> %1, %1 ; <<4 x float>>:33 [#uses=1] + fmul <4 x float> %33, %2 ; <<4 x float>>:34 [#uses=1] + fmul <4 x float> %34, %3 ; <<4 x float>>:35 [#uses=1] + fmul <4 x float> %35, %4 ; <<4 x float>>:36 [#uses=1] + fmul <4 x float> %36, %5 ; <<4 x float>>:37 [#uses=1] + fmul <4 x float> %37, %6 ; <<4 x float>>:38 [#uses=1] + fmul <4 x float> %38, %7 ; <<4 x float>>:39 [#uses=1] + fmul <4 x float> %39, %8 ; <<4 x float>>:40 [#uses=1] + fmul <4 x float> %40, %9 ; <<4 x float>>:41 [#uses=1] + fmul <4 x float> %41, %10 ; <<4 x float>>:42 [#uses=1] + fmul <4 x float> %42, %11 ; <<4 x float>>:43 [#uses=1] + fmul <4 x float> %43, %12 ; <<4 x float>>:44 [#uses=1] + fmul <4 x float> %44, %13 ; <<4 x float>>:45 [#uses=1] + fmul <4 x float> %45, %14 ; <<4 x float>>:46 [#uses=1] + fmul <4 x float> %46, %15 ; <<4 x float>>:47 [#uses=1] + fmul <4 x float> %47, %16 ; <<4 x float>>:48 [#uses=1] + fmul <4 x float> %48, %17 ; <<4 x float>>:49 [#uses=1] + fmul <4 x float> %49, %18 ; <<4 x float>>:50 [#uses=1] + fmul <4 x float> %50, %19 ; <<4 x float>>:51 [#uses=1] + fmul <4 x float> %51, %20 ; <<4 x float>>:52 [#uses=1] + fmul <4 x float> %52, %21 ; <<4 x float>>:53 [#uses=1] + fmul <4 x float> %53, %22 ; <<4 x float>>:54 [#uses=1] + fmul <4 x float> %54, %23 ; <<4 x float>>:55 [#uses=1] + fmul <4 x float> %55, %24 ; <<4 x float>>:56 [#uses=1] + fmul <4 x float> %56, %25 ; <<4 x float>>:57 [#uses=1] + fmul <4 x float> %57, %26 ; <<4 x float>>:58 [#uses=1] + fmul <4 x float> %58, %27 ; <<4 x float>>:59 [#uses=1] + fmul <4 x float> %59, %28 ; <<4 x float>>:60 [#uses=1] + fmul <4 x 
float> %60, %29 ; <<4 x float>>:61 [#uses=1] + fmul <4 x float> %61, %30 ; <<4 x float>>:62 [#uses=1] + fmul <4 x float> %62, %31 ; <<4 x float>>:63 [#uses=1] + fmul <4 x float> %63, %32 ; <<4 x float>>:64 [#uses=3] + fmul <4 x float> %2, %2 ; <<4 x float>>:65 [#uses=1] + fmul <4 x float> %65, %3 ; <<4 x float>>:66 [#uses=1] + fmul <4 x float> %66, %4 ; <<4 x float>>:67 [#uses=1] + fmul <4 x float> %67, %5 ; <<4 x float>>:68 [#uses=1] + fmul <4 x float> %68, %6 ; <<4 x float>>:69 [#uses=1] + fmul <4 x float> %69, %7 ; <<4 x float>>:70 [#uses=1] + fmul <4 x float> %70, %8 ; <<4 x float>>:71 [#uses=1] + fmul <4 x float> %71, %9 ; <<4 x float>>:72 [#uses=1] + fmul <4 x float> %72, %10 ; <<4 x float>>:73 [#uses=1] + fmul <4 x float> %73, %11 ; <<4 x float>>:74 [#uses=1] + fmul <4 x float> %74, %12 ; <<4 x float>>:75 [#uses=1] + fmul <4 x float> %75, %13 ; <<4 x float>>:76 [#uses=1] + fmul <4 x float> %76, %14 ; <<4 x float>>:77 [#uses=1] + fmul <4 x float> %77, %15 ; <<4 x float>>:78 [#uses=1] + fmul <4 x float> %78, %16 ; <<4 x float>>:79 [#uses=1] + fmul <4 x float> %79, %17 ; <<4 x float>>:80 [#uses=1] + fmul <4 x float> %80, %18 ; <<4 x float>>:81 [#uses=1] + fmul <4 x float> %81, %19 ; <<4 x float>>:82 [#uses=1] + fmul <4 x float> %82, %20 ; <<4 x float>>:83 [#uses=1] + fmul <4 x float> %83, %21 ; <<4 x float>>:84 [#uses=1] + fmul <4 x float> %84, %22 ; <<4 x float>>:85 [#uses=1] + fmul <4 x float> %85, %23 ; <<4 x float>>:86 [#uses=1] + fmul <4 x float> %86, %24 ; <<4 x float>>:87 [#uses=1] + fmul <4 x float> %87, %25 ; <<4 x float>>:88 [#uses=1] + fmul <4 x float> %88, %26 ; <<4 x float>>:89 [#uses=1] + fmul <4 x float> %89, %27 ; <<4 x float>>:90 [#uses=1] + fmul <4 x float> %90, %28 ; <<4 x float>>:91 [#uses=1] + fmul <4 x float> %91, %29 ; <<4 x float>>:92 [#uses=1] + fmul <4 x float> %92, %30 ; <<4 x float>>:93 [#uses=1] + fmul <4 x float> %93, %31 ; <<4 x float>>:94 [#uses=1] + fmul <4 x float> %94, %32 ; <<4 x float>>:95 [#uses=1] + fmul <4 x float> %3, %3 ; <<4 x float>>:96 [#uses=1] + fmul <4 x float> %96, %4 ; <<4 x float>>:97 [#uses=1] + fmul <4 x float> %97, %5 ; <<4 x float>>:98 [#uses=1] + fmul <4 x float> %98, %6 ; <<4 x float>>:99 [#uses=1] + fmul <4 x float> %99, %7 ; <<4 x float>>:100 [#uses=1] + fmul <4 x float> %100, %8 ; <<4 x float>>:101 [#uses=1] + fmul <4 x float> %101, %9 ; <<4 x float>>:102 [#uses=1] + fmul <4 x float> %102, %10 ; <<4 x float>>:103 [#uses=1] + fmul <4 x float> %103, %11 ; <<4 x float>>:104 [#uses=1] + fmul <4 x float> %104, %12 ; <<4 x float>>:105 [#uses=1] + fmul <4 x float> %105, %13 ; <<4 x float>>:106 [#uses=1] + fmul <4 x float> %106, %14 ; <<4 x float>>:107 [#uses=1] + fmul <4 x float> %107, %15 ; <<4 x float>>:108 [#uses=1] + fmul <4 x float> %108, %16 ; <<4 x float>>:109 [#uses=1] + fmul <4 x float> %109, %17 ; <<4 x float>>:110 [#uses=1] + fmul <4 x float> %110, %18 ; <<4 x float>>:111 [#uses=1] + fmul <4 x float> %111, %19 ; <<4 x float>>:112 [#uses=1] + fmul <4 x float> %112, %20 ; <<4 x float>>:113 [#uses=1] + fmul <4 x float> %113, %21 ; <<4 x float>>:114 [#uses=1] + fmul <4 x float> %114, %22 ; <<4 x float>>:115 [#uses=1] + fmul <4 x float> %115, %23 ; <<4 x float>>:116 [#uses=1] + fmul <4 x float> %116, %24 ; <<4 x float>>:117 [#uses=1] + fmul <4 x float> %117, %25 ; <<4 x float>>:118 [#uses=1] + fmul <4 x float> %118, %26 ; <<4 x float>>:119 [#uses=1] + fmul <4 x float> %119, %27 ; <<4 x float>>:120 [#uses=1] + fmul <4 x float> %120, %28 ; <<4 x float>>:121 [#uses=1] + fmul <4 x float> %121, %29 ; <<4 x float>>:122 [#uses=1] + 
fmul <4 x float> %122, %30 ; <<4 x float>>:123 [#uses=1] + fmul <4 x float> %123, %31 ; <<4 x float>>:124 [#uses=1] + fmul <4 x float> %124, %32 ; <<4 x float>>:125 [#uses=1] + fmul <4 x float> %4, %4 ; <<4 x float>>:126 [#uses=1] + fmul <4 x float> %126, %5 ; <<4 x float>>:127 [#uses=1] + fmul <4 x float> %127, %6 ; <<4 x float>>:128 [#uses=1] + fmul <4 x float> %128, %7 ; <<4 x float>>:129 [#uses=1] + fmul <4 x float> %129, %8 ; <<4 x float>>:130 [#uses=1] + fmul <4 x float> %130, %9 ; <<4 x float>>:131 [#uses=1] + fmul <4 x float> %131, %10 ; <<4 x float>>:132 [#uses=1] + fmul <4 x float> %132, %11 ; <<4 x float>>:133 [#uses=1] + fmul <4 x float> %133, %12 ; <<4 x float>>:134 [#uses=1] + fmul <4 x float> %134, %13 ; <<4 x float>>:135 [#uses=1] + fmul <4 x float> %135, %14 ; <<4 x float>>:136 [#uses=1] + fmul <4 x float> %136, %15 ; <<4 x float>>:137 [#uses=1] + fmul <4 x float> %137, %16 ; <<4 x float>>:138 [#uses=1] + fmul <4 x float> %138, %17 ; <<4 x float>>:139 [#uses=1] + fmul <4 x float> %139, %18 ; <<4 x float>>:140 [#uses=1] + fmul <4 x float> %140, %19 ; <<4 x float>>:141 [#uses=1] + fmul <4 x float> %141, %20 ; <<4 x float>>:142 [#uses=1] + fmul <4 x float> %142, %21 ; <<4 x float>>:143 [#uses=1] + fmul <4 x float> %143, %22 ; <<4 x float>>:144 [#uses=1] + fmul <4 x float> %144, %23 ; <<4 x float>>:145 [#uses=1] + fmul <4 x float> %145, %24 ; <<4 x float>>:146 [#uses=1] + fmul <4 x float> %146, %25 ; <<4 x float>>:147 [#uses=1] + fmul <4 x float> %147, %26 ; <<4 x float>>:148 [#uses=1] + fmul <4 x float> %148, %27 ; <<4 x float>>:149 [#uses=1] + fmul <4 x float> %149, %28 ; <<4 x float>>:150 [#uses=1] + fmul <4 x float> %150, %29 ; <<4 x float>>:151 [#uses=1] + fmul <4 x float> %151, %30 ; <<4 x float>>:152 [#uses=1] + fmul <4 x float> %152, %31 ; <<4 x float>>:153 [#uses=1] + fmul <4 x float> %153, %32 ; <<4 x float>>:154 [#uses=1] + fmul <4 x float> %5, %5 ; <<4 x float>>:155 [#uses=1] + fmul <4 x float> %155, %6 ; <<4 x float>>:156 [#uses=1] + fmul <4 x float> %156, %7 ; <<4 x float>>:157 [#uses=1] + fmul <4 x float> %157, %8 ; <<4 x float>>:158 [#uses=1] + fmul <4 x float> %158, %9 ; <<4 x float>>:159 [#uses=1] + fmul <4 x float> %159, %10 ; <<4 x float>>:160 [#uses=1] + fmul <4 x float> %160, %11 ; <<4 x float>>:161 [#uses=1] + fmul <4 x float> %161, %12 ; <<4 x float>>:162 [#uses=1] + fmul <4 x float> %162, %13 ; <<4 x float>>:163 [#uses=1] + fmul <4 x float> %163, %14 ; <<4 x float>>:164 [#uses=1] + fmul <4 x float> %164, %15 ; <<4 x float>>:165 [#uses=1] + fmul <4 x float> %165, %16 ; <<4 x float>>:166 [#uses=1] + fmul <4 x float> %166, %17 ; <<4 x float>>:167 [#uses=1] + fmul <4 x float> %167, %18 ; <<4 x float>>:168 [#uses=1] + fmul <4 x float> %168, %19 ; <<4 x float>>:169 [#uses=1] + fmul <4 x float> %169, %20 ; <<4 x float>>:170 [#uses=1] + fmul <4 x float> %170, %21 ; <<4 x float>>:171 [#uses=1] + fmul <4 x float> %171, %22 ; <<4 x float>>:172 [#uses=1] + fmul <4 x float> %172, %23 ; <<4 x float>>:173 [#uses=1] + fmul <4 x float> %173, %24 ; <<4 x float>>:174 [#uses=1] + fmul <4 x float> %174, %25 ; <<4 x float>>:175 [#uses=1] + fmul <4 x float> %175, %26 ; <<4 x float>>:176 [#uses=1] + fmul <4 x float> %176, %27 ; <<4 x float>>:177 [#uses=1] + fmul <4 x float> %177, %28 ; <<4 x float>>:178 [#uses=1] + fmul <4 x float> %178, %29 ; <<4 x float>>:179 [#uses=1] + fmul <4 x float> %179, %30 ; <<4 x float>>:180 [#uses=1] + fmul <4 x float> %180, %31 ; <<4 x float>>:181 [#uses=1] + fmul <4 x float> %181, %32 ; <<4 x float>>:182 [#uses=1] + fmul <4 x float> %6, %6 ; <<4 
x float>>:183 [#uses=1] + fmul <4 x float> %183, %7 ; <<4 x float>>:184 [#uses=1] + fmul <4 x float> %184, %8 ; <<4 x float>>:185 [#uses=1] + fmul <4 x float> %185, %9 ; <<4 x float>>:186 [#uses=1] + fmul <4 x float> %186, %10 ; <<4 x float>>:187 [#uses=1] + fmul <4 x float> %187, %11 ; <<4 x float>>:188 [#uses=1] + fmul <4 x float> %188, %12 ; <<4 x float>>:189 [#uses=1] + fmul <4 x float> %189, %13 ; <<4 x float>>:190 [#uses=1] + fmul <4 x float> %190, %14 ; <<4 x float>>:191 [#uses=1] + fmul <4 x float> %191, %15 ; <<4 x float>>:192 [#uses=1] + fmul <4 x float> %192, %16 ; <<4 x float>>:193 [#uses=1] + fmul <4 x float> %193, %17 ; <<4 x float>>:194 [#uses=1] + fmul <4 x float> %194, %18 ; <<4 x float>>:195 [#uses=1] + fmul <4 x float> %195, %19 ; <<4 x float>>:196 [#uses=1] + fmul <4 x float> %196, %20 ; <<4 x float>>:197 [#uses=1] + fmul <4 x float> %197, %21 ; <<4 x float>>:198 [#uses=1] + fmul <4 x float> %198, %22 ; <<4 x float>>:199 [#uses=1] + fmul <4 x float> %199, %23 ; <<4 x float>>:200 [#uses=1] + fmul <4 x float> %200, %24 ; <<4 x float>>:201 [#uses=1] + fmul <4 x float> %201, %25 ; <<4 x float>>:202 [#uses=1] + fmul <4 x float> %202, %26 ; <<4 x float>>:203 [#uses=1] + fmul <4 x float> %203, %27 ; <<4 x float>>:204 [#uses=1] + fmul <4 x float> %204, %28 ; <<4 x float>>:205 [#uses=1] + fmul <4 x float> %205, %29 ; <<4 x float>>:206 [#uses=1] + fmul <4 x float> %206, %30 ; <<4 x float>>:207 [#uses=1] + fmul <4 x float> %207, %31 ; <<4 x float>>:208 [#uses=1] + fmul <4 x float> %208, %32 ; <<4 x float>>:209 [#uses=1] + fmul <4 x float> %7, %7 ; <<4 x float>>:210 [#uses=1] + fmul <4 x float> %210, %8 ; <<4 x float>>:211 [#uses=1] + fmul <4 x float> %211, %9 ; <<4 x float>>:212 [#uses=1] + fmul <4 x float> %212, %10 ; <<4 x float>>:213 [#uses=1] + fmul <4 x float> %213, %11 ; <<4 x float>>:214 [#uses=1] + fmul <4 x float> %214, %12 ; <<4 x float>>:215 [#uses=1] + fmul <4 x float> %215, %13 ; <<4 x float>>:216 [#uses=1] + fmul <4 x float> %216, %14 ; <<4 x float>>:217 [#uses=1] + fmul <4 x float> %217, %15 ; <<4 x float>>:218 [#uses=1] + fmul <4 x float> %218, %16 ; <<4 x float>>:219 [#uses=1] + fmul <4 x float> %219, %17 ; <<4 x float>>:220 [#uses=1] + fmul <4 x float> %220, %18 ; <<4 x float>>:221 [#uses=1] + fmul <4 x float> %221, %19 ; <<4 x float>>:222 [#uses=1] + fmul <4 x float> %222, %20 ; <<4 x float>>:223 [#uses=1] + fmul <4 x float> %223, %21 ; <<4 x float>>:224 [#uses=1] + fmul <4 x float> %224, %22 ; <<4 x float>>:225 [#uses=1] + fmul <4 x float> %225, %23 ; <<4 x float>>:226 [#uses=1] + fmul <4 x float> %226, %24 ; <<4 x float>>:227 [#uses=1] + fmul <4 x float> %227, %25 ; <<4 x float>>:228 [#uses=1] + fmul <4 x float> %228, %26 ; <<4 x float>>:229 [#uses=1] + fmul <4 x float> %229, %27 ; <<4 x float>>:230 [#uses=1] + fmul <4 x float> %230, %28 ; <<4 x float>>:231 [#uses=1] + fmul <4 x float> %231, %29 ; <<4 x float>>:232 [#uses=1] + fmul <4 x float> %232, %30 ; <<4 x float>>:233 [#uses=1] + fmul <4 x float> %233, %31 ; <<4 x float>>:234 [#uses=1] + fmul <4 x float> %234, %32 ; <<4 x float>>:235 [#uses=1] + fmul <4 x float> %8, %8 ; <<4 x float>>:236 [#uses=1] + fmul <4 x float> %236, %9 ; <<4 x float>>:237 [#uses=1] + fmul <4 x float> %237, %10 ; <<4 x float>>:238 [#uses=1] + fmul <4 x float> %238, %11 ; <<4 x float>>:239 [#uses=1] + fmul <4 x float> %239, %12 ; <<4 x float>>:240 [#uses=1] + fmul <4 x float> %240, %13 ; <<4 x float>>:241 [#uses=1] + fmul <4 x float> %241, %14 ; <<4 x float>>:242 [#uses=1] + fmul <4 x float> %242, %15 ; <<4 x float>>:243 [#uses=1] + 
fmul <4 x float> %243, %16 ; <<4 x float>>:244 [#uses=1] + fmul <4 x float> %244, %17 ; <<4 x float>>:245 [#uses=1] + fmul <4 x float> %245, %18 ; <<4 x float>>:246 [#uses=1] + fmul <4 x float> %246, %19 ; <<4 x float>>:247 [#uses=1] + fmul <4 x float> %247, %20 ; <<4 x float>>:248 [#uses=1] + fmul <4 x float> %248, %21 ; <<4 x float>>:249 [#uses=1] + fmul <4 x float> %249, %22 ; <<4 x float>>:250 [#uses=1] + fmul <4 x float> %250, %23 ; <<4 x float>>:251 [#uses=1] + fmul <4 x float> %251, %24 ; <<4 x float>>:252 [#uses=1] + fmul <4 x float> %252, %25 ; <<4 x float>>:253 [#uses=1] + fmul <4 x float> %253, %26 ; <<4 x float>>:254 [#uses=1] + fmul <4 x float> %254, %27 ; <<4 x float>>:255 [#uses=1] + fmul <4 x float> %255, %28 ; <<4 x float>>:256 [#uses=1] + fmul <4 x float> %256, %29 ; <<4 x float>>:257 [#uses=1] + fmul <4 x float> %257, %30 ; <<4 x float>>:258 [#uses=1] + fmul <4 x float> %258, %31 ; <<4 x float>>:259 [#uses=1] + fmul <4 x float> %259, %32 ; <<4 x float>>:260 [#uses=1] + fmul <4 x float> %9, %9 ; <<4 x float>>:261 [#uses=1] + fmul <4 x float> %261, %10 ; <<4 x float>>:262 [#uses=1] + fmul <4 x float> %262, %11 ; <<4 x float>>:263 [#uses=1] + fmul <4 x float> %263, %12 ; <<4 x float>>:264 [#uses=1] + fmul <4 x float> %264, %13 ; <<4 x float>>:265 [#uses=1] + fmul <4 x float> %265, %14 ; <<4 x float>>:266 [#uses=1] + fmul <4 x float> %266, %15 ; <<4 x float>>:267 [#uses=1] + fmul <4 x float> %267, %16 ; <<4 x float>>:268 [#uses=1] + fmul <4 x float> %268, %17 ; <<4 x float>>:269 [#uses=1] + fmul <4 x float> %269, %18 ; <<4 x float>>:270 [#uses=1] + fmul <4 x float> %270, %19 ; <<4 x float>>:271 [#uses=1] + fmul <4 x float> %271, %20 ; <<4 x float>>:272 [#uses=1] + fmul <4 x float> %272, %21 ; <<4 x float>>:273 [#uses=1] + fmul <4 x float> %273, %22 ; <<4 x float>>:274 [#uses=1] + fmul <4 x float> %274, %23 ; <<4 x float>>:275 [#uses=1] + fmul <4 x float> %275, %24 ; <<4 x float>>:276 [#uses=1] + fmul <4 x float> %276, %25 ; <<4 x float>>:277 [#uses=1] + fmul <4 x float> %277, %26 ; <<4 x float>>:278 [#uses=1] + fmul <4 x float> %278, %27 ; <<4 x float>>:279 [#uses=1] + fmul <4 x float> %279, %28 ; <<4 x float>>:280 [#uses=1] + fmul <4 x float> %280, %29 ; <<4 x float>>:281 [#uses=1] + fmul <4 x float> %281, %30 ; <<4 x float>>:282 [#uses=1] + fmul <4 x float> %282, %31 ; <<4 x float>>:283 [#uses=1] + fmul <4 x float> %283, %32 ; <<4 x float>>:284 [#uses=1] + fmul <4 x float> %10, %10 ; <<4 x float>>:285 [#uses=1] + fmul <4 x float> %285, %11 ; <<4 x float>>:286 [#uses=1] + fmul <4 x float> %286, %12 ; <<4 x float>>:287 [#uses=1] + fmul <4 x float> %287, %13 ; <<4 x float>>:288 [#uses=1] + fmul <4 x float> %288, %14 ; <<4 x float>>:289 [#uses=1] + fmul <4 x float> %289, %15 ; <<4 x float>>:290 [#uses=1] + fmul <4 x float> %290, %16 ; <<4 x float>>:291 [#uses=1] + fmul <4 x float> %291, %17 ; <<4 x float>>:292 [#uses=1] + fmul <4 x float> %292, %18 ; <<4 x float>>:293 [#uses=1] + fmul <4 x float> %293, %19 ; <<4 x float>>:294 [#uses=1] + fmul <4 x float> %294, %20 ; <<4 x float>>:295 [#uses=1] + fmul <4 x float> %295, %21 ; <<4 x float>>:296 [#uses=1] + fmul <4 x float> %296, %22 ; <<4 x float>>:297 [#uses=1] + fmul <4 x float> %297, %23 ; <<4 x float>>:298 [#uses=1] + fmul <4 x float> %298, %24 ; <<4 x float>>:299 [#uses=1] + fmul <4 x float> %299, %25 ; <<4 x float>>:300 [#uses=1] + fmul <4 x float> %300, %26 ; <<4 x float>>:301 [#uses=1] + fmul <4 x float> %301, %27 ; <<4 x float>>:302 [#uses=1] + fmul <4 x float> %302, %28 ; <<4 x float>>:303 [#uses=1] + fmul <4 x float> 
%303, %29 ; <<4 x float>>:304 [#uses=1] + fmul <4 x float> %304, %30 ; <<4 x float>>:305 [#uses=1] + fmul <4 x float> %305, %31 ; <<4 x float>>:306 [#uses=1] + fmul <4 x float> %306, %32 ; <<4 x float>>:307 [#uses=1] + fmul <4 x float> %11, %11 ; <<4 x float>>:308 [#uses=1] + fmul <4 x float> %308, %12 ; <<4 x float>>:309 [#uses=1] + fmul <4 x float> %309, %13 ; <<4 x float>>:310 [#uses=1] + fmul <4 x float> %310, %14 ; <<4 x float>>:311 [#uses=1] + fmul <4 x float> %311, %15 ; <<4 x float>>:312 [#uses=1] + fmul <4 x float> %312, %16 ; <<4 x float>>:313 [#uses=1] + fmul <4 x float> %313, %17 ; <<4 x float>>:314 [#uses=1] + fmul <4 x float> %314, %18 ; <<4 x float>>:315 [#uses=1] + fmul <4 x float> %315, %19 ; <<4 x float>>:316 [#uses=1] + fmul <4 x float> %316, %20 ; <<4 x float>>:317 [#uses=1] + fmul <4 x float> %317, %21 ; <<4 x float>>:318 [#uses=1] + fmul <4 x float> %318, %22 ; <<4 x float>>:319 [#uses=1] + fmul <4 x float> %319, %23 ; <<4 x float>>:320 [#uses=1] + fmul <4 x float> %320, %24 ; <<4 x float>>:321 [#uses=1] + fmul <4 x float> %321, %25 ; <<4 x float>>:322 [#uses=1] + fmul <4 x float> %322, %26 ; <<4 x float>>:323 [#uses=1] + fmul <4 x float> %323, %27 ; <<4 x float>>:324 [#uses=1] + fmul <4 x float> %324, %28 ; <<4 x float>>:325 [#uses=1] + fmul <4 x float> %325, %29 ; <<4 x float>>:326 [#uses=1] + fmul <4 x float> %326, %30 ; <<4 x float>>:327 [#uses=1] + fmul <4 x float> %327, %31 ; <<4 x float>>:328 [#uses=1] + fmul <4 x float> %328, %32 ; <<4 x float>>:329 [#uses=1] + fmul <4 x float> %12, %12 ; <<4 x float>>:330 [#uses=1] + fmul <4 x float> %330, %13 ; <<4 x float>>:331 [#uses=1] + fmul <4 x float> %331, %14 ; <<4 x float>>:332 [#uses=1] + fmul <4 x float> %332, %15 ; <<4 x float>>:333 [#uses=1] + fmul <4 x float> %333, %16 ; <<4 x float>>:334 [#uses=1] + fmul <4 x float> %334, %17 ; <<4 x float>>:335 [#uses=1] + fmul <4 x float> %335, %18 ; <<4 x float>>:336 [#uses=1] + fmul <4 x float> %336, %19 ; <<4 x float>>:337 [#uses=1] + fmul <4 x float> %337, %20 ; <<4 x float>>:338 [#uses=1] + fmul <4 x float> %338, %21 ; <<4 x float>>:339 [#uses=1] + fmul <4 x float> %339, %22 ; <<4 x float>>:340 [#uses=1] + fmul <4 x float> %340, %23 ; <<4 x float>>:341 [#uses=1] + fmul <4 x float> %341, %24 ; <<4 x float>>:342 [#uses=1] + fmul <4 x float> %342, %25 ; <<4 x float>>:343 [#uses=1] + fmul <4 x float> %343, %26 ; <<4 x float>>:344 [#uses=1] + fmul <4 x float> %344, %27 ; <<4 x float>>:345 [#uses=1] + fmul <4 x float> %345, %28 ; <<4 x float>>:346 [#uses=1] + fmul <4 x float> %346, %29 ; <<4 x float>>:347 [#uses=1] + fmul <4 x float> %347, %30 ; <<4 x float>>:348 [#uses=1] + fmul <4 x float> %348, %31 ; <<4 x float>>:349 [#uses=1] + fmul <4 x float> %349, %32 ; <<4 x float>>:350 [#uses=1] + fmul <4 x float> %13, %13 ; <<4 x float>>:351 [#uses=1] + fmul <4 x float> %351, %14 ; <<4 x float>>:352 [#uses=1] + fmul <4 x float> %352, %15 ; <<4 x float>>:353 [#uses=1] + fmul <4 x float> %353, %16 ; <<4 x float>>:354 [#uses=1] + fmul <4 x float> %354, %17 ; <<4 x float>>:355 [#uses=1] + fmul <4 x float> %355, %18 ; <<4 x float>>:356 [#uses=1] + fmul <4 x float> %356, %19 ; <<4 x float>>:357 [#uses=1] + fmul <4 x float> %357, %20 ; <<4 x float>>:358 [#uses=1] + fmul <4 x float> %358, %21 ; <<4 x float>>:359 [#uses=1] + fmul <4 x float> %359, %22 ; <<4 x float>>:360 [#uses=1] + fmul <4 x float> %360, %23 ; <<4 x float>>:361 [#uses=1] + fmul <4 x float> %361, %24 ; <<4 x float>>:362 [#uses=1] + fmul <4 x float> %362, %25 ; <<4 x float>>:363 [#uses=1] + fmul <4 x float> %363, %26 ; <<4 x 
float>>:364 [#uses=1] + fmul <4 x float> %364, %27 ; <<4 x float>>:365 [#uses=1] + fmul <4 x float> %365, %28 ; <<4 x float>>:366 [#uses=1] + fmul <4 x float> %366, %29 ; <<4 x float>>:367 [#uses=1] + fmul <4 x float> %367, %30 ; <<4 x float>>:368 [#uses=1] + fmul <4 x float> %368, %31 ; <<4 x float>>:369 [#uses=1] + fmul <4 x float> %369, %32 ; <<4 x float>>:370 [#uses=1] + fmul <4 x float> %14, %14 ; <<4 x float>>:371 [#uses=1] + fmul <4 x float> %371, %15 ; <<4 x float>>:372 [#uses=1] + fmul <4 x float> %372, %16 ; <<4 x float>>:373 [#uses=1] + fmul <4 x float> %373, %17 ; <<4 x float>>:374 [#uses=1] + fmul <4 x float> %374, %18 ; <<4 x float>>:375 [#uses=1] + fmul <4 x float> %375, %19 ; <<4 x float>>:376 [#uses=1] + fmul <4 x float> %376, %20 ; <<4 x float>>:377 [#uses=1] + fmul <4 x float> %377, %21 ; <<4 x float>>:378 [#uses=1] + fmul <4 x float> %378, %22 ; <<4 x float>>:379 [#uses=1] + fmul <4 x float> %379, %23 ; <<4 x float>>:380 [#uses=1] + fmul <4 x float> %380, %24 ; <<4 x float>>:381 [#uses=1] + fmul <4 x float> %381, %25 ; <<4 x float>>:382 [#uses=1] + fmul <4 x float> %382, %26 ; <<4 x float>>:383 [#uses=1] + fmul <4 x float> %383, %27 ; <<4 x float>>:384 [#uses=1] + fmul <4 x float> %384, %28 ; <<4 x float>>:385 [#uses=1] + fmul <4 x float> %385, %29 ; <<4 x float>>:386 [#uses=1] + fmul <4 x float> %386, %30 ; <<4 x float>>:387 [#uses=1] + fmul <4 x float> %387, %31 ; <<4 x float>>:388 [#uses=1] + fmul <4 x float> %388, %32 ; <<4 x float>>:389 [#uses=1] + fmul <4 x float> %15, %15 ; <<4 x float>>:390 [#uses=1] + fmul <4 x float> %390, %16 ; <<4 x float>>:391 [#uses=1] + fmul <4 x float> %391, %17 ; <<4 x float>>:392 [#uses=1] + fmul <4 x float> %392, %18 ; <<4 x float>>:393 [#uses=1] + fmul <4 x float> %393, %19 ; <<4 x float>>:394 [#uses=1] + fmul <4 x float> %394, %20 ; <<4 x float>>:395 [#uses=1] + fmul <4 x float> %395, %21 ; <<4 x float>>:396 [#uses=1] + fmul <4 x float> %396, %22 ; <<4 x float>>:397 [#uses=1] + fmul <4 x float> %397, %23 ; <<4 x float>>:398 [#uses=1] + fmul <4 x float> %398, %24 ; <<4 x float>>:399 [#uses=1] + fmul <4 x float> %399, %25 ; <<4 x float>>:400 [#uses=1] + fmul <4 x float> %400, %26 ; <<4 x float>>:401 [#uses=1] + fmul <4 x float> %401, %27 ; <<4 x float>>:402 [#uses=1] + fmul <4 x float> %402, %28 ; <<4 x float>>:403 [#uses=1] + fmul <4 x float> %403, %29 ; <<4 x float>>:404 [#uses=1] + fmul <4 x float> %404, %30 ; <<4 x float>>:405 [#uses=1] + fmul <4 x float> %405, %31 ; <<4 x float>>:406 [#uses=1] + fmul <4 x float> %406, %32 ; <<4 x float>>:407 [#uses=1] + fmul <4 x float> %16, %16 ; <<4 x float>>:408 [#uses=1] + fmul <4 x float> %408, %17 ; <<4 x float>>:409 [#uses=1] + fmul <4 x float> %409, %18 ; <<4 x float>>:410 [#uses=1] + fmul <4 x float> %410, %19 ; <<4 x float>>:411 [#uses=1] + fmul <4 x float> %411, %20 ; <<4 x float>>:412 [#uses=1] + fmul <4 x float> %412, %21 ; <<4 x float>>:413 [#uses=1] + fmul <4 x float> %413, %22 ; <<4 x float>>:414 [#uses=1] + fmul <4 x float> %414, %23 ; <<4 x float>>:415 [#uses=1] + fmul <4 x float> %415, %24 ; <<4 x float>>:416 [#uses=1] + fmul <4 x float> %416, %25 ; <<4 x float>>:417 [#uses=1] + fmul <4 x float> %417, %26 ; <<4 x float>>:418 [#uses=1] + fmul <4 x float> %418, %27 ; <<4 x float>>:419 [#uses=1] + fmul <4 x float> %419, %28 ; <<4 x float>>:420 [#uses=1] + fmul <4 x float> %420, %29 ; <<4 x float>>:421 [#uses=1] + fmul <4 x float> %421, %30 ; <<4 x float>>:422 [#uses=1] + fmul <4 x float> %422, %31 ; <<4 x float>>:423 [#uses=1] + fmul <4 x float> %423, %32 ; <<4 x float>>:424 
[#uses=1] + fmul <4 x float> %17, %17 ; <<4 x float>>:425 [#uses=1] + fmul <4 x float> %425, %18 ; <<4 x float>>:426 [#uses=1] + fmul <4 x float> %426, %19 ; <<4 x float>>:427 [#uses=1] + fmul <4 x float> %427, %20 ; <<4 x float>>:428 [#uses=1] + fmul <4 x float> %428, %21 ; <<4 x float>>:429 [#uses=1] + fmul <4 x float> %429, %22 ; <<4 x float>>:430 [#uses=1] + fmul <4 x float> %430, %23 ; <<4 x float>>:431 [#uses=1] + fmul <4 x float> %431, %24 ; <<4 x float>>:432 [#uses=1] + fmul <4 x float> %432, %25 ; <<4 x float>>:433 [#uses=1] + fmul <4 x float> %433, %26 ; <<4 x float>>:434 [#uses=1] + fmul <4 x float> %434, %27 ; <<4 x float>>:435 [#uses=1] + fmul <4 x float> %435, %28 ; <<4 x float>>:436 [#uses=1] + fmul <4 x float> %436, %29 ; <<4 x float>>:437 [#uses=1] + fmul <4 x float> %437, %30 ; <<4 x float>>:438 [#uses=1] + fmul <4 x float> %438, %31 ; <<4 x float>>:439 [#uses=1] + fmul <4 x float> %439, %32 ; <<4 x float>>:440 [#uses=1] + fmul <4 x float> %18, %18 ; <<4 x float>>:441 [#uses=1] + fmul <4 x float> %441, %19 ; <<4 x float>>:442 [#uses=1] + fmul <4 x float> %442, %20 ; <<4 x float>>:443 [#uses=1] + fmul <4 x float> %443, %21 ; <<4 x float>>:444 [#uses=1] + fmul <4 x float> %444, %22 ; <<4 x float>>:445 [#uses=1] + fmul <4 x float> %445, %23 ; <<4 x float>>:446 [#uses=1] + fmul <4 x float> %446, %24 ; <<4 x float>>:447 [#uses=1] + fmul <4 x float> %447, %25 ; <<4 x float>>:448 [#uses=1] + fmul <4 x float> %448, %26 ; <<4 x float>>:449 [#uses=1] + fmul <4 x float> %449, %27 ; <<4 x float>>:450 [#uses=1] + fmul <4 x float> %450, %28 ; <<4 x float>>:451 [#uses=1] + fmul <4 x float> %451, %29 ; <<4 x float>>:452 [#uses=1] + fmul <4 x float> %452, %30 ; <<4 x float>>:453 [#uses=1] + fmul <4 x float> %453, %31 ; <<4 x float>>:454 [#uses=1] + fmul <4 x float> %454, %32 ; <<4 x float>>:455 [#uses=1] + fmul <4 x float> %19, %19 ; <<4 x float>>:456 [#uses=1] + fmul <4 x float> %456, %20 ; <<4 x float>>:457 [#uses=1] + fmul <4 x float> %457, %21 ; <<4 x float>>:458 [#uses=1] + fmul <4 x float> %458, %22 ; <<4 x float>>:459 [#uses=1] + fmul <4 x float> %459, %23 ; <<4 x float>>:460 [#uses=1] + fmul <4 x float> %460, %24 ; <<4 x float>>:461 [#uses=1] + fmul <4 x float> %461, %25 ; <<4 x float>>:462 [#uses=1] + fmul <4 x float> %462, %26 ; <<4 x float>>:463 [#uses=1] + fmul <4 x float> %463, %27 ; <<4 x float>>:464 [#uses=1] + fmul <4 x float> %464, %28 ; <<4 x float>>:465 [#uses=1] + fmul <4 x float> %465, %29 ; <<4 x float>>:466 [#uses=1] + fmul <4 x float> %466, %30 ; <<4 x float>>:467 [#uses=1] + fmul <4 x float> %467, %31 ; <<4 x float>>:468 [#uses=1] + fmul <4 x float> %468, %32 ; <<4 x float>>:469 [#uses=1] + fmul <4 x float> %20, %20 ; <<4 x float>>:470 [#uses=1] + fmul <4 x float> %470, %21 ; <<4 x float>>:471 [#uses=1] + fmul <4 x float> %471, %22 ; <<4 x float>>:472 [#uses=1] + fmul <4 x float> %472, %23 ; <<4 x float>>:473 [#uses=1] + fmul <4 x float> %473, %24 ; <<4 x float>>:474 [#uses=1] + fmul <4 x float> %474, %25 ; <<4 x float>>:475 [#uses=1] + fmul <4 x float> %475, %26 ; <<4 x float>>:476 [#uses=1] + fmul <4 x float> %476, %27 ; <<4 x float>>:477 [#uses=1] + fmul <4 x float> %477, %28 ; <<4 x float>>:478 [#uses=1] + fmul <4 x float> %478, %29 ; <<4 x float>>:479 [#uses=1] + fmul <4 x float> %479, %30 ; <<4 x float>>:480 [#uses=1] + fmul <4 x float> %480, %31 ; <<4 x float>>:481 [#uses=1] + fmul <4 x float> %481, %32 ; <<4 x float>>:482 [#uses=1] + fmul <4 x float> %21, %21 ; <<4 x float>>:483 [#uses=1] + fmul <4 x float> %483, %22 ; <<4 x float>>:484 [#uses=1] + fmul <4 
x float> %484, %23 ; <<4 x float>>:485 [#uses=1] + fmul <4 x float> %485, %24 ; <<4 x float>>:486 [#uses=1] + fmul <4 x float> %486, %25 ; <<4 x float>>:487 [#uses=1] + fmul <4 x float> %487, %26 ; <<4 x float>>:488 [#uses=1] + fmul <4 x float> %488, %27 ; <<4 x float>>:489 [#uses=1] + fmul <4 x float> %489, %28 ; <<4 x float>>:490 [#uses=1] + fmul <4 x float> %490, %29 ; <<4 x float>>:491 [#uses=1] + fmul <4 x float> %491, %30 ; <<4 x float>>:492 [#uses=1] + fmul <4 x float> %492, %31 ; <<4 x float>>:493 [#uses=1] + fmul <4 x float> %493, %32 ; <<4 x float>>:494 [#uses=1] + fmul <4 x float> %22, %22 ; <<4 x float>>:495 [#uses=1] + fmul <4 x float> %495, %23 ; <<4 x float>>:496 [#uses=1] + fmul <4 x float> %496, %24 ; <<4 x float>>:497 [#uses=1] + fmul <4 x float> %497, %25 ; <<4 x float>>:498 [#uses=1] + fmul <4 x float> %498, %26 ; <<4 x float>>:499 [#uses=1] + fmul <4 x float> %499, %27 ; <<4 x float>>:500 [#uses=1] + fmul <4 x float> %500, %28 ; <<4 x float>>:501 [#uses=1] + fmul <4 x float> %501, %29 ; <<4 x float>>:502 [#uses=1] + fmul <4 x float> %502, %30 ; <<4 x float>>:503 [#uses=1] + fmul <4 x float> %503, %31 ; <<4 x float>>:504 [#uses=1] + fmul <4 x float> %504, %32 ; <<4 x float>>:505 [#uses=1] + fmul <4 x float> %23, %23 ; <<4 x float>>:506 [#uses=1] + fmul <4 x float> %506, %24 ; <<4 x float>>:507 [#uses=1] + fmul <4 x float> %507, %25 ; <<4 x float>>:508 [#uses=1] + fmul <4 x float> %508, %26 ; <<4 x float>>:509 [#uses=1] + fmul <4 x float> %509, %27 ; <<4 x float>>:510 [#uses=1] + fmul <4 x float> %510, %28 ; <<4 x float>>:511 [#uses=1] + fmul <4 x float> %511, %29 ; <<4 x float>>:512 [#uses=1] + fmul <4 x float> %512, %30 ; <<4 x float>>:513 [#uses=1] + fmul <4 x float> %513, %31 ; <<4 x float>>:514 [#uses=1] + fmul <4 x float> %514, %32 ; <<4 x float>>:515 [#uses=1] + fmul <4 x float> %24, %24 ; <<4 x float>>:516 [#uses=1] + fmul <4 x float> %516, %25 ; <<4 x float>>:517 [#uses=1] + fmul <4 x float> %517, %26 ; <<4 x float>>:518 [#uses=1] + fmul <4 x float> %518, %27 ; <<4 x float>>:519 [#uses=1] + fmul <4 x float> %519, %28 ; <<4 x float>>:520 [#uses=1] + fmul <4 x float> %520, %29 ; <<4 x float>>:521 [#uses=1] + fmul <4 x float> %521, %30 ; <<4 x float>>:522 [#uses=1] + fmul <4 x float> %522, %31 ; <<4 x float>>:523 [#uses=1] + fmul <4 x float> %523, %32 ; <<4 x float>>:524 [#uses=1] + fmul <4 x float> %25, %25 ; <<4 x float>>:525 [#uses=1] + fmul <4 x float> %525, %26 ; <<4 x float>>:526 [#uses=1] + fmul <4 x float> %526, %27 ; <<4 x float>>:527 [#uses=1] + fmul <4 x float> %527, %28 ; <<4 x float>>:528 [#uses=1] + fmul <4 x float> %528, %29 ; <<4 x float>>:529 [#uses=1] + fmul <4 x float> %529, %30 ; <<4 x float>>:530 [#uses=1] + fmul <4 x float> %530, %31 ; <<4 x float>>:531 [#uses=1] + fmul <4 x float> %531, %32 ; <<4 x float>>:532 [#uses=1] + fmul <4 x float> %26, %26 ; <<4 x float>>:533 [#uses=1] + fmul <4 x float> %533, %27 ; <<4 x float>>:534 [#uses=1] + fmul <4 x float> %534, %28 ; <<4 x float>>:535 [#uses=1] + fmul <4 x float> %535, %29 ; <<4 x float>>:536 [#uses=1] + fmul <4 x float> %536, %30 ; <<4 x float>>:537 [#uses=1] + fmul <4 x float> %537, %31 ; <<4 x float>>:538 [#uses=1] + fmul <4 x float> %538, %32 ; <<4 x float>>:539 [#uses=1] + fmul <4 x float> %27, %27 ; <<4 x float>>:540 [#uses=1] + fmul <4 x float> %540, %28 ; <<4 x float>>:541 [#uses=1] + fmul <4 x float> %541, %29 ; <<4 x float>>:542 [#uses=1] + fmul <4 x float> %542, %30 ; <<4 x float>>:543 [#uses=1] + fmul <4 x float> %543, %31 ; <<4 x float>>:544 [#uses=1] + fmul <4 x float> %544, %32 ; 
<<4 x float>>:545 [#uses=1] + fmul <4 x float> %28, %28 ; <<4 x float>>:546 [#uses=1] + fmul <4 x float> %546, %29 ; <<4 x float>>:547 [#uses=1] + fmul <4 x float> %547, %30 ; <<4 x float>>:548 [#uses=1] + fmul <4 x float> %548, %31 ; <<4 x float>>:549 [#uses=1] + fmul <4 x float> %549, %32 ; <<4 x float>>:550 [#uses=1] + fmul <4 x float> %29, %29 ; <<4 x float>>:551 [#uses=1] + fmul <4 x float> %551, %30 ; <<4 x float>>:552 [#uses=1] + fmul <4 x float> %552, %31 ; <<4 x float>>:553 [#uses=1] + fmul <4 x float> %553, %32 ; <<4 x float>>:554 [#uses=1] + fmul <4 x float> %30, %30 ; <<4 x float>>:555 [#uses=1] + fmul <4 x float> %555, %31 ; <<4 x float>>:556 [#uses=1] + fmul <4 x float> %556, %32 ; <<4 x float>>:557 [#uses=1] + fmul <4 x float> %31, %31 ; <<4 x float>>:558 [#uses=1] + fmul <4 x float> %558, %32 ; <<4 x float>>:559 [#uses=1] + fmul <4 x float> %32, %32 ; <<4 x float>>:560 [#uses=1] + fadd <4 x float> %64, %64 ; <<4 x float>>:561 [#uses=1] + fadd <4 x float> %561, %64 ; <<4 x float>>:562 [#uses=1] + fadd <4 x float> %562, %95 ; <<4 x float>>:563 [#uses=1] + fadd <4 x float> %563, %125 ; <<4 x float>>:564 [#uses=1] + fadd <4 x float> %564, %154 ; <<4 x float>>:565 [#uses=1] + fadd <4 x float> %565, %182 ; <<4 x float>>:566 [#uses=1] + fadd <4 x float> %566, %209 ; <<4 x float>>:567 [#uses=1] + fadd <4 x float> %567, %235 ; <<4 x float>>:568 [#uses=1] + fadd <4 x float> %568, %260 ; <<4 x float>>:569 [#uses=1] + fadd <4 x float> %569, %284 ; <<4 x float>>:570 [#uses=1] + fadd <4 x float> %570, %307 ; <<4 x float>>:571 [#uses=1] + fadd <4 x float> %571, %329 ; <<4 x float>>:572 [#uses=1] + fadd <4 x float> %572, %350 ; <<4 x float>>:573 [#uses=1] + fadd <4 x float> %573, %370 ; <<4 x float>>:574 [#uses=1] + fadd <4 x float> %574, %389 ; <<4 x float>>:575 [#uses=1] + fadd <4 x float> %575, %407 ; <<4 x float>>:576 [#uses=1] + fadd <4 x float> %576, %424 ; <<4 x float>>:577 [#uses=1] + fadd <4 x float> %577, %440 ; <<4 x float>>:578 [#uses=1] + fadd <4 x float> %578, %455 ; <<4 x float>>:579 [#uses=1] + fadd <4 x float> %579, %469 ; <<4 x float>>:580 [#uses=1] + fadd <4 x float> %580, %482 ; <<4 x float>>:581 [#uses=1] + fadd <4 x float> %581, %494 ; <<4 x float>>:582 [#uses=1] + fadd <4 x float> %582, %505 ; <<4 x float>>:583 [#uses=1] + fadd <4 x float> %583, %515 ; <<4 x float>>:584 [#uses=1] + fadd <4 x float> %584, %524 ; <<4 x float>>:585 [#uses=1] + fadd <4 x float> %585, %532 ; <<4 x float>>:586 [#uses=1] + fadd <4 x float> %586, %539 ; <<4 x float>>:587 [#uses=1] + fadd <4 x float> %587, %545 ; <<4 x float>>:588 [#uses=1] + fadd <4 x float> %588, %550 ; <<4 x float>>:589 [#uses=1] + fadd <4 x float> %589, %554 ; <<4 x float>>:590 [#uses=1] + fadd <4 x float> %590, %557 ; <<4 x float>>:591 [#uses=1] + fadd <4 x float> %591, %559 ; <<4 x float>>:592 [#uses=1] + fadd <4 x float> %592, %560 ; <<4 x float>>:593 [#uses=1] store <4 x float> %593, <4 x float>* @0, align 1 ret void } diff --git a/test/CodeGen/X86/2008-07-23-VSetCC.ll b/test/CodeGen/X86/2008-07-23-VSetCC.ll index 735c610..da6c089 100644 --- a/test/CodeGen/X86/2008-07-23-VSetCC.ll +++ b/test/CodeGen/X86/2008-07-23-VSetCC.ll @@ -13,12 +13,12 @@ bb.nph: ; preds = %bb.nph, %0 insertelement <4 x i32> zeroinitializer, i32 %5, i32 3 ; <<4 x i32>>:6 [#uses=1] and <4 x i32> zeroinitializer, %6 ; <<4 x i32>>:7 [#uses=1] bitcast <4 x i32> %7 to <4 x float> ; <<4 x float>>:8 [#uses=1] - mul <4 x float> zeroinitializer, %8 ; <<4 x float>>:9 [#uses=1] + fmul <4 x float> zeroinitializer, %8 ; <<4 x float>>:9 [#uses=1] bitcast <4 x 
float> %9 to <4 x i32> ; <<4 x i32>>:10 [#uses=1] or <4 x i32> %10, zeroinitializer ; <<4 x i32>>:11 [#uses=1] bitcast <4 x i32> %11 to <4 x float> ; <<4 x float>>:12 [#uses=1] - mul <4 x float> %12, < float 1.000000e+02, float 1.000000e+02, float 1.000000e+02, float 1.000000e+02 > ; <<4 x float>>:13 [#uses=1] - sub <4 x float> %13, < float 1.000000e+02, float 1.000000e+02, float 1.000000e+02, float 1.000000e+02 > ; <<4 x float>>:14 [#uses=1] + fmul <4 x float> %12, < float 1.000000e+02, float 1.000000e+02, float 1.000000e+02, float 1.000000e+02 > ; <<4 x float>>:13 [#uses=1] + fsub <4 x float> %13, < float 1.000000e+02, float 1.000000e+02, float 1.000000e+02, float 1.000000e+02 > ; <<4 x float>>:14 [#uses=1] extractelement <4 x float> %14, i32 3 ; <float>:15 [#uses=1] call float @fmaxf( float 0.000000e+00, float %15 ) ; <float>:16 [#uses=0] br label %bb.nph diff --git a/test/CodeGen/X86/2008-08-23-X86-64AsmBug.ll b/test/CodeGen/X86/2008-08-23-X86-64AsmBug.ll index b50f2b0..4e35332 100644 --- a/test/CodeGen/X86/2008-08-23-X86-64AsmBug.ll +++ b/test/CodeGen/X86/2008-08-23-X86-64AsmBug.ll @@ -13,7 +13,7 @@ bb151: ; preds = %entry bb163: ; preds = %bb151, %entry %tmp366 = load double* null, align 8 ; <double> [#uses=1] - %tmp368 = mul double %tmp366, 0.000000e+00 ; <double> [#uses=1] + %tmp368 = fmul double %tmp366, 0.000000e+00 ; <double> [#uses=1] %tmp368226 = bitcast double %tmp368 to i64 ; <i64> [#uses=1] br label %bb5.i diff --git a/test/CodeGen/X86/2008-10-27-CoalescerBug.ll b/test/CodeGen/X86/2008-10-27-CoalescerBug.ll index 2c8e12f..ad13b85 100644 --- a/test/CodeGen/X86/2008-10-27-CoalescerBug.ll +++ b/test/CodeGen/X86/2008-10-27-CoalescerBug.ll @@ -26,7 +26,7 @@ bb22.preheader: ; preds = %bb24.preheader, %bb22.preheader br label %bb22.preheader bb25: ; preds = %bb24.preheader - %7 = mul double 0.000000e+00, %6 ; <double> [#uses=0] + %7 = fmul double 0.000000e+00, %6 ; <double> [#uses=0] %8 = add i32 %i3.122100, 0 ; <i32> [#uses=1] %9 = icmp sgt i32 %8, 0 ; <i1> [#uses=1] br i1 %9, label %bb3, label %bb24.preheader @@ -37,7 +37,7 @@ bb24.preheader: ; preds = %bb25, %bb18 br i1 %10, label %bb25, label %bb22.preheader bb30.loopexit: ; preds = %bb - %11 = mul double 0.000000e+00, 0x401921FB54442D1C ; <double> [#uses=1] + %11 = fmul double 0.000000e+00, 0x401921FB54442D1C ; <double> [#uses=1] br label %bb3 } diff --git a/test/CodeGen/X86/2008-11-03-F80VAARG.ll b/test/CodeGen/X86/2008-11-03-F80VAARG.ll index bb9fbdb..36a054a 100644 --- a/test/CodeGen/X86/2008-11-03-F80VAARG.ll +++ b/test/CodeGen/X86/2008-11-03-F80VAARG.ll @@ -12,6 +12,6 @@ define x86_fp80 @test(...) 
nounwind { call void @llvm.va_start(i8* %v1) %t1 = va_arg i8** %ap, x86_fp80 ; <x86_fp80> [#uses=1] %t2 = va_arg i8** %ap, x86_fp80 ; <x86_fp80> [#uses=1] - %t = add x86_fp80 %t1, %t2 ; <x86_fp80> [#uses=1] + %t = fadd x86_fp80 %t1, %t2 ; <x86_fp80> [#uses=1] ret x86_fp80 %t } diff --git a/test/CodeGen/X86/2008-12-05-SpillerCrash.ll b/test/CodeGen/X86/2008-12-05-SpillerCrash.ll index dbb7acf..b6b5cbd 100644 --- a/test/CodeGen/X86/2008-12-05-SpillerCrash.ll +++ b/test/CodeGen/X86/2008-12-05-SpillerCrash.ll @@ -145,7 +145,7 @@ bb4426.i.i.i: ; preds = %bb7551.i.i.i %20 = add <4 x i32> %19, zeroinitializer ; <<4 x i32>> [#uses=3] %21 = load i32* null, align 4 ; <i32> [#uses=0] %22 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> zeroinitializer) nounwind readnone ; <<4 x float>> [#uses=1] - %23 = mul <4 x float> %22, < float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000 > ; <<4 x float>> [#uses=1] + %23 = fmul <4 x float> %22, < float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000 > ; <<4 x float>> [#uses=1] %tmp2114.i119.i.i = extractelement <4 x i32> %20, i32 1 ; <i32> [#uses=1] %24 = shl i32 %tmp2114.i119.i.i, 2 ; <i32> [#uses=1] %25 = getelementptr i8* %11, i32 %24 ; <i8*> [#uses=1] @@ -160,7 +160,7 @@ bb4426.i.i.i: ; preds = %bb7551.i.i.i %33 = bitcast <8 x i16> %32 to <4 x i32> ; <<4 x i32>> [#uses=1] %34 = shufflevector <4 x i32> %33, <4 x i32> undef, <4 x i32> < i32 2, i32 1, i32 0, i32 3 > ; <<4 x i32>> [#uses=1] %35 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %34) nounwind readnone ; <<4 x float>> [#uses=1] - %36 = mul <4 x float> %35, < float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000 > ; <<4 x float>> [#uses=1] + %36 = fmul <4 x float> %35, < float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000 > ; <<4 x float>> [#uses=1] %tmp2113.i124.i.i = extractelement <4 x i32> %20, i32 2 ; <i32> [#uses=1] %37 = shl i32 %tmp2113.i124.i.i, 2 ; <i32> [#uses=1] %38 = getelementptr i8* %14, i32 %37 ; <i8*> [#uses=1] @@ -175,7 +175,7 @@ bb4426.i.i.i: ; preds = %bb7551.i.i.i %46 = bitcast <8 x i16> %45 to <4 x i32> ; <<4 x i32>> [#uses=1] %47 = shufflevector <4 x i32> %46, <4 x i32> undef, <4 x i32> < i32 2, i32 1, i32 0, i32 3 > ; <<4 x i32>> [#uses=1] %48 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %47) nounwind readnone ; <<4 x float>> [#uses=1] - %49 = mul <4 x float> %48, < float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000 > ; <<4 x float>> [#uses=1] + %49 = fmul <4 x float> %48, < float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000 > ; <<4 x float>> [#uses=1] %tmp2112.i129.i.i = extractelement <4 x i32> %20, i32 3 ; <i32> [#uses=1] %50 = shl i32 %tmp2112.i129.i.i, 2 ; <i32> [#uses=1] %51 = getelementptr i8* %17, i32 %50 ; <i8*> [#uses=1] @@ -190,15 +190,15 @@ bb4426.i.i.i: ; preds = %bb7551.i.i.i %59 = bitcast <8 x i16> %58 to <4 x i32> ; <<4 x i32>> [#uses=1] %60 = shufflevector <4 x i32> %59, <4 x i32> undef, <4 x i32> < i32 2, i32 1, i32 0, i32 3 > ; <<4 x i32>> [#uses=1] %61 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %60) nounwind readnone ; <<4 x float>> [#uses=1] - %62 = mul <4 x float> %61, < float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000 > ; <<4 x float>> [#uses=1] - %63 = mul <4 x float> %23, 
zeroinitializer ; <<4 x float>> [#uses=1] - %64 = add <4 x float> zeroinitializer, %63 ; <<4 x float>> [#uses=1] - %65 = mul <4 x float> %36, zeroinitializer ; <<4 x float>> [#uses=1] - %66 = add <4 x float> zeroinitializer, %65 ; <<4 x float>> [#uses=1] - %67 = mul <4 x float> %49, zeroinitializer ; <<4 x float>> [#uses=1] - %68 = add <4 x float> zeroinitializer, %67 ; <<4 x float>> [#uses=1] - %69 = mul <4 x float> %62, zeroinitializer ; <<4 x float>> [#uses=1] - %70 = add <4 x float> zeroinitializer, %69 ; <<4 x float>> [#uses=1] + %62 = fmul <4 x float> %61, < float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000 > ; <<4 x float>> [#uses=1] + %63 = fmul <4 x float> %23, zeroinitializer ; <<4 x float>> [#uses=1] + %64 = fadd <4 x float> zeroinitializer, %63 ; <<4 x float>> [#uses=1] + %65 = fmul <4 x float> %36, zeroinitializer ; <<4 x float>> [#uses=1] + %66 = fadd <4 x float> zeroinitializer, %65 ; <<4 x float>> [#uses=1] + %67 = fmul <4 x float> %49, zeroinitializer ; <<4 x float>> [#uses=1] + %68 = fadd <4 x float> zeroinitializer, %67 ; <<4 x float>> [#uses=1] + %69 = fmul <4 x float> %62, zeroinitializer ; <<4 x float>> [#uses=1] + %70 = fadd <4 x float> zeroinitializer, %69 ; <<4 x float>> [#uses=1] %tmp7452.i.i.i = bitcast <4 x float> %64 to <4 x i32> ; <<4 x i32>> [#uses=1] %tmp7454.i.i.i = and <4 x i32> %tmp7452.i.i.i, zeroinitializer ; <<4 x i32>> [#uses=1] %tmp7459.i.i.i = or <4 x i32> %tmp7454.i.i.i, zeroinitializer ; <<4 x i32>> [#uses=1] diff --git a/test/CodeGen/X86/2009-01-16-UIntToFP.ll b/test/CodeGen/X86/2009-01-16-UIntToFP.ll index 6de11c9..340608a 100644 --- a/test/CodeGen/X86/2009-01-16-UIntToFP.ll +++ b/test/CodeGen/X86/2009-01-16-UIntToFP.ll @@ -22,10 +22,10 @@ bb2: ; preds = %bb1, %bb, %entry %5 = lshr i64 %u_addr.0, 32 ; <i64> [#uses=1] %6 = trunc i64 %5 to i32 ; <i32> [#uses=1] %7 = uitofp i32 %6 to double ; <double> [#uses=1] - %8 = mul double %7, 0x41F0000000000000 ; <double> [#uses=1] + %8 = fmul double %7, 0x41F0000000000000 ; <double> [#uses=1] %9 = trunc i64 %u_addr.0 to i32 ; <i32> [#uses=1] %10 = uitofp i32 %9 to double ; <double> [#uses=1] - %11 = add double %10, %8 ; <double> [#uses=1] + %11 = fadd double %10, %8 ; <double> [#uses=1] %12 = fptrunc double %11 to float ; <float> [#uses=1] ret float %12 } diff --git a/test/CodeGen/X86/2009-02-12-SpillerBug.ll b/test/CodeGen/X86/2009-02-12-SpillerBug.ll index 747dc8a..1d10319 100644 --- a/test/CodeGen/X86/2009-02-12-SpillerBug.ll +++ b/test/CodeGen/X86/2009-02-12-SpillerBug.ll @@ -3,9 +3,9 @@ define hidden void @__mulxc3({ x86_fp80, x86_fp80 }* noalias nocapture sret %agg.result, x86_fp80 %a, x86_fp80 %b, x86_fp80 %c, x86_fp80 %d) nounwind { entry: - %0 = mul x86_fp80 %b, %d ; <x86_fp80> [#uses=1] - %1 = sub x86_fp80 0xK00000000000000000000, %0 ; <x86_fp80> [#uses=1] - %2 = add x86_fp80 0xK00000000000000000000, 0xK00000000000000000000 ; <x86_fp80> [#uses=1] + %0 = fmul x86_fp80 %b, %d ; <x86_fp80> [#uses=1] + %1 = fsub x86_fp80 0xK00000000000000000000, %0 ; <x86_fp80> [#uses=1] + %2 = fadd x86_fp80 0xK00000000000000000000, 0xK00000000000000000000 ; <x86_fp80> [#uses=1] %3 = fcmp uno x86_fp80 %1, 0xK00000000000000000000 ; <i1> [#uses=1] %4 = fcmp uno x86_fp80 %2, 0xK00000000000000000000 ; <i1> [#uses=1] %or.cond = and i1 %3, %4 ; <i1> [#uses=1] diff --git a/test/CodeGen/X86/2009-02-25-CommuteBug.ll b/test/CodeGen/X86/2009-02-25-CommuteBug.ll index b772bf8..3dbfa80 100644 --- a/test/CodeGen/X86/2009-02-25-CommuteBug.ll +++ 
b/test/CodeGen/X86/2009-02-25-CommuteBug.ll @@ -7,7 +7,7 @@ entry: %tmp2.i = or <2 x i64> %tmp.i2, <i64 4607632778762754458, i64 4607632778762754458> ; <<2 x i64>> [#uses=1] %tmp3.i = bitcast <2 x i64> %tmp2.i to <2 x double> ; <<2 x double>> [#uses=1] %0 = tail call <2 x double> @llvm.x86.sse2.add.sd(<2 x double> %A, <2 x double> %tmp3.i) nounwind readnone ; <<2 x double>> [#uses=1] - %tmp.i = add <2 x double> %0, %C ; <<2 x double>> [#uses=1] + %tmp.i = fadd <2 x double> %0, %C ; <<2 x double>> [#uses=1] ret <2 x double> %tmp.i } diff --git a/test/CodeGen/X86/2009-03-03-BitcastLongDouble.ll b/test/CodeGen/X86/2009-03-03-BitcastLongDouble.ll index 1eefaa9..6f16ced 100644 --- a/test/CodeGen/X86/2009-03-03-BitcastLongDouble.ll +++ b/test/CodeGen/X86/2009-03-03-BitcastLongDouble.ll @@ -6,7 +6,7 @@ define i32 @x(i32 %y) nounwind readnone { entry: %tmp14 = zext i32 %y to i80 ; <i80> [#uses=1] %tmp15 = bitcast i80 %tmp14 to x86_fp80 ; <x86_fp80> [#uses=1] - %add = add x86_fp80 %tmp15, 0xK3FFF8000000000000000 ; <x86_fp80> [#uses=1] + %add = fadd x86_fp80 %tmp15, 0xK3FFF8000000000000000 ; <x86_fp80> [#uses=1] %tmp11 = bitcast x86_fp80 %add to i80 ; <i80> [#uses=1] %tmp10 = trunc i80 %tmp11 to i32 ; <i32> [#uses=1] ret i32 %tmp10 diff --git a/test/CodeGen/X86/2009-03-09-SpillerBug.ll b/test/CodeGen/X86/2009-03-09-SpillerBug.ll index 14bdcc3..2ccd771 100644 --- a/test/CodeGen/X86/2009-03-09-SpillerBug.ll +++ b/test/CodeGen/X86/2009-03-09-SpillerBug.ll @@ -5,7 +5,7 @@ define void @__mulxc3(x86_fp80 %b) nounwind { entry: %call = call x86_fp80 @y(x86_fp80* null, x86_fp80* null) ; <x86_fp80> [#uses=0] %cmp = fcmp ord x86_fp80 %b, 0xK00000000000000000000 ; <i1> [#uses=1] - %sub = sub x86_fp80 %b, %b ; <x86_fp80> [#uses=1] + %sub = fsub x86_fp80 %b, %b ; <x86_fp80> [#uses=1] %cmp7 = fcmp uno x86_fp80 %sub, 0xK00000000000000000000 ; <i1> [#uses=1] %and12 = and i1 %cmp7, %cmp ; <i1> [#uses=1] %and = zext i1 %and12 to i32 ; <i32> [#uses=1] diff --git a/test/CodeGen/X86/2009-03-12-CPAlignBug.ll b/test/CodeGen/X86/2009-03-12-CPAlignBug.ll index 75af992..ec060e4 100644 --- a/test/CodeGen/X86/2009-03-12-CPAlignBug.ll +++ b/test/CodeGen/X86/2009-03-12-CPAlignBug.ll @@ -19,18 +19,18 @@ bb1: ; preds = %newFuncRoot %0 = tail call double @llvm.sqrt.f64(double %.reload8) ; <double> [#uses=1] %1 = fptrunc x86_fp80 %.reload6 to double ; <double> [#uses=1] %2 = tail call double @fabs(double %1) nounwind readnone ; <double> [#uses=1] - %3 = add double %0, %2 ; <double> [#uses=1] + %3 = fadd double %0, %2 ; <double> [#uses=1] %4 = tail call double @llvm.pow.f64(double %3, double 0x3FD5555555555555) ; <double> [#uses=1] %5 = fpext double %4 to x86_fp80 ; <x86_fp80> [#uses=2] %6 = fdiv x86_fp80 %.reload5, %5 ; <x86_fp80> [#uses=1] - %7 = add x86_fp80 %5, %6 ; <x86_fp80> [#uses=1] + %7 = fadd x86_fp80 %5, %6 ; <x86_fp80> [#uses=1] %8 = fptrunc x86_fp80 %7 to double ; <double> [#uses=1] %9 = fcmp olt x86_fp80 %.reload6, 0xK00000000000000000000 ; <i1> [#uses=1] %iftmp.6.0 = select i1 %9, double 1.000000e+00, double -1.000000e+00 ; <double> [#uses=1] - %10 = mul double %8, %iftmp.6.0 ; <double> [#uses=1] + %10 = fmul double %8, %iftmp.6.0 ; <double> [#uses=1] %11 = fpext double %10 to x86_fp80 ; <x86_fp80> [#uses=1] %12 = fdiv x86_fp80 %.reload, 0xKC000C000000000000000 ; <x86_fp80> [#uses=1] - %13 = add x86_fp80 %11, %12 ; <x86_fp80> [#uses=1] + %13 = fadd x86_fp80 %11, %12 ; <x86_fp80> [#uses=1] %14 = fptrunc x86_fp80 %13 to double ; <double> [#uses=1] store double %14, double* %x, align 1 br label %bb1.ret.exitStub diff --git 
a/test/CodeGen/X86/2009-03-23-MultiUseSched.ll b/test/CodeGen/X86/2009-03-23-MultiUseSched.ll index a963145..b30d41e 100644 --- a/test/CodeGen/X86/2009-03-23-MultiUseSched.ll +++ b/test/CodeGen/X86/2009-03-23-MultiUseSched.ll @@ -1,4 +1,4 @@ -; RUN: llvm-as < %s | llc -march=x86-64 -relocation-model=static -stats -info-output-file - > %t +; RUN: llvm-as < %s | llc -mtriple=x86_64-linux -relocation-model=static -stats -info-output-file - > %t ; RUN: not grep spill %t ; RUN: not grep {%rsp} %t ; RUN: not grep {%rbp} %t diff --git a/test/CodeGen/X86/2009-06-03-Win64DisableRedZone.ll b/test/CodeGen/X86/2009-06-03-Win64DisableRedZone.ll new file mode 100644 index 0000000..c628b8a --- /dev/null +++ b/test/CodeGen/X86/2009-06-03-Win64DisableRedZone.ll @@ -0,0 +1,9 @@ +; RUN: llvm-as < %s | llc | grep "subq.*\\\$8, \\\%rsp" +target triple = "x86_64-mingw64" + +define x86_fp80 @a(i64 %x) nounwind readnone { +entry: + %conv = sitofp i64 %x to x86_fp80 ; <x86_fp80> [#uses=1] + ret x86_fp80 %conv +} + diff --git a/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll b/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll new file mode 100644 index 0000000..33d7972 --- /dev/null +++ b/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll @@ -0,0 +1,12 @@ +; RUN: llvm-as < %s | llc -o %t1 -f +; RUN: grep "subq.*\\\$40, \\\%rsp" %t1 +; RUN: grep "movaps \\\%xmm8, \\\(\\\%rsp\\\)" %t1 +; RUN: grep "movaps \\\%xmm7, 16\\\(\\\%rsp\\\)" %t1 +target triple = "x86_64-mingw64" + +define i32 @a() nounwind { +entry: + tail call void asm sideeffect "", "~{xmm7},~{xmm8},~{dirflag},~{fpsr},~{flags}"() nounwind + ret i32 undef +} + diff --git a/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll b/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll new file mode 100644 index 0000000..fa90fa9 --- /dev/null +++ b/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll @@ -0,0 +1,48 @@ +; RUN: llvm-as < %s | llc -march=x86 + + type { %struct.GAP } ; type %0 + type { i16, i8, i8 } ; type %1 + type { [2 x i32], [2 x i32] } ; type %2 + type { %struct.rec* } ; type %3 + %struct.FILE_POS = type { i8, i8, i16, i32 } + %struct.FIRST_UNION = type { %struct.FILE_POS } + %struct.FOURTH_UNION = type { %struct.STYLE } + %struct.GAP = type { i8, i8, i16 } + %struct.LIST = type { %struct.rec*, %struct.rec* } + %struct.SECOND_UNION = type { %1 } + %struct.STYLE = type { %0, %0, i16, i16, i32 } + %struct.THIRD_UNION = type { %2 } + %struct.head_type = type { [2 x %struct.LIST], %struct.FIRST_UNION, %struct.SECOND_UNION, %struct.THIRD_UNION, %struct.FOURTH_UNION, %struct.rec*, %3, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, i32 } + %struct.rec = type { %struct.head_type } + +define fastcc void @MinSize(%struct.rec* %x) nounwind { +entry: + %tmp13 = load i8* undef, align 4 ; <i8> [#uses=3] + %tmp14 = zext i8 %tmp13 to i32 ; <i32> [#uses=2] + switch i32 %tmp14, label %bb1109 [ + i32 42, label %bb246 + ] + +bb246: ; preds = %entry, %entry + switch i8 %tmp13, label %bb249 [ + i8 42, label %bb269 + i8 44, label %bb269 + ] + +bb249: ; preds = %bb246 + %tmp3240 = icmp eq i8 %tmp13, 0 ; <i1> [#uses=1] + br i1 %tmp3240, label %bb974, label %bb269 + +bb269: + %tmp3424 = getelementptr %struct.rec* %x, i32 0, i32 0, i32 0, i32 0, i32 1 ; <%struct.rec**> [#uses=0] + unreachable + +bb974: + unreachable + +bb1109: ; preds = %entry + call fastcc void @Image(i32 %tmp14) nounwind ; <i8*> [#uses=0] + unreachable +} + +declare fastcc void @Image(i32) nounwind diff --git a/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll 
b/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll new file mode 100644 index 0000000..94df530 --- /dev/null +++ b/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll @@ -0,0 +1,7 @@ +; RUN: llvm-as < %s | llc -march=x86 -mattr=+mmx | not grep movl + +define <8 x i8> @a(i8 zeroext %x) nounwind { + %r = insertelement <8 x i8> undef, i8 %x, i32 0 + ret <8 x i8> %r +} + diff --git a/test/CodeGen/X86/2009-06-05-VZextByteShort.ll b/test/CodeGen/X86/2009-06-05-VZextByteShort.ll new file mode 100644 index 0000000..220423a --- /dev/null +++ b/test/CodeGen/X86/2009-06-05-VZextByteShort.ll @@ -0,0 +1,37 @@ +; RUN: llvm-as < %s | llc -march=x86 -mattr=+mmx,+sse2 > %t1 +; RUN: grep movzwl %t1 | count 2 +; RUN: grep movzbl %t1 | count 2 +; RUN: grep movd %t1 | count 4 + +define <4 x i16> @a(i32* %x1) nounwind { + %x2 = load i32* %x1 + %x3 = lshr i32 %x2, 1 + %x = trunc i32 %x3 to i16 + %r = insertelement <4 x i16> zeroinitializer, i16 %x, i32 0 + ret <4 x i16> %r +} + +define <8 x i16> @b(i32* %x1) nounwind { + %x2 = load i32* %x1 + %x3 = lshr i32 %x2, 1 + %x = trunc i32 %x3 to i16 + %r = insertelement <8 x i16> zeroinitializer, i16 %x, i32 0 + ret <8 x i16> %r +} + +define <8 x i8> @c(i32* %x1) nounwind { + %x2 = load i32* %x1 + %x3 = lshr i32 %x2, 1 + %x = trunc i32 %x3 to i8 + %r = insertelement <8 x i8> zeroinitializer, i8 %x, i32 0 + ret <8 x i8> %r +} + +define <16 x i8> @d(i32* %x1) nounwind { + %x2 = load i32* %x1 + %x3 = lshr i32 %x2, 1 + %x = trunc i32 %x3 to i8 + %r = insertelement <16 x i8> zeroinitializer, i8 %x, i32 0 + ret <16 x i8> %r +} + diff --git a/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll b/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll new file mode 100644 index 0000000..2e3f195 --- /dev/null +++ b/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll @@ -0,0 +1,11 @@ +; RUN: llvm-as < %s | llc + +define <2 x i64> @_mm_insert_epi16(<2 x i64> %a, i32 %b, i32 %imm) nounwind readnone { +entry: + %conv = bitcast <2 x i64> %a to <8 x i16> ; <<8 x i16>> [#uses=1] + %conv2 = trunc i32 %b to i16 ; <i16> [#uses=1] + %and = and i32 %imm, 7 ; <i32> [#uses=1] + %vecins = insertelement <8 x i16> %conv, i16 %conv2, i32 %and ; <<8 x i16>> [#uses=1] + %conv6 = bitcast <8 x i16> %vecins to <2 x i64> ; <<2 x i64>> [#uses=1] + ret <2 x i64> %conv6 +} diff --git a/test/CodeGen/X86/2009-06-05-sitofpCrash.ll b/test/CodeGen/X86/2009-06-05-sitofpCrash.ll new file mode 100644 index 0000000..589a880 --- /dev/null +++ b/test/CodeGen/X86/2009-06-05-sitofpCrash.ll @@ -0,0 +1,13 @@ +; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse +; PR2598 + +define <2 x float> @a(<2 x i32> %i) nounwind { + %r = sitofp <2 x i32> %i to <2 x float> + ret <2 x float> %r +} + +define <2 x i32> @b(<2 x float> %i) nounwind { + %r = fptosi <2 x float> %i to <2 x i32> + ret <2 x i32> %r +} + diff --git a/test/CodeGen/X86/2009-06-06-ConcatVectors.ll b/test/CodeGen/X86/2009-06-06-ConcatVectors.ll new file mode 100644 index 0000000..a46fd1a --- /dev/null +++ b/test/CodeGen/X86/2009-06-06-ConcatVectors.ll @@ -0,0 +1,8 @@ +; RUN: llvm-as < %s | llc + +define <2 x i64> @_mm_movpi64_pi64(<1 x i64> %a, <1 x i64> %b) nounwind readnone { +entry: + %0 = shufflevector <1 x i64> %a, <1 x i64> %b, <2 x i32> <i32 0, i32 1> + ret <2 x i64> %0 +} + diff --git a/test/CodeGen/X86/abi-isel.ll b/test/CodeGen/X86/abi-isel.ll index f1fec3f..513599c 100644 --- a/test/CodeGen/X86/abi-isel.ll +++ b/test/CodeGen/X86/abi-isel.ll @@ -141,26 +141,6 @@ ; RUN: not grep @PLTOFF %t ; RUN: grep {call \\\*} %t | count 10 ; RUN: not grep {%rip} %t -; RUN: 
llvm-as < %s | llc -mtriple=x86_64-apple-darwin -march=x86-64 -relocation-model=static -code-model=small > %t -; RUN: not grep leal %t -; RUN: grep movl %t | count 91 -; RUN: not grep addl %t -; RUN: not grep subl %t -; RUN: grep leaq %t | count 70 -; RUN: grep movq %t | count 56 -; RUN: grep addq %t | count 20 -; RUN: grep subq %t | count 14 -; RUN: not grep movabs %t -; RUN: not grep largecomm %t -; RUN: not grep _GLOBAL_OFFSET_TABLE_ %t -; RUN: not grep @GOT %t -; RUN: not grep @GOTOFF %t -; RUN: not grep @GOTPCREL %t -; RUN: not grep @GOTPLT %t -; RUN: not grep @PLT %t -; RUN: not grep @PLTOFF %t -; RUN: grep {call \\\*} %t | count 10 -; RUN: grep {%rip} %t | count 139 ; RUN: llvm-as < %s | llc -mtriple=x86_64-apple-darwin -march=x86-64 -relocation-model=dynamic-no-pic -code-model=small > %t ; RUN: not grep leal %t ; RUN: grep movl %t | count 95 diff --git a/test/CodeGen/X86/break-anti-dependencies.ll b/test/CodeGen/X86/break-anti-dependencies.ll index b432c39..b9ce10f 100644 --- a/test/CodeGen/X86/break-anti-dependencies.ll +++ b/test/CodeGen/X86/break-anti-dependencies.ll @@ -8,18 +8,18 @@ define void @goo(double* %r, double* %p, double* %q) nounwind { entry: %0 = load double* %p, align 8 - %1 = add double %0, 1.100000e+00 - %2 = mul double %1, 1.200000e+00 - %3 = add double %2, 1.300000e+00 - %4 = mul double %3, 1.400000e+00 - %5 = add double %4, 1.500000e+00 + %1 = fadd double %0, 1.100000e+00 + %2 = fmul double %1, 1.200000e+00 + %3 = fadd double %2, 1.300000e+00 + %4 = fmul double %3, 1.400000e+00 + %5 = fadd double %4, 1.500000e+00 %6 = fptosi double %5 to i32 %7 = load double* %r, align 8 - %8 = add double %7, 7.100000e+00 - %9 = mul double %8, 7.200000e+00 - %10 = add double %9, 7.300000e+00 - %11 = mul double %10, 7.400000e+00 - %12 = add double %11, 7.500000e+00 + %8 = fadd double %7, 7.100000e+00 + %9 = fmul double %8, 7.200000e+00 + %10 = fadd double %9, 7.300000e+00 + %11 = fmul double %10, 7.400000e+00 + %12 = fadd double %11, 7.500000e+00 %13 = fptosi double %12 to i32 %14 = icmp slt i32 %6, %13 br i1 %14, label %bb, label %return diff --git a/test/CodeGen/X86/coalescer-commute1.ll b/test/CodeGen/X86/coalescer-commute1.ll index 0fae2a6..9939424 100644 --- a/test/CodeGen/X86/coalescer-commute1.ll +++ b/test/CodeGen/X86/coalescer-commute1.ll @@ -15,7 +15,7 @@ bb: ; preds = %bb, %entry %tmp2 = getelementptr i32* %source, i32 %neuron.0 ; <i32*> [#uses=1] %tmp3 = load i32* %tmp2, align 4 ; <i32> [#uses=1] %tmp34 = sitofp i32 %tmp3 to float ; <float> [#uses=1] - %tmp6 = add float %tmp34, %thesum.0 ; <float> [#uses=2] + %tmp6 = fadd float %tmp34, %thesum.0 ; <float> [#uses=2] %indvar.next = add i32 %neuron.0, 1 ; <i32> [#uses=2] %exitcond = icmp eq i32 %indvar.next, %tmp10 ; <i1> [#uses=1] br i1 %exitcond, label %bb13, label %bb diff --git a/test/CodeGen/X86/coalescer-commute2.ll b/test/CodeGen/X86/coalescer-commute2.ll index ce4abf1..c67e0f5 100644 --- a/test/CodeGen/X86/coalescer-commute2.ll +++ b/test/CodeGen/X86/coalescer-commute2.ll @@ -28,7 +28,7 @@ define <4 x float> @test3(<4 x float> %V) { entry: %tmp8 = shufflevector <4 x float> %V, <4 x float> undef, <4 x i32> < i32 3, i32 2, i32 1, i32 0 > - %add = add <4 x float> %tmp8, %V + %add = fadd <4 x float> %tmp8, %V ret <4 x float> %add } diff --git a/test/CodeGen/X86/coalescer-commute4.ll b/test/CodeGen/X86/coalescer-commute4.ll index 7299aca..9628f93 100644 --- a/test/CodeGen/X86/coalescer-commute4.ll +++ b/test/CodeGen/X86/coalescer-commute4.ll @@ -18,8 +18,8 @@ bb: ; preds = %bb, %bb.preheader %tmp45 = sitofp i32 
%tmp4 to float ; <float> [#uses=1] %tmp8 = getelementptr float* %y, i32 %i.0.reg2mem.0 ; <float*> [#uses=1] %tmp9 = load float* %tmp8, align 4 ; <float> [#uses=1] - %tmp11 = mul float %tmp9, %tmp45 ; <float> [#uses=1] - %tmp14 = add float %tmp11, %res.0.reg2mem.0 ; <float> [#uses=2] + %tmp11 = fmul float %tmp9, %tmp45 ; <float> [#uses=1] + %tmp14 = fadd float %tmp11, %res.0.reg2mem.0 ; <float> [#uses=2] %indvar.next = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2] %exitcond = icmp eq i32 %indvar.next, %umax ; <i1> [#uses=1] br i1 %exitcond, label %bb23, label %bb diff --git a/test/CodeGen/X86/complex-fca.ll b/test/CodeGen/X86/complex-fca.ll index 29eb6ee..05adb50 100644 --- a/test/CodeGen/X86/complex-fca.ll +++ b/test/CodeGen/X86/complex-fca.ll @@ -4,7 +4,7 @@ define void @ccosl({ x86_fp80, x86_fp80 }* noalias sret %agg.result, { x86_fp80, entry: %z8 = extractvalue { x86_fp80, x86_fp80 } %z, 0 %z9 = extractvalue { x86_fp80, x86_fp80 } %z, 1 - %0 = sub x86_fp80 0xK80000000000000000000, %z9 + %0 = fsub x86_fp80 0xK80000000000000000000, %z9 %insert = insertvalue { x86_fp80, x86_fp80 } undef, x86_fp80 %0, 0 %insert7 = insertvalue { x86_fp80, x86_fp80 } %insert, x86_fp80 %z8, 1 call void @ccoshl({ x86_fp80, x86_fp80 }* noalias sret %agg.result, { x86_fp80, x86_fp80 } %insert7) nounwind diff --git a/test/CodeGen/X86/constant-pool-remat-0.ll b/test/CodeGen/X86/constant-pool-remat-0.ll index 40caaa6..80be854 100644 --- a/test/CodeGen/X86/constant-pool-remat-0.ll +++ b/test/CodeGen/X86/constant-pool-remat-0.ll @@ -6,8 +6,8 @@ declare float @qux(float %y) define float @array(float %a) nounwind { - %n = mul float %a, 9.0 + %n = fmul float %a, 9.0 %m = call float @qux(float %n) - %o = mul float %m, 9.0 + %o = fmul float %m, 9.0 ret float %o } diff --git a/test/CodeGen/X86/dagcombine-buildvector.ll b/test/CodeGen/X86/dagcombine-buildvector.ll index c89a296..b96fdfc 100644 --- a/test/CodeGen/X86/dagcombine-buildvector.ll +++ b/test/CodeGen/X86/dagcombine-buildvector.ll @@ -1,13 +1,25 @@ -; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -o %t -f +; RUN: llvm-as < %s | llc -march=x86 -mcpu=penryn -disable-mmx -o %t -f ; RUN: grep unpcklpd %t | count 1 ; RUN: grep movapd %t | count 1 +; RUN: grep movaps %t | count 1 ; Shows a dag combine bug that will generate an illegal build vector ; with v2i64 build_vector i32, i32. 
-define void @test(<2 x double>* %dst, <4 x double> %src) { +define void @test(<2 x double>* %dst, <4 x double> %src) nounwind { entry: %tmp7.i = shufflevector <4 x double> %src, <4 x double> undef, <2 x i32> < i32 0, i32 2 > store <2 x double> %tmp7.i, <2 x double>* %dst ret void } + +define void @test2(<4 x i16>* %src, <4 x i32>* %dest) nounwind { +entry: + %tmp1 = load <4 x i16>* %src + %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef> + %0 = tail call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %tmp3) + store <4 x i32> %0, <4 x i32>* %dest + ret void +} + +declare <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16>) nounwind readnone diff --git a/test/CodeGen/X86/extract-combine.ll b/test/CodeGen/X86/extract-combine.ll index 9172dce..842ec24 100644 --- a/test/CodeGen/X86/extract-combine.ll +++ b/test/CodeGen/X86/extract-combine.ll @@ -7,9 +7,9 @@ entry: %tmp518 = shufflevector <16 x float> %tmp74.i25762, <16 x float> undef, <4 x i32> <i32 12, i32 13, i32 14, i32 15> ; <<4 x float>> [#uses=1] %movss.i25611 = shufflevector <4 x float> zeroinitializer, <4 x float> %tmp518, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1] %conv3.i25615 = shufflevector <4 x float> %movss.i25611, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0> ; <<4 x float>> [#uses=1] - %sub.i25620 = sub <4 x float> %conv3.i25615, zeroinitializer ; <<4 x float>> [#uses=1] - %mul.i25621 = mul <4 x float> zeroinitializer, %sub.i25620 ; <<4 x float>> [#uses=1] - %add.i25622 = add <4 x float> zeroinitializer, %mul.i25621 ; <<4 x float>> [#uses=1] + %sub.i25620 = fsub <4 x float> %conv3.i25615, zeroinitializer ; <<4 x float>> [#uses=1] + %mul.i25621 = fmul <4 x float> zeroinitializer, %sub.i25620 ; <<4 x float>> [#uses=1] + %add.i25622 = fadd <4 x float> zeroinitializer, %mul.i25621 ; <<4 x float>> [#uses=1] store <4 x float> %add.i25622, <4 x float>* null unreachable } diff --git a/test/CodeGen/X86/fabs.ll b/test/CodeGen/X86/fabs.ll index 0646a79..7ac8e04 100644 --- a/test/CodeGen/X86/fabs.ll +++ b/test/CodeGen/X86/fabs.ll @@ -16,7 +16,7 @@ define float @test1(float %X) { define double @test2(double %X) { %Y = fcmp oge double %X, -0.0 - %Z = sub double -0.0, %X + %Z = fsub double -0.0, %X %Q = select i1 %Y, double %X, double %Z ret double %Q } diff --git a/test/CodeGen/X86/fast-isel.ll b/test/CodeGen/X86/fast-isel.ll index 2ee2c83..a9a016b 100644 --- a/test/CodeGen/X86/fast-isel.ll +++ b/test/CodeGen/X86/fast-isel.ll @@ -32,10 +32,10 @@ entry: br label %fast fast: - %t0 = add double %r, %s - %t1 = mul double %t0, %s - %t2 = sub double %t1, %s - %t3 = add double %t2, 707.0 + %t0 = fadd double %r, %s + %t1 = fmul double %t0, %s + %t2 = fsub double %t1, %s + %t3 = fadd double %t2, 707.0 br label %exit exit: diff --git a/test/CodeGen/X86/fmul-zero.ll b/test/CodeGen/X86/fmul-zero.ll new file mode 100644 index 0000000..8f705a4 --- /dev/null +++ b/test/CodeGen/X86/fmul-zero.ll @@ -0,0 +1,9 @@ +; RUN: llvm-as < %s | llc -march=x86-64 -enable-unsafe-fp-math | not grep mulps +; RUN: llvm-as < %s | llc -march=x86-64 | grep mulps + +define void @test14(<4 x float>*) nounwind { + load <4 x float>* %0, align 1 + fmul <4 x float> %2, zeroinitializer + store <4 x float> %3, <4 x float>* %0, align 1 + ret void +} diff --git a/test/CodeGen/X86/fold-pcmpeqd-0.ll b/test/CodeGen/X86/fold-pcmpeqd-0.ll index 066d38e..f558aca 100644 --- a/test/CodeGen/X86/fold-pcmpeqd-0.ll +++ b/test/CodeGen/X86/fold-pcmpeqd-0.ll @@ -26,23 +26,23 @@ forcond: ;
preds = %entry forbody: ; preds = %forcond %bitcast204.i313 = bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>> [#uses=1] - %mul233 = mul <4 x float> %bitcast204.i313, zeroinitializer ; <<4 x float>> [#uses=1] - %mul257 = mul <4 x float> %mul233, zeroinitializer ; <<4 x float>> [#uses=1] - %mul275 = mul <4 x float> %mul257, zeroinitializer ; <<4 x float>> [#uses=1] + %mul233 = fmul <4 x float> %bitcast204.i313, zeroinitializer ; <<4 x float>> [#uses=1] + %mul257 = fmul <4 x float> %mul233, zeroinitializer ; <<4 x float>> [#uses=1] + %mul275 = fmul <4 x float> %mul257, zeroinitializer ; <<4 x float>> [#uses=1] %tmp51 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %mul275, <4 x float> zeroinitializer) nounwind ; <<4 x float>> [#uses=1] %bitcast198.i182 = bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>> [#uses=0] %bitcast204.i185 = bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>> [#uses=1] %tmp69 = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> zeroinitializer) nounwind ; <<4 x i32>> [#uses=1] %tmp70 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %tmp69) nounwind ; <<4 x float>> [#uses=1] - %sub140.i78 = sub <4 x float> zeroinitializer, %tmp70 ; <<4 x float>> [#uses=2] - %mul166.i86 = mul <4 x float> zeroinitializer, %sub140.i78 ; <<4 x float>> [#uses=1] - %add167.i87 = add <4 x float> %mul166.i86, < float 0x3FE62ACB60000000, float 0x3FE62ACB60000000, float 0x3FE62ACB60000000, float 0x3FE62ACB60000000 > ; <<4 x float>> [#uses=1] - %mul171.i88 = mul <4 x float> %add167.i87, %sub140.i78 ; <<4 x float>> [#uses=1] - %add172.i89 = add <4 x float> %mul171.i88, < float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000 > ; <<4 x float>> [#uses=1] + %sub140.i78 = fsub <4 x float> zeroinitializer, %tmp70 ; <<4 x float>> [#uses=2] + %mul166.i86 = fmul <4 x float> zeroinitializer, %sub140.i78 ; <<4 x float>> [#uses=1] + %add167.i87 = fadd <4 x float> %mul166.i86, < float 0x3FE62ACB60000000, float 0x3FE62ACB60000000, float 0x3FE62ACB60000000, float 0x3FE62ACB60000000 > ; <<4 x float>> [#uses=1] + %mul171.i88 = fmul <4 x float> %add167.i87, %sub140.i78 ; <<4 x float>> [#uses=1] + %add172.i89 = fadd <4 x float> %mul171.i88, < float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000 > ; <<4 x float>> [#uses=1] %bitcast176.i90 = bitcast <4 x float> %add172.i89 to <4 x i32> ; <<4 x i32>> [#uses=1] %andnps178.i92 = and <4 x i32> %bitcast176.i90, zeroinitializer ; <<4 x i32>> [#uses=1] %bitcast179.i93 = bitcast <4 x i32> %andnps178.i92 to <4 x float> ; <<4 x float>> [#uses=1] - %mul186.i96 = mul <4 x float> %bitcast179.i93, zeroinitializer ; <<4 x float>> [#uses=1] + %mul186.i96 = fmul <4 x float> %bitcast179.i93, zeroinitializer ; <<4 x float>> [#uses=1] %bitcast190.i98 = bitcast <4 x float> %mul186.i96 to <4 x i32> ; <<4 x i32>> [#uses=1] %andnps192.i100 = and <4 x i32> %bitcast190.i98, zeroinitializer ; <<4 x i32>> [#uses=1] %xorps.i102 = xor <4 x i32> zeroinitializer, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1] @@ -50,15 +50,15 @@ forbody: ; preds = %forcond %bitcast204.i104 = bitcast <4 x i32> %orps203.i103 to <4 x float> ; <<4 x float>> [#uses=1] %cmple.i = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> zeroinitializer, <4 x float> %tmp51, i8 2) nounwind ; <<4 x float>> [#uses=1] %tmp80 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> zeroinitializer) nounwind ; <<4 x float>> [#uses=1] - %sub140.i = sub <4 x float> zeroinitializer, 
%tmp80 ; <<4 x float>> [#uses=1] + %sub140.i = fsub <4 x float> zeroinitializer, %tmp80 ; <<4 x float>> [#uses=1] %bitcast148.i = bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>> [#uses=1] %andnps150.i = and <4 x i32> %bitcast148.i, < i32 -2139095041, i32 -2139095041, i32 -2139095041, i32 -2139095041 > ; <<4 x i32>> [#uses=0] - %mul171.i = mul <4 x float> zeroinitializer, %sub140.i ; <<4 x float>> [#uses=1] - %add172.i = add <4 x float> %mul171.i, < float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000 > ; <<4 x float>> [#uses=1] + %mul171.i = fmul <4 x float> zeroinitializer, %sub140.i ; <<4 x float>> [#uses=1] + %add172.i = fadd <4 x float> %mul171.i, < float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000 > ; <<4 x float>> [#uses=1] %bitcast176.i = bitcast <4 x float> %add172.i to <4 x i32> ; <<4 x i32>> [#uses=1] %andnps178.i = and <4 x i32> %bitcast176.i, zeroinitializer ; <<4 x i32>> [#uses=1] %bitcast179.i = bitcast <4 x i32> %andnps178.i to <4 x float> ; <<4 x float>> [#uses=1] - %mul186.i = mul <4 x float> %bitcast179.i, zeroinitializer ; <<4 x float>> [#uses=1] + %mul186.i = fmul <4 x float> %bitcast179.i, zeroinitializer ; <<4 x float>> [#uses=1] %bitcast189.i = bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>> [#uses=0] %bitcast190.i = bitcast <4 x float> %mul186.i to <4 x i32> ; <<4 x i32>> [#uses=1] %andnps192.i = and <4 x i32> %bitcast190.i, zeroinitializer ; <<4 x i32>> [#uses=1] @@ -66,9 +66,9 @@ forbody: ; preds = %forcond %xorps.i = xor <4 x i32> %bitcast198.i, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1] %orps203.i = or <4 x i32> %andnps192.i, %xorps.i ; <<4 x i32>> [#uses=1] %bitcast204.i = bitcast <4 x i32> %orps203.i to <4 x float> ; <<4 x float>> [#uses=1] - %mul307 = mul <4 x float> %bitcast204.i185, zeroinitializer ; <<4 x float>> [#uses=1] - %mul310 = mul <4 x float> %bitcast204.i104, zeroinitializer ; <<4 x float>> [#uses=2] - %mul313 = mul <4 x float> %bitcast204.i, zeroinitializer ; <<4 x float>> [#uses=1] + %mul307 = fmul <4 x float> %bitcast204.i185, zeroinitializer ; <<4 x float>> [#uses=1] + %mul310 = fmul <4 x float> %bitcast204.i104, zeroinitializer ; <<4 x float>> [#uses=2] + %mul313 = fmul <4 x float> %bitcast204.i, zeroinitializer ; <<4 x float>> [#uses=1] %tmp82 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %mul307, <4 x float> zeroinitializer) nounwind ; <<4 x float>> [#uses=1] %bitcast11.i15 = bitcast <4 x float> %tmp82 to <4 x i32> ; <<4 x i32>> [#uses=1] %andnps.i17 = and <4 x i32> %bitcast11.i15, zeroinitializer ; <<4 x i32>> [#uses=1] diff --git a/test/CodeGen/X86/fold-pcmpeqd-2.ll b/test/CodeGen/X86/fold-pcmpeqd-2.ll index de6ba6c..2b75781 100644 --- a/test/CodeGen/X86/fold-pcmpeqd-2.ll +++ b/test/CodeGen/X86/fold-pcmpeqd-2.ll @@ -28,22 +28,22 @@ forbody: ; preds = %forcond %tmp78 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> < float 1.280000e+02, float 1.280000e+02, float 1.280000e+02, float 1.280000e+02 >, <4 x float> zeroinitializer) nounwind ; <<4 x float>> [#uses=2] %tmp79 = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %tmp78) nounwind ; <<4 x i32>> [#uses=1] %tmp80 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %tmp79) nounwind ; <<4 x float>> [#uses=1] - %sub140.i = sub <4 x float> %tmp78, %tmp80 ; <<4 x float>> [#uses=2] - %mul166.i = mul <4 x float> zeroinitializer, %sub140.i ; <<4 x float>> [#uses=1] - %add167.i = add <4 x float> %mul166.i, < float 0x3FE62ACB60000000, 
float 0x3FE62ACB60000000, float 0x3FE62ACB60000000, float 0x3FE62ACB60000000 > ; <<4 x float>> [#uses=1] - %mul171.i = mul <4 x float> %add167.i, %sub140.i ; <<4 x float>> [#uses=1] - %add172.i = add <4 x float> %mul171.i, < float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000 > ; <<4 x float>> [#uses=1] + %sub140.i = fsub <4 x float> %tmp78, %tmp80 ; <<4 x float>> [#uses=2] + %mul166.i = fmul <4 x float> zeroinitializer, %sub140.i ; <<4 x float>> [#uses=1] + %add167.i = fadd <4 x float> %mul166.i, < float 0x3FE62ACB60000000, float 0x3FE62ACB60000000, float 0x3FE62ACB60000000, float 0x3FE62ACB60000000 > ; <<4 x float>> [#uses=1] + %mul171.i = fmul <4 x float> %add167.i, %sub140.i ; <<4 x float>> [#uses=1] + %add172.i = fadd <4 x float> %mul171.i, < float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000 > ; <<4 x float>> [#uses=1] %bitcast176.i = bitcast <4 x float> %add172.i to <4 x i32> ; <<4 x i32>> [#uses=1] %andnps178.i = and <4 x i32> %bitcast176.i, zeroinitializer ; <<4 x i32>> [#uses=1] %bitcast179.i = bitcast <4 x i32> %andnps178.i to <4 x float> ; <<4 x float>> [#uses=1] - %mul186.i = mul <4 x float> %bitcast179.i, zeroinitializer ; <<4 x float>> [#uses=1] + %mul186.i = fmul <4 x float> %bitcast179.i, zeroinitializer ; <<4 x float>> [#uses=1] %bitcast190.i = bitcast <4 x float> %mul186.i to <4 x i32> ; <<4 x i32>> [#uses=1] %andnps192.i = and <4 x i32> %bitcast190.i, zeroinitializer ; <<4 x i32>> [#uses=1] %xorps.i = xor <4 x i32> zeroinitializer, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1] %orps203.i = or <4 x i32> %andnps192.i, %xorps.i ; <<4 x i32>> [#uses=1] %bitcast204.i = bitcast <4 x i32> %orps203.i to <4 x float> ; <<4 x float>> [#uses=1] - %mul310 = mul <4 x float> %bitcast204.i104, zeroinitializer ; <<4 x float>> [#uses=2] - %mul313 = mul <4 x float> %bitcast204.i, zeroinitializer ; <<4 x float>> [#uses=1] + %mul310 = fmul <4 x float> %bitcast204.i104, zeroinitializer ; <<4 x float>> [#uses=2] + %mul313 = fmul <4 x float> %bitcast204.i, zeroinitializer ; <<4 x float>> [#uses=1] %cmpunord.i11 = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> zeroinitializer, <4 x float> zeroinitializer, i8 3) nounwind ; <<4 x float>> [#uses=1] %bitcast6.i13 = bitcast <4 x float> %cmpunord.i11 to <4 x i32> ; <<4 x i32>> [#uses=2] %andps.i14 = and <4 x i32> zeroinitializer, %bitcast6.i13 ; <<4 x i32>> [#uses=1] diff --git a/test/CodeGen/X86/fp-in-intregs.ll b/test/CodeGen/X86/fp-in-intregs.ll index 1e3ea89..15606c3 100644 --- a/test/CodeGen/X86/fp-in-intregs.ll +++ b/test/CodeGen/X86/fp-in-intregs.ll @@ -5,7 +5,7 @@ define i32 @test1(float %x) nounwind { entry: - %tmp2 = sub float -0.000000e+00, %x ; <float> [#uses=1] + %tmp2 = fsub float -0.000000e+00, %x ; <float> [#uses=1] %tmp210 = bitcast float %tmp2 to i32 ; <i32> [#uses=1] ret i32 %tmp210 } diff --git a/test/CodeGen/X86/fp-stack-compare.ll b/test/CodeGen/X86/fp-stack-compare.ll index 383549a..4e61d0f 100644 --- a/test/CodeGen/X86/fp-stack-compare.ll +++ b/test/CodeGen/X86/fp-stack-compare.ll @@ -5,7 +5,7 @@ define float @foo(float* %col.2.0) { %tmp = load float* %col.2.0 ; <float> [#uses=3] %tmp16 = fcmp olt float %tmp, 0.000000e+00 ; <i1> [#uses=1] - %tmp20 = sub float -0.000000e+00, %tmp ; <float> [#uses=1] + %tmp20 = fsub float -0.000000e+00, %tmp ; <float> [#uses=1] %iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp ; <float> [#uses=1] ret float %iftmp.2.0 } diff --git a/test/CodeGen/X86/fp_constant_op.ll 
b/test/CodeGen/X86/fp_constant_op.ll index ed02c6a..f2017b9 100644 --- a/test/CodeGen/X86/fp_constant_op.ll +++ b/test/CodeGen/X86/fp_constant_op.ll @@ -5,22 +5,22 @@ define double @foo_add(double %P) { - %tmp.1 = add double %P, 1.230000e+02 ; <double> [#uses=1] + %tmp.1 = fadd double %P, 1.230000e+02 ; <double> [#uses=1] ret double %tmp.1 } define double @foo_mul(double %P) { - %tmp.1 = mul double %P, 1.230000e+02 ; <double> [#uses=1] + %tmp.1 = fmul double %P, 1.230000e+02 ; <double> [#uses=1] ret double %tmp.1 } define double @foo_sub(double %P) { - %tmp.1 = sub double %P, 1.230000e+02 ; <double> [#uses=1] + %tmp.1 = fsub double %P, 1.230000e+02 ; <double> [#uses=1] ret double %tmp.1 } define double @foo_subr(double %P) { - %tmp.1 = sub double 1.230000e+02, %P ; <double> [#uses=1] + %tmp.1 = fsub double 1.230000e+02, %P ; <double> [#uses=1] ret double %tmp.1 } diff --git a/test/CodeGen/X86/fp_load_fold.ll b/test/CodeGen/X86/fp_load_fold.ll index 7c33cb3..655ad3d 100644 --- a/test/CodeGen/X86/fp_load_fold.ll +++ b/test/CodeGen/X86/fp_load_fold.ll @@ -5,25 +5,25 @@ define double @test_add(double %X, double* %P) { %Y = load double* %P ; <double> [#uses=1] - %R = add double %X, %Y ; <double> [#uses=1] + %R = fadd double %X, %Y ; <double> [#uses=1] ret double %R } define double @test_mul(double %X, double* %P) { %Y = load double* %P ; <double> [#uses=1] - %R = mul double %X, %Y ; <double> [#uses=1] + %R = fmul double %X, %Y ; <double> [#uses=1] ret double %R } define double @test_sub(double %X, double* %P) { %Y = load double* %P ; <double> [#uses=1] - %R = sub double %X, %Y ; <double> [#uses=1] + %R = fsub double %X, %Y ; <double> [#uses=1] ret double %R } define double @test_subr(double %X, double* %P) { %Y = load double* %P ; <double> [#uses=1] - %R = sub double %Y, %X ; <double> [#uses=1] + %R = fsub double %Y, %X ; <double> [#uses=1] ret double %R } diff --git a/test/CodeGen/X86/fsxor-alignment.ll b/test/CodeGen/X86/fsxor-alignment.ll index 71007dc..4d25fca 100644 --- a/test/CodeGen/X86/fsxor-alignment.ll +++ b/test/CodeGen/X86/fsxor-alignment.ll @@ -6,8 +6,8 @@ ; and aren't vector-aligned. 
define void @foo(float* %p, float* %q, float %s, float %y) { - %ss = sub float -0.0, %s - %yy = sub float -0.0, %y + %ss = fsub float -0.0, %s + %yy = fsub float -0.0, %y store float %ss, float* %p store float %yy, float* %q ret void diff --git a/test/CodeGen/X86/full-lsr.ll b/test/CodeGen/X86/full-lsr.ll index ee9eaf9..4a85779 100644 --- a/test/CodeGen/X86/full-lsr.ll +++ b/test/CodeGen/X86/full-lsr.ll @@ -13,7 +13,7 @@ bb: ; preds = %bb, %entry %2 = load float* %1, align 4 ; <float> [#uses=1] %3 = getelementptr float* %B, i32 %i.03 ; <float*> [#uses=1] %4 = load float* %3, align 4 ; <float> [#uses=1] - %5 = add float %2, %4 ; <float> [#uses=1] + %5 = fadd float %2, %4 ; <float> [#uses=1] %6 = getelementptr float* %C, i32 %i.03 ; <float*> [#uses=1] store float %5, float* %6, align 4 %7 = add i32 %i.03, 10 ; <i32> [#uses=3] @@ -21,7 +21,7 @@ bb: ; preds = %bb, %entry %9 = load float* %8, align 4 ; <float> [#uses=1] %10 = getelementptr float* %B, i32 %7 ; <float*> [#uses=1] %11 = load float* %10, align 4 ; <float> [#uses=1] - %12 = add float %9, %11 ; <float> [#uses=1] + %12 = fadd float %9, %11 ; <float> [#uses=1] %13 = getelementptr float* %C, i32 %7 ; <float*> [#uses=1] store float %12, float* %13, align 4 %indvar.next = add i32 %i.03, 1 ; <i32> [#uses=2] diff --git a/test/CodeGen/X86/ga-offset.ll b/test/CodeGen/X86/ga-offset.ll index cc93b4c..aaa2f84 100644 --- a/test/CodeGen/X86/ga-offset.ll +++ b/test/CodeGen/X86/ga-offset.ll @@ -2,7 +2,7 @@ ; RUN: not grep lea %t ; RUN: not grep add %t ; RUN: grep mov %t | count 1 -; RUN: llvm-as < %s | llc -march=x86-64 -relocation-model=static > %t +; RUN: llvm-as < %s | llc -mtriple=x86_64-linux -relocation-model=static > %t ; RUN: not grep lea %t ; RUN: not grep add %t ; RUN: grep mov %t | count 1 diff --git a/test/CodeGen/X86/illegal-vector-args-return.ll b/test/CodeGen/X86/illegal-vector-args-return.ll index 8fb6db35..5ed6ddb 100644 --- a/test/CodeGen/X86/illegal-vector-args-return.ll +++ b/test/CodeGen/X86/illegal-vector-args-return.ll @@ -4,11 +4,11 @@ ; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | grep {addps %xmm2, %xmm0} define <4 x double> @foo(<4 x double> %x, <4 x double> %z) { - %y = mul <4 x double> %x, %z + %y = fmul <4 x double> %x, %z ret <4 x double> %y } define <8 x float> @bar(<8 x float> %x, <8 x float> %z) { - %y = add <8 x float> %x, %z + %y = fadd <8 x float> %x, %z ret <8 x float> %y } diff --git a/test/CodeGen/X86/inline-asm-fpstack.ll b/test/CodeGen/X86/inline-asm-fpstack.ll index 91f2f2f..31d94d8 100644 --- a/test/CodeGen/X86/inline-asm-fpstack.ll +++ b/test/CodeGen/X86/inline-asm-fpstack.ll @@ -21,7 +21,7 @@ define void @test4(double %X) { } define void @test5(double %X) { - %Y = add double %X, 123.0 + %Y = fadd double %X, 123.0 call void asm sideeffect "frob ", "{st(0)},~{dirflag},~{fpsr},~{flags}"( double %Y) ret void } diff --git a/test/CodeGen/X86/inline-asm-mrv.ll b/test/CodeGen/X86/inline-asm-mrv.ll index f679c7f..ca39c12 100644 --- a/test/CodeGen/X86/inline-asm-mrv.ll +++ b/test/CodeGen/X86/inline-asm-mrv.ll @@ -21,7 +21,7 @@ define <4 x float> @test2() nounwind { %mrv = call {<4 x float>, <4 x float>} asm "set $0, $1", "=x,=x"() %a = getresult {<4 x float>, <4 x float>} %mrv, 0 %b = getresult {<4 x float>, <4 x float>} %mrv, 1 - %c = add <4 x float> %a, %b + %c = fadd <4 x float> %a, %b ret <4 x float> %c } diff --git a/test/CodeGen/X86/inline-asm-x-scalar.ll b/test/CodeGen/X86/inline-asm-x-scalar.ll index d1bac0c..aafbbd1 100644 --- a/test/CodeGen/X86/inline-asm-x-scalar.ll +++ 
b/test/CodeGen/X86/inline-asm-x-scalar.ll @@ -17,7 +17,7 @@ define void @test3() { define void @test4() { %tmp1 = tail call float asm "", "=x,0,~{dirflag},~{fpsr},~{flags}"( float 0x47EFFFFFE0000000 ); <float> [#uses=1] - %tmp4 = sub float %tmp1, 0x3810000000000000 ; <float> [#uses=1] + %tmp4 = fsub float %tmp1, 0x3810000000000000 ; <float> [#uses=1] tail call void asm sideeffect "", "x,~{dirflag},~{fpsr},~{flags}"( float %tmp4 ) ret void } diff --git a/test/CodeGen/X86/iv-users-in-other-loops.ll b/test/CodeGen/X86/iv-users-in-other-loops.ll index 275feba..2208b2d 100644 --- a/test/CodeGen/X86/iv-users-in-other-loops.ll +++ b/test/CodeGen/X86/iv-users-in-other-loops.ll @@ -17,52 +17,52 @@ target triple = "x86_64-unknown-linux-gnu" define void @foo(float* %A, i32 %IA, float* %B, i32 %IB, float* nocapture %C, i32 %N) nounwind { entry: - %0 = xor i32 %IA, 1 ; <i32> [#uses=1] - %1 = xor i32 %IB, 1 ; <i32> [#uses=1] - %2 = or i32 %1, %0 ; <i32> [#uses=1] - %3 = icmp eq i32 %2, 0 ; <i1> [#uses=1] - br i1 %3, label %bb2, label %bb13 + %0 = xor i32 %IA, 1 ; <i32> [#uses=1] + %1 = xor i32 %IB, 1 ; <i32> [#uses=1] + %2 = or i32 %1, %0 ; <i32> [#uses=1] + %3 = icmp eq i32 %2, 0 ; <i1> [#uses=1] + br i1 %3, label %bb2, label %bb13 bb: ; preds = %bb3 - %4 = load float* %A_addr.0, align 4 ; <float> [#uses=1] - %5 = load float* %B_addr.0, align 4 ; <float> [#uses=1] - %6 = mul float %4, %5 ; <float> [#uses=1] - %7 = add float %6, %Sum0.0 ; <float> [#uses=1] - %indvar.next154 = add i64 %B_addr.0.rec, 1 ; <i64> [#uses=1] - br label %bb2 + %4 = load float* %A_addr.0, align 4 ; <float> [#uses=1] + %5 = load float* %B_addr.0, align 4 ; <float> [#uses=1] + %6 = fmul float %4, %5 ; <float> [#uses=1] + %7 = fadd float %6, %Sum0.0 ; <float> [#uses=1] + %indvar.next154 = add i64 %B_addr.0.rec, 1 ; <i64> [#uses=1] + br label %bb2 bb2: ; preds = %entry, %bb - %B_addr.0.rec = phi i64 [ %indvar.next154, %bb ], [ 0, %entry ] ; <i64> [#uses=14] - %Sum0.0 = phi float [ %7, %bb ], [ 0.000000e+00, %entry ] ; <float> [#uses=5] - %indvar146 = trunc i64 %B_addr.0.rec to i32 ; <i32> [#uses=1] - %N_addr.0 = sub i32 %N, %indvar146 ; <i32> [#uses=6] - %A_addr.0 = getelementptr float* %A, i64 %B_addr.0.rec ; <float*> [#uses=4] - %B_addr.0 = getelementptr float* %B, i64 %B_addr.0.rec ; <float*> [#uses=4] - %8 = icmp sgt i32 %N_addr.0, 0 ; <i1> [#uses=1] - br i1 %8, label %bb3, label %bb4 + %B_addr.0.rec = phi i64 [ %indvar.next154, %bb ], [ 0, %entry ] ; <i64> [#uses=14] + %Sum0.0 = phi float [ %7, %bb ], [ 0.000000e+00, %entry ] ; <float> [#uses=5] + %indvar146 = trunc i64 %B_addr.0.rec to i32 ; <i32> [#uses=1] + %N_addr.0 = sub i32 %N, %indvar146 ; <i32> [#uses=6] + %A_addr.0 = getelementptr float* %A, i64 %B_addr.0.rec ; <float*> [#uses=4] + %B_addr.0 = getelementptr float* %B, i64 %B_addr.0.rec ; <float*> [#uses=4] + %8 = icmp sgt i32 %N_addr.0, 0 ; <i1> [#uses=1] + br i1 %8, label %bb3, label %bb4 bb3: ; preds = %bb2 - %9 = ptrtoint float* %A_addr.0 to i64 ; <i64> [#uses=1] - %10 = and i64 %9, 15 ; <i64> [#uses=1] - %11 = icmp eq i64 %10, 0 ; <i1> [#uses=1] - br i1 %11, label %bb4, label %bb + %9 = ptrtoint float* %A_addr.0 to i64 ; <i64> [#uses=1] + %10 = and i64 %9, 15 ; <i64> [#uses=1] + %11 = icmp eq i64 %10, 0 ; <i1> [#uses=1] + br i1 %11, label %bb4, label %bb bb4: ; preds = %bb3, %bb2 - %12 = ptrtoint float* %B_addr.0 to i64 ; <i64> [#uses=1] - %13 = and i64 %12, 15 ; <i64> [#uses=1] - %14 = icmp eq i64 %13, 0 ; <i1> [#uses=1] - %15 = icmp sgt i32 %N_addr.0, 15 ; <i1> [#uses=2] - br i1 %14, label %bb6.preheader, label 
%bb10.preheader + %12 = ptrtoint float* %B_addr.0 to i64 ; <i64> [#uses=1] + %13 = and i64 %12, 15 ; <i64> [#uses=1] + %14 = icmp eq i64 %13, 0 ; <i1> [#uses=1] + %15 = icmp sgt i32 %N_addr.0, 15 ; <i1> [#uses=2] + br i1 %14, label %bb6.preheader, label %bb10.preheader bb10.preheader: ; preds = %bb4 - br i1 %15, label %bb9, label %bb12.loopexit + br i1 %15, label %bb9, label %bb12.loopexit bb6.preheader: ; preds = %bb4 - br i1 %15, label %bb5, label %bb8.loopexit + br i1 %15, label %bb5, label %bb8.loopexit bb5: ; preds = %bb5, %bb6.preheader - %indvar143 = phi i64 [ 0, %bb6.preheader ], [ %indvar.next144, %bb5 ] ; <i64> [#uses=3] - %vSum0.072 = phi <4 x float> [ zeroinitializer, %bb6.preheader ], [ %21, %bb5 ] ; <<4 x float>> [#uses=1] + %indvar143 = phi i64 [ 0, %bb6.preheader ], [ %indvar.next144, %bb5 ] ; <i64> [#uses=3] + %vSum0.072 = phi <4 x float> [ zeroinitializer, %bb6.preheader ], [ %21, %bb5 ] ; <<4 x float>> [#uses=1] %vSum1.070 = phi <4 x float> [ zeroinitializer, %bb6.preheader ], [ %29, %bb5 ] ; <<4 x float>> [#uses=1] %vSum2.069 = phi <4 x float> [ zeroinitializer, %bb6.preheader ], [ %37, %bb5 ] ; <<4 x float>> [#uses=1] %vSum3.067 = phi <4 x float> [ zeroinitializer, %bb6.preheader ], [ %45, %bb5 ] ; <<4 x float>> [#uses=1] @@ -78,8 +78,8 @@ bb5: ; preds = %bb5, %bb6.preheader %17 = load <4 x float>* %16, align 16 ; <<4 x float>> [#uses=1] %18 = bitcast float* %B_addr.271 to <4 x float>* ; <<4 x float>*> [#uses=1] %19 = load <4 x float>* %18, align 16 ; <<4 x float>> [#uses=1] - %20 = mul <4 x float> %17, %19 ; <<4 x float>> [#uses=1] - %21 = add <4 x float> %20, %vSum0.072 ; <<4 x float>> [#uses=2] + %20 = fmul <4 x float> %17, %19 ; <<4 x float>> [#uses=1] + %21 = fadd <4 x float> %20, %vSum0.072 ; <<4 x float>> [#uses=2] %A_addr.273.sum163 = or i64 %A_addr.273.rec, 4 ; <i64> [#uses=1] %A_addr.0.sum175 = add i64 %B_addr.0.rec, %A_addr.273.sum163 ; <i64> [#uses=2] %22 = getelementptr float* %A, i64 %A_addr.0.sum175 ; <float*> [#uses=1] @@ -88,8 +88,8 @@ bb5: ; preds = %bb5, %bb6.preheader %25 = getelementptr float* %B, i64 %A_addr.0.sum175 ; <float*> [#uses=1] %26 = bitcast float* %25 to <4 x float>* ; <<4 x float>*> [#uses=1] %27 = load <4 x float>* %26, align 16 ; <<4 x float>> [#uses=1] - %28 = mul <4 x float> %24, %27 ; <<4 x float>> [#uses=1] - %29 = add <4 x float> %28, %vSum1.070 ; <<4 x float>> [#uses=2] + %28 = fmul <4 x float> %24, %27 ; <<4 x float>> [#uses=1] + %29 = fadd <4 x float> %28, %vSum1.070 ; <<4 x float>> [#uses=2] %A_addr.273.sum161 = or i64 %A_addr.273.rec, 8 ; <i64> [#uses=1] %A_addr.0.sum174 = add i64 %B_addr.0.rec, %A_addr.273.sum161 ; <i64> [#uses=2] %30 = getelementptr float* %A, i64 %A_addr.0.sum174 ; <float*> [#uses=1] @@ -98,8 +98,8 @@ bb5: ; preds = %bb5, %bb6.preheader %33 = getelementptr float* %B, i64 %A_addr.0.sum174 ; <float*> [#uses=1] %34 = bitcast float* %33 to <4 x float>* ; <<4 x float>*> [#uses=1] %35 = load <4 x float>* %34, align 16 ; <<4 x float>> [#uses=1] - %36 = mul <4 x float> %32, %35 ; <<4 x float>> [#uses=1] - %37 = add <4 x float> %36, %vSum2.069 ; <<4 x float>> [#uses=2] + %36 = fmul <4 x float> %32, %35 ; <<4 x float>> [#uses=1] + %37 = fadd <4 x float> %36, %vSum2.069 ; <<4 x float>> [#uses=2] %A_addr.273.sum159 = or i64 %A_addr.273.rec, 12 ; <i64> [#uses=1] %A_addr.0.sum173 = add i64 %B_addr.0.rec, %A_addr.273.sum159 ; <i64> [#uses=2] %38 = getelementptr float* %A, i64 %A_addr.0.sum173 ; <float*> [#uses=1] @@ -108,8 +108,8 @@ bb5: ; preds = %bb5, %bb6.preheader %41 = getelementptr float* %B, i64 %A_addr.0.sum173 
; <float*> [#uses=1] %42 = bitcast float* %41 to <4 x float>* ; <<4 x float>*> [#uses=1] %43 = load <4 x float>* %42, align 16 ; <<4 x float>> [#uses=1] - %44 = mul <4 x float> %40, %43 ; <<4 x float>> [#uses=1] - %45 = add <4 x float> %44, %vSum3.067 ; <<4 x float>> [#uses=2] + %44 = fmul <4 x float> %40, %43 ; <<4 x float>> [#uses=1] + %45 = fadd <4 x float> %44, %vSum3.067 ; <<4 x float>> [#uses=2] %.rec83 = add i64 %A_addr.273.rec, 16 ; <i64> [#uses=1] %A_addr.0.sum172 = add i64 %B_addr.0.rec, %.rec83 ; <i64> [#uses=2] %46 = getelementptr float* %A, i64 %A_addr.0.sum172 ; <float*> [#uses=1] @@ -132,8 +132,8 @@ bb7: ; preds = %bb7, %bb8.loopexit %51 = load <4 x float>* %50, align 16 ; <<4 x float>> [#uses=1] %52 = bitcast float* %B_addr.359 to <4 x float>* ; <<4 x float>*> [#uses=1] %53 = load <4 x float>* %52, align 16 ; <<4 x float>> [#uses=1] - %54 = mul <4 x float> %51, %53 ; <<4 x float>> [#uses=1] - %55 = add <4 x float> %54, %vSum0.260 ; <<4 x float>> [#uses=2] + %54 = fmul <4 x float> %51, %53 ; <<4 x float>> [#uses=1] + %55 = fadd <4 x float> %54, %vSum0.260 ; <<4 x float>> [#uses=2] %.rec85 = add i64 %A_addr.361.rec, 4 ; <i64> [#uses=2] %56 = getelementptr float* %A_addr.2.lcssa, i64 %.rec85 ; <float*> [#uses=1] %57 = getelementptr float* %B_addr.2.lcssa, i64 %.rec85 ; <float*> [#uses=1] @@ -185,23 +185,23 @@ bb9: ; preds = %bb9, %bb10.preheader %71 = load <4 x float>* %70, align 1 %72 = bitcast float* %A_addr.440 to <4 x float>* ; <<4 x float>*> [#uses=1] %73 = load <4 x float>* %72, align 16 ; <<4 x float>> [#uses=1] - %74 = mul <4 x float> %73, %62 ; <<4 x float>> [#uses=1] - %75 = add <4 x float> %74, %vSum0.339 ; <<4 x float>> [#uses=2] + %74 = fmul <4 x float> %73, %62 ; <<4 x float>> [#uses=1] + %75 = fadd <4 x float> %74, %vSum0.339 ; <<4 x float>> [#uses=2] %76 = getelementptr float* %A, i64 %B_addr.0.sum187 ; <float*> [#uses=1] %77 = bitcast float* %76 to <4 x float>* ; <<4 x float>*> [#uses=1] %78 = load <4 x float>* %77, align 16 ; <<4 x float>> [#uses=1] - %79 = mul <4 x float> %78, %65 ; <<4 x float>> [#uses=1] - %80 = add <4 x float> %79, %vSum1.237 ; <<4 x float>> [#uses=2] + %79 = fmul <4 x float> %78, %65 ; <<4 x float>> [#uses=1] + %80 = fadd <4 x float> %79, %vSum1.237 ; <<4 x float>> [#uses=2] %81 = getelementptr float* %A, i64 %B_addr.0.sum186 ; <float*> [#uses=1] %82 = bitcast float* %81 to <4 x float>* ; <<4 x float>*> [#uses=1] %83 = load <4 x float>* %82, align 16 ; <<4 x float>> [#uses=1] - %84 = mul <4 x float> %83, %68 ; <<4 x float>> [#uses=1] - %85 = add <4 x float> %84, %vSum2.236 ; <<4 x float>> [#uses=2] + %84 = fmul <4 x float> %83, %68 ; <<4 x float>> [#uses=1] + %85 = fadd <4 x float> %84, %vSum2.236 ; <<4 x float>> [#uses=2] %86 = getelementptr float* %A, i64 %B_addr.0.sum185 ; <float*> [#uses=1] %87 = bitcast float* %86 to <4 x float>* ; <<4 x float>*> [#uses=1] %88 = load <4 x float>* %87, align 16 ; <<4 x float>> [#uses=1] - %89 = mul <4 x float> %88, %71 ; <<4 x float>> [#uses=1] - %90 = add <4 x float> %89, %vSum3.234 ; <<4 x float>> [#uses=2] + %89 = fmul <4 x float> %88, %71 ; <<4 x float>> [#uses=1] + %90 = fadd <4 x float> %89, %vSum3.234 ; <<4 x float>> [#uses=2] %.rec89 = add i64 %A_addr.440.rec, 16 ; <i64> [#uses=1] %A_addr.0.sum170 = add i64 %B_addr.0.rec, %.rec89 ; <i64> [#uses=2] %91 = getelementptr float* %A, i64 %A_addr.0.sum170 ; <float*> [#uses=1] @@ -224,8 +224,8 @@ bb11: ; preds = %bb11, %bb12.loopexit %96 = load <4 x float>* %95, align 1 %97 = bitcast float* %A_addr.529 to <4 x float>* ; <<4 x float>*> [#uses=1] %98 = 
load <4 x float>* %97, align 16 ; <<4 x float>> [#uses=1] - %99 = mul <4 x float> %98, %96 ; <<4 x float>> [#uses=1] - %100 = add <4 x float> %99, %vSum0.428 ; <<4 x float>> [#uses=2] + %99 = fmul <4 x float> %98, %96 ; <<4 x float>> [#uses=1] + %100 = fadd <4 x float> %99, %vSum0.428 ; <<4 x float>> [#uses=2] %.rec91 = add i64 %A_addr.529.rec, 4 ; <i64> [#uses=2] %101 = getelementptr float* %A_addr.4.lcssa, i64 %.rec91 ; <float*> [#uses=1] %102 = getelementptr float* %B_addr.4.lcssa, i64 %.rec91 ; <float*> [#uses=1] @@ -254,17 +254,17 @@ bb13: ; preds = %bb12.loopexit, %bb11, %bb8.loopexit, %bb7, %entry %B_addr.1 = phi float* [ %B, %entry ], [ %B_addr.2.lcssa, %bb8.loopexit ], [ %57, %bb7 ], [ %B_addr.4.lcssa, %bb12.loopexit ], [ %102, %bb11 ] ; <float*> [#uses=1] %vSum0.1 = phi <4 x float> [ zeroinitializer, %entry ], [ %vSum0.0.lcssa, %bb8.loopexit ], [ %55, %bb7 ], [ %vSum0.3.lcssa, %bb12.loopexit ], [ %100, %bb11 ] ; <<4 x float>> [#uses=1] %A_addr.1 = phi float* [ %A, %entry ], [ %A_addr.2.lcssa, %bb8.loopexit ], [ %56, %bb7 ], [ %A_addr.4.lcssa, %bb12.loopexit ], [ %101, %bb11 ] ; <float*> [#uses=1] - %106 = add <4 x float> %vSum0.1, %vSum2.1 ; <<4 x float>> [#uses=1] - %107 = add <4 x float> %vSum1.1, %vSum3.1 ; <<4 x float>> [#uses=1] - %108 = add <4 x float> %106, %107 ; <<4 x float>> [#uses=4] + %106 = fadd <4 x float> %vSum0.1, %vSum2.1 ; <<4 x float>> [#uses=1] + %107 = fadd <4 x float> %vSum1.1, %vSum3.1 ; <<4 x float>> [#uses=1] + %108 = fadd <4 x float> %106, %107 ; <<4 x float>> [#uses=4] %tmp23 = extractelement <4 x float> %108, i32 0 ; <float> [#uses=1] %tmp21 = extractelement <4 x float> %108, i32 1 ; <float> [#uses=1] - %109 = add float %tmp23, %tmp21 ; <float> [#uses=1] + %109 = fadd float %tmp23, %tmp21 ; <float> [#uses=1] %tmp19 = extractelement <4 x float> %108, i32 2 ; <float> [#uses=1] %tmp17 = extractelement <4 x float> %108, i32 3 ; <float> [#uses=1] - %110 = add float %tmp19, %tmp17 ; <float> [#uses=1] - %111 = add float %109, %110 ; <float> [#uses=1] - %Sum0.254 = add float %111, %Sum0.1 ; <float> [#uses=2] + %110 = fadd float %tmp19, %tmp17 ; <float> [#uses=1] + %111 = fadd float %109, %110 ; <float> [#uses=1] + %Sum0.254 = fadd float %111, %Sum0.1 ; <float> [#uses=2] %112 = icmp sgt i32 %N_addr.1, 0 ; <i1> [#uses=1] br i1 %112, label %bb.nph56, label %bb16 @@ -283,8 +283,8 @@ bb14: ; preds = %bb14, %bb.nph56 %A_addr.653 = getelementptr float* %A_addr.1, i64 %A_addr.653.rec ; <float*> [#uses=1] %113 = load float* %A_addr.653, align 4 ; <float> [#uses=1] %114 = load float* %B_addr.652, align 4 ; <float> [#uses=1] - %115 = mul float %113, %114 ; <float> [#uses=1] - %Sum0.2 = add float %115, %Sum0.255 ; <float> [#uses=2] + %115 = fmul float %113, %114 ; <float> [#uses=1] + %Sum0.2 = fadd float %115, %Sum0.255 ; <float> [#uses=2] %indvar.next118 = add i64 %indvar117, 1 ; <i64> [#uses=2] %exitcond = icmp eq i64 %indvar.next118, %tmp. 
; <i1> [#uses=1] br i1 %exitcond, label %bb16, label %bb14 diff --git a/test/CodeGen/X86/masked-iv-safe.ll b/test/CodeGen/X86/masked-iv-safe.ll index e102535..0bf347c 100644 --- a/test/CodeGen/X86/masked-iv-safe.ll +++ b/test/CodeGen/X86/masked-iv-safe.ll @@ -20,16 +20,16 @@ loop: %indvar.i8 = and i64 %indvar, 255 %t0 = getelementptr double* %d, i64 %indvar.i8 %t1 = load double* %t0 - %t2 = mul double %t1, 0.1 + %t2 = fmul double %t1, 0.1 store double %t2, double* %t0 %indvar.i24 = and i64 %indvar, 16777215 %t3 = getelementptr double* %d, i64 %indvar.i24 %t4 = load double* %t3 - %t5 = mul double %t4, 2.3 + %t5 = fmul double %t4, 2.3 store double %t5, double* %t3 %t6 = getelementptr double* %d, i64 %indvar %t7 = load double* %t6 - %t8 = mul double %t7, 4.5 + %t8 = fmul double %t7, 4.5 store double %t8, double* %t6 %indvar.next = add i64 %indvar, 1 %exitcond = icmp eq i64 %indvar.next, 10 @@ -48,16 +48,16 @@ loop: %indvar.i8 = and i64 %indvar, 255 %t0 = getelementptr double* %d, i64 %indvar.i8 %t1 = load double* %t0 - %t2 = mul double %t1, 0.1 + %t2 = fmul double %t1, 0.1 store double %t2, double* %t0 %indvar.i24 = and i64 %indvar, 16777215 %t3 = getelementptr double* %d, i64 %indvar.i24 %t4 = load double* %t3 - %t5 = mul double %t4, 2.3 + %t5 = fmul double %t4, 2.3 store double %t5, double* %t3 %t6 = getelementptr double* %d, i64 %indvar %t7 = load double* %t6 - %t8 = mul double %t7, 4.5 + %t8 = fmul double %t7, 4.5 store double %t8, double* %t6 %indvar.next = sub i64 %indvar, 1 %exitcond = icmp eq i64 %indvar.next, 0 @@ -77,17 +77,17 @@ loop: %indvar.i8 = ashr i64 %s0, 8 %t0 = getelementptr double* %d, i64 %indvar.i8 %t1 = load double* %t0 - %t2 = mul double %t1, 0.1 + %t2 = fmul double %t1, 0.1 store double %t2, double* %t0 %s1 = shl i64 %indvar, 24 %indvar.i24 = ashr i64 %s1, 24 %t3 = getelementptr double* %d, i64 %indvar.i24 %t4 = load double* %t3 - %t5 = mul double %t4, 2.3 + %t5 = fmul double %t4, 2.3 store double %t5, double* %t3 %t6 = getelementptr double* %d, i64 %indvar %t7 = load double* %t6 - %t8 = mul double %t7, 4.5 + %t8 = fmul double %t7, 4.5 store double %t8, double* %t6 %indvar.next = add i64 %indvar, 1 %exitcond = icmp eq i64 %indvar.next, 10 @@ -107,17 +107,17 @@ loop: %indvar.i8 = ashr i64 %s0, 8 %t0 = getelementptr double* %d, i64 %indvar.i8 %t1 = load double* %t0 - %t2 = mul double %t1, 0.1 + %t2 = fmul double %t1, 0.1 store double %t2, double* %t0 %s1 = shl i64 %indvar, 24 %indvar.i24 = ashr i64 %s1, 24 %t3 = getelementptr double* %d, i64 %indvar.i24 %t4 = load double* %t3 - %t5 = mul double %t4, 2.3 + %t5 = fmul double %t4, 2.3 store double %t5, double* %t3 %t6 = getelementptr double* %d, i64 %indvar %t7 = load double* %t6 - %t8 = mul double %t7, 4.5 + %t8 = fmul double %t7, 4.5 store double %t8, double* %t6 %indvar.next = sub i64 %indvar, 1 %exitcond = icmp eq i64 %indvar.next, 0 @@ -136,16 +136,16 @@ loop: %indvar.i8 = and i64 %indvar, 255 %t0 = getelementptr double* %d, i64 %indvar.i8 %t1 = load double* %t0 - %t2 = mul double %t1, 0.1 + %t2 = fmul double %t1, 0.1 store double %t2, double* %t0 %indvar.i24 = and i64 %indvar, 16777215 %t3 = getelementptr double* %d, i64 %indvar.i24 %t4 = load double* %t3 - %t5 = mul double %t4, 2.3 + %t5 = fmul double %t4, 2.3 store double %t5, double* %t3 %t6 = getelementptr double* %d, i64 %indvar %t7 = load double* %t6 - %t8 = mul double %t7, 4.5 + %t8 = fmul double %t7, 4.5 store double %t8, double* %t6 %indvar.next = add i64 %indvar, 1 %exitcond = icmp eq i64 %indvar.next, 0 @@ -164,16 +164,16 @@ loop: %indvar.i8 = and i64 
%indvar, 255 %t0 = getelementptr double* %d, i64 %indvar.i8 %t1 = load double* %t0 - %t2 = mul double %t1, 0.1 + %t2 = fmul double %t1, 0.1 store double %t2, double* %t0 %indvar.i24 = and i64 %indvar, 16777215 %t3 = getelementptr double* %d, i64 %indvar.i24 %t4 = load double* %t3 - %t5 = mul double %t4, 2.3 + %t5 = fmul double %t4, 2.3 store double %t5, double* %t3 %t6 = getelementptr double* %d, i64 %indvar %t7 = load double* %t6 - %t8 = mul double %t7, 4.5 + %t8 = fmul double %t7, 4.5 store double %t8, double* %t6 %indvar.next = sub i64 %indvar, 1 %exitcond = icmp eq i64 %indvar.next, 18446744073709551615 @@ -193,17 +193,17 @@ loop: %indvar.i8 = ashr i64 %s0, 8 %t0 = getelementptr double* %d, i64 %indvar.i8 %t1 = load double* %t0 - %t2 = mul double %t1, 0.1 + %t2 = fmul double %t1, 0.1 store double %t2, double* %t0 %s1 = shl i64 %indvar, 24 %indvar.i24 = ashr i64 %s1, 24 %t3 = getelementptr double* %d, i64 %indvar.i24 %t4 = load double* %t3 - %t5 = mul double %t4, 2.3 + %t5 = fmul double %t4, 2.3 store double %t5, double* %t3 %t6 = getelementptr double* %d, i64 %indvar %t7 = load double* %t6 - %t8 = mul double %t7, 4.5 + %t8 = fmul double %t7, 4.5 store double %t8, double* %t6 %indvar.next = add i64 %indvar, 1 %exitcond = icmp eq i64 %indvar.next, 0 @@ -223,17 +223,17 @@ loop: %indvar.i8 = ashr i64 %s0, 8 %t0 = getelementptr double* %d, i64 %indvar.i8 %t1 = load double* %t0 - %t2 = mul double %t1, 0.1 + %t2 = fmul double %t1, 0.1 store double %t2, double* %t0 %s1 = shl i64 %indvar, 24 %indvar.i24 = ashr i64 %s1, 24 %t3 = getelementptr double* %d, i64 %indvar.i24 %t4 = load double* %t3 - %t5 = mul double %t4, 2.3 + %t5 = fmul double %t4, 2.3 store double %t5, double* %t3 %t6 = getelementptr double* %d, i64 %indvar %t7 = load double* %t6 - %t8 = mul double %t7, 4.5 + %t8 = fmul double %t7, 4.5 store double %t8, double* %t6 %indvar.next = sub i64 %indvar, 1 %exitcond = icmp eq i64 %indvar.next, 18446744073709551615 diff --git a/test/CodeGen/X86/masked-iv-unsafe.ll b/test/CodeGen/X86/masked-iv-unsafe.ll index 7ccfe85..639a7a6 100644 --- a/test/CodeGen/X86/masked-iv-unsafe.ll +++ b/test/CodeGen/X86/masked-iv-unsafe.ll @@ -15,16 +15,16 @@ loop: %indvar.i8 = and i64 %indvar, 255 %t0 = getelementptr double* %d, i64 %indvar.i8 %t1 = load double* %t0 - %t2 = mul double %t1, 0.1 + %t2 = fmul double %t1, 0.1 store double %t2, double* %t0 %indvar.i24 = and i64 %indvar, 16777215 %t3 = getelementptr double* %d, i64 %indvar.i24 %t4 = load double* %t3 - %t5 = mul double %t4, 2.3 + %t5 = fmul double %t4, 2.3 store double %t5, double* %t3 %t6 = getelementptr double* %d, i64 %indvar %t7 = load double* %t6 - %t8 = mul double %t7, 4.5 + %t8 = fmul double %t7, 4.5 store double %t8, double* %t6 %indvar.next = add i64 %indvar, 1 %exitcond = icmp eq i64 %indvar.next, 0 @@ -43,16 +43,16 @@ loop: %indvar.i8 = and i64 %indvar, 255 %t0 = getelementptr double* %d, i64 %indvar.i8 %t1 = load double* %t0 - %t2 = mul double %t1, 0.1 + %t2 = fmul double %t1, 0.1 store double %t2, double* %t0 %indvar.i24 = and i64 %indvar, 16777215 %t3 = getelementptr double* %d, i64 %indvar.i24 %t4 = load double* %t3 - %t5 = mul double %t4, 2.3 + %t5 = fmul double %t4, 2.3 store double %t5, double* %t3 %t6 = getelementptr double* %d, i64 %indvar %t7 = load double* %t6 - %t8 = mul double %t7, 4.5 + %t8 = fmul double %t7, 4.5 store double %t8, double* %t6 %indvar.next = sub i64 %indvar, 1 %exitcond = icmp eq i64 %indvar.next, 20 @@ -72,17 +72,17 @@ loop: %indvar.i8 = ashr i64 %s0, 8 %t0 = getelementptr double* %d, i64 %indvar.i8 %t1 = load 
double* %t0
- %t2 = mul double %t1, 0.1
+ %t2 = fmul double %t1, 0.1
 store double %t2, double* %t0
 %s1 = shl i64 %indvar, 24
 %indvar.i24 = ashr i64 %s1, 24
 %t3 = getelementptr double* %d, i64 %indvar.i24
 %t4 = load double* %t3
- %t5 = mul double %t4, 2.3
+ %t5 = fmul double %t4, 2.3
 store double %t5, double* %t3
 %t6 = getelementptr double* %d, i64 %indvar
 %t7 = load double* %t6
- %t8 = mul double %t7, 4.5
+ %t8 = fmul double %t7, 4.5
 store double %t8, double* %t6
 %indvar.next = add i64 %indvar, 1
 %exitcond = icmp eq i64 %indvar.next, 0
@@ -102,17 +102,17 @@ loop:
 %indvar.i8 = ashr i64 %s0, 8
 %t0 = getelementptr double* %d, i64 %indvar.i8
 %t1 = load double* %t0
- %t2 = mul double %t1, 0.1
+ %t2 = fmul double %t1, 0.1
 store double %t2, double* %t0
 %s1 = shl i64 %indvar, 24
 %indvar.i24 = ashr i64 %s1, 24
 %t3 = getelementptr double* %d, i64 %indvar.i24
 %t4 = load double* %t3
- %t5 = mul double %t4, 2.3
+ %t5 = fmul double %t4, 2.3
 store double %t5, double* %t3
 %t6 = getelementptr double* %d, i64 %indvar
 %t7 = load double* %t6
- %t8 = mul double %t7, 4.5
+ %t8 = fmul double %t7, 4.5
 store double %t8, double* %t6
 %indvar.next = sub i64 %indvar, 1
 %exitcond = icmp eq i64 %indvar.next, 20
@@ -131,16 +131,16 @@ loop:
 %indvar.i8 = and i64 %indvar, 255
 %t0 = getelementptr double* %d, i64 %indvar.i8
 %t1 = load double* %t0
- %t2 = mul double %t1, 0.1
+ %t2 = fmul double %t1, 0.1
 store double %t2, double* %t0
 %indvar.i24 = and i64 %indvar, 16777215
 %t3 = getelementptr double* %d, i64 %indvar.i24
 %t4 = load double* %t3
- %t5 = mul double %t4, 2.3
+ %t5 = fmul double %t4, 2.3
 store double %t5, double* %t3
 %t6 = getelementptr double* %d, i64 %indvar
 %t7 = load double* %t6
- %t8 = mul double %t7, 4.5
+ %t8 = fmul double %t7, 4.5
 store double %t8, double* %t6
 %indvar.next = add i64 %indvar, 1
 %exitcond = icmp eq i64 %indvar.next, %n
@@ -159,16 +159,16 @@ loop:
 %indvar.i8 = and i64 %indvar, 255
 %t0 = getelementptr double* %d, i64 %indvar.i8
 %t1 = load double* %t0
- %t2 = mul double %t1, 0.1
+ %t2 = fmul double %t1, 0.1
 store double %t2, double* %t0
 %indvar.i24 = and i64 %indvar, 16777215
 %t3 = getelementptr double* %d, i64 %indvar.i24
 %t4 = load double* %t3
- %t5 = mul double %t4, 2.3
+ %t5 = fmul double %t4, 2.3
 store double %t5, double* %t3
 %t6 = getelementptr double* %d, i64 %indvar
 %t7 = load double* %t6
- %t8 = mul double %t7, 4.5
+ %t8 = fmul double %t7, 4.5
 store double %t8, double* %t6
 %indvar.next = sub i64 %indvar, 1
 %exitcond = icmp eq i64 %indvar.next, 10
@@ -188,17 +188,17 @@ loop:
 %indvar.i8 = ashr i64 %s0, 8
 %t0 = getelementptr double* %d, i64 %indvar.i8
 %t1 = load double* %t0
- %t2 = mul double %t1, 0.1
+ %t2 = fmul double %t1, 0.1
 store double %t2, double* %t0
 %s1 = shl i64 %indvar, 24
 %indvar.i24 = ashr i64 %s1, 24
 %t3 = getelementptr double* %d, i64 %indvar.i24
 %t4 = load double* %t3
- %t5 = mul double %t4, 2.3
+ %t5 = fmul double %t4, 2.3
 store double %t5, double* %t3
 %t6 = getelementptr double* %d, i64 %indvar
 %t7 = load double* %t6
- %t8 = mul double %t7, 4.5
+ %t8 = fmul double %t7, 4.5
 store double %t8, double* %t6
 %indvar.next = add i64 %indvar, 1
 %exitcond = icmp eq i64 %indvar.next, %n
@@ -218,17 +218,17 @@ loop:
 %indvar.i8 = ashr i64 %s0, 8
 %t0 = getelementptr double* %d, i64 %indvar.i8
 %t1 = load double* %t0
- %t2 = mul double %t1, 0.1
+ %t2 = fmul double %t1, 0.1
 store double %t2, double* %t0
 %s1 = shl i64 %indvar, 24
 %indvar.i24 = ashr i64 %s1, 24
 %t3 = getelementptr double* %d, i64 %indvar.i24
 %t4 = load double* %t3
- %t5 = mul double %t4, 2.3
+ %t5 = fmul double %t4, 2.3
 store double %t5, double* %t3
 %t6 = getelementptr double* %d, i64 %indvar
 %t7 = load double* %t6
- %t8 = mul double %t7, 4.5
+ %t8 = fmul double %t7, 4.5
 store double %t8, double* %t6
 %indvar.next = sub i64 %indvar, 1
 %exitcond = icmp eq i64 %indvar.next, 10
@@ -247,16 +247,16 @@ loop:
 %indvar.i8 = and i64 %indvar, 255
 %t0 = getelementptr double* %d, i64 %indvar.i8
 %t1 = load double* %t0
- %t2 = mul double %t1, 0.1
+ %t2 = fmul double %t1, 0.1
 store double %t2, double* %t0
 %indvar.i24 = and i64 %indvar, 16777215
 %t3 = getelementptr double* %d, i64 %indvar.i24
 %t4 = load double* %t3
- %t5 = mul double %t4, 2.3
+ %t5 = fmul double %t4, 2.3
 store double %t5, double* %t3
 %t6 = getelementptr double* %d, i64 %indvar
 %t7 = load double* %t6
- %t8 = mul double %t7, 4.5
+ %t8 = fmul double %t7, 4.5
 store double %t8, double* %t6
 %indvar.next = sub i64 %indvar, 1
 %exitcond = icmp eq i64 %indvar.next, 18446744073709551615
@@ -275,16 +275,16 @@ loop:
 %indvar.i8 = and i64 %indvar, 255
 %t0 = getelementptr double* %d, i64 %indvar.i8
 %t1 = load double* %t0
- %t2 = mul double %t1, 0.1
+ %t2 = fmul double %t1, 0.1
 store double %t2, double* %t0
 %indvar.i24 = and i64 %indvar, 16777215
 %t3 = getelementptr double* %d, i64 %indvar.i24
 %t4 = load double* %t3
- %t5 = mul double %t4, 2.3
+ %t5 = fmul double %t4, 2.3
 store double %t5, double* %t3
 %t6 = getelementptr double* %d, i64 %indvar
 %t7 = load double* %t6
- %t8 = mul double %t7, 4.5
+ %t8 = fmul double %t7, 4.5
 store double %t8, double* %t6
 %indvar.next = add i64 %indvar, 3
 %exitcond = icmp eq i64 %indvar.next, 10
@@ -303,16 +303,16 @@ loop:
 %indvar.i8 = and i64 %indvar, 255
 %t0 = getelementptr double* %d, i64 %indvar.i8
 %t1 = load double* %t0
- %t2 = mul double %t1, 0.1
+ %t2 = fmul double %t1, 0.1
 store double %t2, double* %t0
 %indvar.i24 = and i64 %indvar, 16777215
 %t3 = getelementptr double* %d, i64 %indvar.i24
 %t4 = load double* %t3
- %t5 = mul double %t4, 2.3
+ %t5 = fmul double %t4, 2.3
 store double %t5, double* %t3
 %t6 = getelementptr double* %d, i64 %indvar
 %t7 = load double* %t6
- %t8 = mul double %t7, 4.5
+ %t8 = fmul double %t7, 4.5
 store double %t8, double* %t6
 %indvar.next = sub i64 %indvar, 3
 %exitcond = icmp eq i64 %indvar.next, 0
@@ -332,17 +332,17 @@ loop:
 %indvar.i8 = ashr i64 %s0, 8
 %t0 = getelementptr double* %d, i64 %indvar.i8
 %t1 = load double* %t0
- %t2 = mul double %t1, 0.1
+ %t2 = fmul double %t1, 0.1
 store double %t2, double* %t0
 %s1 = shl i64 %indvar, 24
 %indvar.i24 = ashr i64 %s1, 24
 %t3 = getelementptr double* %d, i64 %indvar.i24
 %t4 = load double* %t3
- %t5 = mul double %t4, 2.3
+ %t5 = fmul double %t4, 2.3
 store double %t5, double* %t3
 %t6 = getelementptr double* %d, i64 %indvar
 %t7 = load double* %t6
- %t8 = mul double %t7, 4.5
+ %t8 = fmul double %t7, 4.5
 store double %t8, double* %t6
 %indvar.next = add i64 %indvar, 3
 %exitcond = icmp eq i64 %indvar.next, 10
@@ -362,17 +362,17 @@ loop:
 %indvar.i8 = ashr i64 %s0, 8
 %t0 = getelementptr double* %d, i64 %indvar.i8
 %t1 = load double* %t0
- %t2 = mul double %t1, 0.1
+ %t2 = fmul double %t1, 0.1
 store double %t2, double* %t0
 %s1 = shl i64 %indvar, 24
 %indvar.i24 = ashr i64 %s1, 24
 %t3 = getelementptr double* %d, i64 %indvar.i24
 %t4 = load double* %t3
- %t5 = mul double %t4, 2.3
+ %t5 = fmul double %t4, 2.3
 store double %t5, double* %t3
 %t6 = getelementptr double* %d, i64 %indvar
 %t7 = load double* %t6
- %t8 = mul double %t7, 4.5
+ %t8 = fmul double %t7, 4.5
 store double %t8, double* %t6
 %indvar.next = sub i64 %indvar, 3
 %exitcond = icmp eq i64 %indvar.next, 0
diff --git a/test/CodeGen/X86/multiple-return-values.ll b/test/CodeGen/X86/multiple-return-values.ll
index 2e754a8..5f7a83f 100644
--- a/test/CodeGen/X86/multiple-return-values.ll
+++ b/test/CodeGen/X86/multiple-return-values.ll
@@ -2,7 +2,7 @@
 define {i64, float} @bar(i64 %a, float %b) {
 %y = add i64 %a, 7
- %z = add float %b, 7.0
+ %z = fadd float %b, 7.0
 ret i64 %y, float %z
 }
diff --git a/test/CodeGen/X86/neg_fp.ll b/test/CodeGen/X86/neg_fp.ll
index 55c7654..1a7ee08 100644
--- a/test/CodeGen/X86/neg_fp.ll
+++ b/test/CodeGen/X86/neg_fp.ll
@@ -6,7 +6,7 @@ define float @negfp(float %a, float %b) {
 entry:
- %sub = sub float %a, %b ; <float> [#uses=1]
- %neg = sub float -0.000000e+00, %sub ; <float> [#uses=1]
+ %sub = fsub float %a, %b ; <float> [#uses=1]
+ %neg = fsub float -0.000000e+00, %sub ; <float> [#uses=1]
 ret float %neg
 }
\ No newline at end of file
diff --git a/test/CodeGen/X86/negate-add-zero.ll b/test/CodeGen/X86/negate-add-zero.ll
index 59a2bd0..689639f 100644
--- a/test/CodeGen/X86/negate-add-zero.ll
+++ b/test/CodeGen/X86/negate-add-zero.ll
@@ -843,14 +843,14 @@ entry:
 %8 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=0]
 %9 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 3 ; <double*> [#uses=0]
 %10 = load double* null, align 8 ; <double> [#uses=2]
- %11 = sub double -0.000000e+00, %10 ; <double> [#uses=1]
+ %11 = fsub double -0.000000e+00, %10 ; <double> [#uses=1]
 %12 = load double* null, align 8 ; <double> [#uses=2]
 %13 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 1, i32 0, i32 0, i32 0, i32 3 ; <double*> [#uses=1]
 %14 = load double* %13, align 8 ; <double> [#uses=2]
- %15 = sub double -0.000000e+00, %14 ; <double> [#uses=1]
+ %15 = fsub double -0.000000e+00, %14 ; <double> [#uses=1]
 %16 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 1, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
 %17 = load double* %16, align 8 ; <double> [#uses=2]
- %18 = sub double -0.000000e+00, %17 ; <double> [#uses=1]
+ %18 = fsub double -0.000000e+00, %17 ; <double> [#uses=1]
 %19 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 0 ; <double*> [#uses=0]
 %20 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 3 ; <double*> [#uses=0]
 %21 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 6 ; <double*> [#uses=0]
@@ -866,28 +866,28 @@ entry:
 %31 = getelementptr %"struct.FixedMatrix<double,1,3,0,0>"* null, i32 0, i32 0, i32 0, i32 0 ; <double*> [#uses=0]
 %32 = getelementptr %"struct.FixedMatrix<double,1,3,0,0>"* null, i32 0, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
 %33 = getelementptr %"struct.FixedMatrix<double,1,3,0,0>"* null, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- %34 = mul double %17, %5 ; <double> [#uses=1]
- %35 = add double 0.000000e+00, %34 ; <double> [#uses=1]
- %36 = add double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %37 = mul double %14, %3 ; <double> [#uses=1]
- %38 = add double %36, %37 ; <double> [#uses=1]
- %39 = mul double %12, %4 ; <double> [#uses=1]
- %40 = add double %38, %39 ; <double> [#uses=1]
- %41 = mul double %5, %11 ; <double> [#uses=1]
- %42 = add double %40, %41 ; <double> [#uses=2]
+ %34 = fmul double %17, %5 ; <double> [#uses=1]
+ %35 = fadd double 0.000000e+00, %34 ; <double> [#uses=1]
+ %36 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
+ %37 = fmul double %14, %3 ; <double> [#uses=1]
+ %38 = fadd double %36, %37 ; <double> [#uses=1]
+ %39 = fmul double %12, %4 ; <double> [#uses=1]
+ %40 = fadd double %38, %39 ; <double> [#uses=1]
+ %41 = fmul double %5, %11 ; <double> [#uses=1]
+ %42 = fadd double %40, %41 ; <double> [#uses=2]
 store double %42, double* %32, align 8
- %43 = mul double %2, %15 ; <double> [#uses=1]
- %44 = add double %43, 0.000000e+00 ; <double> [#uses=1]
- %45 = mul double %3, %18 ; <double> [#uses=1]
- %46 = add double %44, %45 ; <double> [#uses=1]
- %47 = mul double %10, %4 ; <double> [#uses=1]
- %48 = add double %46, %47 ; <double> [#uses=1]
- %49 = mul double %12, %5 ; <double> [#uses=1]
- %50 = add double %48, %49 ; <double> [#uses=2]
+ %43 = fmul double %2, %15 ; <double> [#uses=1]
+ %44 = fadd double %43, 0.000000e+00 ; <double> [#uses=1]
+ %45 = fmul double %3, %18 ; <double> [#uses=1]
+ %46 = fadd double %44, %45 ; <double> [#uses=1]
+ %47 = fmul double %10, %4 ; <double> [#uses=1]
+ %48 = fadd double %46, %47 ; <double> [#uses=1]
+ %49 = fmul double %12, %5 ; <double> [#uses=1]
+ %50 = fadd double %48, %49 ; <double> [#uses=2]
 store double %50, double* %33, align 8
- %51 = mul double %35, 2.000000e+00 ; <double> [#uses=1]
- %52 = mul double %42, 2.000000e+00 ; <double> [#uses=1]
- %53 = mul double %50, 2.000000e+00 ; <double> [#uses=1]
+ %51 = fmul double %35, 2.000000e+00 ; <double> [#uses=1]
+ %52 = fmul double %42, 2.000000e+00 ; <double> [#uses=1]
+ %53 = fmul double %50, 2.000000e+00 ; <double> [#uses=1]
 %54 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 0, i32 10, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
 store double %51, double* %54, align 8
 %55 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 0, i32 10, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
diff --git a/test/CodeGen/X86/negative-sin.ll b/test/CodeGen/X86/negative-sin.ll
index 39c6297..8cc1bec 100644
--- a/test/CodeGen/X86/negative-sin.ll
+++ b/test/CodeGen/X86/negative-sin.ll
@@ -5,8 +5,8 @@ declare double @sin(double %f)
 define double @foo(double %e) {
- %f = sub double 0.0, %e
+ %f = fsub double 0.0, %e
 %g = call double @sin(double %f)
- %h = sub double 0.0, %g
+ %h = fsub double 0.0, %g
 ret double %h
 }
diff --git a/test/CodeGen/X86/peep-test-0.ll b/test/CodeGen/X86/peep-test-0.ll
index a95b564..8dcd23a 100644
--- a/test/CodeGen/X86/peep-test-0.ll
+++ b/test/CodeGen/X86/peep-test-0.ll
@@ -11,7 +11,7 @@ bb:
 %i.03 = add i64 %indvar, %n
 %0 = getelementptr double* %d, i64 %i.03
 %1 = load double* %0, align 8
- %2 = mul double %1, 3.000000e+00
+ %2 = fmul double %1, 3.000000e+00
 store double %2, double* %0, align 8
 %indvar.next = add i64 %indvar, 1
 %exitcond = icmp eq i64 %indvar.next, 0
diff --git a/test/CodeGen/X86/peep-test-1.ll b/test/CodeGen/X86/peep-test-1.ll
index b4698e3..85e3bf2 100644
--- a/test/CodeGen/X86/peep-test-1.ll
+++ b/test/CodeGen/X86/peep-test-1.ll
@@ -11,7 +11,7 @@ bb:
 %i.03 = sub i32 %n, %indvar
 %1 = getelementptr double* %p, i32 %i.03
 %2 = load double* %1, align 4
- %3 = mul double %2, 2.930000e+00
+ %3 = fmul double %2, 2.930000e+00
 store double %3, double* %1, align 4
 %4 = add i32 %i.03, -1
 %phitmp = icmp slt i32 %4, 0
diff --git a/test/CodeGen/X86/phys_subreg_coalesce.ll b/test/CodeGen/X86/phys_subreg_coalesce.ll
index 789a4ba..3bbc55d 100644
--- a/test/CodeGen/X86/phys_subreg_coalesce.ll
+++ b/test/CodeGen/X86/phys_subreg_coalesce.ll
@@ -8,16 +8,16 @@ entry:
 %1 = sitofp i32 %0 to double ; <double> [#uses=1]
 %2 = trunc i64 %p2.0 to i32 ; <i32> [#uses=1]
 %3 = sitofp i32 %2 to double ; <double> [#uses=1]
- %4 = add double %1, %3 ; <double> [#uses=1]
- %5 = mul double %4, 5.000000e-01 ; <double> [#uses=1]
+ %4 = fadd double %1, %3 ; <double> [#uses=1]
+ %5 = fmul double %4, 5.000000e-01 ; <double> [#uses=1]
 %6 = lshr i64 %p1.0, 32 ; <i64> [#uses=1]
 %7 = trunc i64 %6 to i32 ; <i32> [#uses=1]
 %8 = sitofp i32 %7 to double ; <double> [#uses=1]
 %9 = lshr i64 %p2.0, 32 ; <i64> [#uses=1]
 %10 = trunc i64 %9 to i32 ; <i32> [#uses=1]
 %11 = sitofp i32 %10 to double ; <double> [#uses=1]
- %12 = add double %8, %11 ; <double> [#uses=1]
- %13 = mul double %12, 5.000000e-01 ; <double> [#uses=1]
+ %12 = fadd double %8, %11 ; <double> [#uses=1]
+ %13 = fmul double %12, 5.000000e-01 ; <double> [#uses=1]
 %mrv3 = insertvalue %struct.dpoint undef, double %5, 0 ; <%struct.dpoint> [#uses=1]
 %mrv4 = insertvalue %struct.dpoint %mrv3, double %13, 1 ; <%struct.dpoint> [#uses=1]
 ret %struct.dpoint %mrv4
diff --git a/test/CodeGen/X86/pr2656.ll b/test/CodeGen/X86/pr2656.ll
index 3f6c365..96976b8 100644
--- a/test/CodeGen/X86/pr2656.ll
+++ b/test/CodeGen/X86/pr2656.ll
@@ -12,9 +12,9 @@ entry:
 %tmp1 = load float* %tmp ; <float> [#uses=1]
 %tmp2 = getelementptr %struct.anon* %p, i32 0, i32 1 ; <float*> [#uses=1]
 %tmp3 = load float* %tmp2 ; <float> [#uses=1]
- %neg = sub float -0.000000e+00, %tmp1 ; <float> [#uses=1]
+ %neg = fsub float -0.000000e+00, %tmp1 ; <float> [#uses=1]
 %conv = fpext float %neg to double ; <double> [#uses=1]
- %neg4 = sub float -0.000000e+00, %tmp3 ; <float> [#uses=1]
+ %neg4 = fsub float -0.000000e+00, %tmp3 ; <float> [#uses=1]
 %conv5 = fpext float %neg4 to double ; <double> [#uses=1]
 %call = call i32 (...)* @printf( i8* getelementptr ([17 x i8]* @.str, i32 0, i32 0), double %conv, double %conv5 ) ; <i32> [#uses=0]
 ret void
diff --git a/test/CodeGen/X86/pr3154.ll b/test/CodeGen/X86/pr3154.ll
index a1ed0c2..73f5101 100644
--- a/test/CodeGen/X86/pr3154.ll
+++ b/test/CodeGen/X86/pr3154.ll
@@ -22,7 +22,7 @@ bb: ; preds = %entry
 bb19: ; preds = %bb, %entry
 %data15.0 = phi double* [ %7, %bb ], [ %3, %entry ] ; <double*> [#uses=5]
 %8 = sitofp i32 %len to double ; <double> [#uses=1]
- %9 = sub double %8, 1.000000e+00 ; <double> [#uses=1]
+ %9 = fsub double %8, 1.000000e+00 ; <double> [#uses=1]
 %10 = fdiv double 2.000000e+00, %9 ; <double> [#uses=1]
 store double %10, double* %c, align 8
 %11 = ashr i32 %len, 1 ; <i32> [#uses=3]
diff --git a/test/CodeGen/X86/pr3457.ll b/test/CodeGen/X86/pr3457.ll
index 36d4a5d..d4a9810 100644
--- a/test/CodeGen/X86/pr3457.ll
+++ b/test/CodeGen/X86/pr3457.ll
@@ -6,9 +6,9 @@ define void @foo(double* nocapture %P) nounwind {
 entry:
 %0 = tail call double (...)* @test() nounwind ; <double> [#uses=2]
 %1 = tail call double (...)* @test() nounwind ; <double> [#uses=2]
- %2 = mul double %0, %0 ; <double> [#uses=1]
- %3 = mul double %1, %1 ; <double> [#uses=1]
- %4 = add double %2, %3 ; <double> [#uses=1]
+ %2 = fmul double %0, %0 ; <double> [#uses=1]
+ %3 = fmul double %1, %1 ; <double> [#uses=1]
+ %4 = fadd double %2, %3 ; <double> [#uses=1]
 store double %4, double* %P, align 8
 ret void
 }
diff --git a/test/CodeGen/X86/pre-split1.ll b/test/CodeGen/X86/pre-split1.ll
index 99a46b6..4f9a582 100644
--- a/test/CodeGen/X86/pre-split1.ll
+++ b/test/CodeGen/X86/pre-split1.ll
@@ -5,17 +5,17 @@ define void @test(double* %P, i32 %cond) nounwind {
 entry:
 %0 = load double* %P, align 8 ; <double> [#uses=1]
- %1 = add double %0, 4.000000e+00 ; <double> [#uses=2]
+ %1 = fadd double %0, 4.000000e+00 ; <double> [#uses=2]
 %2 = icmp eq i32 %cond, 0 ; <i1> [#uses=1]
 br i1 %2, label %bb1, label %bb
 bb: ; preds = %entry
- %3 = add double %1, 4.000000e+00 ; <double> [#uses=1]
+ %3 = fadd double %1, 4.000000e+00 ; <double> [#uses=1]
 br label %bb1
 bb1: ; preds = %bb, %entry
 %A.0 = phi double [ %3, %bb ], [ %1, %entry ] ; <double> [#uses=1]
- %4 = mul double %A.0, 4.000000e+00 ; <double> [#uses=1]
+ %4 = fmul double %A.0, 4.000000e+00 ; <double> [#uses=1]
 %5 = tail call i32 (...)* @bar() nounwind ; <i32> [#uses=0]
 store double %4, double* %P, align 8
 ret void
diff --git a/test/CodeGen/X86/pre-split10.ll b/test/CodeGen/X86/pre-split10.ll
index c3e18c4..60297e9 100644
--- a/test/CodeGen/X86/pre-split10.ll
+++ b/test/CodeGen/X86/pre-split10.ll
@@ -7,9 +7,9 @@ entry:
 bb14.i: ; preds = %bb14.i, %entry
 %i8.0.reg2mem.0.i = phi i32 [ 0, %entry ], [ %0, %bb14.i ] ; <i32> [#uses=1]
 %0 = add i32 %i8.0.reg2mem.0.i, 1 ; <i32> [#uses=2]
- %1 = add double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %2 = add double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %3 = add double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
+ %1 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
+ %2 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
+ %3 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
 %exitcond75.i = icmp eq i32 %0, 32 ; <i1> [#uses=1]
 br i1 %exitcond75.i, label %bb24.i, label %bb14.i
@@ -37,13 +37,13 @@ bb7.i.i: ; preds = %bb35.preheader.i, %bb5.i.i
 br label %bb35.preheader.i
 bb35.preheader.i: ; preds = %bb7.i.i, %bb33.i
- %9 = sub double 0.000000e+00, %4 ; <double> [#uses=1]
+ %9 = fsub double 0.000000e+00, %4 ; <double> [#uses=1]
 store double %9, double* null, align 8
- %10 = sub double 0.000000e+00, %5 ; <double> [#uses=1]
+ %10 = fsub double 0.000000e+00, %5 ; <double> [#uses=1]
 store double %10, double* null, align 8
- %11 = sub double 0.000000e+00, %6 ; <double> [#uses=1]
+ %11 = fsub double 0.000000e+00, %6 ; <double> [#uses=1]
 store double %11, double* null, align 8
- %12 = sub double 0.000000e+00, %7 ; <double> [#uses=1]
+ %12 = fsub double 0.000000e+00, %7 ; <double> [#uses=1]
 store double %12, double* null, align 8
 br i1 false, label %bb7.i.i, label %bb5.i.i
 }
diff --git a/test/CodeGen/X86/pre-split4.ll b/test/CodeGen/X86/pre-split4.ll
index 97401b3..a570f73 100644
--- a/test/CodeGen/X86/pre-split4.ll
+++ b/test/CodeGen/X86/pre-split4.ll
@@ -10,14 +10,14 @@ bb: ; preds = %bb, %entry
 %Flint.0.reg2mem.0 = phi double [ 0.000000e+00, %entry ], [ %5, %bb ] ; <double> [#uses=1]
 %twoThrd.0.reg2mem.0 = phi double [ 0.000000e+00, %entry ], [ %1, %bb ] ; <double> [#uses=1]
 %0 = tail call double @llvm.pow.f64(double 0x3FE5555555555555, double 0.000000e+00) ; <double> [#uses=1]
- %1 = add double %0, %twoThrd.0.reg2mem.0 ; <double> [#uses=1]
+ %1 = fadd double %0, %twoThrd.0.reg2mem.0 ; <double> [#uses=1]
 %2 = tail call double @sin(double %k.0.reg2mem.0) nounwind readonly ; <double> [#uses=1]
- %3 = mul double 0.000000e+00, %2 ; <double> [#uses=1]
+ %3 = fmul double 0.000000e+00, %2 ; <double> [#uses=1]
 %4 = fdiv double 1.000000e+00, %3 ; <double> [#uses=1]
 store double %Flint.0.reg2mem.0, double* null
 store double %twoThrd.0.reg2mem.0, double* null
- %5 = add double %4, %Flint.0.reg2mem.0 ; <double> [#uses=1]
- %6 = add double %k.0.reg2mem.0, 1.000000e+00 ; <double> [#uses=1]
+ %5 = fadd double %4, %Flint.0.reg2mem.0 ; <double> [#uses=1]
+ %6 = fadd double %k.0.reg2mem.0, 1.000000e+00 ; <double> [#uses=1]
 br label %bb
 }
diff --git a/test/CodeGen/X86/pre-split5.ll b/test/CodeGen/X86/pre-split5.ll
index d353825..b83003f 100644
--- a/test/CodeGen/X86/pre-split5.ll
+++ b/test/CodeGen/X86/pre-split5.ll
@@ -40,7 +40,7 @@ bb28: ; preds = %bb14
 bb30: ; preds = %bb36, %bb28
 %m.1.reg2mem.0 = phi i32 [ %m.0, %bb36 ], [ 0, %bb28 ] ; <i32> [#uses=1]
- %1 = mul double 0.000000e+00, %0 ; <double> [#uses=1]
+ %1 = fmul double 0.000000e+00, %0 ; <double> [#uses=1]
 %2 = fptosi double %1 to i32 ; <i32> [#uses=1]
 br i1 false, label %bb36, label %bb35
diff --git a/test/CodeGen/X86/pre-split6.ll b/test/CodeGen/X86/pre-split6.ll
index 7808223..e771b80 100644
--- a/test/CodeGen/X86/pre-split6.ll
+++ b/test/CodeGen/X86/pre-split6.ll
@@ -20,14 +20,14 @@ bb.nph: ; preds = %entry
 bb9.i: ; preds = %bb.nph
 %3 = tail call double @asin(double 0.000000e+00) nounwind readonly ; <double> [#uses=0]
 %4 = fdiv double 1.000000e+00, %1 ; <double> [#uses=1]
- %5 = mul double %4, 0.000000e+00 ; <double> [#uses=1]
+ %5 = fmul double %4, 0.000000e+00 ; <double> [#uses=1]
 %6 = tail call double @asin(double %5) nounwind readonly ; <double> [#uses=0]
 unreachable
 bb13.i: ; preds = %bb.nph
 %7 = fdiv double 1.000000e+00, %1 ; <double> [#uses=1]
 %8 = tail call double @sin(double 0.000000e+00) nounwind readonly ; <double> [#uses=1]
- %9 = mul double %7, %8 ; <double> [#uses=1]
+ %9 = fmul double %7, %8 ; <double> [#uses=1]
 %10 = tail call double @asin(double %9) nounwind readonly ; <double> [#uses=0]
 unreachable
diff --git a/test/CodeGen/X86/pre-split7.ll b/test/CodeGen/X86/pre-split7.ll
index 7f7b933..cd9d205 100644
--- a/test/CodeGen/X86/pre-split7.ll
+++ b/test/CodeGen/X86/pre-split7.ll
@@ -17,15 +17,15 @@ entry:
 bb: ; preds = %bb, %entry
 %0 = tail call double @asin(double 0.000000e+00) nounwind readonly ; <double> [#uses=1]
- %1 = add double 0.000000e+00, %0 ; <double> [#uses=2]
+ %1 = fadd double 0.000000e+00, %0 ; <double> [#uses=2]
 %2 = tail call double @asin(double 0.000000e+00) nounwind readonly ; <double> [#uses=1]
- %3 = sub double %1, %2 ; <double> [#uses=2]
+ %3 = fsub double %1, %2 ; <double> [#uses=2]
 store double %3, double* @axis_slope_angle, align 8
 %4 = fdiv double %1, 2.000000e+00 ; <double> [#uses=1]
 %5 = tail call double @sin(double %4) nounwind readonly ; <double> [#uses=1]
- %6 = mul double 0.000000e+00, %5 ; <double> [#uses=1]
+ %6 = fmul double 0.000000e+00, %5 ; <double> [#uses=1]
 %7 = tail call double @tan(double %3) nounwind readonly ; <double> [#uses=0]
- %8 = add double 0.000000e+00, %6 ; <double> [#uses=1]
+ %8 = fadd double 0.000000e+00, %6 ; <double> [#uses=1]
 store double %8, double* @object_distance, align 8
 br label %bb
diff --git a/test/CodeGen/X86/pre-split8.ll b/test/CodeGen/X86/pre-split8.ll
index eb6d49f..2259819 100644
--- a/test/CodeGen/X86/pre-split8.ll
+++ b/test/CodeGen/X86/pre-split8.ll
@@ -19,12 +19,12 @@ bb: ; preds = %bb9.i, %entry
 br i1 %1, label %bb9.i, label %bb13.i
 bb9.i: ; preds = %bb
- %2 = sub double %.rle4, %0 ; <double> [#uses=0]
+ %2 = fsub double %.rle4, %0 ; <double> [#uses=0]
 %3 = tail call double @asin(double 0.000000e+00) nounwind readonly ; <double> [#uses=0]
- %4 = mul double 0.000000e+00, %0 ; <double> [#uses=1]
+ %4 = fmul double 0.000000e+00, %0 ; <double> [#uses=1]
 %5 = tail call double @tan(double 0.000000e+00) nounwind readonly ; <double> [#uses=0]
- %6 = mul double %4, 0.000000e+00 ; <double> [#uses=1]
- %7 = add double %6, 0.000000e+00 ; <double> [#uses=1]
+ %6 = fmul double %4, 0.000000e+00 ; <double> [#uses=1]
+ %7 = fadd double %6, 0.000000e+00 ; <double> [#uses=1]
 br i1 false, label %return, label %bb
 bb13.i: ; preds = %bb
diff --git a/test/CodeGen/X86/pre-split9.ll b/test/CodeGen/X86/pre-split9.ll
index bfafe85..1be960f 100644
--- a/test/CodeGen/X86/pre-split9.ll
+++ b/test/CodeGen/X86/pre-split9.ll
@@ -21,13 +21,13 @@ bb: ; preds = %bb9.i, %entry
 br i1 %1, label %bb9.i, label %bb13.i
 bb9.i: ; preds = %bb
- %2 = sub double %.rle4, %0 ; <double> [#uses=0]
+ %2 = fsub double %.rle4, %0 ; <double> [#uses=0]
 %3 = tail call double @asin(double 0.000000e+00) nounwind readonly ; <double> [#uses=0]
 %4 = tail call double @sin(double 0.000000e+00) nounwind readonly ; <double> [#uses=1]
- %5 = mul double %4, %0 ; <double> [#uses=1]
+ %5 = fmul double %4, %0 ; <double> [#uses=1]
 %6 = tail call double @tan(double 0.000000e+00) nounwind readonly ; <double> [#uses=0]
- %7 = mul double %5, 0.000000e+00 ; <double> [#uses=1]
- %8 = add double %7, 0.000000e+00 ; <double> [#uses=1]
+ %7 = fmul double %5, 0.000000e+00 ; <double> [#uses=1]
+ %8 = fadd double %7, 0.000000e+00 ; <double> [#uses=1]
 br i1 false, label %return, label %bb
 bb13.i: ; preds = %bb
diff --git a/test/CodeGen/X86/red-zone2.ll b/test/CodeGen/X86/red-zone2.ll
new file mode 100644
index 0000000..dea7d7e
--- /dev/null
+++ b/test/CodeGen/X86/red-zone2.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-as < %s | llc -march=x86-64 > %t
+; RUN: grep subq %t | count 1
+; RUN: grep addq %t | count 1
+
+define x86_fp80 @f0(float %f) nounwind readnone noredzone {
+entry:
+ %0 = fpext float %f to x86_fp80 ; <x86_fp80> [#uses=1]
+ ret x86_fp80 %0
+}
diff --git a/test/CodeGen/X86/remat-constant.ll b/test/CodeGen/X86/remat-constant.ll
index d9ef6fe..4c983b0 100644
--- a/test/CodeGen/X86/remat-constant.ll
+++ b/test/CodeGen/X86/remat-constant.ll
@@ -1,4 +1,4 @@
-; RUN: llvm-as < %s | llc -march=x86-64 -relocation-model=static -aggressive-remat | grep xmm | count 2
+; RUN: llvm-as < %s | llc -mtriple=x86_64-linux -relocation-model=static -aggressive-remat | grep xmm | count 2
 declare void @bar() nounwind
diff --git a/test/CodeGen/X86/shrink-fp-const1.ll b/test/CodeGen/X86/shrink-fp-const1.ll
index 966e69e..3406aee 100644
--- a/test/CodeGen/X86/shrink-fp-const1.ll
+++ b/test/CodeGen/X86/shrink-fp-const1.ll
@@ -2,6 +2,6 @@
 ; PR1264
 define double @foo(double %x) {
- %y = mul double %x, 5.000000e-01
+ %y = fmul double %x, 5.000000e-01
 ret double %y
 }
diff --git a/test/CodeGen/X86/small-byval-memcpy.ll b/test/CodeGen/X86/small-byval-memcpy.ll
index dedd948..8b87f74 100644
--- a/test/CodeGen/X86/small-byval-memcpy.ll
+++ b/test/CodeGen/X86/small-byval-memcpy.ll
@@ -8,7 +8,7 @@ entry:
 %iz = alloca { x86_fp80, x86_fp80 } ; <{ x86_fp80, x86_fp80 }*> [#uses=3]
 %tmp1 = getelementptr { x86_fp80, x86_fp80 }* %z, i32 0, i32 1 ; <x86_fp80*> [#uses=1]
 %tmp2 = load x86_fp80* %tmp1, align 16 ; <x86_fp80> [#uses=1]
- %tmp3 = sub x86_fp80 0xK80000000000000000000, %tmp2 ; <x86_fp80> [#uses=1]
+ %tmp3 = fsub x86_fp80 0xK80000000000000000000, %tmp2 ; <x86_fp80> [#uses=1]
 %tmp4 = getelementptr { x86_fp80, x86_fp80 }* %iz, i32 0, i32 1 ; <x86_fp80*> [#uses=1]
 %real = getelementptr { x86_fp80, x86_fp80 }* %iz, i32 0, i32 0 ; <x86_fp80*> [#uses=1]
 %tmp6 = getelementptr { x86_fp80, x86_fp80 }* %z, i32 0, i32 0 ; <x86_fp80*> [#uses=1]
diff --git a/test/CodeGen/X86/soft-fp.ll b/test/CodeGen/X86/soft-fp.ll
index 7fa8fed..0c697de 100644
--- a/test/CodeGen/X86/soft-fp.ll
+++ b/test/CodeGen/X86/soft-fp.ll
@@ -22,6 +22,6 @@ declare void @llvm.va_end(i8*) nounwind
 define float @t2(float %a, float %b) nounwind readnone {
 entry:
- %0 = add float %a, %b ; <float> [#uses=1]
+ %0 = fadd float %a, %b ; <float> [#uses=1]
 ret float %0
 }
diff --git a/test/CodeGen/X86/sse-align-0.ll b/test/CodeGen/X86/sse-align-0.ll
index 39debaa..5a888b2 100644
--- a/test/CodeGen/X86/sse-align-0.ll
+++ b/test/CodeGen/X86/sse-align-0.ll
@@ -2,11 +2,11 @@
 define <4 x float> @foo(<4 x float>* %p, <4 x float> %x) nounwind {
 %t = load <4 x float>* %p
- %z = mul <4 x float> %t, %x
+ %z = fmul <4 x float> %t, %x
 ret <4 x float> %z
 }
 define <2 x double> @bar(<2 x double>* %p, <2 x double> %x) nounwind {
 %t = load <2 x double>* %p
- %z = mul <2 x double> %t, %x
+ %z = fmul <2 x double> %t, %x
 ret <2 x double> %z
 }
diff --git a/test/CodeGen/X86/sse-align-2.ll b/test/CodeGen/X86/sse-align-2.ll
index b5b261d..ba693a2 100644
--- a/test/CodeGen/X86/sse-align-2.ll
+++ b/test/CodeGen/X86/sse-align-2.ll
@@ -2,11 +2,11 @@
 define <4 x float> @foo(<4 x float>* %p, <4 x float> %x) nounwind {
 %t = load <4 x float>* %p, align 4
- %z = mul <4 x float> %t, %x
+ %z = fmul <4 x float> %t, %x
 ret <4 x float> %z
 }
 define <2 x double> @bar(<2 x double>* %p, <2 x double> %x) nounwind {
 %t = load <2 x double>* %p, align 8
- %z = mul <2 x double> %t, %x
+ %z = fmul <2 x double> %t, %x
 ret <2 x double> %z
 }
diff --git a/test/CodeGen/X86/sse-fcopysign.ll b/test/CodeGen/X86/sse-fcopysign.ll
index cff1f7f..d8c3283 100644
--- a/test/CodeGen/X86/sse-fcopysign.ll
+++ b/test/CodeGen/X86/sse-fcopysign.ll
@@ -6,7 +6,7 @@ define float @tst1(float %a, float %b) {
 }
 define double @tst2(double %a, float %b, float %c) {
- %tmp1 = add float %b, %c
+ %tmp1 = fadd float %b, %c
 %tmp2 = fpext float %tmp1 to double
 %tmp = tail call double @copysign( double %a, double %tmp2 )
 ret double %tmp
diff --git a/test/CodeGen/X86/sse41-extractps-bitcast-1.ll b/test/CodeGen/X86/sse41-extractps-bitcast-1.ll
index fc0df06..470d146 100644
--- a/test/CodeGen/X86/sse41-extractps-bitcast-1.ll
+++ b/test/CodeGen/X86/sse41-extractps-bitcast-1.ll
@@ -6,7 +6,7 @@
 define float @bar(<4 x float> %v) {
 %s = extractelement <4 x float> %v, i32 3
- %t = add float %s, 1.0
+ %t = fadd float %s, 1.0
 ret float %t
 }
 define float @baz(<4 x float> %v) {
diff --git a/test/CodeGen/X86/sse41-pmovx.ll b/test/CodeGen/X86/sse41-pmovx.ll
index 71e5e25..c8cfec9 100644
--- a/test/CodeGen/X86/sse41-pmovx.ll
+++ b/test/CodeGen/X86/sse41-pmovx.ll
@@ -2,7 +2,7 @@
 ; RUN: llvm-as < %s | llc -march=x86 -mattr=sse41 | not grep movq
 ; RUN: llvm-as < %s | llc -march=x86 -mattr=sse41 | grep pmovsxbd
 ; RUN: llvm-as < %s | llc -march=x86 -mattr=sse41 | grep pmovsxwd
-; RUN: llvm-as < %s | llc -march=x86 -mattr=sse41 | grep pmovsxbq
+; RUN: llvm-as < %s | llc -march=x86 -mattr=sse41 | grep pmovzxbq
 ; RUN: llvm-as < %s | llc -march=x86-64 -mattr=sse41 -mtriple=x86_64-apple-darwin | grep movq | count 1
 ; RUN: llvm-as < %s | llc -march=x86-64 -mattr=sse41 -mtriple=x86_64-unknown-linux-gnu | not grep movq
diff --git a/test/CodeGen/X86/stack-align.ll b/test/CodeGen/X86/stack-align.ll
index 1e6c2b2..dda6f0d 100644
--- a/test/CodeGen/X86/stack-align.ll
+++ b/test/CodeGen/X86/stack-align.ll
@@ -14,7 +14,7 @@ entry:
 %tmp2 = tail call double @fabs( double %tmp1 ) ; <double> [#uses=1]
 %tmp3 = load double* @G, align 16 ; <double> [#uses=1]
 %tmp4 = tail call double @fabs( double %tmp3 ) ; <double> [#uses=1]
- %tmp6 = add double %tmp4, %tmp2 ; <double> [#uses=1]
+ %tmp6 = fadd double %tmp4, %tmp2 ; <double> [#uses=1]
 store double %tmp6, double* %P, align 8
 ret void
 }
diff --git a/test/CodeGen/X86/storetrunc-fp.ll b/test/CodeGen/X86/storetrunc-fp.ll
index 655cbd6..945cf48 100644
--- a/test/CodeGen/X86/storetrunc-fp.ll
+++ b/test/CodeGen/X86/storetrunc-fp.ll
@@ -1,7 +1,7 @@
 ; RUN: llvm-as < %s | llc -march=x86 | not grep flds
 define void @foo(x86_fp80 %a, x86_fp80 %b, float* %fp) {
- %c = add x86_fp80 %a, %b
+ %c = fadd x86_fp80 %a, %b
 %d = fptrunc x86_fp80 %c to float
 store float %d, float* %fp
 ret void
diff --git a/test/CodeGen/X86/stride-reuse.ll b/test/CodeGen/X86/stride-reuse.ll
index 97f33d8..277a443 100644
--- a/test/CodeGen/X86/stride-reuse.ll
+++ b/test/CodeGen/X86/stride-reuse.ll
@@ -14,7 +14,7 @@ bb:
 %i.019.0 = phi i32 [ %indvar.next, %bb ], [ 0, %entry ]
 %tmp2 = getelementptr [1000 x float]* @B, i32 0, i32 %i.019.0
 %tmp3 = load float* %tmp2, align 4
- %tmp4 = mul float %tmp3, 2.000000e+00
+ %tmp4 = fmul float %tmp3, 2.000000e+00
 %tmp5 = getelementptr [1000 x float]* @A, i32 0, i32 %i.019.0
 store float %tmp4, float* %tmp5, align 4
 %tmp8 = shl i32 %i.019.0, 1
diff --git a/test/CodeGen/X86/twoaddr-coalesce-2.ll b/test/CodeGen/X86/twoaddr-coalesce-2.ll
index 9a011f7..3fe4cd1 100644
--- a/test/CodeGen/X86/twoaddr-coalesce-2.ll
+++ b/test/CodeGen/X86/twoaddr-coalesce-2.ll
@@ -9,7 +9,7 @@ entry:
 %tmp.i3 = bitcast <2 x double> %B to <2 x i64> ; <<2 x i64>> [#uses=1]
 %tmp2.i = or <2 x i64> %tmp.i3, <i64 4607632778762754458, i64 4607632778762754458> ; <<2 x i64>> [#uses=1]
 %tmp3.i = bitcast <2 x i64> %tmp2.i to <2 x double> ; <<2 x double>> [#uses=1]
- %tmp.i2 = add <2 x double> %tmp3.i, %A ; <<2 x double>> [#uses=1]
- %tmp.i = add <2 x double> %tmp.i2, %C ; <<2 x double>> [#uses=1]
+ %tmp.i2 = fadd <2 x double> %tmp3.i, %A ; <<2 x double>> [#uses=1]
+ %tmp.i = fadd <2 x double> %tmp.i2, %C ; <<2 x double>> [#uses=1]
 ret <2 x double> %tmp.i
 }
diff --git a/test/CodeGen/X86/vec_extract.ll b/test/CodeGen/X86/vec_extract.ll
index f1f009e..ee7567c 100644
--- a/test/CodeGen/X86/vec_extract.ll
+++ b/test/CodeGen/X86/vec_extract.ll
@@ -6,7 +6,7 @@
 define void @test1(<4 x float>* %F, float* %f) nounwind {
 %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp7 = add <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
+ %tmp7 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
 %tmp2 = extractelement <4 x float> %tmp7, i32 0 ; <float> [#uses=1]
 store float %tmp2, float* %f
 ret void
@@ -14,7 +14,7 @@ define void @test1(<4 x float>* %F, float* %f) nounwind {
 define float @test2(<4 x float>* %F, float* %f) nounwind {
 %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp7 = add <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
+ %tmp7 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
 %tmp2 = extractelement <4 x float> %tmp7, i32 2 ; <float> [#uses=1]
 ret float %tmp2
 }
@@ -29,7 +29,7 @@ define void @test3(float* %R, <4 x float>* %P1) nounwind {
 define double @test4(double %A) nounwind {
 %tmp1 = call <2 x double> @foo( ) ; <<2 x double>> [#uses=1]
 %tmp2 = extractelement <2 x double> %tmp1, i32 1 ; <double> [#uses=1]
- %tmp3 = add double %tmp2, %A ; <double> [#uses=1]
+ %tmp3 = fadd double %tmp2, %A ; <double> [#uses=1]
 ret double %tmp3
 }
diff --git a/test/CodeGen/X86/vec_fneg.ll b/test/CodeGen/X86/vec_fneg.ll
index 03765d6..a801472 100644
--- a/test/CodeGen/X86/vec_fneg.ll
+++ b/test/CodeGen/X86/vec_fneg.ll
@@ -1,11 +1,11 @@
 ; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2
 define <4 x float> @t1(<4 x float> %Q) {
- %tmp15 = sub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %Q
+ %tmp15 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %Q
 ret <4 x float> %tmp15
 }
 define <4 x float> @t2(<4 x float> %Q) {
- %tmp15 = sub <4 x float> zeroinitializer, %Q
+ %tmp15 = fsub <4 x float> zeroinitializer, %Q
 ret <4 x float> %tmp15
 }
diff --git a/test/CodeGen/X86/vec_ins_extract.ll b/test/CodeGen/X86/vec_ins_extract.ll
index 86f1306..7882839 100644
--- a/test/CodeGen/X86/vec_ins_extract.ll
+++ b/test/CodeGen/X86/vec_ins_extract.ll
@@ -7,9 +7,9 @@
 define void @test(<4 x float>* %F, float %f) {
 entry:
 %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp3 = add <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
+ %tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
 %tmp10 = insertelement <4 x float> %tmp3, float %f, i32 0 ; <<4 x float>> [#uses=2]
- %tmp6 = add <4 x float> %tmp10, %tmp10 ; <<4 x float>> [#uses=1]
+ %tmp6 = fadd <4 x float> %tmp10, %tmp10 ; <<4 x float>> [#uses=1]
 store <4 x float> %tmp6, <4 x float>* %F
 ret void
 }
@@ -18,12 +18,12 @@
 define void @test2(<4 x float>* %F, float %f) {
 entry:
 %G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=3]
 %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp3 = add <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
+ %tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
 store <4 x float> %tmp3, <4 x float>* %G
 %tmp.upgrd.1 = getelementptr <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
 store float %f, float* %tmp.upgrd.1
 %tmp4 = load <4 x float>* %G ; <<4 x float>> [#uses=2]
- %tmp6 = add <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
+ %tmp6 = fadd <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
 store <4 x float> %tmp6, <4 x float>* %F
 ret void
 }
@@ -32,7 +32,7 @@
 define void @test3(<4 x float>* %F, float* %f) {
 entry:
 %G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=2]
 %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp3 = add <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
+ %tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
 store <4 x float> %tmp3, <4 x float>* %G
 %tmp.upgrd.2 = getelementptr <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
 %tmp.upgrd.3 = load float* %tmp.upgrd.2 ; <float> [#uses=1]
@@ -45,7 +45,7 @@ entry:
 %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
 %tmp5.lhs = extractelement <4 x float> %tmp, i32 0 ; <float> [#uses=1]
 %tmp5.rhs = extractelement <4 x float> %tmp, i32 0 ; <float> [#uses=1]
- %tmp5 = add float %tmp5.lhs, %tmp5.rhs ; <float> [#uses=1]
+ %tmp5 = fadd float %tmp5.lhs, %tmp5.rhs ; <float> [#uses=1]
 store float %tmp5, float* %f
 ret void
 }
diff --git a/test/CodeGen/X86/vec_insert.ll b/test/CodeGen/X86/vec_insert.ll
index e032c5b..3a9464c 100644
--- a/test/CodeGen/X86/vec_insert.ll
+++ b/test/CodeGen/X86/vec_insert.ll
@@ -5,7 +5,7 @@
 define void @test(<4 x float>* %F, i32 %I) {
 %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=1]
 %f = sitofp i32 %I to float ; <float> [#uses=1]
 %tmp1 = insertelement <4 x float> %tmp, float %f, i32 0 ; <<4 x float>> [#uses=2]
- %tmp18 = add <4 x float> %tmp1, %tmp1 ; <<4 x float>> [#uses=1]
+ %tmp18 = fadd <4 x float> %tmp1, %tmp1 ; <<4 x float>> [#uses=1]
 store <4 x float> %tmp18, <4 x float>* %F
 ret void
 }
diff --git a/test/CodeGen/X86/vec_logical.ll b/test/CodeGen/X86/vec_logical.ll
index 6e03afb..f895762 100644
--- a/test/CodeGen/X86/vec_logical.ll
+++ b/test/CodeGen/X86/vec_logical.ll
@@ -4,7 +4,7 @@
 ; RUN: grep movaps %t | count 2
 define void @t(<4 x float> %A) {
- %tmp1277 = sub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %A
+ %tmp1277 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %A
 store <4 x float> %tmp1277, <4 x float>* null
 ret void
 }
diff --git a/test/CodeGen/X86/vec_select.ll b/test/CodeGen/X86/vec_select.ll
index ede7ab2..ecb825b 100644
--- a/test/CodeGen/X86/vec_select.ll
+++ b/test/CodeGen/X86/vec_select.ll
@@ -3,7 +3,7 @@
 define void @test(i32 %C, <4 x float>* %A, <4 x float>* %B) {
 %tmp = load <4 x float>* %A ; <<4 x float>> [#uses=1]
 %tmp3 = load <4 x float>* %B ; <<4 x float>> [#uses=2]
- %tmp9 = mul <4 x float> %tmp3, %tmp3 ; <<4 x float>> [#uses=1]
+ %tmp9 = fmul <4 x float> %tmp3, %tmp3 ; <<4 x float>> [#uses=1]
 %tmp.upgrd.1 = icmp eq i32 %C, 0 ; <i1> [#uses=1]
 %iftmp.38.0 = select i1 %tmp.upgrd.1, <4 x float> %tmp9, <4 x float> %tmp ; <<4 x float>> [#uses=1]
 store <4 x float> %iftmp.38.0, <4 x float>* %A
diff --git a/test/CodeGen/X86/vec_shuffle-27.ll b/test/CodeGen/X86/vec_shuffle-27.ll
index 6baf47a..231ac0c 100644
--- a/test/CodeGen/X86/vec_shuffle-27.ll
+++ b/test/CodeGen/X86/vec_shuffle-27.ll
@@ -10,8 +10,8 @@ target triple = "i686-apple-cl.1.0"
 define <8 x float> @my2filter4_1d(<4 x float> %a, <8 x float> %T0, <8 x float> %T1) nounwind readnone {
 entry:
 %tmp7 = shufflevector <4 x float> %a, <4 x float> undef, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3 > ; <<8 x float>> [#uses=1]
- %sub = sub <8 x float> %T1, %T0 ; <<8 x float>> [#uses=1]
- %mul = mul <8 x float> %sub, %tmp7 ; <<8 x float>> [#uses=1]
- %add = add <8 x float> %mul, %T0 ; <<8 x float>> [#uses=1]
+ %sub = fsub <8 x float> %T1, %T0 ; <<8 x float>> [#uses=1]
+ %mul = fmul <8 x float> %sub, %tmp7 ; <<8 x float>> [#uses=1]
+ %add = fadd <8 x float> %mul, %T0 ; <<8 x float>> [#uses=1]
 ret <8 x float> %add
 }
diff --git a/test/CodeGen/X86/vec_shuffle-3.ll b/test/CodeGen/X86/vec_shuffle-3.ll
index 6de1038..018b4cf 100644
--- a/test/CodeGen/X86/vec_shuffle-3.ll
+++ b/test/CodeGen/X86/vec_shuffle-3.ll
@@ -5,8 +5,8 @@
 define <4 x float> @test1(<4 x float>* %x, <4 x float>* %y) {
 %tmp = load <4 x float>* %y ; <<4 x float>> [#uses=2]
 %tmp5 = load <4 x float>* %x ; <<4 x float>> [#uses=2]
- %tmp9 = add <4 x float> %tmp5, %tmp ; <<4 x float>> [#uses=1]
- %tmp21 = sub <4 x float> %tmp5, %tmp ; <<4 x float>> [#uses=1]
+ %tmp9 = fadd <4 x float> %tmp5, %tmp ; <<4 x float>> [#uses=1]
+ %tmp21 = fsub <4 x float> %tmp5, %tmp ; <<4 x float>> [#uses=1]
 %tmp27 = shufflevector <4 x float> %tmp9, <4 x float> %tmp21, <4 x i32> < i32 0, i32 1, i32 4, i32 5 > ; <<4 x float>> [#uses=1]
 ret <4 x float> %tmp27
 }
diff --git a/test/CodeGen/X86/vec_shuffle-5.ll b/test/CodeGen/X86/vec_shuffle-5.ll
index 1acd73f..e356f24 100644
--- a/test/CodeGen/X86/vec_shuffle-5.ll
+++ b/test/CodeGen/X86/vec_shuffle-5.ll
@@ -6,7 +6,7 @@
 define void @test() nounwind {
 %tmp1 = load <4 x float>* null ; <<4 x float>> [#uses=2]
 %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >, <4 x i32> < i32 0, i32 1, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
 %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x i32> < i32 2, i32 3, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
- %tmp4 = add <4 x float> %tmp2, %tmp3 ; <<4 x float>> [#uses=1]
+ %tmp4 = fadd <4 x float> %tmp2, %tmp3 ; <<4 x float>> [#uses=1]
 store <4 x float> %tmp4, <4 x float>* null
 ret void
 }
diff --git a/test/CodeGen/X86/vec_splat.ll b/test/CodeGen/X86/vec_splat.ll
index 64222e4..89914fd 100644
--- a/test/CodeGen/X86/vec_splat.ll
+++ b/test/CodeGen/X86/vec_splat.ll
@@ -7,7 +7,7 @@ define void @test_v4sf(<4 x float>* %P, <4 x float>* %Q, float %X) nounwind {
 %tmp4 = insertelement <4 x float> %tmp2, float %X, i32 2 ; <<4 x float>> [#uses=1]
 %tmp6 = insertelement <4 x float> %tmp4, float %X, i32 3 ; <<4 x float>> [#uses=1]
 %tmp8 = load <4 x float>* %Q ; <<4 x float>> [#uses=1]
- %tmp10 = mul <4 x float> %tmp8, %tmp6 ; <<4 x float>> [#uses=1]
+ %tmp10 = fmul <4 x float> %tmp8, %tmp6 ; <<4 x float>> [#uses=1]
 store <4 x float> %tmp10, <4 x float>* %P
 ret void
 }
@@ -16,7 +16,7 @@ define void @test_v2sd(<2 x double>* %P, <2 x double>* %Q, double %X) nounwind {
 %tmp = insertelement <2 x double> zeroinitializer, double %X, i32 0 ; <<2 x double>> [#uses=1]
 %tmp2 = insertelement <2 x double> %tmp, double %X, i32 1 ; <<2 x double>> [#uses=1]
 %tmp4 = load <2 x double>* %Q ; <<2 x double>> [#uses=1]
- %tmp6 = mul <2 x double> %tmp4, %tmp2 ; <<2 x double>> [#uses=1]
+ %tmp6 = fmul <2 x double> %tmp4, %tmp2 ; <<2 x double>> [#uses=1]
 store <2 x double> %tmp6, <2 x double>* %P
 ret void
 }
diff --git a/test/CodeGen/X86/vec_ss_load_fold.ll b/test/CodeGen/X86/vec_ss_load_fold.ll
index 0f15f92..69900a6 100644
--- a/test/CodeGen/X86/vec_ss_load_fold.ll
+++ b/test/CodeGen/X86/vec_ss_load_fold.ll
@@ -20,8 +20,8 @@ define i16 @test1(float %f) nounwind {
 }
 define i16 @test2(float %f) nounwind {
- %tmp28 = sub float %f, 1.000000e+00 ; <float> [#uses=1]
- %tmp37 = mul float %tmp28, 5.000000e-01 ; <float> [#uses=1]
+ %tmp28 = fsub float %f, 1.000000e+00 ; <float> [#uses=1]
+ %tmp37 = fmul float %tmp28, 5.000000e-01 ; <float> [#uses=1]
 %tmp375 = insertelement <4 x float> undef, float %tmp37, i32 0 ; <<4 x float>> [#uses=1]
 %tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp375, <4 x float> < float 6.553500e+04, float undef, float undef, float undef > ) ; <<4 x float>> [#uses=1]
 %tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> < float 0.000000e+00, float undef, float undef, float undef > ) ; <<4 x float>> [#uses=1]
diff --git a/test/CodeGen/X86/vec_zero.ll b/test/CodeGen/X86/vec_zero.ll
index 17b378f..0a7a543 100644
--- a/test/CodeGen/X86/vec_zero.ll
+++ b/test/CodeGen/X86/vec_zero.ll
@@ -2,7 +2,7 @@
 define void @foo(<4 x float>* %P) {
 %T = load <4 x float>* %P ; <<4 x float>> [#uses=1]
- %S = add <4 x float> zeroinitializer, %T ; <<4 x float>> [#uses=1]
+ %S = fadd <4 x float> zeroinitializer, %T ; <<4 x float>> [#uses=1]
 store <4 x float> %S, <4 x float>* %P
 ret void
 }
diff --git a/test/CodeGen/X86/vector.ll b/test/CodeGen/X86/vector.ll
index e7f3b92..8e1de2f 100644
--- a/test/CodeGen/X86/vector.ll
+++ b/test/CodeGen/X86/vector.ll
@@ -15,7 +15,7 @@
 define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) {
 %p = load %f1* %P ; <%f1> [#uses=1]
 %q = load %f1* %Q ; <%f1> [#uses=1]
- %R = add %f1 %p, %q ; <%f1> [#uses=1]
+ %R = fadd %f1 %p, %q ; <%f1> [#uses=1]
 store %f1 %R, %f1* %S
 ret void
 }
@@ -23,7 +23,7 @@ define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) {
 define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) {
 %p = load %f2* %P ; <%f2> [#uses=1]
 %q = load %f2* %Q ; <%f2> [#uses=1]
- %R = add %f2 %p, %q ; <%f2> [#uses=1]
+ %R = fadd %f2 %p, %q ; <%f2> [#uses=1]
 store %f2 %R, %f2* %S
 ret void
 }
@@ -31,7 +31,7 @@ define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) {
 define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) {
 %p = load %f4* %P ; <%f4> [#uses=1]
 %q = load %f4* %Q ; <%f4> [#uses=1]
- %R = add %f4 %p, %q ; <%f4> [#uses=1]
+ %R = fadd %f4 %p, %q ; <%f4> [#uses=1]
 store %f4 %R, %f4* %S
 ret void
 }
@@ -39,7 +39,7 @@ define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) {
 define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) {
 %p = load %f8* %P ; <%f8> [#uses=1]
 %q = load %f8* %Q ; <%f8> [#uses=1]
- %R = add %f8 %p, %q ; <%f8> [#uses=1]
+ %R = fadd %f8 %p, %q ; <%f8> [#uses=1]
 store %f8 %R, %f8* %S
 ret void
 }
@@ -47,7 +47,7 @@ define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) {
 define void @test_fmul(%f8* %P, %f8* %Q, %f8* %S) {
 %p = load %f8* %P ; <%f8> [#uses=1]
 %q = load %f8* %Q ; <%f8> [#uses=1]
- %R = mul %f8 %p, %q ; <%f8> [#uses=1]
+ %R = fmul %f8 %p, %q ; <%f8> [#uses=1]
 store %f8 %R, %f8* %S
 ret void
 }
@@ -64,21 +64,21 @@ define void @test_div(%f8* %P, %f8* %Q, %f8* %S) {
 define void @test_cst(%f4* %P, %f4* %S) {
 %p = load %f4* %P ; <%f4> [#uses=1]
- %R = add %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float 2.000000e+00, float 4.500000e+00 > ; <%f4> [#uses=1]
+ %R = fadd %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float 2.000000e+00, float 4.500000e+00 > ; <%f4> [#uses=1]
 store %f4 %R, %f4* %S
 ret void
 }
 define void @test_zero(%f4* %P, %f4* %S) {
 %p = load %f4* %P ; <%f4> [#uses=1]
- %R = add %f4 %p, zeroinitializer ; <%f4> [#uses=1]
+ %R = fadd %f4 %p, zeroinitializer ; <%f4> [#uses=1]
 store %f4 %R, %f4* %S
 ret void
 }
 define void @test_undef(%f4* %P, %f4* %S) {
 %p = load %f4* %P ; <%f4> [#uses=1]
- %R = add %f4 %p, undef ; <%f4> [#uses=1]
+ %R = fadd %f4 %p, undef ; <%f4> [#uses=1]
 store %f4 %R, %f4* %S
 ret void
 }
@@ -115,7 +115,7 @@ define double @test_extract_elt2(%d8* %P) {
 define void @test_cast_1(%f4* %b, %i4* %a) {
 %tmp = load %f4* %b ; <%f4> [#uses=1]
- %tmp2 = add %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 > ; <%f4> [#uses=1]
+ %tmp2 = fadd %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 > ; <%f4> [#uses=1]
 %tmp3 = bitcast %f4 %tmp2 to %i4 ; <%i4> [#uses=1]
 %tmp4 = add %i4 %tmp3, < i32 1, i32 2, i32 3, i32 4 > ; <%i4> [#uses=1]
 store %i4 %tmp4, %i4* %a
@@ -138,7 +138,7 @@ define void @splat(%f4* %P, %f4* %Q, float %X) {
 %tmp4 = insertelement %f4 %tmp2, float %X, i32 2 ; <%f4> [#uses=1]
 %tmp6 = insertelement %f4 %tmp4, float %X, i32 3 ; <%f4> [#uses=1]
 %q = load %f4* %Q ; <%f4> [#uses=1]
- %R = add %f4 %q, %tmp6 ; <%f4> [#uses=1]
+ %R = fadd %f4 %q, %tmp6 ; <%f4> [#uses=1]
 store %f4 %R, %f4* %P
 ret void
 }
diff --git a/test/CodeGen/X86/widen_arith-6.ll b/test/CodeGen/X86/widen_arith-6.ll
index 59548c3..7b0bb33 100644
--- a/test/CodeGen/X86/widen_arith-6.ll
+++ b/test/CodeGen/X86/widen_arith-6.ll
@@ -34,8 +34,8 @@ forbody: ; preds = %forcond
 %arrayidx6 = getelementptr <3 x float>* %tmp5, i32 %tmp4 ; <<3 x float>*> [#uses=1]
 %tmp7 = load <3 x float>* %arrayidx6 ; <<3 x float>> [#uses=1]
 %tmp8 = load <3 x float>* %v ; <<3 x float>> [#uses=1]
- %mul = mul <3 x float> %tmp7, %tmp8 ; <<3 x float>> [#uses=1]
- %add = add <3 x float> %mul, < float 0x409EE02900000000, float 0x409EE02900000000, float 0x409EE02900000000 > ; <<3 x float>> [#uses=1]
+ %mul = fmul <3 x float> %tmp7, %tmp8 ; <<3 x float>> [#uses=1]
+ %add = fadd <3 x float> %mul, < float 0x409EE02900000000, float 0x409EE02900000000, float 0x409EE02900000000 > ; <<3 x float>> [#uses=1]
 store <3 x float> %add, <3 x float>* %arrayidx
 br label %forinc
diff --git a/test/CodeGen/X86/widen_shuffle-1.ll b/test/CodeGen/X86/widen_shuffle-1.ll
index d3bbd5f..a676f33 100644
--- a/test/CodeGen/X86/widen_shuffle-1.ll
+++ b/test/CodeGen/X86/widen_shuffle-1.ll
@@ -5,7 +5,7 @@
 define void @shuf(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) nounwind {
 entry:
 %x = shufflevector <3 x float> %src1, <3 x float> %src2, <3 x i32> < i32 0, i32 1, i32 2>
- %val = add <3 x float> %x, %src2;
+ %val = fadd <3 x float> %x, %src2;
 store <3 x float> %val, <3 x float>* %dst.addr
 ret void
 }
diff --git a/test/CodeGen/X86/widen_shuffle-2.ll b/test/CodeGen/X86/widen_shuffle-2.ll
index d25e02e..c2dfa3d 100644
--- a/test/CodeGen/X86/widen_shuffle-2.ll
+++ b/test/CodeGen/X86/widen_shuffle-2.ll
@@ -5,7 +5,7 @@
 define void @shuf(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) nounwind {
 entry:
 %x = shufflevector <3 x float> %src1, <3 x float> %src2, <3 x i32> < i32 0, i32 4, i32 2>
- %val = add <3 x float> %x, %src2;
+ %val = fadd <3 x float> %x, %src2;
 store <3 x float> %val, <3 x float>* %dst.addr
 ret void
 }
diff --git a/test/CodeGen/XCore/2009-01-14-Remat-Crash.ll b/test/CodeGen/XCore/2009-01-14-Remat-Crash.ll
index 735e988..b9333c9 100644
--- a/test/CodeGen/XCore/2009-01-14-Remat-Crash.ll
+++ b/test/CodeGen/XCore/2009-01-14-Remat-Crash.ll
@@ -8,11 +8,11 @@ bb113: ; preds = %entry
 ret double 0.000000e+00
 bb129: ; preds = %entry
- %tmp134 = sub double %b, %a ; <double> [#uses=1]
- %tmp136 = sub double %tmp134, %c ; <double> [#uses=1]
- %tmp138 = add double %tmp136, %d ; <double> [#uses=1]
- %tmp140 = sub double %tmp138, %e ; <double> [#uses=1]
- %tmp142 = add double %tmp140, %f ; <double> [#uses=1]
- %tmp.0 = mul double %tmp142, 0.000000e+00 ; <double> [#uses=1]
+ %tmp134 = fsub double %b, %a ; <double> [#uses=1]
+ %tmp136 = fsub double %tmp134, %c ; <double> [#uses=1]
+ %tmp138 = fadd double %tmp136, %d ; <double> [#uses=1]
+ %tmp140 = fsub double %tmp138, %e ; <double> [#uses=1]
+ %tmp142 = fadd double %tmp140, %f ; <double> [#uses=1]
+ %tmp.0 = fmul double %tmp142, 0.000000e+00 ; <double> [#uses=1]
 ret double %tmp.0
 }
diff --git a/test/CodeGen/XCore/fneg.ll b/test/CodeGen/XCore/fneg.ll
index e4426fd..3fb7b01 100644
--- a/test/CodeGen/XCore/fneg.ll
+++ b/test/CodeGen/XCore/fneg.ll
@@ -2,7 +2,7 @@
 ; RUN: grep "xor" %t1.s | count 1
 define i1 @test(double %F) nounwind {
 entry:
- %0 = sub double -0.000000e+00, %F
+ %0 = fsub double -0.000000e+00, %F
 %1 = fcmp olt double 0.000000e+00, %0
 ret i1 %1
 }
diff --git a/test/ExecutionEngine/2003-01-10-FUCOM.ll b/test/ExecutionEngine/2003-01-10-FUCOM.ll
index 628be16..30f9330 100644
--- a/test/ExecutionEngine/2003-01-10-FUCOM.ll
+++ b/test/ExecutionEngine/2003-01-10-FUCOM.ll
@@ -2,10 +2,10 @@
 ; RUN: lli %t.bc > /dev/null
 define i32 @main() {
- %X = add double 0.000000e+00, 1.000000e+00 ; <double> [#uses=1]
- %Y = sub double 0.000000e+00, 1.000000e+00 ; <double> [#uses=2]
+ %X = fadd double 0.000000e+00, 1.000000e+00 ; <double> [#uses=1]
+ %Y = fsub double 0.000000e+00, 1.000000e+00 ; <double> [#uses=2]
 %Z = fcmp oeq double %X, %Y ; <i1> [#uses=0]
- add double %Y, 0.000000e+00 ; <double>:1 [#uses=0]
+ fadd double %Y, 0.000000e+00 ; <double>:1 [#uses=0]
 ret i32 0
 }
diff --git a/test/ExecutionEngine/test-fp.ll b/test/ExecutionEngine/test-fp.ll
index a119b40..2e8ecd5 100644
--- a/test/ExecutionEngine/test-fp.ll
+++ b/test/ExecutionEngine/test-fp.ll
@@ -3,13 +3,13 @@
 define double @test(double* %DP, double %Arg) {
 %D = load double* %DP ; <double> [#uses=1]
- %V = add double %D, 1.000000e+00 ; <double> [#uses=2]
- %W = sub double %V, %V ; <double> [#uses=3]
- %X = mul double %W, %W ; <double> [#uses=2]
+ %V = fadd double %D, 1.000000e+00 ; <double> [#uses=2]
+ %W = fsub double %V, %V ; <double> [#uses=3]
+ %X = fmul double %W, %W ; <double> [#uses=2]
 %Y = fdiv double %X, %X ; <double> [#uses=2]
 %Z = frem double %Y, %Y ; <double> [#uses=3]
 %Z1 = fdiv double %Z, %W ; <double> [#uses=0]
- %Q = add double %Z, %Arg ; <double> [#uses=1]
+ %Q = fadd double %Z, %Arg ; <double> [#uses=1]
 %R = bitcast double %Q to double ; <double> [#uses=1]
 store double %R, double* %DP
 ret double %Z
diff --git a/test/ExecutionEngine/test-setcond-fp.ll b/test/ExecutionEngine/test-setcond-fp.ll
index 235c402..b917693 100644
--- a/test/ExecutionEngine/test-setcond-fp.ll
+++ b/test/ExecutionEngine/test-setcond-fp.ll
@@ -3,10 +3,10 @@
 define i32 @main() {
- %double1 = add double 0.000000e+00, 0.000000e+00 ; <double> [#uses=6]
- %double2 = add double 0.000000e+00, 0.000000e+00 ; <double> [#uses=6]
- %float1 = add float 0.000000e+00, 0.000000e+00 ; <float> [#uses=6]
- %float2 = add float 0.000000e+00, 0.000000e+00 ; <float> [#uses=6]
+ %double1 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=6]
+ %double2 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=6]
+ %float1 = fadd float 0.000000e+00, 0.000000e+00 ; <float> [#uses=6]
+ %float2 = fadd float 0.000000e+00, 0.000000e+00 ; <float> [#uses=6]
 %test49 = fcmp oeq float %float1, %float2 ; <i1> [#uses=0]
 %test50 = fcmp oge float %float1, %float2 ; <i1> [#uses=0]
 %test51 = fcmp ogt float %float1, %float2 ; <i1> [#uses=0]
diff --git a/test/Feature/ppcld.ll b/test/Feature/ppcld.ll
index f21eb43..393a491 100644
--- a/test/Feature/ppcld.ll
+++ b/test/Feature/ppcld.ll
@@ -15,7 +15,7 @@ entry:
 %tmp = load float* @f ; <float> [#uses=1]
 %tmp1 = fpext float %tmp to double ; <double> [#uses=1]
 %tmp2 = load double* @d ; <double> [#uses=1]
- %tmp3 = mul double %tmp1, %tmp2 ; <double> [#uses=1]
+ %tmp3 = fmul double %tmp1, %tmp2 ; <double> [#uses=1]
 %tmp4 = fpext double %tmp3 to ppc_fp128 ; <ppc_fp128> [#uses=1]
 store ppc_fp128 %tmp4, ppc_fp128* @ld
 br label %return
diff --git a/test/Feature/sparcld.ll b/test/Feature/sparcld.ll
index 2e99bda..095f6f6 100644
--- a/test/Feature/sparcld.ll
+++ b/test/Feature/sparcld.ll
@@ -13,7 +13,7 @@ entry:
 %tmp = load float* @f ; <float> [#uses=1]
 %tmp1 = fpext float %tmp to double ; <double> [#uses=1]
 %tmp2 = load double* @d ; <double> [#uses=1]
- %tmp3 = mul double %tmp1, %tmp2 ; <double> [#uses=1]
+ %tmp3 = fmul double %tmp1, %tmp2 ; <double> [#uses=1]
 %tmp4 = fpext double %tmp3 to fp128 ; <fp128> [#uses=1]
 store fp128 %tmp4, fp128* @ld
 br label %return
diff --git a/test/Feature/x86ld.ll b/test/Feature/x86ld.ll
index 6904003..32005ae 100644
--- a/test/Feature/x86ld.ll
+++ b/test/Feature/x86ld.ll
@@ -15,7 +15,7 @@ entry:
 %tmp = load float* @f ; <float> [#uses=1]
 %tmp1 = fpext float %tmp to double ; <double> [#uses=1]
 %tmp2 = load double* @d ; <double> [#uses=1]
- %tmp3 = mul double %tmp1, %tmp2 ; <double> [#uses=1]
+ %tmp3 = fmul double %tmp1, %tmp2 ; <double> [#uses=1]
 %tmp4 = fpext double %tmp3 to x86_fp80 ; <x86_fp80> [#uses=1]
 store x86_fp80 %tmp4, x86_fp80* @ld
 br label %return
diff --git a/test/FrontendC/2009-01-20-k8.c b/test/FrontendC/2009-01-20-k8.c
index 627ab65..d28302b 100644
--- a/test/FrontendC/2009-01-20-k8.c
+++ b/test/FrontendC/2009-01-20-k8.c
@@ -1,3 +1,4 @@
 // RUN: %llvmgcc %s -S -march=k8
-// XTARGET: x86
+// XFAIL: *
+// XTARGET: x86,i386,i686
 long double x;
diff --git a/test/FrontendC/2009-05-04-EnumInreg.c b/test/FrontendC/2009-05-04-EnumInreg.c
index 8a76f5f..6dbdb54 100644
--- a/test/FrontendC/2009-05-04-EnumInreg.c
+++ b/test/FrontendC/2009-05-04-EnumInreg.c
@@ -1,5 +1,6 @@
 // RUN: %llvmgcc -S -m32 -mregparm=3 %s -emit-llvm -o - | grep {inreg %action}
-// XTARGET: x86
+// XFAIL: *
+// XTARGET: x86,i386,i686
 // PR3967
 enum kobject_action {
diff --git a/test/Other/2004-08-16-PackedSelect.ll b/test/Other/2004-08-16-PackedSelect.ll
index 6438316..c1d6214 100644
--- a/test/Other/2004-08-16-PackedSelect.ll
+++ b/test/Other/2004-08-16-PackedSelect.ll
@@ -5,7 +5,7 @@
 define void @main() {
 %t0 = load <4 x float>* @foo ; <<4 x float>> [#uses=3]
- %t1 = add <4 x float> %t0, %t0 ; <<4 x float>> [#uses=1]
+ %t1 = fadd <4 x float> %t0, %t0 ; <<4 x float>> [#uses=1]
 %t2 = select i1 true, <4 x float> %t0, <4 x float> %t1 ; <<4 x float>> [#uses=1]
 store <4 x float> %t2, <4 x float>* @bar
 ret void
diff --git a/test/Other/2004-08-16-PackedSimple.ll b/test/Other/2004-08-16-PackedSimple.ll
index 5bb8b79..81cecd4 100644
--- a/test/Other/2004-08-16-PackedSimple.ll
+++ b/test/Other/2004-08-16-PackedSimple.ll
@@ -5,7 +5,7 @@
 define void @main() {
 %t0 = load <4 x float>* @foo ; <<4 x float>> [#uses=3]
- %t2 = add <4 x float> %t0, %t0 ; <<4 x float>> [#uses=1]
+ %t2 = fadd <4 x float> %t0, %t0 ; <<4 x float>> [#uses=1]
 %t3 = select i1 false, <4 x float> %t0, <4 x float> %t2 ; <<4 x float>> [#uses=1]
 store <4 x float> %t3, <4 x float>* @bar
 ret void
diff --git a/test/Other/2004-08-20-PackedControlFlow.ll b/test/Other/2004-08-20-PackedControlFlow.ll
index 49aa606..3943570 100644
--- a/test/Other/2004-08-20-PackedControlFlow.ll
+++ b/test/Other/2004-08-20-PackedControlFlow.ll
@@ -12,7 +12,7 @@ C: ; preds = %B
 ret void
 B: ; preds = %A
- %t2 = add %v4f %t0, %t0 ; <%v4f> [#uses=1]
+ %t2 = fadd %v4f %t0, %t0 ; <%v4f> [#uses=1]
 br label %C
 A: ; preds = %0
diff --git a/test/Other/2009-06-05-no-implicit-float.ll b/test/Other/2009-06-05-no-implicit-float.ll
new file mode 100644
index 0000000..5addfe2
--- /dev/null
+++ b/test/Other/2009-06-05-no-implicit-float.ll
@@ -0,0 +1,4 @@
+
+; RUN: llvm-as < %s | opt -verify | llvm-dis | grep noimplicitfloat
+define void @f() noimplicitfloat {
+}
diff --git a/test/Transforms/ConstProp/calls.ll b/test/Transforms/ConstProp/calls.ll
index 126db4c..c573e56 100644
--- a/test/Transforms/ConstProp/calls.ll
+++ b/test/Transforms/ConstProp/calls.ll
@@ -13,11 +13,11 @@ declare i1 @llvm.isunordered.f64(double, double)
 define double @T() {
 %A = call double @cos( double 0.000000e+00 ) ; <double> [#uses=1]
 %B = call double @sin( double 0.000000e+00 ) ; <double> [#uses=1]
- %a = add double %A, %B ; <double> [#uses=1]
+ %a = fadd double %A, %B ; <double> [#uses=1]
 %C = call double @tan( double 0.000000e+00 ) ; <double> [#uses=1]
- %b = add double %a, %C ; <double> [#uses=1]
+ %b = fadd double %a, %C ; <double> [#uses=1]
 %D = call double @sqrt( double 4.000000e+00 ) ; <double> [#uses=1]
- %c = add double %b, %D ; <double> [#uses=1]
+ %c = fadd double %b, %D ; <double> [#uses=1]
 ret double %c
 }
diff --git a/test/Transforms/DeadStoreElimination/2006-06-27-AST-Remove.ll b/test/Transforms/DeadStoreElimination/2006-06-27-AST-Remove.ll
index 50dcf32..3b3f8ad 100644
--- a/test/Transforms/DeadStoreElimination/2006-06-27-AST-Remove.ll
+++ b/test/Transforms/DeadStoreElimination/2006-06-27-AST-Remove.ll
@@ -601,7 +601,7 @@ entry:
 %tmp21362 = icmp eq i32 0, 0 ; <i1> [#uses=2]
 %tmp216 = sitofp i32 %pn_restart.0.ph to float ; <float> [#uses=1]
 %tmp216.upgrd.177 = fpext float %tmp216 to double ; <double> [#uses=1]
- %tmp217 = add double %tmp216.upgrd.177, 1.000000e+00 ; <double> [#uses=1]
+ %tmp217 = fadd double %tmp216.upgrd.177, 1.000000e+00 ; <double> [#uses=1]
 %tmp835 = icmp sgt i32 %pn_restart.0.ph, 9 ; <i1> [#uses=0]
 store i32 0, i32* @nodes
 store i32 0, i32* @qnodes
diff --git a/test/Transforms/GVNPRE/2007-06-18-ConstantInPhi.ll b/test/Transforms/GVNPRE/2007-06-18-ConstantInPhi.ll
index b4cb517..180105a 100644
--- a/test/Transforms/GVNPRE/2007-06-18-ConstantInPhi.ll
+++ b/test/Transforms/GVNPRE/2007-06-18-ConstantInPhi.ll
@@ -10,8 +10,8 @@ bb.nph: ; preds = %entry
 bb34: ; preds = %bb34, %bb.nph
 %p.1 = phi float [ 0x3FE6A09E60000000, %bb.nph ], [ %tmp48, %bb34 ] ; <float> [#uses=1]
 %tmp44 = load float* null ; <float> [#uses=1]
- %tmp46 = sub float %tmp44, 0.000000e+00 ; <float> [#uses=1]
- %tmp48 = mul float %tmp46, %p.1 ; <float> [#uses=1]
+ %tmp46 = fsub float %tmp44, 0.000000e+00 ; <float> [#uses=1]
+ %tmp48 = fmul float %tmp46, %p.1 ; <float> [#uses=1]
 br i1 false, label %bb57, label %bb34
 bb57: ; preds = %bb34
diff --git a/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll b/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
index 0a8dd49..779e7fb 100644
--- a/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
+++ b/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
@@ -4,6 +4,6 @@ define double @foo() nounwind {
 entry:
 %tmp1 = volatile load double* @t0.1441, align 8 ; <double> [#uses=2]
- %tmp4 = mul double %tmp1, %tmp1 ; <double> [#uses=1]
+ %tmp4 = fmul double %tmp1, %tmp1 ; <double> [#uses=1]
 ret double %tmp4
 }
diff --git a/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll b/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll
index 3464be9..8a0b5b3 100644
--- a/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll
+++ b/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll
@@ -26,7 +26,7 @@ define double @test2() {
 %V1 = load double* getelementptr (%T* @G, i32 0, i32 0), align 16
 %V2 = load double* getelementptr (%T* @G, i32 0, i32 1), align 8
 %V3 = load double* getelementptr (%T* @G, i32 0, i32 2), align 16
- %R = add double %V1, %V2
- %R2 = add double %R, %V3
+ %R = fadd double %V1, %V2
+ %R2 = fadd double %R, %V3
 ret double %R2
 }
diff --git a/test/Transforms/GlobalOpt/constantexpr-dangle.ll b/test/Transforms/GlobalOpt/constantexpr-dangle.ll
index 6e33ae0..6fa139b 100644
--- a/test/Transforms/GlobalOpt/constantexpr-dangle.ll
+++ b/test/Transforms/GlobalOpt/constantexpr-dangle.ll
@@ -7,7 +7,7 @@ define internal float @foo() {

 define float @bar() {
 %tmp1 = call float (...)* bitcast (float ()* @foo to float (...)*)( )
- %tmp2 = mul float %tmp1, 1.000000e+01 ; <float> [#uses=1]
+ %tmp2 = fmul float %tmp1, 1.000000e+01 ; <float> [#uses=1]
 ret float %tmp2
 }
diff --git a/test/Transforms/IndVarSimplify/2006-12-10-BitCast.ll b/test/Transforms/IndVarSimplify/2006-12-10-BitCast.ll
index 903e81d..b2f8258 100644
--- a/test/Transforms/IndVarSimplify/2006-12-10-BitCast.ll
+++ b/test/Transforms/IndVarSimplify/2006-12-10-BitCast.ll
@@ -18,7 +18,7 @@ cond_true52: ; preds = %cond_true27

 cond_next182.i: ; preds = %cond_next182.i, %cond_true52
 %decay.i.0 = phi i32 [ %tmp195.i.upgrd.1, %cond_next182.i ], [ %tmp152.i, %cond_true52 ] ; <i32> [#uses=1]
 %tmp194.i53 = bitcast i32 %decay.i.0 to float ; <float> [#uses=1]
- %tmp195.i = sub float %tmp194.i53, 8.000000e+00 ; <float> [#uses=1]
+ %tmp195.i = fsub float %tmp194.i53, 8.000000e+00 ; <float> [#uses=1]
 %tmp195.i.upgrd.1 = bitcast float %tmp195.i to i32 ; <i32> [#uses=1]
 br i1 false, label %cond_next182.i, label %bb418.i.preheader
diff --git a/test/Transforms/IndVarSimplify/2008-11-03-Floating.ll b/test/Transforms/IndVarSimplify/2008-11-03-Floating.ll
index 6fc065f..be8b36f 100644
--- a/test/Transforms/IndVarSimplify/2008-11-03-Floating.ll
+++ b/test/Transforms/IndVarSimplify/2008-11-03-Floating.ll
@@ -6,7 +6,7 @@ entry:

 bb: ; preds = %bb, %entry
 %x.0.reg2mem.0 = phi double [ 0.000000e+00, %entry ], [ %1, %bb ] ; <double> [#uses=2]
 %0 = tail call i32 @foo(double %x.0.reg2mem.0) nounwind ; <i32> [#uses=0]
- %1 = add double %x.0.reg2mem.0, 1.000000e+00 ; <double> [#uses=2]
+ %1 = fadd double %x.0.reg2mem.0, 1.000000e+00 ; <double> [#uses=2]
 %2 = fcmp olt double %1, 1.000000e+04 ; <i1> [#uses=1]
 br i1 %2, label %bb, label %return
@@ -23,7 +23,7 @@ entry:

 bb: ; preds = %bb, %entry
 %x.0.reg2mem.0 = phi double [ -10.000000e+00, %entry ], [ %1, %bb ] ; <double> [#uses=2]
 %0 = tail call i32 @foo(double %x.0.reg2mem.0) nounwind ; <i32> [#uses=0]
- %1 = add double %x.0.reg2mem.0, 2.000000e+00 ; <double> [#uses=2]
+ %1 = fadd double %x.0.reg2mem.0, 2.000000e+00 ; <double> [#uses=2]
 %2 = fcmp olt double %1, -1.000000e+00 ; <i1> [#uses=1]
 br i1 %2, label %bb, label %return
@@ -39,7 +39,7 @@ entry:

 bb: ; preds = %bb, %entry
 %x.0.reg2mem.0 = phi double [ 0.000000e+00, %entry ], [ %1, %bb ] ; <double> [#uses=2]
 %0 = tail call i32 @foo(double %x.0.reg2mem.0) nounwind ; <i32> [#uses=0]
- %1 = add double %x.0.reg2mem.0, 1.000000e+00 ; <double> [#uses=2]
+ %1 = fadd double %x.0.reg2mem.0, 1.000000e+00 ; <double> [#uses=2]
 %2 = fcmp olt double %1, -1.000000e+00 ; <i1> [#uses=1]
 br i1 %2, label %bb, label %return
@@ -54,7 +54,7 @@ entry:

 bb: ; preds = %bb, %entry
 %x.0.reg2mem.0 = phi double [ 40.000000e+00, %entry ], [ %1, %bb ] ; <double> [#uses=2]
 %0 = tail call i32 @foo(double %x.0.reg2mem.0) nounwind ; <i32> [#uses=0]
- %1 = add double %x.0.reg2mem.0, -1.000000e+00 ; <double> [#uses=2]
+ %1 = fadd double %x.0.reg2mem.0, -1.000000e+00 ; <double> [#uses=2]
 %2 = fcmp olt double %1, 1.000000e+00 ; <i1> [#uses=1]
 br i1 %2, label %bb, label %return
diff --git a/test/Transforms/IndVarSimplify/2008-11-17-Floating.ll b/test/Transforms/IndVarSimplify/2008-11-17-Floating.ll
index faf1da3..c947d3b 100644
--- a/test/Transforms/IndVarSimplify/2008-11-17-Floating.ll
+++ b/test/Transforms/IndVarSimplify/2008-11-17-Floating.ll
@@ -9,7 +9,7 @@ entry:

 bb: ; preds = %bb, %entry
 %x.0.reg2mem.0 = phi double [ 0.000000e+00, %entry ], [ %1, %bb ] ; <double> [#uses=2]
 %0 = tail call i32 @foo(double %x.0.reg2mem.0) nounwind ; <i32> [#uses=0]
- %1 = add double %x.0.reg2mem.0, 1.0e+0 ; <double> [#uses=2]
+ %1 = fadd double %x.0.reg2mem.0, 1.0e+0 ; <double> [#uses=2]
 %2 = fcmp olt double %1, 2147483646.0e+0 ; <i1> [#uses=1]
 br i1 %2, label %bb, label %return
@@ -24,7 +24,7 @@ entry:

 bb: ; preds = %bb, %entry
 %x.0.reg2mem.0 = phi double [ 0.000000e+00, %entry ], [ %1, %bb ] ; <double> [#uses=2]
 %0 = tail call i32 @foo(double %x.0.reg2mem.0) nounwind ; <i32> [#uses=0]
- %1 = add double %x.0.reg2mem.0, 1.0e+0 ; <double> [#uses=2]
+ %1 = fadd double %x.0.reg2mem.0, 1.0e+0 ; <double> [#uses=2]
 %2 = fcmp olt double %1, 2147483647.0e+0 ; <i1> [#uses=1]
 br i1 %2, label %bb, label %return
diff --git a/test/Transforms/IndVarSimplify/2008-11-25-APFloatAssert.ll b/test/Transforms/IndVarSimplify/2008-11-25-APFloatAssert.ll
index 9fd0eb9..e611b1f 100644
--- a/test/Transforms/IndVarSimplify/2008-11-25-APFloatAssert.ll
+++ b/test/Transforms/IndVarSimplify/2008-11-25-APFloatAssert.ll
@@ -6,6 +6,6 @@ entry:

 bb23.i91: ; preds = %bb23.i91, %entry
 %result.0.i89 = phi ppc_fp128 [ 0xM00000000000000000000000000000000, %entry ], [ %0, %bb23.i91 ] ; <ppc_fp128> [#uses=2]
- %0 = mul ppc_fp128 %result.0.i89, %result.0.i89 ; <ppc_fp128> [#uses=1]
+ %0 = fmul ppc_fp128 %result.0.i89, %result.0.i89 ; <ppc_fp128> [#uses=1]
 br label %bb23.i91
 }
diff --git a/test/Transforms/IndVarSimplify/2009-04-27-Floating.ll b/test/Transforms/IndVarSimplify/2009-04-27-Floating.ll
index 700f294..e70d577 100644
--- a/test/Transforms/IndVarSimplify/2009-04-27-Floating.ll
+++ b/test/Transforms/IndVarSimplify/2009-04-27-Floating.ll
@@ -9,7 +9,7 @@ entry:

 loop_body:
 %i = phi float [ %nexti, %loop_body ], [ 0.0, %entry ]
 tail call void @foo()
- %nexti = add float %i, 1.0
+ %nexti = fadd float %i, 1.0
 %less = fcmp olt float %nexti, 2.0
 br i1 %less, label %loop_body, label %done
diff --git a/test/Transforms/IndVarSimplify/iv-zext.ll b/test/Transforms/IndVarSimplify/iv-zext.ll
index 76d48de..d7eb7bd 100644
--- a/test/Transforms/IndVarSimplify/iv-zext.ll
+++ b/test/Transforms/IndVarSimplify/iv-zext.ll
@@ -13,16 +13,16 @@ loop:
 %indvar.i8 = and i64 %indvar, 255
 %t0 = getelementptr double* %d, i64 %indvar.i8
 %t1 = load double* %t0
- %t2 = mul double %t1, 0.1
+ %t2 = fmul double %t1, 0.1
 store double %t2, double* %t0
 %indvar.i24 = and i64 %indvar, 16777215
 %t3 = getelementptr double* %d, i64 %indvar.i24
 %t4 = load double* %t3
- %t5 = mul double %t4, 2.3
+ %t5 = fmul double %t4, 2.3
 store double %t5, double* %t3
 %t6 = getelementptr double* %d, i64 %indvar
 %t7 = load double* %t6
- %t8 = mul double %t7, 4.5
+ %t8 = fmul double %t7, 4.5
 store double %t8, double* %t6
 %indvar.next = add i64 %indvar, 1
 %exitcond = icmp eq i64 %indvar.next, 10
diff --git a/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll b/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll
index 5ad0af4..c7cf0dd 100644
--- a/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll
+++ b/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll
@@ -2,8 +2,8 @@
 ; RUN: grep mul | count 2

 define <4 x float> @test(<4 x float> %V) {
- %Y = mul <4 x float> %V, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
- %Z = mul <4 x float> %Y, < float 1.000000e+00, float 2.000000e+05, float -3.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
+ %Y = fmul <4 x float> %V, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
+ %Z = fmul <4 x float> %Y, < float 1.000000e+00, float 2.000000e+05, float -3.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
 ret <4 x float> %Z
 }
diff --git a/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll b/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll
index 60ee503..eaf10a3 100644
--- a/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll
+++ b/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll
@@ -3,7 +3,7 @@

 define <4 x float> @test(<4 x float> %tmp26, <4 x float> %tmp53) {
 ; (X+Y)-Y != X for fp vectors.
- %tmp64 = add <4 x float> %tmp26, %tmp53 ; <<4 x float>> [#uses=1]
- %tmp75 = sub <4 x float> %tmp64, %tmp53 ; <<4 x float>> [#uses=1]
+ %tmp64 = fadd <4 x float> %tmp26, %tmp53 ; <<4 x float>> [#uses=1]
+ %tmp75 = fsub <4 x float> %tmp64, %tmp53 ; <<4 x float>> [#uses=1]
 ret <4 x float> %tmp75
 }
diff --git a/test/Transforms/InstCombine/2008-07-16-fsub.ll b/test/Transforms/InstCombine/2008-07-16-fsub.ll
index 1d0554d..ca4174d 100644
--- a/test/Transforms/InstCombine/2008-07-16-fsub.ll
+++ b/test/Transforms/InstCombine/2008-07-16-fsub.ll
@@ -3,6 +3,6 @@

 define double @test(double %X) nounwind {
 ; fsub of self can't be optimized away.
- %Y = sub double %X, %X
+ %Y = fsub double %X, %X
 ret double %Y
 }
diff --git a/test/Transforms/InstCombine/add-sitofp.ll b/test/Transforms/InstCombine/add-sitofp.ll
index 35c6567..298b9a1 100644
--- a/test/Transforms/InstCombine/add-sitofp.ll
+++ b/test/Transforms/InstCombine/add-sitofp.ll
@@ -4,6 +4,6 @@ define double @x(i32 %a, i32 %b) nounwind {
 %m = lshr i32 %a, 24
 %n = and i32 %m, %b
 %o = sitofp i32 %n to double
- %p = add double %o, 1.0
+ %p = fadd double %o, 1.0
 ret double %p
 }
diff --git a/test/Transforms/InstCombine/dce-iterate.ll b/test/Transforms/InstCombine/dce-iterate.ll
index e222970..faefa8a 100644
--- a/test/Transforms/InstCombine/dce-iterate.ll
+++ b/test/Transforms/InstCombine/dce-iterate.ll
@@ -18,7 +18,7 @@ entry:
 %c = lshr i960 %sz101112.ins, 320 ; <i960> [#uses=1]
 %d = trunc i960 %c to i64 ; <i64> [#uses=1]
 %e = bitcast i64 %d to double ; <double> [#uses=1]
- %f = add double %b, %e
+ %f = fadd double %b, %e

 ret double %e
 }
diff --git a/test/Transforms/InstCombine/fpextend.ll b/test/Transforms/InstCombine/fpextend.ll
index 5971080..c212128 100644
--- a/test/Transforms/InstCombine/fpextend.ll
+++ b/test/Transforms/InstCombine/fpextend.ll
@@ -6,7 +6,7 @@ define void @test() nounwind {
 entry:
 %tmp = load float* @X, align 4 ; <float> [#uses=1]
 %tmp1 = fpext float %tmp to double ; <double> [#uses=1]
- %tmp3 = add double %tmp1, 0.000000e+00 ; <double> [#uses=1]
+ %tmp3 = fadd double %tmp1, 0.000000e+00 ; <double> [#uses=1]
 %tmp34 = fptrunc double %tmp3 to float ; <float> [#uses=1]
 store float %tmp34, float* @X, align 4
 ret void
@@ -28,7 +28,7 @@ define void @test4() nounwind {
 entry:
 %tmp = load float* @X, align 4 ; <float> [#uses=1]
 %tmp1 = fpext float %tmp to double ; <double> [#uses=1]
- %tmp2 = sub double -0.000000e+00, %tmp1 ; <double> [#uses=1]
+ %tmp2 = fsub double -0.000000e+00, %tmp1 ; <double> [#uses=1]
 %tmp34 = fptrunc double %tmp2 to float ; <float> [#uses=1]
 store float %tmp34, float* @X, align 4
 ret void
diff --git a/test/Transforms/InstCombine/mul.ll b/test/Transforms/InstCombine/mul.ll
index 1a74025..9b5f7a5 100644
--- a/test/Transforms/InstCombine/mul.ll
+++ b/test/Transforms/InstCombine/mul.ll
@@ -20,7 +20,7 @@ define i32 @test3(i32 %A) {

 define double @test4(double %A) {
 ; This is safe for FP
- %B = mul double 1.000000e+00, %A ; <double> [#uses=1]
+ %B = fmul double 1.000000e+00, %A ; <double> [#uses=1]
 ret double %B
 }
@@ -79,15 +79,7 @@ define i32 @test12(i8 %a, i32 %b) {

 ; PR2642
 define internal void @test13(<4 x float>*) {
 load <4 x float>* %0, align 1
- mul <4 x float> %2, < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >
+ fmul <4 x float> %2, < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >
 store <4 x float> %3, <4 x float>* %0, align 1
 ret void
 }
-
-define internal void @test14(<4 x float>*) {
- load <4 x float>* %0, align 1
- mul <4 x float> %2, zeroinitializer
- store <4 x float> %3, <4 x float>* %0, align 1
- ret void
-}
-
diff --git a/test/Transforms/InstCombine/multi-use-or.ll b/test/Transforms/InstCombine/multi-use-or.ll
index 85a8b34..4804967 100644
--- a/test/Transforms/InstCombine/multi-use-or.ll
+++ b/test/Transforms/InstCombine/multi-use-or.ll
@@ -17,7 +17,7 @@ entry:
 %c = lshr i192 %sy222324.ins, 128 ; <i192> [#uses=1]
 %d = trunc i192 %c to i64 ; <i64> [#uses=1]
 %e = bitcast i64 %d to double ; <double> [#uses=1]
- %f = add double %b, %e
+ %f = fadd double %b, %e

 ; ret double %e
 ret double %f
diff --git a/test/Transforms/InstCombine/shufflemask-undef.ll b/test/Transforms/InstCombine/shufflemask-undef.ll
index 2438417..a9e8d34 100644
--- a/test/Transforms/InstCombine/shufflemask-undef.ll
+++ b/test/Transforms/InstCombine/shufflemask-undef.ll
@@ -75,16 +75,16 @@ bb266.i:
 shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>>:3 [#uses=1]
 shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>>:4 [#uses=1]
 shufflevector <4 x float> %4, <4 x float> %3, <4 x i32> < i32 6, i32 7, i32 2, i32 3 > ; <<4 x float>>:5 [#uses=1]
- mul <4 x float> %5, zeroinitializer ; <<4 x float>>:6 [#uses=2]
- mul <4 x float> %6, %6 ; <<4 x float>>:7 [#uses=1]
- add <4 x float> zeroinitializer, %7 ; <<4 x float>>:8 [#uses=1]
+ fmul <4 x float> %5, zeroinitializer ; <<4 x float>>:6 [#uses=2]
+ fmul <4 x float> %6, %6 ; <<4 x float>>:7 [#uses=1]
+ fadd <4 x float> zeroinitializer, %7 ; <<4 x float>>:8 [#uses=1]
 call <4 x float> @llvm.x86.sse.max.ps( <4 x float> zeroinitializer, <4 x float> %8 ) nounwind readnone ; <<4 x float>>:9 [#uses=1]
 %phitmp40 = bitcast <4 x float> %9 to <4 x i32> ; <<4 x i32>> [#uses=1]
 %tmp4109.i = and <4 x i32> %phitmp40, < i32 8388607, i32 8388607, i32 8388607, i32 8388607 > ; <<4 x i32>> [#uses=1]
 %tmp4116.i = or <4 x i32> %tmp4109.i, < i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216 > ; <<4 x i32>> [#uses=1]
 %tmp4117.i = bitcast <4 x i32> %tmp4116.i to <4 x float> ; <<4 x float>> [#uses=1]
- add <4 x float> %tmp4117.i, zeroinitializer ; <<4 x float>>:10 [#uses=1]
- mul <4 x float> %10, < float 5.000000e-01, float 5.000000e-01, float 5.000000e-01, float 5.000000e-01 > ; <<4 x float>>:11 [#uses=1]
+ fadd <4 x float> %tmp4117.i, zeroinitializer ; <<4 x float>>:10 [#uses=1]
+ fmul <4 x float> %10, < float 5.000000e-01, float 5.000000e-01, float 5.000000e-01, float 5.000000e-01 > ; <<4 x float>>:11 [#uses=1]
 call <4 x float> @llvm.x86.sse.max.ps( <4 x float> %11, <4 x float> zeroinitializer ) nounwind readnone ; <<4 x float>>:12 [#uses=1]
 call <4 x float> @llvm.x86.sse.min.ps( <4 x float> %12, <4 x float> zeroinitializer ) nounwind readnone ; <<4 x float>>:13 [#uses=1]
 %tmp4170.i = call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %13, <4 x float> zeroinitializer, i8 2 ) nounwind ; <<4 x float>> [#uses=1]
diff --git a/test/Transforms/InstCombine/signed-comparison.ll b/test/Transforms/InstCombine/signed-comparison.ll
index fdf150f..86e07ec7 100644
--- a/test/Transforms/InstCombine/signed-comparison.ll
+++ b/test/Transforms/InstCombine/signed-comparison.ll
@@ -14,7 +14,7 @@ bb:
 %t0 = and i64 %indvar, 65535
 %t1 = getelementptr double* %p, i64 %t0
 %t2 = load double* %t1, align 8
- %t3 = mul double %t2, 2.2
+ %t3 = fmul double %t2, 2.2
 store double %t3, double* %t1, align 8
 %i.04 = trunc i64 %indvar to i16
 %t4 = add i16 %i.04, 1
diff --git a/test/Transforms/InstCombine/sitofp.ll b/test/Transforms/InstCombine/sitofp.ll
index c26c351..2bf7385 100644
--- a/test/Transforms/InstCombine/sitofp.ll
+++ b/test/Transforms/InstCombine/sitofp.ll
@@ -36,7 +36,7 @@ define i32 @test6(i32 %A) {
 %C = and i32 %A, 32 ; <i32> [#uses=1]
 %D = sitofp i32 %B to double ; <double> [#uses=1]
 %E = sitofp i32 %C to double ; <double> [#uses=1]
- %F = add double %D, %E ; <double> [#uses=1]
+ %F = fadd double %D, %E ; <double> [#uses=1]
 %G = fptosi double %F to i32 ; <i32> [#uses=1]
 ret i32 %G
 }
diff --git a/test/Transforms/InstCombine/vec_demanded_elts.ll b/test/Transforms/InstCombine/vec_demanded_elts.ll
index 03e070f..95df8c6 100644
--- a/test/Transforms/InstCombine/vec_demanded_elts.ll
+++ b/test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -1,7 +1,7 @@
 ; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
-; RUN: grep {sub float}
+; RUN: grep {fadd float}
 ; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
-; RUN: grep {mul float}
+; RUN: grep {fmul float}
 ; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
 ; RUN: not grep {insertelement.*0.00}
 ; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
@@ -26,7 +26,7 @@ entry:
 }

 define i32 @test2(float %f) {
- %tmp5 = mul float %f, %f
+ %tmp5 = fmul float %f, %f
 %tmp9 = insertelement <4 x float> undef, float %tmp5, i32 0
 %tmp10 = insertelement <4 x float> %tmp9, float 0.000000e+00, i32 1
 %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2
diff --git a/test/Transforms/InstCombine/vec_narrow.ll b/test/Transforms/InstCombine/vec_narrow.ll
index 9063148..e444c2a 100644
--- a/test/Transforms/InstCombine/vec_narrow.ll
+++ b/test/Transforms/InstCombine/vec_narrow.ll
@@ -5,7 +5,7 @@

 define float @test(%V %A, %V %B, float %f) {
 %C = insertelement %V %A, float %f, i32 0 ; <%V> [#uses=1]
- %D = add %V %C, %B ; <%V> [#uses=1]
+ %D = fadd %V %C, %B ; <%V> [#uses=1]
 %E = extractelement %V %D, i32 0 ; <float> [#uses=1]
 ret float %E
 }
diff --git a/test/Transforms/InstCombine/zero-point-zero-add.ll b/test/Transforms/InstCombine/zero-point-zero-add.ll
index bae60d9..adb28e4 100644
--- a/test/Transforms/InstCombine/zero-point-zero-add.ll
+++ b/test/Transforms/InstCombine/zero-point-zero-add.ll
@@ -3,13 +3,13 @@
 declare double @abs(double)

 define double @test(double %X) {
- %Y = add double %X, 0.0 ;; Should be a single add x, 0.0
- %Z = add double %Y, 0.0
+ %Y = fadd double %X, 0.0 ;; Should be a single add x, 0.0
+ %Z = fadd double %Y, 0.0
 ret double %Z
 }

 define double @test1(double %X) {
 %Y = call double @abs(double %X)
- %Z = add double %Y, 0.0
+ %Z = fadd double %Y, 0.0
 ret double %Z
 }
diff --git a/test/Transforms/LCSSA/2007-07-12-LICM-2.ll b/test/Transforms/LCSSA/2007-07-12-LICM-2.ll
index 58bb19d..e8dc391 100644
--- a/test/Transforms/LCSSA/2007-07-12-LICM-2.ll
+++ b/test/Transforms/LCSSA/2007-07-12-LICM-2.ll
@@ -5,10 +5,10 @@ entry:

 bb7: ; preds = %bb7, %entry
 %tmp39 = load <4 x float>* null ; <<4 x float>> [#uses=1]
- %tmp40 = add <4 x float> %tmp39, < float 2.000000e+00, float 3.000000e+00, float 1.000000e+00, float 0.000000e+00 > ; <<4 x float>> [#uses=1]
- %tmp43 = add <4 x float> %tmp40, < float 1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 2.000000e+00 > ; <<4 x float>> [#uses=1]
- %tmp46 = add <4 x float> %tmp43, < float 3.000000e+00, float 0.000000e+00, float 2.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
- %tmp49 = add <4 x float> %tmp46, < float 0.000000e+00, float 4.000000e+00, float 6.000000e+00, float 1.000000e+00 > ; <<4 x float>> [#uses=1]
+ %tmp40 = fadd <4 x float> %tmp39, < float 2.000000e+00, float 3.000000e+00, float 1.000000e+00, float 0.000000e+00 > ; <<4 x float>> [#uses=1]
+ %tmp43 = fadd <4 x float> %tmp40, < float 1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 2.000000e+00 > ; <<4 x float>> [#uses=1]
+ %tmp46 = fadd <4 x float> %tmp43, < float 3.000000e+00, float 0.000000e+00, float 2.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
+ %tmp49 = fadd <4 x float> %tmp46, < float 0.000000e+00, float 4.000000e+00, float 6.000000e+00, float 1.000000e+00 > ; <<4 x float>> [#uses=1]
 store <4 x float> %tmp49, <4 x float>* null
 br i1 false, label %bb7, label %bb56
diff --git a/test/Transforms/LCSSA/2007-07-12-LICM-3.ll b/test/Transforms/LCSSA/2007-07-12-LICM-3.ll
index 79370ee..72cebed 100644
--- a/test/Transforms/LCSSA/2007-07-12-LICM-3.ll
+++ b/test/Transforms/LCSSA/2007-07-12-LICM-3.ll
@@ -9,10 +9,10 @@ bb: ; preds = %bb56, %entry

 bb7: ; preds = %bb7, %bb
 %tmp39 = load <4 x float>* null ; <<4 x float>> [#uses=1]
- %tmp40 = add <4 x float> %tmp39, < float 2.000000e+00, float 3.000000e+00, float 1.000000e+00, float 0.000000e+00 > ; <<4 x float>> [#uses=1]
- %tmp43 = add <4 x float> %tmp40, < float 1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 2.000000e+00 > ; <<4 x float>> [#uses=1]
- %tmp46 = add <4 x float> %tmp43, < float 3.000000e+00, float 0.000000e+00, float 2.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
- %tmp49 = add <4 x float> %tmp46, < float 0.000000e+00, float 4.000000e+00, float 6.000000e+00, float 1.000000e+00 > ; <<4 x float>> [#uses=1]
+ %tmp40 = fadd <4 x float> %tmp39, < float 2.000000e+00, float 3.000000e+00, float 1.000000e+00, float 0.000000e+00 > ; <<4 x float>> [#uses=1]
+ %tmp43 = fadd <4 x float> %tmp40, < float 1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 2.000000e+00 > ; <<4 x float>> [#uses=1]
+ %tmp46 = fadd <4 x float> %tmp43, < float 3.000000e+00, float 0.000000e+00, float 2.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
+ %tmp49 = fadd <4 x float> %tmp46, < float 0.000000e+00, float 4.000000e+00, float 6.000000e+00, float 1.000000e+00 > ; <<4 x float>> [#uses=1]
 store <4 x float> %tmp49, <4 x float>* null
 br i1 false, label %bb7, label %bb56
diff --git a/test/Transforms/LCSSA/2007-07-12-LICM.ll b/test/Transforms/LCSSA/2007-07-12-LICM.ll
index 1c9830e..0c433c3 100644
--- a/test/Transforms/LCSSA/2007-07-12-LICM.ll
+++ b/test/Transforms/LCSSA/2007-07-12-LICM.ll
@@ -5,7 +5,7 @@ entry:

 bb7: ; preds = %bb7, %entry
 %tmp39 = load <4 x float>* null ; <<4 x float>> [#uses=1]
- %tmp40 = add <4 x float> %tmp39, < float 2.000000e+00, float 3.000000e+00, float 1.000000e+00, float 0.000000e+00 > ; <<4 x float>> [#uses=0]
+ %tmp40 = fadd <4 x float> %tmp39, < float 2.000000e+00, float 3.000000e+00, float 1.000000e+00, float 0.000000e+00 > ; <<4 x float>> [#uses=0]
 store <4 x float> zeroinitializer, <4 x float>* null
 br i1 false, label %bb7, label %bb56
diff --git a/test/Transforms/LoopIndexSplit/2007-09-24-UpdateIterationSpace.ll b/test/Transforms/LoopIndexSplit/2007-09-24-UpdateIterationSpace.ll
index ec29847..928fd95 100644
--- a/test/Transforms/LoopIndexSplit/2007-09-24-UpdateIterationSpace.ll
+++ b/test/Transforms/LoopIndexSplit/2007-09-24-UpdateIterationSpace.ll
@@ -8,7 +8,7 @@ entry:

 bb.preheader: ; preds = %entry
 %tmp3031 = fpext float %contribution to double ; <double> [#uses=1]
- %tmp32 = mul double %tmp3031, 5.000000e-01 ; <double> [#uses=1]
+ %tmp32 = fmul double %tmp3031, 5.000000e-01 ; <double> [#uses=1]
 %tmp3839 = fpext float %sigmal to double ; <double> [#uses=1]
 br label %bb
@@ -22,19 +22,19 @@ bb: ; preds = %bb.preheader, %cond_next45

 cond_true9: ; preds = %bb
 %tmp12 = getelementptr float* %x, i32 %i.01.0 ; <float*> [#uses=1]
 %tmp13 = load float* %tmp12, align 4 ; <float> [#uses=1]
- %tmp15 = sub float %xcen, %tmp13 ; <float> [#uses=1]
+ %tmp15 = fsub float %xcen, %tmp13 ; <float> [#uses=1]
 %tmp16 = tail call float @fabsf( float %tmp15 ) ; <float> [#uses=1]
 %tmp18 = fdiv float %tmp16, %sigmal ; <float> [#uses=1]
 %tmp21 = load float** %y, align 4 ; <float*> [#uses=2]
 %tmp27 = getelementptr float* %tmp21, i32 %i.01.0 ; <float*> [#uses=1]
 %tmp28 = load float* %tmp27, align 4 ; <float> [#uses=1]
 %tmp2829 = fpext float %tmp28 to double ; <double> [#uses=1]
- %tmp34 = sub float -0.000000e+00, %tmp18 ; <float> [#uses=1]
+ %tmp34 = fsub float -0.000000e+00, %tmp18 ; <float> [#uses=1]
 %tmp3435 = fpext float %tmp34 to double ; <double> [#uses=1]
 %tmp36 = tail call double @exp( double %tmp3435 ) ; <double> [#uses=1]
- %tmp37 = mul double %tmp32, %tmp36 ; <double> [#uses=1]
+ %tmp37 = fmul double %tmp32, %tmp36 ; <double> [#uses=1]
 %tmp40 = fdiv double %tmp37, %tmp3839 ; <double> [#uses=1]
- %tmp41 = add double %tmp2829, %tmp40 ; <double> [#uses=1]
+ %tmp41 = fadd double %tmp2829, %tmp40 ; <double> [#uses=1]
 %tmp4142 = fptrunc double %tmp41 to float ; <float> [#uses=1]
 %tmp44 = getelementptr float* %tmp21, i32 %i.01.0 ; <float*> [#uses=1]
 store float %tmp4142, float* %tmp44, align 4
diff --git a/test/Transforms/LoopIndexSplit/2007-09-25-UpdateIterationSpace-2.ll b/test/Transforms/LoopIndexSplit/2007-09-25-UpdateIterationSpace-2.ll
index 7d93785..6619c7d 100644
--- a/test/Transforms/LoopIndexSplit/2007-09-25-UpdateIterationSpace-2.ll
+++ b/test/Transforms/LoopIndexSplit/2007-09-25-UpdateIterationSpace-2.ll
@@ -9,7 +9,7 @@ entry:

 bb.preheader: ; preds = %entry
 %tmp3031 = fpext float %contribution to double ; <double> [#uses=1]
- %tmp32 = mul double %tmp3031, 5.000000e-01 ; <double> [#uses=1]
+ %tmp32 = fmul double %tmp3031, 5.000000e-01 ; <double> [#uses=1]
 %tmp3839 = fpext float %sigmal to double ; <double> [#uses=1]
 br label %bb
@@ -24,19 +24,19 @@ bb: ; preds = %cond_next45, %bb.preheader

 cond_true9: ; preds = %bb
 %tmp12 = getelementptr float* %x, i32 %i.01.0 ; <float*> [#uses=1]
 %tmp13 = load float* %tmp12, align 4 ; <float> [#uses=1]
- %tmp15 = sub float %xcen, %tmp13 ; <float> [#uses=1]
+ %tmp15 = fsub float %xcen, %tmp13 ; <float> [#uses=1]
 %tmp16 = tail call float @fabsf(float %tmp15) ; <float> [#uses=1]
 %tmp18 = fdiv float %tmp16, %sigmal ; <float> [#uses=1]
 %tmp21 = load float** %y, align 4 ; <float*> [#uses=2]
 %tmp27 = getelementptr float* %tmp21, i32 %k.06.0 ; <float*> [#uses=1]
 %tmp28 = load float* %tmp27, align 4 ; <float> [#uses=1]
 %tmp2829 = fpext float %tmp28 to double ; <double> [#uses=1]
- %tmp34 = sub float -0.000000e+00, %tmp18 ; <float> [#uses=1]
+ %tmp34 = fsub float -0.000000e+00, %tmp18 ; <float> [#uses=1]
 %tmp3435 = fpext float %tmp34 to double ; <double> [#uses=1]
 %tmp36 = tail call double @exp(double %tmp3435) ; <double> [#uses=1]
- %tmp37 = mul double %tmp32, %tmp36 ; <double> [#uses=1]
+ %tmp37 = fmul double %tmp32, %tmp36 ; <double> [#uses=1]
 %tmp40 = fdiv double %tmp37, %tmp3839 ; <double> [#uses=1]
- %tmp41 = add double %tmp2829, %tmp40 ; <double> [#uses=1]
+ %tmp41 = fadd double %tmp2829, %tmp40 ; <double> [#uses=1]
 %tmp4142 = fptrunc double %tmp41 to float ; <float> [#uses=1]
 %tmp44 = getelementptr float* %tmp21, i32 %k.06.0 ; <float*> [#uses=1]
 store float %tmp4142, float* %tmp44, align 4
diff --git a/test/Transforms/Mem2Reg/PromoteMemToRegister.ll b/test/Transforms/Mem2Reg/PromoteMemToRegister.ll
index fdc33fb..63b8c78 100644
--- a/test/Transforms/Mem2Reg/PromoteMemToRegister.ll
+++ b/test/Transforms/Mem2Reg/PromoteMemToRegister.ll
@@ -12,7 +12,7 @@ define double @testfunc(i32 %i, double %j) {
 %t3 = load i32* %I ; <i32> [#uses=1]
 %t4 = sitofp i32 %t3 to double ; <double> [#uses=1]
 %t5 = load double* %J ; <double> [#uses=1]
- %t6 = mul double %t4, %t5 ; <double> [#uses=1]
+ %t6 = fmul double %t4, %t5 ; <double> [#uses=1]
 ret double %t6
 }
diff --git a/test/Transforms/MemCpyOpt/memcpy.ll b/test/Transforms/MemCpyOpt/memcpy.ll
index c5cdc29..94daee0 100644
--- a/test/Transforms/MemCpyOpt/memcpy.ll
+++ b/test/Transforms/MemCpyOpt/memcpy.ll
@@ -7,7 +7,7 @@ define void @ccosl({ x86_fp80, x86_fp80 }* sret %agg.result, x86_fp80 %z.0, x86
 entry:
 %tmp2 = alloca { x86_fp80, x86_fp80 } ; <{ x86_fp80, x86_fp80 }*> [#uses=1]
 %memtmp = alloca { x86_fp80, x86_fp80 }, align 16 ; <{ x86_fp80, x86_fp80 }*> [#uses=2]
- %tmp5 = sub x86_fp80 0xK80000000000000000000, %z.1 ; <x86_fp80> [#uses=1]
+ %tmp5 = fsub x86_fp80 0xK80000000000000000000, %z.1 ; <x86_fp80> [#uses=1]
 call void @ccoshl( { x86_fp80, x86_fp80 }* sret %memtmp, x86_fp80 %tmp5, x86_fp80 %z.0 ) nounwind
 %tmp219 = bitcast { x86_fp80, x86_fp80 }* %tmp2 to i8* ; <i8*> [#uses=2]
 %memtmp20 = bitcast { x86_fp80, x86_fp80 }* %memtmp to i8* ; <i8*> [#uses=1]
diff --git a/test/Transforms/MemCpyOpt/sret.ll b/test/Transforms/MemCpyOpt/sret.ll
index 1ac11aa..ad9fb1b 100644
--- a/test/Transforms/MemCpyOpt/sret.ll
+++ b/test/Transforms/MemCpyOpt/sret.ll
@@ -9,7 +9,7 @@ entry:
 %memtmp = alloca { x86_fp80, x86_fp80 }, align 16 ; <{ x86_fp80, x86_fp80 }*> [#uses=2]
 %tmp1 = getelementptr { x86_fp80, x86_fp80 }* %z, i32 0, i32 1 ; <x86_fp80*> [#uses=1]
 %tmp2 = load x86_fp80* %tmp1, align 16 ; <x86_fp80> [#uses=1]
- %tmp3 = sub x86_fp80 0xK80000000000000000000, %tmp2 ; <x86_fp80> [#uses=1]
+ %tmp3 = fsub x86_fp80 0xK80000000000000000000, %tmp2 ; <x86_fp80> [#uses=1]
 %tmp4 = getelementptr { x86_fp80, x86_fp80 }* %iz, i32 0, i32 1 ; <x86_fp80*> [#uses=1]
 %real = getelementptr { x86_fp80, x86_fp80 }* %iz, i32 0, i32 0 ; <x86_fp80*> [#uses=1]
 %tmp7 = getelementptr { x86_fp80, x86_fp80 }* %z, i32 0, i32 0 ; <x86_fp80*> [#uses=1]
diff --git a/test/Transforms/PruneEH/2008-09-05-CGUpdate.ll b/test/Transforms/PruneEH/2008-09-05-CGUpdate.ll
index c3600ab..74434f4 100644
--- a/test/Transforms/PruneEH/2008-09-05-CGUpdate.ll
+++ b/test/Transforms/PruneEH/2008-09-05-CGUpdate.ll
@@ -477,12 +477,12 @@ invcont3: ; preds = %bb2
 unreachable

 bb4: ; preds = %invcont
- %3 = mul x86_fp80 %0, 0xK40008000000000000000 ; <x86_fp80> [#uses=1]
+ %3 = fmul x86_fp80 %0, 0xK40008000000000000000 ; <x86_fp80> [#uses=1]
 %4 = fcmp ult x86_fp80 %3, 0xKC0068000000000000000 ; <i1> [#uses=1]
 br i1 %4, label %bb8, label %bb6

 bb6: ; preds = %bb4
- %5 = mul x86_fp80 %0, 0xK40008000000000000000 ; <x86_fp80> [#uses=1]
+ %5 = fmul x86_fp80 %0, 0xK40008000000000000000 ; <x86_fp80> [#uses=1]
 %6 = fcmp ugt x86_fp80 %5, 0xK4005FE00000000000000 ; <i1> [#uses=1]
 br i1 %6, label %bb8, label %bb10
@@ -494,16 +494,16 @@ invcont9: ; preds = %bb8
 unreachable

 bb10: ; preds = %bb6
- %7 = mul x86_fp80 %0, 0xK40008000000000000000 ; <x86_fp80> [#uses=3]
+ %7 = fmul x86_fp80 %0, 0xK40008000000000000000 ; <x86_fp80> [#uses=3]
 %8 = fcmp ult x86_fp80 %7, 0xK00000000000000000000 ; <i1> [#uses=1]
 br i1 %8, label %bb13, label %bb12

 bb12: ; preds = %bb10
- %9 = add x86_fp80 %7, 0xK3FFDFFFFFFFFFFFFFFFF ; <x86_fp80> [#uses=1]
+ %9 = fadd x86_fp80 %7, 0xK3FFDFFFFFFFFFFFFFFFF ; <x86_fp80> [#uses=1]
 br label %bb14

 bb13: ; preds = %bb10
- %10 = sub x86_fp80 %7, 0xK3FFDFFFFFFFFFFFFFFFF ; <x86_fp80> [#uses=1]
+ %10 = fsub x86_fp80 %7, 0xK3FFDFFFFFFFFFFFFFFFF ; <x86_fp80> [#uses=1]
 br label %bb14

 bb14: ; preds = %bb13, %bb12
diff --git a/test/Transforms/Reassociate/2006-04-27-ReassociateVector.ll b/test/Transforms/Reassociate/2006-04-27-ReassociateVector.ll
index 05d6103..3662e09 100644
--- a/test/Transforms/Reassociate/2006-04-27-ReassociateVector.ll
+++ b/test/Transforms/Reassociate/2006-04-27-ReassociateVector.ll
@@ -1,8 +1,8 @@
 ; RUN: llvm-as < %s | opt -reassociate -disable-output

 define void @foo() {
- %tmp162 = sub <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>> [#uses=1]
- %tmp164 = mul <4 x float> zeroinitializer, %tmp162 ; <<4 x float>> [#uses=0]
+ %tmp162 = fsub <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>> [#uses=1]
+ %tmp164 = fmul <4 x float> zeroinitializer, %tmp162 ; <<4 x float>> [#uses=0]
 ret void
 }
diff --git a/test/Transforms/SCCP/2006-12-04-PackedType.ll b/test/Transforms/SCCP/2006-12-04-PackedType.ll
index b7a7880..0e268c2 100644
--- a/test/Transforms/SCCP/2006-12-04-PackedType.ll
+++ b/test/Transforms/SCCP/2006-12-04-PackedType.ll
@@ -112,7 +112,7 @@ cond_true93: ; preds = %entry
 %tmp.upgrd.1 = getelementptr %struct.GLDContextRec* %ctx, i32 0, i32 31, i32 14 ; <i32*> [#uses=1]
 %tmp95 = load i32* %tmp.upgrd.1 ; <i32> [#uses=1]
 %tmp95.upgrd.2 = sitofp i32 %tmp95 to float ; <float> [#uses=1]
- %tmp108 = mul float undef, %tmp95.upgrd.2 ; <float> [#uses=1]
+ %tmp108 = fmul float undef, %tmp95.upgrd.2 ; <float> [#uses=1]
 br label %cond_next116
 cond_next116: ; preds = %cond_true93, %entry
 %point_size.2 = phi float [ %tmp108, %cond_true93 ], [ undef, %entry ] ; <float> [#uses=2]
@@ -130,7 +130,7 @@ cond_true462: ; preds = %cond_true458
 cond_true467: ; preds = %cond_true462
 ret void
 cond_next484: ; preds = %cond_next116
- %tmp486 = mul float %point_size.2, 5.000000e-01 ; <float> [#uses=1]
+ %tmp486 = fmul float %point_size.2, 5.000000e-01 ; <float> [#uses=1]
 br label %cond_next487
 cond_next487: ; preds = %cond_next484, %cond_true462, %cond_true458
 %radius.0 = phi float [ %tmp486, %cond_next484 ], [ 5.000000e-01, %cond_true458 ], [ 5.000000e-01, %cond_true462 ] ; <float> [#uses=2]
diff --git a/test/Transforms/SCCP/apint-ipsccp4.ll b/test/Transforms/SCCP/apint-ipsccp4.ll
index de355d1..a0656b7 100644
--- a/test/Transforms/SCCP/apint-ipsccp4.ll
+++ b/test/Transforms/SCCP/apint-ipsccp4.ll
@@ -35,10 +35,10 @@ define float @All()
 %B = fcmp oge float %A, 1.0
 br i1 %B, label %T, label %F
 T:
- %C = add float %A, 1.0
+ %C = fadd float %A, 1.0
 br label %exit
 F:
- %D = add float %A, 2.0
+ %D = fadd float %A, 2.0
 br label %exit
 exit:
 %E = phi float [%C, %T], [%D, %F]
diff --git a/test/Transforms/ScalarRepl/2009-03-17-CleanUp.ll b/test/Transforms/ScalarRepl/2009-03-17-CleanUp.ll
index 13055ea..facb7c1 100644
--- a/test/Transforms/ScalarRepl/2009-03-17-CleanUp.ll
+++ b/test/Transforms/ScalarRepl/2009-03-17-CleanUp.ll
@@ -1766,7 +1766,7 @@ _ZL13random_doublev.exit: ; preds = %bb.i, %bb7
 call void @llvm.dbg.stoppoint(i32 75, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to %0*))
 %22 = load i32* @_ZZL13random_doublevE4seed, align 4 ; <i32> [#uses=2]
 %23 = sitofp i32 %22 to double ; <double> [#uses=1]
- %24 = mul double %23, 0x3E340000002813D9 ; <double> [#uses=1]
+ %24 = fmul double %23, 0x3E340000002813D9 ; <double> [#uses=1]
 call void @llvm.dbg.stoppoint(i32 76, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to %0*))
 %25 = xor i32 %22, 123459876 ; <i32> [#uses=1]
 store i32 %25, i32* @_ZZL13random_doublevE4seed, align 4
@@ -1803,7 +1803,7 @@ bb8: ; preds = %bb.i1, %_ZL13random_doublev.exit
 call void @llvm.dbg.stoppoint(i32 75, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to %0*))
 %38 = load i32* @_ZZL13random_doublevE4seed, align 4 ; <i32> [#uses=2]
 %39 = sitofp i32 %38 to double ; <double> [#uses=1]
- %40 = mul double %39, 0x3E340000002813D9 ; <double> [#uses=1]
+ %40 = fmul double %39, 0x3E340000002813D9 ; <double> [#uses=1]
 call void @llvm.dbg.stoppoint(i32 76, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to %0*))
 %41 = xor i32 %38, 123459876 ; <i32> [#uses=1]
 store i32 %41, i32* @_ZZL13random_doublevE4seed, align 4
@@ -2110,16 +2110,16 @@ entry:
 %real7 = load double* %real6, align 8 ; <double> [#uses=4]
 %imag8 = getelementptr %1* %memtmp1, i32 0, i32 1 ; <double*> [#uses=1]
 %imag9 = load double* %imag8, align 8 ; <double> [#uses=4]
- %21 = mul double %real3, %real7 ; <double> [#uses=1]
- %22 = mul double %imag5, %imag9 ; <double> [#uses=1]
- %23 = add double %21, %22 ; <double> [#uses=1]
- %24 = mul double %real7, %real7 ; <double> [#uses=1]
- %25 = mul double %imag9, %imag9 ; <double> [#uses=1]
- %26 = add double %24, %25 ; <double> [#uses=2]
+ %21 = fmul double %real3, %real7 ; <double> [#uses=1]
+ %22 = fmul double %imag5, %imag9 ; <double> [#uses=1]
+ %23 = fadd double %21, %22 ; <double> [#uses=1]
+ %24 = fmul double %real7, %real7 ; <double> [#uses=1]
+ %25 = fmul double %imag9, %imag9 ; <double> [#uses=1]
+ %26 = fadd double %24, %25 ; <double> [#uses=2]
 %27 = fdiv double %23, %26 ; <double> [#uses=1]
- %28 = mul double %imag5, %real7 ; <double> [#uses=1]
- %29 = mul double %real3, %imag9 ; <double> [#uses=1]
- %30 = sub double %28, %29 ; <double> [#uses=1]
+ %28 = fmul double %imag5, %real7 ; <double> [#uses=1]
+ %29 = fmul double %real3, %imag9 ; <double> [#uses=1]
+ %30 = fsub double %28, %29 ; <double> [#uses=1]
 %31 = fdiv double %30, %26 ; <double> [#uses=1]
 %real10 = getelementptr %1* %0, i32 0, i32 0 ; <double*> [#uses=1]
 store double %27, double* %real10, align 8
@@ -2227,12 +2227,12 @@ entry:
 %real9 = load double* %real8, align 8 ; <double> [#uses=2]
 %imag10 = getelementptr %1* %memtmp3, i32 0, i32 1 ; <double*> [#uses=1]
 %imag11 = load double* %imag10, align 8 ; <double> [#uses=2]
- %27 = mul double %real5, %real9 ; <double> [#uses=1]
- %28 = mul double %imag7, %imag11 ; <double> [#uses=1]
- %29 = sub double %27, %28 ; <double> [#uses=1]
- %30 = mul double %real5, %imag11 ; <double> [#uses=1]
- %31 = mul double %real9, %imag7 ; <double> [#uses=1]
- %32 = add double %30, %31 ; <double> [#uses=1]
+ %27 = fmul double %real5, %real9 ; <double> [#uses=1]
+ %28 = fmul double %imag7, %imag11 ; <double> [#uses=1]
+ %29 = fsub double %27, %28 ; <double> [#uses=1]
+ %30 = fmul double %real5, %imag11 ; <double> [#uses=1]
+ %31 = fmul double %real9, %imag7 ; <double> [#uses=1]
+ %32 = fadd double %30, %31 ; <double> [#uses=1]
 %real12 = getelementptr %1* %0, i32 0, i32 0 ; <double*> [#uses=1]
 store double %29, double* %real12, align 8
 %imag13 = getelementptr %1* %0, i32 0, i32 1 ; <double*> [#uses=1]
@@ -2384,10 +2384,10 @@ entry:
 call void @llvm.dbg.stoppoint(i32 444, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit5 to %0*))
 %0 = call double* @_ZNKSt7complexIdE4imagEv(%"struct.std::complex<double>"* %__x) nounwind ; <double*> [#uses=1]
 %1 = load double* %0, align 8 ; <double> [#uses=1]
- %2 = sub double -0.000000e+00, %1 ; <double> [#uses=1]
+ %2 = fsub double -0.000000e+00, %1 ; <double> [#uses=1]
 %3 = call double* @_ZNKSt7complexIdE4realEv(%"struct.std::complex<double>"* %__x) nounwind ; <double*> [#uses=1]
 %4 = load double* %3, align 8 ; <double> [#uses=1]
- %5 = sub double -0.000000e+00, %4 ; <double> [#uses=1]
+ %5 = fsub double -0.000000e+00, %4 ; <double> [#uses=1]
 call void @_ZNSt7complexIdEC1Edd(%"struct.std::complex<double>"* %agg.result, double %5, double %2) nounwind
 call void @llvm.dbg.region.end(%0* bitcast (%llvm.dbg.subprogram.type* @llvm.dbg.subprogram576 to %0*))
 ret void
@@ -2497,16 +2497,16 @@ entry:
 %real9 = load double* %real8, align 8 ; <double> [#uses=4]
 %imag10 = getelementptr %1* %memtmp3, i32 0, i32 1 ; <double*> [#uses=1]
 %imag11 = load double* %imag10, align 8 ; <double> [#uses=4]
- %27 = mul double %real5, %real9 ; <double> [#uses=1]
- %28 = mul double %imag7, %imag11 ; <double> [#uses=1]
- %29 = add double %27, %28 ; <double> [#uses=1]
- %30 = mul double %real9, %real9 ; <double> [#uses=1]
- %31 = mul double %imag11, %imag11 ; <double> [#uses=1]
- %32 = add double %30, %31 ; <double> [#uses=2]
+ %27 = fmul double %real5, %real9 ; <double> [#uses=1]
+ %28 = fmul double %imag7, %imag11 ; <double> [#uses=1]
+ %29 = fadd double %27, %28 ; <double> [#uses=1]
+ %30 = fmul double %real9, %real9 ; <double> [#uses=1]
+ %31 = fmul double %imag11, %imag11 ; <double> [#uses=1]
+ %32 = fadd double %30, %31 ; <double> [#uses=2]
 %33 = fdiv double %29, %32 ; <double> [#uses=1]
- %34 = mul double %imag7, %real9 ; <double> [#uses=1]
- %35 = mul double %real5, %imag11 ; <double> [#uses=1]
- %36 = sub double %34, %35 ; <double> [#uses=1]
+ %34 = fmul double %imag7, %real9 ; <double> [#uses=1]
+ %35 = fmul double %real5, %imag11 ; <double> [#uses=1]
+ %36 = fsub double %34, %35 ; <double> [#uses=1]
 %37 = fdiv double %36, %32 ; <double> [#uses=1]
 %real12 = getelementptr %1* %0, i32 0, i32 0 ; <double*> [#uses=1]
 store double %33, double* %real12, align 8
@@ -2554,7 +2554,7 @@ entry:
 %1 = load double* %0, align 4 ; <double> [#uses=1]
 %2 = call double* @_ZNKSt7complexIdE4realEv(%"struct.std::complex<double>"* %__z) nounwind ; <double*> [#uses=1]
 %3 = load double* %2, align 8 ; <double> [#uses=1]
- %4 = add double %1, %3 ; <double> [#uses=1]
+ %4 = fadd double %1, %3 ; <double> [#uses=1]
 %5 = getelementptr %"struct.std::complex<double>"* %this, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
 store double %4, double* %5, align 4
 call void @llvm.dbg.stoppoint(i32 1271, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit5 to %0*))
@@ -2562,7 +2562,7 @@ entry:
 %7 = load double* %6, align 4 ; <double> [#uses=1]
 %8 = call double* @_ZNKSt7complexIdE4imagEv(%"struct.std::complex<double>"* %__z) nounwind ; <double*> [#uses=1]
 %9 = load double* %8, align 8 ; <double> [#uses=1]
- %10 = add double %7, %9 ; <double> [#uses=1]
+ %10 = fadd double %7, %9 ; <double> [#uses=1]
 %11 = getelementptr %"struct.std::complex<double>"* %this, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
 store double %10, double* %11, align 4
 call void @llvm.dbg.stoppoint(i32 1272, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit5 to %0*))
@@ -2599,7 +2599,7 @@ entry:
 %1 = load double* %0, align 4 ; <double> [#uses=1]
 %2 = call double* @_ZNKSt7complexIdE4realEv(%"struct.std::complex<double>"* %__z) nounwind ; <double*> [#uses=1]
 %3 = load double* %2, align 8 ; <double> [#uses=1]
- %4 = sub double %1, %3 ; <double> [#uses=1]
+ %4 = fsub double %1, %3 ; <double> [#uses=1]
 %5 = getelementptr %"struct.std::complex<double>"* %this, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
 store double %4, double* %5, align 4
 call void @llvm.dbg.stoppoint(i32 1280, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit5 to %0*))
@@ -2607,7 +2607,7 @@ entry:
 %7 = load double* %6, align 4 ; <double> [#uses=1]
 %8 = call double* @_ZNKSt7complexIdE4imagEv(%"struct.std::complex<double>"* %__z) nounwind ; <double*> [#uses=1]
 %9 = load double* %8, align 8 ; <double> [#uses=1]
- %10 = sub double %7, %9 ; <double> [#uses=1]
+ %10 = fsub double %7, %9 ; <double> [#uses=1]
 %11 = getelementptr %"struct.std::complex<double>"* %this, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
 store double %10, double* %11, align 4
 call void @llvm.dbg.stoppoint(i32 1281, i32 0, %0* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit5 to %0*))
diff --git a/test/Transforms/ScalarRepl/copy-aggregate.ll b/test/Transforms/ScalarRepl/copy-aggregate.ll
index 4ab17ae..a1ad3f9 100644
--- a/test/Transforms/ScalarRepl/copy-aggregate.ll
+++ b/test/Transforms/ScalarRepl/copy-aggregate.ll
@@ -25,7 +25,7 @@ define float @test2(i128 %V) nounwind {
 %B = getelementptr {[4 x float]}* %X, i32 0, i32 0, i32 3
 %a = load float* %A
 %b = load float* %B
- %c = add float %a, %b
+ %c = fadd float %a, %b
 ret float %c
 }
diff --git a/test/Transforms/ScalarRepl/memcpy-from-global.ll b/test/Transforms/ScalarRepl/memcpy-from-global.ll
index ee77e1f..e62ccc2 100644
--- a/test/Transforms/ScalarRepl/memcpy-from-global.ll
+++ b/test/Transforms/ScalarRepl/memcpy-from-global.ll
@@ -10,23 +10,23 @@ entry:
 %tmp5 = and i32 %tmp3, 124 ; <i32> [#uses=4]
 %tmp753 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp5 ; <float*> [#uses=1]
 %tmp9 = load float* %tmp753 ; <float> [#uses=1]
- %tmp11 = mul float %tmp9, %x ; <float> [#uses=1]
- %tmp13 = add float %tmp11, 0.000000e+00 ; <float> [#uses=1]
+ %tmp11 = fmul float %tmp9, %x ; <float> [#uses=1]
+ %tmp13 = fadd float %tmp11, 0.000000e+00 ; <float> [#uses=1]
 %tmp17.sum52 = or i32 %tmp5, 1 ; <i32> [#uses=1]
 %tmp1851 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp17.sum52 ; <float*> [#uses=1]
 %tmp19 = load float* %tmp1851 ; <float> [#uses=1]
- %tmp21 = mul float %tmp19, %y ; <float> [#uses=1]
- %tmp23 = add float %tmp21, %tmp13 ; <float> [#uses=1]
+ %tmp21 = fmul float %tmp19, %y ; <float> [#uses=1]
+ %tmp23 = fadd float %tmp21, %tmp13 ; <float> [#uses=1]
 %tmp27.sum50 = or i32 %tmp5, 2 ; <i32> [#uses=1]
 %tmp2849 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp27.sum50 ; <float*> [#uses=1]
 %tmp29 = load float* %tmp2849 ; <float> [#uses=1]
- %tmp31 = mul float %tmp29, %z ; <float> [#uses=1]
- %tmp33 = add float %tmp31, %tmp23 ; <float> [#uses=1]
+ %tmp31 = fmul float %tmp29, %z ; <float> [#uses=1]
+ %tmp33 = fadd float %tmp31, %tmp23 ; <float> [#uses=1]
 %tmp37.sum48 = or i32 %tmp5, 3 ; <i32> [#uses=1]
 %tmp3847 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp37.sum48 ; <float*> [#uses=1]
 %tmp39 = load float* %tmp3847 ; <float> [#uses=1]
- %tmp41 = mul float %tmp39, %w ; <float> [#uses=1]
- %tmp43 = add float %tmp41, %tmp33 ; <float> [#uses=1]
+ %tmp41 = fmul float %tmp39, %w ; <float> [#uses=1]
+ %tmp43 = fadd float %tmp41, %tmp33 ; <float> [#uses=1]
 ret float %tmp43
 }
diff --git a/test/Transforms/ScalarRepl/vector_promote.ll b/test/Transforms/ScalarRepl/vector_promote.ll
index a0d3317..4b6555b 100644
--- a/test/Transforms/ScalarRepl/vector_promote.ll
+++ b/test/Transforms/ScalarRepl/vector_promote.ll
@@ -5,12 +5,12 @@ define void @test(<4 x float>* %F, float %f) {
 entry:
 %G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=3]
 %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp3 = add <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
+ %tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
 store <4 x float> %tmp3, <4 x float>* %G
 %G.upgrd.1 = getelementptr <4 x float>* %G, i32 0, i32 0 ; <float*> [#uses=1]
 store float %f, float* %G.upgrd.1
 %tmp4 = load <4 x float>* %G ; <<4 x float>> [#uses=2]
- %tmp6 = add <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
+ %tmp6 = fadd <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
 store <4 x float> %tmp6, <4 x float>* %F
 ret void
 }
@@ -19,12 +19,12 @@ define void @test2(<4 x float>* %F, float %f) {
 entry:
 %G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=3]
 %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp3 = add <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
+ %tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
 store <4 x float> %tmp3, <4 x float>* %G
 %tmp.upgrd.2 = getelementptr <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
 store float %f, float* %tmp.upgrd.2
 %tmp4 = load <4 x float>* %G ; <<4 x float>> [#uses=2]
- %tmp6 = add <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
+ %tmp6 = fadd <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
 store <4 x float> %tmp6, <4 x float>* %F
 ret void
 }
@@ -33,7 +33,7 @@ define void @test3(<4 x float>* %F, float* %f) {
 entry:
 %G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=2]
 %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp3 = add <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
+ %tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
 store <4 x float> %tmp3, <4 x float>* %G
 %tmp.upgrd.3 = getelementptr <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
 %tmp.upgrd.4 = load float* %tmp.upgrd.3 ; <float> [#uses=1]
@@ -45,7 +45,7 @@ define void @test4(<4 x float>* %F, float* %f) {
 entry:
 %G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=2]
 %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp3 = add <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
+ %tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
 store <4 x float> %tmp3, <4 x float>* %G
 %G.upgrd.5 = getelementptr <4 x float>* %G, i32 0, i32 0 ; <float*> [#uses=1]
 %tmp.upgrd.6 = load float* %G.upgrd.5 ; <float> [#uses=1]
diff --git a/test/Transforms/SimplifyCFG/2006-10-29-InvokeCrash.ll b/test/Transforms/SimplifyCFG/2006-10-29-InvokeCrash.ll
index f22ca6c..6bfef02 100644
--- a/test/Transforms/SimplifyCFG/2006-10-29-InvokeCrash.ll
+++ b/test/Transforms/SimplifyCFG/2006-10-29-InvokeCrash.ll
@@ -142,11 +142,11 @@ invcont57: ; preds = %invcont51
 store double %tmp64, double* %tmp62
 %tmp65 = call double* @_ZN6QSizeF6rwidthEv( %struct.QPointF* %scaledPageSize ) ; <double*> [#uses=2]
 %tmp67 = load double* %tmp65 ; <double> [#uses=1]
- %tmp69 = mul double %tmp67, %tmp48 ; <double> [#uses=1]
+ %tmp69 = fmul double %tmp67, %tmp48 ; <double> [#uses=1]
 store double %tmp69, double* %tmp65
 %tmp71 = call double* @_ZN6QSizeF7rheightEv( %struct.QPointF* %scaledPageSize ) ; <double*> [#uses=2]
 %tmp73 = load double* %tmp71 ; <double> [#uses=1]
- %tmp75 = mul double %tmp73, %tmp54 ; <double> [#uses=1]
+ %tmp75 = fmul double %tmp73, %tmp54 ; <double> [#uses=1]
 store double %tmp75, double* %tmp71
 %tmp78 = getelementptr %struct.QPrinter* %printer, i32 0, i32 0 ; <%struct.QPaintDevice*> [#uses=1]
 %tmp80 = invoke i32 @_ZNK12QPaintDevice6heightEv( %struct.QPaintDevice* %tmp78 )
@@ -188,7 +188,7 @@ invcont104: ; preds = %invcont103
 to label %invcont106 unwind label %cleanup329 ; <i32> [#uses=1]
 invcont106: ; preds = %invcont104
 %tmp108 = sitofp i32 %tmp107 to double ; <double> [#uses=1]
- %tmp109 = mul double %tmp108, 0x3FE93264C993264C ; <double> [#uses=1]
+ %tmp109 = fmul double %tmp108, 0x3FE93264C993264C ; <double> [#uses=1]
 %tmp109.upgrd.17 = fptosi double %tmp109 to i32 ; <i32> [#uses=3]
 %tmp.upgrd.18 = call %struct.QTextBlockGroup* @_ZNK13QTextDocument9rootFrameEv( %struct.QAbstractTextDocumentLayout* %tmp95 ) ; <%struct.QTextBlockGroup*> [#uses=1]
 invoke void @_ZNK10QTextFrame11frameFormatEv( %struct.QTextBlockFormat* sret %fmt, %struct.QTextBlockGroup* %tmp.upgrd.18 )
@@ -235,7 +235,7 @@ invcont124: ; preds = %invcont122
 store double %tmp137, double* %tmp135
 %tmp138 = call double @_ZNK6QRectF6heightEv( %struct.QRectF* %body ) ; <double> [#uses=1]
 %tmp139 = sitofp i32 %tmp109.upgrd.17 to double ; <double> [#uses=1]
- %tmp140 = sub double %tmp138, %tmp139 ; <double> [#uses=1]
+ %tmp140 = fsub double %tmp138, %tmp139 ; <double> [#uses=1]
 %tmp142 = invoke %struct.QPaintDevice* @_ZNK8QPainter6deviceEv( %struct.QPainter* %p )
 to label %invcont141 unwind label %cleanup192 ; <%struct.QPaintDevice*> [#uses=1]
 invcont141: ; preds = %invcont124
@@ -249,7 +249,7 @@ invcont146: ; preds = %invcont144
 to label %invcont148 unwind label %cleanup168 ; <i32> [#uses=1]
 invcont148: ; preds = %invcont146
 %tmp149.upgrd.21 = sitofp i32 %tmp149 to double ; <double> [#uses=1]
- %tmp150 = add double %tmp140, %tmp149.upgrd.21 ; <double> [#uses=1]
+ %tmp150 = fadd double %tmp140, %tmp149.upgrd.21 ; <double> [#uses=1]
 %tmp152 = invoke %struct.QPaintDevice* @_ZNK8QPainter6deviceEv( %struct.QPainter* %p )
 to label %invcont151 unwind label %cleanup168 ; <%struct.QPaintDevice*> [#uses=1]
 invcont151: ; preds = %invcont148
@@ -259,10 +259,10 @@ invcont153: ; preds = %invcont151
 %tmp155 = mul i32 %tmp154, 5 ; <i32> [#uses=1]
 %tmp156 = sdiv i32 %tmp155, 72 ; <i32> [#uses=1]
 %tmp156.upgrd.22 = sitofp i32 %tmp156 to double ; <double> [#uses=1]
- %tmp157 = add double %tmp150, %tmp156.upgrd.22 ; <double> [#uses=1]
+ %tmp157 = fadd double %tmp150, %tmp156.upgrd.22 ; <double> [#uses=1]
 %tmp158 = call double @_ZNK6QRectF5widthEv( %struct.QRectF* %body ) ; <double> [#uses=1]
 %tmp159 = sitofp i32 %tmp109.upgrd.17 to double ; <double> [#uses=1]
- %tmp160 = sub double %tmp158, %tmp159 ; <double> [#uses=1]
+ %tmp160 = fsub double %tmp158, %tmp159 ; <double> [#uses=1]
 call void @_ZN7QPointFC1Edd( %struct.QPointF* %tmp2, double %tmp160, double %tmp157 )
 %tmp161 = getelementptr %struct.QPointF* %pageNumberPos, i32 0, i32 0 ; <double*> [#uses=1]
 %tmp162 = getelementptr %struct.QPointF* %tmp2, i32 0, i32 0 ; <double*> [#uses=1]
diff --git a/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll b/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll
index 43ff690..4c9c9e8 100644
--- a/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll
+++ b/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll
@@ -15,7 +15,7 @@ entry:
 br i1 %toBool, label %cond_true, label %cond_next

 cond_true: ; preds = %entry
- %tmp7 = add double %tmp, %Z ; <double> [#uses=1]
+ %tmp7 = fadd double %tmp, %Z ; <double> [#uses=1]
 br label %cond_next

 cond_next: ; preds = %cond_true, %entry
diff --git a/test/Transforms/SimplifyCFG/2008-04-27-MultipleReturnCrash.ll b/test/Transforms/SimplifyCFG/2008-04-27-MultipleReturnCrash.ll
index a370b95..be3410c 100644
--- a/test/Transforms/SimplifyCFG/2008-04-27-MultipleReturnCrash.ll
+++ b/test/Transforms/SimplifyCFG/2008-04-27-MultipleReturnCrash.ll
@@ -21,7 +21,7 @@ bb56: ; preds = %bb48

 bb174: ; preds = %bb144, %bb114
- %tmp191 = mul x86_fp80 0xK00000000000000000000, 0xK3FFE8000000000000000 ; <x86_fp80> [#uses=1]
+ %tmp191 = fmul x86_fp80 0xK00000000000000000000, 0xK3FFE8000000000000000 ; <x86_fp80> [#uses=1]
 br label %bb196

 bb196: ; preds = %bb174, %bb56, %bb40
diff --git a/test/Transforms/SimplifyCFG/2009-05-12-externweak.ll b/test/Transforms/SimplifyCFG/2009-05-12-externweak.ll
index 5969f27c..dc0cbbe 100644
--- a/test/Transforms/SimplifyCFG/2009-05-12-externweak.ll
+++ b/test/Transforms/SimplifyCFG/2009-05-12-externweak.ll
@@ -29,7 +29,7 @@ bb3: ; preds = %bb2, %bb1
 store i32 %storemerge, i32* @j
 %1 = sitofp i32 %storemerge to double ; <double> [#uses=1]
 %2 = call double @sin(double %1) nounwind readonly ; <double> [#uses=1]
- %3 = add double %2, %d.0 ; <double> [#uses=1]
+ %3 = fadd double %2, %d.0 ; <double> [#uses=1]
 %4 = add i32 %l.0, 1 ; <i32> [#uses=1]
 br label %bb4
diff --git a/test/Transforms/SimplifyLibCalls/half-powr.ll b/test/Transforms/SimplifyLibCalls/half-powr.ll
index f4e898c..890e788 100644
--- a/test/Transforms/SimplifyLibCalls/half-powr.ll
+++ b/test/Transforms/SimplifyLibCalls/half-powr.ll
@@ -11,7 +11,7 @@ bb: ; preds = %entry

 bb1: ; preds = %bb, %entry
 %f_addr.0 = phi float [ %1, %bb ], [ %f, %entry ] ; <float> [#uses=1]
- %2 = mul float %f_addr.0, %g ; <float> [#uses=1]
+ %2 = fmul float %f_addr.0, %g ; <float> [#uses=1]
 ret float %2
 }