author     dim <dim@FreeBSD.org>    2012-08-15 19:34:23 +0000
committer  dim <dim@FreeBSD.org>    2012-08-15 19:34:23 +0000
commit     721c201bd55ffb73cb2ba8d39e0570fa38c44e15 (patch)
tree       eacfc83d988e4b9d11114387ae7dc41243f2a363 /lib/Target/CellSPU/SPUISelLowering.cpp
parent     2b2816e083a455f7a656ae88b0fd059d1688bb36 (diff)
Vendor import of llvm trunk r161861:
http://llvm.org/svn/llvm-project/llvm/trunk@161861
Diffstat (limited to 'lib/Target/CellSPU/SPUISelLowering.cpp')
 -rw-r--r--  lib/Target/CellSPU/SPUISelLowering.cpp | 73
 1 file changed, 40 insertions, 33 deletions
diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp
index 0623741..4e9fcd1 100644
--- a/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -77,12 +77,14 @@ namespace {
     // Splice the libcall in wherever FindInputOutputChains tells us to.
     Type *RetTy =
         Op.getNode()->getValueType(0).getTypeForEVT(*DAG.getContext());
-    std::pair<SDValue, SDValue> CallInfo =
-      TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
+    TargetLowering::CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned,
+                    false, false,
                     0, TLI.getLibcallCallingConv(LC),
                     /*isTailCall=*/false,
-                    /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
+                    /*doesNotRet=*/false,
+                    /*isReturnValueUsed=*/true,
                     Callee, Args, DAG, Op.getDebugLoc());
+    std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
 
     return CallInfo.first;
   }
@@ -100,13 +102,13 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
   setLibcallName(RTLIB::DIV_F64, "__fast_divdf3");
 
   // Set up the SPU's register classes:
-  addRegisterClass(MVT::i8,   SPU::R8CRegisterClass);
-  addRegisterClass(MVT::i16,  SPU::R16CRegisterClass);
-  addRegisterClass(MVT::i32,  SPU::R32CRegisterClass);
-  addRegisterClass(MVT::i64,  SPU::R64CRegisterClass);
-  addRegisterClass(MVT::f32,  SPU::R32FPRegisterClass);
-  addRegisterClass(MVT::f64,  SPU::R64FPRegisterClass);
-  addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);
+  addRegisterClass(MVT::i8,   &SPU::R8CRegClass);
+  addRegisterClass(MVT::i16,  &SPU::R16CRegClass);
+  addRegisterClass(MVT::i32,  &SPU::R32CRegClass);
+  addRegisterClass(MVT::i64,  &SPU::R64CRegClass);
+  addRegisterClass(MVT::f32,  &SPU::R32FPRegClass);
+  addRegisterClass(MVT::f64,  &SPU::R64FPRegClass);
+  addRegisterClass(MVT::i128, &SPU::GPRCRegClass);
 
   // SPU has no sign or zero extended loads for i1, i8, i16:
   setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
@@ -397,12 +399,12 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
 
   // First set operation action for all vector types to expand. Then we
   // will selectively turn on ones that can be effectively codegen'd.
-  addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass);
-  addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass);
-  addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass);
-  addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass);
-  addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
-  addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);
+  addRegisterClass(MVT::v16i8, &SPU::VECREGRegClass);
+  addRegisterClass(MVT::v8i16, &SPU::VECREGRegClass);
+  addRegisterClass(MVT::v4i32, &SPU::VECREGRegClass);
+  addRegisterClass(MVT::v2i64, &SPU::VECREGRegClass);
+  addRegisterClass(MVT::v4f32, &SPU::VECREGRegClass);
+  addRegisterClass(MVT::v2f64, &SPU::VECREGRegClass);
 
   for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
        i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
@@ -1133,7 +1135,7 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,
 
   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+                 getTargetMachine(), ArgLocs, *DAG.getContext());
   // FIXME: allow for other calling conventions
   CCInfo.AnalyzeFormalArguments(Ins, CCC_SPU);
 
@@ -1263,14 +1265,19 @@ static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
 }
 
 SDValue
-SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
-                             CallingConv::ID CallConv, bool isVarArg,
-                             bool doesNotRet, bool &isTailCall,
-                             const SmallVectorImpl<ISD::OutputArg> &Outs,
-                             const SmallVectorImpl<SDValue> &OutVals,
-                             const SmallVectorImpl<ISD::InputArg> &Ins,
-                             DebugLoc dl, SelectionDAG &DAG,
+SPUTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                              SmallVectorImpl<SDValue> &InVals) const {
+  SelectionDAG &DAG                     = CLI.DAG;
+  DebugLoc &dl                          = CLI.DL;
+  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
+  SmallVector<SDValue, 32> &OutVals     = CLI.OutVals;
+  SmallVector<ISD::InputArg, 32> &Ins   = CLI.Ins;
+  SDValue Chain                         = CLI.Chain;
+  SDValue Callee                        = CLI.Callee;
+  bool &isTailCall                      = CLI.IsTailCall;
+  CallingConv::ID CallConv              = CLI.CallConv;
+  bool isVarArg                         = CLI.IsVarArg;
+
   // CellSPU target does not yet support tail call optimization.
   isTailCall = false;
 
@@ -1280,7 +1287,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
 
   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+                 getTargetMachine(), ArgLocs, *DAG.getContext());
   // FIXME: allow for other calling conventions
   CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);
 
@@ -1441,7 +1448,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
   // Now handle the return value(s)
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                    getTargetMachine(), RVLocs, *DAG.getContext());
+                    getTargetMachine(), RVLocs, *DAG.getContext());
 
   CCRetInfo.AnalyzeCallResult(Ins, CCC_SPU);
 
@@ -1468,7 +1475,7 @@ SPUTargetLowering::LowerReturn(SDValue Chain,
 
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), RVLocs, *DAG.getContext());
+                 getTargetMachine(), RVLocs, *DAG.getContext());
   CCInfo.AnalyzeReturn(Outs, RetCC_SPU);
 
   // If this is the first return lowered for this function, add the regs to the
@@ -3139,16 +3146,16 @@ SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
     case 'b':   // R1-R31
     case 'r':   // R0-R31
       if (VT == MVT::i64)
-        return std::make_pair(0U, SPU::R64CRegisterClass);
-      return std::make_pair(0U, SPU::R32CRegisterClass);
+        return std::make_pair(0U, &SPU::R64CRegClass);
+      return std::make_pair(0U, &SPU::R32CRegClass);
     case 'f':
       if (VT == MVT::f32)
-        return std::make_pair(0U, SPU::R32FPRegisterClass);
-      else if (VT == MVT::f64)
-        return std::make_pair(0U, SPU::R64FPRegisterClass);
+        return std::make_pair(0U, &SPU::R32FPRegClass);
+      if (VT == MVT::f64)
+        return std::make_pair(0U, &SPU::R64FPRegClass);
       break;
     case 'v':
-      return std::make_pair(0U, SPU::GPRCRegisterClass);
+      return std::make_pair(0U, &SPU::GPRCRegClass);
     }
   }