Diffstat (limited to 'lib/Target/Mips/MipsISelLowering.cpp')
-rw-r--r--  lib/Target/Mips/MipsISelLowering.cpp | 809
1 file changed, 603 insertions, 206 deletions
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index ace47ab..c5207c6 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -81,6 +81,14 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const { case MipsISD::Sync: return "MipsISD::Sync"; case MipsISD::Ext: return "MipsISD::Ext"; case MipsISD::Ins: return "MipsISD::Ins"; + case MipsISD::LWL: return "MipsISD::LWL"; + case MipsISD::LWR: return "MipsISD::LWR"; + case MipsISD::SWL: return "MipsISD::SWL"; + case MipsISD::SWR: return "MipsISD::SWR"; + case MipsISD::LDL: return "MipsISD::LDL"; + case MipsISD::LDR: return "MipsISD::LDR"; + case MipsISD::SDL: return "MipsISD::SDL"; + case MipsISD::SDR: return "MipsISD::SDR"; default: return NULL; } } @@ -98,20 +106,25 @@ MipsTargetLowering(MipsTargetMachine &TM) setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct? // Set up the register classes - addRegisterClass(MVT::i32, Mips::CPURegsRegisterClass); + addRegisterClass(MVT::i32, &Mips::CPURegsRegClass); if (HasMips64) - addRegisterClass(MVT::i64, Mips::CPU64RegsRegisterClass); + addRegisterClass(MVT::i64, &Mips::CPU64RegsRegClass); + + if (Subtarget->inMips16Mode()) { + addRegisterClass(MVT::i32, &Mips::CPU16RegsRegClass); + addRegisterClass(MVT::i32, &Mips::CPURARegRegClass); + } if (!TM.Options.UseSoftFloat) { - addRegisterClass(MVT::f32, Mips::FGR32RegisterClass); + addRegisterClass(MVT::f32, &Mips::FGR32RegClass); // When dealing with single precision only, use libcalls if (!Subtarget->isSingleFloat()) { if (HasMips64) - addRegisterClass(MVT::f64, Mips::FGR64RegisterClass); + addRegisterClass(MVT::f64, &Mips::FGR64RegClass); else - addRegisterClass(MVT::f64, Mips::AFGR64RegisterClass); + addRegisterClass(MVT::f64, &Mips::AFGR64RegClass); } } @@ -139,15 +152,18 @@ MipsTargetLowering(MipsTargetMachine &TM) setOperationAction(ISD::SELECT, MVT::f32, Custom); setOperationAction(ISD::SELECT, MVT::f64, Custom); setOperationAction(ISD::SELECT, MVT::i32, Custom); + setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); + setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); setOperationAction(ISD::SETCC, MVT::f32, Custom); setOperationAction(ISD::SETCC, MVT::f64, Custom); setOperationAction(ISD::BRCOND, MVT::Other, Custom); - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); setOperationAction(ISD::VASTART, MVT::Other, Custom); setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom); setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); + setOperationAction(ISD::LOAD, MVT::i32, Custom); + setOperationAction(ISD::STORE, MVT::i32, Custom); if (!TM.Options.NoNaNsFPMath) { setOperationAction(ISD::FABS, MVT::f32, Custom); @@ -161,7 +177,14 @@ MipsTargetLowering(MipsTargetMachine &TM) setOperationAction(ISD::JumpTable, MVT::i64, Custom); setOperationAction(ISD::ConstantPool, MVT::i64, Custom); setOperationAction(ISD::SELECT, MVT::i64, Custom); - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom); + setOperationAction(ISD::LOAD, MVT::i64, Custom); + setOperationAction(ISD::STORE, MVT::i64, Custom); + } + + if (!HasMips64) { + setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); + setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); + setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); } setOperationAction(ISD::SDIV, MVT::i32, Expand); @@ -192,6 +215,8 @@ MipsTargetLowering(MipsTargetMachine &TM) 
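Note: the MipsISD::LWL/LWR/SWL/SWR nodes (and their 64-bit LDL/LDR/SDL/SDR counterparts) added above back the new custom ISD::LOAD/ISD::STORE lowering. A minimal sketch of source that exercises that path, assuming GCC/Clang packed-attribute syntax; the type and function names are illustrative only:

    struct __attribute__((packed)) Packed { char Tag; int Word; };

    int  loadWord(const Packed *P)   { return P->Word; }  // align-1 i32 load  -> lwl/lwr pair
    void storeWord(Packed *P, int V) { P->Word = V; }     // align-1 i32 store -> swl/swr pair

Since allowsUnalignedMemoryAccesses() now reports true only for i32 and i64, such accesses are kept as single unaligned loads/stores and expanded by LowerLOAD/LowerSTORE further down, rather than being broken into byte operations.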
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); setOperationAction(ISD::ROTL, MVT::i32, Expand); setOperationAction(ISD::ROTL, MVT::i64, Expand); + setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); + setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand); if (!Subtarget->hasMips32r2()) setOperationAction(ISD::ROTR, MVT::i32, Expand); @@ -199,9 +224,6 @@ MipsTargetLowering(MipsTargetMachine &TM) if (!Subtarget->hasMips64r2()) setOperationAction(ISD::ROTR, MVT::i64, Expand); - setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); - setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); - setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); setOperationAction(ISD::FSIN, MVT::f32, Expand); setOperationAction(ISD::FSIN, MVT::f64, Expand); setOperationAction(ISD::FCOS, MVT::f32, Expand); @@ -243,9 +265,6 @@ MipsTargetLowering(MipsTargetMachine &TM) setInsertFencesForAtomic(true); - if (Subtarget->isSingleFloat()) - setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); - if (!Subtarget->hasSEInReg()) { setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); @@ -261,6 +280,13 @@ MipsTargetLowering(MipsTargetMachine &TM) setOperationAction(ISD::BSWAP, MVT::i64, Expand); } + if (HasMips64) { + setLoadExtAction(ISD::SEXTLOAD, MVT::i32, Custom); + setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, Custom); + setLoadExtAction(ISD::EXTLOAD, MVT::i32, Custom); + setTruncStoreAction(MVT::i64, MVT::i32, Custom); + } + setTargetDAGCombine(ISD::ADDE); setTargetDAGCombine(ISD::SUBE); setTargetDAGCombine(ISD::SDIVREM); @@ -268,6 +294,7 @@ MipsTargetLowering(MipsTargetMachine &TM) setTargetDAGCombine(ISD::SELECT); setTargetDAGCombine(ISD::AND); setTargetDAGCombine(ISD::OR); + setTargetDAGCombine(ISD::ADD); setMinFunctionAlignment(HasMips64 ? 3 : 2); @@ -276,6 +303,8 @@ MipsTargetLowering(MipsTargetMachine &TM) setExceptionPointerRegister(IsN64 ? Mips::A0_64 : Mips::A0); setExceptionSelectorRegister(IsN64 ? Mips::A1_64 : Mips::A1); + + maxStoresPerMemcpy = 16; } bool MipsTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { @@ -284,10 +313,7 @@ bool MipsTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { switch (SVT) { case MVT::i64: case MVT::i32: - case MVT::i16: return true; - case MVT::f32: - return Subtarget->hasMips32r2Or64(); default: return false; } @@ -305,17 +331,17 @@ EVT MipsTargetLowering::getSetCCResultType(EVT VT) const { // Lo0: initial value of Lo register // Hi0: initial value of Hi register // Return true if pattern matching was successful. -static bool SelectMadd(SDNode* ADDENode, SelectionDAG* CurDAG) { +static bool SelectMadd(SDNode *ADDENode, SelectionDAG *CurDAG) { // ADDENode's second operand must be a flag output of an ADDC node in order // for the matching to be successful. - SDNode* ADDCNode = ADDENode->getOperand(2).getNode(); + SDNode *ADDCNode = ADDENode->getOperand(2).getNode(); if (ADDCNode->getOpcode() != ISD::ADDC) return false; SDValue MultHi = ADDENode->getOperand(0); SDValue MultLo = ADDCNode->getOperand(0); - SDNode* MultNode = MultHi.getNode(); + SDNode *MultNode = MultHi.getNode(); unsigned MultOpc = MultHi.getOpcode(); // MultHi and MultLo must be generated by the same node, @@ -378,17 +404,17 @@ static bool SelectMadd(SDNode* ADDENode, SelectionDAG* CurDAG) { // Lo0: initial value of Lo register // Hi0: initial value of Hi register // Return true if pattern matching was successful. 
-static bool SelectMsub(SDNode* SUBENode, SelectionDAG* CurDAG) { +static bool SelectMsub(SDNode *SUBENode, SelectionDAG *CurDAG) { // SUBENode's second operand must be a flag output of an SUBC node in order // for the matching to be successful. - SDNode* SUBCNode = SUBENode->getOperand(2).getNode(); + SDNode *SUBCNode = SUBENode->getOperand(2).getNode(); if (SUBCNode->getOpcode() != ISD::SUBC) return false; SDValue MultHi = SUBENode->getOperand(1); SDValue MultLo = SUBCNode->getOperand(1); - SDNode* MultNode = MultHi.getNode(); + SDNode *MultNode = MultHi.getNode(); unsigned MultOpc = MultHi.getOpcode(); // MultHi and MultLo must be generated by the same node, @@ -443,9 +469,9 @@ static bool SelectMsub(SDNode* SUBENode, SelectionDAG* CurDAG) { return true; } -static SDValue PerformADDECombine(SDNode *N, SelectionDAG& DAG, +static SDValue PerformADDECombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, - const MipsSubtarget* Subtarget) { + const MipsSubtarget *Subtarget) { if (DCI.isBeforeLegalize()) return SDValue(); @@ -456,9 +482,9 @@ static SDValue PerformADDECombine(SDNode *N, SelectionDAG& DAG, return SDValue(); } -static SDValue PerformSUBECombine(SDNode *N, SelectionDAG& DAG, +static SDValue PerformSUBECombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, - const MipsSubtarget* Subtarget) { + const MipsSubtarget *Subtarget) { if (DCI.isBeforeLegalize()) return SDValue(); @@ -469,9 +495,9 @@ static SDValue PerformSUBECombine(SDNode *N, SelectionDAG& DAG, return SDValue(); } -static SDValue PerformDivRemCombine(SDNode *N, SelectionDAG& DAG, +static SDValue PerformDivRemCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, - const MipsSubtarget* Subtarget) { + const MipsSubtarget *Subtarget) { if (DCI.isBeforeLegalizeOps()) return SDValue(); @@ -546,7 +572,7 @@ static bool InvertFPCondCode(Mips::CondCode CC) { // Creates and returns an FPCmp node from a setcc node. // Returns Op if setcc is not a floating point comparison. -static SDValue CreateFPCmp(SelectionDAG& DAG, const SDValue& Op) { +static SDValue CreateFPCmp(SelectionDAG &DAG, const SDValue &Op) { // must be a SETCC node if (Op.getOpcode() != ISD::SETCC) return Op; @@ -568,7 +594,7 @@ static SDValue CreateFPCmp(SelectionDAG& DAG, const SDValue& Op) { } // Creates and returns a CMovFPT/F node. 
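Note: for reference, a scalar model of what the DAG patterns matched by SelectMadd/SelectMsub above compute: a 64-bit accumulator held in the HI/LO pair plus (or minus) the 64-bit product of two i32 operands, i.e. one MIPS madd/msub. The sketch is illustrative and the helper name is made up:

    #include <cstdint>

    static void maddModel(uint32_t &Hi, uint32_t &Lo, int32_t A, int32_t B) {
      uint64_t Acc = ((uint64_t)Hi << 32) | Lo;       // current {HI, LO}
      Acc += (uint64_t)((int64_t)A * (int64_t)B);     // accumulate the signed product
      Hi = (uint32_t)(Acc >> 32);
      Lo = (uint32_t)Acc;
    }

msub is the same with the product subtracted; the combines fold the matched multiply plus ADDC/ADDE (or SUBC/SUBE) chain into a single multiply-accumulate node instead of separate multiply and add/sub-with-carry sequences.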
-static SDValue CreateCMovFP(SelectionDAG& DAG, SDValue Cond, SDValue True, +static SDValue CreateCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True, SDValue False, DebugLoc DL) { bool invert = InvertFPCondCode((Mips::CondCode) cast<ConstantSDNode>(Cond.getOperand(2)) @@ -578,9 +604,9 @@ static SDValue CreateCMovFP(SelectionDAG& DAG, SDValue Cond, SDValue True, True.getValueType(), True, False, Cond); } -static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG& DAG, +static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, - const MipsSubtarget* Subtarget) { + const MipsSubtarget *Subtarget) { if (DCI.isBeforeLegalizeOps()) return SDValue(); @@ -604,16 +630,16 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG& DAG, const DebugLoc DL = N->getDebugLoc(); ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get(); SDValue True = N->getOperand(1); - + SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0), SetCC.getOperand(1), ISD::getSetCCInverse(CC, true)); - + return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True); } -static SDValue PerformANDCombine(SDNode *N, SelectionDAG& DAG, +static SDValue PerformANDCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, - const MipsSubtarget* Subtarget) { + const MipsSubtarget *Subtarget) { // Pattern match EXT. // $dst = and ((sra or srl) $src , pos), (2**size - 1) // => ext $dst, $src, size, pos @@ -651,9 +677,9 @@ static SDValue PerformANDCombine(SDNode *N, SelectionDAG& DAG, DAG.getConstant(SMSize, MVT::i32)); } -static SDValue PerformORCombine(SDNode *N, SelectionDAG& DAG, +static SDValue PerformORCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, - const MipsSubtarget* Subtarget) { + const MipsSubtarget *Subtarget) { // Pattern match INS. 
// $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1), // where mask1 = (2**size - 1) << pos, mask0 = ~mask1 @@ -705,6 +731,33 @@ static SDValue PerformORCombine(SDNode *N, SelectionDAG& DAG, DAG.getConstant(SMSize0, MVT::i32), And0.getOperand(0)); } +static SDValue PerformADDCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const MipsSubtarget *Subtarget) { + // (add v0, (add v1, abs_lo(tjt))) => (add (add v0, v1), abs_lo(tjt)) + + if (DCI.isBeforeLegalizeOps()) + return SDValue(); + + SDValue Add = N->getOperand(1); + + if (Add.getOpcode() != ISD::ADD) + return SDValue(); + + SDValue Lo = Add.getOperand(1); + + if ((Lo.getOpcode() != MipsISD::Lo) || + (Lo.getOperand(0).getOpcode() != ISD::TargetJumpTable)) + return SDValue(); + + EVT ValTy = N->getValueType(0); + DebugLoc DL = N->getDebugLoc(); + + SDValue Add1 = DAG.getNode(ISD::ADD, DL, ValTy, N->getOperand(0), + Add.getOperand(0)); + return DAG.getNode(ISD::ADD, DL, ValTy, Add1, Lo); +} + SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; @@ -720,11 +773,13 @@ SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) case ISD::UDIVREM: return PerformDivRemCombine(N, DAG, DCI, Subtarget); case ISD::SELECT: - return PerformSELECTCombine(N, DAG, DCI, Subtarget); + return PerformSELECTCombine(N, DAG, DCI, Subtarget); case ISD::AND: return PerformANDCombine(N, DAG, DCI, Subtarget); case ISD::OR: return PerformORCombine(N, DAG, DCI, Subtarget); + case ISD::ADD: + return PerformADDCombine(N, DAG, DCI, Subtarget); } return SDValue(); @@ -737,19 +792,25 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::BRCOND: return LowerBRCOND(Op, DAG); case ISD::ConstantPool: return LowerConstantPool(Op, DAG); - case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); case ISD::JumpTable: return LowerJumpTable(Op, DAG); case ISD::SELECT: return LowerSELECT(Op, DAG); + case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); case ISD::SETCC: return LowerSETCC(Op, DAG); case ISD::VASTART: return LowerVASTART(Op, DAG); case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); case ISD::FABS: return LowerFABS(Op, DAG); case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); + case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG); case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG); + case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); + case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG, true); + case ISD::SRL_PARTS: return LowerShiftRightParts(Op, DAG, false); + case ISD::LOAD: return LowerLOAD(Op, DAG); + case ISD::STORE: return LowerSTORE(Op, DAG); } return SDValue(); } @@ -784,7 +845,7 @@ static Mips::FPBranchCode GetFPBranchCodeFromCond(Mips::CondCode CC) { /* static MachineBasicBlock* ExpandCondMov(MachineInstr *MI, MachineBasicBlock *BB, DebugLoc dl, - const MipsSubtarget* Subtarget, + const MipsSubtarget *Subtarget, const TargetInstrInfo *TII, bool isFPCmp, unsigned Opc) { // There is no need to expand CMov instructions if target has @@ -1440,42 +1501,6 @@ MipsTargetLowering::EmitAtomicCmpSwapPartword(MachineInstr *MI, // Misc Lower Operation implementation //===----------------------------------------------------------------------===// SDValue 
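Note: a scalar reference for the two bitfield patterns matched above, valid for 0 < Size < 32; the helpers are illustrative and not part of the patch:

    #include <cstdint>

    // MIPS ext: extract Size bits of Src starting at bit Pos.
    static uint32_t extModel(uint32_t Src, unsigned Pos, unsigned Size) {
      return (Src >> Pos) & ((1u << Size) - 1);
    }

    // MIPS ins: overwrite that field of Dst with the low Size bits of Src.
    static uint32_t insModel(uint32_t Dst, uint32_t Src, unsigned Pos, unsigned Size) {
      uint32_t Mask = ((1u << Size) - 1) << Pos;      // field mask
      return (Dst & ~Mask) | ((Src << Pos) & Mask);
    }

PerformANDCombine rewrites the shift-and-mask form into MipsISD::Ext, and PerformORCombine rewrites the or-of-masked-values form into MipsISD::Ins, matching these semantics.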
MipsTargetLowering:: -LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const -{ - MachineFunction &MF = DAG.getMachineFunction(); - MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>(); - unsigned SP = IsN64 ? Mips::SP_64 : Mips::SP; - - assert(getTargetMachine().getFrameLowering()->getStackAlignment() >= - cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue() && - "Cannot lower if the alignment of the allocated space is larger than \ - that of the stack."); - - SDValue Chain = Op.getOperand(0); - SDValue Size = Op.getOperand(1); - DebugLoc dl = Op.getDebugLoc(); - - // Get a reference from Mips stack pointer - SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, SP, getPointerTy()); - - // Subtract the dynamic size from the actual stack size to - // obtain the new stack size. - SDValue Sub = DAG.getNode(ISD::SUB, dl, getPointerTy(), StackPointer, Size); - - // The Sub result contains the new stack start address, so it - // must be placed in the stack pointer register. - Chain = DAG.getCopyToReg(StackPointer.getValue(1), dl, SP, Sub, SDValue()); - - // This node always has two return values: a new stack pointer - // value and a chain - SDVTList VTLs = DAG.getVTList(getPointerTy(), MVT::Other); - SDValue Ptr = DAG.getFrameIndex(MipsFI->getDynAllocFI(), getPointerTy()); - SDValue Ops[] = { Chain, Ptr, Chain.getValue(1) }; - - return DAG.getNode(MipsISD::DynAlloc, dl, VTLs, Ops, 3); -} - -SDValue MipsTargetLowering:: LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { // The first operand is the chain, the second is the condition, the third is @@ -1512,6 +1537,19 @@ LowerSELECT(SDValue Op, SelectionDAG &DAG) const Op.getDebugLoc()); } +SDValue MipsTargetLowering:: +LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const +{ + DebugLoc DL = Op.getDebugLoc(); + EVT Ty = Op.getOperand(0).getValueType(); + SDValue Cond = DAG.getNode(ISD::SETCC, DL, getSetCCResultType(Ty), + Op.getOperand(0), Op.getOperand(1), + Op.getOperand(4)); + + return DAG.getNode(ISD::SELECT, DL, Op.getValueType(), Cond, Op.getOperand(2), + Op.getOperand(3)); +} + SDValue MipsTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { SDValue Cond = CreateFPCmp(DAG, Op); @@ -1614,10 +1652,13 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const const GlobalValue *GV = GA->getGlobal(); EVT PtrVT = getPointerTy(); - if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { - // General Dynamic TLS Model - bool LocalDynamic = GV->hasInternalLinkage(); - unsigned Flag = LocalDynamic ? MipsII::MO_TLSLDM :MipsII::MO_TLSGD; + TLSModel::Model model = getTargetMachine().getTLSModel(GV); + + if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) { + // General Dynamic and Local Dynamic TLS Model. + unsigned Flag = (model == TLSModel::LocalDynamic) ? 
MipsII::MO_TLSLDM + : MipsII::MO_TLSGD; + SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, Flag); SDValue Argument = DAG.getNode(MipsISD::Wrapper, dl, PtrVT, GetGlobalReg(DAG, PtrVT), TGA); @@ -1632,16 +1673,16 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const Entry.Ty = PtrTy; Args.push_back(Entry); - std::pair<SDValue, SDValue> CallResult = - LowerCallTo(DAG.getEntryNode(), PtrTy, + TargetLowering::CallLoweringInfo CLI(DAG.getEntryNode(), PtrTy, false, false, false, false, 0, CallingConv::C, /*isTailCall=*/false, /*doesNotRet=*/false, /*isReturnValueUsed=*/true, TlsGetAddr, Args, DAG, dl); + std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); SDValue Ret = CallResult.first; - if (!LocalDynamic) + if (model != TLSModel::LocalDynamic) return Ret; SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, @@ -1655,7 +1696,7 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const } SDValue Offset; - if (GV->isDeclaration()) { + if (model == TLSModel::InitialExec) { // Initial Exec TLS Model SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, MipsII::MO_GOTTPREL); @@ -1666,6 +1707,7 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const false, false, false, 0); } else { // Local Exec TLS Model + assert(model == TLSModel::LocalExec); SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, MipsII::MO_TPREL_HI); SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, @@ -1942,9 +1984,26 @@ LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { return FrameAddr; } +SDValue MipsTargetLowering::LowerRETURNADDR(SDValue Op, + SelectionDAG &DAG) const { + // check the depth + assert((cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() == 0) && + "Return address can be determined only for current frame."); + + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo *MFI = MF.getFrameInfo(); + EVT VT = Op.getValueType(); + unsigned RA = IsN64 ? Mips::RA_64 : Mips::RA; + MFI->setReturnAddressIsTaken(true); + + // Return RA, which contains the return address. Mark it an implicit live-in. + unsigned Reg = MF.addLiveIn(RA, getRegClassFor(VT)); + return DAG.getCopyFromReg(DAG.getEntryNode(), Op.getDebugLoc(), Reg, VT); +} + // TODO: set SType according to the desired memory barrier behavior. SDValue -MipsTargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG& DAG) const { +MipsTargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const { unsigned SType = 0; DebugLoc dl = Op.getDebugLoc(); return DAG.getNode(MipsISD::Sync, dl, MVT::Other, Op.getOperand(0), @@ -1952,7 +2011,7 @@ MipsTargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG& DAG) const { } SDValue MipsTargetLowering::LowerATOMIC_FENCE(SDValue Op, - SelectionDAG& DAG) const { + SelectionDAG &DAG) const { // FIXME: Need pseudo-fence for 'singlethread' fences // FIXME: Set SType for weaker fences where supported/appropriate. 
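Note: LowerRETURNADDR above handles only depth 0, marking $ra live-in and copying it out; deeper frames hit the assertion. A one-line example of source that reaches it (GCC/Clang builtin, illustrative):

    void *whoCalledMe() { return __builtin_return_address(0); }  // ISD::RETURNADDR, depth 0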
unsigned SType = 0; @@ -1961,6 +2020,210 @@ SDValue MipsTargetLowering::LowerATOMIC_FENCE(SDValue Op, DAG.getConstant(SType, MVT::i32)); } +SDValue MipsTargetLowering::LowerShiftLeftParts(SDValue Op, + SelectionDAG &DAG) const { + DebugLoc DL = Op.getDebugLoc(); + SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1); + SDValue Shamt = Op.getOperand(2); + + // if shamt < 32: + // lo = (shl lo, shamt) + // hi = (or (shl hi, shamt) (srl (srl lo, 1), ~shamt)) + // else: + // lo = 0 + // hi = (shl lo, shamt[4:0]) + SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt, + DAG.getConstant(-1, MVT::i32)); + SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, MVT::i32, Lo, + DAG.getConstant(1, MVT::i32)); + SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, MVT::i32, ShiftRight1Lo, + Not); + SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi, Shamt); + SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i32, ShiftLeftHi, ShiftRightLo); + SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, MVT::i32, Lo, Shamt); + SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt, + DAG.getConstant(0x20, MVT::i32)); + Lo = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, + DAG.getConstant(0, MVT::i32), ShiftLeftLo); + Hi = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, ShiftLeftLo, Or); + + SDValue Ops[2] = {Lo, Hi}; + return DAG.getMergeValues(Ops, 2, DL); +} + +SDValue MipsTargetLowering::LowerShiftRightParts(SDValue Op, SelectionDAG &DAG, + bool IsSRA) const { + DebugLoc DL = Op.getDebugLoc(); + SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1); + SDValue Shamt = Op.getOperand(2); + + // if shamt < 32: + // lo = (or (shl (shl hi, 1), ~shamt) (srl lo, shamt)) + // if isSRA: + // hi = (sra hi, shamt) + // else: + // hi = (srl hi, shamt) + // else: + // if isSRA: + // lo = (sra hi, shamt[4:0]) + // hi = (sra hi, 31) + // else: + // lo = (srl hi, shamt[4:0]) + // hi = 0 + SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt, + DAG.getConstant(-1, MVT::i32)); + SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi, + DAG.getConstant(1, MVT::i32)); + SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, MVT::i32, ShiftLeft1Hi, Not); + SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, MVT::i32, Lo, Shamt); + SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i32, ShiftLeftHi, ShiftRightLo); + SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL, DL, MVT::i32, + Hi, Shamt); + SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt, + DAG.getConstant(0x20, MVT::i32)); + SDValue Shift31 = DAG.getNode(ISD::SRA, DL, MVT::i32, Hi, + DAG.getConstant(31, MVT::i32)); + Lo = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, ShiftRightHi, Or); + Hi = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, + IsSRA ? Shift31 : DAG.getConstant(0, MVT::i32), + ShiftRightHi); + + SDValue Ops[2] = {Lo, Hi}; + return DAG.getMergeValues(Ops, 2, DL); +} + +static SDValue CreateLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD, + SDValue Chain, SDValue Src, unsigned Offset) { + SDValue Ptr = LD->getBasePtr(); + EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT(); + EVT BasePtrVT = Ptr.getValueType(); + DebugLoc DL = LD->getDebugLoc(); + SDVTList VTList = DAG.getVTList(VT, MVT::Other); + + if (Offset) + Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr, + DAG.getConstant(Offset, BasePtrVT)); + + SDValue Ops[] = { Chain, Ptr, Src }; + return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, 3, MemVT, + LD->getMemOperand()); +} + +// Expand an unaligned 32 or 64-bit integer load node. 
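Note: a scalar sketch of the SHL_PARTS expansion implemented by LowerShiftLeftParts above, for an i64 split into 32-bit Lo/Hi halves and a shift amount in [0, 63]. The helper is illustrative; like the MIPS shift instructions, the 32-bit shifts here use only the low five bits of the amount (written explicitly as & 31):

    #include <cstdint>
    #include <utility>

    // Returns the new {Lo, Hi} pair.
    static std::pair<uint32_t, uint32_t> shlPartsModel(uint32_t Lo, uint32_t Hi,
                                                       unsigned Shamt) {
      uint32_t ShiftLeftLo = Lo << (Shamt & 31);
      // (srl (srl lo, 1), ~shamt) computes lo >> (32 - shamt) without a shift by 32.
      uint32_t Or = (Hi << (Shamt & 31)) | ((Lo >> 1) >> (~Shamt & 31));
      if (Shamt & 0x20)                           // shamt >= 32
        return std::make_pair(0u, ShiftLeftLo);   // lo = 0, hi = lo << shamt[4:0]
      return std::make_pair(ShiftLeftLo, Or);     // lo = lo << shamt, hi = combined bits
    }

On 32-bit targets the type legalizer turns a plain long long shift (e.g. V << N) into SHL_PARTS, which is why the *_PARTS nodes above were switched from Expand to Custom when !HasMips64.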
+SDValue MipsTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { + LoadSDNode *LD = cast<LoadSDNode>(Op); + EVT MemVT = LD->getMemoryVT(); + + // Return if load is aligned or if MemVT is neither i32 nor i64. + if ((LD->getAlignment() >= MemVT.getSizeInBits() / 8) || + ((MemVT != MVT::i32) && (MemVT != MVT::i64))) + return SDValue(); + + bool IsLittle = Subtarget->isLittle(); + EVT VT = Op.getValueType(); + ISD::LoadExtType ExtType = LD->getExtensionType(); + SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT); + + assert((VT == MVT::i32) || (VT == MVT::i64)); + + // Expand + // (set dst, (i64 (load baseptr))) + // to + // (set tmp, (ldl (add baseptr, 7), undef)) + // (set dst, (ldr baseptr, tmp)) + if ((VT == MVT::i64) && (ExtType == ISD::NON_EXTLOAD)) { + SDValue LDL = CreateLoadLR(MipsISD::LDL, DAG, LD, Chain, Undef, + IsLittle ? 7 : 0); + return CreateLoadLR(MipsISD::LDR, DAG, LD, LDL.getValue(1), LDL, + IsLittle ? 0 : 7); + } + + SDValue LWL = CreateLoadLR(MipsISD::LWL, DAG, LD, Chain, Undef, + IsLittle ? 3 : 0); + SDValue LWR = CreateLoadLR(MipsISD::LWR, DAG, LD, LWL.getValue(1), LWL, + IsLittle ? 0 : 3); + + // Expand + // (set dst, (i32 (load baseptr))) or + // (set dst, (i64 (sextload baseptr))) or + // (set dst, (i64 (extload baseptr))) + // to + // (set tmp, (lwl (add baseptr, 3), undef)) + // (set dst, (lwr baseptr, tmp)) + if ((VT == MVT::i32) || (ExtType == ISD::SEXTLOAD) || + (ExtType == ISD::EXTLOAD)) + return LWR; + + assert((VT == MVT::i64) && (ExtType == ISD::ZEXTLOAD)); + + // Expand + // (set dst, (i64 (zextload baseptr))) + // to + // (set tmp0, (lwl (add baseptr, 3), undef)) + // (set tmp1, (lwr baseptr, tmp0)) + // (set tmp2, (shl tmp1, 32)) + // (set dst, (srl tmp2, 32)) + DebugLoc DL = LD->getDebugLoc(); + SDValue Const32 = DAG.getConstant(32, MVT::i32); + SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32); + SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32); + SDValue Ops[] = { SRL, LWR.getValue(1) }; + return DAG.getMergeValues(Ops, 2, DL); +} + +static SDValue CreateStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD, + SDValue Chain, unsigned Offset) { + SDValue Ptr = SD->getBasePtr(), Value = SD->getValue(); + EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType(); + DebugLoc DL = SD->getDebugLoc(); + SDVTList VTList = DAG.getVTList(MVT::Other); + + if (Offset) + Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr, + DAG.getConstant(Offset, BasePtrVT)); + + SDValue Ops[] = { Chain, Value, Ptr }; + return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, 3, MemVT, + SD->getMemOperand()); +} + +// Expand an unaligned 32 or 64-bit integer store node. +SDValue MipsTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { + StoreSDNode *SD = cast<StoreSDNode>(Op); + EVT MemVT = SD->getMemoryVT(); + + // Return if store is aligned or if MemVT is neither i32 nor i64. + if ((SD->getAlignment() >= MemVT.getSizeInBits() / 8) || + ((MemVT != MVT::i32) && (MemVT != MVT::i64))) + return SDValue(); + + bool IsLittle = Subtarget->isLittle(); + SDValue Value = SD->getValue(), Chain = SD->getChain(); + EVT VT = Value.getValueType(); + + // Expand + // (store val, baseptr) or + // (truncstore val, baseptr) + // to + // (swl val, (add baseptr, 3)) + // (swr val, baseptr) + if ((VT == MVT::i32) || SD->isTruncatingStore()) { + SDValue SWL = CreateStoreLR(MipsISD::SWL, DAG, SD, Chain, + IsLittle ? 3 : 0); + return CreateStoreLR(MipsISD::SWR, DAG, SD, SWL, IsLittle ? 
0 : 3); + } + + assert(VT == MVT::i64); + + // Expand + // (store val, baseptr) + // to + // (sdl val, (add baseptr, 7)) + // (sdr val, baseptr) + SDValue SDL = CreateStoreLR(MipsISD::SDL, DAG, SD, Chain, IsLittle ? 7 : 0); + return CreateStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7); +} + //===----------------------------------------------------------------------===// // Calling Convention Implementation //===----------------------------------------------------------------------===// @@ -2153,11 +2416,11 @@ static unsigned getNextIntArgReg(unsigned Reg) { // Write ByVal Arg to arg registers and stack. static void -WriteByValArg(SDValue& ByValChain, SDValue Chain, DebugLoc dl, - SmallVector<std::pair<unsigned, SDValue>, 16>& RegsToPass, - SmallVector<SDValue, 8>& MemOpChains, int& LastFI, +WriteByValArg(SDValue Chain, DebugLoc dl, + SmallVector<std::pair<unsigned, SDValue>, 16> &RegsToPass, + SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr, MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg, - const CCValAssign &VA, const ISD::ArgFlagsTy& Flags, + const CCValAssign &VA, const ISD::ArgFlagsTy &Flags, MVT PtrType, bool isLittle) { unsigned LocMemOffset = VA.getLocMemOffset(); unsigned Offset = 0; @@ -2229,26 +2492,26 @@ WriteByValArg(SDValue& ByValChain, SDValue Chain, DebugLoc dl, return; } - // Create a fixed object on stack at offset LocMemOffset and copy - // remaining part of byval arg to it using memcpy. + // Copy remaining part of byval arg using memcpy. SDValue Src = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg, DAG.getConstant(Offset, MVT::i32)); - LastFI = MFI->CreateFixedObject(RemainingSize, LocMemOffset, true); - SDValue Dst = DAG.getFrameIndex(LastFI, PtrType); - ByValChain = DAG.getMemcpy(ByValChain, dl, Dst, Src, - DAG.getConstant(RemainingSize, MVT::i32), - std::min(ByValAlign, (unsigned)4), - /*isVolatile=*/false, /*AlwaysInline=*/false, - MachinePointerInfo(0), MachinePointerInfo(0)); + SDValue Dst = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, + DAG.getIntPtrConstant(LocMemOffset)); + Chain = DAG.getMemcpy(Chain, dl, Dst, Src, + DAG.getConstant(RemainingSize, MVT::i32), + std::min(ByValAlign, (unsigned)4), + /*isVolatile=*/false, /*AlwaysInline=*/false, + MachinePointerInfo(0), MachinePointerInfo(0)); + MemOpChains.push_back(Chain); } // Copy Mips64 byVal arg to registers and stack. void static -PassByValArg64(SDValue& ByValChain, SDValue Chain, DebugLoc dl, - SmallVector<std::pair<unsigned, SDValue>, 16>& RegsToPass, - SmallVector<SDValue, 8>& MemOpChains, int& LastFI, +PassByValArg64(SDValue Chain, DebugLoc dl, + SmallVector<std::pair<unsigned, SDValue>, 16> &RegsToPass, + SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr, MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg, - const CCValAssign &VA, const ISD::ArgFlagsTy& Flags, + const CCValAssign &VA, const ISD::ArgFlagsTy &Flags, EVT PtrTy, bool isLittle) { unsigned ByValSize = Flags.getByValSize(); unsigned Alignment = std::min(Flags.getByValAlign(), (unsigned)8); @@ -2318,30 +2581,35 @@ PassByValArg64(SDValue& ByValChain, SDValue Chain, DebugLoc dl, assert(MemCpySize && "MemCpySize must not be zero."); - // Create a fixed object on stack at offset LocMemOffset and copy - // remainder of byval arg to it with memcpy. + // Copy remainder of byval arg to it with memcpy. 
SDValue Src = DAG.getNode(ISD::ADD, dl, PtrTy, Arg, DAG.getConstant(Offset, PtrTy)); - LastFI = MFI->CreateFixedObject(MemCpySize, LocMemOffset, true); - SDValue Dst = DAG.getFrameIndex(LastFI, PtrTy); - ByValChain = DAG.getMemcpy(ByValChain, dl, Dst, Src, - DAG.getConstant(MemCpySize, PtrTy), Alignment, - /*isVolatile=*/false, /*AlwaysInline=*/false, - MachinePointerInfo(0), MachinePointerInfo(0)); + SDValue Dst = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, + DAG.getIntPtrConstant(LocMemOffset)); + Chain = DAG.getMemcpy(Chain, dl, Dst, Src, + DAG.getConstant(MemCpySize, PtrTy), Alignment, + /*isVolatile=*/false, /*AlwaysInline=*/false, + MachinePointerInfo(0), MachinePointerInfo(0)); + MemOpChains.push_back(Chain); } /// LowerCall - functions arguments are copied from virtual regs to /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted. /// TODO: isTailCall. SDValue -MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee, - CallingConv::ID CallConv, bool isVarArg, - bool doesNotRet, bool &isTailCall, - const SmallVectorImpl<ISD::OutputArg> &Outs, - const SmallVectorImpl<SDValue> &OutVals, - const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, +MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { + SelectionDAG &DAG = CLI.DAG; + DebugLoc &dl = CLI.DL; + SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; + SmallVector<SDValue, 32> &OutVals = CLI.OutVals; + SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; + SDValue Chain = CLI.Chain; + SDValue Callee = CLI.Callee; + bool &isTailCall = CLI.IsTailCall; + CallingConv::ID CallConv = CLI.CallConv; + bool isVarArg = CLI.IsVarArg; + // MIPs target does not yet support tail call optimization. isTailCall = false; @@ -2356,7 +2624,9 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee, CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), getTargetMachine(), ArgLocs, *DAG.getContext()); - if (IsO32) + if (CallConv == CallingConv::Fast) + CCInfo.AnalyzeCallOperands(Outs, CC_Mips_FastCC); + else if (IsO32) CCInfo.AnalyzeCallOperands(Outs, CC_MipsO32); else if (HasMips64) AnalyzeMips64CallOperands(CCInfo, Outs); @@ -2365,54 +2635,32 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee, // Get a count of how many bytes are to be pushed on the stack. unsigned NextStackOffset = CCInfo.getNextStackOffset(); - - // Chain is the output chain of the last Load/Store or CopyToReg node. - // ByValChain is the output chain of the last Memcpy node created for copying - // byval arguments to the stack. - SDValue Chain, CallSeqStart, ByValChain; - SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, true); - Chain = CallSeqStart = DAG.getCALLSEQ_START(InChain, NextStackOffsetVal); - ByValChain = InChain; - - // If this is the first call, create a stack frame object that points to - // a location to which .cprestore saves $gp. - if (IsO32 && IsPIC && MipsFI->globalBaseRegFixed() && !MipsFI->getGPFI()) - MipsFI->setGPFI(MFI->CreateFixedObject(4, 0, true)); - - // Get the frame index of the stack frame object that points to the location - // of dynamically allocated area on the stack. - int DynAllocFI = MipsFI->getDynAllocFI(); + unsigned StackAlignment = TFL->getStackAlignment(); + NextStackOffset = RoundUpToAlignment(NextStackOffset, StackAlignment); // Update size of the maximum argument space. // For O32, a minimum of four words (16 bytes) of argument space is // allocated. 
- if (IsO32) + if (IsO32 && (CallConv != CallingConv::Fast)) NextStackOffset = std::max(NextStackOffset, (unsigned)16); - unsigned MaxCallFrameSize = MipsFI->getMaxCallFrameSize(); - - if (MaxCallFrameSize < NextStackOffset) { - MipsFI->setMaxCallFrameSize(NextStackOffset); - - // Set the offsets relative to $sp of the $gp restore slot and dynamically - // allocated stack space. These offsets must be aligned to a boundary - // determined by the stack alignment of the ABI. - unsigned StackAlignment = TFL->getStackAlignment(); - NextStackOffset = (NextStackOffset + StackAlignment - 1) / - StackAlignment * StackAlignment; + // Chain is the output chain of the last Load/Store or CopyToReg node. + // ByValChain is the output chain of the last Memcpy node created for copying + // byval arguments to the stack. + SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, true); + Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal); - if (MipsFI->needGPSaveRestore()) - MFI->setObjectOffset(MipsFI->getGPFI(), NextStackOffset); + SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, + IsN64 ? Mips::SP_64 : Mips::SP, + getPointerTy()); - MFI->setObjectOffset(DynAllocFI, NextStackOffset); - } + if (MipsFI->getMaxCallFrameSize() < NextStackOffset) + MipsFI->setMaxCallFrameSize(NextStackOffset); // With EABI is it possible to have 16 args on registers. SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass; SmallVector<SDValue, 8> MemOpChains; - int FirstFI = -MFI->getNumFixedObjects() - 1, LastFI = 0; - // Walk the register/memloc assignments, inserting copies/loads. for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { SDValue Arg = OutVals[i]; @@ -2425,11 +2673,11 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee, assert(Flags.getByValSize() && "ByVal args of size 0 should have been ignored by front-end."); if (IsO32) - WriteByValArg(ByValChain, Chain, dl, RegsToPass, MemOpChains, LastFI, + WriteByValArg(Chain, dl, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg, VA, Flags, getPointerTy(), Subtarget->isLittle()); else - PassByValArg64(ByValChain, Chain, dl, RegsToPass, MemOpChains, LastFI, + PassByValArg64(Chain, dl, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg, VA, Flags, getPointerTy(), Subtarget->isLittle()); continue; @@ -2479,29 +2727,14 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee, // Register can't get to this point... assert(VA.isMemLoc()); - // Create the frame index object for this incoming parameter - LastFI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8, - VA.getLocMemOffset(), true); - SDValue PtrOff = DAG.getFrameIndex(LastFI, getPointerTy()); - // emit ISD::STORE whichs stores the // parameter value to a stack Location + SDValue PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, + DAG.getIntPtrConstant(VA.getLocMemOffset())); MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo(), false, false, 0)); } - // Extend range of indices of frame objects for outgoing arguments that were - // created during this function call. Skip this step if no such objects were - // created. - if (LastFI) - MipsFI->extendOutArgFIRange(FirstFI, LastFI); - - // If a memcpy has been created to copy a byval arg to a stack, replace the - // chain input of CallSeqStart with ByValChain. - if (InChain != ByValChain) - DAG.UpdateNodeOperands(CallSeqStart.getNode(), ByValChain, - NextStackOffsetVal); - // Transform all store nodes into one single node because all store // nodes are independent of each other. 
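Note: an example of a call that takes the byval path rewritten above, assuming the front end marks the aggregate byval under O32 (names illustrative): the first 16 bytes are passed in $a0-$a3 by WriteByValArg, and the 8-byte remainder is now memcpy'd straight to StackPtr + LocMemOffset instead of going through a fixed frame object:

    struct Big { int A[6]; };                    // 24 bytes
    int callee(Big B);                           // hypothetical callee
    int caller(Big *P) { return callee(*P); }    // 16 bytes in regs, 8 via memcpy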
if (!MemOpChains.empty()) @@ -2565,6 +2798,9 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee, } } + // T9 register operand. + SDValue T9; + // T9 should contain the address of the callee function if // -reloction-model=pic or it is an indirect call. if (IsPICCall || !GlobalOrExternal) { @@ -2572,7 +2808,19 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee, unsigned T9Reg = IsN64 ? Mips::T9_64 : Mips::T9; Chain = DAG.getCopyToReg(Chain, dl, T9Reg, Callee, SDValue(0, 0)); InFlag = Chain.getValue(1); - Callee = DAG.getRegister(T9Reg, getPointerTy()); + + if (Subtarget->inMips16Mode()) + T9 = DAG.getRegister(T9Reg, getPointerTy()); + else + Callee = DAG.getRegister(T9Reg, getPointerTy()); + } + + // Insert node "GP copy globalreg" before call to function. + // Lazy-binding stubs require GP to point to the GOT. + if (IsPICCall) { + unsigned GPReg = IsN64 ? Mips::GP_64 : Mips::GP; + EVT Ty = IsN64 ? MVT::i64 : MVT::i32; + RegsToPass.push_back(std::make_pair(GPReg, GetGlobalReg(DAG, Ty))); } // Build a sequence of copy-to-reg nodes chained together with token @@ -2600,6 +2848,10 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee, Ops.push_back(DAG.getRegister(RegsToPass[i].first, RegsToPass[i].second.getValueType())); + // Add T9 register operand. + if (T9.getNode()) + Ops.push_back(T9); + // Add a register mask operand representing the call-preserved registers. const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); @@ -2613,8 +2865,7 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee, InFlag = Chain.getValue(1); // Create the CALLSEQ_END node. - Chain = DAG.getCALLSEQ_END(Chain, - DAG.getIntPtrConstant(NextStackOffset, true), + Chain = DAG.getCALLSEQ_END(Chain, NextStackOffsetVal, DAG.getIntPtrConstant(0, true), InFlag); InFlag = Chain.getValue(1); @@ -2635,7 +2886,7 @@ MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, // Assign locations to each value returned by this call. SmallVector<CCValAssign, 16> RVLocs; CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), - getTargetMachine(), RVLocs, *DAG.getContext()); + getTargetMachine(), RVLocs, *DAG.getContext()); CCInfo.AnalyzeCallResult(Ins, RetCC_Mips); @@ -2654,9 +2905,9 @@ MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, // Formal Arguments Calling Convention Implementation //===----------------------------------------------------------------------===// static void ReadByValArg(MachineFunction &MF, SDValue Chain, DebugLoc dl, - std::vector<SDValue>& OutChains, + std::vector<SDValue> &OutChains, SelectionDAG &DAG, unsigned NumWords, SDValue FIN, - const CCValAssign &VA, const ISD::ArgFlagsTy& Flags, + const CCValAssign &VA, const ISD::ArgFlagsTy &Flags, const Argument *FuncArg) { unsigned LocMem = VA.getLocMemOffset(); unsigned FirstWord = LocMem / 4; @@ -2668,7 +2919,7 @@ static void ReadByValArg(MachineFunction &MF, SDValue Chain, DebugLoc dl, break; unsigned SrcReg = O32IntRegs[CurWord]; - unsigned Reg = AddLiveIn(MF, SrcReg, Mips::CPURegsRegisterClass); + unsigned Reg = AddLiveIn(MF, SrcReg, &Mips::CPURegsRegClass); SDValue StorePtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIN, DAG.getConstant(i * 4, MVT::i32)); SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(Reg, MVT::i32), @@ -2681,8 +2932,8 @@ static void ReadByValArg(MachineFunction &MF, SDValue Chain, DebugLoc dl, // Create frame object on stack and copy registers used for byval passing to it. 
static unsigned CopyMips64ByValRegs(MachineFunction &MF, SDValue Chain, DebugLoc dl, - std::vector<SDValue>& OutChains, SelectionDAG &DAG, - const CCValAssign &VA, const ISD::ArgFlagsTy& Flags, + std::vector<SDValue> &OutChains, SelectionDAG &DAG, + const CCValAssign &VA, const ISD::ArgFlagsTy &Flags, MachineFrameInfo *MFI, bool IsRegLoc, SmallVectorImpl<SDValue> &InVals, MipsFunctionInfo *MipsFI, EVT PtrTy, const Argument *FuncArg) { @@ -2705,7 +2956,7 @@ CopyMips64ByValRegs(MachineFunction &MF, SDValue Chain, DebugLoc dl, // Copy arg registers. for (unsigned I = 0; (Reg != Mips64IntRegs + 8) && (I < NumRegs); ++Reg, ++I) { - unsigned VReg = AddLiveIn(MF, *Reg, Mips::CPU64RegsRegisterClass); + unsigned VReg = AddLiveIn(MF, *Reg, &Mips::CPU64RegsRegClass); SDValue StorePtr = DAG.getNode(ISD::ADD, dl, PtrTy, FIN, DAG.getConstant(I * 8, PtrTy)); SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(VReg, MVT::i64), @@ -2741,7 +2992,9 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), getTargetMachine(), ArgLocs, *DAG.getContext()); - if (IsO32) + if (CallConv == CallingConv::Fast) + CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FastCC); + else if (IsO32) CCInfo.AnalyzeFormalArguments(Ins, CC_MipsO32); else CCInfo.AnalyzeFormalArguments(Ins, CC_Mips); @@ -2781,13 +3034,13 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, const TargetRegisterClass *RC; if (RegVT == MVT::i32) - RC = Mips::CPURegsRegisterClass; + RC = &Mips::CPURegsRegClass; else if (RegVT == MVT::i64) - RC = Mips::CPU64RegsRegisterClass; + RC = &Mips::CPU64RegsRegClass; else if (RegVT == MVT::f32) - RC = Mips::FGR32RegisterClass; + RC = &Mips::FGR32RegClass; else if (RegVT == MVT::f64) - RC = HasMips64 ? Mips::FGR64RegisterClass : Mips::AFGR64RegisterClass; + RC = HasMips64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass; else llvm_unreachable("RegVT not supported by FormalArguments Lowering"); @@ -2861,8 +3114,9 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, const uint16_t *ArgRegs = IsO32 ? O32IntRegs : Mips64IntRegs; unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumOfRegs); int FirstRegSlotOffset = IsO32 ? 0 : -64 ; // offset of $a0's slot. - const TargetRegisterClass *RC - = IsO32 ? Mips::CPURegsRegisterClass : Mips::CPU64RegsRegisterClass; + const TargetRegisterClass *RC = IsO32 ? + (const TargetRegisterClass*)&Mips::CPURegsRegClass : + (const TargetRegisterClass*)&Mips::CPU64RegsRegClass; unsigned RegSize = RC->getSize(); int RegSlotOffset = FirstRegSlotOffset + Idx * RegSize; @@ -2926,7 +3180,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain, // CCState - Info about the registers and stack slot. CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), - getTargetMachine(), RVLocs, *DAG.getContext()); + getTargetMachine(), RVLocs, *DAG.getContext()); // Analize return values. 
CCInfo.AnalyzeReturn(Outs, RetCC_Mips); @@ -2972,11 +3226,10 @@ MipsTargetLowering::LowerReturn(SDValue Chain, // Return on Mips is always a "jr $ra" if (Flag.getNode()) - return DAG.getNode(MipsISD::Ret, dl, MVT::Other, - Chain, DAG.getRegister(Mips::RA, MVT::i32), Flag); - else // Return Void - return DAG.getNode(MipsISD::Ret, dl, MVT::Other, - Chain, DAG.getRegister(Mips::RA, MVT::i32)); + return DAG.getNode(MipsISD::Ret, dl, MVT::Other, Chain, Flag); + + // Return Void + return DAG.getNode(MipsISD::Ret, dl, MVT::Other, Chain); } //===----------------------------------------------------------------------===// @@ -2995,13 +3248,19 @@ getConstraintType(const std::string &Constraint) const // unless generating MIPS16 code. // 'y' : Equivalent to r; retained for // backwards compatibility. - // 'f' : Floating Point registers. + // 'c' : A register suitable for use in an indirect + // jump. This will always be $25 for -mabicalls. + // 'l' : The lo register. 1 word storage. + // 'x' : The hilo register pair. Double word storage. if (Constraint.size() == 1) { switch (Constraint[0]) { default : break; case 'd': case 'y': case 'f': + case 'c': + case 'l': + case 'x': return C_RegisterClass; } } @@ -3035,6 +3294,22 @@ MipsTargetLowering::getSingleConstraintMatchWeight( if (type->isFloatTy()) weight = CW_Register; break; + case 'c': // $25 for indirect jumps + case 'l': // lo register + case 'x': // hilo register pair + if (type->isIntegerTy()) + weight = CW_SpecificReg; + break; + case 'I': // signed 16 bit immediate + case 'J': // integer zero + case 'K': // unsigned 16 bit immediate + case 'L': // signed 32 bit immediate where lower 16 bits are 0 + case 'N': // immediate in the range of -65535 to -1 (inclusive) + case 'O': // signed 15 bit immediate (+- 16383) + case 'P': // immediate in the range of 65535 to 1 (inclusive) + if (isa<ConstantInt>(CallOperandVal)) + weight = CW_Constant; + break; } return weight; } @@ -3050,30 +3325,152 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const case 'd': // Address register. Same as 'r' unless generating MIPS16 code. case 'y': // Same as 'r'. Exists for compatibility. 
case 'r': - if (VT == MVT::i32) - return std::make_pair(0U, Mips::CPURegsRegisterClass); - assert(VT == MVT::i64 && "Unexpected type."); - return std::make_pair(0U, Mips::CPU64RegsRegisterClass); + if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) + return std::make_pair(0U, &Mips::CPURegsRegClass); + if (VT == MVT::i64 && !HasMips64) + return std::make_pair(0U, &Mips::CPURegsRegClass); + if (VT == MVT::i64 && HasMips64) + return std::make_pair(0U, &Mips::CPU64RegsRegClass); + // This will generate an error message + return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0)); case 'f': if (VT == MVT::f32) - return std::make_pair(0U, Mips::FGR32RegisterClass); + return std::make_pair(0U, &Mips::FGR32RegClass); if ((VT == MVT::f64) && (!Subtarget->isSingleFloat())) { if (Subtarget->isFP64bit()) - return std::make_pair(0U, Mips::FGR64RegisterClass); - else - return std::make_pair(0U, Mips::AFGR64RegisterClass); + return std::make_pair(0U, &Mips::FGR64RegClass); + return std::make_pair(0U, &Mips::AFGR64RegClass); } + break; + case 'c': // register suitable for indirect jump + if (VT == MVT::i32) + return std::make_pair((unsigned)Mips::T9, &Mips::CPURegsRegClass); + assert(VT == MVT::i64 && "Unexpected type."); + return std::make_pair((unsigned)Mips::T9_64, &Mips::CPU64RegsRegClass); + case 'l': // register suitable for indirect jump + if (VT == MVT::i32) + return std::make_pair((unsigned)Mips::LO, &Mips::HILORegClass); + return std::make_pair((unsigned)Mips::LO64, &Mips::HILO64RegClass); + case 'x': // register suitable for indirect jump + // Fixme: Not triggering the use of both hi and low + // This will generate an error message + return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0)); } } return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); } +/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops +/// vector. If it is invalid, don't add anything to Ops. +void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op, + std::string &Constraint, + std::vector<SDValue>&Ops, + SelectionDAG &DAG) const { + SDValue Result(0, 0); + + // Only support length 1 constraints for now. 
+ if (Constraint.length() > 1) return; + + char ConstraintLetter = Constraint[0]; + switch (ConstraintLetter) { + default: break; // This will fall through to the generic implementation + case 'I': // Signed 16 bit constant + // If this fails, the parent routine will give an error + if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { + EVT Type = Op.getValueType(); + int64_t Val = C->getSExtValue(); + if (isInt<16>(Val)) { + Result = DAG.getTargetConstant(Val, Type); + break; + } + } + return; + case 'J': // integer zero + if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { + EVT Type = Op.getValueType(); + int64_t Val = C->getZExtValue(); + if (Val == 0) { + Result = DAG.getTargetConstant(0, Type); + break; + } + } + return; + case 'K': // unsigned 16 bit immediate + if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { + EVT Type = Op.getValueType(); + uint64_t Val = (uint64_t)C->getZExtValue(); + if (isUInt<16>(Val)) { + Result = DAG.getTargetConstant(Val, Type); + break; + } + } + return; + case 'L': // signed 32 bit immediate where lower 16 bits are 0 + if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { + EVT Type = Op.getValueType(); + int64_t Val = C->getSExtValue(); + if ((isInt<32>(Val)) && ((Val & 0xffff) == 0)){ + Result = DAG.getTargetConstant(Val, Type); + break; + } + } + return; + case 'N': // immediate in the range of -65535 to -1 (inclusive) + if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { + EVT Type = Op.getValueType(); + int64_t Val = C->getSExtValue(); + if ((Val >= -65535) && (Val <= -1)) { + Result = DAG.getTargetConstant(Val, Type); + break; + } + } + return; + case 'O': // signed 15 bit immediate + if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { + EVT Type = Op.getValueType(); + int64_t Val = C->getSExtValue(); + if ((isInt<15>(Val))) { + Result = DAG.getTargetConstant(Val, Type); + break; + } + } + return; + case 'P': // immediate in the range of 1 to 65535 (inclusive) + if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { + EVT Type = Op.getValueType(); + int64_t Val = C->getSExtValue(); + if ((Val <= 65535) && (Val >= 1)) { + Result = DAG.getTargetConstant(Val, Type); + break; + } + } + return; + } + + if (Result.getNode()) { + Ops.push_back(Result); + return; + } + + TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); +} + bool MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { // The Mips target isn't yet aware of offsets. return false; } +EVT MipsTargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign, + unsigned SrcAlign, bool IsZeroVal, + bool MemcpyStrSrc, + MachineFunction &MF) const { + if (Subtarget->hasMips64()) + return MVT::i64; + + return MVT::i32; +} + bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { if (VT != MVT::f32 && VT != MVT::f64) return false; |
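Note: a small usage example for the newly handled immediate constraints in LowerAsmOperandForConstraint (GCC/Clang inline-asm syntax; the function is illustrative). 'I' accepts a signed 16-bit constant, so the value is encoded directly in the addiu instead of being materialized into a register:

    int addSmallImm(int X) {
      int R;
      asm("addiu %0, %1, %2" : "=r"(R) : "r"(X), "I"(1234));
      return R;
    }

The other letters follow the same shape: 'J' is the integer zero, 'K' an unsigned 16-bit immediate, 'L' a 32-bit value with the low 16 bits clear, and 'N'/'O'/'P' the ranges documented in the cases above.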