Diffstat (limited to 'contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp')
-rw-r--r-- | contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp | 383
1 file changed, 184 insertions(+), 199 deletions(-)
diff --git a/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp b/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 6eaf991..e7330c6 100644
--- a/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -31,6 +31,7 @@
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include <cctype>
+
 using namespace llvm;
 
 static cl::opt<bool> DisableHazardRecognizer(
@@ -76,25 +77,27 @@ void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
 /// may be overloaded in the target code to do that.
 unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                              const MCAsmInfo &MAI) const {
-
-
   // Count the number of instructions in the asm.
   bool atInsnStart = true;
-  unsigned Length = 0;
+  unsigned InstCount = 0;
   for (; *Str; ++Str) {
     if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
-                                strlen(MAI.getSeparatorString())) == 0)
+                                strlen(MAI.getSeparatorString())) == 0) {
       atInsnStart = true;
-    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
-      Length += MAI.getMaxInstLength();
+    } else if (strncmp(Str, MAI.getCommentString(),
+                       strlen(MAI.getCommentString())) == 0) {
+      // Stop counting as an instruction after a comment until the next
+      // separator.
       atInsnStart = false;
     }
-    if (atInsnStart && strncmp(Str, MAI.getCommentString(),
-                               strlen(MAI.getCommentString())) == 0)
+
+    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
+      ++InstCount;
       atInsnStart = false;
+    }
   }
 
-  return Length;
+  return InstCount * MAI.getMaxInstLength();
 }
 
 /// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
@@ -108,23 +111,24 @@ TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
   while (!MBB->succ_empty())
     MBB->removeSuccessor(MBB->succ_begin());
 
+  // Save off the debug loc before erasing the instruction.
+  DebugLoc DL = Tail->getDebugLoc();
+
   // Remove all the dead instructions from the end of MBB.
   MBB->erase(Tail, MBB->end());
 
   // If MBB isn't immediately before MBB, insert a branch to it.
   if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
-    InsertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(),
-                 Tail->getDebugLoc());
+    InsertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
   MBB->addSuccessor(NewDest);
 }
 
-MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr *MI,
-                                                      bool NewMI,
-                                                      unsigned Idx1,
+MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
+                                                      bool NewMI, unsigned Idx1,
                                                       unsigned Idx2) const {
-  const MCInstrDesc &MCID = MI->getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
   bool HasDef = MCID.getNumDefs();
-  if (HasDef && !MI->getOperand(0).isReg())
+  if (HasDef && !MI.getOperand(0).isReg())
     // No idea how to commute this instruction. Target should implement its own.
     return nullptr;
 
@@ -133,60 +137,62 @@ MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr *MI,
   assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
          CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
          "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
-  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
+  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
          "This only knows how to commute register operands so far");
 
-  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
-  unsigned Reg1 = MI->getOperand(Idx1).getReg();
-  unsigned Reg2 = MI->getOperand(Idx2).getReg();
-  unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
-  unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
-  unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
-  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
-  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
-  bool Reg1IsUndef = MI->getOperand(Idx1).isUndef();
-  bool Reg2IsUndef = MI->getOperand(Idx2).isUndef();
-  bool Reg1IsInternal = MI->getOperand(Idx1).isInternalRead();
-  bool Reg2IsInternal = MI->getOperand(Idx2).isInternalRead();
+  unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
+  unsigned Reg1 = MI.getOperand(Idx1).getReg();
+  unsigned Reg2 = MI.getOperand(Idx2).getReg();
+  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
+  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
+  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
+  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
+  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
+  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
+  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
+  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
+  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
   // If destination is tied to either of the commuted source register, then
   // it must be updated.
   if (HasDef && Reg0 == Reg1 &&
-      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
+      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
     Reg2IsKill = false;
     Reg0 = Reg2;
     SubReg0 = SubReg2;
   } else if (HasDef && Reg0 == Reg2 &&
-             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
+             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
     Reg1IsKill = false;
     Reg0 = Reg1;
     SubReg0 = SubReg1;
   }
 
+  MachineInstr *CommutedMI = nullptr;
   if (NewMI) {
     // Create a new instruction.
-    MachineFunction &MF = *MI->getParent()->getParent();
-    MI = MF.CloneMachineInstr(MI);
+    MachineFunction &MF = *MI.getParent()->getParent();
+    CommutedMI = MF.CloneMachineInstr(&MI);
+  } else {
+    CommutedMI = &MI;
   }
 
   if (HasDef) {
-    MI->getOperand(0).setReg(Reg0);
-    MI->getOperand(0).setSubReg(SubReg0);
+    CommutedMI->getOperand(0).setReg(Reg0);
+    CommutedMI->getOperand(0).setSubReg(SubReg0);
   }
-  MI->getOperand(Idx2).setReg(Reg1);
-  MI->getOperand(Idx1).setReg(Reg2);
-  MI->getOperand(Idx2).setSubReg(SubReg1);
-  MI->getOperand(Idx1).setSubReg(SubReg2);
-  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
-  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
-  MI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
-  MI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
-  MI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
-  MI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
-  return MI;
+  CommutedMI->getOperand(Idx2).setReg(Reg1);
+  CommutedMI->getOperand(Idx1).setReg(Reg2);
+  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
+  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
+  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
+  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
+  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
+  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
+  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
+  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
+  return CommutedMI;
 }
 
-MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
-                                                  bool NewMI,
+MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                   unsigned OpIdx1,
                                                   unsigned OpIdx2) const {
   // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
@@ -194,7 +200,7 @@ MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
   // called below.
   if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
       !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
-    assert(MI->isCommutable() &&
+    assert(MI.isCommutable() &&
            "Precondition violation: MI must be commutable.");
     return nullptr;
   }
@@ -232,13 +238,13 @@ bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
   return true;
 }
 
-bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
+bool TargetInstrInfo::findCommutedOpIndices(MachineInstr &MI,
                                             unsigned &SrcOpIdx1,
                                             unsigned &SrcOpIdx2) const {
-  assert(!MI->isBundle() &&
+  assert(!MI.isBundle() &&
          "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
 
-  const MCInstrDesc &MCID = MI->getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
   if (!MCID.isCommutable())
     return false;
 
@@ -250,39 +256,37 @@ bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                             CommutableOpIdx1, CommutableOpIdx2))
     return false;
 
-  if (!MI->getOperand(SrcOpIdx1).isReg() ||
-      !MI->getOperand(SrcOpIdx2).isReg())
+  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
     // No idea.
     return false;
   return true;
 }
 
-bool
-TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
-  if (!MI->isTerminator()) return false;
+bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
+  if (!MI.isTerminator()) return false;
 
   // Conditional branch is a special case.
-  if (MI->isBranch() && !MI->isBarrier())
+  if (MI.isBranch() && !MI.isBarrier())
     return true;
-  if (!MI->isPredicable())
+  if (!MI.isPredicable())
     return true;
   return !isPredicated(MI);
 }
 
 bool TargetInstrInfo::PredicateInstruction(
-    MachineInstr *MI, ArrayRef<MachineOperand> Pred) const {
+    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
   bool MadeChange = false;
 
-  assert(!MI->isBundle() &&
+  assert(!MI.isBundle() &&
          "TargetInstrInfo::PredicateInstruction() can't handle bundles");
 
-  const MCInstrDesc &MCID = MI->getDesc();
-  if (!MI->isPredicable())
+  const MCInstrDesc &MCID = MI.getDesc();
+  if (!MI.isPredicable())
     return false;
 
-  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
+  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
     if (MCID.OpInfo[i].isPredicate()) {
-      MachineOperand &MO = MI->getOperand(i);
+      MachineOperand &MO = MI.getOperand(i);
       if (MO.isReg()) {
         MO.setReg(Pred[j].getReg());
         MadeChange = true;
@@ -299,13 +303,12 @@ bool TargetInstrInfo::PredicateInstruction(
   return MadeChange;
 }
 
-bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
+bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr &MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
-  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
-                                  oe = MI->memoperands_end();
-       o != oe;
-       ++o) {
+  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
+                                  oe = MI.memoperands_end();
+       o != oe; ++o) {
     if ((*o)->isLoad()) {
       if (const FixedStackPseudoSourceValue *Value =
           dyn_cast_or_null<FixedStackPseudoSourceValue>(
@@ -319,13 +322,12 @@ bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
   return false;
 }
 
-bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
+bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr &MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
-  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
-                                  oe = MI->memoperands_end();
-       o != oe;
-       ++o) {
+  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
+                                  oe = MI.memoperands_end();
+       o != oe; ++o) {
     if ((*o)->isStore()) {
       if (const FixedStackPseudoSourceValue *Value =
           dyn_cast_or_null<FixedStackPseudoSourceValue>(
@@ -372,40 +374,37 @@ bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
 
 void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
-                                    unsigned DestReg,
-                                    unsigned SubIdx,
-                                    const MachineInstr *Orig,
+                                    unsigned DestReg, unsigned SubIdx,
+                                    const MachineInstr &Orig,
                                     const TargetRegisterInfo &TRI) const {
-  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
+  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
   MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
   MBB.insert(I, MI);
 }
 
-bool
-TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
-                                  const MachineInstr *MI1,
-                                  const MachineRegisterInfo *MRI) const {
-  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
+bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
+                                       const MachineInstr &MI1,
+                                       const MachineRegisterInfo *MRI) const {
+  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
 }
 
-MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
+MachineInstr *TargetInstrInfo::duplicate(MachineInstr &Orig,
                                          MachineFunction &MF) const {
-  assert(!Orig->isNotDuplicable() &&
-         "Instruction cannot be duplicated");
-  return MF.CloneMachineInstr(Orig);
+  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
+  return MF.CloneMachineInstr(&Orig);
 }
 
 // If the COPY instruction in MI can be folded to a stack operation, return
 // the register class to use.
-static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
+static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                               unsigned FoldIdx) {
-  assert(MI->isCopy() && "MI must be a COPY instruction");
-  if (MI->getNumOperands() != 2)
+  assert(MI.isCopy() && "MI must be a COPY instruction");
+  if (MI.getNumOperands() != 2)
    return nullptr;
 
   assert(FoldIdx<2 && "FoldIdx refers no nonexistent operand");
-  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
-  const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);
+  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
+  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
 
   if (FoldOp.getSubReg() || LiveOp.getSubReg())
     return nullptr;
@@ -416,7 +415,7 @@ static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
   assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
          "Cannot fold physregs");
 
-  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
   const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
 
   if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
@@ -433,17 +432,17 @@ void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
   llvm_unreachable("Not a MachO target");
 }
 
-static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
+static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                     ArrayRef<unsigned> Ops, int FrameIndex,
                                     const TargetInstrInfo &TII) {
   unsigned StartIdx = 0;
-  switch (MI->getOpcode()) {
+  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP:
    StartIdx = 2; // Skip ID, nShadowBytes.
    break;
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable.
-    PatchPointOpers opers(MI);
+    PatchPointOpers opers(&MI);
    StartIdx = opers.getVarIdx();
    break;
  }
@@ -459,15 +458,15 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
   }
 
   MachineInstr *NewMI =
-      MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
+      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
   MachineInstrBuilder MIB(MF, NewMI);
 
   // No need to fold return, the meta data, and function arguments
   for (unsigned i = 0; i < StartIdx; ++i)
-    MIB.addOperand(MI->getOperand(i));
+    MIB.addOperand(MI.getOperand(i));
 
-  for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
-    MachineOperand &MO = MI->getOperand(i);
+  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
+    MachineOperand &MO = MI.getOperand(i);
     if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
       unsigned SpillSize;
       unsigned SpillOffset;
@@ -495,35 +494,35 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
 /// operand folded, otherwise NULL is returned. The client is responsible for
 /// removing the old instruction and adding the new one in the instruction
 /// stream.
-MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                                 ArrayRef<unsigned> Ops,
-                                                 int FI) const {
-  unsigned Flags = 0;
+MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
+                                                 ArrayRef<unsigned> Ops, int FI,
+                                                 LiveIntervals *LIS) const {
+  auto Flags = MachineMemOperand::MONone;
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
-    if (MI->getOperand(Ops[i]).isDef())
+    if (MI.getOperand(Ops[i]).isDef())
       Flags |= MachineMemOperand::MOStore;
     else
       Flags |= MachineMemOperand::MOLoad;
 
-  MachineBasicBlock *MBB = MI->getParent();
+  MachineBasicBlock *MBB = MI.getParent();
   assert(MBB && "foldMemoryOperand needs an inserted instruction");
   MachineFunction &MF = *MBB->getParent();
 
   MachineInstr *NewMI = nullptr;
 
-  if (MI->getOpcode() == TargetOpcode::STACKMAP ||
-      MI->getOpcode() == TargetOpcode::PATCHPOINT) {
+  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
+      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
     // Fold stackmap/patchpoint.
     NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
     if (NewMI)
       MBB->insert(MI, NewMI);
   } else {
     // Ask the target to do the actual folding.
-    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI);
+    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS);
   }
 
   if (NewMI) {
-    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+    NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
     // Add a memory operand, foldMemoryOperandImpl doesn't do that.
     assert((!(Flags & MachineMemOperand::MOStore) ||
             NewMI->mayStore()) &&
@@ -542,14 +541,14 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
   }
 
   // Straight COPY may fold as load/store.
-  if (!MI->isCopy() || Ops.size() != 1)
+  if (!MI.isCopy() || Ops.size() != 1)
     return nullptr;
 
   const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
   if (!RC)
     return nullptr;
 
-  const MachineOperand &MO = MI->getOperand(1-Ops[0]);
+  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
   MachineBasicBlock::iterator Pos = MI;
   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
 
@@ -557,7 +556,7 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
     storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
   else
     loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
-  return --Pos;
+  return &*--Pos;
 }
 
 bool TargetInstrInfo::hasReassociableOperands(
@@ -637,7 +636,6 @@ bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
 bool TargetInstrInfo::getMachineCombinerPatterns(
     MachineInstr &Root,
     SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
-
   bool Commute;
   if (isReassociationCandidate(Root, Commute)) {
     // We found a sequence of instructions that may be suitable for a
@@ -656,7 +654,11 @@ bool TargetInstrInfo::getMachineCombinerPatterns(
 
   return false;
 }
-
+/// Return true when a code sequence can improve loop throughput.
+bool
+TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
+  return false;
+}
 /// Attempt the reassociation transformation to reduce critical path length.
 /// See the above comments before getMachineCombinerPatterns().
 void TargetInstrInfo::reassociateOps(
@@ -768,75 +770,73 @@ void TargetInstrInfo::genAlternativeCodeSequence(
   assert(Prev && "Unknown pattern for machine combiner");
 
   reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
-  return;
 }
 
 /// foldMemoryOperand - Same as the previous version except it allows folding
 /// of any load and store from / to any address, not just from a specific
 /// stack slot.
-MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
+MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                  ArrayRef<unsigned> Ops,
-                                                 MachineInstr *LoadMI) const {
-  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
+                                                 MachineInstr &LoadMI,
+                                                 LiveIntervals *LIS) const {
+  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
 #ifndef NDEBUG
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
-    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
+    assert(MI.getOperand(Ops[i]).isUse() && "Folding load into def!");
 #endif
-  MachineBasicBlock &MBB = *MI->getParent();
+  MachineBasicBlock &MBB = *MI.getParent();
   MachineFunction &MF = *MBB.getParent();
 
   // Ask the target to do the actual folding.
   MachineInstr *NewMI = nullptr;
   int FrameIndex = 0;
 
-  if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
-       MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
+  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
+       MI.getOpcode() == TargetOpcode::PATCHPOINT) &&
       isLoadFromStackSlot(LoadMI, FrameIndex)) {
     // Fold stackmap/patchpoint.
     NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
     if (NewMI)
-      NewMI = MBB.insert(MI, NewMI);
+      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
-    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI);
+    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }
 
  if (!NewMI) return nullptr;
 
  // Copy the memoperands from the load to the folded instruction.
-  if (MI->memoperands_empty()) {
-    NewMI->setMemRefs(LoadMI->memoperands_begin(),
-                      LoadMI->memoperands_end());
+  if (MI.memoperands_empty()) {
+    NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
-    NewMI->setMemRefs(MI->memoperands_begin(),
-                      MI->memoperands_end());
-    for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
-           E = LoadMI->memoperands_end(); I != E; ++I) {
+    NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
+                                    E = LoadMI.memoperands_end();
+         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
 }
 
-bool TargetInstrInfo::
-isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
-                                         AliasAnalysis *AA) const {
-  const MachineFunction &MF = *MI->getParent()->getParent();
+bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
+    const MachineInstr &MI, AliasAnalysis *AA) const {
+  const MachineFunction &MF = *MI.getParent()->getParent();
   const MachineRegisterInfo &MRI = MF.getRegInfo();
 
   // Remat clients assume operand 0 is the defined register.
-  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
+  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
     return false;
-  unsigned DefReg = MI->getOperand(0).getReg();
+  unsigned DefReg = MI.getOperand(0).getReg();
 
   // A sub-register definition can only be rematerialized if the instruction
   // doesn't read the other parts of the register. Otherwise it is really a
   // read-modify-write operation on the full virtual register which cannot be
   // moved safely.
   if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
-      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
+      MI.getOperand(0).getSubReg() && MI.readsVirtualRegister(DefReg))
     return false;
 
   // A load from a fixed stack slot can be rematerialized. This may be
@@ -848,23 +848,22 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
     return true;
 
   // Avoid instructions obviously unsafe for remat.
-  if (MI->isNotDuplicable() || MI->mayStore() ||
-      MI->hasUnmodeledSideEffects())
+  if (MI.isNotDuplicable() || MI.mayStore() || MI.hasUnmodeledSideEffects())
     return false;
 
   // Don't remat inline asm. We have no idea how expensive it is
   // even if it's side effect free.
-  if (MI->isInlineAsm())
+  if (MI.isInlineAsm())
     return false;
 
   // Avoid instructions which load from potentially varying memory.
-  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
+  if (MI.mayLoad() && !MI.isInvariantLoad(AA))
    return false;
 
   // If any of the registers accessed are non-constant, conservatively assume
   // the instruction is not rematerializable.
-  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
-    const MachineOperand &MO = MI->getOperand(i);
+  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+    const MachineOperand &MO = MI.getOperand(i);
     if (!MO.isReg()) continue;
     unsigned Reg = MO.getReg();
     if (Reg == 0)
@@ -901,8 +900,8 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
   return true;
 }
 
-int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
-  const MachineFunction *MF = MI->getParent()->getParent();
+int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
+  const MachineFunction *MF = MI.getParent()->getParent();
   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
   bool StackGrowsDown =
     TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
@@ -910,15 +909,15 @@ int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
   unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
   unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
 
-  if (MI->getOpcode() != FrameSetupOpcode &&
-      MI->getOpcode() != FrameDestroyOpcode)
+  if (MI.getOpcode() != FrameSetupOpcode &&
+      MI.getOpcode() != FrameDestroyOpcode)
     return 0;
-
-  int SPAdj = MI->getOperand(0).getImm();
+
+  int SPAdj = MI.getOperand(0).getImm();
   SPAdj = TFI->alignSPAdjust(SPAdj);
 
-  if ((!StackGrowsDown && MI->getOpcode() == FrameSetupOpcode) ||
-      (StackGrowsDown && MI->getOpcode() == FrameDestroyOpcode))
+  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
+      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
     SPAdj = -SPAdj;
 
   return SPAdj;
@@ -927,11 +926,11 @@ int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
 /// isSchedulingBoundary - Test if the given instruction should be
 /// considered a scheduling boundary. This primarily includes labels
 /// and terminators.
-bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
+bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
   // Terminators and labels can't be scheduled around.
-  if (MI->isTerminator() || MI->isPosition())
+  if (MI.isTerminator() || MI.isPosition())
     return true;
 
   // Don't attempt to schedule around any instruction that defines
@@ -941,7 +940,7 @@ bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
   // modification.
   const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
-  return MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
+  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
 }
 
 // Provide a global flag for disabling the PreRA hazard recognizer that targets
@@ -1010,13 +1009,12 @@ int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
 // MachineInstr latency interface.
 //===----------------------------------------------------------------------===//
 
-unsigned
-TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
-                                const MachineInstr *MI) const {
+unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
+                                         const MachineInstr &MI) const {
   if (!ItinData || ItinData->isEmpty())
     return 1;
 
-  unsigned Class = MI->getDesc().getSchedClass();
+  unsigned Class = MI.getDesc().getSchedClass();
   int UOps = ItinData->Itineraries[Class].NumMicroOps;
   if (UOps >= 0)
     return UOps;
@@ -1028,60 +1026,59 @@ TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
 /// Return the default expected latency for a def based on it's opcode.
 unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
-                                            const MachineInstr *DefMI) const {
-  if (DefMI->isTransient())
+                                            const MachineInstr &DefMI) const {
+  if (DefMI.isTransient())
     return 0;
-  if (DefMI->mayLoad())
+  if (DefMI.mayLoad())
     return SchedModel.LoadLatency;
-  if (isHighLatencyDef(DefMI->getOpcode()))
+  if (isHighLatencyDef(DefMI.getOpcode()))
     return SchedModel.HighLatency;
   return 1;
 }
 
-unsigned TargetInstrInfo::getPredicationCost(const MachineInstr *) const {
+unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
   return 0;
 }
 
-unsigned TargetInstrInfo::
-getInstrLatency(const InstrItineraryData *ItinData,
-                const MachineInstr *MI,
-                unsigned *PredCost) const {
+unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
+                                          const MachineInstr &MI,
+                                          unsigned *PredCost) const {
   // Default to one cycle for no itinerary. However, an "empty" itinerary may
   // still have a MinLatency property, which getStageLatency checks.
   if (!ItinData)
-    return MI->mayLoad() ? 2 : 1;
+    return MI.mayLoad() ? 2 : 1;
 
-  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
+  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
 }
 
 bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
-                                       const MachineInstr *DefMI,
+                                       const MachineInstr &DefMI,
                                        unsigned DefIdx) const {
   const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
   if (!ItinData || ItinData->isEmpty())
     return false;
 
-  unsigned DefClass = DefMI->getDesc().getSchedClass();
+  unsigned DefClass = DefMI.getDesc().getSchedClass();
   int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
   return (DefCycle != -1 && DefCycle <= 1);
 }
 
 /// Both DefMI and UseMI must be valid. By default, call directly to the
 /// itinerary. This may be overriden by the target.
-int TargetInstrInfo::
-getOperandLatency(const InstrItineraryData *ItinData,
-                  const MachineInstr *DefMI, unsigned DefIdx,
-                  const MachineInstr *UseMI, unsigned UseIdx) const {
-  unsigned DefClass = DefMI->getDesc().getSchedClass();
-  unsigned UseClass = UseMI->getDesc().getSchedClass();
+int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
+                                       const MachineInstr &DefMI,
+                                       unsigned DefIdx,
+                                       const MachineInstr &UseMI,
+                                       unsigned UseIdx) const {
+  unsigned DefClass = DefMI.getDesc().getSchedClass();
+  unsigned UseClass = UseMI.getDesc().getSchedClass();
   return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
 }
 
 /// If we can determine the operand latency from the def only, without itinerary
 /// lookup, do so. Otherwise return -1.
 int TargetInstrInfo::computeDefOperandLatency(
-    const InstrItineraryData *ItinData,
-    const MachineInstr *DefMI) const {
+    const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {
 
   // Let the target hook getInstrLatency handle missing itineraries.
   if (!ItinData)
@@ -1094,21 +1091,9 @@ int TargetInstrInfo::computeDefOperandLatency(
     return -1;
 }
 
-/// computeOperandLatency - Compute and return the latency of the given data
-/// dependent def and use when the operand indices are already known. UseMI may
-/// be NULL for an unknown use.
-///
-/// FindMin may be set to get the minimum vs. expected latency. Minimum
-/// latency is used for scheduling groups, while expected latency is for
-/// instruction cost and critical path.
-///
-/// Depending on the subtarget's itinerary properties, this may or may not need
-/// to call getOperandLatency(). For most subtargets, we don't need DefIdx or
-/// UseIdx to compute min latency.
-unsigned TargetInstrInfo::
-computeOperandLatency(const InstrItineraryData *ItinData,
-                      const MachineInstr *DefMI, unsigned DefIdx,
-                      const MachineInstr *UseMI, unsigned UseIdx) const {
+unsigned TargetInstrInfo::computeOperandLatency(
+    const InstrItineraryData *ItinData, const MachineInstr &DefMI,
+    unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const {
 
   int DefLatency = computeDefOperandLatency(ItinData, DefMI);
   if (DefLatency >= 0)
@@ -1118,9 +1103,9 @@ computeOperandLatency(const InstrItineraryData *ItinData,
   int OperLatency = 0;
   if (UseMI)
-    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
+    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, *UseMI, UseIdx);
   else {
-    unsigned DefClass = DefMI->getDesc().getSchedClass();
+    unsigned DefClass = DefMI.getDesc().getSchedClass();
     OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
   }
 
   if (OperLatency >= 0)
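The bulk of this diff converts TargetInstrInfo hooks from pointer parameters (MachineInstr *MI) to reference parameters (MachineInstr &MI), with call sites switching from "->" to "." accordingly. Out-of-tree targets that override these hooks have to update their signatures to match, or their overrides silently stop overriding. Below is a minimal sketch of that adjustment for one hook, using a hypothetical MyTargetInstrInfo class that is not part of this patch; the body mirrors the default isUnpredicatedTerminator() implementation visible in the diff, and the include paths assume the llvm/Target layout used by this tree.

```cpp
// Hypothetical out-of-tree target adapting to the reference-based interface.
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"

namespace llvm {

class MyTargetInstrInfo : public TargetInstrInfo {
public:
  // Before this change the override took a pointer:
  //   bool isUnpredicatedTerminator(const MachineInstr *MI) const override;
  // After it, the parameter is a reference and members are accessed with '.':
  bool isUnpredicatedTerminator(const MachineInstr &MI) const override {
    if (!MI.isTerminator())
      return false;
    // Conditional branches are a special case: they stay unpredicated.
    if (MI.isBranch() && !MI.isBarrier())
      return true;
    if (!MI.isPredicable())
      return true;
    return !isPredicated(MI);
  }
};

} // end namespace llvm
```

A missing signature update is easy to catch by marking such overrides with the override keyword, as above: the compiler then rejects any declaration that no longer matches a virtual method in TargetInstrInfo.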