author    | ed <ed@FreeBSD.org> | 2009-06-22 08:08:12 +0000
committer | ed <ed@FreeBSD.org> | 2009-06-22 08:08:12 +0000
commit    | a4c19d68f13cf0a83bc0da53bd6d547fcaf635fe (patch)
tree      | 86c1bc482baa6c81fc70b8d715153bfa93377186 /lib/CodeGen
parent    | db89e312d968c258aba3c79c1c398f5fb19267a3 (diff)
download  | FreeBSD-src-a4c19d68f13cf0a83bc0da53bd6d547fcaf635fe.zip, FreeBSD-src-a4c19d68f13cf0a83bc0da53bd6d547fcaf635fe.tar.gz
Update LLVM sources to r73879.
Diffstat (limited to 'lib/CodeGen')
24 files changed, 526 insertions, 258 deletions
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index 45462da..e931904 100644 --- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -152,6 +152,9 @@ void AsmPrinter::getAnalysisUsage(AnalysisUsage &AU) const { bool AsmPrinter::doInitialization(Module &M) { Mang = new Mangler(M, TAI->getGlobalPrefix(), TAI->getPrivateGlobalPrefix()); + if (TAI->doesAllowQuotesInName()) + Mang->setUseQuotes(true); + GCModuleInfo *MI = getAnalysisIfAvailable<GCModuleInfo>(); assert(MI && "AsmPrinter didn't require GCModuleInfo?"); @@ -174,9 +177,17 @@ bool AsmPrinter::doInitialization(Module &M) { SwitchToDataSection(""); // Reset back to no section. - MachineModuleInfo *MMI = getAnalysisIfAvailable<MachineModuleInfo>(); - if (MMI) MMI->AnalyzeModule(M); - DW = getAnalysisIfAvailable<DwarfWriter>(); + if (TAI->doesSupportDebugInformation() + || TAI->doesSupportExceptionHandling()) { + MachineModuleInfo *MMI = getAnalysisIfAvailable<MachineModuleInfo>(); + if (MMI) { + MMI->AnalyzeModule(M); + DW = getAnalysisIfAvailable<DwarfWriter>(); + if (DW) + DW->BeginModule(&M, MMI, O, this, TAI); + } + } + return false; } @@ -347,8 +358,9 @@ void AsmPrinter::EmitJumpTableInfo(MachineJumpTableInfo *MJTI, const char* JumpTableDataSection = TAI->getJumpTableDataSection(); const Function *F = MF.getFunction(); unsigned SectionFlags = TAI->SectionFlagsForGlobal(F); + bool JTInDiffSection = false; if ((IsPic && !(LoweringInfo && LoweringInfo->usesGlobalOffsetTable())) || - !JumpTableDataSection || + !JumpTableDataSection || SectionFlags & SectionFlags::Linkonce) { // In PIC mode, we need to emit the jump table to the same section as the // function body itself, otherwise the label differences won't make sense. @@ -357,6 +369,7 @@ void AsmPrinter::EmitJumpTableInfo(MachineJumpTableInfo *MJTI, SwitchToSection(TAI->SectionForGlobal(F)); } else { SwitchToDataSection(JumpTableDataSection); + JTInDiffSection = true; } EmitAlignment(Log2_32(MJTI->getAlignment())); @@ -380,8 +393,10 @@ void AsmPrinter::EmitJumpTableInfo(MachineJumpTableInfo *MJTI, // before each jump table. The first label is never referenced, but tells // the assembler and linker the extents of the jump table object. The // second label is actually referenced by the code. - if (const char *JTLabelPrefix = TAI->getJumpTableSpecialLabelPrefix()) - O << JTLabelPrefix << "JTI" << getFunctionNumber() << '_' << i << ":\n"; + if (JTInDiffSection) { + if (const char *JTLabelPrefix = TAI->getJumpTableSpecialLabelPrefix()) + O << JTLabelPrefix << "JTI" << getFunctionNumber() << '_' << i << ":\n"; + } O << TAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber() << '_' << i << ":\n"; @@ -502,7 +517,7 @@ const GlobalValue * AsmPrinter::findGlobalValue(const Constant *CV) { void AsmPrinter::EmitLLVMUsedList(Constant *List) { const char *Directive = TAI->getUsedDirective(); - // Should be an array of 'sbyte*'. + // Should be an array of 'i8*'. 
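The jump-table hunks above make two related decisions: where the table is emitted, and whether the extra "special" delimiting label is needed. A distilled sketch of that logic; the function and type names are hypothetical, only the conditions come from the hunk:

```cpp
struct JTPlacement { bool InFunctionSection; bool EmitSpecialLabel; };

JTPlacement placeJumpTable(bool IsPIC, bool UsesGOT, bool HasJTSection,
                           bool IsLinkonce) {
  // In PIC mode without a GOT, label differences only make sense inside
  // the function's own section; linkonce functions must keep their tables
  // with them so duplicate copies are discarded together.
  bool InFunc = (IsPIC && !UsesGOT) || !HasJTSection || IsLinkonce;
  // The extra label delimiting the table object is only needed when the
  // table lives in a separate data section, where the function's symbol
  // does not cover it.
  return {InFunc, /*EmitSpecialLabel=*/!InFunc};
}
```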
ConstantArray *InitList = dyn_cast<ConstantArray>(List); if (InitList == 0) return; diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index c773378..9d340e3 100644 --- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -1757,6 +1757,9 @@ unsigned DwarfDebug::RecordInlinedFnStart(DISubprogram &SP, DICompileUnit CU, if (TimePassesIsEnabled) DebugTimer->startTimer(); + CompileUnit *Unit = MainCU; + if (!Unit) + Unit = &FindCompileUnit(SP.getCompileUnit()); GlobalVariable *GV = SP.getGV(); DenseMap<const GlobalVariable *, DbgScope *>::iterator II = AbstractInstanceRootMap.find(GV); @@ -1767,7 +1770,6 @@ unsigned DwarfDebug::RecordInlinedFnStart(DISubprogram &SP, DICompileUnit CU, DbgScope *Scope = new DbgScope(NULL, DIDescriptor(GV)); // Get the compile unit context. - CompileUnit *Unit = &FindCompileUnit(SP.getCompileUnit()); DIE *SPDie = Unit->getDieMapSlotFor(GV); if (!SPDie) SPDie = CreateSubprogramDIE(Unit, SP, false, true); @@ -1789,7 +1791,6 @@ unsigned DwarfDebug::RecordInlinedFnStart(DISubprogram &SP, DICompileUnit CU, // Create a concrete inlined instance for this inlined function. DbgConcreteScope *ConcreteScope = new DbgConcreteScope(DIDescriptor(GV)); DIE *ScopeDie = new DIE(dwarf::DW_TAG_inlined_subroutine); - CompileUnit *Unit = &FindCompileUnit(SP.getCompileUnit()); ScopeDie->setAbstractCompileUnit(Unit); DIE *Origin = Unit->getDieMapSlotFor(GV); @@ -1850,7 +1851,14 @@ unsigned DwarfDebug::RecordInlinedFnEnd(DISubprogram &SP) { } SmallVector<DbgScope *, 8> &Scopes = I->second; - assert(!Scopes.empty() && "We should have at least one debug scope!"); + if (Scopes.empty()) { + // Returned ID is 0 if this is unbalanced "end of inlined + // scope". This could happen if optimizer eats dbg intrinsics + // or "beginning of inlined scope" is not recoginized due to + // missing location info. In such cases, ignore this region.end. + return 0; + } + DbgScope *Scope = Scopes.back(); Scopes.pop_back(); unsigned ID = MMI->NextLabelID(); MMI->RecordUsedDbgLabel(ID); @@ -1987,8 +1995,8 @@ void DwarfDebug::EmitInitial() { Asm->SwitchToDataSection(TAI->getDwarfARangesSection()); EmitLabel("section_aranges", 0); - if (TAI->doesSupportMacInfoSection()) { - Asm->SwitchToDataSection(TAI->getDwarfMacInfoSection()); + if (const char *LineInfoDirective = TAI->getDwarfMacroInfoSection()) { + Asm->SwitchToDataSection(LineInfoDirective); EmitLabel("section_macinfo", 0); } @@ -2534,9 +2542,9 @@ void DwarfDebug::EmitDebugRanges() { /// EmitDebugMacInfo - Emit visible names into a debug macinfo section. /// void DwarfDebug::EmitDebugMacInfo() { - if (TAI->doesSupportMacInfoSection()) { + if (const char *LineInfoDirective = TAI->getDwarfMacroInfoSection()) { // Start the dwarf macinfo section. 
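RecordInlinedFnEnd above stops asserting on an empty scope stack and returns label id 0 instead, which the FastISel caller later in this commit treats as "emit nothing". A minimal sketch of the pattern, with simplified stand-in types:

```cpp
#include <vector>

// Hypothetical, simplified types standing in for DwarfDebug's scope
// bookkeeping; only the control flow of the fix is real.
struct Scope { unsigned EndLabel = 0; };

unsigned recordInlinedFnEnd(std::vector<Scope*> &Scopes,
                            unsigned &NextLabelID) {
  // Unbalanced "end of inlined scope": the optimizer may have eaten the
  // matching dbg intrinsic, or the "begin" was never recognized because
  // location info was missing. Returning 0 tells the caller to skip the
  // region.end instead of asserting on well-formed-but-unlucky input.
  if (Scopes.empty())
    return 0;
  Scope *S = Scopes.back();
  Scopes.pop_back();
  unsigned ID = ++NextLabelID;   // label ids start at 1; 0 means "ignore"
  S->EndLabel = ID;
  return ID;
}
```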
- Asm->SwitchToDataSection(TAI->getDwarfMacInfoSection()); + Asm->SwitchToDataSection(LineInfoDirective); Asm->EOL(); } } diff --git a/lib/CodeGen/AsmPrinter/DwarfPrinter.cpp b/lib/CodeGen/AsmPrinter/DwarfPrinter.cpp index f7ca4f4..a1b97df 100644 --- a/lib/CodeGen/AsmPrinter/DwarfPrinter.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfPrinter.cpp @@ -190,7 +190,7 @@ void Dwarf::EmitFrameMoves(const char *BaseLabel, unsigned BaseLabelID, Asm->EmitULEB128Bytes(Offset); Asm->EOL("Offset"); } else { - assert(0 && "Machine move no supported yet."); + assert(0 && "Machine move not supported yet."); } } else if (Src.isReg() && Src.getReg() == MachineLocation::VirtualFP) { @@ -200,7 +200,7 @@ void Dwarf::EmitFrameMoves(const char *BaseLabel, unsigned BaseLabelID, Asm->EmitULEB128Bytes(RI->getDwarfRegNum(Dst.getReg(), isEH)); Asm->EOL("Register"); } else { - assert(0 && "Machine move no supported yet."); + assert(0 && "Machine move not supported yet."); } } else { unsigned Reg = RI->getDwarfRegNum(Src.getReg(), isEH); diff --git a/lib/CodeGen/IfConversion.cpp b/lib/CodeGen/IfConversion.cpp index 1d0887f..4d5c3c2 100644 --- a/lib/CodeGen/IfConversion.cpp +++ b/lib/CodeGen/IfConversion.cpp @@ -547,7 +547,11 @@ void IfConverter::ScanInstructions(BBInfo &BBI) { // fallthrough. if (!BBI.FalseBB) BBI.FalseBB = findFalseBlock(BBI.BB, BBI.TrueBB); - assert(BBI.FalseBB && "Expected to find the fallthrough block!"); + if (!BBI.FalseBB) { + // Malformed bcc? True and false blocks are the same? + BBI.IsUnpredicable = true; + return; + } } // Then scan all the instructions. @@ -663,6 +667,13 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB, return BBI; } + // Do not ifcvt if true and false fallthrough blocks are the same. + if (!BBI.FalseBB) { + BBI.IsBeingAnalyzed = false; + BBI.IsAnalyzed = true; + return BBI; + } + BBInfo &TrueBBI = AnalyzeBlock(BBI.TrueBB, Tokens); BBInfo &FalseBBI = AnalyzeBlock(BBI.FalseBB, Tokens); diff --git a/lib/CodeGen/LazyLiveness.cpp b/lib/CodeGen/LazyLiveness.cpp index 6fb35d2..a951c99 100644 --- a/lib/CodeGen/LazyLiveness.cpp +++ b/lib/CodeGen/LazyLiveness.cpp @@ -32,10 +32,12 @@ void LazyLiveness::computeBackedgeChain(MachineFunction& mf, calculated.set(preorder[MBB]); for (SparseBitVector<128>::iterator I = tmp.begin(); I != tmp.end(); ++I) { + assert(rev_preorder.size() > *I && "Unknown block!"); + MachineBasicBlock* SrcMBB = rev_preorder[*I]; - for (MachineBasicBlock::succ_iterator SI = SrcMBB->succ_begin(); - SI != SrcMBB->succ_end(); ++SI) { + for (MachineBasicBlock::succ_iterator SI = SrcMBB->succ_begin(), + SE = SrcMBB->succ_end(); SI != SE; ++SI) { MachineBasicBlock* TgtMBB = *SI; if (backedges.count(std::make_pair(SrcMBB, TgtMBB)) && @@ -44,7 +46,8 @@ void LazyLiveness::computeBackedgeChain(MachineFunction& mf, computeBackedgeChain(mf, TgtMBB); tv[MBB].set(preorder[TgtMBB]); - tv[MBB] |= tv[TgtMBB]; + SparseBitVector<128> right = tv[TgtMBB]; + tv[MBB] |= right; } } @@ -60,6 +63,12 @@ bool LazyLiveness::runOnMachineFunction(MachineFunction &mf) { backedge_target.clear(); calculated.clear(); preorder.clear(); + rev_preorder.clear(); + + rv.resize(mf.size()); + tv.resize(mf.size()); + preorder.resize(mf.size()); + rev_preorder.reserve(mf.size()); MRI = &mf.getRegInfo(); MachineDominatorTree& MDT = getAnalysis<MachineDominatorTree>(); @@ -106,8 +115,8 @@ bool LazyLiveness::runOnMachineFunction(MachineFunction &mf) { for (MachineBasicBlock::succ_iterator SI = (*POI)->succ_begin(), SE = (*POI)->succ_end(); SI != SE; ++SI) if (!backedges.count(std::make_pair(*POI, 
*SI)) && tv.count(*SI)) { - SparseBitVector<128>& PBV = tv[*POI]; - PBV = tv[*SI]; + SparseBitVector<128> right = tv[*SI]; + tv[*POI] |= right; } for (po_iterator<MachineBasicBlock*> POI = po_begin(&*mf.begin()), diff --git a/lib/CodeGen/LiveInterval.cpp b/lib/CodeGen/LiveInterval.cpp index 67120b8..cac9253 100644 --- a/lib/CodeGen/LiveInterval.cpp +++ b/lib/CodeGen/LiveInterval.cpp @@ -19,6 +19,7 @@ //===----------------------------------------------------------------------===// #include "llvm/CodeGen/LiveInterval.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/STLExtras.h" @@ -305,9 +306,9 @@ void LiveInterval::removeRange(unsigned Start, unsigned End, VNInfo *VNI = valnos.back(); valnos.pop_back(); VNI->~VNInfo(); - } while (!valnos.empty() && valnos.back()->def == ~1U); + } while (!valnos.empty() && valnos.back()->isUnused()); } else { - ValNo->def = ~1U; + ValNo->setIsUnused(true); } } } @@ -353,9 +354,9 @@ void LiveInterval::removeValNo(VNInfo *ValNo) { VNInfo *VNI = valnos.back(); valnos.pop_back(); VNI->~VNInfo(); - } while (!valnos.empty() && valnos.back()->def == ~1U); + } while (!valnos.empty() && valnos.back()->isUnused()); } else { - ValNo->def = ~1U; + ValNo->setIsUnused(true); } } @@ -371,9 +372,8 @@ void LiveInterval::scaleNumbering(unsigned factor) { // Scale VNI info. for (vni_iterator VNI = vni_begin(), VNIE = vni_end(); VNI != VNIE; ++VNI) { VNInfo *vni = *VNI; - if (vni->def != ~0U && vni->def != ~1U) { - vni->def = InstrSlots::scale(vni->def, factor); - } + + vni->def = InstrSlots::scale(vni->def, factor); for (unsigned i = 0; i < vni->kills.size(); ++i) { if (vni->kills[i] != 0) @@ -421,13 +421,13 @@ VNInfo *LiveInterval::findDefinedVNInfo(unsigned DefIdxOrReg) const { return VNI; } - /// join - Join two live intervals (this, and other) together. This applies /// mappings to the value numbers in the LHS/RHS intervals as specified. If /// the intervals are not joinable, this aborts. void LiveInterval::join(LiveInterval &Other, const int *LHSValNoAssignments, const int *RHSValNoAssignments, - SmallVector<VNInfo*, 16> &NewVNInfo) { + SmallVector<VNInfo*, 16> &NewVNInfo, + MachineRegisterInfo *MRI) { // Determine if any of our live range values are mapped. This is uncommon, so // we want to avoid the interval scan if not. bool MustMapCurValNos = false; @@ -502,8 +502,18 @@ void LiveInterval::join(LiveInterval &Other, const int *LHSValNoAssignments, } weight += Other.weight; - if (Other.preference && !preference) - preference = Other.preference; + + // Update regalloc hint if currently there isn't one. 
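The LazyLiveness hunks above replace the in-place `tv[MBB] |= tv[TgtMBB]` with a copy into a temporary. The reason is reference stability: with a DenseMap, either `operator[]` call may insert a missing key, and an insertion that grows the table invalidates the reference produced by the other call mid-expression. A minimal sketch of the safe pattern, with the map key simplified to an unsigned block number:

```cpp
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SparseBitVector.h"

// Unsafe:  tv[Dst] |= tv[Src];   // either operator[] may grow the map and
//                                // dangle the other reference
// Safe: materialize the right-hand side first, then mutate the slot.
void mergeInto(llvm::DenseMap<unsigned, llvm::SparseBitVector<128>> &tv,
               unsigned Dst, unsigned Src) {
  llvm::SparseBitVector<128> right = tv[Src]; // copy before tv[Dst] can grow the map
  tv[Dst] |= right;
}
```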
+ if (TargetRegisterInfo::isVirtualRegister(reg) && + TargetRegisterInfo::isVirtualRegister(Other.reg)) { + std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(reg); + if (Hint.first == 0 && Hint.second == 0) { + std::pair<unsigned, unsigned> OtherHint = + MRI->getRegAllocationHint(Other.reg); + if (OtherHint.first || OtherHint.second) + MRI->setRegAllocationHint(reg, OtherHint.first, OtherHint.second); + } + } } /// MergeRangesInAsValue - Merge all of the intervals in RHS into this live @@ -582,9 +592,9 @@ void LiveInterval::MergeValueInAsValue(const LiveInterval &RHS, VNInfo *VNI = valnos.back(); valnos.pop_back(); VNI->~VNInfo(); - } while (!valnos.empty() && valnos.back()->def == ~1U); + } while (!valnos.empty() && valnos.back()->isUnused()); } else { - V1->def = ~1U; + V1->setIsUnused(true); } } } @@ -611,7 +621,7 @@ void LiveInterval::MergeInClobberRanges(const LiveInterval &Clobbers, else if (UnusedValNo) ClobberValNo = UnusedValNo; else { - UnusedValNo = ClobberValNo = getNextValue(~0U, 0, VNInfoAllocator); + UnusedValNo = ClobberValNo = getNextValue(0, 0, false, VNInfoAllocator); ValNoMaps.insert(std::make_pair(I->valno, ClobberValNo)); } @@ -664,7 +674,7 @@ void LiveInterval::MergeInClobberRange(unsigned Start, unsigned End, BumpPtrAllocator &VNInfoAllocator) { // Find a value # to use for the clobber ranges. If there is already a value# // for unknown values, use it. - VNInfo *ClobberValNo = getNextValue(~0U, 0, VNInfoAllocator); + VNInfo *ClobberValNo = getNextValue(0, 0, false, VNInfoAllocator); iterator IP = begin(); IP = std::upper_bound(IP, end(), Start); @@ -747,24 +757,26 @@ VNInfo* LiveInterval::MergeValueNumberInto(VNInfo *V1, VNInfo *V2) { VNInfo *VNI = valnos.back(); valnos.pop_back(); VNI->~VNInfo(); - } while (valnos.back()->def == ~1U); + } while (valnos.back()->isUnused()); } else { - V1->def = ~1U; + V1->setIsUnused(true); } return V2; } void LiveInterval::Copy(const LiveInterval &RHS, + MachineRegisterInfo *MRI, BumpPtrAllocator &VNInfoAllocator) { ranges.clear(); valnos.clear(); - preference = RHS.preference; + std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(RHS.reg); + MRI->setRegAllocationHint(reg, Hint.first, Hint.second); + weight = RHS.weight; for (unsigned i = 0, e = RHS.getNumValNums(); i != e; ++i) { const VNInfo *VNI = RHS.getValNumInfo(i); - VNInfo *NewVNI = getNextValue(~0U, 0, VNInfoAllocator); - copyValNumInfo(NewVNI, VNI); + createValueCopy(VNI, VNInfoAllocator); } for (unsigned i = 0, e = RHS.ranges.size(); i != e; ++i) { const LiveRange &LR = RHS.ranges[i]; @@ -816,22 +828,22 @@ void LiveInterval::print(std::ostream &OS, const VNInfo *vni = *i; if (vnum) OS << " "; OS << vnum << "@"; - if (vni->def == ~1U) { + if (vni->isUnused()) { OS << "x"; } else { - if (vni->def == ~0U) + if (!vni->isDefAccurate()) OS << "?"; else OS << vni->def; unsigned ee = vni->kills.size(); - if (ee || vni->hasPHIKill) { + if (ee || vni->hasPHIKill()) { OS << "-("; for (unsigned j = 0; j != ee; ++j) { OS << vni->kills[j]; if (j != ee-1) OS << " "; } - if (vni->hasPHIKill) { + if (vni->hasPHIKill()) { if (ee) OS << " "; OS << "phi"; diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index cf0a648..d6931df 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -199,7 +199,7 @@ void LiveIntervals::computeNumbering() { // Remap the VNInfo def index, which works the same as the // start indices above. VN's with special sentinel defs // don't need to be remapped. 
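The remapping change above drops the `~0U`/`~1U` sentinel comparisons in favor of the VNInfo flag accessors used throughout this commit (isUnused, isDefAccurate, setHasPHIKill, setHasRedefByEC, setIsPHIDef). A hypothetical distillation of that encoding; the exact bit layout is an assumption, only the accessors appear in the diff:

```cpp
// Packing VNInfo state into flag bits instead of overloading 'def' with
// sentinels (~0U = def location unknown, ~1U = value unused) lets 'def'
// always hold a real index (or a block number for PHI-defined values).
struct VNInfoSketch {
  enum Flags {
    IS_PHI_DEF      = 1 << 0,  // defined by a PHI; 'def' holds the block number
    IS_UNUSED       = 1 << 1,  // value number is dead, slot can be recycled
    HAS_PHI_KILL    = 1 << 2,
    REDEF_BY_EC     = 1 << 3,  // redefined by an early-clobber operand
    IS_DEF_ACCURATE = 1 << 4   // 'def' really is the defining instruction's index
  };
  unsigned def = 0;
  unsigned flags = 0;

  bool isUnused() const { return flags & IS_UNUSED; }
  void setIsUnused(bool B) {
    B ? flags |= IS_UNUSED : flags &= ~unsigned(IS_UNUSED);
  }
  bool isDefAccurate() const { return flags & IS_DEF_ACCURATE; }
  // The old `def != ~0U && def != ~1U` guard becomes:
  bool hasRealDefIndex() const { return isDefAccurate() && !isUnused(); }
};
```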
- if (vni->def != ~0U && vni->def != ~1U) { + if (vni->isDefAccurate() && !vni->isUnused()) { unsigned index = vni->def / InstrSlots::NUM; unsigned offset = vni->def % InstrSlots::NUM; if (offset == InstrSlots::LOAD) { @@ -447,7 +447,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)) CopyMI = mi; // Earlyclobbers move back one. - ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator); + ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator); assert(ValNo->id == 0 && "First value in interval is not 0?"); @@ -539,13 +539,15 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, // The new value number (#1) is defined by the instruction we claimed // defined value #0. VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy, + false, // update at * VNInfoAllocator); - + ValNo->setFlags(OldValNo->getFlags()); // * <- updating here + // Value#0 is now defined by the 2-addr instruction. OldValNo->def = RedefIndex; OldValNo->copy = 0; if (MO.isEarlyClobber()) - OldValNo->redefByEC = true; + OldValNo->setHasRedefByEC(true); // Add the new live interval which replaces the range for the input copy. LiveRange LR(DefIndex, RedefIndex, ValNo); @@ -577,12 +579,14 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, DOUT << " Removing [" << Start << "," << End << "] from: "; interval.print(DOUT, tri_); DOUT << "\n"; interval.removeRange(Start, End); - VNI->hasPHIKill = true; + VNI->setHasPHIKill(true); DOUT << " RESULT: "; interval.print(DOUT, tri_); // Replace the interval with one of a NEW value number. Note that this // value number isn't actually defined by an instruction, weird huh? :) - LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator)); + LiveRange LR(Start, End, + interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator)); + LR.valno->setIsPHIDef(true); DOUT << " replace range with " << LR; interval.addRange(LR); interval.addKill(LR.valno, End); @@ -604,13 +608,13 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG || tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)) CopyMI = mi; - ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator); + ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator); unsigned killIndex = getMBBEndIdx(mbb) + 1; LiveRange LR(defIndex, killIndex, ValNo); interval.addRange(LR); interval.addKill(ValNo, killIndex); - ValNo->hasPHIKill = true; + ValNo->setHasPHIKill(true); DOUT << " +" << LR; } } @@ -692,9 +696,9 @@ exit: LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start); bool Extend = OldLR != interval.end(); VNInfo *ValNo = Extend - ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator); + ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator); if (MO.isEarlyClobber() && Extend) - ValNo->redefByEC = true; + ValNo->setHasRedefByEC(true); LiveRange LR(start, end, ValNo); interval.addRange(LR); interval.addKill(LR.valno, end); @@ -750,7 +754,7 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB, DOUT << " killed"; end = getUseIndex(baseIndex) + 1; SeenDefUse = true; - goto exit; + break; } else if (mi->modifiesRegister(interval.reg, tri_)) { // Another instruction redefines the register before it is ever read. 
// Then the register is essentially dead at the instruction that defines @@ -759,7 +763,7 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB, DOUT << " dead"; end = getDefIndex(start) + 1; SeenDefUse = true; - goto exit; + break; } baseIndex += InstrSlots::NUM; @@ -771,7 +775,6 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB, } } -exit: // Live-in register might not be used at all. if (!SeenDefUse) { if (isAlias) { @@ -783,7 +786,11 @@ exit: } } - LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator)); + VNInfo *vni = + interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator); + vni->setIsPHIDef(true); + LiveRange LR(start, end, vni); + interval.addRange(LR); interval.addKill(LR.valno, end); DOUT << " +" << LR << '\n'; @@ -896,7 +903,7 @@ LiveInterval* LiveIntervals::createInterval(unsigned reg) { /// managing the allocated memory. LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) { LiveInterval *NewLI = createInterval(li->reg); - NewLI->Copy(*li, getVNInfoAllocator()); + NewLI->Copy(*li, mri_, getVNInfoAllocator()); return NewLI; } @@ -1099,13 +1106,12 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li, for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end(); i != e; ++i) { const VNInfo *VNI = *i; - unsigned DefIdx = VNI->def; - if (DefIdx == ~1U) + if (VNI->isUnused()) continue; // Dead val#. // Is the def for the val# rematerializable? - if (DefIdx == ~0u) + if (!VNI->isDefAccurate()) return false; - MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx); + MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def); bool DefIsLoad = false; if (!ReMatDefMI || !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad)) @@ -1450,7 +1456,7 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI, if (HasUse) { if (CreatedNewVReg) { LiveRange LR(getLoadIndex(index), getUseIndex(index)+1, - nI.getNextValue(~0U, 0, VNInfoAllocator)); + nI.getNextValue(0, 0, false, VNInfoAllocator)); DOUT << " +" << LR; nI.addRange(LR); } else { @@ -1464,7 +1470,7 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI, } if (HasDef) { LiveRange LR(getDefIndex(index), getStoreIndex(index), - nI.getNextValue(~0U, 0, VNInfoAllocator)); + nI.getNextValue(0, 0, false, VNInfoAllocator)); DOUT << " +" << LR; nI.addRange(LR); } @@ -1840,14 +1846,14 @@ addIntervalsForSpillsFast(const LiveInterval &li, unsigned index = getInstructionIndex(MI); if (HasUse) { LiveRange LR(getLoadIndex(index), getUseIndex(index), - nI.getNextValue(~0U, 0, getVNInfoAllocator())); + nI.getNextValue(0, 0, false, getVNInfoAllocator())); DOUT << " +" << LR; nI.addRange(LR); vrm.addRestorePoint(NewVReg, MI); } if (HasDef) { LiveRange LR(getDefIndex(index), getStoreIndex(index), - nI.getNextValue(~0U, 0, getVNInfoAllocator())); + nI.getNextValue(0, 0, false, getVNInfoAllocator())); DOUT << " +" << LR; nI.addRange(LR); vrm.addSpillPoint(NewVReg, true, MI); @@ -1961,12 +1967,11 @@ addIntervalsForSpills(const LiveInterval &li, i != e; ++i) { const VNInfo *VNI = *i; unsigned VN = VNI->id; - unsigned DefIdx = VNI->def; - if (DefIdx == ~1U) + if (VNI->isUnused()) continue; // Dead val#. // Is the def for the val# rematerializable? - MachineInstr *ReMatDefMI = (DefIdx == ~0u) - ? 0 : getInstructionFromIndex(DefIdx); + MachineInstr *ReMatDefMI = VNI->isDefAccurate() + ? 
getInstructionFromIndex(VNI->def) : 0; bool dummy; if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) { // Remember how to remat the def of this val#. @@ -1977,7 +1982,7 @@ addIntervalsForSpills(const LiveInterval &li, ReMatDefs[VN] = Clone; bool CanDelete = true; - if (VNI->hasPHIKill) { + if (VNI->hasPHIKill()) { // A kill is a phi node, not all of its uses can be rematerialized. // It must not be deleted. CanDelete = false; @@ -2287,8 +2292,8 @@ LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg, LiveInterval& Interval = getOrCreateInterval(reg); VNInfo* VN = Interval.getNextValue( getInstructionIndex(startInst) + InstrSlots::DEF, - startInst, getVNInfoAllocator()); - VN->hasPHIKill = true; + startInst, true, getVNInfoAllocator()); + VN->setHasPHIKill(true); VN->kills.push_back(getMBBEndIdx(startInst->getParent())); LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF, getMBBEndIdx(startInst->getParent()) + 1, VN); diff --git a/lib/CodeGen/LiveVariables.cpp b/lib/CodeGen/LiveVariables.cpp index 6228821..bd84508 100644 --- a/lib/CodeGen/LiveVariables.cpp +++ b/lib/CodeGen/LiveVariables.cpp @@ -359,12 +359,11 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) { // That is, unless we are currently processing the last reference itself. LastRefOrPartRef->addRegisterDead(Reg, TRI, true); - /* Partial uses. Mark register def dead and add implicit def of - sub-registers which are used. - FIXME: LiveIntervalAnalysis can't handle this yet! - EAX<dead> = op AL<imp-def> - That is, EAX def is dead but AL def extends pass it. - Enable this after live interval analysis is fixed to improve codegen! + // Partial uses. Mark register def dead and add implicit def of + // sub-registers which are used. + // EAX<dead> = op AL<imp-def> + // That is, EAX def is dead but AL def extends pass it. + // Enable this after live interval analysis is fixed to improve codegen! else if (!PhysRegUse[Reg]) { PhysRegDef[Reg]->addRegisterDead(Reg, TRI, true); for (const unsigned *SubRegs = TRI->getSubRegisters(Reg); @@ -377,7 +376,7 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) { PartUses.erase(*SS); } } - } */ + } else LastRefOrPartRef->addRegisterKilled(Reg, TRI, true); return true; diff --git a/lib/CodeGen/MachineRegisterInfo.cpp b/lib/CodeGen/MachineRegisterInfo.cpp index 4f5ab1f..544d83a 100644 --- a/lib/CodeGen/MachineRegisterInfo.cpp +++ b/lib/CodeGen/MachineRegisterInfo.cpp @@ -16,6 +16,7 @@ using namespace llvm; MachineRegisterInfo::MachineRegisterInfo(const TargetRegisterInfo &TRI) { VRegInfo.reserve(256); + RegAllocHints.reserve(256); RegClass2VRegMap.resize(TRI.getNumRegClasses()+1); // RC ID starts at 1. UsedPhysRegs.resize(TRI.getNumRegs()); @@ -64,6 +65,7 @@ MachineRegisterInfo::createVirtualRegister(const TargetRegisterClass *RegClass){ // Add a reg, but keep track of whether the vector reallocated or not. void *ArrayBase = VRegInfo.empty() ? 0 : &VRegInfo[0]; VRegInfo.push_back(std::make_pair(RegClass, (MachineOperand*)0)); + RegAllocHints.push_back(std::make_pair(0, 0)); if (!((&VRegInfo[0] == ArrayBase || VRegInfo.size() == 1))) // The vector reallocated, handle this now. 
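The MachineRegisterInfo hunk above adds RegAllocHints as a side table parallel to VRegInfo: it is reserved alongside it and pushed in the same createVirtualRegister path. A minimal sketch of that invariant, with stand-in types; if the two vectors ever fall out of lock-step, hint lookups for new virtual registers index out of bounds:

```cpp
#include <utility>
#include <vector>

class VRegTable {
  std::vector<const void*> VRegInfo;                        // stand-in for (RegClass, use list)
  std::vector<std::pair<unsigned, unsigned>> RegAllocHints; // (hint type, preferred reg)
public:
  unsigned createVirtualRegister(const void *RegClass) {
    VRegInfo.push_back(RegClass);
    RegAllocHints.push_back({0u, 0u});   // default: no hint
    return unsigned(VRegInfo.size() - 1);
  }
  void setRegAllocationHint(unsigned VReg, unsigned Type, unsigned PrefReg) {
    RegAllocHints[VReg] = {Type, PrefReg};
  }
  std::pair<unsigned, unsigned> getRegAllocationHint(unsigned VReg) const {
    return RegAllocHints[VReg];
  }
};
```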
diff --git a/lib/CodeGen/PreAllocSplitting.cpp b/lib/CodeGen/PreAllocSplitting.cpp index 97d4728..ae60c86 100644 --- a/lib/CodeGen/PreAllocSplitting.cpp +++ b/lib/CodeGen/PreAllocSplitting.cpp @@ -343,7 +343,7 @@ int PreAllocSplitting::CreateSpillStackSlot(unsigned Reg, if (CurrSLI->hasAtLeastOneValue()) CurrSValNo = CurrSLI->getValNumInfo(0); else - CurrSValNo = CurrSLI->getNextValue(~0U, 0, LSs->getVNInfoAllocator()); + CurrSValNo = CurrSLI->getNextValue(0, 0, false, LSs->getVNInfoAllocator()); return SS; } @@ -637,8 +637,9 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us if (Phis.count(MBB)) return Phis[MBB]; unsigned StartIndex = LIs->getMBBStartIdx(MBB); - VNInfo *RetVNI = Phis[MBB] = LI->getNextValue(~0U, /*FIXME*/ 0, - LIs->getVNInfoAllocator()); + VNInfo *RetVNI = Phis[MBB] = + LI->getNextValue(0, /*FIXME*/ 0, false, LIs->getVNInfoAllocator()); + if (!IsIntraBlock) LiveOut[MBB] = RetVNI; // If there are no uses or defs between our starting point and the @@ -654,7 +655,7 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us IncomingVNs[*PI] = Incoming; } - if (MBB->pred_size() == 1 && !RetVNI->hasPHIKill) { + if (MBB->pred_size() == 1 && !RetVNI->hasPHIKill()) { VNInfo* OldVN = RetVNI; VNInfo* NewVN = IncomingVNs.begin()->second; VNInfo* MergedVN = LI->MergeValueNumberInto(OldVN, NewVN); @@ -678,7 +679,7 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us // VNInfo to represent the joined value. for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I = IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) { - I->second->hasPHIKill = true; + I->second->setHasPHIKill(true); unsigned KillIndex = LIs->getMBBEndIdx(I->first); if (!LiveInterval::isKill(I->second, KillIndex)) LI->addKill(I->second, KillIndex); @@ -730,7 +731,9 @@ void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) { unsigned DefIdx = LIs->getInstructionIndex(&*DI); DefIdx = LiveIntervals::getDefIndex(DefIdx); - VNInfo* NewVN = LI->getNextValue(DefIdx, 0, Alloc); + assert(DI->getOpcode() != TargetInstrInfo::PHI && + "Following NewVN isPHIDef flag incorrect. Fix me!"); + VNInfo* NewVN = LI->getNextValue(DefIdx, 0, true, Alloc); // If the def is a move, set the copy field. unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx; @@ -793,7 +796,7 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) { // Bail out if we ever encounter a valno that has a PHI kill. We can't // renumber these. 
- if (OldVN->hasPHIKill) return; + if (OldVN->hasPHIKill()) return; VNsToCopy.push_back(OldVN); @@ -823,9 +826,7 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) { VNInfo* OldVN = *OI; // Copy the valno over - VNInfo* NewVN = NewLI.getNextValue(OldVN->def, OldVN->copy, - LIs->getVNInfoAllocator()); - NewLI.copyValNumInfo(NewVN, OldVN); + VNInfo* NewVN = NewLI.createValueCopy(OldVN, LIs->getVNInfoAllocator()); NewLI.MergeValueInAsValue(*CurrLI, OldVN, NewVN); // Remove the valno from the old interval @@ -873,7 +874,7 @@ bool PreAllocSplitting::Rematerialize(unsigned vreg, VNInfo* ValNo, MachineBasicBlock::iterator KillPt = BarrierMBB->end(); unsigned KillIdx = 0; - if (ValNo->def == ~0U || DefMI->getParent() == BarrierMBB) + if (!ValNo->isDefAccurate() || DefMI->getParent() == BarrierMBB) KillPt = findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB, KillIdx); else KillPt = findNextEmptySlot(DefMI->getParent(), DefMI, KillIdx); @@ -942,7 +943,7 @@ MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg, if (CurrSLI->hasAtLeastOneValue()) CurrSValNo = CurrSLI->getValNumInfo(0); else - CurrSValNo = CurrSLI->getNextValue(~0U, 0, LSs->getVNInfoAllocator()); + CurrSValNo = CurrSLI->getNextValue(0, 0, false, LSs->getVNInfoAllocator()); } return FMI; @@ -1032,13 +1033,13 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) { CurrLI->FindLiveRangeContaining(LIs->getUseIndex(BarrierIdx)); VNInfo *ValNo = LR->valno; - if (ValNo->def == ~1U) { + if (ValNo->isUnused()) { // Defined by a dead def? How can this be? assert(0 && "Val# is defined by a dead def?"); abort(); } - MachineInstr *DefMI = (ValNo->def != ~0U) + MachineInstr *DefMI = ValNo->isDefAccurate() ? LIs->getInstructionFromIndex(ValNo->def) : NULL; // If this would create a new join point, do not split. @@ -1072,8 +1073,8 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) { unsigned SpillIndex = 0; MachineInstr *SpillMI = NULL; int SS = -1; - if (ValNo->def == ~0U) { - // If it's defined by a phi, we must split just before the barrier. + if (!ValNo->isDefAccurate()) { + // If we don't know where the def is we must split just before the barrier. if ((SpillMI = FoldSpill(LI->reg, RC, 0, Barrier, BarrierMBB, SS, RefsInMBB))) { SpillIndex = LIs->getInstructionIndex(SpillMI); @@ -1254,17 +1255,16 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) { // We don't currently try to handle definitions with PHI kills, because // it would involve processing more than one VNInfo at once. - if (CurrVN->hasPHIKill) continue; + if (CurrVN->hasPHIKill()) continue; // We also don't try to handle the results of PHI joins, since there's // no defining instruction to analyze. - unsigned DefIdx = CurrVN->def; - if (DefIdx == ~0U || DefIdx == ~1U) continue; + if (!CurrVN->isDefAccurate() || CurrVN->isUnused()) continue; // We're only interested in eliminating cruft introduced by the splitter, // is of the form load-use or load-use-store. First, check that the // definition is a load, and remember what stack slot we loaded it from. 
- MachineInstr* DefMI = LIs->getInstructionFromIndex(DefIdx); + MachineInstr* DefMI = LIs->getInstructionFromIndex(CurrVN->def); int FrameIndex; if (!TII->isLoadFromStackSlot(DefMI, FrameIndex)) continue; @@ -1383,7 +1383,7 @@ bool PreAllocSplitting::createsNewJoin(LiveRange* LR, if (DefMBB == BarrierMBB) return false; - if (LR->valno->hasPHIKill) + if (LR->valno->hasPHIKill()) return false; unsigned MBBEnd = LIs->getMBBEndIdx(BarrierMBB); diff --git a/lib/CodeGen/RegAllocLinearScan.cpp b/lib/CodeGen/RegAllocLinearScan.cpp index 804fae5..41a42fd 100644 --- a/lib/CodeGen/RegAllocLinearScan.cpp +++ b/lib/CodeGen/RegAllocLinearScan.cpp @@ -281,7 +281,8 @@ namespace { /// getFreePhysReg - return a free physical register for this virtual /// register interval if we have one, otherwise return 0. unsigned getFreePhysReg(LiveInterval* cur); - unsigned getFreePhysReg(const TargetRegisterClass *RC, + unsigned getFreePhysReg(LiveInterval* cur, + const TargetRegisterClass *RC, unsigned MaxInactiveCount, SmallVector<unsigned, 256> &inactiveCounts, bool SkipDGRegs); @@ -352,11 +353,12 @@ void RALinScan::ComputeRelatedRegClasses() { /// different register classes or because the coalescer was overly /// conservative. unsigned RALinScan::attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg) { - if ((cur.preference && cur.preference == Reg) || !cur.containsOneValue()) + unsigned Preference = vrm_->getRegAllocPref(cur.reg); + if ((Preference && Preference == Reg) || !cur.containsOneValue()) return Reg; VNInfo *vni = cur.begin()->valno; - if (!vni->def || vni->def == ~1U || vni->def == ~0U) + if (!vni->def || vni->isUnused() || !vni->isDefAccurate()) return Reg; MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def); unsigned SrcReg, DstReg, SrcSubReg, DstSubReg, PhysReg; @@ -584,7 +586,7 @@ void RALinScan::linearScan() // register allocator had to spill other registers in its register class. if (ls_->getNumIntervals() == 0) return; - if (!vrm_->FindUnusedRegisters(tri_, li_)) + if (!vrm_->FindUnusedRegisters(li_)) return; } @@ -743,7 +745,7 @@ static void addStackInterval(LiveInterval *cur, LiveStacks *ls_, if (SI.hasAtLeastOneValue()) VNI = SI.getValNumInfo(0); else - VNI = SI.getNextValue(~0U, 0, ls_->getVNInfoAllocator()); + VNI = SI.getNextValue(0, 0, false, ls_->getVNInfoAllocator()); LiveInterval &RI = li_->getInterval(cur->reg); // FIXME: This may be overly conservative. @@ -897,7 +899,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) // This is an implicitly defined live interval, just assign any register. const TargetRegisterClass *RC = mri_->getRegClass(cur->reg); if (cur->empty()) { - unsigned physReg = cur->preference; + unsigned physReg = vrm_->getRegAllocPref(cur->reg); if (!physReg) physReg = *RC->allocation_order_begin(*mf_); DOUT << tri_->getName(physReg) << '\n'; @@ -917,9 +919,9 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) // register class, then we should try to assign it the same register. // This can happen when the move is from a larger register class to a smaller // one, e.g. X86::mov32to32_. These move instructions are not coalescable. 
- if (!cur->preference && cur->hasAtLeastOneValue()) { + if (!vrm_->getRegAllocPref(cur->reg) && cur->hasAtLeastOneValue()) { VNInfo *vni = cur->begin()->valno; - if (vni->def && vni->def != ~1U && vni->def != ~0U) { + if (vni->def && !vni->isUnused() && vni->isDefAccurate()) { MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def); unsigned SrcReg, DstReg, SrcSubReg, DstSubReg; if (CopyMI && @@ -935,7 +937,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) if (DstSubReg) Reg = tri_->getMatchingSuperReg(Reg, DstSubReg, RC); if (Reg && allocatableRegs_[Reg] && RC->contains(Reg)) - cur->preference = Reg; + mri_->setRegAllocationHint(cur->reg, 0, Reg); } } } @@ -1044,7 +1046,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) if (LiveInterval *NextReloadLI = hasNextReloadInterval(cur)) { // "Downgrade" physReg to try to keep physReg from being allocated until // the next reload from the same SS is allocated. - NextReloadLI->preference = physReg; + mri_->setRegAllocationHint(NextReloadLI->reg, 0, physReg); DowngradeRegister(cur, physReg); } return; @@ -1071,7 +1073,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) // Find a register to spill. float minWeight = HUGE_VALF; - unsigned minReg = 0; /*cur->preference*/; // Try the pref register first. + unsigned minReg = 0; bool Found = false; std::vector<std::pair<unsigned,float> > RegsWeights; @@ -1290,7 +1292,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) // It interval has a preference, it must be defined by a copy. Clear the // preference now since the source interval allocation may have been // undone as well. - i->preference = 0; + mri_->setRegAllocationHint(i->reg, 0, 0); else { UpgradeRegister(ii->second); } @@ -1346,15 +1348,23 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) } } -unsigned RALinScan::getFreePhysReg(const TargetRegisterClass *RC, +unsigned RALinScan::getFreePhysReg(LiveInterval* cur, + const TargetRegisterClass *RC, unsigned MaxInactiveCount, SmallVector<unsigned, 256> &inactiveCounts, bool SkipDGRegs) { unsigned FreeReg = 0; unsigned FreeRegInactiveCount = 0; - TargetRegisterClass::iterator I = RC->allocation_order_begin(*mf_); - TargetRegisterClass::iterator E = RC->allocation_order_end(*mf_); + std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(cur->reg); + // Resolve second part of the hint (if possible) given the current allocation. + unsigned physReg = Hint.second; + if (physReg && + TargetRegisterInfo::isVirtualRegister(physReg) && vrm_->hasPhys(physReg)) + physReg = vrm_->getPhys(physReg); + + TargetRegisterClass::iterator I, E; + tie(I, E) = tri_->getAllocationOrder(RC, Hint.first, physReg, *mf_); assert(I != E && "No allocatable register in this register class!"); // Scan for the first available register. @@ -1377,7 +1387,7 @@ unsigned RALinScan::getFreePhysReg(const TargetRegisterClass *RC, // return this register. if (FreeReg == 0 || FreeRegInactiveCount == MaxInactiveCount) return FreeReg; - + // Continue scanning the registers, looking for the one with the highest // inactive count. Alkis found that this reduced register pressure very // slightly on X86 (in rev 1.94 of this file), though this should probably be @@ -1428,20 +1438,21 @@ unsigned RALinScan::getFreePhysReg(LiveInterval *cur) { // If copy coalescer has assigned a "preferred" register, check if it's // available first. 
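getFreePhysReg above now resolves the hint before walking the allocation order: the second half of the hint may name a virtual register, which is only useful once that register has itself been assigned, so it is chased through the virt-to-phys mapping first. A simplified sketch; a plain std::map stands in for VirtRegMap, and an unassigned virtual hint is treated here as no hint:

```cpp
#include <map>

unsigned resolveHintReg(unsigned HintReg, bool IsVirtual,
                        const std::map<unsigned, unsigned> &VirtToPhys) {
  if (HintReg && IsVirtual) {
    auto It = VirtToPhys.find(HintReg);
    // The hinted virtual register has no physical assignment yet, so
    // there is nothing concrete to prefer.
    return It == VirtToPhys.end() ? 0u : It->second;
  }
  return HintReg; // already physical, or no hint at all
}
```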
- if (cur->preference) { - DOUT << "(preferred: " << tri_->getName(cur->preference) << ") "; - if (isRegAvail(cur->preference) && - RC->contains(cur->preference)) - return cur->preference; + unsigned Preference = vrm_->getRegAllocPref(cur->reg); + if (Preference) { + DOUT << "(preferred: " << tri_->getName(Preference) << ") "; + if (isRegAvail(Preference) && + RC->contains(Preference)) + return Preference; } if (!DowngradedRegs.empty()) { - unsigned FreeReg = getFreePhysReg(RC, MaxInactiveCount, inactiveCounts, + unsigned FreeReg = getFreePhysReg(cur, RC, MaxInactiveCount, inactiveCounts, true); if (FreeReg) return FreeReg; } - return getFreePhysReg(RC, MaxInactiveCount, inactiveCounts, false); + return getFreePhysReg(cur, RC, MaxInactiveCount, inactiveCounts, false); } FunctionPass* llvm::createLinearScanRegisterAllocator() { diff --git a/lib/CodeGen/RegAllocPBQP.cpp b/lib/CodeGen/RegAllocPBQP.cpp index 61450a7..89e2c59 100644 --- a/lib/CodeGen/RegAllocPBQP.cpp +++ b/lib/CodeGen/RegAllocPBQP.cpp @@ -651,7 +651,7 @@ void PBQPRegAlloc::addStackInterval(const LiveInterval *spilled, if (stackInterval.getNumValNums() != 0) vni = stackInterval.getValNumInfo(0); else - vni = stackInterval.getNextValue(-0U, 0, lss->getVNInfoAllocator()); + vni = stackInterval.getNextValue(0, 0, false, lss->getVNInfoAllocator()); LiveInterval &rhsInterval = lis->getInterval(spilled->reg); stackInterval.MergeRangesInAsValue(rhsInterval, vni); @@ -733,8 +733,7 @@ void PBQPRegAlloc::finalizeAlloc() const { itr != end; ++itr) { LiveInterval *li = *itr; - unsigned physReg = li->preference; - + unsigned physReg = vrm->getRegAllocPref(li->reg); if (physReg == 0) { const TargetRegisterClass *liRC = mri->getRegClass(li->reg); physReg = *liRC->allocation_order_begin(*mf); diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp index 4a7dbeb..24fccf0 100644 --- a/lib/CodeGen/SelectionDAG/FastISel.cpp +++ b/lib/CodeGen/SelectionDAG/FastISel.cpp @@ -47,7 +47,6 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/DebugLoc.h" #include "llvm/CodeGen/DwarfWriter.h" #include "llvm/Analysis/DebugInfo.h" #include "llvm/Target/TargetData.h" @@ -361,7 +360,7 @@ bool FastISel::SelectCall(User *I) { // Returned ID is 0 if this is unbalanced "end of inlined // scope". This could happen if optimizer eats dbg intrinsics // or "beginning of inlined scope" is not recoginized due to - // missing location info. In such cases, do ignore this region.end. + // missing location info. In such cases, ignore this region.end. 
BuildMI(MBB, DL, II).addImm(ID); } else { const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL); diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index f3c2833..1bb8090 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -2768,6 +2768,53 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, ISD::SETULT : ISD::SETUGT)); break; } + case ISD::UMULO: + case ISD::SMULO: { + MVT VT = Node->getValueType(0); + SDValue LHS = Node->getOperand(0); + SDValue RHS = Node->getOperand(1); + SDValue BottomHalf; + SDValue TopHalf; + static unsigned Ops[2][3] = + { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND }, + { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }}; + bool isSigned = Node->getOpcode() == ISD::SMULO; + if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) { + BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); + TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS); + } else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) { + BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS, + RHS); + TopHalf = BottomHalf.getValue(1); + } else if (TLI.isTypeLegal(MVT::getIntegerVT(VT.getSizeInBits() * 2))) { + MVT WideVT = MVT::getIntegerVT(VT.getSizeInBits() * 2); + LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS); + RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS); + Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS); + BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1, + DAG.getIntPtrConstant(0)); + TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1, + DAG.getIntPtrConstant(1)); + } else { + // FIXME: We should be able to fall back to a libcall with an illegal + // type in some cases cases. + // Also, we can fall back to a division in some cases, but that's a big + // performance hit in the general case. + assert(0 && "Don't know how to expand this operation yet!"); + } + if (isSigned) { + Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1, TLI.getShiftAmountTy()); + Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1); + TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, Tmp1, + ISD::SETNE); + } else { + TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, + DAG.getConstant(0, VT), ISD::SETNE); + } + Results.push_back(BottomHalf); + Results.push_back(TopHalf); + break; + } case ISD::BUILD_PAIR: { MVT PairTy = Node->getValueType(0); Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0)); diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp index b7d7818..6e5adee 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp @@ -95,14 +95,13 @@ void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo, if (InVT.isVector() && OutVT.isInteger()) { // Handle cases like i64 = BIT_CONVERT v1i64 on x86, where the operand // is legal but the result is not. 
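The new ISD::UMULO/SMULO expansion above tries three strategies in order: a MULHU/MULHS high-half multiply, a UMUL_LOHI/SMUL_LOHI pair, or widening to a double-width multiply and splitting the product. A plain-C++ sketch of the widening strategy for 32-bit operands, including the signed check that compares the top half against the sign bits of the bottom half (the SRA/SETNE sequence in the hunk); this assumes the usual two's-complement narrowing behavior:

```cpp
#include <cstdint>

// Unsigned: overflow iff the top half of the wide product is nonzero.
bool umulo32(uint32_t a, uint32_t b, uint32_t &lo) {
  uint64_t wide = (uint64_t)a * (uint64_t)b;
  lo = (uint32_t)wide;
  uint32_t hi = (uint32_t)(wide >> 32);
  return hi != 0;
}

// Signed: overflow iff the top half differs from the sign-extension of
// the bottom half, i.e. from (lo >> 31) replicated across the word.
bool smulo32(int32_t a, int32_t b, int32_t &lo) {
  int64_t wide = (int64_t)a * (int64_t)b;
  lo = (int32_t)wide;
  int32_t hi = (int32_t)((uint64_t)wide >> 32);
  return hi != (lo >> 31);
}
```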
- MVT NVT = MVT::getVectorVT(TLI.getTypeToTransformTo(OutVT), 2); + MVT NVT = MVT::getVectorVT(NOutVT, 2); if (isTypeLegal(NVT)) { SDValue CastInOp = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, InOp); - MVT EltNVT = NVT.getVectorElementType(); - Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltNVT, CastInOp, + Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NOutVT, CastInOp, DAG.getIntPtrConstant(0)); - Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltNVT, CastInOp, + Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NOutVT, CastInOp, DAG.getIntPtrConstant(1)); if (TLI.isBigEndian()) diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp index 93750d6..48ebd0f 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp @@ -5317,8 +5317,12 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) { if ((OpFlag & 7) == 2 /*REGDEF*/ || (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) { // Add (OpFlag&0xffff)>>3 registers to MatchedRegs. - assert(!OpInfo.isIndirect && - "Don't know how to handle tied indirect register inputs yet!"); + if (OpInfo.isIndirect) { + cerr << "llvm: error: " + "Don't know how to handle tied indirect " + "register inputs yet!\n"; + exit(1); + } RegsForValue MatchedRegs; MatchedRegs.TLI = &TLI; MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType()); diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp index ab4cd51..a771d46 100644 --- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -171,6 +171,8 @@ static void InitLibcallNames(const char **Names) { Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2"; Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2"; Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2"; + Names[RTLIB::FPTOSINT_F32_I8] = "__fixsfi8"; + Names[RTLIB::FPTOSINT_F32_I16] = "__fixsfi16"; Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi"; Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi"; Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti"; @@ -183,6 +185,8 @@ static void InitLibcallNames(const char **Names) { Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi"; Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi"; Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti"; + Names[RTLIB::FPTOUINT_F32_I8] = "__fixunssfi8"; + Names[RTLIB::FPTOUINT_F32_I16] = "__fixunssfi16"; Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi"; Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi"; Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti"; @@ -271,6 +275,10 @@ RTLIB::Libcall RTLIB::getFPROUND(MVT OpVT, MVT RetVT) { /// UNKNOWN_LIBCALL if there is none. RTLIB::Libcall RTLIB::getFPTOSINT(MVT OpVT, MVT RetVT) { if (OpVT == MVT::f32) { + if (RetVT == MVT::i8) + return FPTOSINT_F32_I8; + if (RetVT == MVT::i16) + return FPTOSINT_F32_I16; if (RetVT == MVT::i32) return FPTOSINT_F32_I32; if (RetVT == MVT::i64) @@ -306,6 +314,10 @@ RTLIB::Libcall RTLIB::getFPTOSINT(MVT OpVT, MVT RetVT) { /// UNKNOWN_LIBCALL if there is none. RTLIB::Libcall RTLIB::getFPTOUINT(MVT OpVT, MVT RetVT) { if (OpVT == MVT::f32) { + if (RetVT == MVT::i8) + return FPTOUINT_F32_I8; + if (RetVT == MVT::i16) + return FPTOUINT_F32_I16; if (RetVT == MVT::i32) return FPTOUINT_F32_I32; if (RetVT == MVT::i64) @@ -2584,8 +2596,12 @@ bool TargetLowering::CheckTailCallReturnConstraints(CallSDNode *TheCall, // Check that operand of the RET node sources from the CALL node. The RET node // has at least two operands. Operand 0 holds the chain. Operand 1 holds the // value. 
+ // Also we need to check that there is no code in between the call and the + // return. Hence we also check that the incomming chain to the return sources + // from the outgoing chain of the call. if (NumOps > 1 && - IgnoreHarmlessInstructions(Ret.getOperand(1)) == SDValue(TheCall,0)) + IgnoreHarmlessInstructions(Ret.getOperand(1)) == SDValue(TheCall,0) && + Ret.getOperand(0) == SDValue(TheCall, TheCall->getNumValues()-1)) return true; // void return: The RET node has the chain result value of the CALL node as // input. diff --git a/lib/CodeGen/SimpleRegisterCoalescing.cpp b/lib/CodeGen/SimpleRegisterCoalescing.cpp index 2bc234f..2034805 100644 --- a/lib/CodeGen/SimpleRegisterCoalescing.cpp +++ b/lib/CodeGen/SimpleRegisterCoalescing.cpp @@ -141,7 +141,7 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA, // The live interval of ECX is represented as this: // %reg20,inf = [46,47:1)[174,230:0) 0@174-(230) 1@46-(47) // The coalescer has no idea there was a def in the middle of [174,230]. - if (AValNo->redefByEC) + if (AValNo->hasRedefByEC()) return false; // If AValNo is defined as a copy from IntB, we can potentially process this. @@ -203,7 +203,8 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA, for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) { LiveInterval &SRLI = li_->getInterval(*SR); SRLI.addRange(LiveRange(FillerStart, FillerEnd, - SRLI.getNextValue(FillerStart, 0, li_->getVNInfoAllocator()))); + SRLI.getNextValue(FillerStart, 0, true, + li_->getVNInfoAllocator()))); } } @@ -304,8 +305,10 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA, assert(ALR != IntA.end() && "Live range not found!"); VNInfo *AValNo = ALR->valno; // If other defs can reach uses of this def, then it's not safe to perform - // the optimization. - if (AValNo->def == ~0U || AValNo->def == ~1U || AValNo->hasPHIKill) + // the optimization. FIXME: Do isPHIDef and isDefAccurate both need to be + // tested? + if (AValNo->isPHIDef() || !AValNo->isDefAccurate() || + AValNo->isUnused() || AValNo->hasPHIKill()) return false; MachineInstr *DefMI = li_->getInstructionFromIndex(AValNo->def); const TargetInstrDesc &TID = DefMI->getDesc(); @@ -351,7 +354,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA, unsigned OpIdx = NewMI->findRegisterUseOperandIdx(IntA.reg, false); NewMI->getOperand(OpIdx).setIsKill(); - bool BHasPHIKill = BValNo->hasPHIKill; + bool BHasPHIKill = BValNo->hasPHIKill(); SmallVector<VNInfo*, 4> BDeadValNos; SmallVector<unsigned, 4> BKills; std::map<unsigned, unsigned> BExtend; @@ -403,7 +406,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA, // extended to the end of the existing live range defined by the copy. 
unsigned DefIdx = li_->getDefIndex(UseIdx); const LiveRange *DLR = IntB.getLiveRangeContaining(DefIdx); - BHasPHIKill |= DLR->valno->hasPHIKill; + BHasPHIKill |= DLR->valno->hasPHIKill(); assert(DLR->valno->def == DefIdx); BDeadValNos.push_back(DLR->valno); BExtend[DLR->start] = DLR->end; @@ -462,7 +465,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA, } } IntB.addKills(ValNo, BKills); - ValNo->hasPHIKill = BHasPHIKill; + ValNo->setHasPHIKill(BHasPHIKill); DOUT << " result = "; IntB.print(DOUT, tri_); DOUT << "\n"; @@ -578,8 +581,10 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt, assert(SrcLR != SrcInt.end() && "Live range not found!"); VNInfo *ValNo = SrcLR->valno; // If other defs can reach uses of this def, then it's not safe to perform - // the optimization. - if (ValNo->def == ~0U || ValNo->def == ~1U || ValNo->hasPHIKill) + // the optimization. FIXME: Do isPHIDef and isDefAccurate both need to be + // tested? + if (ValNo->isPHIDef() || !ValNo->isDefAccurate() || + ValNo->isUnused() || ValNo->hasPHIKill()) return false; MachineInstr *DefMI = li_->getInstructionFromIndex(ValNo->def); const TargetInstrDesc &TID = DefMI->getDesc(); @@ -616,19 +621,17 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt, } MachineBasicBlock::iterator MII = next(MachineBasicBlock::iterator(CopyMI)); - CopyMI->removeFromParent(); tii_->reMaterialize(*MBB, MII, DstReg, DefMI); MachineInstr *NewMI = prior(MII); if (checkForDeadDef) { - // PR4090 fix: Trim interval failed because there was no use of the - // source interval in this MBB. If the def is in this MBB too then we - // should mark it dead: - if (DefMI->getParent() == MBB) { - DefMI->addRegisterDead(SrcInt.reg, tri_); - SrcLR->end = SrcLR->start + 1; - } - + // PR4090 fix: Trim interval failed because there was no use of the + // source interval in this MBB. If the def is in this MBB too then we + // should mark it dead: + if (DefMI->getParent() == MBB) { + DefMI->addRegisterDead(SrcInt.reg, tri_); + SrcLR->end = SrcLR->start + 1; + } } // CopyMI may have implicit operands, transfer them over to the newly @@ -647,7 +650,7 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt, } li_->ReplaceMachineInstrInMaps(CopyMI, NewMI); - MBB->getParent()->DeleteMachineInstr(CopyMI); + CopyMI->eraseFromParent(); ReMatCopies.insert(CopyMI); ReMatDefs.insert(DefMI); ++NumReMats; @@ -673,7 +676,7 @@ bool SimpleRegisterCoalescing::isBackEdgeCopy(MachineInstr *CopyMI, return false; unsigned KillIdx = li_->getMBBEndIdx(MBB) + 1; if (DstLR->valno->kills.size() == 1 && - DstLR->valno->kills[0] == KillIdx && DstLR->valno->hasPHIKill) + DstLR->valno->kills[0] == KillIdx && DstLR->valno->hasPHIKill()) return true; return false; } @@ -937,7 +940,7 @@ bool SimpleRegisterCoalescing::CanCoalesceWithImpDef(MachineInstr *CopyMI, LiveInterval::iterator LR = li.FindLiveRangeContaining(CopyIdx); if (LR == li.end()) return false; - if (LR->valno->hasPHIKill) + if (LR->valno->hasPHIKill()) return false; if (LR->valno->def != CopyIdx) return false; @@ -965,11 +968,11 @@ bool SimpleRegisterCoalescing::CanCoalesceWithImpDef(MachineInstr *CopyMI, } -/// RemoveCopiesFromValNo - The specified value# is defined by an implicit -/// def and it is being removed. Turn all copies from this value# into -/// identity copies so they will be removed. 
-void SimpleRegisterCoalescing::RemoveCopiesFromValNo(LiveInterval &li, - VNInfo *VNI) { +/// TurnCopiesFromValNoToImpDefs - The specified value# is defined by an +/// implicit_def and it is being removed. Turn all copies from this value# +/// into implicit_defs. +void SimpleRegisterCoalescing::TurnCopiesFromValNoToImpDefs(LiveInterval &li, + VNInfo *VNI) { SmallVector<MachineInstr*, 4> ImpDefs; MachineOperand *LastUse = NULL; unsigned LastUseIdx = li_->getUseIndex(VNI->def); @@ -979,9 +982,8 @@ void SimpleRegisterCoalescing::RemoveCopiesFromValNo(LiveInterval &li, MachineInstr *MI = &*RI; ++RI; if (MO->isDef()) { - if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) { + if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) ImpDefs.push_back(MI); - } continue; } if (JoinedCopies.count(MI)) @@ -994,13 +996,18 @@ void SimpleRegisterCoalescing::RemoveCopiesFromValNo(LiveInterval &li, unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx; if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) && SrcReg == li.reg) { - // Each use MI may have multiple uses of this register. Change them all. - for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { - MachineOperand &MO = MI->getOperand(i); - if (MO.isReg() && MO.getReg() == li.reg) - MO.setReg(DstReg); - } - JoinedCopies.insert(MI); + // Change it to an implicit_def. + MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF)); + for (int i = MI->getNumOperands() - 1, e = 0; i > e; --i) + MI->RemoveOperand(i); + // It's no longer a copy, update the valno it defines. + unsigned DefIdx = li_->getDefIndex(UseIdx); + LiveInterval &DstInt = li_->getInterval(DstReg); + LiveInterval::iterator DLR = DstInt.FindLiveRangeContaining(DefIdx); + assert(DLR != DstInt.end() && "Live range not found!"); + assert(DLR->valno->copy == MI); + DLR->valno->copy = NULL; + ReMatCopies.insert(MI); } else if (UseIdx > LastUseIdx) { LastUseIdx = UseIdx; LastUse = MO; @@ -1265,6 +1272,17 @@ SimpleRegisterCoalescing::CanJoinInsertSubRegToPhysReg(unsigned DstReg, return true; } +/// getRegAllocPreference - Return register allocation preference register. +/// +static unsigned getRegAllocPreference(unsigned Reg, MachineFunction &MF, + MachineRegisterInfo *MRI, + const TargetRegisterInfo *TRI) { + if (TargetRegisterInfo::isPhysicalRegister(Reg)) + return 0; + std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg); + return TRI->ResolveRegAllocHint(Hint.first, Hint.second, MF); +} + /// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg, /// which are the src/dst of the copy instruction CopyMI. This returns true /// if the copy was successfully coalesced away. If it is not currently @@ -1566,7 +1584,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) { if (PhysJoinTweak) { if (SrcIsPhys) { if (!isWinToJoinVRWithSrcPhysReg(CopyMI, CopyMBB, DstInt, SrcInt)) { - DstInt.preference = SrcReg; + mri_->setRegAllocationHint(DstInt.reg, 0, SrcReg); ++numAborts; DOUT << "\tMay tie down a physical register, abort!\n"; Again = true; // May be possible to coalesce later. @@ -1574,7 +1592,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) { } } else { if (!isWinToJoinVRWithDstPhysReg(CopyMI, CopyMBB, DstInt, SrcInt)) { - SrcInt.preference = DstReg; + mri_->setRegAllocationHint(SrcInt.reg, 0, DstReg); ++numAborts; DOUT << "\tMay tie down a physical register, abort!\n"; Again = true; // May be possible to coalesce later. 
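The abort paths above no longer write to LiveInterval::preference; they record the physical register as an allocation hint in MachineRegisterInfo and set Again so the copy is retried on a later round. A sketch of that pattern with illustrative names (the HintMap stands in for the MRI side table):

```cpp
#include <map>
#include <utility>

using HintMap = std::map<unsigned, std::pair<unsigned, unsigned>>;

bool tryCoalesceWithPhysReg(unsigned VReg, unsigned PReg, bool WinToJoin,
                            HintMap &Hints, unsigned &NumAborts, bool &Again) {
  if (!WinToJoin) {
    // Do not tie down the physical register now; leave a preference the
    // allocator may still honor, and ask the pass to revisit this copy.
    Hints[VReg] = {0u, PReg};
    ++NumAborts;
    Again = true;             // may be possible to coalesce later
    return false;
  }
  // ... perform the join ...
  return true;
}
```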
@@ -1598,7 +1616,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
       if (Length > Threshold &&
           (((float)std::distance(mri_->use_begin(JoinVReg),
                                  mri_->use_end()) / Length) < Ratio)) {
-        JoinVInt.preference = JoinPReg;
+        mri_->setRegAllocationHint(JoinVInt.reg, 0, JoinPReg);
         ++numAborts;
         DOUT << "\tMay tie down a physical register, abort!\n";
         Again = true;  // May be possible to coalesce later.
@@ -1669,9 +1687,9 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
          E = SavedLI->vni_end(); I != E; ++I) {
       const VNInfo *ValNo = *I;
       VNInfo *NewValNo = RealInt.getNextValue(ValNo->def, ValNo->copy,
+                                              false, // updated at *
                                               li_->getVNInfoAllocator());
-      NewValNo->hasPHIKill = ValNo->hasPHIKill;
-      NewValNo->redefByEC = ValNo->redefByEC;
+      NewValNo->setFlags(ValNo->getFlags()); // * updated here.
       RealInt.addKills(NewValNo, ValNo->kills);
       RealInt.MergeValueInAsValue(*SavedLI, ValNo, NewValNo);
     }
@@ -1691,7 +1709,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
       !SrcIsPhys && !DstIsPhys) {
     if ((isExtSubReg && !Swapped) ||
         ((isInsSubReg || isSubRegToReg) && Swapped)) {
-      ResSrcInt->Copy(*ResDstInt, li_->getVNInfoAllocator());
+      ResSrcInt->Copy(*ResDstInt, mri_, li_->getVNInfoAllocator());
       std::swap(SrcReg, DstReg);
       std::swap(ResSrcInt, ResDstInt);
     }
@@ -1710,7 +1728,8 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
     for (LiveInterval::const_vni_iterator i = ResSrcInt->vni_begin(),
            e = ResSrcInt->vni_end(); i != e; ++i) {
       const VNInfo *vni = *i;
-      if (!vni->def || vni->def == ~1U || vni->def == ~0U)
+      // FIXME: Do isPHIDef and isDefAccurate both need to be tested?
+      if (!vni->def || vni->isUnused() || vni->isPHIDef() || !vni->isDefAccurate())
         continue;
       MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
       unsigned NewSrcReg, NewDstReg, NewSrcSubIdx, NewDstSubIdx;
@@ -1747,6 +1766,9 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
     // being merged.
     li_->removeInterval(SrcReg);

+    // Update regalloc hint.
+    tri_->UpdateRegAllocHint(SrcReg, DstReg, *mf_);
+
     // Manually deleted the live interval copy.
     if (SavedLI) {
       SavedLI->clear();
@@ -1762,7 +1784,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
       VNInfo *ImpVal = LR->valno;
       assert(ImpVal->def == CopyIdx);
       unsigned NextDef = LR->end;
-      RemoveCopiesFromValNo(*ResDstInt, ImpVal);
+      TurnCopiesFromValNoToImpDefs(*ResDstInt, ImpVal);
       ResDstInt->removeValNo(ImpVal);
       LR = ResDstInt->FindLiveRangeContaining(NextDef);
       if (LR != ResDstInt->end() && LR->valno->def == NextDef) {
@@ -1778,11 +1800,12 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {

   // If resulting interval has a preference that no longer fits because of subreg
   // coalescing, just clear the preference.
-  if (ResDstInt->preference && (isExtSubReg || isInsSubReg || isSubRegToReg) &&
+  unsigned Preference = getRegAllocPreference(ResDstInt->reg, *mf_, mri_, tri_);
+  if (Preference && (isExtSubReg || isInsSubReg || isSubRegToReg) &&
       TargetRegisterInfo::isVirtualRegister(ResDstInt->reg)) {
     const TargetRegisterClass *RC = mri_->getRegClass(ResDstInt->reg);
-    if (!RC->contains(ResDstInt->preference))
-      ResDstInt->preference = 0;
+    if (!RC->contains(Preference))
+      mri_->setRegAllocationHint(ResDstInt->reg, 0, 0);
   }

   DOUT << "\n\t\tJoined. Result = "; ResDstInt->print(DOUT, tri_);
Result = "; ResDstInt->print(DOUT, tri_); @@ -1856,7 +1879,8 @@ bool SimpleRegisterCoalescing::RangeIsDefinedByCopyFromReg(LiveInterval &li, unsigned SrcReg = li_->getVNInfoSourceReg(LR->valno); if (SrcReg == Reg) return true; - if (LR->valno->def == ~0U && + // FIXME: Do isPHIDef and isDefAccurate both need to be tested? + if ((LR->valno->isPHIDef() || !LR->valno->isDefAccurate()) && TargetRegisterInfo::isPhysicalRegister(li.reg) && *tri_->getSuperRegisters(li.reg)) { // It's a sub-register live interval, we may not have precise information. @@ -2025,12 +2049,20 @@ bool SimpleRegisterCoalescing::SimpleJoin(LiveInterval &LHS, LiveInterval &RHS){ // Okay, the final step is to loop over the RHS live intervals, adding them to // the LHS. - LHSValNo->hasPHIKill |= VNI->hasPHIKill; + if (VNI->hasPHIKill()) + LHSValNo->setHasPHIKill(true); LHS.addKills(LHSValNo, VNI->kills); LHS.MergeRangesInAsValue(RHS, LHSValNo); LHS.weight += RHS.weight; - if (RHS.preference && !LHS.preference) - LHS.preference = RHS.preference; + + // Update regalloc hint if both are virtual registers. + if (TargetRegisterInfo::isVirtualRegister(LHS.reg) && + TargetRegisterInfo::isVirtualRegister(RHS.reg)) { + std::pair<unsigned, unsigned> RHSPref = mri_->getRegAllocationHint(RHS.reg); + std::pair<unsigned, unsigned> LHSPref = mri_->getRegAllocationHint(LHS.reg); + if (RHSPref != LHSPref) + mri_->setRegAllocationHint(LHS.reg, RHSPref.first, RHSPref.second); + } // Update the liveintervals of sub-registers. if (TargetRegisterInfo::isPhysicalRegister(LHS.reg)) @@ -2185,7 +2217,7 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS, for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end(); i != e; ++i) { VNInfo *VNI = *i; - if (VNI->def == ~1U || VNI->copy == 0) // Src not defined by a copy? + if (VNI->isUnused() || VNI->copy == 0) // Src not defined by a copy? continue; // DstReg is known to be a register in the LHS interval. If the src is @@ -2202,7 +2234,7 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS, for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end(); i != e; ++i) { VNInfo *VNI = *i; - if (VNI->def == ~1U || VNI->copy == 0) // Src not defined by a copy? + if (VNI->isUnused() || VNI->copy == 0) // Src not defined by a copy? continue; // DstReg is known to be a register in the RHS interval. If the src is @@ -2222,7 +2254,7 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS, i != e; ++i) { VNInfo *VNI = *i; unsigned VN = VNI->id; - if (LHSValNoAssignments[VN] >= 0 || VNI->def == ~1U) + if (LHSValNoAssignments[VN] >= 0 || VNI->isUnused()) continue; ComputeUltimateVN(VNI, NewVNInfo, LHSValsDefinedFromRHS, RHSValsDefinedFromLHS, @@ -2232,7 +2264,7 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS, i != e; ++i) { VNInfo *VNI = *i; unsigned VN = VNI->id; - if (RHSValNoAssignments[VN] >= 0 || VNI->def == ~1U) + if (RHSValNoAssignments[VN] >= 0 || VNI->isUnused()) continue; // If this value number isn't a copy from the LHS, it's a new number. 
@@ -2296,7 +2328,8 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
     VNInfo *VNI = I->first;
     unsigned LHSValID = LHSValNoAssignments[VNI->id];
     LiveInterval::removeKill(NewVNInfo[LHSValID], VNI->def);
-    NewVNInfo[LHSValID]->hasPHIKill |= VNI->hasPHIKill;
+    if (VNI->hasPHIKill())
+      NewVNInfo[LHSValID]->setHasPHIKill(true);
     RHS.addKills(NewVNInfo[LHSValID], VNI->kills);
   }
@@ -2306,7 +2339,8 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
     VNInfo *VNI = I->first;
     unsigned RHSValID = RHSValNoAssignments[VNI->id];
     LiveInterval::removeKill(NewVNInfo[RHSValID], VNI->def);
-    NewVNInfo[RHSValID]->hasPHIKill |= VNI->hasPHIKill;
+    if (VNI->hasPHIKill())
+      NewVNInfo[RHSValID]->setHasPHIKill(true);
     LHS.addKills(NewVNInfo[RHSValID], VNI->kills);
   }
@@ -2315,10 +2349,12 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
   if ((RHS.ranges.size() > LHS.ranges.size() &&
       TargetRegisterInfo::isVirtualRegister(LHS.reg)) ||
       TargetRegisterInfo::isPhysicalRegister(RHS.reg)) {
-    RHS.join(LHS, &RHSValNoAssignments[0], &LHSValNoAssignments[0], NewVNInfo);
+    RHS.join(LHS, &RHSValNoAssignments[0], &LHSValNoAssignments[0], NewVNInfo,
+             mri_);
     Swapped = true;
   } else {
-    LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo);
+    LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo,
+             mri_);
     Swapped = false;
   }
   return true;
@@ -2620,6 +2656,11 @@ SimpleRegisterCoalescing::TurnCopyIntoImpDef(MachineBasicBlock::iterator &I,
     return false;
   LiveInterval &DstInt = li_->getInterval(DstReg);
   const LiveRange *DstLR = DstInt.getLiveRangeContaining(CopyIdx);
+  // If the valno extends beyond this basic block, then it's not safe to delete
+  // the val# or else livein information won't be correct.
+  MachineBasicBlock *EndMBB = li_->getMBBFromIndex(DstLR->end);
+  if (EndMBB != MBB)
+    return false;
   DstInt.removeValNo(DstLR->valno);
   CopyMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
   for (int i = CopyMI->getNumOperands() - 1, e = 0; i > e; --i)
@@ -2800,7 +2841,8 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
         }

         // Slightly prefer live interval that has been assigned a preferred reg.
-        if (LI.preference)
+        std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(LI.reg);
+        if (Hint.first || Hint.second)
           LI.weight *= 1.01F;

         // Divide the weight of the interval by its size.  This encourages
diff --git a/lib/CodeGen/SimpleRegisterCoalescing.h b/lib/CodeGen/SimpleRegisterCoalescing.h
index a495bfd..d2c5581 100644
--- a/lib/CodeGen/SimpleRegisterCoalescing.h
+++ b/lib/CodeGen/SimpleRegisterCoalescing.h
@@ -219,10 +219,10 @@ namespace llvm {
     bool CanCoalesceWithImpDef(MachineInstr *CopyMI,
                                LiveInterval &li, LiveInterval &ImpLi) const;

-    /// RemoveCopiesFromValNo - The specified value# is defined by an implicit
-    /// def and it is being removed. Turn all copies from this value# into
-    /// identity copies so they will be removed.
-    void RemoveCopiesFromValNo(LiveInterval &li, VNInfo *VNI);
+    /// TurnCopiesFromValNoToImpDefs - The specified value# is defined by an
+    /// implicit_def and it is being removed. Turn all copies from this value#
+    /// into implicit_defs.
+    void TurnCopiesFromValNoToImpDefs(LiveInterval &li, VNInfo *VNI);

     /// isWinToJoinVRWithSrcPhysReg - Return true if it's worthwhile to join
     /// a virtual destination register with a physical source register.
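The recurring change across this file is that the old single-field LiveInterval::preference is replaced by a (hint type, register) pair stored in MachineRegisterInfo, with the target given a chance to interpret it. A condensed sketch of both sides of that API, assuming the same pass members (mri_, tri_, mf_) used in the hunks above, with VReg and PhysReg standing in for a virtual register and its desired physical register; hint type 0 is the plain "prefer this register" case:

// Write side: record that virtual register VReg would like PhysReg.
mri_->setRegAllocationHint(VReg, 0, PhysReg);

// Read side: fetch the (type, reg) pair and let the target translate it
// into a concrete physical register; a result of 0 means nothing usable.
std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(VReg);
unsigned Preferred = tri_->ResolveRegAllocHint(Hint.first, Hint.second, *mf_);

Moving the hint out of LiveInterval and behind a target hook is what makes non-trivial hint types possible: the target can encode relationships richer than "this exact register", and ResolveRegAllocHint decides what they mean at query time.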
diff --git a/lib/CodeGen/Spiller.cpp b/lib/CodeGen/Spiller.cpp
index ce63121..919a0ce 100644
--- a/lib/CodeGen/Spiller.cpp
+++ b/lib/CodeGen/Spiller.cpp
@@ -39,7 +39,8 @@ protected:
   VirtRegMap *vrm;

   /// Construct a spiller base.
-  SpillerBase(MachineFunction *mf, LiveIntervals *lis, LiveStacks *ls, VirtRegMap *vrm) :
+  SpillerBase(MachineFunction *mf, LiveIntervals *lis, LiveStacks *ls,
+              VirtRegMap *vrm) :
     mf(mf), lis(lis), ls(ls), vrm(vrm)
   {
     mfi = mf->getFrameInfo();
@@ -47,16 +48,24 @@ protected:
     tii = mf->getTarget().getInstrInfo();
   }

-  /// Insert a store of the given vreg to the given stack slot immediately
-  /// after the given instruction. Returns the base index of the inserted
-  /// instruction. The caller is responsible for adding an appropriate
-  /// LiveInterval to the LiveIntervals analysis.
-  unsigned insertStoreFor(MachineInstr *mi, unsigned ss,
-                          unsigned newVReg,
-                          const TargetRegisterClass *trc) {
-    MachineBasicBlock::iterator nextInstItr(mi);
-    ++nextInstItr;
+  /// Ensures there is space before the given machine instruction, returns the
+  /// instruction's new number.
+  unsigned makeSpaceBefore(MachineInstr *mi) {
+    if (!lis->hasGapBeforeInstr(lis->getInstructionIndex(mi))) {
+      lis->scaleNumbering(2);
+      ls->scaleNumbering(2);
+    }
+
+    unsigned miIdx = lis->getInstructionIndex(mi);
+
+    assert(lis->hasGapBeforeInstr(miIdx));
+
+    return miIdx;
+  }
+
+  /// Ensure there is space after the given machine instruction, returns the
+  /// instruction's new number.
+  unsigned makeSpaceAfter(MachineInstr *mi) {
     if (!lis->hasGapAfterInstr(lis->getInstructionIndex(mi))) {
       lis->scaleNumbering(2);
       ls->scaleNumbering(2);
@@ -66,7 +75,24 @@ protected:

     unsigned miIdx = lis->getInstructionIndex(mi);

     assert(lis->hasGapAfterInstr(miIdx));

-    tii->storeRegToStackSlot(*mi->getParent(), nextInstItr, newVReg,
+    return miIdx;
+  }
+
+
+  /// Insert a store of the given vreg to the given stack slot immediately
+  /// after the given instruction. Returns the base index of the inserted
+  /// instruction. The caller is responsible for adding an appropriate
+  /// LiveInterval to the LiveIntervals analysis.
+  unsigned insertStoreFor(MachineInstr *mi, unsigned ss,
+                          unsigned vreg,
+                          const TargetRegisterClass *trc) {
+
+    MachineBasicBlock::iterator nextInstItr(mi);
+    ++nextInstItr;
+
+    unsigned miIdx = makeSpaceAfter(mi);
+
+    tii->storeRegToStackSlot(*mi->getParent(), nextInstItr, vreg,
                              true, ss, trc);
     MachineBasicBlock::iterator storeInstItr(mi);
     ++storeInstItr;
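The makeSpaceBefore/makeSpaceAfter helpers factored out above rely on LiveIntervals::scaleNumbering(2): doubling every instruction index turns each adjacent pair (n, n+1) into (2n, 2n+2), so the odd slot between them becomes free for a newly inserted spill load or store. A self-contained toy illustration of that arithmetic (the indices are made up; nothing here is LLVM code):

#include <cstdio>

int main() {
  // Three adjacent instruction indices; no gap exists between them.
  unsigned idx[3] = {4, 5, 6};
  // scaleNumbering(2): every index is doubled -> 8, 10, 12.
  for (int i = 0; i < 3; ++i)
    idx[i] *= 2;
  // A spill store inserted after the first instruction can now take 9,
  // which sits strictly between the first two renumbered instructions.
  unsigned storeIdx = idx[0] + 1;
  std::printf("store slotted at %u between %u and %u\n",
              storeIdx, idx[0], idx[1]);
  return 0;
}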
@@ -81,25 +107,35 @@ protected:
     return storeInstIdx;
   }

+  void insertStoreOnInterval(LiveInterval *li,
+                             MachineInstr *mi, unsigned ss,
+                             unsigned vreg,
+                             const TargetRegisterClass *trc) {
+
+    unsigned storeInstIdx = insertStoreFor(mi, ss, vreg, trc);
+    unsigned start = lis->getDefIndex(lis->getInstructionIndex(mi)),
+             end = lis->getUseIndex(storeInstIdx);
+
+    VNInfo *vni =
+      li->getNextValue(storeInstIdx, 0, true, lis->getVNInfoAllocator());
+    vni->kills.push_back(storeInstIdx);
+    LiveRange lr(start, end, vni);
+
+    li->addRange(lr);
+  }
+
   /// Insert a load of the given vreg from the given stack slot immediately
   /// before the given instruction. Returns the base index of the inserted
   /// instruction. The caller is responsible for adding an appropriate
   /// LiveInterval to the LiveIntervals analysis.
   unsigned insertLoadFor(MachineInstr *mi, unsigned ss,
-                         unsigned newVReg,
+                         unsigned vreg,
                          const TargetRegisterClass *trc) {
     MachineBasicBlock::iterator useInstItr(mi);
-
-    if (!lis->hasGapBeforeInstr(lis->getInstructionIndex(mi))) {
-      lis->scaleNumbering(2);
-      ls->scaleNumbering(2);
-    }
-
-    unsigned miIdx = lis->getInstructionIndex(mi);
-
-    assert(lis->hasGapBeforeInstr(miIdx));
-
-    tii->loadRegFromStackSlot(*mi->getParent(), useInstItr, newVReg, ss, trc);
+
+    unsigned miIdx = makeSpaceBefore(mi);
+
+    tii->loadRegFromStackSlot(*mi->getParent(), useInstItr, vreg, ss, trc);
     MachineBasicBlock::iterator loadInstItr(mi);
     --loadInstItr;
     MachineInstr *loadInst = &*loadInstItr;
@@ -113,6 +149,24 @@ protected:
     return loadInstIdx;
   }

+  void insertLoadOnInterval(LiveInterval *li,
+                            MachineInstr *mi, unsigned ss,
+                            unsigned vreg,
+                            const TargetRegisterClass *trc) {
+
+    unsigned loadInstIdx = insertLoadFor(mi, ss, vreg, trc);
+    unsigned start = lis->getDefIndex(loadInstIdx),
+             end = lis->getUseIndex(lis->getInstructionIndex(mi));
+
+    VNInfo *vni =
+      li->getNextValue(loadInstIdx, 0, true, lis->getVNInfoAllocator());
+    vni->kills.push_back(lis->getInstructionIndex(mi));
+    LiveRange lr(start, end, vni);
+
+    li->addRange(lr);
+  }
+
+
   /// Add spill ranges for every use/def of the live interval, inserting loads
   /// immediately before each use, and stores after each def. No folding is
@@ -173,35 +227,16 @@ protected:
       assert(hasUse || hasDef);

       if (hasUse) {
-        unsigned loadInstIdx = insertLoadFor(mi, ss, newVReg, trc);
-        unsigned start = lis->getDefIndex(loadInstIdx),
-                 end = lis->getUseIndex(lis->getInstructionIndex(mi));
-
-        VNInfo *vni =
-          newLI->getNextValue(loadInstIdx, 0, lis->getVNInfoAllocator());
-        vni->kills.push_back(lis->getInstructionIndex(mi));
-        LiveRange lr(start, end, vni);
-
-        newLI->addRange(lr);
+        insertLoadOnInterval(newLI, mi, ss, newVReg, trc);
       }

       if (hasDef) {
-        unsigned storeInstIdx = insertStoreFor(mi, ss, newVReg, trc);
-        unsigned start = lis->getDefIndex(lis->getInstructionIndex(mi)),
-                 end = lis->getUseIndex(storeInstIdx);
-
-        VNInfo *vni =
-          newLI->getNextValue(storeInstIdx, 0, lis->getVNInfoAllocator());
-        vni->kills.push_back(storeInstIdx);
-        LiveRange lr(start, end, vni);
-
-        newLI->addRange(lr);
+        insertStoreOnInterval(newLI, mi, ss, newVReg, trc);
       }

       added.push_back(newLI);
     }
-
     return added;
   }
@@ -212,13 +247,44 @@ protected:
 /// folding.
 class TrivialSpiller : public SpillerBase {
 public:
-  TrivialSpiller(MachineFunction *mf, LiveIntervals *lis, LiveStacks *ls, VirtRegMap *vrm) :
+
+  TrivialSpiller(MachineFunction *mf, LiveIntervals *lis, LiveStacks *ls,
+                 VirtRegMap *vrm) :
     SpillerBase(mf, lis, ls, vrm) {}

   std::vector<LiveInterval*> spill(LiveInterval *li) {
     return trivialSpillEverywhere(li);
   }

+  std::vector<LiveInterval*> intraBlockSplit(LiveInterval *li, VNInfo *valno) {
+    std::vector<LiveInterval*> spillIntervals;
+    MachineBasicBlock::iterator storeInsertPoint;
+
+    if (valno->isDefAccurate()) {
+      // If we have an accurate def we can just grab an iterator to the instr
+      // after the def.
+      storeInsertPoint =
+        next(MachineBasicBlock::iterator(lis->getInstructionFromIndex(valno->def)));
+    } else {
+      // If the def info isn't accurate we check if this is a PHI def.
+      // If it is then def holds the index of the defining Basic Block, and we
+      // can use that to get an insertion point.
+      if (valno->isPHIDef()) {
+
+      } else {
+        // We have no usable def info. We can't split this value sensibly.
+        // FIXME: Need sensible feedback for "failure to split", an empty
+        // set of spill intervals could be reasonably returned from a
+        // split where both the store and load are folded.
+        return spillIntervals;
+      }
+    }
+
+
+
+    return spillIntervals;
+  }
+
 };

 }
diff --git a/lib/CodeGen/Spiller.h b/lib/CodeGen/Spiller.h
index cad054d..9c3900d 100644
--- a/lib/CodeGen/Spiller.h
+++ b/lib/CodeGen/Spiller.h
@@ -13,11 +13,14 @@
 #include <vector>

 namespace llvm {
+
   class LiveInterval;
   class LiveIntervals;
   class LiveStacks;
   class MachineFunction;
+  class MachineInstr;
   class VirtRegMap;
+  class VNInfo;

   /// Spiller interface.
   ///
@@ -26,7 +29,15 @@ namespace llvm {
   class Spiller {
   public:
     virtual ~Spiller() = 0;
+
+    /// Spill the given live range. The method used will depend on the Spiller
+    /// implementation selected.
     virtual std::vector<LiveInterval*> spill(LiveInterval *li) = 0;
+
+    /// Intra-block split.
+    virtual std::vector<LiveInterval*> intraBlockSplit(LiveInterval *li,
+                                                       VNInfo *valno) = 0;
+
   };

   /// Create and return a spiller object, as specified on the command line.
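With the header change above, every concrete spiller must now provide intraBlockSplit alongside spill. A hypothetical minimal subclass satisfying the widened interface; the class name and do-nothing bodies are inventions for illustration only, and TrivialSpiller above shows the direction a real implementation takes:

// Assumed to live next to TrivialSpiller in Spiller.cpp, inside the llvm
// namespace, so the LiveInterval/VNInfo declarations are already visible.
class NullSpiller : public Spiller {
public:
  // Produce no replacement intervals; a placeholder spill policy.
  std::vector<LiveInterval*> spill(LiveInterval *li) {
    return std::vector<LiveInterval*>();
  }
  // Likewise decline to split. A real implementation would place a store
  // after the value's def and reloads before its uses within the block.
  std::vector<LiveInterval*> intraBlockSplit(LiveInterval *li,
                                             VNInfo *valno) {
    return std::vector<LiveInterval*>();
  }
};

Note that returning an empty vector is currently ambiguous between "nothing to do" and "could not split", which is exactly the feedback problem the FIXME in TrivialSpiller::intraBlockSplit calls out.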
diff --git a/lib/CodeGen/StrongPHIElimination.cpp b/lib/CodeGen/StrongPHIElimination.cpp
index a2c1255..ca99528 100644
--- a/lib/CodeGen/StrongPHIElimination.cpp
+++ b/lib/CodeGen/StrongPHIElimination.cpp
@@ -827,7 +827,7 @@ void StrongPHIElimination::InsertCopies(MachineDomTreeNode* MDTN,
       // Add a live range for the new vreg
       LiveInterval& Int = LI.getInterval(I->getOperand(i).getReg());
       VNInfo* FirstVN = *Int.vni_begin();
-      FirstVN->hasPHIKill = false;
+      FirstVN->setHasPHIKill(false);
       if (I->getOperand(i).isKill())
         FirstVN->kills.push_back(
           LiveIntervals::getUseIndex(LI.getInstructionIndex(I)));
@@ -886,10 +886,7 @@ bool StrongPHIElimination::mergeLiveIntervals(unsigned primary,
     VNInfo* OldVN = R.valno;
     VNInfo*& NewVN = VNMap[OldVN];
     if (!NewVN) {
-      NewVN = LHS.getNextValue(OldVN->def,
-                               OldVN->copy,
-                               LI.getVNInfoAllocator());
-      NewVN->kills = OldVN->kills;
+      NewVN = LHS.createValueCopy(OldVN, LI.getVNInfoAllocator());
     }

     LiveRange LR (R.start, R.end, NewVN);
@@ -987,7 +984,7 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
         LiveInterval& Int = LI.getOrCreateInterval(I->first);
         const LiveRange* LR =
           Int.getLiveRangeContaining(LI.getMBBEndIdx(SI->second));
-        LR->valno->hasPHIKill = true;
+        LR->valno->setHasPHIKill(true);
         I->second.erase(SI->first);
       }
@@ -1037,7 +1034,7 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
           // now has an unknown def.
           unsigned idx = LI.getDefIndex(LI.getInstructionIndex(PInstr));
           const LiveRange* PLR = PI.getLiveRangeContaining(idx);
-          PLR->valno->def = ~0U;
+          PLR->valno->setIsPHIDef(true);
           LiveRange R (LI.getMBBStartIdx(PInstr->getParent()),
                        PLR->start, PLR->valno);
           PI.addRange(R);
diff --git a/lib/CodeGen/VirtRegMap.cpp b/lib/CodeGen/VirtRegMap.cpp
index 29637b9..4d3417f 100644
--- a/lib/CodeGen/VirtRegMap.cpp
+++ b/lib/CodeGen/VirtRegMap.cpp
@@ -51,6 +51,7 @@ static RegisterPass<VirtRegMap>
 X("virtregmap", "Virtual Register Map");

 bool VirtRegMap::runOnMachineFunction(MachineFunction &mf) {
+  MRI = &mf.getRegInfo();
   TII = mf.getTarget().getInstrInfo();
   TRI = mf.getTarget().getRegisterInfo();
   MF = &mf;
@@ -98,6 +99,18 @@ void VirtRegMap::grow() {
   ImplicitDefed.resize(LastVirtReg-TargetRegisterInfo::FirstVirtualRegister+1);
 }

+unsigned VirtRegMap::getRegAllocPref(unsigned virtReg) {
+  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(virtReg);
+  unsigned physReg = Hint.second;
+  if (physReg &&
+      TargetRegisterInfo::isVirtualRegister(physReg) && hasPhys(physReg))
+    physReg = getPhys(physReg);
+  if (Hint.first == 0)
+    return (physReg && TargetRegisterInfo::isPhysicalRegister(physReg))
+      ? physReg : 0;
+  return TRI->ResolveRegAllocHint(Hint.first, physReg, *MF);
+}
+
 int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
   assert(TargetRegisterInfo::isVirtualRegister(virtReg));
   assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
@@ -213,8 +226,7 @@ void VirtRegMap::RemoveMachineInstrFromMaps(MachineInstr *MI) {

 /// FindUnusedRegisters - Gather a list of allocatable registers that
 /// have not been allocated to any virtual register.
-bool VirtRegMap::FindUnusedRegisters(const TargetRegisterInfo *TRI,
-                                     LiveIntervals* LIs) {
+bool VirtRegMap::FindUnusedRegisters(LiveIntervals* LIs) {
   unsigned NumRegs = TRI->getNumRegs();
   UnusedRegs.reset();
   UnusedRegs.resize(NumRegs);
diff --git a/lib/CodeGen/VirtRegMap.h b/lib/CodeGen/VirtRegMap.h
index 507557d..fe767b7 100644
--- a/lib/CodeGen/VirtRegMap.h
+++ b/lib/CodeGen/VirtRegMap.h
@@ -31,6 +31,7 @@ namespace llvm {
   class LiveIntervals;
   class MachineInstr;
   class MachineFunction;
+  class MachineRegisterInfo;
   class TargetInstrInfo;
   class TargetRegisterInfo;
@@ -47,6 +48,7 @@ namespace llvm {
       std::pair<unsigned, ModRef> > MI2VirtMapTy;

   private:
+    MachineRegisterInfo *MRI;
     const TargetInstrInfo *TII;
     const TargetRegisterInfo *TRI;
     MachineFunction *MF;
@@ -190,6 +192,9 @@ namespace llvm {
       grow();
     }

+    /// @brief returns the register allocation preference.
+    unsigned getRegAllocPref(unsigned virtReg);
+
     /// @brief records virtReg is a split live interval from SReg.
     void setIsSplitFromReg(unsigned virtReg, unsigned SReg) {
       Virt2SplitMap[virtReg] = SReg;
@@ -445,8 +450,7 @@ namespace llvm {
     /// FindUnusedRegisters - Gather a list of allocatable registers that
     /// have not been allocated to any virtual register.
-    bool FindUnusedRegisters(const TargetRegisterInfo *TRI,
-                             LiveIntervals* LIs);
+    bool FindUnusedRegisters(LiveIntervals* LIs);

     /// HasUnusedRegisters - Return true if there are any allocatable registers
     /// that have not been allocated to any virtual register.
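One subtlety in VirtRegMap::getRegAllocPref above: the recorded hint may itself name a virtual register, and if that register has already been mapped, the hint is forwarded through getPhys to its current physical assignment. A sketch of how an allocator loop might consult it; vrm and virtReg are assumed to come from the enclosing allocator, and the surrounding code is hypothetical:

// Try the hinted physical register first, if the map can resolve one;
// otherwise fall back to scanning the normal allocation order.
if (unsigned PrefPhys = vrm.getRegAllocPref(virtReg)) {
  // ... attempt to assign PrefPhys to virtReg before any other candidate ...
}

Consulting the preference before the full allocation-order scan is also why the coalescer pass earlier in this diff nudges interval weights upward (LI.weight *= 1.01F) when a hint exists: hinted intervals are slightly more valuable to allocate early, while the hinted register is still free.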