Diffstat (limited to 'contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp')
-rw-r--r--  contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp | 157
1 file changed, 88 insertions(+), 69 deletions(-)
diff --git a/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp b/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp
index 2843c1a..35b8e14 100644
--- a/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp
+++ b/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp
@@ -104,6 +104,18 @@ void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
}
+void SimpleRegisterCoalescing::markAsJoined(MachineInstr *CopyMI) {
+ /// Joined copies are not deleted immediately, but kept in JoinedCopies.
+ JoinedCopies.insert(CopyMI);
+
+ /// Mark all register operands of CopyMI as <undef> so they won't affect dead
+ /// code elimination.
+ for (MachineInstr::mop_iterator I = CopyMI->operands_begin(),
+ E = CopyMI->operands_end(); I != E; ++I)
+ if (I->isReg())
+ I->setIsUndef(true);
+}
+
/// AdjustCopiesBackFrom - We found a non-trivially-coalescable copy with IntA
/// being the source and IntB being the dest, thus this defines a value number
/// in IntB. If the source value number (in IntA) is defined by a copy from B,
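Note: the later hunks in this patch replace direct JoinedCopies.insert() calls with the new markAsJoined() helper above, so a joined copy is both remembered for deletion and has its register operands flagged <undef>. A minimal sketch of that call-site change, assuming the surrounding coalescer code is otherwise unchanged:

    // Before: the copy was only queued for later deletion.
    JoinedCopies.insert(CopyMI);
    // After: the helper also marks every register operand <undef>, so the
    // not-yet-deleted copy no longer counts as a use or def during dead
    // code elimination.
    markAsJoined(CopyMI);
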
@@ -196,15 +208,14 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(const CoalescerPair &CP,
if (ValLR+1 != BLR) return false;
// If a live interval is a physical register, conservatively check if any
- // of its sub-registers is overlapping the live interval of the virtual
- // register. If so, do not coalesce.
- if (TargetRegisterInfo::isPhysicalRegister(IntB.reg) &&
- *tri_->getSubRegisters(IntB.reg)) {
- for (const unsigned* SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR)
- if (li_->hasInterval(*SR) && IntA.overlaps(li_->getInterval(*SR))) {
+ // of its aliases is overlapping the live interval of the virtual register.
+ // If so, do not coalesce.
+ if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
+ for (const unsigned *AS = tri_->getAliasSet(IntB.reg); *AS; ++AS)
+ if (li_->hasInterval(*AS) && IntA.overlaps(li_->getInterval(*AS))) {
DEBUG({
- dbgs() << "\t\tInterfere with sub-register ";
- li_->getInterval(*SR).print(dbgs(), tri_);
+ dbgs() << "\t\tInterfere with alias ";
+ li_->getInterval(*AS).print(dbgs(), tri_);
});
return false;
}
@@ -471,7 +482,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP,
DEBUG(dbgs() << "\t\tnoop: " << DefIdx << '\t' << *UseMI);
assert(DVNI->def == DefIdx);
BValNo = IntB.MergeValueNumberInto(BValNo, DVNI);
- JoinedCopies.insert(UseMI);
+ markAsJoined(UseMI);
}
// Extend BValNo by merging in IntA live ranges of AValNo. Val# definition
@@ -901,6 +912,58 @@ SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li,
return removeIntervalIfEmpty(li, li_, tri_);
}
+/// shouldJoinPhys - Return true if a copy involving a physreg should be joined.
+/// We need to be careful about coalescing a source physical register with a
+/// virtual register. Once the coalescing is done, it cannot be broken and these
+/// are not spillable! If the destination interval uses are far away, think
+/// twice about coalescing them!
+bool SimpleRegisterCoalescing::shouldJoinPhys(CoalescerPair &CP) {
+ bool Allocatable = li_->isAllocatable(CP.getDstReg());
+ LiveInterval &JoinVInt = li_->getInterval(CP.getSrcReg());
+
+ /// Always join simple intervals that are defined by a single copy from a
+ /// reserved register. This doesn't increase register pressure, so it is
+ /// always beneficial.
+ if (!Allocatable && CP.isFlipped() && JoinVInt.containsOneValue())
+ return true;
+
+ if (DisablePhysicalJoin) {
+ DEBUG(dbgs() << "\tPhysreg joins disabled.\n");
+ return false;
+ }
+
+ // Only coalesce to allocatable physreg, we don't want to risk modifying
+ // reserved registers.
+ if (!Allocatable) {
+ DEBUG(dbgs() << "\tRegister is an unallocatable physreg.\n");
+ return false; // Not coalescable.
+ }
+
+ // Don't join with physregs that have a ridiculous number of live
+ // ranges. The data structure performance is really bad when that
+ // happens.
+ if (li_->hasInterval(CP.getDstReg()) &&
+ li_->getInterval(CP.getDstReg()).ranges.size() > 1000) {
+ ++numAborts;
+ DEBUG(dbgs()
+ << "\tPhysical register live interval too complicated, abort!\n");
+ return false;
+ }
+
+ // FIXME: Why are we skipping this test for partial copies?
+ // CodeGen/X86/phys_subreg_coalesce-3.ll needs it.
+ if (!CP.isPartial()) {
+ const TargetRegisterClass *RC = mri_->getRegClass(CP.getSrcReg());
+ unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
+ unsigned Length = li_->getApproximateInstructionCount(JoinVInt);
+ if (Length > Threshold) {
+ ++numAborts;
+ DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
+ return false;
+ }
+ }
+ return true;
+}
/// isWinToJoinCrossClass - Return true if it's profitable to coalesce
/// two virtual registers from different register classes.
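Note: the length check at the end of shouldJoinPhys compares the virtual register's approximate instruction count against twice the number of allocatable registers in its class. An illustrative worked example, with hypothetical numbers that are not taken from the patch:

    // Suppose the source virtual register's class has 8 allocatable registers:
    unsigned Threshold = allocatableRCRegs_[RC].count() * 2;          // 8 * 2 = 16
    // and its live interval spans roughly 40 instructions:
    unsigned Length = li_->getApproximateInstructionCount(JoinVInt);  // ~40
    // 40 > 16, so the join is rejected rather than tying down the
    // physical register for that whole stretch of code.
    if (Length > Threshold)
      return false;
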
@@ -973,27 +1036,25 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
return false; // Not coalescable.
}
- if (DisablePhysicalJoin && CP.isPhys()) {
- DEBUG(dbgs() << "\tPhysical joins disabled.\n");
- return false;
- }
-
- DEBUG(dbgs() << "\tConsidering merging " << PrintReg(CP.getSrcReg(), tri_));
+ DEBUG(dbgs() << "\tConsidering merging " << PrintReg(CP.getSrcReg(), tri_)
+ << " with " << PrintReg(CP.getDstReg(), tri_, CP.getSubIdx())
+ << "\n");
// Enforce policies.
if (CP.isPhys()) {
- DEBUG(dbgs() <<" with physreg " << PrintReg(CP.getDstReg(), tri_) << "\n");
- // Only coalesce to allocatable physreg.
- if (!li_->isAllocatable(CP.getDstReg())) {
- DEBUG(dbgs() << "\tRegister is an unallocatable physreg.\n");
- return false; // Not coalescable.
+ if (!shouldJoinPhys(CP)) {
+ // Before giving up coalescing, if definition of source is defined by
+ // trivial computation, try rematerializing it.
+ if (!CP.isFlipped() &&
+ ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()), true,
+ CP.getDstReg(), 0, CopyMI))
+ return true;
+ return false;
}
} else {
- DEBUG(dbgs() << " with " << PrintReg(CP.getDstReg(), tri_, CP.getSubIdx())
- << " to " << CP.getNewRC()->getName() << "\n");
-
// Avoid constraining virtual register regclass too much.
if (CP.isCrossClass()) {
+ DEBUG(dbgs() << "\tCross-class to " << CP.getNewRC()->getName() << ".\n");
if (DisableCrossClassJoin) {
DEBUG(dbgs() << "\tCross-class joins disabled.\n");
return false;
@@ -1002,8 +1063,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
mri_->getRegClass(CP.getSrcReg()),
mri_->getRegClass(CP.getDstReg()),
CP.getNewRC())) {
- DEBUG(dbgs() << "\tAvoid coalescing to constrained register class: "
- << CP.getNewRC()->getName() << ".\n");
+ DEBUG(dbgs() << "\tAvoid coalescing to constrained register class.\n");
Again = true; // May be possible to coalesce later.
return false;
}
@@ -1015,45 +1075,6 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
CP.flip();
}
- // We need to be careful about coalescing a source physical register with a
- // virtual register. Once the coalescing is done, it cannot be broken and
- // these are not spillable! If the destination interval uses are far away,
- // think twice about coalescing them!
- // FIXME: Why are we skipping this test for partial copies?
- // CodeGen/X86/phys_subreg_coalesce-3.ll needs it.
- if (!CP.isPartial() && CP.isPhys()) {
- LiveInterval &JoinVInt = li_->getInterval(CP.getSrcReg());
-
- // Don't join with physregs that have a ridiculous number of live
- // ranges. The data structure performance is really bad when that
- // happens.
- if (li_->hasInterval(CP.getDstReg()) &&
- li_->getInterval(CP.getDstReg()).ranges.size() > 1000) {
- ++numAborts;
- DEBUG(dbgs()
- << "\tPhysical register live interval too complicated, abort!\n");
- return false;
- }
-
- const TargetRegisterClass *RC = mri_->getRegClass(CP.getSrcReg());
- unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
- unsigned Length = li_->getApproximateInstructionCount(JoinVInt);
- if (Length > Threshold &&
- std::distance(mri_->use_nodbg_begin(CP.getSrcReg()),
- mri_->use_nodbg_end()) * Threshold < Length) {
- // Before giving up coalescing, if definition of source is defined by
- // trivial computation, try rematerializing it.
- if (!CP.isFlipped() &&
- ReMaterializeTrivialDef(JoinVInt, true, CP.getDstReg(), 0, CopyMI))
- return true;
-
- ++numAborts;
- DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
- Again = true; // May be possible to coalesce later.
- return false;
- }
- }
-
// Okay, attempt to join these two intervals. On failure, this returns false.
// Otherwise, if one of the intervals being joined is a physreg, this method
// always canonicalizes DstInt to be it. The output "SrcInt" will not have
@@ -1072,7 +1093,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
if (!CP.isPartial()) {
if (AdjustCopiesBackFrom(CP, CopyMI) ||
RemoveCopyByCommutingDef(CP, CopyMI)) {
- JoinedCopies.insert(CopyMI);
+ markAsJoined(CopyMI);
DEBUG(dbgs() << "\tTrivial!\n");
return true;
}
@@ -1092,7 +1113,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
}
// Remember to delete the copy instruction.
- JoinedCopies.insert(CopyMI);
+ markAsJoined(CopyMI);
UpdateRegDefsUses(CP);
@@ -1568,9 +1589,7 @@ SimpleRegisterCoalescing::lastRegisterUse(SlotIndex Start,
if (UseMI->isIdentityCopy())
continue;
SlotIndex Idx = li_->getInstructionIndex(UseMI);
- // FIXME: Should this be Idx != UseIdx? SlotIndex() will return something
- // that compares higher than any other interval.
- if (Idx >= Start && Idx < End && Idx >= UseIdx) {
+ if (Idx >= Start && Idx < End && (!UseIdx.isValid() || Idx >= UseIdx)) {
LastUse = &Use;
UseIdx = Idx.getUseIndex();
}
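
Note on the last hunk: UseIdx starts out as a default-constructed SlotIndex, which is not valid, and the removed FIXME points out that such an index compares higher than any real index. The new condition spells out the "no use recorded yet" case explicitly instead of leaning on that comparison behavior. A minimal sketch of the intent, assuming UseIdx tracks the latest use index found so far as in the loop above:

    SlotIndex UseIdx;  // default-constructed, so !UseIdx.isValid() until a use is found
    ...
    // Accept Idx either as the first qualifying use or as one at least as
    // late as the current best, without relying on how an invalid index sorts.
    if (Idx >= Start && Idx < End && (!UseIdx.isValid() || Idx >= UseIdx)) {
      LastUse = &Use;
      UseIdx = Idx.getUseIndex();
    }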