path: root/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
author    dim <dim@FreeBSD.org>    2016-12-26 20:36:37 +0000
committer dim <dim@FreeBSD.org>    2016-12-26 20:36:37 +0000
commit    06210ae42d418d50d8d9365d5c9419308ae9e7ee (patch)
tree      ab60b4cdd6e430dda1f292a46a77ddb744723f31 /contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
parent    2dd166267f53df1c3748b4325d294b9b839de74b (diff)
download  FreeBSD-src-06210ae42d418d50d8d9365d5c9419308ae9e7ee.zip
          FreeBSD-src-06210ae42d418d50d8d9365d5c9419308ae9e7ee.tar.gz
MFC r309124:
Upgrade our copies of clang, llvm, lldb, compiler-rt and libc++ to 3.9.0
release, and add lld 3.9.0.  Also completely revamp the build system for
clang, llvm, lldb and their related tools.

Please note that from 3.5.0 onwards, clang, llvm and lldb require C++11
support to build; see UPDATING for more information.

Release notes for llvm, clang and lld are available here:
<http://llvm.org/releases/3.9.0/docs/ReleaseNotes.html>
<http://llvm.org/releases/3.9.0/tools/clang/docs/ReleaseNotes.html>
<http://llvm.org/releases/3.9.0/tools/lld/docs/ReleaseNotes.html>

Thanks to Ed Maste, Bryan Drewery, Andrew Turner, Antoine Brodin and Jan
Beich for their help.

Relnotes:	yes

MFC r309147:

Pull in r282174 from upstream llvm trunk (by Krzysztof Parzyszek):

  [PPC] Set SP after loading data from stack frame, if no red zone is present

  Follow-up to r280705: Make sure that the SP is only restored after all
  data is loaded from the stack frame, if there is no red zone.

  This completes the fix for https://llvm.org/bugs/show_bug.cgi?id=26519.

  Differential Revision: https://reviews.llvm.org/D24466

Reported by:	Mark Millard
PR:		214433

MFC r309149:

Pull in r283060 from upstream llvm trunk (by Hal Finkel):

  [PowerPC] Refactor soft-float support, and enable PPC64 soft float

  This change enables soft-float for PowerPC64, and also makes soft-float
  disable all vector instruction sets for both 32-bit and 64-bit modes.
  This latter part is necessary because the PPC backend canonicalizes many
  Altivec vector types to floating-point types, and so soft-float breaks
  scalarization support for many operations.  Both for embedded targets and
  for operating-system kernels desiring soft-float support, it seems
  reasonable that disabling hardware floating-point also disables vector
  instructions (embedded targets without hardware floating point support
  are unlikely to have Altivec, etc. and operating system kernels desiring
  not to use floating-point registers to lower syscall cost are unlikely to
  want to use vector registers either).  If someone needs this to work,
  we'll need to change the fact that we promote many Altivec operations to
  act on v4f32.

  To make it possible to disable Altivec when soft-float is enabled,
  hardware floating-point support needs to be expressed as a positive
  feature, like the others, and not a negative feature, because target
  features cannot have dependencies on the disabling of some other feature.
  So +soft-float has now become -hard-float.

  Fixes PR26970.

Pull in r283061 from upstream clang trunk (by Hal Finkel):

  [PowerPC] Enable soft-float for PPC64, and +soft-float -> -hard-float

  Enable soft-float support on PPC64, as the backend now supports it.
  Also, the backend now uses -hard-float instead of +soft-float, so set
  the target features accordingly.

  Fixes PR26970.

Reported by:	Mark Millard
PR:		214433

MFC r309212:

Add a few missed clang 3.9.0 files to OptionalObsoleteFiles.

MFC r309262:

Fix packaging for clang, lldb and lld 3.9.0

During the upgrade of clang/llvm etc to 3.9.0 in r309124, the PACKAGE
directive in the usr.bin/clang/*.mk files got dropped accidentally.

Restore it, with a few minor changes and additions:
* Correct license in clang.ucl to NCSA
* Add PACKAGE=clang for clang and most of the "ll" tools
* Put lldb in its own package
* Put lld in its own package

Reviewed by:	gjb, jmallett
Differential Revision:	https://reviews.freebsd.org/D8666

MFC r309656:

During the bootstrap phase, when building the minimal llvm library on
PowerPC, add lib/Support/Atomic.cpp.  This is needed because upstream llvm
revision r271821 disabled the use of std::call_once, which causes some
fallback functions from Atomic.cpp to be used instead.

Reported by:	Mark Millard
PR:		214902

MFC r309835:

Tentatively apply https://reviews.llvm.org/D18730 to work around gcc PR
70528 (bogus error: constructor required before non-static data member).
This should fix buildworld with the external gcc package.

Reported by:	https://jenkins.freebsd.org/job/FreeBSD_HEAD_amd64_gcc/

MFC r310194:

Upgrade our copies of clang, llvm, lld, lldb, compiler-rt and libc++ to
3.9.1 release.

Please note that from 3.5.0 onwards, clang, llvm and lldb require C++11
support to build; see UPDATING for more information.

Release notes for llvm, clang and lld will be available here:
<http://releases.llvm.org/3.9.1/docs/ReleaseNotes.html>
<http://releases.llvm.org/3.9.1/tools/clang/docs/ReleaseNotes.html>
<http://releases.llvm.org/3.9.1/tools/lld/docs/ReleaseNotes.html>

Relnotes:	yes
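
Beyond the functional changes listed above, most of the churn in the
LiveIntervalAnalysis.cpp diff below is mechanical: LLVM 3.9 moved a number of
LiveIntervals/SlotIndexes entry points from MachineInstr* to MachineInstr&
(getInstructionIndex(), getSpillWeight(), handleMove(),
addSegmentToEndOfBlock(), and so on).  A minimal sketch of what that means
for a caller, using only the signatures visible in the hunks below; the
helper name and the surrounding pass are made up for illustration:

  #include "llvm/CodeGen/LiveIntervalAnalysis.h"
  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/SlotIndexes.h"

  // Hypothetical helper in an out-of-tree pass: MI has already been
  // physically moved within its basic block, and LiveIntervals is asked to
  // fix up the slot index maps and live ranges for it.
  static void reindexAfterMove(llvm::LiveIntervals &LIS, llvm::MachineInstr &MI) {
    // Pre-3.9 code would have passed a MachineInstr* to both calls; with
    // 3.9 they take a reference instead.
    llvm::SlotIndex OldIdx = LIS.getInstructionIndex(MI);
    (void)OldIdx; // e.g. for logging or asserts in a real pass
    LIS.handleMove(MI, /*UpdateFlags=*/false);
  }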
Diffstat (limited to 'contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp')
-rw-r--r--   contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp   552
1 file changed, 333 insertions, 219 deletions
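
One self-contained piece of the diff is getSpillWeight(): the hunk only
changes its MachineInstr parameter from pointer to reference, while the
weight itself remains (isDef + isUse) scaled by the block's frequency
relative to the function entry.  A standalone sketch of that arithmetic,
with plain integers standing in for the MBFI block frequencies (illustrative
only, not the LLVM API):

  #include <cstdio>

  // Mirrors the formula in the getSpillWeight() hunk below:
  //   Scale  = 1.0f / entry frequency
  //   weight = (isDef + isUse) * (block frequency * Scale)
  static float spillWeight(bool isDef, bool isUse,
                           unsigned long long blockFreq,
                           unsigned long long entryFreq) {
    const float Scale = 1.0f / entryFreq;
    return (isDef + isUse) * (blockFreq * Scale);
  }

  int main() {
    // An operand that is both read and written in a block running 4x as
    // often as the entry block gets weight (1 + 1) * 4.0 = 8.0.
    std::printf("%.1f\n", spillWeight(true, true, 400, 100));
    return 0;
  }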
diff --git a/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp b/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
index a506e05..5f3281f 100644
--- a/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -9,15 +9,13 @@
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
-// basic blocks of the function in DFS order and uses the
-// LiveVariables pass to conservatively compute live intervals for
+// basic blocks of the function in DFS order and computes live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "LiveRangeCalc.h"
-#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
@@ -38,7 +36,6 @@
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cmath>
-#include <limits>
using namespace llvm;
#define DEBUG_TYPE "regalloc"
@@ -48,7 +45,6 @@ char &llvm::LiveIntervalsID = LiveIntervals::ID;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
"Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
@@ -77,10 +73,6 @@ void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<AAResultsWrapperPass>();
AU.addPreserved<AAResultsWrapperPass>();
- // LiveVariables isn't really required by this analysis, it is only required
- // here to make sure it is live during TwoAddressInstructionPass and
- // PHIElimination. This is temporary.
- AU.addRequired<LiveVariables>();
AU.addPreserved<LiveVariables>();
AU.addPreservedID(MachineLoopInfoID);
AU.addRequiredTransitiveID(MachineDominatorsID);
@@ -197,16 +189,9 @@ LiveInterval* LiveIntervals::createInterval(unsigned reg) {
void LiveIntervals::computeVirtRegInterval(LiveInterval &LI) {
assert(LRCalc && "LRCalc not initialized.");
assert(LI.empty() && "Should only compute empty intervals.");
- bool ShouldTrackSubRegLiveness = MRI->shouldTrackSubRegLiveness(LI.reg);
LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
- LRCalc->calculate(LI, ShouldTrackSubRegLiveness);
- bool SeparatedComponents = computeDeadValues(LI, nullptr);
- if (SeparatedComponents) {
- assert(ShouldTrackSubRegLiveness
- && "Separated components should only occur for unused subreg defs");
- SmallVector<LiveInterval*, 8> SplitLIs;
- splitSeparateComponents(LI, SplitLIs);
- }
+ LRCalc->calculate(LI, MRI->shouldTrackSubRegLiveness(LI.reg));
+ computeDeadValues(LI, nullptr);
}
void LiveIntervals::computeVirtRegs() {
@@ -236,14 +221,18 @@ void LiveIntervals::computeRegMasks() {
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isRegMask())
continue;
- RegMaskSlots.push_back(Indexes->getInstructionIndex(&MI).getRegSlot());
+ RegMaskSlots.push_back(Indexes->getInstructionIndex(MI).getRegSlot());
RegMaskBits.push_back(MO.getRegMask());
}
}
- // Some block ends, such as funclet returns, create masks.
+ // Some block ends, such as funclet returns, create masks. Put the mask on
+ // the last instruction of the block, because MBB slot index intervals are
+ // half-open.
if (const uint32_t *Mask = MBB.getEndClobberMask(TRI)) {
- RegMaskSlots.push_back(Indexes->getMBBEndIdx(&MBB));
+ assert(!MBB.empty() && "empty return block?");
+ RegMaskSlots.push_back(
+ Indexes->getInstructionIndex(MBB.back()).getRegSlot());
RegMaskBits.push_back(Mask);
}
@@ -439,7 +428,7 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
MachineInstr *UseMI = &*(I++);
if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
continue;
- SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
+ SlotIndex Idx = getInstructionIndex(*UseMI).getRegSlot();
LiveQueryResult LRQ = li->Query(Idx);
VNInfo *VNI = LRQ.valueIn();
if (!VNI) {
@@ -485,13 +474,11 @@ bool LiveIntervals::computeDeadValues(LiveInterval &LI,
// Is the register live before? Otherwise we may have to add a read-undef
// flag for subregister defs.
- bool DeadBeforeDef = false;
unsigned VReg = LI.reg;
if (MRI->shouldTrackSubRegLiveness(VReg)) {
if ((I == LI.begin() || std::prev(I)->end < Def) && !VNI->isPHIDef()) {
MachineInstr *MI = getInstructionFromIndex(Def);
MI->setRegisterDefReadUndef(VReg);
- DeadBeforeDef = true;
}
}
@@ -507,15 +494,7 @@ bool LiveIntervals::computeDeadValues(LiveInterval &LI,
// This is a dead def. Make sure the instruction knows.
MachineInstr *MI = getInstructionFromIndex(Def);
assert(MI && "No instruction defining live value");
- MI->addRegisterDead(VReg, TRI);
-
- // If we have a dead def that is completely separate from the rest of
- // the liverange then we rewrite it to use a different VReg to not violate
- // the rule that the liveness of a virtual register forms a connected
- // component. This should only happen if subregister liveness is tracked.
- if (DeadBeforeDef)
- MayHaveSplitComponents = true;
-
+ MI->addRegisterDead(LI.reg, TRI);
if (dead && MI->allDefsAreDead()) {
DEBUG(dbgs() << "All defs dead: " << Def << '\t' << *MI);
dead->push_back(MI);
@@ -547,7 +526,7 @@ void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg)
continue;
}
// We only need to visit each instruction once.
- SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
+ SlotIndex Idx = getInstructionIndex(*UseMI).getRegSlot();
if (Idx == LastIdx)
continue;
LastIdx = Idx;
@@ -585,9 +564,9 @@ void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg)
continue;
if (VNI->isPHIDef()) {
// This is a dead PHI. Remove it.
+ DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
VNI->markUnused();
SR.removeSegment(*Segment);
- DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
}
}
@@ -837,24 +816,22 @@ LiveIntervals::hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const {
return false;
}
-float
-LiveIntervals::getSpillWeight(bool isDef, bool isUse,
- const MachineBlockFrequencyInfo *MBFI,
- const MachineInstr *MI) {
- BlockFrequency Freq = MBFI->getBlockFreq(MI->getParent());
+float LiveIntervals::getSpillWeight(bool isDef, bool isUse,
+ const MachineBlockFrequencyInfo *MBFI,
+ const MachineInstr &MI) {
+ BlockFrequency Freq = MBFI->getBlockFreq(MI.getParent());
const float Scale = 1.0f / MBFI->getEntryFreq();
return (isDef + isUse) * (Freq.getFrequency() * Scale);
}
LiveRange::Segment
-LiveIntervals::addSegmentToEndOfBlock(unsigned reg, MachineInstr* startInst) {
+LiveIntervals::addSegmentToEndOfBlock(unsigned reg, MachineInstr &startInst) {
LiveInterval& Interval = createEmptyInterval(reg);
- VNInfo* VN = Interval.getNextValue(
- SlotIndex(getInstructionIndex(startInst).getRegSlot()),
- getVNInfoAllocator());
- LiveRange::Segment S(
- SlotIndex(getInstructionIndex(startInst).getRegSlot()),
- getMBBEndIdx(startInst->getParent()), VN);
+ VNInfo *VN = Interval.getNextValue(
+ SlotIndex(getInstructionIndex(startInst).getRegSlot()),
+ getVNInfoAllocator());
+ LiveRange::Segment S(SlotIndex(getInstructionIndex(startInst).getRegSlot()),
+ getMBBEndIdx(startInst.getParent()), VN);
Interval.addSegment(S);
return S;
@@ -962,10 +939,13 @@ public:
hasRegMask = true;
if (!MO.isReg())
continue;
- // Aggressively clear all kill flags.
- // They are reinserted by VirtRegRewriter.
- if (MO.isUse())
+ if (MO.isUse()) {
+ if (!MO.readsReg())
+ continue;
+ // Aggressively clear all kill flags.
+ // They are reinserted by VirtRegRewriter.
MO.setIsKill(false);
+ }
unsigned Reg = MO.getReg();
if (!Reg)
@@ -1021,172 +1001,296 @@ private:
}
/// Update LR to reflect an instruction has been moved downwards from OldIdx
- /// to NewIdx.
- ///
- /// 1. Live def at OldIdx:
- /// Move def to NewIdx, assert endpoint after NewIdx.
- ///
- /// 2. Live def at OldIdx, killed at NewIdx:
- /// Change to dead def at NewIdx.
- /// (Happens when bundling def+kill together).
- ///
- /// 3. Dead def at OldIdx:
- /// Move def to NewIdx, possibly across another live value.
- ///
- /// 4. Def at OldIdx AND at NewIdx:
- /// Remove segment [OldIdx;NewIdx) and value defined at OldIdx.
- /// (Happens when bundling multiple defs together).
- ///
- /// 5. Value read at OldIdx, killed before NewIdx:
- /// Extend kill to NewIdx.
- ///
+ /// to NewIdx (OldIdx < NewIdx).
void handleMoveDown(LiveRange &LR) {
- // First look for a kill at OldIdx.
- LiveRange::iterator I = LR.find(OldIdx.getBaseIndex());
LiveRange::iterator E = LR.end();
- // Is LR even live at OldIdx?
- if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start))
+ // Segment going into OldIdx.
+ LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());
+
+ // No value live before or after OldIdx? Nothing to do.
+ if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
return;
- // Handle a live-in value.
- if (!SlotIndex::isSameInstr(I->start, OldIdx)) {
- bool isKill = SlotIndex::isSameInstr(OldIdx, I->end);
+ LiveRange::iterator OldIdxOut;
+ // Do we have a value live-in to OldIdx?
+ if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
// If the live-in value already extends to NewIdx, there is nothing to do.
- if (!SlotIndex::isEarlierInstr(I->end, NewIdx))
+ if (SlotIndex::isEarlierEqualInstr(NewIdx, OldIdxIn->end))
return;
// Aggressively remove all kill flags from the old kill point.
// Kill flags shouldn't be used while live intervals exist, they will be
// reinserted by VirtRegRewriter.
- if (MachineInstr *KillMI = LIS.getInstructionFromIndex(I->end))
- for (MIBundleOperands MO(KillMI); MO.isValid(); ++MO)
+ if (MachineInstr *KillMI = LIS.getInstructionFromIndex(OldIdxIn->end))
+ for (MIBundleOperands MO(*KillMI); MO.isValid(); ++MO)
if (MO->isReg() && MO->isUse())
MO->setIsKill(false);
- // Adjust I->end to reach NewIdx. This may temporarily make LR invalid by
- // overlapping ranges. Case 5 above.
- I->end = NewIdx.getRegSlot(I->end.isEarlyClobber());
- // If this was a kill, there may also be a def. Otherwise we're done.
+
+ // Is there a def before NewIdx which is not OldIdx?
+ LiveRange::iterator Next = std::next(OldIdxIn);
+ if (Next != E && !SlotIndex::isSameInstr(OldIdx, Next->start) &&
+ SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
+ // If we are here then OldIdx was just a use but not a def. We only have
+ // to ensure liveness extends to NewIdx.
+ LiveRange::iterator NewIdxIn =
+ LR.advanceTo(Next, NewIdx.getBaseIndex());
+ // Extend the segment before NewIdx if necessary.
+ if (NewIdxIn == E ||
+ !SlotIndex::isEarlierInstr(NewIdxIn->start, NewIdx)) {
+ LiveRange::iterator Prev = std::prev(NewIdxIn);
+ Prev->end = NewIdx.getRegSlot();
+ }
+ return;
+ }
+
+ // Adjust OldIdxIn->end to reach NewIdx. This may temporarily make LR
+ // invalid by overlapping ranges.
+ bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
+ OldIdxIn->end = NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber());
+ // If this was not a kill, then there was no def and we're done.
if (!isKill)
return;
- ++I;
+
+ // Did we have a Def at OldIdx?
+ OldIdxOut = Next;
+ if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
+ return;
+ } else {
+ OldIdxOut = OldIdxIn;
}
- // Check for a def at OldIdx.
- if (I == E || !SlotIndex::isSameInstr(OldIdx, I->start))
- return;
- // We have a def at OldIdx.
- VNInfo *DefVNI = I->valno;
- assert(DefVNI->def == I->start && "Inconsistent def");
- DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber());
- // If the defined value extends beyond NewIdx, just move the def down.
- // This is case 1 above.
- if (SlotIndex::isEarlierInstr(NewIdx, I->end)) {
- I->start = DefVNI->def;
+ // If we are here then there is a Definition at OldIdx. OldIdxOut points
+ // to the segment starting there.
+ assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
+ "No def?");
+ VNInfo *OldIdxVNI = OldIdxOut->valno;
+ assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");
+
+ // If the defined value extends beyond NewIdx, just move the beginning
+ // of the segment to NewIdx.
+ SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
+ if (SlotIndex::isEarlierInstr(NewIdxDef, OldIdxOut->end)) {
+ OldIdxVNI->def = NewIdxDef;
+ OldIdxOut->start = OldIdxVNI->def;
return;
}
- // The remaining possibilities are now:
- // 2. Live def at OldIdx, killed at NewIdx: isSameInstr(I->end, NewIdx).
- // 3. Dead def at OldIdx: I->end = OldIdx.getDeadSlot().
- // In either case, it is possible that there is an existing def at NewIdx.
- assert((I->end == OldIdx.getDeadSlot() ||
- SlotIndex::isSameInstr(I->end, NewIdx)) &&
- "Cannot move def below kill");
- LiveRange::iterator NewI = LR.advanceTo(I, NewIdx.getRegSlot());
- if (NewI != E && SlotIndex::isSameInstr(NewI->start, NewIdx)) {
- // There is an existing def at NewIdx, case 4 above. The def at OldIdx is
- // coalesced into that value.
- assert(NewI->valno != DefVNI && "Multiple defs of value?");
- LR.removeValNo(DefVNI);
+
+ // If we are here then we have a Definition at OldIdx which ends before
+ // NewIdx.
+
+ // Is there an existing Def at NewIdx?
+ LiveRange::iterator AfterNewIdx
+ = LR.advanceTo(OldIdxOut, NewIdx.getRegSlot());
+ bool OldIdxDefIsDead = OldIdxOut->end.isDead();
+ if (!OldIdxDefIsDead &&
+ SlotIndex::isEarlierInstr(OldIdxOut->end, NewIdxDef)) {
+ // OldIdx is not a dead def, and NewIdxDef is inside a new interval.
+ VNInfo *DefVNI;
+ if (OldIdxOut != LR.begin() &&
+ !SlotIndex::isEarlierInstr(std::prev(OldIdxOut)->end,
+ OldIdxOut->start)) {
+ // There is no gap between OldIdxOut and its predecessor anymore,
+ // merge them.
+ LiveRange::iterator IPrev = std::prev(OldIdxOut);
+ DefVNI = OldIdxVNI;
+ IPrev->end = OldIdxOut->end;
+ } else {
+ // The value is live in to OldIdx
+ LiveRange::iterator INext = std::next(OldIdxOut);
+ assert(INext != E && "Must have following segment");
+ // We merge OldIdxOut and its successor. As we're dealing with subreg
+ // reordering, there is always a successor to OldIdxOut in the same BB
+ // We don't need INext->valno anymore and will reuse for the new segment
+ // we create later.
+ DefVNI = OldIdxVNI;
+ INext->start = OldIdxOut->end;
+ INext->valno->def = INext->start;
+ }
+ // If NewIdx is behind the last segment, extend that and append a new one.
+ if (AfterNewIdx == E) {
+ // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
+ // one position.
+ // |- ?/OldIdxOut -| |- X0 -| ... |- Xn -| end
+ // => |- X0/OldIdxOut -| ... |- Xn -| |- undef/NewS -| end
+ std::copy(std::next(OldIdxOut), E, OldIdxOut);
+ // The last segment is undefined now, reuse it for a dead def.
+ LiveRange::iterator NewSegment = std::prev(E);
+ *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
+ DefVNI);
+ DefVNI->def = NewIdxDef;
+
+ LiveRange::iterator Prev = std::prev(NewSegment);
+ Prev->end = NewIdxDef;
+ } else {
+ // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
+ // one position.
+ // |- ?/OldIdxOut -| |- X0 -| ... |- Xn/AfterNewIdx -| |- Next -|
+ // => |- X0/OldIdxOut -| ... |- Xn -| |- Xn/AfterNewIdx -| |- Next -|
+ std::copy(std::next(OldIdxOut), std::next(AfterNewIdx), OldIdxOut);
+ LiveRange::iterator Prev = std::prev(AfterNewIdx);
+ // We have two cases:
+ if (SlotIndex::isEarlierInstr(Prev->start, NewIdxDef)) {
+ // Case 1: NewIdx is inside a liverange. Split this liverange at
+ // NewIdxDef into the segment "Prev" followed by "NewSegment".
+ LiveRange::iterator NewSegment = AfterNewIdx;
+ *NewSegment = LiveRange::Segment(NewIdxDef, Prev->end, Prev->valno);
+ Prev->valno->def = NewIdxDef;
+
+ *Prev = LiveRange::Segment(Prev->start, NewIdxDef, DefVNI);
+ DefVNI->def = Prev->start;
+ } else {
+ // Case 2: NewIdx is in a lifetime hole. Keep AfterNewIdx as is and
+ // turn Prev into a segment from NewIdx to AfterNewIdx->start.
+ *Prev = LiveRange::Segment(NewIdxDef, AfterNewIdx->start, DefVNI);
+ DefVNI->def = NewIdxDef;
+ assert(DefVNI != AfterNewIdx->valno);
+ }
+ }
return;
}
- // There was no existing def at NewIdx. Turn *I into a dead def at NewIdx.
- // If the def at OldIdx was dead, we allow it to be moved across other LR
- // values. The new range should be placed immediately before NewI, move any
- // intermediate ranges up.
- assert(NewI != I && "Inconsistent iterators");
- std::copy(std::next(I), NewI, I);
- *std::prev(NewI)
- = LiveRange::Segment(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
+
+ if (AfterNewIdx != E &&
+ SlotIndex::isSameInstr(AfterNewIdx->start, NewIdxDef)) {
+ // There is an existing def at NewIdx. The def at OldIdx is coalesced into
+ // that value.
+ assert(AfterNewIdx->valno != OldIdxVNI && "Multiple defs of value?");
+ LR.removeValNo(OldIdxVNI);
+ } else {
+ // There was no existing def at NewIdx. We need to create a dead def
+ // at NewIdx. Shift segments over the old OldIdxOut segment, this frees
+ // a new segment at the place where we want to construct the dead def.
+ // |- OldIdxOut -| |- X0 -| ... |- Xn -| |- AfterNewIdx -|
+ // => |- X0/OldIdxOut -| ... |- Xn -| |- undef/NewS. -| |- AfterNewIdx -|
+ assert(AfterNewIdx != OldIdxOut && "Inconsistent iterators");
+ std::copy(std::next(OldIdxOut), AfterNewIdx, OldIdxOut);
+ // We can reuse OldIdxVNI now.
+ LiveRange::iterator NewSegment = std::prev(AfterNewIdx);
+ VNInfo *NewSegmentVNI = OldIdxVNI;
+ NewSegmentVNI->def = NewIdxDef;
+ *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
+ NewSegmentVNI);
+ }
}
/// Update LR to reflect an instruction has been moved upwards from OldIdx
- /// to NewIdx.
- ///
- /// 1. Live def at OldIdx:
- /// Hoist def to NewIdx.
- ///
- /// 2. Dead def at OldIdx:
- /// Hoist def+end to NewIdx, possibly move across other values.
- ///
- /// 3. Dead def at OldIdx AND existing def at NewIdx:
- /// Remove value defined at OldIdx, coalescing it with existing value.
- ///
- /// 4. Live def at OldIdx AND existing def at NewIdx:
- /// Remove value defined at NewIdx, hoist OldIdx def to NewIdx.
- /// (Happens when bundling multiple defs together).
- ///
- /// 5. Value killed at OldIdx:
- /// Hoist kill to NewIdx, then scan for last kill between NewIdx and
- /// OldIdx.
- ///
+ /// to NewIdx (NewIdx < OldIdx).
void handleMoveUp(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
- // First look for a kill at OldIdx.
- LiveRange::iterator I = LR.find(OldIdx.getBaseIndex());
LiveRange::iterator E = LR.end();
- // Is LR even live at OldIdx?
- if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start))
+ // Segment going into OldIdx.
+ LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());
+
+ // No value live before or after OldIdx? Nothing to do.
+ if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
return;
- // Handle a live-in value.
- if (!SlotIndex::isSameInstr(I->start, OldIdx)) {
- // If the live-in value isn't killed here, there is nothing to do.
- if (!SlotIndex::isSameInstr(OldIdx, I->end))
- return;
- // Adjust I->end to end at NewIdx. If we are hoisting a kill above
- // another use, we need to search for that use. Case 5 above.
- I->end = NewIdx.getRegSlot(I->end.isEarlyClobber());
- ++I;
- // If OldIdx also defines a value, there couldn't have been another use.
- if (I == E || !SlotIndex::isSameInstr(I->start, OldIdx)) {
- // No def, search for the new kill.
- // This can never be an early clobber kill since there is no def.
- std::prev(I)->end = findLastUseBefore(Reg, LaneMask).getRegSlot();
+ LiveRange::iterator OldIdxOut;
+ // Do we have a value live-in to OldIdx?
+ if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
+ // If the live-in value isn't killed here, then we have no Def at
+ // OldIdx, moreover the value must be live at NewIdx so there is nothing
+ // to do.
+ bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
+ if (!isKill)
return;
- }
- }
- // Now deal with the def at OldIdx.
- assert(I != E && SlotIndex::isSameInstr(I->start, OldIdx) && "No def?");
- VNInfo *DefVNI = I->valno;
- assert(DefVNI->def == I->start && "Inconsistent def");
- DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber());
-
- // Check for an existing def at NewIdx.
- LiveRange::iterator NewI = LR.find(NewIdx.getRegSlot());
- if (SlotIndex::isSameInstr(NewI->start, NewIdx)) {
- assert(NewI->valno != DefVNI && "Same value defined more than once?");
- // There is an existing def at NewIdx.
- if (I->end.isDead()) {
- // Case 3: Remove the dead def at OldIdx.
- LR.removeValNo(DefVNI);
+ // At this point we have to move OldIdxIn->end back to the nearest
+ // previous use or (dead-)def but no further than NewIdx.
+ SlotIndex DefBeforeOldIdx
+ = std::max(OldIdxIn->start.getDeadSlot(),
+ NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber()));
+ OldIdxIn->end = findLastUseBefore(DefBeforeOldIdx, Reg, LaneMask);
+
+ // Did we have a Def at OldIdx? If not we are done now.
+ OldIdxOut = std::next(OldIdxIn);
+ if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
return;
- }
- // Case 4: Replace def at NewIdx with live def at OldIdx.
- I->start = DefVNI->def;
- LR.removeValNo(NewI->valno);
- return;
+ } else {
+ OldIdxOut = OldIdxIn;
+ OldIdxIn = OldIdxOut != LR.begin() ? std::prev(OldIdxOut) : E;
}
- // There is no existing def at NewIdx. Hoist DefVNI.
- if (!I->end.isDead()) {
- // Leave the end point of a live def.
- I->start = DefVNI->def;
- return;
+ // If we are here then there is a Definition at OldIdx. OldIdxOut points
+ // to the segment starting there.
+ assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
+ "No def?");
+ VNInfo *OldIdxVNI = OldIdxOut->valno;
+ assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");
+ bool OldIdxDefIsDead = OldIdxOut->end.isDead();
+
+ // Is there an existing def at NewIdx?
+ SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
+ LiveRange::iterator NewIdxOut = LR.find(NewIdx.getRegSlot());
+ if (SlotIndex::isSameInstr(NewIdxOut->start, NewIdx)) {
+ assert(NewIdxOut->valno != OldIdxVNI &&
+ "Same value defined more than once?");
+ // If OldIdx was a dead def remove it.
+ if (!OldIdxDefIsDead) {
+ // Remove segment starting at NewIdx and move begin of OldIdxOut to
+ // NewIdx so it can take its place.
+ OldIdxVNI->def = NewIdxDef;
+ OldIdxOut->start = NewIdxDef;
+ LR.removeValNo(NewIdxOut->valno);
+ } else {
+ // Simply remove the dead def at OldIdx.
+ LR.removeValNo(OldIdxVNI);
+ }
+ } else {
+ // Previously nothing was live after NewIdx, so all we have to do now is
+ // move the begin of OldIdxOut to NewIdx.
+ if (!OldIdxDefIsDead) {
+ // Do we have any intermediate Defs between OldIdx and NewIdx?
+ if (OldIdxIn != E &&
+ SlotIndex::isEarlierInstr(NewIdxDef, OldIdxIn->start)) {
+ // OldIdx is not a dead def and NewIdx is before predecessor start.
+ LiveRange::iterator NewIdxIn = NewIdxOut;
+ assert(NewIdxIn == LR.find(NewIdx.getBaseIndex()));
+ const SlotIndex SplitPos = NewIdxDef;
+
+ // Merge the OldIdxIn and OldIdxOut segments into OldIdxOut.
+ *OldIdxOut = LiveRange::Segment(OldIdxIn->start, OldIdxOut->end,
+ OldIdxIn->valno);
+ // OldIdxIn and OldIdxVNI are now undef and can be overridden.
+ // We Slide [NewIdxIn, OldIdxIn) down one position.
+ // |- X0/NewIdxIn -| ... |- Xn-1 -||- Xn/OldIdxIn -||- OldIdxOut -|
+ // => |- undef/NexIdxIn -| |- X0 -| ... |- Xn-1 -| |- Xn/OldIdxOut -|
+ std::copy_backward(NewIdxIn, OldIdxIn, OldIdxOut);
+ // NewIdxIn is now considered undef so we can reuse it for the moved
+ // value.
+ LiveRange::iterator NewSegment = NewIdxIn;
+ LiveRange::iterator Next = std::next(NewSegment);
+ if (SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
+ // There is no gap between NewSegment and its predecessor.
+ *NewSegment = LiveRange::Segment(Next->start, SplitPos,
+ Next->valno);
+ *Next = LiveRange::Segment(SplitPos, Next->end, OldIdxVNI);
+ Next->valno->def = SplitPos;
+ } else {
+ // There is a gap between NewSegment and its predecessor
+ // Value becomes live in.
+ *NewSegment = LiveRange::Segment(SplitPos, Next->start, OldIdxVNI);
+ NewSegment->valno->def = SplitPos;
+ }
+ } else {
+ // Leave the end point of a live def.
+ OldIdxOut->start = NewIdxDef;
+ OldIdxVNI->def = NewIdxDef;
+ if (OldIdxIn != E && SlotIndex::isEarlierInstr(NewIdx, OldIdxIn->end))
+ OldIdxIn->end = NewIdx.getRegSlot();
+ }
+ } else {
+ // OldIdxVNI is a dead def. It may have been moved across other values
+ // in LR, so move OldIdxOut up to NewIdxOut. Slide [NewIdxOut;OldIdxOut)
+ // down one position.
+ // |- X0/NewIdxOut -| ... |- Xn-1 -| |- Xn/OldIdxOut -| |- next - |
+ // => |- undef/NewIdxOut -| |- X0 -| ... |- Xn-1 -| |- next -|
+ std::copy_backward(NewIdxOut, OldIdxOut, std::next(OldIdxOut));
+ // OldIdxVNI can be reused now to build a new dead def segment.
+ LiveRange::iterator NewSegment = NewIdxOut;
+ VNInfo *NewSegmentVNI = OldIdxVNI;
+ *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
+ NewSegmentVNI);
+ NewSegmentVNI->def = NewIdxDef;
+ }
}
-
- // DefVNI is a dead def. It may have been moved across other values in LR,
- // so move I up to NewI. Slide [NewI;I) down one position.
- std::copy_backward(NewI, I, std::next(I));
- *NewI = LiveRange::Segment(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
}
void updateRegMaskSlots() {
@@ -1205,29 +1309,31 @@ private:
}
// Return the last use of reg between NewIdx and OldIdx.
- SlotIndex findLastUseBefore(unsigned Reg, LaneBitmask LaneMask) {
-
+ SlotIndex findLastUseBefore(SlotIndex Before, unsigned Reg,
+ LaneBitmask LaneMask) {
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
- SlotIndex LastUse = NewIdx;
+ SlotIndex LastUse = Before;
for (MachineOperand &MO : MRI.use_nodbg_operands(Reg)) {
+ if (MO.isUndef())
+ continue;
unsigned SubReg = MO.getSubReg();
if (SubReg != 0 && LaneMask != 0
&& (TRI.getSubRegIndexLaneMask(SubReg) & LaneMask) == 0)
continue;
- const MachineInstr *MI = MO.getParent();
+ const MachineInstr &MI = *MO.getParent();
SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
if (InstSlot > LastUse && InstSlot < OldIdx)
- LastUse = InstSlot;
+ LastUse = InstSlot.getRegSlot();
}
return LastUse;
}
// This is a regunit interval, so scanning the use list could be very
// expensive. Scan upwards from OldIdx instead.
- assert(NewIdx < OldIdx && "Expected upwards move");
+ assert(Before < OldIdx && "Expected upwards move");
SlotIndexes *Indexes = LIS.getSlotIndexes();
- MachineBasicBlock *MBB = Indexes->getMBBFromIndex(NewIdx);
+ MachineBasicBlock *MBB = Indexes->getMBBFromIndex(Before);
// OldIdx may not correspond to an instruction any longer, so set MII to
// point to the next instruction after OldIdx, or MBB->end().
@@ -1241,44 +1347,44 @@ private:
while (MII != Begin) {
if ((--MII)->isDebugValue())
continue;
- SlotIndex Idx = Indexes->getInstructionIndex(MII);
+ SlotIndex Idx = Indexes->getInstructionIndex(*MII);
- // Stop searching when NewIdx is reached.
- if (!SlotIndex::isEarlierInstr(NewIdx, Idx))
- return NewIdx;
+ // Stop searching when Before is reached.
+ if (!SlotIndex::isEarlierInstr(Before, Idx))
+ return Before;
// Check if MII uses Reg.
- for (MIBundleOperands MO(MII); MO.isValid(); ++MO)
- if (MO->isReg() &&
+ for (MIBundleOperands MO(*MII); MO.isValid(); ++MO)
+ if (MO->isReg() && !MO->isUndef() &&
TargetRegisterInfo::isPhysicalRegister(MO->getReg()) &&
TRI.hasRegUnit(MO->getReg(), Reg))
- return Idx;
+ return Idx.getRegSlot();
}
- // Didn't reach NewIdx. It must be the first instruction in the block.
- return NewIdx;
+ // Didn't reach Before. It must be the first instruction in the block.
+ return Before;
}
};
-void LiveIntervals::handleMove(MachineInstr* MI, bool UpdateFlags) {
- assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
+void LiveIntervals::handleMove(MachineInstr &MI, bool UpdateFlags) {
+ assert(!MI.isBundled() && "Can't handle bundled instructions yet.");
SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
Indexes->removeMachineInstrFromMaps(MI);
SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI);
- assert(getMBBStartIdx(MI->getParent()) <= OldIndex &&
- OldIndex < getMBBEndIdx(MI->getParent()) &&
+ assert(getMBBStartIdx(MI.getParent()) <= OldIndex &&
+ OldIndex < getMBBEndIdx(MI.getParent()) &&
"Cannot handle moves across basic block boundaries.");
HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
- HME.updateAllRanges(MI);
+ HME.updateAllRanges(&MI);
}
-void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI,
- MachineInstr* BundleStart,
+void LiveIntervals::handleMoveIntoBundle(MachineInstr &MI,
+ MachineInstr &BundleStart,
bool UpdateFlags) {
SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart);
HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
- HME.updateAllRanges(MI);
+ HME.updateAllRanges(&MI);
}
void LiveIntervals::repairOldRegInRange(const MachineBasicBlock::iterator Begin,
@@ -1295,8 +1401,8 @@ void LiveIntervals::repairOldRegInRange(const MachineBasicBlock::iterator Begin,
for (MachineBasicBlock::iterator I = End; I != Begin;) {
--I;
- MachineInstr *MI = I;
- if (MI->isDebugValue())
+ MachineInstr &MI = *I;
+ if (MI.isDebugValue())
continue;
SlotIndex instrIdx = getInstructionIndex(MI);
@@ -1305,8 +1411,9 @@ void LiveIntervals::repairOldRegInRange(const MachineBasicBlock::iterator Begin,
// FIXME: This doesn't currently handle early-clobber or multiple removed
// defs inside of the region to repair.
- for (MachineInstr::mop_iterator OI = MI->operands_begin(),
- OE = MI->operands_end(); OI != OE; ++OI) {
+ for (MachineInstr::mop_iterator OI = MI.operands_begin(),
+ OE = MI.operands_end();
+ OI != OE; ++OI) {
const MachineOperand &MO = *OI;
if (!MO.isReg() || MO.getReg() != Reg)
continue;
@@ -1376,26 +1483,27 @@ LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB,
ArrayRef<unsigned> OrigRegs) {
// Find anchor points, which are at the beginning/end of blocks or at
// instructions that already have indexes.
- while (Begin != MBB->begin() && !Indexes->hasIndex(Begin))
+ while (Begin != MBB->begin() && !Indexes->hasIndex(*Begin))
--Begin;
- while (End != MBB->end() && !Indexes->hasIndex(End))
+ while (End != MBB->end() && !Indexes->hasIndex(*End))
++End;
SlotIndex endIdx;
if (End == MBB->end())
endIdx = getMBBEndIdx(MBB).getPrevSlot();
else
- endIdx = getInstructionIndex(End);
+ endIdx = getInstructionIndex(*End);
Indexes->repairIndexesInRange(MBB, Begin, End);
for (MachineBasicBlock::iterator I = End; I != Begin;) {
--I;
- MachineInstr *MI = I;
- if (MI->isDebugValue())
+ MachineInstr &MI = *I;
+ if (MI.isDebugValue())
continue;
- for (MachineInstr::const_mop_iterator MOI = MI->operands_begin(),
- MOE = MI->operands_end(); MOI != MOE; ++MOI) {
+ for (MachineInstr::const_mop_iterator MOI = MI.operands_begin(),
+ MOE = MI.operands_end();
+ MOI != MOE; ++MOI) {
if (MOI->isReg() &&
TargetRegisterInfo::isVirtualRegister(MOI->getReg()) &&
!hasInterval(MOI->getReg())) {
@@ -1459,3 +1567,9 @@ void LiveIntervals::splitSeparateComponents(LiveInterval &LI,
}
ConEQ.Distribute(LI, SplitLIs.data(), *MRI);
}
+
+void LiveIntervals::constructMainRangeFromSubranges(LiveInterval &LI) {
+ assert(LRCalc && "LRCalc not initialized.");
+ LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
+ LRCalc->constructMainRangeFromSubranges(LI);
+}
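
The last hunk adds a new LiveIntervals::constructMainRangeFromSubranges()
entry point that rebuilds a virtual register's main live range from its
subregister ranges via LiveRangeCalc.  A hedged sketch of how a pass that
tracks subregister liveness might call it; only the signature added above
comes from the diff, the helper itself is hypothetical:

  #include "llvm/CodeGen/LiveInterval.h"
  #include "llvm/CodeGen/LiveIntervalAnalysis.h"

  // Assumes LI's SubRanges are already up to date (subregister liveness
  // tracking enabled for this register) and only the main range needs to
  // be recomputed from them.
  static void rebuildMainRange(llvm::LiveIntervals &LIS, llvm::LiveInterval &LI) {
    if (LI.hasSubRanges())
      LIS.constructMainRangeFromSubranges(LI);
  }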