Diffstat (limited to 'contrib/llvm/lib/CodeGen/MachineScheduler.cpp')
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineScheduler.cpp  | 1456
1 file changed, 958 insertions(+), 498 deletions(-)
diff --git a/contrib/llvm/lib/CodeGen/MachineScheduler.cpp b/contrib/llvm/lib/CodeGen/MachineScheduler.cpp
index fff6b2b..e71c4df 100644
--- a/contrib/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -21,6 +21,7 @@
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
@@ -30,6 +31,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include <queue>
using namespace llvm;
@@ -51,10 +53,11 @@ static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
static bool ViewMISchedDAGs = false;
#endif // NDEBUG
-// FIXME: remove this flag after initial testing. It should always be a good
-// thing.
-static cl::opt<bool> EnableCopyConstrain("misched-vcopy", cl::Hidden,
- cl::desc("Constrain vreg copies."), cl::init(true));
+static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
+ cl::desc("Enable register pressure scheduling."), cl::init(true));
+
+static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
+ cl::desc("Enable cyclic critical path analysis."), cl::init(true));
static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
cl::desc("Enable load clustering."), cl::init(true));
@@ -69,6 +72,10 @@ static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;
+// Pin the vtables to this file.
+void MachineSchedStrategy::anchor() {}
+void ScheduleDAGMutation::anchor() {}
+
//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//
@@ -98,6 +105,9 @@ public:
virtual void print(raw_ostream &O, const Module* = 0) const;
static char ID; // Class identification, replacement for typeinfo
+
+protected:
+ ScheduleDAGInstrs *createMachineScheduler();
};
} // namespace
@@ -152,12 +162,13 @@ DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
-static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);
+static ScheduleDAGInstrs *createGenericSched(MachineSchedContext *C);
/// Decrement this iterator until reaching the top or a non-debug instr.
-static MachineBasicBlock::iterator
-priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
+static MachineBasicBlock::const_iterator
+priorNonDebug(MachineBasicBlock::const_iterator I,
+ MachineBasicBlock::const_iterator Beg) {
assert(I != Beg && "reached the top of the region, cannot decrement");
while (--I != Beg) {
if (!I->isDebugValue())
@@ -166,10 +177,19 @@ priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
return I;
}
+/// Non-const version.
+static MachineBasicBlock::iterator
+priorNonDebug(MachineBasicBlock::iterator I,
+ MachineBasicBlock::const_iterator Beg) {
+ return const_cast<MachineInstr*>(
+ &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
+}
+
/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
-static MachineBasicBlock::iterator
-nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
+static MachineBasicBlock::const_iterator
+nextIfDebug(MachineBasicBlock::const_iterator I,
+ MachineBasicBlock::const_iterator End) {
for(; I != End; ++I) {
if (!I->isDebugValue())
break;
@@ -177,6 +197,34 @@ nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
return I;
}
+/// Non-const version.
+static MachineBasicBlock::iterator
+nextIfDebug(MachineBasicBlock::iterator I,
+ MachineBasicBlock::const_iterator End) {
+ // Cast the return value to nonconst MachineInstr, then cast to an
+ // instr_iterator, which does not check for null, finally return a
+ // bundle_iterator.
+ return MachineBasicBlock::instr_iterator(
+ const_cast<MachineInstr*>(
+ &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
+}
+
+/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
+ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
+ // Select the scheduler, or set the default.
+ MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
+ if (Ctor != useDefaultMachineSched)
+ return Ctor(this);
+
+ // Get the default scheduler set by the target for this function.
+ ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
+ if (Scheduler)
+ return Scheduler;
+
+ // Default to GenericScheduler.
+ return createGenericSched(this);
+}
+
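The selection order here is: an explicit -misched=<name> registry choice, then the target's PassConfig hook, then the generic fallback. That means a new strategy becomes selectable from the command line just by defining a registry entry. A minimal sketch, assuming a hypothetical MachineSchedStrategy subclass MySchedStrategy (MachineSchedRegistry and the ScheduleDAGMI constructor are taken from this file's surrounding code):

    // Sketch: register a strategy under -misched=my-sched.
    // MySchedStrategy is a hypothetical MachineSchedStrategy subclass.
    static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
      return new ScheduleDAGMI(C, new MySchedStrategy());
    }
    static MachineSchedRegistry
    MySchedRegistry("my-sched", "Hypothetical example strategy.", createMySched);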
/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
@@ -207,23 +255,14 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
if (VerifyScheduling) {
- DEBUG(LIS->print(dbgs()));
+ DEBUG(LIS->dump());
MF->verify(this, "Before machine scheduling.");
}
RegClassInfo->runOnMachineFunction(*MF);
- // Select the scheduler, or set the default.
- MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
- if (Ctor == useDefaultMachineSched) {
- // Get the default scheduler set by the target.
- Ctor = MachineSchedRegistry::getDefault();
- if (!Ctor) {
- Ctor = createConvergingSched;
- MachineSchedRegistry::setDefault(Ctor);
- }
- }
- // Instantiate the selected scheduler.
- OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));
+ // Instantiate the selected scheduler for this target, function, and
+ // optimization level.
+ OwningPtr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
// Visit all machine basic blocks.
//
@@ -258,14 +297,15 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
// The next region starts above the previous region. Look backward in the
// instruction stream until we find the nearest boundary.
+ unsigned NumRegionInstrs = 0;
MachineBasicBlock::iterator I = RegionEnd;
- for(;I != MBB->begin(); --I, --RemainingInstrs) {
+ for(;I != MBB->begin(); --I, --RemainingInstrs, ++NumRegionInstrs) {
if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
break;
}
// Notify the scheduler of the region, even if we may skip scheduling
// it. Perhaps it still needs to be bundled.
- Scheduler->enterRegion(MBB, I, RegionEnd, RemainingInstrs);
+ Scheduler->enterRegion(MBB, I, RegionEnd, NumRegionInstrs);
// Skip empty scheduling regions (0 or 1 schedulable instructions).
if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
@@ -280,7 +320,8 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
<< "\n From: " << *I << " To: ";
if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
else dbgs() << "End";
- dbgs() << " Remaining: " << RemainingInstrs << "\n");
+ dbgs() << " RegionInstrs: " << NumRegionInstrs
+ << " Remaining: " << RemainingInstrs << "\n");
// Schedule a region: possibly reorder instructions.
// This invalidates 'RegionEnd' and 'I'.
@@ -297,7 +338,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
Scheduler->finishBlock();
}
Scheduler->finalizeSchedule();
- DEBUG(LIS->print(dbgs()));
+ DEBUG(LIS->dump());
if (VerifyScheduling)
MF->verify(this, "After machine scheduling.");
return true;
@@ -309,7 +350,7 @@ void MachineScheduler::print(raw_ostream &O, const Module* m) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ReadyQueue::dump() {
- dbgs() << " " << Name << ": ";
+ dbgs() << Name << ": ";
for (unsigned i = 0, e = Queue.size(); i < e; ++i)
dbgs() << Queue[i]->NodeNum << " ";
dbgs() << "\n";
@@ -449,13 +490,19 @@ bool ScheduleDAGMI::checkSchedLimit() {
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
MachineBasicBlock::iterator begin,
MachineBasicBlock::iterator end,
- unsigned endcount)
+ unsigned regioninstrs)
{
- ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);
+ ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
// For convenience remember the end of the liveness region.
LiveRegionEnd =
(RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);
+
+ SUPressureDiffs.clear();
+
+ SchedImpl->initPolicy(begin, end, regioninstrs);
+
+ ShouldTrackPressure = SchedImpl->shouldTrackPressure();
}
// Set up the register pressure trackers for the top and bottom scheduled regions.
@@ -467,7 +514,7 @@ void ScheduleDAGMI::initRegPressure() {
// Close the RPTracker to finalize live ins.
RPTracker.closeRegion();
- DEBUG(RPTracker.getPressure().dump(TRI));
+ DEBUG(RPTracker.dump());
// Initialize the live ins and live outs.
TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
@@ -479,9 +526,23 @@ void ScheduleDAGMI::initRegPressure() {
TopRPTracker.closeTop();
BotRPTracker.closeBottom();
+ BotRPTracker.initLiveThru(RPTracker);
+ if (!BotRPTracker.getLiveThru().empty()) {
+ TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
+ DEBUG(dbgs() << "Live Thru: ";
+ dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
+ }
+
+ // For each live out vreg reduce the pressure change associated with other
+ // uses of the same vreg below the live-out reaching def.
+ updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);
+
// Account for liveness generated by the region boundary.
- if (LiveRegionEnd != RegionEnd)
- BotRPTracker.recede();
+ if (LiveRegionEnd != RegionEnd) {
+ SmallVector<unsigned, 8> LiveUses;
+ BotRPTracker.recede(&LiveUses);
+ updatePressureDiffs(LiveUses);
+ }
assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");
@@ -491,38 +552,88 @@ void ScheduleDAGMI::initRegPressure() {
const std::vector<unsigned> &RegionPressure =
RPTracker.getPressure().MaxSetPressure;
for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
- unsigned Limit = TRI->getRegPressureSetLimit(i);
- DEBUG(dbgs() << TRI->getRegPressureSetName(i)
- << "Limit " << Limit
- << " Actual " << RegionPressure[i] << "\n");
- if (RegionPressure[i] > Limit)
- RegionCriticalPSets.push_back(PressureElement(i, 0));
+ unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
+ if (RegionPressure[i] > Limit) {
+ DEBUG(dbgs() << TRI->getRegPressureSetName(i)
+ << " Limit " << Limit
+ << " Actual " << RegionPressure[i] << "\n");
+ RegionCriticalPSets.push_back(PressureChange(i));
+ }
}
DEBUG(dbgs() << "Excess PSets: ";
for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
dbgs() << TRI->getRegPressureSetName(
- RegionCriticalPSets[i].PSetID) << " ";
+ RegionCriticalPSets[i].getPSet()) << " ";
dbgs() << "\n");
}
-// FIXME: When the pressure tracker deals in pressure differences then we won't
-// iterate over all RegionCriticalPSets[i].
void ScheduleDAGMI::
-updateScheduledPressure(const std::vector<unsigned> &NewMaxPressure) {
- for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
- unsigned ID = RegionCriticalPSets[i].PSetID;
- int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
- if ((int)NewMaxPressure[ID] > MaxUnits)
- MaxUnits = NewMaxPressure[ID];
+updateScheduledPressure(const SUnit *SU,
+ const std::vector<unsigned> &NewMaxPressure) {
+ const PressureDiff &PDiff = getPressureDiff(SU);
+ unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
+ for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
+ I != E; ++I) {
+ if (!I->isValid())
+ break;
+ unsigned ID = I->getPSet();
+ while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
+ ++CritIdx;
+ if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
+ if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
+ && NewMaxPressure[ID] <= INT16_MAX)
+ RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
+ }
+ unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
+ if (NewMaxPressure[ID] >= Limit - 2) {
+ DEBUG(dbgs() << " " << TRI->getRegPressureSetName(ID) << ": "
+ << NewMaxPressure[ID] << " > " << Limit << "(+ "
+ << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
+ }
}
- DEBUG(
- for (unsigned i = 0, e = NewMaxPressure.size(); i < e; ++i) {
- unsigned Limit = TRI->getRegPressureSetLimit(i);
- if (NewMaxPressure[i] > Limit ) {
- dbgs() << " " << TRI->getRegPressureSetName(i) << ": "
- << NewMaxPressure[i] << " > " << Limit << "\n";
+}
+
+/// Update the PressureDiff array for liveness after scheduling this
+/// instruction.
+void ScheduleDAGMI::updatePressureDiffs(ArrayRef<unsigned> LiveUses) {
+ for (unsigned LUIdx = 0, LUEnd = LiveUses.size(); LUIdx != LUEnd; ++LUIdx) {
+ /// FIXME: Currently assuming single-use physregs.
+ unsigned Reg = LiveUses[LUIdx];
+ DEBUG(dbgs() << " LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
+ if (!TRI->isVirtualRegister(Reg))
+ continue;
+
+ // This may be called before CurrentBottom has been initialized. However,
+ // BotRPTracker must have a valid position. We want the value live into the
+ // instruction or live out of the block, so ask for the previous
+ // instruction's live-out.
+ const LiveInterval &LI = LIS->getInterval(Reg);
+ VNInfo *VNI;
+ MachineBasicBlock::const_iterator I =
+ nextIfDebug(BotRPTracker.getPos(), BB->end());
+ if (I == BB->end())
+ VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
+ else {
+ LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(I));
+ VNI = LRQ.valueIn();
+ }
+ // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
+ assert(VNI && "No live value at use.");
+ for (VReg2UseMap::iterator
+ UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
+ SUnit *SU = UI->SU;
+ DEBUG(dbgs() << " UpdateRegP: SU(" << SU->NodeNum << ") "
+ << *SU->getInstr());
+ // If this use comes before the reaching def, it cannot be a last use, so
+ // decrease its pressure change.
+ if (!SU->isScheduled && SU != &ExitSU) {
+ LiveQueryResult LRQ
+ = LI.Query(LIS->getInstructionIndex(SU->getInstr()));
+ if (LRQ.valueIn() == VNI)
+ getPressureDiff(SU).addPressureChange(Reg, true, &MRI);
}
- });
+ }
+ }
}
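The intent of the reaching-def check above, illustrated on a hypothetical vreg %v with two uses below its def (a comment-only sketch; only the last use actually shrinks the live range):

    // %v = def ...        // opens %v's live range
    // ... = use %v        // earlier use: the value still reaches the use below,
    //                     //   so it must not be credited with a pressure
    //                     //   decrease (hence addPressureChange above)
    // ... = use %v        // last use: %v's pressure set decrements here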
/// schedule - Called back from MachineScheduler::runOnMachineFunction
@@ -580,15 +691,23 @@ void ScheduleDAGMI::schedule() {
/// Build the DAG and set up three register pressure trackers.
void ScheduleDAGMI::buildDAGWithRegPressure() {
+ if (!ShouldTrackPressure) {
+ RPTracker.reset();
+ RegionCriticalPSets.clear();
+ buildSchedGraph(AA);
+ return;
+ }
+
// Initialize the register pressure tracker used by buildSchedGraph.
- RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);
+ RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
+ /*TrackUntiedDefs=*/true);
// Account for liveness generated by the region boundary.
if (LiveRegionEnd != RegionEnd)
RPTracker.recede();
// Build the DAG, and compute current register pressure.
- buildSchedGraph(AA, &RPTracker);
+ buildSchedGraph(AA, &RPTracker, &SUPressureDiffs);
// Initialize top/bottom trackers after computing region pressure.
initRegPressure();
@@ -631,6 +750,91 @@ void ScheduleDAGMI::findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
ExitSU.biasCriticalPath();
}
+/// Compute the max cyclic critical path through the DAG. The scheduling DAG
+/// only provides the critical path for single block loops. To handle loops that
+/// span blocks, we could use the vreg path latencies provided by
+/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
+/// available for use in the scheduler.
+///
+/// The cyclic path estimation identifies a def-use pair that crosses the back
+/// edge and considers the depth and height of the nodes. For example, consider
+/// the following instruction sequence where each instruction has unit latency
+/// and defines an eponymous virtual register:
+///
+/// a->b(a,c)->c(b)->d(c)->exit
+///
+/// The cyclic critical path is two cycles: b->c->b
+/// The acyclic critical path is four cycles: a->b->c->d->exit
+/// LiveOutHeight = height(c) = len(c->d->exit) = 2
+/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
+/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
+/// LiveInDepth = depth(b) = len(a->b) = 1
+///
+/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
+/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
+/// CyclicCriticalPath = min(2, 2) = 2
+unsigned ScheduleDAGMI::computeCyclicCriticalPath() {
+ // This only applies to single-block loops.
+ if (!BB->isSuccessor(BB))
+ return 0;
+
+ unsigned MaxCyclicLatency = 0;
+ // Visit each live out vreg def to find def/use pairs that cross iterations.
+ ArrayRef<unsigned> LiveOuts = RPTracker.getPressure().LiveOutRegs;
+ for (ArrayRef<unsigned>::iterator RI = LiveOuts.begin(), RE = LiveOuts.end();
+ RI != RE; ++RI) {
+ unsigned Reg = *RI;
+ if (!TRI->isVirtualRegister(Reg))
+ continue;
+ const LiveInterval &LI = LIS->getInterval(Reg);
+ const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
+ if (!DefVNI)
+ continue;
+
+ MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
+ const SUnit *DefSU = getSUnit(DefMI);
+ if (!DefSU)
+ continue;
+
+ unsigned LiveOutHeight = DefSU->getHeight();
+ unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
+ // Visit all local users of the vreg def.
+ for (VReg2UseMap::iterator
+ UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
+ if (UI->SU == &ExitSU)
+ continue;
+
+ // Only consider uses of the phi.
+ LiveQueryResult LRQ =
+ LI.Query(LIS->getInstructionIndex(UI->SU->getInstr()));
+ if (!LRQ.valueIn()->isPHIDef())
+ continue;
+
+ // Assume that a path spanning two iterations is a cycle, which could
+ // overestimate in strange cases. This allows cyclic latency to be
+ // estimated as the minimum slack of the vreg's depth or height.
+ unsigned CyclicLatency = 0;
+ if (LiveOutDepth > UI->SU->getDepth())
+ CyclicLatency = LiveOutDepth - UI->SU->getDepth();
+
+ unsigned LiveInHeight = UI->SU->getHeight() + DefSU->Latency;
+ if (LiveInHeight > LiveOutHeight) {
+ if (LiveInHeight - LiveOutHeight < CyclicLatency)
+ CyclicLatency = LiveInHeight - LiveOutHeight;
+ }
+ else
+ CyclicLatency = 0;
+
+ DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
+ << UI->SU->NodeNum << ") = " << CyclicLatency << "c\n");
+ if (CyclicLatency > MaxCyclicLatency)
+ MaxCyclicLatency = CyclicLatency;
+ }
+ }
+ DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
+ return MaxCyclicLatency;
+}
+
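For concreteness, the a->b(a,c)->c(b)->d(c) chain in the comment above corresponds to a single-block loop such as the following hypothetical C++ rendering, where each statement stands in for one unit-latency instruction and c is the live-out value crossing the back edge:

    unsigned a = 1, c = 0, d = 0;   // 'a' is loop-invariant in this rendering
    for (;;) {                      // single-block loop: BB->isSuccessor(BB)
      unsigned b = a + c;           // b(a,c): reads last iteration's c (the PHI)
      c = b * 2;                    // c(b): live out; reaching def of the next b
      d = c - 1;                    // d(c)
    }
    // Cyclic path b->c->b = 2 cycles; acyclic path a->b->c->d->exit = 4 cycles.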
/// Identify DAG roots and set up scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
ArrayRef<SUnit*> BotRoots) {
@@ -658,11 +862,13 @@ void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
SchedImpl->registerRoots();
// Advance past initial DebugValues.
- assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
- TopRPTracker.setPos(CurrentTop);
-
CurrentBottom = RegionEnd;
+
+ if (ShouldTrackPressure) {
+ assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
+ TopRPTracker.setPos(CurrentTop);
+ }
}
/// Move an instruction and update register pressure.
@@ -679,10 +885,12 @@ void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) {
TopRPTracker.setPos(MI);
}
- // Update top scheduled pressure.
- TopRPTracker.advance();
- assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
- updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
+ if (ShouldTrackPressure) {
+ // Update top scheduled pressure.
+ TopRPTracker.advance();
+ assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
+ updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
+ }
}
else {
assert(SU->isBottomReady() && "node still has unscheduled dependencies");
@@ -698,10 +906,14 @@ void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) {
moveInstruction(MI, CurrentBottom);
CurrentBottom = MI;
}
- // Update bottom scheduled pressure.
- BotRPTracker.recede();
- assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
- updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
+ if (ShouldTrackPressure) {
+ // Update bottom scheduled pressure.
+ SmallVector<unsigned, 8> LiveUses;
+ BotRPTracker.recede(&LiveUses);
+ assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
+ updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
+ updatePressureDiffs(LiveUses);
+ }
}
}
@@ -1019,6 +1231,12 @@ void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG) {
GlobalSegment->start)) {
return;
}
+ // If the prior global segment may be defined by the same two-address
+ // instruction that also defines LocalLI, then we can't make a hole here.
+ if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->start,
+ LocalLI->beginIndex())) {
+ return;
+ }
// If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
// it would be a disconnected component in the live range.
assert(llvm::prior(GlobalSegment)->start < LocalLI->beginIndex() &&
@@ -1101,24 +1319,23 @@ void CopyConstrain::apply(ScheduleDAGMI *DAG) {
}
//===----------------------------------------------------------------------===//
-// ConvergingScheduler - Implementation of the standard MachineSchedStrategy.
+// GenericScheduler - Implementation of the generic MachineSchedStrategy.
//===----------------------------------------------------------------------===//
namespace {
-/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
+/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
-class ConvergingScheduler : public MachineSchedStrategy {
+class GenericScheduler : public MachineSchedStrategy {
public:
/// Represent the type of SchedCandidate found within a single queue.
/// pickNodeBidirectional depends on these listed by decreasing priority.
enum CandReason {
- NoCand, PhysRegCopy, SingleExcess, SingleCritical, Cluster, Weak,
+ NoCand, PhysRegCopy, RegExcess, RegCritical, Cluster, Weak, RegMax,
ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
- TopDepthReduce, TopPathReduce, SingleMax, MultiPressure, NextDefUse,
- NodeOrder};
+ TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};
#ifndef NDEBUG
- static const char *getReasonStr(ConvergingScheduler::CandReason Reason);
+ static const char *getReasonStr(GenericScheduler::CandReason Reason);
#endif
/// Policy for scheduling the next instruction in the candidate's zone.
@@ -1149,7 +1366,7 @@ public:
}
};
- /// Store the state used by ConvergingScheduler heuristics, required for the
+ /// Store the state used by GenericScheduler heuristics, required for the
/// lifetime of one invocation of pickNode().
struct SchedCandidate {
CandPolicy Policy;
@@ -1160,6 +1377,9 @@ public:
// The reason for this candidate.
CandReason Reason;
+ // Set of reasons that apply to multiple candidates.
+ uint32_t RepeatReasonSet;
+
// Register pressure values for the best candidate.
RegPressureDelta RPDelta;
@@ -1167,7 +1387,7 @@ public:
SchedResourceDelta ResDelta;
SchedCandidate(const CandPolicy &policy)
- : Policy(policy), SU(NULL), Reason(NoCand) {}
+ : Policy(policy), SU(NULL), Reason(NoCand), RepeatReasonSet(0) {}
bool isValid() const { return SU; }
@@ -1180,6 +1400,9 @@ public:
ResDelta = Best.ResDelta;
}
+ bool isRepeat(CandReason R) { return RepeatReasonSet & (1 << R); }
+ void setRepeat(CandReason R) { RepeatReasonSet |= (1 << R); }
+
void initResourceDelta(const ScheduleDAGMI *DAG,
const TargetSchedModel *SchedModel);
};
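RepeatReasonSet lets the comparison helpers remember a criterion on which two candidates already tied, so the same tie is not re-reported on later comparisons. A sketch of the pattern, modeled on the tryLess-style helpers used later in this file (not visible in this hunk, so treat the exact shape as an assumption):

    static bool tryLess(int TryVal, int CandVal,
                        GenericScheduler::SchedCandidate &TryCand,
                        GenericScheduler::SchedCandidate &Cand,
                        GenericScheduler::CandReason Reason) {
      if (TryVal < CandVal) {
        TryCand.Reason = Reason;
        return true;
      }
      if (TryVal > CandVal) {
        if (Cand.Reason > Reason)
          Cand.Reason = Reason;
        return true;
      }
      Cand.setRepeat(Reason); // equal on this criterion: remember the tie
      return false;
    }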
@@ -1188,33 +1411,27 @@ public:
struct SchedRemainder {
// Critical path through the DAG in expected latency.
unsigned CriticalPath;
+ unsigned CyclicCritPath;
+
+ // Scaled count of micro-ops left to schedule.
+ unsigned RemIssueCount;
+
+ bool IsAcyclicLatencyLimited;
// Unscheduled resources
SmallVector<unsigned, 16> RemainingCounts;
- // Critical resource for the unscheduled zone.
- unsigned CritResIdx;
- // Number of micro-ops left to schedule.
- unsigned RemainingMicroOps;
void reset() {
CriticalPath = 0;
+ CyclicCritPath = 0;
+ RemIssueCount = 0;
+ IsAcyclicLatencyLimited = false;
RemainingCounts.clear();
- CritResIdx = 0;
- RemainingMicroOps = 0;
}
SchedRemainder() { reset(); }
void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
-
- unsigned getMaxRemainingCount(const TargetSchedModel *SchedModel) const {
- if (!SchedModel->hasInstrSchedModel())
- return 0;
-
- return std::max(
- RemainingMicroOps * SchedModel->getMicroOpFactor(),
- RemainingCounts[CritResIdx]);
- }
};
/// Each Scheduling boundary is associated with ready queues. It tracks the
@@ -1235,8 +1452,13 @@ public:
ScheduleHazardRecognizer *HazardRec;
+ /// Number of cycles it takes to issue the instructions scheduled in this
+ /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
+ /// See getStalls().
unsigned CurrCycle;
- unsigned IssueCount;
+
+ /// Micro-ops issued in the current cycle.
+ unsigned CurrMOps;
/// MinReadyCycle - Cycle of the soonest available instruction.
unsigned MinReadyCycle;
@@ -1244,52 +1466,71 @@ public:
// The expected latency of the critical path in this scheduled zone.
unsigned ExpectedLatency;
- // Resources used in the scheduled zone beyond this boundary.
- SmallVector<unsigned, 16> ResourceCounts;
+ // The latency of dependence chains leading into this zone.
+ // For each node scheduled bottom-up: DLat = max DLat, N.Depth.
+ // For each cycle scheduled: DLat -= 1.
+ unsigned DependentLatency;
+
+ /// Count the scheduled (issued) micro-ops that can be retired by
+ /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
+ unsigned RetiredMOps;
+
+ // Count scheduled resources that have been executed. Resources are
+ // considered executed if they become ready in the time that it takes to
+ // saturate any resource including the one in question. Counts are scaled
+ // for direct comparison with other resources. Counts can be compared with
+ // MOps * getMicroOpFactor and Latency * getLatencyFactor.
+ SmallVector<unsigned, 16> ExecutedResCounts;
+
+ /// Cache the max count for a single resource.
+ unsigned MaxExecutedResCount;
// Cache the critical resources ID in this scheduled zone.
- unsigned CritResIdx;
+ unsigned ZoneCritResIdx;
// Is the scheduled region resource limited vs. latency limited.
bool IsResourceLimited;
- unsigned ExpectedCount;
-
#ifndef NDEBUG
- // Remember the greatest min operand latency.
- unsigned MaxMinLatency;
+ // Remember the greatest operand latency as an upper bound on the number of
+ // times we should retry the pending queue because of a hazard.
+ unsigned MaxObservedLatency;
#endif
void reset() {
// A new HazardRec is created for each DAG and owned by SchedBoundary.
- delete HazardRec;
-
+ // Destroying and reconstructing it is very expensive though. So keep
+ // invalid, placeholder HazardRecs.
+ if (HazardRec && HazardRec->isEnabled()) {
+ delete HazardRec;
+ HazardRec = 0;
+ }
Available.clear();
Pending.clear();
CheckPending = false;
NextSUs.clear();
- HazardRec = 0;
CurrCycle = 0;
- IssueCount = 0;
+ CurrMOps = 0;
MinReadyCycle = UINT_MAX;
ExpectedLatency = 0;
- ResourceCounts.resize(1);
- assert(!ResourceCounts[0] && "nonzero count for bad resource");
- CritResIdx = 0;
+ DependentLatency = 0;
+ RetiredMOps = 0;
+ MaxExecutedResCount = 0;
+ ZoneCritResIdx = 0;
IsResourceLimited = false;
- ExpectedCount = 0;
#ifndef NDEBUG
- MaxMinLatency = 0;
+ MaxObservedLatency = 0;
#endif
// Reserve a zero-count for invalid CritResIdx.
- ResourceCounts.resize(1);
+ ExecutedResCounts.resize(1);
+ assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
}
/// Pending queues extend the ready queues with the same ID and the
/// PendingFlag set.
SchedBoundary(unsigned ID, const Twine &Name):
DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
- Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
+ Pending(ID << GenericScheduler::LogMaxQID, Name+".P"),
HazardRec(0) {
reset();
}
@@ -1300,28 +1541,63 @@ public:
SchedRemainder *rem);
bool isTop() const {
- return Available.getID() == ConvergingScheduler::TopQID;
+ return Available.getID() == GenericScheduler::TopQID;
+ }
+
+#ifndef NDEBUG
+ const char *getResourceName(unsigned PIdx) {
+ if (!PIdx)
+ return "MOps";
+ return SchedModel->getProcResource(PIdx)->Name;
+ }
+#endif
+
+ /// Get the number of latency cycles "covered" by the scheduled
+ /// instructions. This is the larger of the critical path within the zone
+ /// and the number of cycles required to issue the instructions.
+ unsigned getScheduledLatency() const {
+ return std::max(ExpectedLatency, CurrCycle);
}
unsigned getUnscheduledLatency(SUnit *SU) const {
- if (isTop())
- return SU->getHeight();
- return SU->getDepth() + SU->Latency;
+ return isTop() ? SU->getHeight() : SU->getDepth();
+ }
+
+ unsigned getResourceCount(unsigned ResIdx) const {
+ return ExecutedResCounts[ResIdx];
}
+ /// Get the scaled count of scheduled micro-ops and resources, including
+ /// executed resources.
unsigned getCriticalCount() const {
- return ResourceCounts[CritResIdx];
+ if (!ZoneCritResIdx)
+ return RetiredMOps * SchedModel->getMicroOpFactor();
+ return getResourceCount(ZoneCritResIdx);
+ }
+
+ /// Get a scaled count for the minimum execution time of the scheduled
+ /// micro-ops that are ready to execute by getExecutedCount. Notice the
+ /// feedback loop.
+ unsigned getExecutedCount() const {
+ return std::max(CurrCycle * SchedModel->getLatencyFactor(),
+ MaxExecutedResCount);
}
bool checkHazard(SUnit *SU);
- void setLatencyPolicy(CandPolicy &Policy);
+ unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);
+
+ unsigned getOtherResourceCount(unsigned &OtherCritIdx);
+
+ void setPolicy(CandPolicy &Policy, SchedBoundary &OtherZone);
void releaseNode(SUnit *SU, unsigned ReadyCycle);
- void bumpCycle();
+ void bumpCycle(unsigned NextCycle);
- void countResource(unsigned PIdx, unsigned Cycles);
+ void incExecutedResources(unsigned PIdx, unsigned Count);
+
+ unsigned countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle);
void bumpNode(SUnit *SU);
@@ -1330,9 +1606,14 @@ public:
void removeReady(SUnit *SU);
SUnit *pickOnlyChoice();
+
+#ifndef NDEBUG
+ void dumpScheduledState();
+#endif
};
private:
+ const MachineSchedContext *Context;
ScheduleDAGMI *DAG;
const TargetSchedModel *SchedModel;
const TargetRegisterInfo *TRI;
@@ -1342,6 +1623,7 @@ private:
SchedBoundary Top;
SchedBoundary Bot;
+ MachineSchedPolicy RegionPolicy;
public:
/// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
enum {
@@ -1350,8 +1632,15 @@ public:
LogMaxQID = 2
};
- ConvergingScheduler():
- DAG(0), SchedModel(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
+ GenericScheduler(const MachineSchedContext *C):
+ Context(C), DAG(0), SchedModel(0), TRI(0),
+ Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
+
+ virtual void initPolicy(MachineBasicBlock::iterator Begin,
+ MachineBasicBlock::iterator End,
+ unsigned NumRegionInstrs);
+
+ bool shouldTrackPressure() const { return RegionPolicy.ShouldTrackPressure; }
virtual void initialize(ScheduleDAGMI *dag);
@@ -1366,14 +1655,7 @@ public:
virtual void registerRoots();
protected:
- void balanceZones(
- ConvergingScheduler::SchedBoundary &CriticalZone,
- ConvergingScheduler::SchedCandidate &CriticalCand,
- ConvergingScheduler::SchedBoundary &OppositeZone,
- ConvergingScheduler::SchedCandidate &OppositeCand);
-
- void checkResourceLimits(ConvergingScheduler::SchedCandidate &TopCand,
- ConvergingScheduler::SchedCandidate &BotCand);
+ void checkAcyclicLatency();
void tryCandidate(SchedCandidate &Cand,
SchedCandidate &TryCand,
@@ -1395,7 +1677,7 @@ protected:
};
} // namespace
-void ConvergingScheduler::SchedRemainder::
+void GenericScheduler::SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
reset();
if (!SchedModel->hasInstrSchedModel())
@@ -1404,7 +1686,8 @@ init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
for (std::vector<SUnit>::iterator
I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
- RemainingMicroOps += SchedModel->getNumMicroOps(I->getInstr(), SC);
+ RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
+ * SchedModel->getMicroOpFactor();
for (TargetSchedModel::ProcResIter
PI = SchedModel->getWriteProcResBegin(SC),
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
@@ -1413,26 +1696,61 @@ init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
RemainingCounts[PIdx] += (Factor * PI->Cycles);
}
}
- for (unsigned PIdx = 0, PEnd = SchedModel->getNumProcResourceKinds();
- PIdx != PEnd; ++PIdx) {
- if ((int)(RemainingCounts[PIdx] - RemainingCounts[CritResIdx])
- >= (int)SchedModel->getLatencyFactor()) {
- CritResIdx = PIdx;
- }
- }
}
-void ConvergingScheduler::SchedBoundary::
+void GenericScheduler::SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
reset();
DAG = dag;
SchedModel = smodel;
Rem = rem;
if (SchedModel->hasInstrSchedModel())
- ResourceCounts.resize(SchedModel->getNumProcResourceKinds());
+ ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
+}
+
+/// Initialize the per-region scheduling policy.
+void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
+ MachineBasicBlock::iterator End,
+ unsigned NumRegionInstrs) {
+ const TargetMachine &TM = Context->MF->getTarget();
+
+ // Avoid setting up the register pressure tracker for small regions to save
+ // compile time. As a rough heuristic, only track pressure when the number of
+ // schedulable instructions exceeds half the integer register file.
+ unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
+ TM.getTargetLowering()->getRegClassFor(MVT::i32));
+
+ RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
+
+ // For generic targets, we default to bottom-up, because it's simpler and more
+ // compile-time optimizations have been implemented in that direction.
+ RegionPolicy.OnlyBottomUp = true;
+
+ // Allow the subtarget to override default policy.
+ const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
+ ST.overrideSchedPolicy(RegionPolicy, Begin, End, NumRegionInstrs);
+
+ // After subtarget overrides, apply command line options.
+ if (!EnableRegPressure)
+ RegionPolicy.ShouldTrackPressure = false;
+
+ // Check whether -misched-topdown/bottomup can force or unforce the scheduling direction.
+ // e.g. -misched-bottomup=false allows scheduling in both directions.
+ assert((!ForceTopDown || !ForceBottomUp) &&
+ "-misched-topdown incompatible with -misched-bottomup");
+ if (ForceBottomUp.getNumOccurrences() > 0) {
+ RegionPolicy.OnlyBottomUp = ForceBottomUp;
+ if (RegionPolicy.OnlyBottomUp)
+ RegionPolicy.OnlyTopDown = false;
+ }
+ if (ForceTopDown.getNumOccurrences() > 0) {
+ RegionPolicy.OnlyTopDown = ForceTopDown;
+ if (RegionPolicy.OnlyTopDown)
+ RegionPolicy.OnlyBottomUp = false;
+ }
}
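The subtarget hook invoked above lets a target replace these defaults per region. A hedged sketch (MyTargetSubtarget is hypothetical, and the parameter types are inferred from the call site rather than from the header):

    void MyTargetSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
                                                MachineInstr *begin,
                                                MachineInstr *end,
                                                unsigned NumRegionInstrs) const {
      Policy.OnlyTopDown = true;    // schedule this target strictly top-down
      Policy.OnlyBottomUp = false;
      Policy.ShouldTrackPressure = NumRegionInstrs > 32; // custom threshold
    }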
-void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
+void GenericScheduler::initialize(ScheduleDAGMI *dag) {
DAG = dag;
SchedModel = DAG->getSchedModel();
TRI = DAG->TRI;
@@ -1447,31 +1765,36 @@ void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
// are disabled, then these HazardRecs will be disabled.
const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
const TargetMachine &TM = DAG->MF.getTarget();
- Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
- Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
-
- assert((!ForceTopDown || !ForceBottomUp) &&
- "-misched-topdown incompatible with -misched-bottomup");
+ if (!Top.HazardRec) {
+ Top.HazardRec =
+ TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
+ }
+ if (!Bot.HazardRec) {
+ Bot.HazardRec =
+ TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
+ }
}
-void ConvergingScheduler::releaseTopNode(SUnit *SU) {
+void GenericScheduler::releaseTopNode(SUnit *SU) {
if (SU->isScheduled)
return;
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
+ if (I->isWeak())
+ continue;
unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
- unsigned MinLatency = I->getMinLatency();
+ unsigned Latency = I->getLatency();
#ifndef NDEBUG
- Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
+ Top.MaxObservedLatency = std::max(Latency, Top.MaxObservedLatency);
#endif
- if (SU->TopReadyCycle < PredReadyCycle + MinLatency)
- SU->TopReadyCycle = PredReadyCycle + MinLatency;
+ if (SU->TopReadyCycle < PredReadyCycle + Latency)
+ SU->TopReadyCycle = PredReadyCycle + Latency;
}
Top.releaseNode(SU, SU->TopReadyCycle);
}
-void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
+void GenericScheduler::releaseBottomNode(SUnit *SU) {
if (SU->isScheduled)
return;
@@ -1482,18 +1805,56 @@ void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
if (I->isWeak())
continue;
unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
- unsigned MinLatency = I->getMinLatency();
+ unsigned Latency = I->getLatency();
#ifndef NDEBUG
- Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
+ Bot.MaxObservedLatency = std::max(Latency, Bot.MaxObservedLatency);
#endif
- if (SU->BotReadyCycle < SuccReadyCycle + MinLatency)
- SU->BotReadyCycle = SuccReadyCycle + MinLatency;
+ if (SU->BotReadyCycle < SuccReadyCycle + Latency)
+ SU->BotReadyCycle = SuccReadyCycle + Latency;
}
Bot.releaseNode(SU, SU->BotReadyCycle);
}
-void ConvergingScheduler::registerRoots() {
+/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
+/// critical path by more cycles than it takes to drain the instruction buffer.
+/// We estimate an upper bound on in-flight instructions as:
+///
+/// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
+/// InFlightIterations = AcyclicPath / CyclesPerIteration
+/// InFlightResources = InFlightIterations * LoopResources
+///
+/// TODO: Check execution resources in addition to IssueCount.
+void GenericScheduler::checkAcyclicLatency() {
+ if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
+ return;
+
+ // Scaled number of cycles per loop iteration.
+ unsigned IterCount =
+ std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
+ Rem.RemIssueCount);
+ // Scaled acyclic critical path.
+ unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
+ // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
+ unsigned InFlightCount =
+ (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
+ unsigned BufferLimit =
+ SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
+
+ Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
+
+ DEBUG(dbgs() << "IssueCycles="
+ << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
+ << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
+ << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
+ << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
+ << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
+ if (Rem.IsAcyclicLatencyLimited)
+ dbgs() << " ACYCLIC LATENCY LIMIT\n");
+}
+
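Plugging hypothetical numbers into the formulas above, with all scaling factors taken as 1 for readability: CyclicCritPath = 2c, CriticalPath = 40c, RemIssueCount = 4 micro-ops per iteration, and a 16-entry micro-op buffer:

    unsigned IterCount     = std::max(2u, 4u);      // = 4: issue, not latency, bound
    unsigned AcyclicCount  = 40;                    // scaled acyclic critical path
    unsigned InFlightCount = (40 * 4 + 4 - 1) / 4;  // = 40 micro-ops in flight
    bool     Limited       = InFlightCount > 16;    // true: acyclic latency limited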
+void GenericScheduler::registerRoots() {
Rem.CriticalPath = DAG->ExitSU.getDepth();
+
// Some roots may not feed into ExitSU. Check all of them in case.
for (std::vector<SUnit*>::const_iterator
I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
@@ -1501,6 +1862,11 @@ void ConvergingScheduler::registerRoots() {
Rem.CriticalPath = (*I)->getDepth();
}
DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
+
+ if (EnableCyclicPath) {
+ Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
+ checkAcyclicLatency();
+ }
}
/// Does this SU have a hazard within the current instruction group.
@@ -1516,12 +1882,12 @@ void ConvergingScheduler::registerRoots() {
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
-bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) {
+bool GenericScheduler::SchedBoundary::checkHazard(SUnit *SU) {
if (HazardRec->isEnabled())
return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
- if ((IssueCount > 0) && (IssueCount + uops > SchedModel->getIssueWidth())) {
+ if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops="
<< SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
return true;
@@ -1529,45 +1895,125 @@ bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) {
return false;
}
-/// Compute the remaining latency to determine whether ILP should be increased.
-void ConvergingScheduler::SchedBoundary::setLatencyPolicy(CandPolicy &Policy) {
- // FIXME: compile time. In all, we visit four queues here one we should only
- // need to visit the one that was last popped if we cache the result.
+// Find the unscheduled node in ReadySUs with the highest latency.
+unsigned GenericScheduler::SchedBoundary::
+findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
+ SUnit *LateSU = 0;
unsigned RemLatency = 0;
- for (ReadyQueue::iterator I = Available.begin(), E = Available.end();
+ for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
I != E; ++I) {
unsigned L = getUnscheduledLatency(*I);
- DEBUG(dbgs() << " " << Available.getName()
- << " RemLatency SU(" << (*I)->NodeNum << ") " << L << '\n');
- if (L > RemLatency)
+ if (L > RemLatency) {
RemLatency = L;
+ LateSU = *I;
+ }
}
- for (ReadyQueue::iterator I = Pending.begin(), E = Pending.end();
- I != E; ++I) {
- unsigned L = getUnscheduledLatency(*I);
- if (L > RemLatency)
- RemLatency = L;
+ if (LateSU) {
+ DEBUG(dbgs() << Available.getName() << " RemLatency SU("
+ << LateSU->NodeNum << ") " << RemLatency << "c\n");
}
- unsigned CriticalPathLimit = Rem->CriticalPath + SchedModel->getILPWindow();
- DEBUG(dbgs() << " " << Available.getName()
- << " ExpectedLatency " << ExpectedLatency
- << " CP Limit " << CriticalPathLimit << '\n');
- if (RemLatency + ExpectedLatency >= CriticalPathLimit
- && RemLatency > Rem->getMaxRemainingCount(SchedModel)) {
- Policy.ReduceLatency = true;
- DEBUG(dbgs() << " Increase ILP: " << Available.getName() << '\n');
+ return RemLatency;
+}
+
+// Count resources in this zone and the remaining unscheduled
+// instructions. Return the max count, scaled. Set OtherCritIdx to the critical
+// resource index, or zero if the zone is issue limited.
+unsigned GenericScheduler::SchedBoundary::
+getOtherResourceCount(unsigned &OtherCritIdx) {
+ OtherCritIdx = 0;
+ if (!SchedModel->hasInstrSchedModel())
+ return 0;
+
+ unsigned OtherCritCount = Rem->RemIssueCount
+ + (RetiredMOps * SchedModel->getMicroOpFactor());
+ DEBUG(dbgs() << " " << Available.getName() << " + Remain MOps: "
+ << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
+ for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
+ PIdx != PEnd; ++PIdx) {
+ unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
+ if (OtherCount > OtherCritCount) {
+ OtherCritCount = OtherCount;
+ OtherCritIdx = PIdx;
+ }
+ }
+ if (OtherCritIdx) {
+ DEBUG(dbgs() << " " << Available.getName() << " + Remain CritRes: "
+ << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
+ << " " << getResourceName(OtherCritIdx) << "\n");
}
+ return OtherCritCount;
}
-void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
- unsigned ReadyCycle) {
+/// Set the CandPolicy for this zone given the current resources and latencies
+/// inside and outside the zone.
+void GenericScheduler::SchedBoundary::setPolicy(CandPolicy &Policy,
+ SchedBoundary &OtherZone) {
+ // Now that potential stalls have been considered, apply preemptive heuristics
+ // based on the total latency and resources inside and outside this
+ // zone.
+
+ // Compute remaining latency. We need this both to determine whether the
+ // overall schedule has become latency-limited and whether the instructions
+ // outside this zone are resource or latency limited.
+ //
+ // The "dependent" latency is updated incrementally during scheduling as the
+ // max height/depth of scheduled nodes minus the cycles since it was
+ // scheduled:
+ // DLat = max(N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
+ //
+ // The "independent" latency is the max ready queue depth:
+ // ILat = max N.depth for N in Available|Pending
+ //
+ // RemainingLatency is the greater of independent and dependent latency.
+ unsigned RemLatency = DependentLatency;
+ RemLatency = std::max(RemLatency, findMaxLatency(Available.elements()));
+ RemLatency = std::max(RemLatency, findMaxLatency(Pending.elements()));
+
+ // Compute the critical resource outside the zone.
+ unsigned OtherCritIdx;
+ unsigned OtherCount = OtherZone.getOtherResourceCount(OtherCritIdx);
+
+ bool OtherResLimited = false;
+ if (SchedModel->hasInstrSchedModel()) {
+ unsigned LFactor = SchedModel->getLatencyFactor();
+ OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
+ }
+ if (!OtherResLimited && (RemLatency + CurrCycle > Rem->CriticalPath)) {
+ Policy.ReduceLatency |= true;
+ DEBUG(dbgs() << " " << Available.getName() << " RemainingLatency "
+ << RemLatency << " + " << CurrCycle << "c > CritPath "
+ << Rem->CriticalPath << "\n");
+ }
+ // If the same resource is limiting inside and outside the zone, do nothing.
+ if (ZoneCritResIdx == OtherCritIdx)
+ return;
+ DEBUG(
+ if (IsResourceLimited) {
+ dbgs() << " " << Available.getName() << " ResourceLimited: "
+ << getResourceName(ZoneCritResIdx) << "\n";
+ }
+ if (OtherResLimited)
+ dbgs() << " RemainingLimit: " << getResourceName(OtherCritIdx) << "\n";
+ if (!IsResourceLimited && !OtherResLimited)
+ dbgs() << " Latency limited both directions.\n");
+
+ if (IsResourceLimited && !Policy.ReduceResIdx)
+ Policy.ReduceResIdx = ZoneCritResIdx;
+
+ if (OtherResLimited)
+ Policy.DemandResIdx = OtherCritIdx;
+}
+
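As a worked example of the latency test above (hypothetical numbers, latency factor 1): a node of depth 7 scheduled 3 cycles ago leaves DependentLatency = 4; if the deepest node in Available/Pending has depth 6, then RemLatency = max(4, 6) = 6. With CurrCycle = 5 and CriticalPath = 9, 6 + 5 > 9, so unless a resource outside the zone dominates, the zone sets Policy.ReduceLatency.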
+void GenericScheduler::SchedBoundary::releaseNode(SUnit *SU,
+ unsigned ReadyCycle) {
if (ReadyCycle < MinReadyCycle)
MinReadyCycle = ReadyCycle;
// Check for interlocks first. For the purpose of other heuristics, an
// instruction that cannot issue appears as if it's not in the ReadyQueue.
- if (ReadyCycle > CurrCycle || checkHazard(SU))
+ bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
+ if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU))
Pending.push(SU);
else
Available.push(SU);
@@ -1577,16 +2023,21 @@ void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
}
/// Move the boundary of scheduled code by one cycle.
-void ConvergingScheduler::SchedBoundary::bumpCycle() {
- unsigned Width = SchedModel->getIssueWidth();
- IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;
-
- unsigned NextCycle = CurrCycle + 1;
- assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
- if (MinReadyCycle > NextCycle) {
- IssueCount = 0;
- NextCycle = MinReadyCycle;
- }
+void GenericScheduler::SchedBoundary::bumpCycle(unsigned NextCycle) {
+ if (SchedModel->getMicroOpBufferSize() == 0) {
+ assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
+ if (MinReadyCycle > NextCycle)
+ NextCycle = MinReadyCycle;
+ }
+ // Update the current micro-ops, which will issue in the next cycle.
+ unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
+ CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
+
+ // Decrement DependentLatency based on the next cycle.
+ if ((NextCycle - CurrCycle) > DependentLatency)
+ DependentLatency = 0;
+ else
+ DependentLatency -= (NextCycle - CurrCycle);
if (!HazardRec->isEnabled()) {
// Bypass HazardRec virtual calls.
@@ -1602,38 +2053,54 @@ void ConvergingScheduler::SchedBoundary::bumpCycle() {
}
}
CheckPending = true;
- IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle);
+ unsigned LFactor = SchedModel->getLatencyFactor();
+ IsResourceLimited =
+ (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
+ > (int)LFactor;
+
+ DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
+}
- DEBUG(dbgs() << " " << Available.getName()
- << " Cycle: " << CurrCycle << '\n');
+void GenericScheduler::SchedBoundary::incExecutedResources(unsigned PIdx,
+ unsigned Count) {
+ ExecutedResCounts[PIdx] += Count;
+ if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
+ MaxExecutedResCount = ExecutedResCounts[PIdx];
}
/// Add the given processor resource to this scheduled zone.
-void ConvergingScheduler::SchedBoundary::countResource(unsigned PIdx,
- unsigned Cycles) {
+///
+/// \param Cycles indicates the number of consecutive (non-pipelined) cycles
+/// during which this resource is consumed.
+///
+/// \return the next cycle at which the instruction may execute without
+/// oversubscribing resources.
+unsigned GenericScheduler::SchedBoundary::
+countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle) {
unsigned Factor = SchedModel->getResourceFactor(PIdx);
- DEBUG(dbgs() << " " << SchedModel->getProcResource(PIdx)->Name
- << " +(" << Cycles << "x" << Factor
- << ") / " << SchedModel->getLatencyFactor() << '\n');
-
unsigned Count = Factor * Cycles;
- ResourceCounts[PIdx] += Count;
+ DEBUG(dbgs() << " " << getResourceName(PIdx)
+ << " +" << Cycles << "x" << Factor << "u\n");
+
+ // Update Executed resources counts.
+ incExecutedResources(PIdx, Count);
assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
Rem->RemainingCounts[PIdx] -= Count;
- // Check if this resource exceeds the current critical resource by a full
- // cycle. If so, it becomes the critical resource.
- if ((int)(ResourceCounts[PIdx] - ResourceCounts[CritResIdx])
- >= (int)SchedModel->getLatencyFactor()) {
- CritResIdx = PIdx;
+ // Check if this resource exceeds the current critical resource. If so, it
+ // becomes the critical resource.
+ if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
+ ZoneCritResIdx = PIdx;
DEBUG(dbgs() << " *** Critical resource "
- << SchedModel->getProcResource(PIdx)->Name << " x"
- << ResourceCounts[PIdx] << '\n');
+ << getResourceName(PIdx) << ": "
+ << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
}
+ // TODO: We don't yet model reserved resources. It's not hard though.
+ return CurrCycle;
}
/// Move the boundary of scheduled code by one SUnit.
-void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) {
+void GenericScheduler::SchedBoundary::bumpNode(SUnit *SU) {
// Update the reservation table.
if (HazardRec->isEnabled()) {
if (!isTop() && SU->isCall) {
@@ -1643,51 +2110,108 @@ void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) {
}
HazardRec->EmitInstruction(SU);
}
+ const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
+ unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
+ CurrMOps += IncMOps;
+ // checkHazard prevents scheduling multiple instructions per cycle that exceed
+ // issue width. However, we commonly reach the maximum. In this case
+ // opportunistically bump the cycle to avoid uselessly checking everything in
+ // the readyQ. Furthermore, a single instruction may produce more than one
+ // cycle's worth of micro-ops.
+ //
+ // TODO: Also check if this SU must end a dispatch group.
+ unsigned NextCycle = CurrCycle;
+ if (CurrMOps >= SchedModel->getIssueWidth()) {
+ ++NextCycle;
+ DEBUG(dbgs() << " *** Max MOps " << CurrMOps
+ << " at cycle " << CurrCycle << '\n');
+ }
+ unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
+ DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n");
+
+ switch (SchedModel->getMicroOpBufferSize()) {
+ case 0:
+ assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
+ break;
+ case 1:
+ if (ReadyCycle > NextCycle) {
+ NextCycle = ReadyCycle;
+ DEBUG(dbgs() << " *** Stall until: " << ReadyCycle << "\n");
+ }
+ break;
+ default:
+ // We don't currently model the OOO reorder buffer, so consider all
+ // scheduled MOps to be "retired".
+ break;
+ }
+ RetiredMOps += IncMOps;
+
// Update resource counts and critical resource.
if (SchedModel->hasInstrSchedModel()) {
- const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
- Rem->RemainingMicroOps -= SchedModel->getNumMicroOps(SU->getInstr(), SC);
+ unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
+ assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
+ Rem->RemIssueCount -= DecRemIssue;
+ if (ZoneCritResIdx) {
+ // Scale scheduled micro-ops for comparing with the critical resource.
+ unsigned ScaledMOps =
+ RetiredMOps * SchedModel->getMicroOpFactor();
+
+ // If scaled micro-ops are now more than the previous critical resource by
+ // a full cycle, then micro-ops issue becomes critical.
+ if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
+ >= (int)SchedModel->getLatencyFactor()) {
+ ZoneCritResIdx = 0;
+ DEBUG(dbgs() << " *** Critical resource NumMicroOps: "
+ << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
+ }
+ }
for (TargetSchedModel::ProcResIter
PI = SchedModel->getWriteProcResBegin(SC),
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
- countResource(PI->ProcResourceIdx, PI->Cycles);
+ unsigned RCycle =
+ countResource(PI->ProcResourceIdx, PI->Cycles, ReadyCycle);
+ if (RCycle > NextCycle)
+ NextCycle = RCycle;
}
}
- if (isTop()) {
- if (SU->getDepth() > ExpectedLatency)
- ExpectedLatency = SU->getDepth();
+ // Update ExpectedLatency and DependentLatency.
+ unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
+ unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
+ if (SU->getDepth() > TopLatency) {
+ TopLatency = SU->getDepth();
+ DEBUG(dbgs() << " " << Available.getName()
+ << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
}
- else {
- if (SU->getHeight() > ExpectedLatency)
- ExpectedLatency = SU->getHeight();
+ if (SU->getHeight() > BotLatency) {
+ BotLatency = SU->getHeight();
+ DEBUG(dbgs() << " " << Available.getName()
+ << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
}
-
- IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle);
-
- // Check the instruction group dispatch limit.
- // TODO: Check if this SU must end a dispatch group.
- IssueCount += SchedModel->getNumMicroOps(SU->getInstr());
-
- // checkHazard prevents scheduling multiple instructions per cycle that exceed
- // issue width. However, we commonly reach the maximum. In this case
- // opportunistically bump the cycle to avoid uselessly checking everything in
- // the readyQ. Furthermore, a single instruction may produce more than one
- // cycle's worth of micro-ops.
- if (IssueCount >= SchedModel->getIssueWidth()) {
- DEBUG(dbgs() << " *** Max instrs at cycle " << CurrCycle << '\n');
- bumpCycle();
+ // If we stall for any reason, bump the cycle.
+ if (NextCycle > CurrCycle) {
+ bumpCycle(NextCycle);
+ }
+ else {
+ // After updating ZoneCritResIdx and ExpectedLatency, check if we're
+ // resource limited. If a stall occurred, bumpCycle does this.
+ unsigned LFactor = SchedModel->getLatencyFactor();
+ IsResourceLimited =
+ (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
+ > (int)LFactor;
}
+ DEBUG(dumpScheduledState());
}
/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
-void ConvergingScheduler::SchedBoundary::releasePending() {
+void GenericScheduler::SchedBoundary::releasePending() {
// If the available queue is empty, it is safe to reset MinReadyCycle.
if (Available.empty())
MinReadyCycle = UINT_MAX;
// Check to see if any of the pending instructions are ready to issue. If
// so, add them to the available queue.
+ bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
SUnit *SU = *(Pending.begin()+i);
unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
@@ -1695,7 +2219,7 @@ void ConvergingScheduler::SchedBoundary::releasePending() {
if (ReadyCycle < MinReadyCycle)
MinReadyCycle = ReadyCycle;
- if (ReadyCycle > CurrCycle)
+ if (!IsBuffered && ReadyCycle > CurrCycle)
continue;
if (checkHazard(SU))
@@ -1710,7 +2234,7 @@ void ConvergingScheduler::SchedBoundary::releasePending() {
}
/// Remove SU from the ready set for this boundary.
-void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) {
+void GenericScheduler::SchedBoundary::removeReady(SUnit *SU) {
if (Available.isInQueue(SU))
Available.remove(Available.find(SU));
else {
@@ -1722,11 +2246,11 @@ void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) {
/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return NULL.
-SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
+SUnit *GenericScheduler::SchedBoundary::pickOnlyChoice() {
if (CheckPending)
releasePending();
- if (IssueCount > 0) {
+ if (CurrMOps > 0) {
// Defer any ready instrs that now have a hazard.
for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
if (checkHazard(*I)) {
@@ -1738,9 +2262,9 @@ SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
}
}
for (unsigned i = 0; Available.empty(); ++i) {
- assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
+ assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedLatency) &&
"permanent hazard"); (void)i;
- bumpCycle();
+ bumpCycle(CurrCycle + 1);
releasePending();
}
if (Available.size() == 1)
@@ -1748,106 +2272,33 @@ SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
return NULL;
}
-/// Record the candidate policy for opposite zones with different critical
-/// resources.
-///
-/// If the CriticalZone is latency limited, don't force a policy for the
-/// candidates here. Instead, setLatencyPolicy sets ReduceLatency if needed.
-void ConvergingScheduler::balanceZones(
- ConvergingScheduler::SchedBoundary &CriticalZone,
- ConvergingScheduler::SchedCandidate &CriticalCand,
- ConvergingScheduler::SchedBoundary &OppositeZone,
- ConvergingScheduler::SchedCandidate &OppositeCand) {
-
- if (!CriticalZone.IsResourceLimited)
- return;
- assert(SchedModel->hasInstrSchedModel() && "required schedmodel");
-
- SchedRemainder *Rem = CriticalZone.Rem;
-
- // If the critical zone is overconsuming a resource relative to the
- // remainder, try to reduce it.
- unsigned RemainingCritCount =
- Rem->RemainingCounts[CriticalZone.CritResIdx];
- if ((int)(Rem->getMaxRemainingCount(SchedModel) - RemainingCritCount)
- > (int)SchedModel->getLatencyFactor()) {
- CriticalCand.Policy.ReduceResIdx = CriticalZone.CritResIdx;
- DEBUG(dbgs() << " Balance " << CriticalZone.Available.getName()
- << " reduce "
- << SchedModel->getProcResource(CriticalZone.CritResIdx)->Name
- << '\n');
- }
- // If the other zone is underconsuming a resource relative to the full zone,
- // try to increase it.
- unsigned OppositeCount =
- OppositeZone.ResourceCounts[CriticalZone.CritResIdx];
- if ((int)(OppositeZone.ExpectedCount - OppositeCount)
- > (int)SchedModel->getLatencyFactor()) {
- OppositeCand.Policy.DemandResIdx = CriticalZone.CritResIdx;
- DEBUG(dbgs() << " Balance " << OppositeZone.Available.getName()
- << " demand "
- << SchedModel->getProcResource(OppositeZone.CritResIdx)->Name
- << '\n');
- }
-}
-
-/// Determine if the scheduled zones exceed resource limits or critical path and
-/// set each candidate's ReduceHeight policy accordingly.
-void ConvergingScheduler::checkResourceLimits(
- ConvergingScheduler::SchedCandidate &TopCand,
- ConvergingScheduler::SchedCandidate &BotCand) {
-
- // Set ReduceLatency to true if needed.
- Bot.setLatencyPolicy(BotCand.Policy);
- Top.setLatencyPolicy(TopCand.Policy);
-
- // Handle resource-limited regions.
- if (Top.IsResourceLimited && Bot.IsResourceLimited
- && Top.CritResIdx == Bot.CritResIdx) {
- // If the scheduled critical resource in both zones is no longer the
- // critical remaining resource, attempt to reduce resource height both ways.
- if (Top.CritResIdx != Rem.CritResIdx) {
- TopCand.Policy.ReduceResIdx = Top.CritResIdx;
- BotCand.Policy.ReduceResIdx = Bot.CritResIdx;
- DEBUG(dbgs() << " Reduce scheduled "
- << SchedModel->getProcResource(Top.CritResIdx)->Name << '\n');
- }
- return;
- }
- // Handle latency-limited regions.
- if (!Top.IsResourceLimited && !Bot.IsResourceLimited) {
- // If the total scheduled expected latency exceeds the region's critical
- // path then reduce latency both ways.
- //
- // Just because a zone is not resource limited does not mean it is latency
- // limited. Unbuffered resource, such as max micro-ops may cause CurrCycle
- // to exceed expected latency.
- if ((Top.ExpectedLatency + Bot.ExpectedLatency >= Rem.CriticalPath)
- && (Rem.CriticalPath > Top.CurrCycle + Bot.CurrCycle)) {
- TopCand.Policy.ReduceLatency = true;
- BotCand.Policy.ReduceLatency = true;
- DEBUG(dbgs() << " Reduce scheduled latency " << Top.ExpectedLatency
- << " + " << Bot.ExpectedLatency << '\n');
- }
- return;
+#ifndef NDEBUG
+// This is useful information to dump after bumpNode.
+// Note that the Queue contents are more useful before pickNodeFromQueue.
+void GenericScheduler::SchedBoundary::dumpScheduledState() {
+ unsigned ResFactor;
+ unsigned ResCount;
+ if (ZoneCritResIdx) {
+ ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
+ ResCount = getResourceCount(ZoneCritResIdx);
}
- // The critical resource is different in each zone, so request balancing.
-
- // Compute the cost of each zone.
- Top.ExpectedCount = std::max(Top.ExpectedLatency, Top.CurrCycle);
- Top.ExpectedCount = std::max(
- Top.getCriticalCount(),
- Top.ExpectedCount * SchedModel->getLatencyFactor());
- Bot.ExpectedCount = std::max(Bot.ExpectedLatency, Bot.CurrCycle);
- Bot.ExpectedCount = std::max(
- Bot.getCriticalCount(),
- Bot.ExpectedCount * SchedModel->getLatencyFactor());
-
- balanceZones(Top, TopCand, Bot, BotCand);
- balanceZones(Bot, BotCand, Top, TopCand);
+ else {
+ ResFactor = SchedModel->getMicroOpFactor();
+ ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
+ }
+ unsigned LFactor = SchedModel->getLatencyFactor();
+ dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
+ << " Retired: " << RetiredMOps;
+ dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c";
+ dbgs() << "\n Critical: " << ResCount / LFactor << "c, "
+ << ResCount / ResFactor << " " << getResourceName(ZoneCritResIdx)
+ << "\n ExpectedLatency: " << ExpectedLatency << "c\n"
+ << (IsResourceLimited ? " - Resource" : " - Latency")
+ << " limited.\n";
}
+#endif
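For reference, the statements above produce output of the following shape; the queue name, resource name, and all numbers here are hypothetical:

  BotQ.A @12c
    Retired: 24
    Executed: 11c
    Critical: 10c, 5 MyFPUnit
    ExpectedLatency: 9c
    - Resource limited.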
-void ConvergingScheduler::SchedCandidate::
+void GenericScheduler::SchedCandidate::
initResourceDelta(const ScheduleDAGMI *DAG,
const TargetSchedModel *SchedModel) {
if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
@@ -1864,11 +2315,12 @@ initResourceDelta(const ScheduleDAGMI *DAG,
}
}
+
/// Return true if this heuristic determines order.
static bool tryLess(int TryVal, int CandVal,
- ConvergingScheduler::SchedCandidate &TryCand,
- ConvergingScheduler::SchedCandidate &Cand,
- ConvergingScheduler::CandReason Reason) {
+ GenericScheduler::SchedCandidate &TryCand,
+ GenericScheduler::SchedCandidate &Cand,
+ GenericScheduler::CandReason Reason) {
if (TryVal < CandVal) {
TryCand.Reason = Reason;
return true;
@@ -1878,13 +2330,14 @@ static bool tryLess(int TryVal, int CandVal,
Cand.Reason = Reason;
return true;
}
+ Cand.setRepeat(Reason);
return false;
}
static bool tryGreater(int TryVal, int CandVal,
- ConvergingScheduler::SchedCandidate &TryCand,
- ConvergingScheduler::SchedCandidate &Cand,
- ConvergingScheduler::CandReason Reason) {
+ GenericScheduler::SchedCandidate &TryCand,
+ GenericScheduler::SchedCandidate &Cand,
+ GenericScheduler::CandReason Reason) {
if (TryVal > CandVal) {
TryCand.Reason = Reason;
return true;
@@ -1894,9 +2347,34 @@ static bool tryGreater(int TryVal, int CandVal,
Cand.Reason = Reason;
return true;
}
+ Cand.setRepeat(Reason);
return false;
}
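In practice these helpers form a chain: the first comparison that returns true fixes the winner and stamps its reason, so the order of calls encodes priority, while ties fall through (recording a repeat) to the next heuristic. A hypothetical fragment of such a chain, modeled on tryLatency below:

  // The first decisive comparison wins; equal values defer downward.
  if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
              TryCand, Cand, GenericScheduler::TopDepthReduce))
    return;
  if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                 TryCand, Cand, GenericScheduler::TopPathReduce))
    return;
  // ...continue with less important heuristics...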
+static bool tryPressure(const PressureChange &TryP,
+ const PressureChange &CandP,
+ GenericScheduler::SchedCandidate &TryCand,
+ GenericScheduler::SchedCandidate &Cand,
+ GenericScheduler::CandReason Reason) {
+ int TryRank = TryP.getPSetOrMax();
+ int CandRank = CandP.getPSetOrMax();
+ // If both candidates affect the same set, go with the smallest increase.
+ if (TryRank == CandRank) {
+ return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
+ Reason);
+ }
+ // If one candidate decreases and the other increases, go with it.
+ // Invalid candidates have UnitInc==0.
+ if (tryLess(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
+ Reason)) {
+ return true;
+ }
+ // If the candidates are decreasing pressure, reverse priority.
+ if (TryP.getUnitInc() < 0)
+ std::swap(TryRank, CandRank);
+ return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
+}
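A worked example of the ordering above, with hypothetical pressure changes:

  // Same set: Try = (GPR, +2) vs Cand = (GPR, +4). The smaller increase
  // wins via tryLess, so TryCand is chosen.
  // Mixed sign: per the comment, a candidate that decreases pressure is
  // preferred over one that increases it.
  // Both decrease: the ranks are swapped before tryGreater, so the
  // candidate relieving the lower-ranked set wins. An invalid change
  // ranks as INT_MAX, so it beats any increase (not touching a set is
  // better than raising one).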
+
static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
}
@@ -1929,6 +2407,32 @@ static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
return 0;
}
+static bool tryLatency(GenericScheduler::SchedCandidate &TryCand,
+ GenericScheduler::SchedCandidate &Cand,
+ GenericScheduler::SchedBoundary &Zone) {
+ if (Zone.isTop()) {
+ if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
+ if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
+ TryCand, Cand, GenericScheduler::TopDepthReduce))
+ return true;
+ }
+ if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
+ TryCand, Cand, GenericScheduler::TopPathReduce))
+ return true;
+ }
+ else {
+ if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
+ if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
+ TryCand, Cand, GenericScheduler::BotHeightReduce))
+ return true;
+ }
+ if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
+ TryCand, Cand, GenericScheduler::BotPathReduce))
+ return true;
+ }
+ return false;
+}
+
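A worked example of the top-zone branch above, with hypothetical numbers: the zone has already scheduled 6 cycles of latency and Cand has depth 9, so issuing Cand implies a stall; if TryCand has depth 4, tryLess picks it (TopDepthReduce). When depths do not decide, the node with the greater height, i.e. the longer remaining critical path, wins (TopPathReduce). The bottom zone mirrors this with height and depth exchanged.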
/// Apply a set of heuristics to a new candidate. Heuristics are currently
/// hierarchical. This may be more efficient than a graduated cost model because
/// we don't need to evaluate all aspects of the model for each node in the
@@ -1940,16 +2444,44 @@ static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
/// \param Zone describes the scheduled zone that we are extending.
/// \param RPTracker describes reg pressure within the scheduled zone.
/// \param TempTracker is a scratch pressure tracker to reuse in queries.
-void ConvergingScheduler::tryCandidate(SchedCandidate &Cand,
+void GenericScheduler::tryCandidate(SchedCandidate &Cand,
SchedCandidate &TryCand,
SchedBoundary &Zone,
const RegPressureTracker &RPTracker,
RegPressureTracker &TempTracker) {
- // Always initialize TryCand's RPDelta.
- TempTracker.getMaxPressureDelta(TryCand.SU->getInstr(), TryCand.RPDelta,
- DAG->getRegionCriticalPSets(),
- DAG->getRegPressure().MaxSetPressure);
+ if (DAG->isTrackingPressure()) {
+ // Always initialize TryCand's RPDelta.
+ if (Zone.isTop()) {
+ TempTracker.getMaxDownwardPressureDelta(
+ TryCand.SU->getInstr(),
+ TryCand.RPDelta,
+ DAG->getRegionCriticalPSets(),
+ DAG->getRegPressure().MaxSetPressure);
+ }
+ else {
+ if (VerifyScheduling) {
+ TempTracker.getMaxUpwardPressureDelta(
+ TryCand.SU->getInstr(),
+ &DAG->getPressureDiff(TryCand.SU),
+ TryCand.RPDelta,
+ DAG->getRegionCriticalPSets(),
+ DAG->getRegPressure().MaxSetPressure);
+ }
+ else {
+ RPTracker.getUpwardPressureDelta(
+ TryCand.SU->getInstr(),
+ DAG->getPressureDiff(TryCand.SU),
+ TryCand.RPDelta,
+ DAG->getRegionCriticalPSets(),
+ DAG->getRegPressure().MaxSetPressure);
+ }
+ }
+ }
+ DEBUG(if (TryCand.RPDelta.Excess.isValid())
+ dbgs() << " SU(" << TryCand.SU->NodeNum << ") "
+ << TRI->getRegPressureSetName(TryCand.RPDelta.Excess.getPSet())
+ << ":" << TryCand.RPDelta.Excess.getUnitInc() << "\n");
// Initialize the candidate if needed.
if (!Cand.isValid()) {
@@ -1962,20 +2494,25 @@ void ConvergingScheduler::tryCandidate(SchedCandidate &Cand,
TryCand, Cand, PhysRegCopy))
return;
- // Avoid exceeding the target's limit.
- if (tryLess(TryCand.RPDelta.Excess.UnitIncrease,
- Cand.RPDelta.Excess.UnitIncrease, TryCand, Cand, SingleExcess))
+ // Avoid exceeding the target's limit. If signed PSetID is negative, it is
+ // invalid; convert it to INT_MAX to give it lowest priority.
+ if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
+ Cand.RPDelta.Excess,
+ TryCand, Cand, RegExcess))
return;
- if (Cand.Reason == SingleExcess)
- Cand.Reason = MultiPressure;
// Avoid increasing the max critical pressure in the scheduled region.
- if (tryLess(TryCand.RPDelta.CriticalMax.UnitIncrease,
- Cand.RPDelta.CriticalMax.UnitIncrease,
- TryCand, Cand, SingleCritical))
+ if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
+ Cand.RPDelta.CriticalMax,
+ TryCand, Cand, RegCritical))
+ return;
+
+ // For loops that are acyclic path limited, aggressively schedule for latency.
+ // This can result in very long dependence chains scheduled in sequence, so
+ // once every cycle (when CurrMOps == 0), switch to normal heuristics.
+ if (Rem.IsAcyclicLatencyLimited && !Zone.CurrMOps
+ && tryLatency(TryCand, Cand, Zone))
return;
- if (Cand.Reason == SingleCritical)
- Cand.Reason = MultiPressure;
// Keep clustered nodes together to encourage downstream peephole
// optimizations which may reduce resource requirements.
@@ -1990,17 +2527,17 @@ void ConvergingScheduler::tryCandidate(SchedCandidate &Cand,
return;
// Weak edges are for clustering and other constraints.
- //
- // Deferring TryCand here does not change Cand's reason. This is good in the
- // sense that a bad candidate shouldn't affect a previous candidate's
- // goodness, but bad in that it is assymetric and depends on queue order.
- CandReason OrigReason = Cand.Reason;
if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
getWeakLeft(Cand.SU, Zone.isTop()),
TryCand, Cand, Weak)) {
- Cand.Reason = OrigReason;
return;
}
+ // Avoid increasing the max pressure of the entire region.
+ if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
+ Cand.RPDelta.CurrentMax,
+ TryCand, Cand, RegMax))
+ return;
+
// Avoid critical resource consumption and balance the schedule.
TryCand.initResourceDelta(DAG, SchedModel);
if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
@@ -2012,41 +2549,15 @@ void ConvergingScheduler::tryCandidate(SchedCandidate &Cand,
return;
// Avoid serializing long latency dependence chains.
- if (Cand.Policy.ReduceLatency) {
- if (Zone.isTop()) {
- if (Cand.SU->getDepth() * SchedModel->getLatencyFactor()
- > Zone.ExpectedCount) {
- if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
- TryCand, Cand, TopDepthReduce))
- return;
- }
- if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
- TryCand, Cand, TopPathReduce))
- return;
- }
- else {
- if (Cand.SU->getHeight() * SchedModel->getLatencyFactor()
- > Zone.ExpectedCount) {
- if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
- TryCand, Cand, BotHeightReduce))
- return;
- }
- if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
- TryCand, Cand, BotPathReduce))
- return;
- }
- }
-
- // Avoid increasing the max pressure of the entire region.
- if (tryLess(TryCand.RPDelta.CurrentMax.UnitIncrease,
- Cand.RPDelta.CurrentMax.UnitIncrease, TryCand, Cand, SingleMax))
+ // For acyclic path limited loops, latency was already checked above.
+ if (Cand.Policy.ReduceLatency && !Rem.IsAcyclicLatencyLimited
+ && tryLatency(TryCand, Cand, Zone)) {
return;
- if (Cand.Reason == SingleMax)
- Cand.Reason = MultiPressure;
+ }
// Prefer immediate defs/users of the last scheduled instruction. This is a
- // nice pressure avoidance strategy that also conserves the processor's
- // register renaming resources and keeps the machine code readable.
+ // local pressure avoidance strategy that also makes the machine code
+ // readable.
if (tryGreater(Zone.NextSUs.count(TryCand.SU), Zone.NextSUs.count(Cand.SU),
TryCand, Cand, NextDefUse))
return;
@@ -2058,49 +2569,17 @@ void ConvergingScheduler::tryCandidate(SchedCandidate &Cand,
}
}
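Taken together, tryCandidate applies its checks in a fixed order, and the CandReason enumeration mirrors that order so that a lower-valued reason is a more important one when the two zones are compared later. An illustrative (not verbatim) summary of the hierarchy:

  // 1. PhysRegCopy           - bias physical register copies
  // 2. RegExcess             - avoid exceeding a pressure set limit
  // 3. RegCritical           - avoid raising the region's critical pressure
  //    (acyclic-latency-limited loops check latency here instead)
  // 4. Cluster               - keep clustered loads/stores together
  // 5. Weak                  - honor weak edges
  // 6. RegMax                - avoid raising the region's max pressure
  // 7. ResourceReduce/Demand - balance processor resources
  // 8. Top/Bot depth/height  - latency heuristics
  // 9. NextDefUse            - prefer users/defs of the last instruction
  // 10. NodeOrder            - fall back to original order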
-/// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is
-/// more desirable than RHS from scheduling standpoint.
-static bool compareRPDelta(const RegPressureDelta &LHS,
- const RegPressureDelta &RHS) {
- // Compare each component of pressure in decreasing order of importance
- // without checking if any are valid. Invalid PressureElements are assumed to
- // have UnitIncrease==0, so are neutral.
-
- // Avoid increasing the max critical pressure in the scheduled region.
- if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease) {
- DEBUG(dbgs() << " RP excess top - bot: "
- << (LHS.Excess.UnitIncrease - RHS.Excess.UnitIncrease) << '\n');
- return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease;
- }
- // Avoid increasing the max critical pressure in the scheduled region.
- if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease) {
- DEBUG(dbgs() << " RP critical top - bot: "
- << (LHS.CriticalMax.UnitIncrease - RHS.CriticalMax.UnitIncrease)
- << '\n');
- return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease;
- }
- // Avoid increasing the max pressure of the entire region.
- if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease) {
- DEBUG(dbgs() << " RP current top - bot: "
- << (LHS.CurrentMax.UnitIncrease - RHS.CurrentMax.UnitIncrease)
- << '\n');
- return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease;
- }
- return false;
-}
-
#ifndef NDEBUG
-const char *ConvergingScheduler::getReasonStr(
- ConvergingScheduler::CandReason Reason) {
+const char *GenericScheduler::getReasonStr(
+ GenericScheduler::CandReason Reason) {
switch (Reason) {
case NoCand: return "NOCAND ";
case PhysRegCopy: return "PREG-COPY";
- case SingleExcess: return "REG-EXCESS";
- case SingleCritical: return "REG-CRIT ";
+ case RegExcess: return "REG-EXCESS";
+ case RegCritical: return "REG-CRIT ";
case Cluster: return "CLUSTER ";
case Weak: return "WEAK ";
- case SingleMax: return "REG-MAX ";
- case MultiPressure: return "REG-MULTI ";
+ case RegMax: return "REG-MAX ";
case ResourceReduce: return "RES-REDUCE";
case ResourceDemand: return "RES-DEMAND";
case TopDepthReduce: return "TOP-DEPTH ";
@@ -2113,20 +2592,20 @@ const char *ConvergingScheduler::getReasonStr(
llvm_unreachable("Unknown reason!");
}
-void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand) {
- PressureElement P;
+void GenericScheduler::traceCandidate(const SchedCandidate &Cand) {
+ PressureChange P;
unsigned ResIdx = 0;
unsigned Latency = 0;
switch (Cand.Reason) {
default:
break;
- case SingleExcess:
+ case RegExcess:
P = Cand.RPDelta.Excess;
break;
- case SingleCritical:
+ case RegCritical:
P = Cand.RPDelta.CriticalMax;
break;
- case SingleMax:
+ case RegMax:
P = Cand.RPDelta.CurrentMax;
break;
case ResourceReduce:
@@ -2150,8 +2629,8 @@ void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand) {
}
dbgs() << " SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
if (P.isValid())
- dbgs() << " " << TRI->getRegPressureSetName(P.PSetID)
- << ":" << P.UnitIncrease << " ";
+ dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
+ << ":" << P.getUnitInc() << " ";
else
dbgs() << " ";
if (ResIdx)
@@ -2166,12 +2645,12 @@ void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand) {
}
#endif
-/// Pick the best candidate from the top queue.
+/// Pick the best candidate from the queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
-void ConvergingScheduler::pickNodeFromQueue(SchedBoundary &Zone,
+void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
const RegPressureTracker &RPTracker,
SchedCandidate &Cand) {
ReadyQueue &Q = Zone.Available;
@@ -2196,30 +2675,31 @@ void ConvergingScheduler::pickNodeFromQueue(SchedBoundary &Zone,
}
}
-static void tracePick(const ConvergingScheduler::SchedCandidate &Cand,
+static void tracePick(const GenericScheduler::SchedCandidate &Cand,
bool IsTop) {
DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
- << ConvergingScheduler::getReasonStr(Cand.Reason) << '\n');
+ << GenericScheduler::getReasonStr(Cand.Reason) << '\n');
}
/// Pick the best candidate node from either the top or bottom queue.
-SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) {
+SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
// Schedule as far as possible in the direction of no choice. This is most
// efficient, but also provides the best heuristics for CriticalPSets.
if (SUnit *SU = Bot.pickOnlyChoice()) {
IsTopNode = false;
- DEBUG(dbgs() << "Pick Top NOCAND\n");
+ DEBUG(dbgs() << "Pick Bot NOCAND\n");
return SU;
}
if (SUnit *SU = Top.pickOnlyChoice()) {
IsTopNode = true;
- DEBUG(dbgs() << "Pick Bot NOCAND\n");
+ DEBUG(dbgs() << "Pick Top NOCAND\n");
return SU;
}
CandPolicy NoPolicy;
SchedCandidate BotCand(NoPolicy);
SchedCandidate TopCand(NoPolicy);
- checkResourceLimits(TopCand, BotCand);
+ Bot.setPolicy(BotCand.Policy, Top);
+ Top.setPolicy(TopCand.Policy, Bot);
// Prefer bottom scheduling when heuristics are silent.
pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
@@ -2232,7 +2712,10 @@ SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) {
// affects picking from either Q. If scheduling in one direction must
// increase pressure for one of the excess PSets, then schedule in that
// direction first to provide more freedom in the other direction.
- if (BotCand.Reason == SingleExcess || BotCand.Reason == SingleCritical) {
+ if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess))
+ || (BotCand.Reason == RegCritical
+ && !BotCand.isRepeat(RegCritical)))
+ {
IsTopNode = false;
tracePick(BotCand, IsTopNode);
return BotCand.SU;
@@ -2241,37 +2724,20 @@ SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) {
pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
assert(TopCand.Reason != NoCand && "failed to find the first candidate");
- // If either Q has a single candidate that minimizes pressure above the
- // original region's pressure pick it.
- if (TopCand.Reason <= SingleMax || BotCand.Reason <= SingleMax) {
- if (TopCand.Reason < BotCand.Reason) {
- IsTopNode = true;
- tracePick(TopCand, IsTopNode);
- return TopCand.SU;
- }
- IsTopNode = false;
- tracePick(BotCand, IsTopNode);
- return BotCand.SU;
- }
- // Check for a salient pressure difference and pick the best from either side.
- if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
- IsTopNode = true;
- tracePick(TopCand, IsTopNode);
- return TopCand.SU;
- }
- // Otherwise prefer the bottom candidate, in node order if all else failed.
+ // Choose the queue with the most important (lowest enum) reason.
if (TopCand.Reason < BotCand.Reason) {
IsTopNode = true;
tracePick(TopCand, IsTopNode);
return TopCand.SU;
}
+ // Otherwise prefer the bottom candidate, in node order if all else failed.
IsTopNode = false;
tracePick(BotCand, IsTopNode);
return BotCand.SU;
}
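A hypothetical arbitration under the rules above: if BotCand fires RegExcess or RegCritical on its first evaluation, the bottom node is taken immediately so pressure is relieved where it is forced. Otherwise, if TopCand.Reason is Cluster and BotCand.Reason is NodeOrder, Cluster is the lower-valued (more important) reason and the top node wins; on equal reasons the bottom candidate is preferred.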
/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
-SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
+SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
if (DAG->top() == DAG->bottom()) {
assert(Top.Available.empty() && Top.Pending.empty() &&
Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
@@ -2279,24 +2745,26 @@ SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
}
SUnit *SU;
do {
- if (ForceTopDown) {
+ if (RegionPolicy.OnlyTopDown) {
SU = Top.pickOnlyChoice();
if (!SU) {
CandPolicy NoPolicy;
SchedCandidate TopCand(NoPolicy);
pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
- assert(TopCand.Reason != NoCand && "failed to find the first candidate");
+ assert(TopCand.Reason != NoCand && "failed to find a candidate");
+ tracePick(TopCand, true);
SU = TopCand.SU;
}
IsTopNode = true;
}
- else if (ForceBottomUp) {
+ else if (RegionPolicy.OnlyBottomUp) {
SU = Bot.pickOnlyChoice();
if (!SU) {
CandPolicy NoPolicy;
SchedCandidate BotCand(NoPolicy);
pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
- assert(BotCand.Reason != NoCand && "failed to find the first candidate");
+ assert(BotCand.Reason != NoCand && "failed to find a candidate");
+ tracePick(BotCand, false);
SU = BotCand.SU;
}
IsTopNode = false;
@@ -2315,7 +2783,7 @@ SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
return SU;
}
-void ConvergingScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
+void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
MachineBasicBlock::iterator InsertPos = SU->getInstr();
if (!isTop)
@@ -2346,15 +2814,15 @@ void ConvergingScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
///
/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
/// them here. See comments in biasPhysRegCopy.
-void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
+void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
if (IsTopNode) {
- SU->TopReadyCycle = Top.CurrCycle;
+ SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.CurrCycle);
Top.bumpNode(SU);
if (SU->hasPhysRegUses)
reschedulePhysRegCopies(SU, true);
}
else {
- SU->BotReadyCycle = Bot.CurrCycle;
+ SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.CurrCycle);
Bot.bumpNode(SU);
if (SU->hasPhysRegDefs)
reschedulePhysRegCopies(SU, false);
@@ -2363,26 +2831,23 @@ void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
-static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
- assert((!ForceTopDown || !ForceBottomUp) &&
- "-misched-topdown incompatible with -misched-bottomup");
- ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new ConvergingScheduler());
+static ScheduleDAGInstrs *createGenericSched(MachineSchedContext *C) {
+ ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new GenericScheduler(C));
// Register DAG post-processors.
//
// FIXME: extend the mutation API to allow earlier mutations to instantiate
// data and pass it to later mutations. Have a single mutation that gathers
// the interesting nodes in one pass.
- if (EnableCopyConstrain)
- DAG->addMutation(new CopyConstrain(DAG->TII, DAG->TRI));
- if (EnableLoadCluster)
+ DAG->addMutation(new CopyConstrain(DAG->TII, DAG->TRI));
+ if (EnableLoadCluster && DAG->TII->enableClusterLoads())
DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI));
if (EnableMacroFusion)
DAG->addMutation(new MacroFusion(DAG->TII));
return DAG;
}
static MachineSchedRegistry
-ConvergingSchedRegistry("converge", "Standard converging scheduler.",
- createConvergingSched);
+GenericSchedRegistry("converge", "Standard converging scheduler.",
+ createGenericSched);
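The registry pattern above is also how a target or plugin can expose its own strategy via the -misched command line option. A minimal sketch; the factory and registry key here are hypothetical:

  static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
    // Reuse the generic strategy; a real target would substitute its own
    // MachineSchedStrategy subclass.
    return new ScheduleDAGMI(C, new GenericScheduler(C));
  }
  static MachineSchedRegistry
  MySchedRegistry("my-sched", "Example custom scheduler.", createMySched);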
//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
@@ -2424,15 +2889,6 @@ struct ILPOrder {
/// \brief Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
- /// In case all subtrees are eventually connected to a common root through
- /// data dependence (e.g. reduction), place an upper limit on their size.
- ///
- /// FIXME: A subtree limit is generally good, but in the situation commented
- /// above, where multiple similar subtrees feed a common root, we should
- /// only split at a point where the resulting subtrees will be balanced.
- /// (a motivating test case must be found).
- static const unsigned SubtreeLimit = 16;
-
ScheduleDAGMI *DAG;
ILPOrder Cmp;
@@ -2616,7 +3072,7 @@ struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
}
static bool isNodeHidden(const SUnit *Node) {
- return (Node->NumPreds > 10 || Node->NumSuccs > 10);
+ return (Node->Preds.size() > 10 || Node->Succs.size() > 10);
}
static bool hasNodeAddressLabel(const SUnit *Node,
@@ -2639,7 +3095,11 @@ struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
std::string Str;
raw_string_ostream SS(Str);
- SS << "SU(" << SU->NodeNum << ')';
+ const SchedDFSResult *DFS =
+ static_cast<const ScheduleDAGMI*>(G)->getDFSResult();
+ SS << "SU:" << SU->NodeNum;
+ if (DFS)
+ SS << " I:" << DFS->getNumInstrs(SU);
return SS.str();
}
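With DFS results available, the code above yields DOT node labels like "SU:5 I:12" (node 5 heading a subtree of 12 instructions); without them the label is just "SU:5". The numbers are hypothetical. A standalone sketch of the same formatting:

  static std::string nodeLabelSketch(unsigned NodeNum, int NumInstrs) {
    std::string Str;
    llvm::raw_string_ostream SS(Str);
    SS << "SU:" << NodeNum;
    if (NumInstrs >= 0) // pass a negative count when no DFS result exists
      SS << " I:" << NumInstrs;
    return SS.str();
  }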
static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {