Diffstat (limited to 'lib/CodeGen/ScheduleDAGInstrs.cpp')
-rw-r--r--  lib/CodeGen/ScheduleDAGInstrs.cpp  135
1 file changed, 96 insertions(+), 39 deletions(-)
diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp
index 8e18b3d..44e9296 100644
--- a/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -14,8 +14,10 @@
#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
+#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
@@ -45,35 +47,24 @@ void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
ScheduleDAG::Run(bb, end);
}
-/// getOpcode - If this is an Instruction or a ConstantExpr, return the
-/// opcode value. Otherwise return UserOp1.
-static unsigned getOpcode(const Value *V) {
- if (const Instruction *I = dyn_cast<Instruction>(V))
- return I->getOpcode();
- if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
- return CE->getOpcode();
- // Use UserOp1 to mean there's no opcode.
- return Instruction::UserOp1;
-}
-
/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
do {
- if (const User *U = dyn_cast<User>(V)) {
+ if (const Operator *U = dyn_cast<Operator>(V)) {
// If we find a ptrtoint, we can transfer control back to the
// regular getUnderlyingObject.
- if (getOpcode(U) == Instruction::PtrToInt)
+ if (U->getOpcode() == Instruction::PtrToInt)
return U->getOperand(0);
// If we find an add of a constant or a multiplied value, it's
// likely that the other operand will lead us to the base
// object. We don't have to worry about the case where the
- // object address is somehow being computed bt the multiply,
+ // object address is somehow being computed by the multiply,
// because our callers only care when the result is an
// identifiable object.
- if (getOpcode(U) != Instruction::Add ||
+ if (U->getOpcode() != Instruction::Add ||
(!isa<ConstantInt>(U->getOperand(1)) &&
- getOpcode(U->getOperand(1)) != Instruction::Mul))
+ Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
return V;
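+ // Otherwise, follow the add's other operand toward the base object.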
V = U->getOperand(0);
} else {
@@ -90,7 +81,7 @@ static const Value *getUnderlyingObject(const Value *V) {
do {
V = V->getUnderlyingObject();
// If it found an inttoptr, use special code to continue climbing.
- if (getOpcode(V) != Instruction::IntToPtr)
+ if (Operator::getOpcode(V) != Instruction::IntToPtr)
break;
const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
// If that succeeded in finding a pointer, continue the search.
@@ -106,11 +97,11 @@ static const Value *getUnderlyingObject(const Value *V) {
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI) {
if (!MI->hasOneMemOperand() ||
- !MI->memoperands_begin()->getValue() ||
- MI->memoperands_begin()->isVolatile())
+ !(*MI->memoperands_begin())->getValue() ||
+ (*MI->memoperands_begin())->isVolatile())
return 0;
- const Value *V = MI->memoperands_begin()->getValue();
+ const Value *V = (*MI->memoperands_begin())->getValue();
if (!V)
return 0;
@@ -132,7 +123,7 @@ void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
}
}
-void ScheduleDAGInstrs::BuildSchedGraph() {
+void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
// We'll be allocating one SUnit for each instruction, plus one for
// the region exit node.
SUnits.reserve(BB->size());
@@ -155,8 +146,8 @@ void ScheduleDAGInstrs::BuildSchedGraph() {
bool UnitLatencies = ForceUnitLatencies();
// Ask the target if address-backscheduling is desirable, and if so how much.
- unsigned SpecialAddressLatency =
- TM.getSubtarget<TargetSubtarget>().getSpecialAddressLatency();
+ const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
+ unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
// Walk the list of instructions, from bottom moving up.
for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
@@ -184,16 +175,20 @@ void ScheduleDAGInstrs::BuildSchedGraph() {
assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
std::vector<SUnit *> &UseList = Uses[Reg];
std::vector<SUnit *> &DefList = Defs[Reg];
- // Optionally add output and anti dependencies.
- // TODO: Using a latency of 1 here assumes there's no cost for
- // reusing registers.
+ // Optionally add output and anti dependencies. For anti
+ // dependencies we use a latency of 0 because for a multi-issue
+ // target we want to allow the defining instruction to issue
+ // in the same cycle as the using instruction.
+ // TODO: Using a latency of 1 here for output dependencies assumes
+ // there's no cost for reusing registers.
SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
+ unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
SUnit *DefSU = DefList[i];
if (DefSU != SU &&
(Kind != SDep::Output || !MO.isDead() ||
!DefSU->getInstr()->registerDefIsDead(Reg)))
- DefSU->addPred(SDep(SU, Kind, /*Latency=*/1, /*Reg=*/Reg));
+ DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
}
for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
std::vector<SUnit *> &DefList = Defs[*Alias];
@@ -202,7 +197,7 @@ void ScheduleDAGInstrs::BuildSchedGraph() {
if (DefSU != SU &&
(Kind != SDep::Output || !MO.isDead() ||
!DefSU->getInstr()->registerDefIsDead(Reg)))
- DefSU->addPred(SDep(SU, Kind, /*Latency=*/1, /*Reg=*/ *Alias));
+ DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
}
}
@@ -216,6 +211,10 @@ void ScheduleDAGInstrs::BuildSchedGraph() {
// Optionally add in a special extra latency for nodes that
// feed addresses.
// TODO: Do this for register aliases too.
+ // TODO: Perhaps we should get rid of
+ // SpecialAddressLatency and just move this into
+ // adjustSchedDependency for the targets that care about
+ // it.
if (SpecialAddressLatency != 0 && !UnitLatencies) {
MachineInstr *UseMI = UseSU->getInstr();
const TargetInstrDesc &UseTID = UseMI->getDesc();
@@ -226,15 +225,29 @@ void ScheduleDAGInstrs::BuildSchedGraph() {
UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
LDataLatency += SpecialAddressLatency;
}
- UseSU->addPred(SDep(SU, SDep::Data, LDataLatency, Reg));
+ // Adjust the dependence latency using operand def/use
+ // information (if any), and then allow the target to
+ // perform its own adjustments.
+ SDep dep(SU, SDep::Data, LDataLatency, Reg);
+ if (!UnitLatencies) {
+ ComputeOperandLatency(SU, UseSU, dep);
+ ST.adjustSchedDependency(SU, UseSU, dep);
+ }
+ UseSU->addPred(dep);
}
}
for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
std::vector<SUnit *> &UseList = Uses[*Alias];
for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
SUnit *UseSU = UseList[i];
- if (UseSU != SU)
- UseSU->addPred(SDep(SU, SDep::Data, DataLatency, *Alias));
+ if (UseSU != SU) {
+ SDep dep(SU, SDep::Data, DataLatency, *Alias);
+ if (!UnitLatencies) {
+ ComputeOperandLatency(SU, UseSU, dep);
+ ST.adjustSchedDependency(SU, UseSU, dep);
+ }
+ UseSU->addPred(dep);
+ }
}
}
@@ -323,10 +336,10 @@ void ScheduleDAGInstrs::BuildSchedGraph() {
if (!ChainTID.isCall() &&
!ChainTID.hasUnmodeledSideEffects() &&
ChainMI->hasOneMemOperand() &&
- !ChainMI->memoperands_begin()->isVolatile() &&
- ChainMI->memoperands_begin()->getValue())
+ !(*ChainMI->memoperands_begin())->isVolatile() &&
+ (*ChainMI->memoperands_begin())->getValue())
// We know that the Chain accesses one specific memory location.
- ChainMMO = &*ChainMI->memoperands_begin();
+ ChainMMO = *ChainMI->memoperands_begin();
else
// Unknown memory accesses. Assume the worst.
ChainMMO = 0;
@@ -362,7 +375,7 @@ void ScheduleDAGInstrs::BuildSchedGraph() {
// Treat all other stores conservatively.
goto new_chain;
} else if (TID.mayLoad()) {
- if (TII->isInvariantLoad(MI)) {
+ if (MI->isInvariantLoad(AA)) {
// Invariant load, no chain dependencies needed!
} else if (const Value *V = getUnderlyingObjectForInstr(MI)) {
// A load from a specific PseudoSourceValue. Add precise dependencies.
@@ -409,10 +422,9 @@ void ScheduleDAGInstrs::FinishBlock() {
void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
- // Compute the latency for the node. We use the sum of the latencies for
- // all nodes flagged together into this SUnit.
+ // Compute the latency for the node.
SU->Latency =
- InstrItins.getLatency(SU->getInstr()->getDesc().getSchedClass());
+ InstrItins.getStageLatency(SU->getInstr()->getDesc().getSchedClass());
// Simplistic target-independent heuristic: assume that loads take
// extra time.
@@ -421,6 +433,50 @@ void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
SU->Latency += 2;
}
+void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
+ SDep& dep) const {
+ const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
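+ // Without itinerary data there is nothing to refine; keep the default latency.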
+ if (InstrItins.isEmpty())
+ return;
+
+ // For a data dependency with a known register...
+ if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
+ return;
+
+ const unsigned Reg = dep.getReg();
+
+ // ... find the definition of the register in the defining
+ // instruction.
+ MachineInstr *DefMI = Def->getInstr();
+ int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
+ if (DefIdx != -1) {
+ int DefCycle = InstrItins.getOperandCycle(DefMI->getDesc().getSchedClass(), DefIdx);
+ if (DefCycle >= 0) {
+ MachineInstr *UseMI = Use->getInstr();
+ const unsigned UseClass = UseMI->getDesc().getSchedClass();
+
+ // For all uses of the register, calculate the maximum latency.
+ int Latency = -1;
+ for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = UseMI->getOperand(i);
+ if (!MO.isReg() || !MO.isUse())
+ continue;
+ unsigned MOReg = MO.getReg();
+ if (MOReg != Reg)
+ continue;
+
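+ // The def's result is available DefCycle cycles after the def issues, and
+ // the use reads it UseCycle cycles after the use issues, so the use must
+ // trail the def by at least DefCycle - UseCycle + 1 cycles.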
+ int UseCycle = InstrItins.getOperandCycle(UseClass, i);
+ if (UseCycle >= 0)
+ Latency = std::max(Latency, DefCycle - UseCycle + 1);
+ }
+
+ // If we found a latency, then replace the existing dependence latency.
+ if (Latency >= 0)
+ dep.setLatency(Latency);
+ }
+ }
+}
+
void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
SU->getInstr()->dump();
}
@@ -438,7 +494,8 @@ std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
}
// EmitSchedule - Emit the machine code in scheduled order.
-MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
+MachineBasicBlock *ScheduleDAGInstrs::
+EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
// For MachineInstr-based scheduling, we're rescheduling the instructions in
// the block, so start by removing them from the block.
while (Begin != InsertPos) {
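For illustration, a minimal sketch of the target hook this patch begins invoking. The signature matches the ST.adjustSchedDependency(SU, UseSU, dep) calls added above; MyTargetSubtarget and its extra-cycle-for-loads policy are hypothetical, not part of the patch or of any real target.

// Hypothetical subtarget hook: charge one extra cycle whenever a load
// feeds a data dependence. Sketch only; not a real LLVM target.
void MyTargetSubtarget::adjustSchedDependency(SUnit *Def, SUnit *Use,
                                              SDep &dep) const {
  if (dep.getKind() == SDep::Data &&
      Def->getInstr()->getDesc().mayLoad())
    dep.setLatency(dep.getLatency() + 1);
}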