Diffstat (limited to 'contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp')
-rw-r--r--  contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp | 95
1 file changed, 49 insertions(+), 46 deletions(-)
diff --git a/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp b/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp
index e7330c6..01f91b9 100644
--- a/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -84,8 +84,8 @@ unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
strlen(MAI.getSeparatorString())) == 0) {
atInsnStart = true;
- } else if (strncmp(Str, MAI.getCommentString(),
- strlen(MAI.getCommentString())) == 0) {
+ } else if (strncmp(Str, MAI.getCommentString().data(),
+ MAI.getCommentString().size()) == 0) {
// Stop counting as an instruction after a comment until the next
// separator.
atInsnStart = false;
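The hunk above tracks MCAsmInfo::getCommentString() now returning a StringRef
rather than a const char *. A minimal sketch of the prefix comparison the new
code performs (illustrative only, not part of the patch); StringRef::data()
is not guaranteed to be NUL-terminated, so the length must come from size():

    #include "llvm/ADT/StringRef.h"
    #include <cstring>

    // Does Str begin with Prefix? Safe even for non-NUL-terminated refs.
    static bool startsWithPrefix(const char *Str, llvm::StringRef Prefix) {
      return std::strncmp(Str, Prefix.data(), Prefix.size()) == 0;
    }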
@@ -119,7 +119,7 @@ TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
// If MBB isn't immediately before NewDest, insert a branch to it.
if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
- InsertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
+ insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
MBB->addSuccessor(NewDest);
}
@@ -437,13 +437,20 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
const TargetInstrInfo &TII) {
unsigned StartIdx = 0;
switch (MI.getOpcode()) {
- case TargetOpcode::STACKMAP:
- StartIdx = 2; // Skip ID, nShadowBytes.
+ case TargetOpcode::STACKMAP: {
+ // StackMapLiveValues are foldable
+ StartIdx = StackMapOpers(&MI).getVarIdx();
break;
+ }
case TargetOpcode::PATCHPOINT: {
- // For PatchPoint, the call args are not foldable.
- PatchPointOpers opers(&MI);
- StartIdx = opers.getVarIdx();
+ // For PatchPoint, the call args are not foldable (even if reported in the
+ // stackmap e.g. via anyregcc).
+ StartIdx = PatchPointOpers(&MI).getVarIdx();
+ break;
+ }
+ case TargetOpcode::STATEPOINT: {
+ // For statepoints, fold deopt and gc arguments, but not call arguments.
+ StartIdx = StatepointOpers(&MI).getVarIdx();
break;
}
default:
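For reference, the operand sections these getVarIdx() calls skip, summarized
from the comments in the hunk above; the authoritative accessors live in
llvm/CodeGen/StackMaps.h, so treat the field order here as illustrative:

    // Only the trailing section of each intrinsic is foldable:
    //
    //   STACKMAP   <id>, <numShadowBytes>,                live values...
    //   PATCHPOINT <id>, <numBytes>, <target>, <numArgs>, call args..., live values...
    //   STATEPOINT <id>, <numPatchBytes>, <target>, ...,  call args..., deopt/gc args...
    //
    // getVarIdx() returns the index of the first operand past the
    // non-foldable prefix, which becomes StartIdx for the fold loop.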
@@ -467,7 +474,7 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
MachineOperand &MO = MI.getOperand(i);
- if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
+ if (is_contained(Ops, i)) {
unsigned SpillSize;
unsigned SpillOffset;
// Compute the spill slot size and offset.
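The is_contained change above swaps an explicit std::find for LLVM's range
helper from llvm/ADT/STLExtras.h; a minimal equivalence sketch (hypothetical
helper, not part of the patch):

    #include <algorithm>
    #include <iterator>

    // llvm::is_contained(Range, Elt) is shorthand for the classic idiom:
    template <typename R, typename E>
    static bool containedIn(const R &Range, const E &Elt) {
      return std::find(std::begin(Range), std::end(Range), Elt) !=
             std::end(Range);
    }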
@@ -508,10 +515,36 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
assert(MBB && "foldMemoryOperand needs an inserted instruction");
MachineFunction &MF = *MBB->getParent();
+ // If we're not folding a load into a subreg, the size of the load is the
+ // size of the spill slot. But if we are, we need to figure out what the
+ // actual load size is.
+ int64_t MemSize = 0;
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+
+ if (Flags & MachineMemOperand::MOStore) {
+ MemSize = MFI.getObjectSize(FI);
+ } else {
+ for (unsigned Idx : Ops) {
+ int64_t OpSize = MFI.getObjectSize(FI);
+
+ if (auto SubReg = MI.getOperand(Idx).getSubReg()) {
+ unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
+ if (SubRegSize > 0 && !(SubRegSize % 8))
+ OpSize = SubRegSize / 8;
+ }
+
+ MemSize = std::max(MemSize, OpSize);
+ }
+ }
+
+ assert(MemSize && "Did not expect a zero-sized stack slot");
+
MachineInstr *NewMI = nullptr;
if (MI.getOpcode() == TargetOpcode::STACKMAP ||
- MI.getOpcode() == TargetOpcode::PATCHPOINT) {
+ MI.getOpcode() == TargetOpcode::PATCHPOINT ||
+ MI.getOpcode() == TargetOpcode::STATEPOINT) {
// Fold stackmap/patchpoint.
NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
if (NewMI)
@@ -530,10 +563,9 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
assert((!(Flags & MachineMemOperand::MOLoad) ||
NewMI->mayLoad()) &&
"Folded a use to a non-load!");
- const MachineFrameInfo &MFI = *MF.getFrameInfo();
assert(MFI.getObjectOffset(FI) != -1);
MachineMemOperand *MMO = MF.getMachineMemOperand(
- MachinePointerInfo::getFixedStack(MF, FI), Flags, MFI.getObjectSize(FI),
+ MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
MFI.getObjectAlignment(FI));
NewMI->addMemOperand(MF, MMO);
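The MemSize computation introduced above exists because folding a load of a
subregister must not claim the whole spill slot's width in the resulting
memory operand. A compact sketch of the rule (hypothetical helper, not in
the patch); TargetRegisterInfo::getSubRegIdxSize() reports the width in bits:

    #include <cstdint>

    // Pick the memory size for a folded subreg load: use the subreg's
    // byte width when it is known and byte-aligned, else the full slot.
    static int64_t foldedLoadSize(int64_t SlotSize, unsigned SubRegBits) {
      if (SubRegBits > 0 && SubRegBits % 8 == 0)
        return SubRegBits / 8; // bits -> bytes
      return SlotSize;         // unknown or odd width: whole slot
    }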
@@ -550,7 +582,6 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
MachineBasicBlock::iterator Pos = MI;
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (Flags == MachineMemOperand::MOStore)
storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
@@ -792,7 +823,8 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
int FrameIndex = 0;
if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
- MI.getOpcode() == TargetOpcode::PATCHPOINT) &&
+ MI.getOpcode() == TargetOpcode::PATCHPOINT ||
+ MI.getOpcode() == TargetOpcode::STATEPOINT) &&
isLoadFromStackSlot(LoadMI, FrameIndex)) {
// Fold stackmap/patchpoint.
NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
@@ -844,7 +876,7 @@ bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
// simple, and a common case.
int FrameIdx = 0;
if (isLoadFromStackSlot(MI, FrameIdx) &&
- MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
+ MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
return true;
// Avoid instructions obviously unsafe for remat.
@@ -857,7 +889,7 @@ bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
return false;
// Avoid instructions which load from potentially varying memory.
- if (MI.mayLoad() && !MI.isInvariantLoad(AA))
+ if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
return false;
// If any of the registers accessed are non-constant, conservatively assume
@@ -875,7 +907,7 @@ bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
// If the physreg has no defs anywhere, it's just an ambient register
// and we can freely move its uses. Alternatively, if it's allocatable,
// it could get allocated to something with a def during allocation.
- if (!MRI.isConstantPhysReg(Reg, MF))
+ if (!MRI.isConstantPhysReg(Reg))
return false;
} else {
// A physreg def. We can't remat it.
@@ -1091,35 +1123,6 @@ int TargetInstrInfo::computeDefOperandLatency(
return -1;
}
-unsigned TargetInstrInfo::computeOperandLatency(
- const InstrItineraryData *ItinData, const MachineInstr &DefMI,
- unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const {
-
- int DefLatency = computeDefOperandLatency(ItinData, DefMI);
- if (DefLatency >= 0)
- return DefLatency;
-
- assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");
-
- int OperLatency = 0;
- if (UseMI)
- OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, *UseMI, UseIdx);
- else {
- unsigned DefClass = DefMI.getDesc().getSchedClass();
- OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
- }
- if (OperLatency >= 0)
- return OperLatency;
-
- // No operand latency was found.
- unsigned InstrLatency = getInstrLatency(ItinData, DefMI);
-
- // Expected latency is the max of the stage latency and itinerary props.
- InstrLatency = std::max(InstrLatency,
- defaultDefLatency(ItinData->SchedModel, DefMI));
- return InstrLatency;
-}
-
bool TargetInstrInfo::getRegSequenceInputs(
const MachineInstr &MI, unsigned DefIdx,
SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {