path: root/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp
Diffstat (limited to 'contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp')
-rw-r--r--  contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp | 72
1 file changed, 37 insertions(+), 35 deletions(-)
diff --git a/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp b/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 608b806..c809087 100644
--- a/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -142,6 +142,10 @@ MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
bool Reg1IsKill = MI->getOperand(Idx1).isKill();
bool Reg2IsKill = MI->getOperand(Idx2).isKill();
+ bool Reg1IsUndef = MI->getOperand(Idx1).isUndef();
+ bool Reg2IsUndef = MI->getOperand(Idx2).isUndef();
+ bool Reg1IsInternal = MI->getOperand(Idx1).isInternalRead();
+ bool Reg2IsInternal = MI->getOperand(Idx2).isInternalRead();
// If destination is tied to either of the commuted source register, then
// it must be updated.
if (HasDef && Reg0 == Reg1 &&
@@ -172,6 +176,10 @@ MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
MI->getOperand(Idx1).setSubReg(SubReg2);
MI->getOperand(Idx2).setIsKill(Reg1IsKill);
MI->getOperand(Idx1).setIsKill(Reg2IsKill);
+ MI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
+ MI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
+ MI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
+ MI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
return MI;
}
@@ -285,21 +293,20 @@ bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
unsigned SubIdx, unsigned &Size,
unsigned &Offset,
- const TargetMachine *TM) const {
+ const MachineFunction &MF) const {
if (!SubIdx) {
Size = RC->getSize();
Offset = 0;
return true;
}
- unsigned BitSize =
- TM->getSubtargetImpl()->getRegisterInfo()->getSubRegIdxSize(SubIdx);
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+ unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
// Convert bit size to byte size to be consistent with
// MCRegisterClass::getSize().
if (BitSize % 8)
return false;
- int BitOffset =
- TM->getSubtargetImpl()->getRegisterInfo()->getSubRegIdxOffset(SubIdx);
+ int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
if (BitOffset < 0 || BitOffset % 8)
return false;
@@ -308,7 +315,7 @@ bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
assert(RC->getSize() >= (Offset + Size) && "bad subregister range");
- if (!TM->getSubtargetImpl()->getDataLayout()->isLittleEndian()) {
+ if (!MF.getTarget().getDataLayout()->isLittleEndian()) {
Offset = RC->getSize() - (Offset + Size);
}
return true;
@@ -377,16 +384,13 @@ void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
llvm_unreachable("Not a MachO target");
}
-bool TargetInstrInfo::
-canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const {
+bool TargetInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
+ ArrayRef<unsigned> Ops) const {
return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}
-static MachineInstr* foldPatchpoint(MachineFunction &MF,
- MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex,
+static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
+ ArrayRef<unsigned> Ops, int FrameIndex,
const TargetInstrInfo &TII) {
unsigned StartIdx = 0;
switch (MI->getOpcode()) {
@@ -405,9 +409,8 @@ static MachineInstr* foldPatchpoint(MachineFunction &MF,
// Return false if any operands requested for folding are not foldable (not
// part of the stackmap's live values).
- for (SmallVectorImpl<unsigned>::const_iterator I = Ops.begin(), E = Ops.end();
- I != E; ++I) {
- if (*I < StartIdx)
+ for (unsigned Op : Ops) {
+ if (Op < StartIdx)
return nullptr;
}
@@ -427,8 +430,8 @@ static MachineInstr* foldPatchpoint(MachineFunction &MF,
// Compute the spill slot size and offset.
const TargetRegisterClass *RC =
MF.getRegInfo().getRegClass(MO.getReg());
- bool Valid = TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize,
- SpillOffset, &MF.getTarget());
+ bool Valid =
+ TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
if (!Valid)
report_fatal_error("cannot spill patchpoint subregister operand");
MIB.addImm(StackMaps::IndirectMemRefOp);
@@ -448,10 +451,9 @@ static MachineInstr* foldPatchpoint(MachineFunction &MF,
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
-MachineInstr*
-TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FI) const {
+MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
+ ArrayRef<unsigned> Ops,
+ int FI) const {
unsigned Flags = 0;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (MI->getOperand(Ops[i]).isDef())
@@ -469,11 +471,13 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
MI->getOpcode() == TargetOpcode::PATCHPOINT) {
// Fold stackmap/patchpoint.
NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
+ if (NewMI)
+ MBB->insert(MI, NewMI);
} else {
// Ask the target to do the actual folding.
- NewMI =foldMemoryOperandImpl(MF, MI, Ops, FI);
+ NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI);
}
-
+
if (NewMI) {
NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
// Add a memory operand, foldMemoryOperandImpl doesn't do that.
@@ -491,8 +495,7 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
MFI.getObjectAlignment(FI));
NewMI->addMemOperand(MF, MMO);
- // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
- return MBB->insert(MI, NewMI);
+ return NewMI;
}
// Straight COPY may fold as load/store.
@@ -517,10 +520,9 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
-MachineInstr*
-TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const {
+MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
+ ArrayRef<unsigned> Ops,
+ MachineInstr *LoadMI) const {
assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
@@ -538,15 +540,15 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
isLoadFromStackSlot(LoadMI, FrameIndex)) {
// Fold stackmap/patchpoint.
NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
+ if (NewMI)
+ NewMI = MBB.insert(MI, NewMI);
} else {
// Ask the target to do the actual folding.
- NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
+ NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI);
}
if (!NewMI) return nullptr;
- NewMI = MBB.insert(MI, NewMI);
-
// Copy the memoperands from the load to the folded instruction.
if (MI->memoperands_empty()) {
NewMI->setMemRefs(LoadMI->memoperands_begin(),
@@ -651,8 +653,8 @@ int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
bool StackGrowsDown =
TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
- int FrameSetupOpcode = getCallFrameSetupOpcode();
- int FrameDestroyOpcode = getCallFrameDestroyOpcode();
+ unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
+ unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
if (MI->getOpcode() != FrameSetupOpcode &&
MI->getOpcode() != FrameDestroyOpcode)
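
Not part of the patch: a minimal call-site sketch of the SmallVectorImpl-to-ArrayRef migration in the hunks above, assuming the post-patch foldMemoryOperand signature shown there. The helper name foldSingleOperand and the variables OpIdx/FI are hypothetical placeholders, not identifiers from this change.

```cpp
// Hypothetical caller, for illustration only.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

// Fold one register operand of MI into a load/store on stack slot FI.
static MachineInstr *foldSingleOperand(const TargetInstrInfo &TII,
                                       MachineBasicBlock::iterator MI,
                                       unsigned OpIdx, int FI) {
  // ArrayRef<unsigned> binds directly to a braced list (or a plain C array);
  // the old SmallVectorImpl<unsigned>& parameter forced callers to build a
  // SmallVector and push_back the operand indices first.
  return TII.foldMemoryOperand(MI, {OpIdx}, FI);
}
```

On the implementation side, the extra iterator argument now passed to foldMemoryOperandImpl in the hunks above is the insertion point: targets overriding it are expected to insert the folded instruction themselves, which is why the unconditional MBB.insert(MI, NewMI) after the call is dropped and the stackmap/patchpoint path inserts NewMI explicitly.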