Diffstat (limited to 'contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp')
-rw-r--r--  contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp | 77
1 file changed, 39 insertions(+), 38 deletions(-)
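The change below tracks an upstream LLVM API cleanup: MachineFunction::getFrameInfo() now returns a MachineFrameInfo reference rather than a pointer (the frame info member always exists, so null was never a real possibility), and CFI frame instructions are registered via MachineFunction::addFrameInst instead of going through MachineModuleInfo. A minimal, self-contained sketch of the same accessor pattern outside LLVM, with hypothetical FrameInfo/Function types standing in for the real classes:

// Sketch (not LLVM code) of the change applied below: an always-valid
// member is exposed by reference instead of by pointer, and call sites
// switch mechanically from '->' to '.'.
#include <iostream>

struct FrameInfo {
  bool HasCalls = false;
  bool hasCalls() const { return HasCalls; }
};

class Function {
  FrameInfo FI; // always present; a pointer accessor over-promises nullability
public:
  FrameInfo &getFrameInfo() { return FI; }             // new style: reference
  const FrameInfo &getFrameInfo() const { return FI; } // const overload
};

int main() {
  Function F;
  const FrameInfo &MFI = F.getFrameInfo(); // no null check needed
  std::cout << std::boolalpha << MFI.hasCalls() << '\n';
}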
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 82111e5..f5b8c35 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -124,23 +124,23 @@ bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
if (MF.getFunction()->hasFnAttribute(Attribute::NoRedZone))
return false;
- const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
unsigned NumBytes = AFI->getLocalStackSize();
- return !(MFI->hasCalls() || hasFP(MF) || NumBytes > 128);
+ return !(MFI.hasCalls() || hasFP(MF) || NumBytes > 128);
}
/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register.
bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
// Retain behavior of always omitting the FP for leaf functions when possible.
- return (MFI->hasCalls() &&
+ return (MFI.hasCalls() &&
MF.getTarget().Options.DisableFramePointerElim(MF)) ||
- MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken() ||
- MFI->hasStackMap() || MFI->hasPatchPoint() ||
+ MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
+ MFI.hasStackMap() || MFI.hasPatchPoint() ||
RegInfo->needsStackRealignment(MF);
}
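For context on the 128-byte bound in canUseRedZone above: AArch64 ABIs that grant a red zone allow leaf code to use up to 128 bytes below SP without adjusting it, so the target only permits it when the function makes no calls, needs no frame pointer, and keeps locals within that window. A standalone restatement of the predicate, with plain parameters standing in for the MachineFrameInfo queries (names are illustrative):

// Hedged sketch of canUseRedZone: true only for small leaf frames.
bool canUseRedZone(bool NoRedZoneAttr, bool HasCalls, bool HasFP,
                   unsigned LocalStackBytes) {
  if (NoRedZoneAttr) // function opted out via the noredzone attribute
    return false;
  // 128 bytes is the red-zone size this target code assumes.
  return !(HasCalls || HasFP || LocalStackBytes > 128);
}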
@@ -151,7 +151,7 @@ bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
/// included as part of the stack frame.
bool
AArch64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
- return !MF.getFrameInfo()->hasVarSizedObjects();
+ return !MF.getFrameInfo().hasVarSizedObjects();
}
MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr(
@@ -203,23 +203,23 @@ MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr(
void AArch64FrameLowering::emitCalleeSavedFrameMoves(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
MachineFunction &MF = *MBB.getParent();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MachineModuleInfo &MMI = MF.getMMI();
- const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
- const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ const TargetSubtargetInfo &STI = MF.getSubtarget();
+ const MCRegisterInfo *MRI = STI.getRegisterInfo();
+ const TargetInstrInfo *TII = STI.getInstrInfo();
DebugLoc DL = MBB.findDebugLoc(MBBI);
// Add callee saved registers to move list.
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+ const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
if (CSI.empty())
return;
for (const auto &Info : CSI) {
unsigned Reg = Info.getReg();
int64_t Offset =
- MFI->getObjectOffset(Info.getFrameIdx()) - getOffsetOfLocalArea();
+ MFI.getObjectOffset(Info.getFrameIdx()) - getOffsetOfLocalArea();
unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
- unsigned CFIIndex = MMI.addFrameInst(
+ unsigned CFIIndex = MF.addFrameInst(
MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex)
@@ -286,7 +286,7 @@ bool AArch64FrameLowering::canUseAsPrologue(
bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
MachineFunction &MF, unsigned StackBumpBytes) const {
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
- const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
@@ -298,7 +298,7 @@ bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
if (StackBumpBytes >= 512)
return false;
- if (MFI->hasVarSizedObjects())
+ if (MFI.hasVarSizedObjects())
return false;
if (RegInfo->needsStackRealignment(MF))
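The 512-byte cutoff above exists because the combined bump is meant to fold into the writeback form of the callee-save STP/LDP: those encodings take a signed 7-bit immediate scaled by the access size, which for 8-byte registers covers roughly plus or minus 512 bytes. A small check sketching that encoding limit (illustrative, not the LLVM predicate):

// Can a pre-indexed STP of X registers absorb the whole stack bump?
// The scaled imm7 field holds -64..63 units of 8 bytes.
bool fitsInPairedWriteback(int StackBumpBytes) {
  const int Scale = 8;
  if (StackBumpBytes % Scale != 0)
    return false;
  int Imm = -StackBumpBytes / Scale; // pre-decrement by the bump size
  return Imm >= -64 && Imm <= 63;
}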
@@ -407,7 +407,7 @@ static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI,
void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
- const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
const Function *Fn = MF.getFunction();
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
@@ -426,7 +426,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
return;
- int NumBytes = (int)MFI->getStackSize();
+ int NumBytes = (int)MFI.getStackSize();
if (!AFI->hasStackFrame()) {
assert(!HasFP && "unexpected function without stack frame but with FP");
@@ -446,7 +446,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
// Label used to tie together the PROLOG_LABEL and the MachineMoves.
MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
// Encode the stack size of the leaf function.
- unsigned CFIIndex = MMI.addFrameInst(
+ unsigned CFIIndex = MF.addFrameInst(
MCCFIInstruction::createDefCfaOffset(FrameLabel, -NumBytes));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex)
@@ -513,7 +513,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
MachineInstr::FrameSetup);
if (NeedsRealignment) {
- const unsigned Alignment = MFI->getMaxAlignment();
+ const unsigned Alignment = MFI.getMaxAlignment();
const unsigned NrBitsToZero = countTrailingZeros(Alignment);
assert(NrBitsToZero > 1);
assert(scratchSPReg != AArch64::SP);
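The realignment code above depends on Alignment being a power of two: countTrailingZeros(Alignment) is then log2(Alignment), and clearing that many low bits rounds the scratch SP copy down to the boundary. The arithmetic in isolation:

#include <cassert>
#include <cstdint>

// Round Addr down to a power-of-two Alignment by clearing its low bits;
// this is the effect of the masking the prologue performs on SP.
uint64_t alignDown(uint64_t Addr, uint64_t Alignment) {
  assert(Alignment != 0 && (Alignment & (Alignment - 1)) == 0);
  return Addr & ~(Alignment - 1);
}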
@@ -621,15 +621,15 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
if (HasFP) {
// Define the current CFA rule to use the provided FP.
unsigned Reg = RegInfo->getDwarfRegNum(FramePtr, true);
- unsigned CFIIndex = MMI.addFrameInst(
+ unsigned CFIIndex = MF.addFrameInst(
MCCFIInstruction::createDefCfa(nullptr, Reg, 2 * StackGrowth));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex)
.setMIFlags(MachineInstr::FrameSetup);
} else {
// Encode the stack size of the leaf function.
- unsigned CFIIndex = MMI.addFrameInst(
- MCCFIInstruction::createDefCfaOffset(nullptr, -MFI->getStackSize()));
+ unsigned CFIIndex = MF.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, -MFI.getStackSize()));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex)
.setMIFlags(MachineInstr::FrameSetup);
@@ -644,7 +644,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
- MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL;
@@ -655,7 +655,7 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi ||
RetOpcode == AArch64::TCRETURNri;
}
- int NumBytes = MFI->getStackSize();
+ int NumBytes = MFI.getStackSize();
const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
// All calls are tail calls in GHC calling conv, and functions have no
@@ -762,7 +762,7 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
// FIXME: Rather than doing the math here, we should instead just use
// non-post-indexed loads for the restores if we aren't actually going to
// be able to save any instructions.
- if (MFI->hasVarSizedObjects() || AFI->isStackRealigned())
+ if (MFI.hasVarSizedObjects() || AFI->isStackRealigned())
emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::FP,
-CSStackSize + 16, TII, MachineInstr::FrameDestroy);
else if (NumBytes)
@@ -790,13 +790,13 @@ int AArch64FrameLowering::getFrameIndexReference(const MachineFunction &MF,
int AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF,
int FI, unsigned &FrameReg,
bool PreferFP) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
MF.getSubtarget().getRegisterInfo());
const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
- int FPOffset = MFI->getObjectOffset(FI) + 16;
- int Offset = MFI->getObjectOffset(FI) + MFI->getStackSize();
- bool isFixed = MFI->isFixedObjectIndex(FI);
+ int FPOffset = MFI.getObjectOffset(FI) + 16;
+ int Offset = MFI.getObjectOffset(FI) + MFI.getStackSize();
+ bool isFixed = MFI.isFixedObjectIndex(FI);
// Use frame pointer to reference fixed objects. Use it for locals if
// there are VLAs or a dynamically realigned SP (and thus the SP isn't
@@ -821,7 +821,7 @@ int AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF,
// using the FP regardless, though, as the SP offset is unknown
// and we don't have a base pointer available. If an offset is
// available via the FP and the SP, use whichever is closest.
- if (PreferFP || MFI->hasVarSizedObjects() || FPOffset >= 0 ||
+ if (PreferFP || MFI.hasVarSizedObjects() || FPOffset >= 0 ||
(FPOffset >= -256 && Offset > -FPOffset))
UseFP = true;
}
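Restating the base-register choice above: FPOffset addresses the object relative to the frame pointer (the +16 in the earlier hunk skips the saved FP/LR pair), while Offset is SP-relative and so depends on the total stack size. FP is forced for VLAs or non-negative FP offsets, and otherwise preferred when its offset is at least -256 (encodable in the unscaled form) and closer than the SP-relative one. Side by side, with illustrative parameter names:

// Mirrors the condition in the hunk: use FP when forced, or when its
// offset is encodable (>= -256) and closer than the SP-relative one.
bool useFPBase(bool PreferFP, bool HasVarSizedObjects,
               int FPOffset, int SPOffset) {
  return PreferFP || HasVarSizedObjects || FPOffset >= 0 ||
         (FPOffset >= -256 && SPOffset > -FPOffset);
}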
@@ -869,7 +869,7 @@ static bool produceCompactUnwindFrame(MachineFunction &MF) {
Attrs.hasAttrSomewhere(Attribute::SwiftError));
}
-
+namespace {
struct RegPairInfo {
RegPairInfo() : Reg1(AArch64::NoRegister), Reg2(AArch64::NoRegister) {}
unsigned Reg1;
@@ -879,6 +879,7 @@ struct RegPairInfo {
bool IsGPR;
bool isPaired() const { return Reg2 != AArch64::NoRegister; }
};
+} // end anonymous namespace
static void computeCalleeSaveRegisterPairs(
MachineFunction &MF, const std::vector<CalleeSavedInfo> &CSI,
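Wrapping RegPairInfo in an anonymous namespace, as the hunk above does, is standard practice for file-local helper types: it gives the struct internal linkage, so an identically named type in another translation unit cannot collide under the ODR. A minimal illustration:

// Two .cpp files could each define their own RegPairInfo without
// conflict, because anonymous-namespace names are private to their TU.
namespace {
struct RegPairInfo {
  unsigned Reg1 = 0;
  unsigned Reg2 = 0;
};
} // end anonymous namespace

int main() {
  RegPairInfo RPI; // usable normally within this file
  return RPI.Reg1 + RPI.Reg2;
}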
@@ -888,7 +889,7 @@ static void computeCalleeSaveRegisterPairs(
return;
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
- MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
CallingConv::ID CC = MF.getFunction()->getCallingConv();
unsigned Count = CSI.size();
(void)CC;
@@ -941,8 +942,8 @@ static void computeCalleeSaveRegisterPairs(
// Round up size of non-pair to pair size if we need to pad the
// callee-save area to ensure 16-byte alignment.
Offset -= 16;
- assert(MFI->getObjectAlignment(RPI.FrameIdx) <= 16);
- MFI->setObjectAlignment(RPI.FrameIdx, 16);
+ assert(MFI.getObjectAlignment(RPI.FrameIdx) <= 16);
+ MFI.setObjectAlignment(RPI.FrameIdx, 16);
AFI->setCalleeSaveStackHasFreeSpace(true);
} else
Offset -= RPI.isPaired() ? 16 : 8;
@@ -1149,8 +1150,8 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
// realistically that's not a big deal at this stage of the game.
// The CSR spill slots have not been allocated yet, so estimateStackSize
// won't include them.
- MachineFrameInfo *MFI = MF.getFrameInfo();
- unsigned CFSize = MFI->estimateStackSize(MF) + 8 * NumRegsSpilled;
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ unsigned CFSize = MFI.estimateStackSize(MF) + 8 * NumRegsSpilled;
DEBUG(dbgs() << "Estimated stack frame size: " << CFSize << " bytes.\n");
bool BigStack = (CFSize >= 256);
if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
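The 256-byte threshold above is tied to AArch64 addressing: unscaled loads and stores (LDUR/STUR) take a signed 9-bit byte offset, so anything past that range may need a scratch register to form the address, which is why a large estimated frame forces extra callee saves or an emergency spill slot. The encoding check, expressed directly (illustrative):

// Does an SP-relative byte offset fit the signed 9-bit unscaled form?
bool fitsUnscaledImm9(int Offset) {
  return Offset >= -256 && Offset <= 255;
}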
@@ -1180,7 +1181,7 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
// an emergency spill slot.
if (!ExtraCSSpill) {
const TargetRegisterClass *RC = &AArch64::GPR64RegClass;
- int FI = MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), false);
+ int FI = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), false);
RS->addScavengingFrameIndex(FI);
DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
<< " as the emergency spill slot.\n");