Diffstat (limited to 'lib/Target/ARM/ARMLoadStoreOptimizer.cpp')
-rw-r--r--  lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 300
1 files changed, 280 insertions, 20 deletions
diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 684ecb4..59cf125 100644
--- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -17,19 +17,22 @@
 #include "ARMAddressingModes.h"
 #include "ARMMachineFunctionInfo.h"
 #include "ARMRegisterInfo.h"
+#include "llvm/DerivedTypes.h"
 #include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 using namespace llvm;
@@ -39,6 +42,12 @@ STATISTIC(NumSTMGened , "Number of stm instructions generated");
 STATISTIC(NumFLDMGened, "Number of fldm instructions generated");
 STATISTIC(NumFSTMGened, "Number of fstm instructions generated");
 STATISTIC(NumLdStMoved, "Number of load / store instructions moved");
+STATISTIC(NumLDRDFormed,"Number of ldrd created before allocation");
+STATISTIC(NumSTRDFormed,"Number of strd created before allocation");
+STATISTIC(NumLDRD2LDM,  "Number of ldrd instructions turned back into ldm");
+STATISTIC(NumSTRD2STM,  "Number of strd instructions turned back into stm");
+STATISTIC(NumLDRD2LDR,  "Number of ldrd instructions turned back into ldr's");
+STATISTIC(NumSTRD2STR,  "Number of strd instructions turned back into str's");

 /// ARMAllocLoadStoreOpt - Post- register allocation pass the combine
 /// load / store instructions to form ldm / stm instructions.
@@ -82,6 +91,8 @@ namespace {
                         SmallVector<MachineBasicBlock::iterator, 4> &Merges);

     void AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps);
+    bool FixInvalidRegPairOp(MachineBasicBlock &MBB,
+                             MachineBasicBlock::iterator &MBBI);
     bool LoadStoreMultipleOpti(MachineBasicBlock &MBB);
     bool MergeReturnIntoLDM(MachineBasicBlock &MBB);
   };
@@ -586,13 +597,19 @@ void ARMLoadStoreOpt::AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps) {
 static int getMemoryOpOffset(const MachineInstr *MI) {
   int Opcode = MI->getOpcode();
   bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
+  bool isAM3 = Opcode == ARM::LDRD || Opcode == ARM::STRD;
   unsigned NumOperands = MI->getDesc().getNumOperands();
   unsigned OffField = MI->getOperand(NumOperands-3).getImm();
   int Offset = isAM2
-    ? ARM_AM::getAM2Offset(OffField) : ARM_AM::getAM5Offset(OffField) * 4;
+    ? ARM_AM::getAM2Offset(OffField)
+    : (isAM3 ? ARM_AM::getAM3Offset(OffField)
+             : ARM_AM::getAM5Offset(OffField) * 4);
   if (isAM2) {
     if (ARM_AM::getAM2Op(OffField) == ARM_AM::sub)
       Offset = -Offset;
+  } else if (isAM3) {
+    if (ARM_AM::getAM3Op(OffField) == ARM_AM::sub)
+      Offset = -Offset;
   } else {
     if (ARM_AM::getAM5Op(OffField) == ARM_AM::sub)
       Offset = -Offset;
@@ -600,6 +617,120 @@ static int getMemoryOpOffset(const MachineInstr *MI) {
   return Offset;
 }

+static void InsertLDR_STR(MachineBasicBlock &MBB,
+                          MachineBasicBlock::iterator &MBBI,
+                          int OffImm, bool isDef,
+                          DebugLoc dl, unsigned NewOpc,
+                          unsigned Reg, bool RegDeadKill,
+                          unsigned BaseReg, bool BaseKill,
+                          unsigned OffReg, bool OffKill,
+                          ARMCC::CondCodes Pred, unsigned PredReg,
+                          const TargetInstrInfo *TII) {
+  unsigned Offset;
+  if (OffImm < 0)
+    Offset = ARM_AM::getAM2Opc(ARM_AM::sub, -OffImm, ARM_AM::no_shift);
+  else
+    Offset = ARM_AM::getAM2Opc(ARM_AM::add, OffImm, ARM_AM::no_shift);
+  if (isDef)
+    BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
+      .addReg(Reg, getDefRegState(true) | getDeadRegState(RegDeadKill))
+      .addReg(BaseReg, getKillRegState(BaseKill))
+      .addReg(OffReg, getKillRegState(OffKill))
+      .addImm(Offset)
+      .addImm(Pred).addReg(PredReg);
+  else
+    BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
+      .addReg(Reg, getKillRegState(RegDeadKill))
+      .addReg(BaseReg, getKillRegState(BaseKill))
+      .addReg(OffReg, getKillRegState(OffKill))
+      .addImm(Offset)
+      .addImm(Pred).addReg(PredReg);
+}
+
+bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
+                                          MachineBasicBlock::iterator &MBBI) {
+  MachineInstr *MI = &*MBBI;
+  unsigned Opcode = MI->getOpcode();
+  if (Opcode == ARM::LDRD || Opcode == ARM::STRD) {
+    unsigned EvenReg = MI->getOperand(0).getReg();
+    unsigned OddReg  = MI->getOperand(1).getReg();
+    unsigned EvenRegNum = TRI->getDwarfRegNum(EvenReg, false);
+    unsigned OddRegNum  = TRI->getDwarfRegNum(OddReg, false);
+    if ((EvenRegNum & 1) == 0 && (EvenRegNum + 1) == OddRegNum)
+      return false;
+
+    bool isLd = Opcode == ARM::LDRD;
+    bool EvenDeadKill = isLd ?
+      MI->getOperand(0).isDead() : MI->getOperand(0).isKill();
+    bool OddDeadKill  = isLd ?
+      MI->getOperand(1).isDead() : MI->getOperand(1).isKill();
+    const MachineOperand &BaseOp = MI->getOperand(2);
+    unsigned BaseReg = BaseOp.getReg();
+    bool BaseKill = BaseOp.isKill();
+    const MachineOperand &OffOp = MI->getOperand(3);
+    unsigned OffReg = OffOp.getReg();
+    bool OffKill = OffOp.isKill();
+    int OffImm = getMemoryOpOffset(MI);
+    unsigned PredReg = 0;
+    ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
+
+    if (OddRegNum > EvenRegNum && OffReg == 0 && OffImm == 0) {
+      // Ascending register numbers and no offset. It's safe to change it to a
+      // ldm or stm.
+      unsigned NewOpc = (Opcode == ARM::LDRD) ? ARM::LDM : ARM::STM;
+      if (isLd) {
+        BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
+          .addReg(BaseReg, getKillRegState(BaseKill))
+          .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
+          .addImm(Pred).addReg(PredReg)
+          .addReg(EvenReg, getDefRegState(isLd) | getDeadRegState(EvenDeadKill))
+          .addReg(OddReg, getDefRegState(isLd) | getDeadRegState(OddDeadKill));
+        ++NumLDRD2LDM;
+      } else {
+        BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
+          .addReg(BaseReg, getKillRegState(BaseKill))
+          .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
+          .addImm(Pred).addReg(PredReg)
+          .addReg(EvenReg, getKillRegState(EvenDeadKill))
+          .addReg(OddReg, getKillRegState(OddDeadKill));
+        ++NumSTRD2STM;
+      }
+    } else {
+      // Split into two instructions.
+      unsigned NewOpc = (Opcode == ARM::LDRD) ? ARM::LDR : ARM::STR;
+      DebugLoc dl = MBBI->getDebugLoc();
+      // If this is a load and base register is killed, it may have been
+      // re-defed by the load, make sure the first load does not clobber it.
+      if (isLd &&
+          (BaseKill || OffKill) &&
+          (TRI->regsOverlap(EvenReg, BaseReg) ||
+           (OffReg && TRI->regsOverlap(EvenReg, OffReg)))) {
+        assert(!TRI->regsOverlap(OddReg, BaseReg) &&
+               (!OffReg || !TRI->regsOverlap(OddReg, OffReg)));
+        InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc, OddReg, OddDeadKill,
+                      BaseReg, false, OffReg, false, Pred, PredReg, TII);
+        InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc, EvenReg, EvenDeadKill,
+                      BaseReg, BaseKill, OffReg, OffKill, Pred, PredReg, TII);
+      } else {
+        InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
+                      EvenReg, EvenDeadKill, BaseReg, false, OffReg, false,
+                      Pred, PredReg, TII);
+        InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc,
+                      OddReg, OddDeadKill, BaseReg, BaseKill, OffReg, OffKill,
+                      Pred, PredReg, TII);
+      }
+      if (isLd)
+        ++NumLDRD2LDR;
+      else
+        ++NumSTRD2STR;
+    }
+
+    MBBI = prior(MBBI);
+    MBB.erase(MI);
+  }
+  return false;
+}
+
 /// LoadStoreMultipleOpti - An optimization pass to turn multiple LDR / STR
 /// ops of the same base and incrementing offset into LDM / STM ops.
 bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
@@ -617,6 +748,9 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
   RS->enterBasicBlock(&MBB);
   MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
   while (MBBI != E) {
+    if (FixInvalidRegPairOp(MBB, MBBI))
+      continue;
+
     bool Advance = false;
     bool TryMerge = false;
     bool Clobber = false;
@@ -817,8 +951,10 @@ namespace {
     static char ID;
     ARMPreAllocLoadStoreOpt() : MachineFunctionPass(&ID) {}

+    const TargetData *TD;
     const TargetInstrInfo *TII;
     const TargetRegisterInfo *TRI;
+    const ARMSubtarget *STI;
     MachineRegisterInfo *MRI;

     virtual bool runOnMachineFunction(MachineFunction &Fn);
@@ -828,6 +964,11 @@ namespace {
     }

   private:
+    bool CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1, DebugLoc &dl,
+                          unsigned &NewOpc, unsigned &EvenReg,
+                          unsigned &OddReg, unsigned &BaseReg,
+                          unsigned &OffReg, unsigned &Offset,
+                          unsigned &PredReg, ARMCC::CondCodes &Pred);
     bool RescheduleOps(MachineBasicBlock *MBB,
                        SmallVector<MachineInstr*, 4> &Ops,
                        unsigned Base, bool isLd,
@@ -838,8 +979,10 @@ namespace {
 }

 bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
+  TD  = Fn.getTarget().getTargetData();
   TII = Fn.getTarget().getInstrInfo();
   TRI = Fn.getTarget().getRegisterInfo();
+  STI = &Fn.getTarget().getSubtarget<ARMSubtarget>();
   MRI = &Fn.getRegInfo();

   bool Modified = false;
@@ -850,15 +993,19 @@ bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
   return Modified;
 }

-static bool IsSafeToMove(bool isLd, unsigned Base,
-                         MachineBasicBlock::iterator I,
-                         MachineBasicBlock::iterator E,
-                         SmallPtrSet<MachineInstr*, 4> MoveOps,
-                         const TargetRegisterInfo *TRI) {
+static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
+                                      MachineBasicBlock::iterator I,
+                                      MachineBasicBlock::iterator E,
+                                      SmallPtrSet<MachineInstr*, 4> &MemOps,
+                                      SmallSet<unsigned, 4> &MemRegs,
+                                      const TargetRegisterInfo *TRI) {
   // Are there stores / loads / calls between them?
   // FIXME: This is overly conservative. We should make use of alias information
   // some day.
+  SmallSet<unsigned, 4> AddedRegPressure;
   while (++I != E) {
+    if (MemOps.count(&*I))
+      continue;
     const TargetInstrDesc &TID = I->getDesc();
     if (TID.isCall() || TID.isTerminator() || TID.hasUnmodeledSideEffects())
       return false;
@@ -871,15 +1018,76 @@ static bool IsSafeToMove(bool isLd, unsigned Base,
       // str r1, [r0]
       // strh r5, [r0]
       // str r4, [r0, #+4]
-      if (TID.mayStore() && !MoveOps.count(&*I))
+      if (TID.mayStore())
         return false;
     }
     for (unsigned j = 0, NumOps = I->getNumOperands(); j != NumOps; ++j) {
       MachineOperand &MO = I->getOperand(j);
-      if (MO.isReg() && MO.isDef() && TRI->regsOverlap(MO.getReg(), Base))
+      if (!MO.isReg())
+        continue;
+      unsigned Reg = MO.getReg();
+      if (MO.isDef() && TRI->regsOverlap(Reg, Base))
         return false;
+      if (Reg != Base && !MemRegs.count(Reg))
+        AddedRegPressure.insert(Reg);
     }
   }
+
+  // Estimate register pressure increase due to the transformation.
+  if (MemRegs.size() <= 4)
+    // Ok if we are moving small number of instructions.
+    return true;
+  return AddedRegPressure.size() <= MemRegs.size() * 2;
+}
+
+bool
+ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
+                                          DebugLoc &dl,
+                                          unsigned &NewOpc, unsigned &EvenReg,
+                                          unsigned &OddReg, unsigned &BaseReg,
+                                          unsigned &OffReg, unsigned &Offset,
+                                          unsigned &PredReg,
+                                          ARMCC::CondCodes &Pred) {
+  // FIXME: FLDS / FSTS -> FLDD / FSTD
+  unsigned Opcode = Op0->getOpcode();
+  if (Opcode == ARM::LDR)
+    NewOpc = ARM::LDRD;
+  else if (Opcode == ARM::STR)
+    NewOpc = ARM::STRD;
+  else
+    return 0;
+
+  // Must sure the base address satisfies i64 ld / st alignment requirement.
+  if (!Op0->hasOneMemOperand() ||
+      !Op0->memoperands_begin()->getValue() ||
+      Op0->memoperands_begin()->isVolatile())
+    return false;
+
+  unsigned Align = Op0->memoperands_begin()->getAlignment();
+  unsigned ReqAlign = STI->hasV6Ops()
+    ? TD->getPrefTypeAlignment(Type::Int64Ty) : 8; // Pre-v6 need 8-byte align
+  if (Align < ReqAlign)
+    return false;
+
+  // Then make sure the immediate offset fits.
+  int OffImm = getMemoryOpOffset(Op0);
+  ARM_AM::AddrOpc AddSub = ARM_AM::add;
+  if (OffImm < 0) {
+    AddSub = ARM_AM::sub;
+    OffImm = - OffImm;
+  }
+  if (OffImm >= 256) // 8 bits
+    return false;
+  Offset = ARM_AM::getAM3Opc(AddSub, OffImm);
+
+  EvenReg = Op0->getOperand(0).getReg();
+  OddReg  = Op1->getOperand(0).getReg();
+  if (EvenReg == OddReg)
+    return false;
+  BaseReg = Op0->getOperand(1).getReg();
+  OffReg  = Op0->getOperand(2).getReg();
+  Pred = getInstrPredicate(Op0, PredReg);
+  dl = Op0->getDebugLoc();
   return true;
 }

@@ -902,6 +1110,7 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
   MachineInstr *FirstOp = 0;
   MachineInstr *LastOp = 0;
   int LastOffset = 0;
+  unsigned LastOpcode = 0;
   unsigned LastBytes = 0;
   unsigned NumMove = 0;
   for (int i = Ops.size() - 1; i >= 0; --i) {
@@ -916,6 +1125,10 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
       LastOp = Op;
     }

+    unsigned Opcode = Op->getOpcode();
+    if (LastOpcode && Opcode != LastOpcode)
+      break;
+
     int Offset = getMemoryOpOffset(Op);
     unsigned Bytes = getLSMultipleTransferSize(Op);
     if (LastBytes) {
@@ -924,34 +1137,80 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
     }
     LastOffset = Offset;
     LastBytes = Bytes;
-    if (++NumMove == 4)
+    LastOpcode = Opcode;
+    if (++NumMove == 8) // FIXME: Tune
       break;
   }

   if (NumMove <= 1)
     Ops.pop_back();
   else {
-    SmallPtrSet<MachineInstr*, 4> MoveOps;
-    for (int i = NumMove-1; i >= 0; --i)
-      MoveOps.insert(Ops[i]);
+    SmallPtrSet<MachineInstr*, 4> MemOps;
+    SmallSet<unsigned, 4> MemRegs;
+    for (int i = NumMove-1; i >= 0; --i) {
+      MemOps.insert(Ops[i]);
+      MemRegs.insert(Ops[i]->getOperand(0).getReg());
+    }

     // Be conservative, if the instructions are too far apart, don't
     // move them. We want to limit the increase of register pressure.
-    bool DoMove = (LastLoc - FirstLoc) < NumMove*4;
+    bool DoMove = (LastLoc - FirstLoc) <= NumMove*4; // FIXME: Tune this.
     if (DoMove)
-      DoMove = IsSafeToMove(isLd, Base, FirstOp, LastOp, MoveOps, TRI);
+      DoMove = IsSafeAndProfitableToMove(isLd, Base, FirstOp, LastOp,
+                                         MemOps, MemRegs, TRI);
     if (!DoMove) {
       for (unsigned i = 0; i != NumMove; ++i)
         Ops.pop_back();
     } else {
       // This is the new location for the loads / stores.
       MachineBasicBlock::iterator InsertPos = isLd ? FirstOp : LastOp;
-      while (InsertPos != MBB->end() && MoveOps.count(InsertPos))
+      while (InsertPos != MBB->end() && MemOps.count(InsertPos))
         ++InsertPos;
-      for (unsigned i = 0; i != NumMove; ++i) {
-        MachineInstr *Op = Ops.back();
+
+      // If we are moving a pair of loads / stores, see if it makes sense
+      // to try to allocate a pair of registers that can form register pairs.
+      MachineInstr *Op0 = Ops.back();
+      MachineInstr *Op1 = Ops[Ops.size()-2];
+      unsigned EvenReg = 0, OddReg = 0;
+      unsigned BaseReg = 0, OffReg = 0, PredReg = 0;
+      ARMCC::CondCodes Pred = ARMCC::AL;
+      unsigned NewOpc = 0;
+      unsigned Offset = 0;
+      DebugLoc dl;
+      if (NumMove == 2 && CanFormLdStDWord(Op0, Op1, dl, NewOpc,
+                                           EvenReg, OddReg, BaseReg, OffReg,
+                                           Offset, PredReg, Pred)) {
+        Ops.pop_back();
         Ops.pop_back();
-        MBB->splice(InsertPos, MBB, Op);
+
+        // Form the pair instruction.
+        if (isLd) {
+          BuildMI(*MBB, InsertPos, dl, TII->get(NewOpc))
+            .addReg(EvenReg, RegState::Define)
+            .addReg(OddReg, RegState::Define)
+            .addReg(BaseReg).addReg(0).addImm(Offset)
+            .addImm(Pred).addReg(PredReg);
+          ++NumLDRDFormed;
+        } else {
+          BuildMI(*MBB, InsertPos, dl, TII->get(NewOpc))
+            .addReg(EvenReg)
+            .addReg(OddReg)
+            .addReg(BaseReg).addReg(0).addImm(Offset)
+            .addImm(Pred).addReg(PredReg);
+          ++NumSTRDFormed;
+        }
+        MBB->erase(Op0);
+        MBB->erase(Op1);
+
+        // Add register allocation hints to form register pairs.
+        MRI->setRegAllocationHint(EvenReg, ARMRI::RegPairEven, OddReg);
+        MRI->setRegAllocationHint(OddReg,  ARMRI::RegPairOdd, EvenReg);
+      } else {
+        for (unsigned i = 0; i != NumMove; ++i) {
+          MachineInstr *Op = Ops.back();
+          Ops.pop_back();
+          MBB->splice(InsertPos, MBB, Op);
+        }
       }

       NumLdStMoved += NumMove;
@@ -1039,7 +1298,8 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
       }

       if (StopHere) {
-        // Found a duplicate (a base+offset combination that's seen earlier). Backtrack.
+        // Found a duplicate (a base+offset combination that's seen earlier).
+        // Backtrack.
         --Loc;
         break;
       }
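
Note on the legality checks in this patch: FixInvalidRegPairOp and CanFormLdStDWord both hinge on two ARM constraints — an LDRD/STRD register pair must be an even register followed by the next consecutive (odd) register, and addressing mode 3 only encodes an 8-bit immediate offset with an add/sub bit. A minimal standalone sketch of those two checks (plain C++, not the LLVM API; the helper names are illustrative only):

    #include <cassert>
    #include <cstdio>

    // LDRD/STRD require the first transfer register to be even-numbered and
    // the second to be the next consecutive register, e.g. {r0,r1} or {r2,r3}.
    static bool isLegalRegPair(unsigned EvenRegNum, unsigned OddRegNum) {
      return (EvenRegNum & 1) == 0 && OddRegNum == EvenRegNum + 1;
    }

    // Addressing mode 3 carries only an 8-bit unsigned offset plus an add/sub
    // flag, so the byte offset must have magnitude less than 256.
    static bool fitsAM3Offset(int OffImm) {
      unsigned Magnitude = OffImm < 0 ? -OffImm : OffImm;
      return Magnitude < 256;
    }

    int main() {
      assert(isLegalRegPair(0, 1));    // r0/r1: OK
      assert(!isLegalRegPair(1, 2));   // r1/r2: first register is odd
      assert(!isLegalRegPair(2, 4));   // r2/r4: not consecutive
      assert(fitsAM3Offset(252) && fitsAM3Offset(-8) && !fitsAM3Offset(256));
      std::puts("pair and offset checks behave as expected");
      return 0;
    }

When the post-allocation pass finds an LDRD/STRD whose registers fail the pair check, it either rewrites it as an LDM/STM (ascending registers, zero offset) or splits it into two LDR/STR instructions, as in the hunks above.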
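The pre-allocation rescheduling change also replaces the old "always move up to 4 ops" rule with a profitability gate: small groups are always moved, larger groups only while the extra registers made live across the gap stay within twice the number of grouped memory registers. A standalone model of that gate (again, names are mine, not LLVM's):

    #include <cassert>

    // Mirrors the final test in IsSafeAndProfitableToMove above:
    // MemRegs.size() <= 4, or AddedRegPressure.size() <= MemRegs.size() * 2.
    static bool profitableToMove(unsigned NumMemRegs, unsigned NumAddedLiveRegs) {
      if (NumMemRegs <= 4)
        return true;                        // small groups are always OK
      return NumAddedLiveRegs <= NumMemRegs * 2;
    }

    int main() {
      assert(profitableToMove(2, 7));       // small group: always OK
      assert(profitableToMove(5, 10));      // 10 <= 5 * 2
      assert(!profitableToMove(5, 11));     // pressure increase too large
      return 0;
    }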