path: root/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
Diffstat (limited to 'contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp')
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp  197
1 file changed, 124 insertions(+), 73 deletions(-)
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 658af83..0288db9 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -32,16 +32,19 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "arm-instrinfo"
+
#define GET_INSTRINFO_CTOR_DTOR
#include "ARMGenInstrInfo.inc"
-using namespace llvm;
-
static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
cl::desc("Enable ARM 2-addr to 3-addr conv"));
@@ -100,14 +103,15 @@ ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
// Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrImpl
// currently defaults to no prepass hazard recognizer.
-ScheduleHazardRecognizer *ARMBaseInstrInfo::
-CreateTargetHazardRecognizer(const TargetMachine *TM,
- const ScheduleDAG *DAG) const {
+ScheduleHazardRecognizer *
+ARMBaseInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
+ const ScheduleDAG *DAG) const {
if (usePreRAHazardRecognizer()) {
- const InstrItineraryData *II = TM->getInstrItineraryData();
+ const InstrItineraryData *II =
+ &static_cast<const ARMSubtarget *>(STI)->getInstrItineraryData();
return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
}
- return TargetInstrInfo::CreateTargetHazardRecognizer(TM, DAG);
+ return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
}
ScheduleHazardRecognizer *ARMBaseInstrInfo::
@@ -125,14 +129,14 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
// FIXME: Thumb2 support.
if (!EnableARM3Addr)
- return NULL;
+ return nullptr;
MachineInstr *MI = MBBI;
MachineFunction &MF = *MI->getParent()->getParent();
uint64_t TSFlags = MI->getDesc().TSFlags;
bool isPre = false;
switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
- default: return NULL;
+ default: return nullptr;
case ARMII::IndexModePre:
isPre = true;
break;
@@ -144,10 +148,10 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
// operation.
unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
if (MemOpc == 0)
- return NULL;
+ return nullptr;
- MachineInstr *UpdateMI = NULL;
- MachineInstr *MemMI = NULL;
+ MachineInstr *UpdateMI = nullptr;
+ MachineInstr *MemMI = nullptr;
unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
const MCInstrDesc &MCID = MI->getDesc();
unsigned NumOps = MCID.getNumOperands();
@@ -169,7 +173,7 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (ARM_AM::getSOImmVal(Amt) == -1)
// Can't encode it in a so_imm operand. This transformation will
// add more than 1 instruction. Abandon!
- return NULL;
+ return nullptr;
UpdateMI = BuildMI(MF, MI->getDebugLoc(),
get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
.addReg(BaseReg).addImm(Amt)
@@ -273,8 +277,8 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
- TBB = 0;
- FBB = 0;
+ TBB = nullptr;
+ FBB = nullptr;
MachineBasicBlock::iterator I = MBB.end();
if (I == MBB.begin())
@@ -283,7 +287,7 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
// Walk backwards from the end of the basic block until the branch is
// analyzed or we give up.
- while (isPredicated(I) || I->isTerminator()) {
+ while (isPredicated(I) || I->isTerminator() || I->isDebugValue()) {
// Flag to be raised on unanalyzeable instructions. This is useful in cases
// where we want to clean up on the end of the basic block before we bail
@@ -331,12 +335,12 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
I->isReturn())) {
// Forget any previous condition branch information - it no longer applies.
Cond.clear();
- FBB = 0;
+ FBB = nullptr;
// If we can modify the function, delete everything below this
// unconditional branch.
if (AllowModify) {
- MachineBasicBlock::iterator DI = llvm::next(I);
+ MachineBasicBlock::iterator DI = std::next(I);
while (DI != MBB.end()) {
MachineInstr *InstToDelete = DI;
++DI;
@@ -405,7 +409,7 @@ ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
assert((Cond.size() == 2 || Cond.size() == 0) &&
"ARM branch conditions have two components!");
- if (FBB == 0) {
+ if (!FBB) {
if (Cond.empty()) { // Unconditional branch?
if (isThumb)
BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).addImm(ARMCC::AL).addReg(0);
@@ -535,6 +539,22 @@ bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
return true;
}
+namespace llvm {
+template <> bool IsCPSRDead<MachineInstr>(MachineInstr *MI) {
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || MO.isUndef() || MO.isUse())
+ continue;
+ if (MO.getReg() != ARM::CPSR)
+ continue;
+ if (!MO.isDead())
+ return false;
+ }
+ // all definitions of CPSR are dead
+ return true;
+}
+}
+
/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
LLVM_ATTRIBUTE_NOINLINE
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
@@ -559,15 +579,10 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
// If this machine instr is an inline asm, measure it.
if (MI->getOpcode() == ARM::INLINEASM)
return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
- if (MI->isLabel())
- return 0;
unsigned Opc = MI->getOpcode();
switch (Opc) {
- case TargetOpcode::IMPLICIT_DEF:
- case TargetOpcode::KILL:
- case TargetOpcode::PROLOG_LABEL:
- case TargetOpcode::EH_LABEL:
- case TargetOpcode::DBG_VALUE:
+ default:
+ // pseudo-instruction sizes are zero.
return 0;
case TargetOpcode::BUNDLE:
return getInstBundleLength(MI);
@@ -611,7 +626,7 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
MI->getOperand(NumOps - (MI->isPredicable() ? 3 : 2));
unsigned JTI = JTOP.getIndex();
const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
- assert(MJTI != 0);
+ assert(MJTI != nullptr);
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
assert(JTI < JT.size());
// Thumb instructions are 2 byte aligned, but JT entries are 4 byte
@@ -630,9 +645,6 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
++NumEntries;
return NumEntries * EntrySize + InstSize;
}
- default:
- // Otherwise, pseudo-instruction sizes are zero.
- return 0;
}
}
@@ -1242,7 +1254,8 @@ static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);
unsigned PCLabelId = AFI->createPICLabelUId();
- ARMConstantPoolValue *NewCPV = 0;
+ ARMConstantPoolValue *NewCPV = nullptr;
+
// FIXME: The below assumes PIC relocation model and that the function
// is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
// zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR
@@ -1325,10 +1338,11 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
Opcode == ARM::t2LDRpci_pic ||
Opcode == ARM::tLDRpci ||
Opcode == ARM::tLDRpci_pic ||
- Opcode == ARM::MOV_ga_dyn ||
+ Opcode == ARM::LDRLIT_ga_pcrel ||
+ Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
+ Opcode == ARM::tLDRLIT_ga_pcrel ||
Opcode == ARM::MOV_ga_pcrel ||
Opcode == ARM::MOV_ga_pcrel_ldr ||
- Opcode == ARM::t2MOV_ga_dyn ||
Opcode == ARM::t2MOV_ga_pcrel) {
if (MI1->getOpcode() != Opcode)
return false;
@@ -1340,10 +1354,11 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
if (MO0.getOffset() != MO1.getOffset())
return false;
- if (Opcode == ARM::MOV_ga_dyn ||
+ if (Opcode == ARM::LDRLIT_ga_pcrel ||
+ Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
+ Opcode == ARM::tLDRLIT_ga_pcrel ||
Opcode == ARM::MOV_ga_pcrel ||
Opcode == ARM::MOV_ga_pcrel_ldr ||
- Opcode == ARM::t2MOV_ga_dyn ||
Opcode == ARM::t2MOV_ga_pcrel)
// Ignore the PC labels.
return MO0.getGlobal() == MO1.getGlobal();
@@ -1534,7 +1549,7 @@ bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
return false;
// Terminators and labels can't be scheduled around.
- if (MI->isTerminator() || MI->isLabel())
+ if (MI->isTerminator() || MI->isPosition())
return true;
// Treat the start of the IT block as a scheduling boundary, but schedule
@@ -1650,10 +1665,10 @@ ARMBaseInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg);
// MOVCC AL can't be inverted. Shouldn't happen.
if (CC == ARMCC::AL || PredReg != ARM::CPSR)
- return NULL;
+ return nullptr;
MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
if (!MI)
- return NULL;
+ return nullptr;
// After swapping the MOVCC operands, also invert the condition.
MI->getOperand(MI->findFirstPredOperandIdx())
.setImm(ARMCC::getOppositeCondition(CC));
@@ -1669,35 +1684,36 @@ static MachineInstr *canFoldIntoMOVCC(unsigned Reg,
const MachineRegisterInfo &MRI,
const TargetInstrInfo *TII) {
if (!TargetRegisterInfo::isVirtualRegister(Reg))
- return 0;
+ return nullptr;
if (!MRI.hasOneNonDBGUse(Reg))
- return 0;
+ return nullptr;
MachineInstr *MI = MRI.getVRegDef(Reg);
if (!MI)
- return 0;
+ return nullptr;
// MI is folded into the MOVCC by predicating it.
if (!MI->isPredicable())
- return 0;
+ return nullptr;
// Check if MI has any non-dead defs or physreg uses. This also detects
// predicated instructions which will be reading CPSR.
for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
// Reject frame index operands, PEI can't handle the predicated pseudos.
if (MO.isFI() || MO.isCPI() || MO.isJTI())
- return 0;
+ return nullptr;
if (!MO.isReg())
continue;
// MI can't have any tied operands, that would conflict with predication.
if (MO.isTied())
- return 0;
+ return nullptr;
if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
- return 0;
+ return nullptr;
if (MO.isDef() && !MO.isDead())
- return 0;
+ return nullptr;
}
bool DontMoveAcrossStores = true;
- if (!MI->isSafeToMove(TII, /* AliasAnalysis = */ 0, DontMoveAcrossStores))
- return 0;
+ if (!MI->isSafeToMove(TII, /* AliasAnalysis = */ nullptr,
+ DontMoveAcrossStores))
+ return nullptr;
return MI;
}
@@ -1732,14 +1748,14 @@ MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
if (!DefMI)
DefMI = canFoldIntoMOVCC(MI->getOperand(1).getReg(), MRI, this);
if (!DefMI)
- return 0;
+ return nullptr;
// Find new register class to use.
MachineOperand FalseReg = MI->getOperand(Invert ? 2 : 1);
unsigned DestReg = MI->getOperand(0).getReg();
const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
if (!MRI.constrainRegClass(DestReg, PreviousClass))
- return 0;
+ return nullptr;
// Create a new predicated version of DefMI.
// Rfalse is the first use.
@@ -1857,12 +1873,22 @@ void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
}
}
-bool llvm::tryFoldSPUpdateIntoPushPop(MachineFunction &MF,
- MachineInstr *MI,
+static bool isAnySubRegLive(unsigned Reg, const TargetRegisterInfo *TRI,
+ MachineInstr *MI) {
+ for (MCSubRegIterator Subreg(Reg, TRI, /* IncludeSelf */ true);
+ Subreg.isValid(); ++Subreg)
+ if (MI->getParent()->computeRegisterLiveness(TRI, *Subreg, MI) !=
+ MachineBasicBlock::LQR_Dead)
+ return true;
+ return false;
+}
+bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
+ MachineFunction &MF, MachineInstr *MI,
unsigned NumBytes) {
// This optimisation potentially adds lots of load and store
// micro-operations, it's only really a great benefit to code-size.
- if (!MF.getFunction()->hasFnAttribute(Attribute::MinSize))
+ if (!MF.getFunction()->getAttributes().hasAttribute(
+ AttributeSet::FunctionIndex, Attribute::MinSize))
return false;
// If only one register is pushed/popped, LLVM can use an LDR/STR
@@ -1911,7 +1937,6 @@ bool llvm::tryFoldSPUpdateIntoPushPop(MachineFunction &MF,
for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i)
RegList.push_back(MI->getOperand(i));
- MachineBasicBlock *MBB = MI->getParent();
const TargetRegisterInfo *TRI = MF.getRegInfo().getTargetRegisterInfo();
const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);
@@ -1932,9 +1957,11 @@ bool llvm::tryFoldSPUpdateIntoPushPop(MachineFunction &MF,
// registers live within the function we might clobber a return value
// register; the other way a register can be live here is if it's
// callee-saved.
+ // TODO: Currently, computeRegisterLiveness() does not report "live" if a
+ // sub reg is live. When computeRegisterLiveness() works for sub reg, it
+ // can replace isAnySubRegLive().
if (isCalleeSavedRegister(CurReg, CSRegs) ||
- MBB->computeRegisterLiveness(TRI, CurReg, MI) !=
- MachineBasicBlock::LQR_Dead) {
+ isAnySubRegLive(CurReg, TRI, MI)) {
// VFP pops don't allow holes in the register list, so any skip is fatal
// for our transformation. GPR pops do, so we should just keep looking.
if (IsVFPPushPop)
@@ -2159,7 +2186,7 @@ static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg,
// Walk down one instruction which is potentially an 'and'.
const MachineInstr &Copy = *MI;
MachineBasicBlock::iterator AND(
- llvm::next(MachineBasicBlock::iterator(MI)));
+ std::next(MachineBasicBlock::iterator(MI)));
if (AND == MI->getParent()->end()) return false;
MI = AND;
return isSuitableForMask(MI, Copy.getOperand(0).getReg(),
@@ -2235,9 +2262,10 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// Masked compares sometimes use the same register as the corresponding 'and'.
if (CmpMask != ~0) {
if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(MI)) {
- MI = 0;
- for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(SrcReg),
- UE = MRI->use_end(); UI != UE; ++UI) {
+ MI = nullptr;
+ for (MachineRegisterInfo::use_instr_iterator
+ UI = MRI->use_instr_begin(SrcReg), UE = MRI->use_instr_end();
+ UI != UE; ++UI) {
if (UI->getParent() != CmpInstr->getParent()) continue;
MachineInstr *PotentialAND = &*UI;
if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true) ||
@@ -2261,17 +2289,17 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// One is MI, the other is a SUB instruction.
// For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
// For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue).
- MachineInstr *Sub = NULL;
+ MachineInstr *Sub = nullptr;
if (SrcReg2 != 0)
// MI is not a candidate for CMPrr.
- MI = NULL;
+ MI = nullptr;
else if (MI->getParent() != CmpInstr->getParent() || CmpValue != 0) {
// Conservatively refuse to convert an instruction which isn't in the same
// BB as the comparison.
// For CMPri, we need to check Sub, thus we can't return here.
if (CmpInstr->getOpcode() == ARM::CMPri ||
CmpInstr->getOpcode() == ARM::t2CMPri)
- MI = NULL;
+ MI = nullptr;
else
return false;
}
@@ -2947,7 +2975,7 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
break;
}
return UOps;
- } else if (Subtarget.isCortexA8()) {
+ } else if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
if (NumRegs < 4)
return 2;
// 4 registers would be issued: 2, 2.
@@ -2984,7 +3012,7 @@ ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
return ItinData->getOperandCycle(DefClass, DefIdx);
int DefCycle;
- if (Subtarget.isCortexA8()) {
+ if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
// (regno / 2) + (regno % 2) + 1
DefCycle = RegNo / 2 + 1;
if (RegNo % 2)
@@ -3025,7 +3053,7 @@ ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
return ItinData->getOperandCycle(DefClass, DefIdx);
int DefCycle;
- if (Subtarget.isCortexA8()) {
+ if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
// 4 registers would be issued: 1, 2, 1.
// 5 registers would be issued: 1, 2, 2.
DefCycle = RegNo / 2;
@@ -3059,7 +3087,7 @@ ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
return ItinData->getOperandCycle(UseClass, UseIdx);
int UseCycle;
- if (Subtarget.isCortexA8()) {
+ if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
// (regno / 2) + (regno % 2) + 1
UseCycle = RegNo / 2 + 1;
if (RegNo % 2)
@@ -3099,7 +3127,7 @@ ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
return ItinData->getOperandCycle(UseClass, UseIdx);
int UseCycle;
- if (Subtarget.isCortexA8()) {
+ if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
UseCycle = RegNo / 2;
if (UseCycle < 2)
UseCycle = 2;
@@ -3236,8 +3264,7 @@ static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
Dist = 0;
MachineBasicBlock::const_iterator I = MI; ++I;
- MachineBasicBlock::const_instr_iterator II =
- llvm::prior(I.getInstrIterator());
+ MachineBasicBlock::const_instr_iterator II = std::prev(I.getInstrIterator());
assert(II->isInsideBundle() && "Empty bundle?");
int Idx = -1;
@@ -3276,7 +3303,7 @@ static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
if (Idx == -1) {
Dist = 0;
- return 0;
+ return nullptr;
}
UseIdx = Idx;
@@ -3290,7 +3317,7 @@ static int adjustDefLatency(const ARMSubtarget &Subtarget,
const MachineInstr *DefMI,
const MCInstrDesc *DefMCID, unsigned DefAlign) {
int Adjust = 0;
- if (Subtarget.isCortexA8() || Subtarget.isLikeA9()) {
+ if (Subtarget.isCortexA8() || Subtarget.isLikeA9() || Subtarget.isCortexA7()) {
// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
// variants are one cycle cheaper.
switch (DefMCID->getOpcode()) {
@@ -3591,7 +3618,8 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
UseMCID, UseIdx, UseAlign);
if (Latency > 1 &&
- (Subtarget.isCortexA8() || Subtarget.isLikeA9())) {
+ (Subtarget.isCortexA8() || Subtarget.isLikeA9() ||
+ Subtarget.isCortexA7())) {
// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
// variants are one cycle cheaper.
switch (DefMCID.getOpcode()) {
@@ -4333,6 +4361,29 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI,
MI->addRegisterKilled(DReg, TRI, true);
}
+void ARMBaseInstrInfo::getUnconditionalBranch(
+ MCInst &Branch, const MCSymbolRefExpr *BranchTarget) const {
+ if (Subtarget.isThumb())
+ Branch.setOpcode(ARM::tB);
+ else if (Subtarget.isThumb2())
+ Branch.setOpcode(ARM::t2B);
+ else
+ Branch.setOpcode(ARM::Bcc);
+
+ Branch.addOperand(MCOperand::CreateExpr(BranchTarget));
+ Branch.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ Branch.addOperand(MCOperand::CreateReg(0));
+}
+
+void ARMBaseInstrInfo::getTrap(MCInst &MI) const {
+ if (Subtarget.isThumb())
+ MI.setOpcode(ARM::tTRAP);
+ else if (Subtarget.useNaClTrap())
+ MI.setOpcode(ARM::TRAPNaCl);
+ else
+ MI.setOpcode(ARM::TRAP);
+}
+
bool ARMBaseInstrInfo::hasNOP() const {
return (Subtarget.getFeatureBits() & ARM::HasV6T2Ops) != 0;
}