Diffstat (limited to 'contrib/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp')
-rw-r--r--  contrib/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp | 365
1 file changed, 294 insertions(+), 71 deletions(-)
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/contrib/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index b514735..7e275e4 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -12,17 +12,20 @@
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//
-#include "AArch64InstructionSelector.h"
#include "AArch64InstrInfo.h"
+#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
@@ -30,19 +33,79 @@
#define DEBUG_TYPE "aarch64-isel"
+#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
+
using namespace llvm;
#ifndef LLVM_BUILD_GLOBAL_ISEL
#error "You shouldn't build this"
#endif
+namespace {
+
+#define GET_GLOBALISEL_PREDICATE_BITSET
+#include "AArch64GenGlobalISel.inc"
+#undef GET_GLOBALISEL_PREDICATE_BITSET
+
+class AArch64InstructionSelector : public InstructionSelector {
+public:
+ AArch64InstructionSelector(const AArch64TargetMachine &TM,
+ const AArch64Subtarget &STI,
+ const AArch64RegisterBankInfo &RBI);
+
+ bool select(MachineInstr &I) const override;
+
+private:
+ /// tblgen-erated 'select' implementation, used as the initial selector for
+ /// the patterns that don't require complex C++.
+ bool selectImpl(MachineInstr &I) const;
+
+ bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
+ MachineRegisterInfo &MRI) const;
+ bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
+ MachineRegisterInfo &MRI) const;
+
+ bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
+ MachineRegisterInfo &MRI) const;
+
+ ComplexRendererFn selectArithImmed(MachineOperand &Root) const;
+
+ const AArch64TargetMachine &TM;
+ const AArch64Subtarget &STI;
+ const AArch64InstrInfo &TII;
+ const AArch64RegisterInfo &TRI;
+ const AArch64RegisterBankInfo &RBI;
+
+#define GET_GLOBALISEL_PREDICATES_DECL
#include "AArch64GenGlobalISel.inc"
+#undef GET_GLOBALISEL_PREDICATES_DECL
+
+// We declare the temporaries used by selectImpl() in the class to minimize the
+// cost of constructing placeholder values.
+#define GET_GLOBALISEL_TEMPORARIES_DECL
+#include "AArch64GenGlobalISel.inc"
+#undef GET_GLOBALISEL_TEMPORARIES_DECL
+};
+
+} // end anonymous namespace
+
+#define GET_GLOBALISEL_IMPL
+#include "AArch64GenGlobalISel.inc"
+#undef GET_GLOBALISEL_IMPL
AArch64InstructionSelector::AArch64InstructionSelector(
const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
const AArch64RegisterBankInfo &RBI)
- : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
- TRI(*STI.getRegisterInfo()), RBI(RBI) {}
+ : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
+ TRI(*STI.getRegisterInfo()), RBI(RBI),
+#define GET_GLOBALISEL_PREDICATES_INIT
+#include "AArch64GenGlobalISel.inc"
+#undef GET_GLOBALISEL_PREDICATES_INIT
+#define GET_GLOBALISEL_TEMPORARIES_INIT
+#include "AArch64GenGlobalISel.inc"
+#undef GET_GLOBALISEL_TEMPORARIES_INIT
+{
+}
// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
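
[Editor's note: the GET_GLOBALISEL_* blocks in the hunk above rely on an include-guard idiom: the TableGen-emitted AArch64GenGlobalISel.inc wraps each of its sections (predicate bitset, predicate declarations, temporaries, selectImpl() body) in an #ifdef, and this file includes it once per section with the matching macro defined. The generated contents themselves are not shown here; the following self-contained sketch, with a hypothetical section name and a trivial stand-in declaration, only illustrates the idiom.]

#include <cassert>

// The "generated file" is inlined here so the example compiles on its own.
// In the real code, the #ifdef'd region lives in AArch64GenGlobalISel.inc
// and the define/include/undef triple lives in the consumer, as above.
#define GET_SECTION_A
#ifdef GET_SECTION_A
static int SectionAValue = 42; // stands in for generated declarations
#endif
#undef GET_SECTION_A

int main() {
  assert(SectionAValue == 42); // the guarded section was spliced in
  return 0;
}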
@@ -119,71 +182,39 @@ static bool unsupportedBinOp(const MachineInstr &I,
}
/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
-/// (such as G_OR or G_ADD), appropriate for the register bank \p RegBankID
+/// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
/// and of size \p OpSize.
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
unsigned OpSize) {
switch (RegBankID) {
case AArch64::GPRRegBankID:
- if (OpSize <= 32) {
- assert((OpSize == 32 || (GenericOpc != TargetOpcode::G_SDIV &&
- GenericOpc != TargetOpcode::G_UDIV &&
- GenericOpc != TargetOpcode::G_LSHR &&
- GenericOpc != TargetOpcode::G_ASHR)) &&
- "operation should have been legalized before now");
-
+ if (OpSize == 32) {
switch (GenericOpc) {
- case TargetOpcode::G_OR:
- return AArch64::ORRWrr;
- case TargetOpcode::G_XOR:
- return AArch64::EORWrr;
- case TargetOpcode::G_AND:
- return AArch64::ANDWrr;
- case TargetOpcode::G_ADD:
- assert(OpSize != 32 && "s32 G_ADD should have been selected");
- return AArch64::ADDWrr;
- case TargetOpcode::G_SUB:
- return AArch64::SUBWrr;
case TargetOpcode::G_SHL:
return AArch64::LSLVWr;
case TargetOpcode::G_LSHR:
return AArch64::LSRVWr;
case TargetOpcode::G_ASHR:
return AArch64::ASRVWr;
- case TargetOpcode::G_SDIV:
- return AArch64::SDIVWr;
- case TargetOpcode::G_UDIV:
- return AArch64::UDIVWr;
default:
return GenericOpc;
}
} else if (OpSize == 64) {
switch (GenericOpc) {
- case TargetOpcode::G_OR:
- return AArch64::ORRXrr;
- case TargetOpcode::G_XOR:
- return AArch64::EORXrr;
- case TargetOpcode::G_AND:
- return AArch64::ANDXrr;
case TargetOpcode::G_GEP:
return AArch64::ADDXrr;
- case TargetOpcode::G_SUB:
- return AArch64::SUBXrr;
case TargetOpcode::G_SHL:
return AArch64::LSLVXr;
case TargetOpcode::G_LSHR:
return AArch64::LSRVXr;
case TargetOpcode::G_ASHR:
return AArch64::ASRVXr;
- case TargetOpcode::G_SDIV:
- return AArch64::SDIVXr;
- case TargetOpcode::G_UDIV:
- return AArch64::UDIVXr;
default:
return GenericOpc;
}
}
+ break;
case AArch64::FPRRegBankID:
switch (OpSize) {
case 32:
@@ -215,7 +246,8 @@ static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
return GenericOpc;
}
}
- };
+ break;
+ }
return GenericOpc;
}
@@ -239,6 +271,7 @@ static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
case 64:
return isStore ? AArch64::STRXui : AArch64::LDRXui;
}
+ break;
case AArch64::FPRRegBankID:
switch (OpSize) {
case 8:
@@ -250,7 +283,8 @@ static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
case 64:
return isStore ? AArch64::STRDui : AArch64::LDRDui;
}
- };
+ break;
+ }
return GenericOpc;
}
@@ -473,6 +507,82 @@ static void changeFCMPPredToAArch64CC(CmpInst::Predicate P,
}
}
+bool AArch64InstructionSelector::selectCompareBranch(
+ MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
+
+ const unsigned CondReg = I.getOperand(0).getReg();
+ MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
+ MachineInstr *CCMI = MRI.getVRegDef(CondReg);
+ if (CCMI->getOpcode() != TargetOpcode::G_ICMP)
+ return false;
+
+ unsigned LHS = CCMI->getOperand(2).getReg();
+ unsigned RHS = CCMI->getOperand(3).getReg();
+ if (!getConstantVRegVal(RHS, MRI))
+ std::swap(RHS, LHS);
+
+ const auto RHSImm = getConstantVRegVal(RHS, MRI);
+ if (!RHSImm || *RHSImm != 0)
+ return false;
+
+ const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI);
+ if (RB.getID() != AArch64::GPRRegBankID)
+ return false;
+
+ const auto Pred = (CmpInst::Predicate)CCMI->getOperand(1).getPredicate();
+ if (Pred != CmpInst::ICMP_NE && Pred != CmpInst::ICMP_EQ)
+ return false;
+
+ const unsigned CmpWidth = MRI.getType(LHS).getSizeInBits();
+ unsigned CBOpc = 0;
+ if (CmpWidth <= 32)
+ CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZW : AArch64::CBNZW);
+ else if (CmpWidth == 64)
+ CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZX : AArch64::CBNZX);
+ else
+ return false;
+
+ auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CBOpc))
+ .addUse(LHS)
+ .addMBB(DestMBB);
+
+ constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
+ I.eraseFromParent();
+ return true;
+}
+
+bool AArch64InstructionSelector::selectVaStartAAPCS(
+ MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
+ return false;
+}
+
+bool AArch64InstructionSelector::selectVaStartDarwin(
+ MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
+ AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
+ unsigned ListReg = I.getOperand(0).getReg();
+
+ unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
+
+ auto MIB =
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
+ .addDef(ArgsAddrReg)
+ .addFrameIndex(FuncInfo->getVarArgsStackIndex())
+ .addImm(0)
+ .addImm(0);
+
+ constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
+
+ MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui))
+ .addUse(ArgsAddrReg)
+ .addUse(ListReg)
+ .addImm(0)
+ .addMemOperand(*I.memoperands_begin());
+
+ constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
+ I.eraseFromParent();
+ return true;
+}
+
bool AArch64InstructionSelector::select(MachineInstr &I) const {
assert(I.getParent() && "Instruction should be in a basic block!");
assert(I.getParent()->getParent() && "Instruction should be in a function!");
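
[Editor's note: selectCompareBranch() above folds a G_ICMP-against-zero feeding a G_BRCOND into a single AArch64 compare-and-branch. The opcode choice is a small table over (predicate, compare width); this standalone sketch mirrors that decision with plain enums and opcode names as strings, since the LLVM opcode namespaces are not available outside the backend.]

#include <cassert>
#include <cstring>

enum class Pred { EQ, NE };

// Mirrors the CBOpc selection above: EQ against zero becomes CBZ (branch
// if zero), NE becomes CBNZ, and the W/X form follows the compare width.
// Returns nullptr for widths the selector rejects.
static const char *pickCompareBranch(Pred P, unsigned WidthBits) {
  if (WidthBits <= 32)
    return P == Pred::EQ ? "CBZW" : "CBNZW";
  if (WidthBits == 64)
    return P == Pred::EQ ? "CBZX" : "CBNZX";
  return nullptr; // e.g. oversized compares fall back to the generic path
}

int main() {
  assert(std::strcmp(pickCompareBranch(Pred::EQ, 32), "CBZW") == 0);
  assert(std::strcmp(pickCompareBranch(Pred::NE, 64), "CBNZX") == 0);
  assert(pickCompareBranch(Pred::EQ, 128) == nullptr);
  return 0;
}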
@@ -549,6 +659,9 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
const unsigned CondReg = I.getOperand(0).getReg();
MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
+ if (selectCompareBranch(I, MF, MRI))
+ return true;
+
auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
.addUse(CondReg)
.addImm(/*bit offset=*/0)
@@ -558,6 +671,11 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
}
+ case TargetOpcode::G_BRINDIRECT: {
+ I.setDesc(TII.get(AArch64::BR));
+ return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
+ }
+
case TargetOpcode::G_FCONSTANT:
case TargetOpcode::G_CONSTANT: {
const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;
@@ -629,9 +747,12 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
// FIXME: Is going through int64_t always correct?
ImmOp.ChangeToImmediate(
ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
- } else {
+ } else if (I.getOperand(1).isCImm()) {
uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
I.getOperand(1).ChangeToImmediate(Val);
+ } else if (I.getOperand(1).isImm()) {
+ uint64_t Val = I.getOperand(1).getImm();
+ I.getOperand(1).ChangeToImmediate(Val);
}
constrainSelectedInstRegOperands(I, TII, TRI, RBI);
@@ -686,10 +807,16 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
return false;
}
-#ifndef NDEBUG
- // Sanity-check the pointer register.
+ auto &MemOp = **I.memoperands_begin();
+ if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
+ DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+ return false;
+ }
+
const unsigned PtrReg = I.getOperand(1).getReg();
+#ifndef NDEBUG
const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
+ // Sanity-check the pointer register.
assert(PtrRB.getID() == AArch64::GPRRegBankID &&
"Load/Store pointer operand isn't a GPR");
assert(MRI.getType(PtrReg).isPointer() &&
@@ -706,11 +833,46 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
I.setDesc(TII.get(NewOpc));
- I.addOperand(MachineOperand::CreateImm(0));
+ uint64_t Offset = 0;
+ auto *PtrMI = MRI.getVRegDef(PtrReg);
+
+ // Try to fold a GEP into our unsigned immediate addressing mode.
+ if (PtrMI->getOpcode() == TargetOpcode::G_GEP) {
+ if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) {
+ int64_t Imm = *COff;
+ const unsigned Size = MemTy.getSizeInBits() / 8;
+ const unsigned Scale = Log2_32(Size);
+ if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) {
+ unsigned Ptr2Reg = PtrMI->getOperand(1).getReg();
+ I.getOperand(1).setReg(Ptr2Reg);
+ PtrMI = MRI.getVRegDef(Ptr2Reg);
+ Offset = Imm / Size;
+ }
+ }
+ }
+
+ // If we haven't folded anything into our addressing mode yet, try to fold
+ // a frame index into the base+offset.
+ if (!Offset && PtrMI->getOpcode() == TargetOpcode::G_FRAME_INDEX)
+ I.getOperand(1).ChangeToFrameIndex(PtrMI->getOperand(1).getIndex());
+
+ I.addOperand(MachineOperand::CreateImm(Offset));
+
+ // If we're storing a 0, use WZR/XZR.
+ if (auto CVal = getConstantVRegVal(ValReg, MRI)) {
+ if (*CVal == 0 && Opcode == TargetOpcode::G_STORE) {
+ if (I.getOpcode() == AArch64::STRWui)
+ I.getOperand(0).setReg(AArch64::WZR);
+ else if (I.getOpcode() == AArch64::STRXui)
+ I.getOperand(0).setReg(AArch64::XZR);
+ }
+ }
+
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
- case TargetOpcode::G_MUL: {
+ case TargetOpcode::G_SMULH:
+ case TargetOpcode::G_UMULH: {
// Reject the various things we don't support yet.
if (unsupportedBinOp(I, RBI, MRI, TRI))
return false;
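
[Editor's note: the G_GEP folding in the hunk above absorbs a constant byte offset into the load/store's unsigned-immediate addressing mode only when the offset is non-negative, naturally aligned for the access size, and fits the 12-bit scaled field. This hypothetical helper restates that test so the bounds can be checked in isolation.]

#include <cassert>
#include <cstdint>

static unsigned log2u(unsigned V) {
  unsigned S = 0;
  while (V >>= 1)
    ++S;
  return S;
}

// Mirrors `(Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)`
// from the patch; on success, ScaledOut is the encoded (scaled) offset.
static bool fitsUImm12Scaled(int64_t Imm, unsigned SizeBytes,
                             uint64_t &ScaledOut) {
  const unsigned Scale = log2u(SizeBytes);
  if (Imm < 0 || (Imm & (SizeBytes - 1)) != 0 ||
      Imm >= (int64_t(0x1000) << Scale))
    return false;
  ScaledOut = uint64_t(Imm) / SizeBytes;
  return true;
}

int main() {
  uint64_t Scaled;
  assert(fitsUImm12Scaled(32, 8, Scaled) && Scaled == 4);         // aligned, in range
  assert(!fitsUImm12Scaled(7, 8, Scaled));                        // misaligned
  assert(fitsUImm12Scaled(0x7FF8, 8, Scaled) && Scaled == 0xFFF); // max encodable
  assert(!fitsUImm12Scaled(0x8000, 8, Scaled));                   // one past the field
  return 0;
}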
@@ -719,48 +881,33 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
if (RB.getID() != AArch64::GPRRegBankID) {
- DEBUG(dbgs() << "G_MUL on bank: " << RB << ", expected: GPR\n");
+ DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
return false;
}
- unsigned ZeroReg;
- unsigned NewOpc;
- if (Ty.isScalar() && Ty.getSizeInBits() <= 32) {
- NewOpc = AArch64::MADDWrrr;
- ZeroReg = AArch64::WZR;
- } else if (Ty == LLT::scalar(64)) {
- NewOpc = AArch64::MADDXrrr;
- ZeroReg = AArch64::XZR;
- } else {
- DEBUG(dbgs() << "G_MUL has type: " << Ty << ", expected: "
- << LLT::scalar(32) << " or " << LLT::scalar(64) << '\n');
+ if (Ty != LLT::scalar(64)) {
+ DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
+ << ", expected: " << LLT::scalar(64) << '\n');
return false;
}
+ unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr
+ : AArch64::UMULHrr;
I.setDesc(TII.get(NewOpc));
- I.addOperand(MachineOperand::CreateReg(ZeroReg, /*isDef=*/false));
-
// Now that we selected an opcode, we need to constrain the register
// operands to use appropriate classes.
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
-
case TargetOpcode::G_FADD:
case TargetOpcode::G_FSUB:
case TargetOpcode::G_FMUL:
case TargetOpcode::G_FDIV:
case TargetOpcode::G_OR:
- case TargetOpcode::G_XOR:
- case TargetOpcode::G_AND:
case TargetOpcode::G_SHL:
case TargetOpcode::G_LSHR:
case TargetOpcode::G_ASHR:
- case TargetOpcode::G_SDIV:
- case TargetOpcode::G_UDIV:
- case TargetOpcode::G_ADD:
- case TargetOpcode::G_SUB:
case TargetOpcode::G_GEP: {
// Reject the various things we don't support yet.
if (unsupportedBinOp(I, RBI, MRI, TRI))
@@ -783,6 +930,17 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
+ case TargetOpcode::G_PTR_MASK: {
+ uint64_t Align = I.getOperand(2).getImm();
+ if (Align >= 64 || Align == 0)
+ return false;
+
+ uint64_t Mask = ~((1ULL << Align) - 1);
+ I.setDesc(TII.get(AArch64::ANDXri));
+ I.getOperand(2).setImm(AArch64_AM::encodeLogicalImmediate(Mask, 64));
+
+ return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
+ }
case TargetOpcode::G_PTRTOINT:
case TargetOpcode::G_TRUNC: {
const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
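
[Editor's note: in the G_PTR_MASK lowering above, the immediate operand is the log2 of the alignment, and clearing that many low pointer bits is an AND with ~((1 << Align) - 1), which ANDXri can encode as a logical immediate. encodeLogicalImmediate() itself is LLVM-internal; this sketch only checks the mask arithmetic, under the same bounds the selector enforces.]

#include <cassert>
#include <cstdint>

static uint64_t ptrMask(uint64_t AlignLog2) {
  // Same rejection bounds as the selector: Align == 0 or >= 64 is invalid.
  assert(AlignLog2 > 0 && AlignLog2 < 64);
  return ~((1ULL << AlignLog2) - 1);
}

int main() {
  assert(ptrMask(4) == 0xFFFFFFFFFFFFFFF0ULL);      // 16-byte alignment
  assert((0x1234FULL & ptrMask(4)) == 0x12340ULL);  // low bits cleared
  return 0;
}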
@@ -795,7 +953,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
if (DstRB.getID() != SrcRB.getID()) {
- DEBUG(dbgs() << "G_TRUNC input/output on different banks\n");
+ DEBUG(dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
return false;
}
@@ -812,16 +970,21 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
+ DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
return false;
}
if (DstRC == SrcRC) {
// Nothing to be done
+ } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) &&
+ SrcTy == LLT::scalar(64)) {
+ llvm_unreachable("TableGen can import this case");
+ return false;
} else if (DstRC == &AArch64::GPR32RegClass &&
SrcRC == &AArch64::GPR64RegClass) {
I.getOperand(1).setSubReg(AArch64::sub_32);
} else {
+ DEBUG(dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
return false;
}
@@ -1026,7 +1189,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
if (Ty == LLT::scalar(32)) {
CSelOpc = AArch64::CSELWr;
- } else if (Ty == LLT::scalar(64)) {
+ } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
CSelOpc = AArch64::CSELXr;
} else {
return false;
@@ -1134,7 +1297,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
.addDef(Def1Reg)
.addUse(AArch64::WZR)
.addUse(AArch64::WZR)
- .addImm(CC1);
+ .addImm(getInvertedCondCode(CC1));
if (CC2 != AArch64CC::AL) {
unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
@@ -1143,7 +1306,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
.addDef(Def2Reg)
.addUse(AArch64::WZR)
.addUse(AArch64::WZR)
- .addImm(CC2);
+ .addImm(getInvertedCondCode(CC2));
MachineInstr &OrMI =
*BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ORRWrr))
.addDef(DefReg)
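
[Editor's note: the two one-line fixes above change CSINC's condition operand to the inverted code. CSINC Rd, Rn, Rm, cond yields Rn when cond holds and Rm+1 otherwise, so with both sources WZR the result is cond ? 0 : 1; materializing "1 when CC is true" therefore requires passing !CC. A minimal simulation of that semantics:]

#include <cassert>
#include <cstdint>

// CSINC Rd, Rn, Rm, cond  =>  Rd = cond ? Rn : Rm + 1
static uint32_t csinc(uint32_t Rn, uint32_t Rm, bool CondHolds) {
  return CondHolds ? Rn : Rm + 1;
}

int main() {
  const uint32_t WZR = 0;
  // Buggy form: passing CC directly yields the complement of CC.
  assert(csinc(WZR, WZR, /*CC=*/true) == 0);
  assert(csinc(WZR, WZR, /*CC=*/false) == 1);
  // Fixed form: passing the inverted code yields CC itself.
  assert(csinc(WZR, WZR, /*InvCC=*/!true) == 1);  // CC true  -> 1
  assert(csinc(WZR, WZR, /*InvCC=*/!false) == 0); // CC false -> 0
  return 0;
}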
@@ -1159,7 +1322,67 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
I.eraseFromParent();
return true;
}
+ case TargetOpcode::G_VASTART:
+ return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
+ : selectVaStartAAPCS(I, MF, MRI);
+ case TargetOpcode::G_IMPLICIT_DEF:
+ I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
+ return true;
}
return false;
}
+
+/// SelectArithImmed - Select an immediate value that can be represented as
+/// a 12-bit value shifted left by either 0 or 12. If so, return a renderer
+/// that adds the 12-bit value and the shifter operand; otherwise return
+/// nullptr.
+InstructionSelector::ComplexRendererFn
+AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
+ MachineInstr &MI = *Root.getParent();
+ MachineBasicBlock &MBB = *MI.getParent();
+ MachineFunction &MF = *MBB.getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+  // This function is called from the addsub_shifted_imm ComplexPattern,
+  // which lists [imm] as the set of opcodes it's interested in. However,
+  // we still need to check whether the operand is actually an immediate
+  // here, because the ComplexPattern opcode list is only used in
+  // root-level opcode matching.
+ uint64_t Immed;
+ if (Root.isImm())
+ Immed = Root.getImm();
+ else if (Root.isCImm())
+ Immed = Root.getCImm()->getZExtValue();
+ else if (Root.isReg()) {
+ MachineInstr *Def = MRI.getVRegDef(Root.getReg());
+ if (Def->getOpcode() != TargetOpcode::G_CONSTANT)
+ return nullptr;
+ MachineOperand &Op1 = Def->getOperand(1);
+ if (!Op1.isCImm() || Op1.getCImm()->getBitWidth() > 64)
+ return nullptr;
+ Immed = Op1.getCImm()->getZExtValue();
+ } else
+ return nullptr;
+
+ unsigned ShiftAmt;
+
+ if (Immed >> 12 == 0) {
+ ShiftAmt = 0;
+ } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
+ ShiftAmt = 12;
+ Immed = Immed >> 12;
+ } else
+ return nullptr;
+
+ unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
+ return [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed).addImm(ShVal); };
+}
+
+namespace llvm {
+InstructionSelector *
+createAArch64InstructionSelector(const AArch64TargetMachine &TM,
+ AArch64Subtarget &Subtarget,
+ AArch64RegisterBankInfo &RBI) {
+ return new AArch64InstructionSelector(TM, Subtarget, RBI);
+}
+}