Diffstat (limited to 'contrib/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp')
-rw-r--r--  contrib/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp  438
1 file changed, 287 insertions(+), 151 deletions(-)
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/contrib/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index bb2e1e2..1ea4abc 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -53,12 +53,10 @@ public:
}
bool runOnMachineFunction(MachineFunction &MF) override {
- AttributeSet FnAttrs = MF.getFunction()->getAttributes();
ForCodeSize =
- FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
- Attribute::OptimizeForSize) ||
- FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
- Subtarget = &TM.getSubtarget<AArch64Subtarget>();
+ MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
+ MF.getFunction()->hasFnAttribute(Attribute::MinSize);
+ Subtarget = &MF.getSubtarget<AArch64Subtarget>();
return SelectionDAGISel::runOnMachineFunction(MF);
}
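
The hunk above replaces the AttributeSet-based queries with the newer Function::hasFnAttribute and MachineFunction::getSubtarget accessors. A minimal sketch of the updated pattern, illustrative only (it assumes LLVM headers from around this release, builds only inside the LLVM tree, and the helper name is hypothetical):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"

using namespace llvm;

// Query the optimize-for-size attributes straight from the MachineFunction's
// IR function, as the updated runOnMachineFunction does.
static bool shouldOptForSize(const MachineFunction &MF) {
  const Function *F = MF.getFunction(); // pointer-returning accessor in this LLVM version
  return F->hasFnAttribute(Attribute::OptimizeForSize) ||
         F->hasFnAttribute(Attribute::MinSize);
}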
@@ -67,7 +65,7 @@ public:
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
bool SelectInlineAsmMemoryOperand(const SDValue &Op,
- char ConstraintCode,
+ unsigned ConstraintID,
std::vector<SDValue> &OutOps) override;
SDNode *SelectMLAV64LaneV128(SDNode *N);
@@ -134,8 +132,8 @@ public:
/// Generic helper for the createDTuple/createQTuple
/// functions. Those should almost always be called instead.
- SDValue createTuple(ArrayRef<SDValue> Vecs, unsigned RegClassIDs[],
- unsigned SubRegs[]);
+ SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
+ const unsigned SubRegs[]);
SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
@@ -158,6 +156,9 @@ public:
SDNode *SelectLIBM(SDNode *N);
+ SDNode *SelectReadRegister(SDNode *N);
+ SDNode *SelectWriteRegister(SDNode *N);
+
// Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"
@@ -213,13 +214,20 @@ static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
}
bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
- const SDValue &Op, char ConstraintCode, std::vector<SDValue> &OutOps) {
- assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
- // Require the address to be in a register. That is safe for all AArch64
- // variants and it is hard to do anything much smarter without knowing
- // how the operand is used.
- OutOps.push_back(Op);
- return false;
+ const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
+ switch(ConstraintID) {
+ default:
+ llvm_unreachable("Unexpected asm memory constraint");
+ case InlineAsm::Constraint_i:
+ case InlineAsm::Constraint_m:
+ case InlineAsm::Constraint_Q:
+ // Require the address to be in a register. That is safe for all AArch64
+ // variants and it is hard to do anything much smarter without knowing
+ // how the operand is used.
+ OutOps.push_back(Op);
+ return false;
+ }
+ return true;
}
/// SelectArithImmed - Select an immediate value that can be represented as
@@ -247,8 +255,9 @@ bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
return false;
unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
- Val = CurDAG->getTargetConstant(Immed, MVT::i32);
- Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
+ SDLoc dl(N);
+ Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
+ Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
return true;
}
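
Most of the remaining hunks apply one mechanical change: SelectionDAG::getTargetConstant (and getConstant) now take a debug location, so each call site constructs an SDLoc from the node being selected and threads it through. A minimal sketch of the pattern, illustrative only (it assumes the post-change SelectionDAG headers, builds only inside the LLVM tree, and the helper name is hypothetical):

#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

// Build the (value, shift) immediate operands the way the updated
// SelectArithImmed does, sourcing the debug location from the node N.
static void buildShiftedImm(SelectionDAG *CurDAG, SDNode *N, uint64_t Immed,
                            unsigned ShVal, SDValue &Val, SDValue &Shift) {
  SDLoc dl(N); // the location now travels with every constant node created here
  Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
}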
@@ -281,7 +290,8 @@ bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
return false;
Immed &= 0xFFFFFFULL;
- return SelectArithImmed(CurDAG->getConstant(Immed, MVT::i32), Val, Shift);
+ return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
+ Shift);
}
/// getShiftTypeForNode - Translate a shift node to the corresponding
@@ -301,7 +311,7 @@ static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
}
}
-/// \brief Determine wether it is worth to fold V into an extended register.
+/// \brief Determine whether it is worth to fold V into an extended register.
bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
// it hurts if the value is used at least twice, unless we are optimizing
// for code size.
@@ -329,7 +339,7 @@ bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);
Reg = N.getOperand(0);
- Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
+ Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
return isWorthFolding(N);
}
@@ -430,6 +440,7 @@ static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
/// is a lane in the upper half of a 128-bit vector. Recognize and select this
/// so that we don't emit unnecessary lane extracts.
SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
+ SDLoc dl(N);
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
SDValue MLAOp1; // Will hold ordinary multiplicand for MLA.
@@ -446,7 +457,7 @@ SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
return nullptr;
}
- SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
+ SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };
@@ -469,10 +480,11 @@ SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
break;
}
- return CurDAG->getMachineNode(MLAOpc, SDLoc(N), N->getValueType(0), Ops);
+ return CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops);
}
SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
+ SDLoc dl(N);
SDValue SMULLOp0;
SDValue SMULLOp1;
int LaneIdx;
@@ -481,7 +493,7 @@ SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
LaneIdx))
return nullptr;
- SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
+ SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };
@@ -512,7 +524,7 @@ SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
} else
llvm_unreachable("Unrecognized intrinsic.");
- return CurDAG->getMachineNode(SMULLOpc, SDLoc(N), N->getValueType(0), Ops);
+ return CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops);
}
/// Instructions that accept extend modifiers like UXTW expect the register
@@ -523,9 +535,10 @@ static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
if (N.getValueType() == MVT::i32)
return N;
- SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
+ SDLoc dl(N);
+ SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
- SDLoc(N), MVT::i32, N, SubReg);
+ dl, MVT::i32, N, SubReg);
return SDValue(Node, 0);
}
@@ -565,7 +578,8 @@ bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
// (harmlessly) synthesize one by injected an EXTRACT_SUBREG here.
assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
Reg = narrowIfNeeded(CurDAG, Reg);
- Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), MVT::i32);
+ Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
+ MVT::i32);
return isWorthFolding(N);
}
@@ -595,11 +609,12 @@ static bool isWorthFoldingADDlow(SDValue N) {
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
SDValue &Base, SDValue &OffImm) {
+ SDLoc dl(N);
const TargetLowering *TLI = getTargetLowering();
if (N.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
- OffImm = CurDAG->getTargetConstant(0, MVT::i64);
+ OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
return true;
}
@@ -632,7 +647,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
}
- OffImm = CurDAG->getTargetConstant(RHSC >> Scale, MVT::i64);
+ OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
return true;
}
}
@@ -648,7 +663,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
// add x0, Xbase, #offset
// ldr x0, [x0]
Base = N;
- OffImm = CurDAG->getTargetConstant(0, MVT::i64);
+ OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
return true;
}
@@ -675,7 +690,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
const TargetLowering *TLI = getTargetLowering();
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
}
- OffImm = CurDAG->getTargetConstant(RHSC, MVT::i64);
+ OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
return true;
}
}
@@ -683,12 +698,12 @@ bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
}
static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
- SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
+ SDLoc dl(N);
+ SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
SDValue ImpDef = SDValue(
- CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, SDLoc(N), MVT::i64),
- 0);
+ CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
MachineSDNode *Node = CurDAG->getMachineNode(
- TargetOpcode::INSERT_SUBREG, SDLoc(N), MVT::i64, ImpDef, N, SubReg);
+ TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
return SDValue(Node, 0);
}
@@ -702,6 +717,7 @@ bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
return false;
+ SDLoc dl(N);
if (WantExtend) {
AArch64_AM::ShiftExtendType Ext =
getExtendTypeForNode(N.getOperand(0), true);
@@ -709,10 +725,11 @@ bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
return false;
Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
- SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
+ SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
+ MVT::i32);
} else {
Offset = N.getOperand(0);
- SignExtend = CurDAG->getTargetConstant(0, MVT::i32);
+ SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
}
unsigned LegalShiftVal = Log2_32(Size);
@@ -735,6 +752,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
return false;
SDValue LHS = N.getOperand(0);
SDValue RHS = N.getOperand(1);
+ SDLoc dl(N);
// We don't want to match immediate adds here, because they are better lowered
// to the register-immediate addressing modes.
@@ -757,7 +775,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
Base = LHS;
- DoShift = CurDAG->getTargetConstant(true, MVT::i32);
+ DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
return true;
}
@@ -765,12 +783,12 @@ bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
Base = RHS;
- DoShift = CurDAG->getTargetConstant(true, MVT::i32);
+ DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
return true;
}
// There was no shift, whatever else we find.
- DoShift = CurDAG->getTargetConstant(false, MVT::i32);
+ DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);
AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
// Try to match an unshifted extend on the LHS.
@@ -779,7 +797,8 @@ bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
AArch64_AM::InvalidShiftExtend) {
Base = RHS;
Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
- SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
+ SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
+ MVT::i32);
if (isWorthFolding(LHS))
return true;
}
@@ -790,7 +809,8 @@ bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
AArch64_AM::InvalidShiftExtend) {
Base = LHS;
Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
- SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
+ SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
+ MVT::i32);
if (isWorthFolding(RHS))
return true;
}
@@ -821,6 +841,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
return false;
SDValue LHS = N.getOperand(0);
SDValue RHS = N.getOperand(1);
+ SDLoc DL(N);
// Check if this particular node is reused in any non-memory related
// operation. If yes, do not try to fold this node into the address
@@ -843,7 +864,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
// MOV X0, WideImmediate
// LDR X2, [BaseReg, X0]
if (isa<ConstantSDNode>(RHS)) {
- int64_t ImmOff = (int64_t)dyn_cast<ConstantSDNode>(RHS)->getZExtValue();
+ int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
unsigned Scale = Log2_32(Size);
// Skip the immediate can be seleced by load/store addressing mode.
// Also skip the immediate can be encoded by a single ADD (SUB is also
@@ -852,7 +873,6 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
return false;
- SDLoc DL(N.getNode());
SDValue Ops[] = { RHS };
SDNode *MOVI =
CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
@@ -868,7 +888,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
Base = LHS;
- DoShift = CurDAG->getTargetConstant(true, MVT::i32);
+ DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
return true;
}
@@ -876,40 +896,40 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
Base = RHS;
- DoShift = CurDAG->getTargetConstant(true, MVT::i32);
+ DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
return true;
}
// Match any non-shifted, non-extend, non-immediate add expression.
Base = LHS;
Offset = RHS;
- SignExtend = CurDAG->getTargetConstant(false, MVT::i32);
- DoShift = CurDAG->getTargetConstant(false, MVT::i32);
+ SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
+ DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
// Reg1 + Reg2 is free: no check needed.
return true;
}
SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
- static unsigned RegClassIDs[] = {
+ static const unsigned RegClassIDs[] = {
AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
- static unsigned SubRegs[] = { AArch64::dsub0, AArch64::dsub1,
- AArch64::dsub2, AArch64::dsub3 };
+ static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
+ AArch64::dsub2, AArch64::dsub3};
return createTuple(Regs, RegClassIDs, SubRegs);
}
SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
- static unsigned RegClassIDs[] = {
+ static const unsigned RegClassIDs[] = {
AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
- static unsigned SubRegs[] = { AArch64::qsub0, AArch64::qsub1,
- AArch64::qsub2, AArch64::qsub3 };
+ static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
+ AArch64::qsub2, AArch64::qsub3};
return createTuple(Regs, RegClassIDs, SubRegs);
}
SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
- unsigned RegClassIDs[],
- unsigned SubRegs[]) {
+ const unsigned RegClassIDs[],
+ const unsigned SubRegs[]) {
// There's no special register-class for a vector-list of 1 element: it's just
// a vector.
if (Regs.size() == 1)
@@ -917,18 +937,18 @@ SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
assert(Regs.size() >= 2 && Regs.size() <= 4);
- SDLoc DL(Regs[0].getNode());
+ SDLoc DL(Regs[0]);
SmallVector<SDValue, 4> Ops;
// First operand of REG_SEQUENCE is the desired RegClass.
Ops.push_back(
- CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], MVT::i32));
+ CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));
// Then we get pairs of source & subregister-position for the components.
for (unsigned i = 0; i < Regs.size(); ++i) {
Ops.push_back(Regs[i]);
- Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], MVT::i32));
+ Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
}
SDNode *N =
@@ -1025,19 +1045,21 @@ SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
SDValue Base = LD->getBasePtr();
ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
int OffsetVal = (int)OffsetOp->getZExtValue();
- SDValue Offset = CurDAG->getTargetConstant(OffsetVal, MVT::i64);
+ SDLoc dl(N);
+ SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
SDValue Ops[] = { Base, Offset, Chain };
- SDNode *Res = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i64, DstVT,
+ SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
MVT::Other, Ops);
// Either way, we're replacing the node, so tell the caller that.
Done = true;
SDValue LoadedVal = SDValue(Res, 1);
if (InsertTo64) {
- SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
+ SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
LoadedVal =
SDValue(CurDAG->getMachineNode(
- AArch64::SUBREG_TO_REG, SDLoc(N), MVT::i64,
- CurDAG->getTargetConstant(0, MVT::i64), LoadedVal, SubReg),
+ AArch64::SUBREG_TO_REG, dl, MVT::i64,
+ CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
+ SubReg),
0);
}
@@ -1054,13 +1076,10 @@ SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
EVT VT = N->getValueType(0);
SDValue Chain = N->getOperand(0);
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(N->getOperand(2)); // Mem operand;
- Ops.push_back(Chain);
+ SDValue Ops[] = {N->getOperand(2), // Mem operand;
+ Chain};
- std::vector<EVT> ResTys;
- ResTys.push_back(MVT::Untyped);
- ResTys.push_back(MVT::Other);
+ const EVT ResTys[] = {MVT::Untyped, MVT::Other};
SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
SDValue SuperReg = SDValue(Ld, 0);
@@ -1078,15 +1097,12 @@ SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
EVT VT = N->getValueType(0);
SDValue Chain = N->getOperand(0);
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(N->getOperand(1)); // Mem operand
- Ops.push_back(N->getOperand(2)); // Incremental
- Ops.push_back(Chain);
+ SDValue Ops[] = {N->getOperand(1), // Mem operand
+ N->getOperand(2), // Incremental
+ Chain};
- std::vector<EVT> ResTys;
- ResTys.push_back(MVT::i64); // Type of the write back register
- ResTys.push_back(MVT::Untyped);
- ResTys.push_back(MVT::Other);
+ const EVT ResTys[] = {MVT::i64, // Type of the write back register
+ MVT::Untyped, MVT::Other};
SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
@@ -1117,10 +1133,7 @@ SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(RegSeq);
- Ops.push_back(N->getOperand(NumVecs + 2));
- Ops.push_back(N->getOperand(0));
+ SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
return St;
@@ -1130,25 +1143,24 @@ SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
unsigned Opc) {
SDLoc dl(N);
EVT VT = N->getOperand(2)->getValueType(0);
- SmallVector<EVT, 2> ResTys;
- ResTys.push_back(MVT::i64); // Type of the write back register
- ResTys.push_back(MVT::Other); // Type for the Chain
+ const EVT ResTys[] = {MVT::i64, // Type of the write back register
+ MVT::Other}; // Type for the Chain
// Form a REG_SEQUENCE to force register allocation.
bool Is128Bit = VT.getSizeInBits() == 128;
SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(RegSeq);
- Ops.push_back(N->getOperand(NumVecs + 1)); // base register
- Ops.push_back(N->getOperand(NumVecs + 2)); // Incremental
- Ops.push_back(N->getOperand(0)); // Chain
+ SDValue Ops[] = {RegSeq,
+ N->getOperand(NumVecs + 1), // base register
+ N->getOperand(NumVecs + 2), // Incremental
+ N->getOperand(0)}; // Chain
SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
return St;
}
+namespace {
/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
class WidenVector {
@@ -1169,6 +1181,7 @@ public:
return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
}
};
+} // namespace
/// NarrowVector - Given a value in the V128 register class, produce the
/// equivalent value in the V64 register class.
@@ -1197,18 +1210,13 @@ SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
SDValue RegSeq = createQTuple(Regs);
- std::vector<EVT> ResTys;
- ResTys.push_back(MVT::Untyped);
- ResTys.push_back(MVT::Other);
+ const EVT ResTys[] = {MVT::Untyped, MVT::Other};
unsigned LaneNo =
cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(RegSeq);
- Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
- Ops.push_back(N->getOperand(NumVecs + 3));
- Ops.push_back(N->getOperand(0));
+ SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
+ N->getOperand(NumVecs + 3), N->getOperand(0)};
SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
SDValue SuperReg = SDValue(Ld, 0);
@@ -1242,20 +1250,18 @@ SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
SDValue RegSeq = createQTuple(Regs);
- std::vector<EVT> ResTys;
- ResTys.push_back(MVT::i64); // Type of the write back register
- ResTys.push_back(MVT::Untyped);
- ResTys.push_back(MVT::Other);
+ const EVT ResTys[] = {MVT::i64, // Type of the write back register
+ RegSeq->getValueType(0), MVT::Other};
unsigned LaneNo =
cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(RegSeq);
- Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64)); // Lane Number
- Ops.push_back(N->getOperand(NumVecs + 2)); // Base register
- Ops.push_back(N->getOperand(NumVecs + 3)); // Incremental
- Ops.push_back(N->getOperand(0));
+ SDValue Ops[] = {RegSeq,
+ CurDAG->getTargetConstant(LaneNo, dl,
+ MVT::i64), // Lane Number
+ N->getOperand(NumVecs + 2), // Base register
+ N->getOperand(NumVecs + 3), // Incremental
+ N->getOperand(0)};
SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
// Update uses of the write back register
@@ -1303,11 +1309,8 @@ SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
unsigned LaneNo =
cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(RegSeq);
- Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
- Ops.push_back(N->getOperand(NumVecs + 3));
- Ops.push_back(N->getOperand(0));
+ SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
+ N->getOperand(NumVecs + 3), N->getOperand(0)};
SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
// Transfer memoperands.
@@ -1333,19 +1336,16 @@ SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
SDValue RegSeq = createQTuple(Regs);
- SmallVector<EVT, 2> ResTys;
- ResTys.push_back(MVT::i64); // Type of the write back register
- ResTys.push_back(MVT::Other);
+ const EVT ResTys[] = {MVT::i64, // Type of the write back register
+ MVT::Other};
unsigned LaneNo =
cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(RegSeq);
- Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
- Ops.push_back(N->getOperand(NumVecs + 2)); // Base Register
- Ops.push_back(N->getOperand(NumVecs + 3)); // Incremental
- Ops.push_back(N->getOperand(0));
+ SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
+ N->getOperand(NumVecs + 2), // Base Register
+ N->getOperand(NumVecs + 3), // Incremental
+ N->getOperand(0)};
SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
// Transfer memoperands.
@@ -1424,12 +1424,17 @@ static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
} else
return false;
- assert((BiggerPattern || (Srl_imm > 0 && Srl_imm < VT.getSizeInBits())) &&
- "bad amount in shift node!");
+ // Bail out on large immediates. This happens when no proper
+ // combining/constant folding was performed.
+ if (!BiggerPattern && (Srl_imm <= 0 || Srl_imm >= VT.getSizeInBits())) {
+ DEBUG((dbgs() << N
+ << ": Found large shift immediate, this should not happen\n"));
+ return false;
+ }
LSB = Srl_imm;
- MSB = Srl_imm + (VT == MVT::i32 ? CountTrailingOnes_32(And_imm)
- : CountTrailingOnes_64(And_imm)) -
+ MSB = Srl_imm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(And_imm)
+ : countTrailingOnes<uint64_t>(And_imm)) -
1;
if (ClampMSB)
// Since we're moving the extend before the right shift operation, we need
@@ -1473,7 +1478,7 @@ static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
return false;
// Check whether we really have several bits extract here.
- unsigned BitWide = 64 - CountLeadingOnes_64(~(And_mask >> Srl_imm));
+ unsigned BitWide = 64 - countLeadingOnes(~(And_mask >> Srl_imm));
if (BitWide && isMask_64(And_mask >> Srl_imm)) {
if (N->getValueType(0) == MVT::i32)
Opc = AArch64::UBFMWri;
@@ -1529,7 +1534,14 @@ static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
} else
return false;
- assert(Shl_imm < VT.getSizeInBits() && "bad amount in shift node!");
+ // Missing combines/constant folding may have left us with strange
+ // constants.
+ if (Shl_imm >= VT.getSizeInBits()) {
+ DEBUG((dbgs() << N
+ << ": Found large shift immediate, this should not happen\n"));
+ return false;
+ }
+
uint64_t Srl_imm = 0;
if (!isIntImmediate(N->getOperand(1), Srl_imm))
return false;
@@ -1596,23 +1608,24 @@ SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
return nullptr;
EVT VT = N->getValueType(0);
+ SDLoc dl(N);
// If the bit extract operation is 64bit but the original type is 32bit, we
// need to add one EXTRACT_SUBREG.
if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
- SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(LSB, MVT::i64),
- CurDAG->getTargetConstant(MSB, MVT::i64)};
+ SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(LSB, dl, MVT::i64),
+ CurDAG->getTargetConstant(MSB, dl, MVT::i64)};
- SDNode *BFM = CurDAG->getMachineNode(Opc, SDLoc(N), MVT::i64, Ops64);
- SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
+ SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
+ SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
MachineSDNode *Node =
- CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SDLoc(N), MVT::i32,
+ CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::i32,
SDValue(BFM, 0), SubReg);
return Node;
}
- SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(LSB, VT),
- CurDAG->getTargetConstant(MSB, VT)};
+ SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(LSB, dl, VT),
+ CurDAG->getTargetConstant(MSB, dl, VT)};
return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
}
@@ -1816,6 +1829,7 @@ static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
return Op;
EVT VT = Op.getValueType();
+ SDLoc dl(Op);
unsigned BitWidth = VT.getSizeInBits();
unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
@@ -1823,16 +1837,16 @@ static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
if (ShlAmount > 0) {
// LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
ShiftNode = CurDAG->getMachineNode(
- UBFMOpc, SDLoc(Op), VT, Op,
- CurDAG->getTargetConstant(BitWidth - ShlAmount, VT),
- CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, VT));
+ UBFMOpc, dl, VT, Op,
+ CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
+ CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
} else {
// LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
assert(ShlAmount < 0 && "expected right shift");
int ShrAmount = -ShlAmount;
ShiftNode = CurDAG->getMachineNode(
- UBFMOpc, SDLoc(Op), VT, Op, CurDAG->getTargetConstant(ShrAmount, VT),
- CurDAG->getTargetConstant(BitWidth - 1, VT));
+ UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
+ CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
}
return SDValue(ShiftNode, 0);
@@ -1872,7 +1886,7 @@ static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
return false;
ShiftAmount = countTrailingZeros(NonZeroBits);
- MaskWidth = CountTrailingOnes_64(NonZeroBits >> ShiftAmount);
+ MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);
// BFI encompasses sufficiently many nodes that it's worth inserting an extra
// LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
@@ -1997,10 +2011,11 @@ SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
return nullptr;
EVT VT = N->getValueType(0);
+ SDLoc dl(N);
SDValue Ops[] = { Opd0,
Opd1,
- CurDAG->getTargetConstant(LSB, VT),
- CurDAG->getTargetConstant(MSB, VT) };
+ CurDAG->getTargetConstant(LSB, dl, VT),
+ CurDAG->getTargetConstant(MSB, dl, VT) };
return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
}
@@ -2098,10 +2113,124 @@ AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
// finding FBits, but it must still be in range.
if (FBits == 0 || FBits > RegWidth) return false;
- FixedPos = CurDAG->getTargetConstant(FBits, MVT::i32);
+ FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
return true;
}
+// Inspects a register string of the form o0:op1:CRn:CRm:op2 gets the fields
+// of the string and obtains the integer values from them and combines these
+// into a single value to be used in the MRS/MSR instruction.
+static int getIntOperandFromRegisterString(StringRef RegString) {
+ SmallVector<StringRef, 5> Fields;
+ RegString.split(Fields, ":");
+
+ if (Fields.size() == 1)
+ return -1;
+
+ assert(Fields.size() == 5
+ && "Invalid number of fields in read register string");
+
+ SmallVector<int, 5> Ops;
+ bool AllIntFields = true;
+
+ for (StringRef Field : Fields) {
+ unsigned IntField;
+ AllIntFields &= !Field.getAsInteger(10, IntField);
+ Ops.push_back(IntField);
+ }
+
+ assert(AllIntFields &&
+ "Unexpected non-integer value in special register string.");
+
+ // Need to combine the integer fields of the string into a single value
+ // based on the bit encoding of MRS/MSR instruction.
+ return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
+ (Ops[3] << 3) | (Ops[4]);
+}
+
+// Lower the read_register intrinsic to an MRS instruction node if the special
+// register string argument is either of the form detailed in the ALCE (the
+// form described in getIntOperandsFromRegsterString) or is a named register
+// known by the MRS SysReg mapper.
+SDNode *AArch64DAGToDAGISel::SelectReadRegister(SDNode *N) {
+ const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
+ const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
+ SDLoc DL(N);
+
+ int Reg = getIntOperandFromRegisterString(RegString->getString());
+ if (Reg != -1)
+ return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
+ MVT::Other,
+ CurDAG->getTargetConstant(Reg, DL, MVT::i32),
+ N->getOperand(0));
+
+ // Use the sysreg mapper to map the remaining possible strings to the
+ // value for the register to be used for the instruction operand.
+ AArch64SysReg::MRSMapper mapper;
+ bool IsValidSpecialReg;
+ Reg = mapper.fromString(RegString->getString(),
+ Subtarget->getFeatureBits(),
+ IsValidSpecialReg);
+ if (IsValidSpecialReg)
+ return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
+ MVT::Other,
+ CurDAG->getTargetConstant(Reg, DL, MVT::i32),
+ N->getOperand(0));
+
+ return nullptr;
+}
+
+// Lower the write_register intrinsic to an MSR instruction node if the special
+// register string argument is either of the form detailed in the ALCE (the
+// form described in getIntOperandsFromRegsterString) or is a named register
+// known by the MSR SysReg mapper.
+SDNode *AArch64DAGToDAGISel::SelectWriteRegister(SDNode *N) {
+ const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
+ const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
+ SDLoc DL(N);
+
+ int Reg = getIntOperandFromRegisterString(RegString->getString());
+ if (Reg != -1)
+ return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
+ CurDAG->getTargetConstant(Reg, DL, MVT::i32),
+ N->getOperand(2), N->getOperand(0));
+
+ // Check if the register was one of those allowed as the pstatefield value in
+ // the MSR (immediate) instruction. To accept the values allowed in the
+ // pstatefield for the MSR (immediate) instruction, we also require that an
+ // immediate value has been provided as an argument, we know that this is
+ // the case as it has been ensured by semantic checking.
+ AArch64PState::PStateMapper PMapper;
+ bool IsValidSpecialReg;
+ Reg = PMapper.fromString(RegString->getString(),
+ Subtarget->getFeatureBits(),
+ IsValidSpecialReg);
+ if (IsValidSpecialReg) {
+ assert (isa<ConstantSDNode>(N->getOperand(2))
+ && "Expected a constant integer expression.");
+ uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+ return CurDAG->getMachineNode(AArch64::MSRpstate, DL, MVT::Other,
+ CurDAG->getTargetConstant(Reg, DL, MVT::i32),
+ CurDAG->getTargetConstant(Immed, DL, MVT::i16),
+ N->getOperand(0));
+ }
+
+ // Use the sysreg mapper to attempt to map the remaining possible strings
+ // to the value for the register to be used for the MSR (register)
+ // instruction operand.
+ AArch64SysReg::MSRMapper Mapper;
+ Reg = Mapper.fromString(RegString->getString(),
+ Subtarget->getFeatureBits(),
+ IsValidSpecialReg);
+
+ if (IsValidSpecialReg)
+ return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
+ CurDAG->getTargetConstant(Reg, DL, MVT::i32),
+ N->getOperand(2), N->getOperand(0));
+
+ return nullptr;
+}
+
SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
// Dump information about the Node being selected
DEBUG(errs() << "Selecting: ");
@@ -2123,6 +2252,16 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
default:
break;
+ case ISD::READ_REGISTER:
+ if (SDNode *Res = SelectReadRegister(Node))
+ return Res;
+ break;
+
+ case ISD::WRITE_REGISTER:
+ if (SDNode *Res = SelectWriteRegister(Node))
+ return Res;
+ break;
+
case ISD::ADD:
if (SDNode *I = SelectMLAV64LaneV128(Node))
return I;
@@ -2213,8 +2352,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
const TargetLowering *TLI = getTargetLowering();
SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
- SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
- CurDAG->getTargetConstant(Shifter, MVT::i32) };
+ SDLoc DL(Node);
+ SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
+ CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
}
case ISD::INTRINSIC_W_CHAIN: {
@@ -2250,11 +2390,7 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
SDValue MemAddr = Node->getOperand(4);
// Place arguments in the right order.
- SmallVector<SDValue, 7> Ops;
- Ops.push_back(ValLo);
- Ops.push_back(ValHi);
- Ops.push_back(MemAddr);
- Ops.push_back(Chain);
+ SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
// Transfer memoperands.
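
For reference, the field packing that the new getIntOperandFromRegisterString performs on an "op0:op1:CRn:CRm:op2" system-register string can be reproduced outside LLVM. A standalone sketch in plain C++ (hypothetical helper name and example value, shown for illustration; the in-tree code uses StringRef and asserts on malformed input rather than returning -1):

#include <cstdio>
#include <sstream>
#include <string>
#include <vector>

// Pack the five colon-separated fields into the MRS/MSR system-register
// immediate: op0 in bits 15:14, op1 in 13:11, CRn in 10:7, CRm in 6:3,
// op2 in 2:0. A string without five fields (e.g. a named register) yields
// -1, so the caller falls back to the SysReg name mappers, as in the diff.
static int packSysRegString(const std::string &RegString) {
  std::vector<std::string> Fields;
  std::stringstream SS(RegString);
  std::string Tok;
  while (std::getline(SS, Tok, ':'))
    Fields.push_back(Tok);
  if (Fields.size() != 5)
    return -1;
  int Ops[5];
  for (int i = 0; i < 5; ++i)
    Ops[i] = std::stoi(Fields[i]);
  return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) | (Ops[3] << 3) |
         Ops[4];
}

int main() {
  // NZCV is encoded as op0=3, op1=3, CRn=4, CRm=2, op2=0, i.e. 0xda10.
  std::printf("0x%x\n", packSysRegString("3:3:4:2:0"));
  return 0;
}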