Diffstat (limited to 'lib/Target/AArch64/AArch64ISelDAGToDAG.cpp')
-rw-r--r-- | lib/Target/AArch64/AArch64ISelDAGToDAG.cpp | 181
1 file changed, 162 insertions, 19 deletions
diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 46b8221..102c71b 100644
--- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -70,6 +70,15 @@ public:
     return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
   }
 
+  /// Used for pre-lowered address-reference nodes, so we already know
+  /// the fields match. This operand's job is simply to add an
+  /// appropriate shift operand (i.e. 0) to the MOVZ/MOVK instruction.
+  bool SelectMOVWAddressRef(SDValue N, SDValue &Imm, SDValue &Shift) {
+    Imm = N;
+    Shift = CurDAG->getTargetConstant(0, MVT::i32);
+    return true;
+  }
+
   bool SelectFPZeroOperand(SDValue N, SDValue &Dummy);
 
   bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
@@ -88,6 +97,13 @@ public:
 
   bool SelectTSTBOperand(SDValue N, SDValue &FixedPos, unsigned RegWidth);
 
+  SDNode *SelectAtomic(SDNode *N, unsigned Op8, unsigned Op16, unsigned Op32,
+                       unsigned Op64);
+
+  /// Put the given constant into a pool and return a DAG which will give its
+  /// address.
+  SDValue getConstantPoolItemAddress(DebugLoc DL, const Constant *CV);
+
   SDNode *TrySelectToMoveImm(SDNode *N);
   SDNode *LowerToFPLitPool(SDNode *Node);
   SDNode *SelectToLitPool(SDNode *N);
@@ -224,12 +240,51 @@ SDNode *AArch64DAGToDAGISel::TrySelectToMoveImm(SDNode *Node) {
   return ResNode;
 }
 
+SDValue
+AArch64DAGToDAGISel::getConstantPoolItemAddress(DebugLoc DL,
+                                                const Constant *CV) {
+  EVT PtrVT = TLI.getPointerTy();
+
+  switch (TLI.getTargetMachine().getCodeModel()) {
+  case CodeModel::Small: {
+    unsigned Alignment =
+        TLI.getDataLayout()->getABITypeAlignment(CV->getType());
+    return CurDAG->getNode(
+        AArch64ISD::WrapperSmall, DL, PtrVT,
+        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_NO_FLAG),
+        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_LO12),
+        CurDAG->getConstant(Alignment, MVT::i32));
+  }
+  case CodeModel::Large: {
+    SDNode *LitAddr;
+    LitAddr = CurDAG->getMachineNode(
+        AArch64::MOVZxii, DL, PtrVT,
+        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G3),
+        CurDAG->getTargetConstant(0, MVT::i32));
+    LitAddr = CurDAG->getMachineNode(
+        AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
+        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G2_NC),
+        CurDAG->getTargetConstant(0, MVT::i32));
+    LitAddr = CurDAG->getMachineNode(
+        AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
+        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G1_NC),
+        CurDAG->getTargetConstant(0, MVT::i32));
+    LitAddr = CurDAG->getMachineNode(
+        AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
+        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G0_NC),
+        CurDAG->getTargetConstant(0, MVT::i32));
+    return SDValue(LitAddr, 0);
+  }
+  default:
+    llvm_unreachable("Only small and large code models supported now");
+  }
+}
+
 SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) {
   DebugLoc DL = Node->getDebugLoc();
   uint64_t UnsignedVal = cast<ConstantSDNode>(Node)->getZExtValue();
   int64_t SignedVal = cast<ConstantSDNode>(Node)->getSExtValue();
   EVT DestType = Node->getValueType(0);
-  EVT PtrVT = TLI.getPointerTy();
 
   // Since we may end up loading a 64-bit constant from a 32-bit entry the
   // constant in the pool may have a different type to the eventual node.
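Note: in the large-code-model path above, the pool address is materialized one 16-bit chunk at a time. MOVZxii writes the top chunk and zeroes the rest; the three MOVKxii instructions insert the remaining chunks without disturbing bits already set, with the MO_ABS_G3..MO_ABS_G0_NC relocations selecting which 16-bit group of the symbol's address each instruction receives. A minimal sketch of the reassembly in plain C++ rather than SelectionDAG code (the function name is illustrative):

    #include <cstdint>

    // Reassemble a 64-bit address from the four 16-bit groups the MOVZ/MOVK
    // sequence writes (G3 = bits 63..48, ..., G0 = bits 15..0).
    uint64_t materializeLargeAddress(uint16_t G3, uint16_t G2,
                                     uint16_t G1, uint16_t G0) {
      uint64_t Addr = uint64_t(G3) << 48; // MOVZxii: set bits 63..48, zero rest
      Addr |= uint64_t(G2) << 32;         // MOVKxii: keep others, insert 47..32
      Addr |= uint64_t(G1) << 16;         // MOVKxii: insert bits 31..16
      Addr |= uint64_t(G0);               // MOVKxii: insert bits 15..0
      return Addr;
    }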
@@ -256,14 +311,8 @@ SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) {
   Constant *CV = ConstantInt::get(Type::getIntNTy(*CurDAG->getContext(),
                                                   MemType.getSizeInBits()),
                                   UnsignedVal);
-  SDValue PoolAddr;
+  SDValue PoolAddr = getConstantPoolItemAddress(DL, CV);
   unsigned Alignment = TLI.getDataLayout()->getABITypeAlignment(CV->getType());
-  PoolAddr = CurDAG->getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
-                             CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0,
-                                                           AArch64II::MO_NO_FLAG),
-                             CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0,
-                                                           AArch64II::MO_LO12),
-                             CurDAG->getConstant(Alignment, MVT::i32));
 
   return CurDAG->getExtLoad(Extension, DL, DestType,
                             CurDAG->getEntryNode(), PoolAddr,
@@ -276,20 +325,10 @@
 SDNode *AArch64DAGToDAGISel::LowerToFPLitPool(SDNode *Node) {
   DebugLoc DL = Node->getDebugLoc();
   const ConstantFP *FV = cast<ConstantFPSDNode>(Node)->getConstantFPValue();
-  EVT PtrVT = TLI.getPointerTy();
   EVT DestType = Node->getValueType(0);
 
   unsigned Alignment = TLI.getDataLayout()->getABITypeAlignment(FV->getType());
-  SDValue PoolAddr;
-
-  assert(TM.getCodeModel() == CodeModel::Small &&
-         "Only small code model supported");
-  PoolAddr = CurDAG->getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
-                             CurDAG->getTargetConstantPool(FV, PtrVT, 0, 0,
-                                                           AArch64II::MO_NO_FLAG),
-                             CurDAG->getTargetConstantPool(FV, PtrVT, 0, 0,
-                                                           AArch64II::MO_LO12),
-                             CurDAG->getConstant(Alignment, MVT::i32));
+  SDValue PoolAddr = getConstantPoolItemAddress(DL, FV);
 
   return CurDAG->getLoad(DestType, DL, CurDAG->getEntryNode(), PoolAddr,
                          MachinePointerInfo::getConstantPool(),
@@ -318,6 +357,38 @@ AArch64DAGToDAGISel::SelectTSTBOperand(SDValue N, SDValue &FixedPos,
   return true;
 }
 
+SDNode *AArch64DAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8,
+                                          unsigned Op16, unsigned Op32,
+                                          unsigned Op64) {
+  // Mostly direct translation to the given operations, except that we preserve
+  // the AtomicOrdering for use later on.
+  AtomicSDNode *AN = cast<AtomicSDNode>(Node);
+  EVT VT = AN->getMemoryVT();
+
+  unsigned Op;
+  if (VT == MVT::i8)
+    Op = Op8;
+  else if (VT == MVT::i16)
+    Op = Op16;
+  else if (VT == MVT::i32)
+    Op = Op32;
+  else if (VT == MVT::i64)
+    Op = Op64;
+  else
+    llvm_unreachable("Unexpected atomic operation");
+
+  SmallVector<SDValue, 4> Ops;
+  for (unsigned i = 1; i < AN->getNumOperands(); ++i)
+    Ops.push_back(AN->getOperand(i));
+
+  Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32));
+  Ops.push_back(AN->getOperand(0)); // Chain moves to the end
+
+  return CurDAG->SelectNodeTo(Node, Op,
+                              AN->getValueType(0), MVT::Other,
+                              &Ops[0], Ops.size());
+}
+
 SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
   // Dump information about the Node being selected
   DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << "\n");
@@ -328,6 +399,78 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
   }
 
   switch (Node->getOpcode()) {
+  case ISD::ATOMIC_LOAD_ADD:
+    return SelectAtomic(Node,
+                        AArch64::ATOMIC_LOAD_ADD_I8,
+                        AArch64::ATOMIC_LOAD_ADD_I16,
+                        AArch64::ATOMIC_LOAD_ADD_I32,
+                        AArch64::ATOMIC_LOAD_ADD_I64);
+  case ISD::ATOMIC_LOAD_SUB:
+    return SelectAtomic(Node,
+                        AArch64::ATOMIC_LOAD_SUB_I8,
+                        AArch64::ATOMIC_LOAD_SUB_I16,
+                        AArch64::ATOMIC_LOAD_SUB_I32,
+                        AArch64::ATOMIC_LOAD_SUB_I64);
+  case ISD::ATOMIC_LOAD_AND:
+    return SelectAtomic(Node,
+                        AArch64::ATOMIC_LOAD_AND_I8,
+                        AArch64::ATOMIC_LOAD_AND_I16,
+                        AArch64::ATOMIC_LOAD_AND_I32,
+                        AArch64::ATOMIC_LOAD_AND_I64);
+  case ISD::ATOMIC_LOAD_OR:
+    return SelectAtomic(Node,
+                        AArch64::ATOMIC_LOAD_OR_I8,
+                        AArch64::ATOMIC_LOAD_OR_I16,
+                        AArch64::ATOMIC_LOAD_OR_I32,
+                        AArch64::ATOMIC_LOAD_OR_I64);
+  case ISD::ATOMIC_LOAD_XOR:
+    return SelectAtomic(Node,
+                        AArch64::ATOMIC_LOAD_XOR_I8,
+                        AArch64::ATOMIC_LOAD_XOR_I16,
+                        AArch64::ATOMIC_LOAD_XOR_I32,
+                        AArch64::ATOMIC_LOAD_XOR_I64);
+  case ISD::ATOMIC_LOAD_NAND:
+    return SelectAtomic(Node,
+                        AArch64::ATOMIC_LOAD_NAND_I8,
+                        AArch64::ATOMIC_LOAD_NAND_I16,
+                        AArch64::ATOMIC_LOAD_NAND_I32,
+                        AArch64::ATOMIC_LOAD_NAND_I64);
+  case ISD::ATOMIC_LOAD_MIN:
+    return SelectAtomic(Node,
+                        AArch64::ATOMIC_LOAD_MIN_I8,
+                        AArch64::ATOMIC_LOAD_MIN_I16,
+                        AArch64::ATOMIC_LOAD_MIN_I32,
+                        AArch64::ATOMIC_LOAD_MIN_I64);
+  case ISD::ATOMIC_LOAD_MAX:
+    return SelectAtomic(Node,
+                        AArch64::ATOMIC_LOAD_MAX_I8,
+                        AArch64::ATOMIC_LOAD_MAX_I16,
+                        AArch64::ATOMIC_LOAD_MAX_I32,
+                        AArch64::ATOMIC_LOAD_MAX_I64);
+  case ISD::ATOMIC_LOAD_UMIN:
+    return SelectAtomic(Node,
+                        AArch64::ATOMIC_LOAD_UMIN_I8,
+                        AArch64::ATOMIC_LOAD_UMIN_I16,
+                        AArch64::ATOMIC_LOAD_UMIN_I32,
+                        AArch64::ATOMIC_LOAD_UMIN_I64);
+  case ISD::ATOMIC_LOAD_UMAX:
+    return SelectAtomic(Node,
+                        AArch64::ATOMIC_LOAD_UMAX_I8,
+                        AArch64::ATOMIC_LOAD_UMAX_I16,
+                        AArch64::ATOMIC_LOAD_UMAX_I32,
+                        AArch64::ATOMIC_LOAD_UMAX_I64);
+  case ISD::ATOMIC_SWAP:
+    return SelectAtomic(Node,
+                        AArch64::ATOMIC_SWAP_I8,
+                        AArch64::ATOMIC_SWAP_I16,
+                        AArch64::ATOMIC_SWAP_I32,
+                        AArch64::ATOMIC_SWAP_I64);
+  case ISD::ATOMIC_CMP_SWAP:
+    return SelectAtomic(Node,
+                        AArch64::ATOMIC_CMP_SWAP_I8,
+                        AArch64::ATOMIC_CMP_SWAP_I16,
+                        AArch64::ATOMIC_CMP_SWAP_I32,
+                        AArch64::ATOMIC_CMP_SWAP_I64);
   case ISD::FrameIndex: {
     int FI = cast<FrameIndexSDNode>(Node)->getIndex();
     EVT PtrTy = TLI.getPointerTy();
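Note: SelectAtomic rebuilds the operand list so that the chain ends up last and the memory ordering survives selection as an explicit immediate operand on the pseudo-instruction. A minimal sketch of that reshuffle, using plain integers in place of SDValues (names are illustrative, not LLVM API):

    #include <vector>

    // An atomic SDNode's operands are [Chain, Ptr, Val...]; the selected
    // pseudo-instruction expects [Ptr, Val..., Ordering, Chain].
    std::vector<int> reorderAtomicOperands(const std::vector<int> &NodeOps,
                                           int Ordering) {
      std::vector<int> Ops(NodeOps.begin() + 1, NodeOps.end()); // drop chain
      Ops.push_back(Ordering);   // ordering becomes an explicit immediate
      Ops.push_back(NodeOps[0]); // chain moves to the end
      return Ops;
    }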