Diffstat (limited to 'contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h')
-rw-r--r--  contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h | 147
1 file changed, 129 insertions(+), 18 deletions(-)
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index d49b3ee..8ad5a79 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -19,7 +19,7 @@
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
-
+#include "llvm/IR/Intrinsics.h"
namespace llvm {
namespace AArch64ISD {
@@ -111,7 +111,92 @@ namespace AArch64ISD {
// created using the small memory model style: i.e. adrp/add or
// adrp/mem-op. This exists to prevent bare TargetAddresses which may never
// get selected.
- WrapperSmall
+ WrapperSmall,
+
+ // Vector bitwise select
+ NEON_BSL,
+
+ // Vector move immediate
+ NEON_MOVIMM,
+
+ // Vector move inverted immediate
+ NEON_MVNIMM,
+
+ // Vector FP move immediate
+ NEON_FMOVIMM,
+
+ // Vector permute
+ NEON_UZP1,
+ NEON_UZP2,
+ NEON_ZIP1,
+ NEON_ZIP2,
+ NEON_TRN1,
+ NEON_TRN2,
+
+ // Vector element reverse
+ NEON_REV64,
+ NEON_REV32,
+ NEON_REV16,
+
+ // Vector compare
+ NEON_CMP,
+
+ // Vector compare zero
+ NEON_CMPZ,
+
+ // Vector compare bitwise test
+ NEON_TST,
+
+ // Vector saturating shift
+ NEON_QSHLs,
+ NEON_QSHLu,
+
+ // Vector dup
+ NEON_VDUP,
+
+ // Vector dup by lane
+ NEON_VDUPLANE,
+
+ // Vector extract
+ NEON_VEXTRACT,
+
+ // NEON duplicate lane loads
+ NEON_LD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
+ NEON_LD3DUP,
+ NEON_LD4DUP,
+
+ // NEON loads with post-increment base updates:
+ NEON_LD1_UPD,
+ NEON_LD2_UPD,
+ NEON_LD3_UPD,
+ NEON_LD4_UPD,
+ NEON_LD1x2_UPD,
+ NEON_LD1x3_UPD,
+ NEON_LD1x4_UPD,
+
+ // NEON stores with post-increment base updates:
+ NEON_ST1_UPD,
+ NEON_ST2_UPD,
+ NEON_ST3_UPD,
+ NEON_ST4_UPD,
+ NEON_ST1x2_UPD,
+ NEON_ST1x3_UPD,
+ NEON_ST1x4_UPD,
+
+ // NEON duplicate lane loads with post-increment base updates:
+ NEON_LD2DUP_UPD,
+ NEON_LD3DUP_UPD,
+ NEON_LD4DUP_UPD,
+
+ // NEON lane loads with post-increment base updates:
+ NEON_LD2LN_UPD,
+ NEON_LD3LN_UPD,
+ NEON_LD4LN_UPD,
+
+ // NEON lane stores with post-increment base updates:
+ NEON_ST2LN_UPD,
+ NEON_ST3LN_UPD,
+ NEON_ST4LN_UPD
};
}
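
The new AArch64ISD opcodes above map one-to-one onto NEON instructions. The least self-explanatory is NEON_BSL, the vector bitwise select: each result bit comes from the first value operand where the corresponding mask bit is set, and from the second otherwise. Below is a minimal scalar model of the per-lane semantics; the function names are illustrative only and are not LLVM API.

#include <cstddef>
#include <cstdint>

// Per-lane model of NEON BSL: bits of `a` where `mask` is 1, bits of `b`
// where it is 0. The instruction applies this across every vector lane.
static uint64_t neon_bsl_lane(uint64_t mask, uint64_t a, uint64_t b) {
  return (a & mask) | (b & ~mask);
}

// The UZP/ZIP/TRN opcodes are pure lane permutes. For example, ZIP1
// interleaves the low halves of its two inputs: {a0, b0, a1, b1, ...}.
static void neon_zip1(const uint64_t *a, const uint64_t *b, uint64_t *out,
                      size_t lanes) {
  for (size_t i = 0; i < lanes / 2; ++i) {
    out[2 * i]     = a[i];
    out[2 * i + 1] = b[i];
  }
}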
@@ -130,14 +215,14 @@ public:
SDValue LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const;
SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
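
The DebugLoc to SDLoc changes in this hunk and those below track an LLVM-wide API migration of the period: SDLoc bundles the debug location with the node's IR order and is constructed directly from the value being lowered. A sketch of the resulting pattern inside a lowering hook follows; LowerExample is a hypothetical name, not a function from this patch, and the body assumes the usual backend headers.

SDValue AArch64TargetLowering::LowerExample(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);  // built from the node itself; replaces passing a DebugLoc
  EVT VT = Op.getValueType();
  return DAG.getNode(ISD::ADD, dl, VT, Op.getOperand(0), Op.getOperand(1));
}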
@@ -145,12 +230,18 @@ public:
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
- void SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
- DebugLoc DL, SDValue &Chain) const;
+ bool isKnownShuffleVector(SDValue Op, SelectionDAG &DAG, SDValue &Res) const;
+
+ SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
+ const AArch64Subtarget *ST) const;
+
+ SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+ void SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
+ SDValue &Chain) const;
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
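
The LowerBUILD_VECTOR and LowerVECTOR_SHUFFLE hooks declared above produce most of the new NEON opcodes. A common first step in a BUILD_VECTOR lowering is splat detection, which feeds the NEON_VDUP node; the sketch below shows only that general shape, assumes the usual backend headers, and is not the patch's actual implementation.

static SDValue lowerSplatSketch(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  // A BUILD_VECTOR whose operands are all identical is a splat.
  SDValue First = Op.getOperand(0);
  for (unsigned i = 1, e = Op.getNumOperands(); i != e; ++i)
    if (Op.getOperand(i) != First)
      return SDValue();  // not a splat; leave it to the other lowering paths
  return DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, First);
}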
@@ -171,7 +262,7 @@ public:
SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
MachineFrameInfo *MFI, int ClobberedFI) const;
- EVT getSetCCResultType(EVT VT) const;
+ EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;
@@ -181,7 +272,7 @@ public:
bool isLegalICmpImmediate(int64_t Val) const;
SDValue getSelectableIntSetCC(SDValue LHS, SDValue RHS, ISD::CondCode CC,
- SDValue &A64cc, SelectionDAG &DAG, DebugLoc &dl) const;
+ SDValue &A64cc, SelectionDAG &DAG, SDLoc &dl) const;
virtual MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
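
getSetCCResultType gains an LLVMContext parameter here, matching a TargetLowering interface change of the same period. Definitions accompanying this signature conventionally return a scalar condition type for scalar compares and an integer vector of the same shape for vector compares; the body below is a plausible sketch, not necessarily what this patch implements.

EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &Context,
                                              EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;  // scalar setcc results live in a GPR
  // Vector compares yield an all-ones or all-zeros lane per element.
  return VT.changeVectorElementTypeToInteger();
}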
@@ -211,12 +302,14 @@ public:
SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, bool IsSigned) const;
+ SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddressELFSmall(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddressELFLarge(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerTLSDescCall(SDValue SymAddr, SDValue DescAddr, DebugLoc DL,
+ SDValue LowerTLSDescCall(SDValue SymAddr, SDValue DescAddr, SDLoc DL,
SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, bool IsSigned) const;
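
LowerRETURNADDR and LowerFRAMEADDR are new in this hunk. A frame-address lowering conventionally copies the frame pointer (X29 on AArch64) out of its register and, for non-zero depths, chases the saved frame pointers through memory. The sketch below follows that standard shape with the SelectionDAG API of this era and is an assumption, not the patch's verified body.

SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDValue FrameAddr =
      DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::X29, VT);
  while (Depth--)  // each saved FP sits at offset 0 of the previous frame
    FrameAddr = DAG.getLoad(VT, DL, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo(), false, false, false, 0);
  return FrameAddr;
}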
@@ -229,11 +322,11 @@ public:
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
- /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
- /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
- /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
- /// is expanded to mul + add.
- virtual bool isFMAFasterThanMulAndAdd(EVT) const { return true; }
+ /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
+ /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
+ /// expanded to FMAs when this method returns true, otherwise fmuladd is
+ /// expanded to fmul + fadd.
+ virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
ConstraintType getConstraintType(const std::string &Constraint) const;
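
The renamed isFMAFasterThanFMulAndFAdd also moves out of line: instead of the header's unconditional `return true`, the answer now depends on the type. Since AArch64 has single-instruction FMADD/FMSUB for its floating-point types, a definition along the following lines would be expected; this is a sketch, not the patch's verified body.

bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();
  if (!VT.isSimple())
    return false;
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;  // FMADD/FMSUB fuse the multiply and add in one instruction
  default:
    return false;
  }
}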
@@ -245,12 +338,30 @@ public:
SelectionDAG &DAG) const;
std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const;
+ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const;
+
+ virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
+ unsigned Intrinsic) const LLVM_OVERRIDE;
+
+protected:
+ std::pair<const TargetRegisterClass*, uint8_t>
+ findRepresentativeClass(MVT VT) const;
+
private:
- const AArch64Subtarget *Subtarget;
- const TargetRegisterInfo *RegInfo;
const InstrItineraryData *Itins;
+
+ const AArch64Subtarget *getSubtarget() const {
+ return &getTargetMachine().getSubtarget<AArch64Subtarget>();
+ }
};
+enum NeonModImmType {
+ Neon_Mov_Imm,
+ Neon_Mvn_Imm
+};
+
+extern SDValue ScanBUILD_VECTOR(SDValue Op, bool &isOnlyLowElement,
+ bool &usesOnlyOneValue, bool &hasDominantValue,
+ bool &isConstant, bool &isUNDEF);
} // namespace llvm
#endif // LLVM_TARGET_AARCH64_ISELLOWERING_H
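
The getTgtMemIntrinsic override declared in the last hunk is what lets the structured NEON load/store intrinsics describe their memory behavior: the hook fills an IntrinsicInfo record so the DAG builder can attach a MachineMemOperand to the resulting node. The case below is purely illustrative; the intrinsic chosen and every constant in it are assumptions, not values taken from this patch.

bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                               const CallInst &I,
                                               unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld2: {    // illustrative choice of intrinsic
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::v2i64;          // assumed size of the full transfer
    Info.ptrVal = I.getArgOperand(0); // base pointer of the load
    Info.offset = 0;
    Info.align = 16;                  // assumed 128-bit alignment
    Info.vol = false;
    Info.readMem = true;              // a load reads memory, writes none
    Info.writeMem = false;
    return true;
  }
  default:
    return false;
  }
}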