Diffstat (limited to 'contrib/llvm/lib/Target/X86/X86ISelLowering.h')
-rw-r--r--  contrib/llvm/lib/Target/X86/X86ISelLowering.h | 136
1 file changed, 87 insertions(+), 49 deletions(-)
diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.h b/contrib/llvm/lib/Target/X86/X86ISelLowering.h
index 2727e22..bc3dd60 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.h
@@ -53,6 +53,10 @@ namespace llvm {
/// to X86::XORPS or X86::XORPD.
FXOR,
+ /// FANDN - Bitwise logical ANDNOT of floating point values. This
+ /// corresponds to X86::ANDNPS or X86::ANDNPD.
+ FANDN,
+
/// FSRL - Bitwise logical right shift of floating point values. This
/// corresponds to X86::PSRLDQ.
FSRL,
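The new FANDN node computes the ANDNOT of its operands (~LHS & RHS). As a hedged illustration — not code from this patch — a DAG combine in the spirit of X86ISelLowering.cpp could form it from an all-ones FXOR feeding an FAND:

    // Hypothetical combine: (fand (fxor X, all-ones), Y) -> (fandn X, Y).
    // X86ISD::FANDN computes ~X & Y, matching ANDNPS/ANDNPD semantics.
    static SDValue combineFAndToFAndn(SDNode *N, SelectionDAG &DAG) {
      SDValue Op0 = N->getOperand(0), Op1 = N->getOperand(1);
      if (Op0.getOpcode() == X86ISD::FXOR &&
          ISD::isBuildVectorAllOnes(Op0.getOperand(1).getNode()))
        return DAG.getNode(X86ISD::FANDN, SDLoc(N), N->getValueType(0),
                           Op0.getOperand(0), Op1);
      return SDValue(); // no match; leave the node for other combines
    }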
@@ -250,6 +254,12 @@ namespace llvm {
// VSEXT - Vector integer signed-extend.
VSEXT,
+ // VTRUNC - Vector integer truncate.
+ VTRUNC,
+
+ // VTRUNCM - Vector integer truncate with mask.
+ VTRUNCM,
+
// VFPEXT - Vector FP extend.
VFPEXT,
@@ -270,6 +280,13 @@ namespace llvm {
// PCMP* - Vector integer comparisons.
PCMPEQ, PCMPGT,
+ // PCMP*M - Vector integer comparisons; the result is in a mask vector.
+ PCMPEQM, PCMPGTM,
+
+ /// CMPM, CMPMU - Vector comparison generating mask bits for fp and
+ /// integer signed and unsigned data types.
+ CMPM,
+ CMPMU,
// ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
ADD, SUB, ADC, SBB, SMUL,
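As a hedged sketch of the difference between the existing compares and the new mask-producing ones: PCMPEQ yields 0/-1 lanes in an ordinary vector register, while PCMPEQM yields one bit per lane in an AVX-512 mask register. The types below are illustrative assumptions:

    // Classic form: each lane becomes all-zeros or all-ones.
    SDValue Cmp  = DAG.getNode(X86ISD::PCMPEQ,  dl, MVT::v8i32, A, B);
    // AVX-512 form: the result is a v8i1 mask (a k-register).
    SDValue Mask = DAG.getNode(X86ISD::PCMPEQM, dl, MVT::v8i1,  A, B);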
@@ -278,18 +295,27 @@ namespace llvm {
BLSI, // BLSI - Extract lowest set isolated bit
BLSMSK, // BLSMSK - Get mask up to lowest set bit
BLSR, // BLSR - Reset lowest set bit
+ BZHI, // BZHI - Zero high bits
+ BEXTR, // BEXTR - Bit field extract
UMUL, // LOW, HI, FLAGS = umul LHS, RHS
// MUL_IMM - X86 specific multiply by immediate.
MUL_IMM,
- // PTEST - Vector bitwise comparisons
+ // PTEST - Vector bitwise comparisons.
PTEST,
- // TESTP - Vector packed fp sign bitwise comparisons
+ // TESTP - Vector packed fp sign bitwise comparisons.
TESTP,
+ // TESTM - Vector "test" in AVX-512; the result is in a mask vector.
+ TESTM,
+
+ // OR/AND tests for masks.
+ KORTEST,
+ KTEST,
+
// Several flavors of instructions with vector shuffle behaviors.
PALIGNR,
PSHUFD,
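BZHI and BEXTR come from the BMI2/TBM bit-manipulation extensions. A plain-C++ sketch of BZHI's semantics (zeroing all bits at positions >= Index) may help; this illustrates the operation itself, not the lowering:

    #include <cstdint>

    // BZHI: keep the low Index bits of Src, zero the rest. For Index >= 64
    // the hardware returns Src unchanged.
    uint64_t bzhi64(uint64_t Src, unsigned Index) {
      return Index >= 64 ? Src : Src & ((1ULL << Index) - 1);
    }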
@@ -310,9 +336,13 @@ namespace llvm {
UNPCKH,
VPERMILP,
VPERMV,
+ VPERMV3,
VPERMI,
VPERM2X128,
VBROADCAST,
+ // Masked broadcast.
+ VBROADCASTM,
+ VINSERT,
// PMULUDQ - Vector multiply packed unsigned doubleword integers
PMULUDQ,
@@ -434,25 +464,45 @@ namespace llvm {
/// Define some predicates that are used for node matching.
namespace X86 {
- /// isVEXTRACTF128Index - Return true if the specified
+ /// isVEXTRACT128Index - Return true if the specified
+ /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
+ /// suitable for input to VEXTRACTF128, VEXTRACTI128 instructions.
+ bool isVEXTRACT128Index(SDNode *N);
+
+ /// isVINSERT128Index - Return true if the specified
+ /// INSERT_SUBVECTOR operand specifies a subvector insert that is
+ /// suitable for input to VINSERTF128, VINSERTI128 instructions.
+ bool isVINSERT128Index(SDNode *N);
+
+ /// isVEXTRACT256Index - Return true if the specified
/// EXTRACT_SUBVECTOR operand specifies a vector extract that is
- /// suitable for input to VEXTRACTF128.
- bool isVEXTRACTF128Index(SDNode *N);
+ /// suitable for input to VEXTRACTF64X4, VEXTRACTI64X4 instructions.
+ bool isVEXTRACT256Index(SDNode *N);
- /// isVINSERTF128Index - Return true if the specified
+ /// isVINSERT256Index - Return true if the specified
/// INSERT_SUBVECTOR operand specifies a subvector insert that is
- /// suitable for input to VINSERTF128.
- bool isVINSERTF128Index(SDNode *N);
+ /// suitable for input to VINSERTF64X4, VINSERTI64X4 instructions.
+ bool isVINSERT256Index(SDNode *N);
- /// getExtractVEXTRACTF128Immediate - Return the appropriate
+ /// getExtractVEXTRACT128Immediate - Return the appropriate
/// immediate to extract the specified EXTRACT_SUBVECTOR index
- /// with VEXTRACTF128 instructions.
- unsigned getExtractVEXTRACTF128Immediate(SDNode *N);
+ /// with VEXTRACTF128, VEXTRACTI128 instructions.
+ unsigned getExtractVEXTRACT128Immediate(SDNode *N);
- /// getInsertVINSERTF128Immediate - Return the appropriate
+ /// getInsertVINSERT128Immediate - Return the appropriate
/// immediate to insert at the specified INSERT_SUBVECTOR index
- /// with VINSERTF128 instructions.
- unsigned getInsertVINSERTF128Immediate(SDNode *N);
+ /// with VINSERTF128, VINSERTI128 instructions.
+ unsigned getInsertVINSERT128Immediate(SDNode *N);
+
+ /// getExtractVEXTRACT256Immediate - Return the appropriate
+ /// immediate to extract the specified EXTRACT_SUBVECTOR index
+ /// with VEXTRACTF64X4, VEXTRACTI64X4 instructions.
+ unsigned getExtractVEXTRACT256Immediate(SDNode *N);
+
+ /// getInsertVINSERT256Immediate - Return the appropriate
+ /// immediate to insert at the specified INSERT_SUBVECTOR index
+ /// with VINSERTF64X4, VINSERTI64X4 instructions.
+ unsigned getInsertVINSERT256Immediate(SDNode *N);
/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
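The renamed predicates and immediate helpers generalize the old VEXTRACTF128/VINSERTF128-only versions to both float/int forms and to 256-bit granularity. A hedged sketch of the shared computation they typically reduce to — the constant subvector index rescaled to 128- or 256-bit lanes; the helper name and formula are assumptions modeled on the usual LLVM pattern:

    // Hypothetical shared helper: rescale an EXTRACT_SUBVECTOR element
    // index to vecWidthInBits-sized lanes (128 or 256).
    static unsigned getSubvectorImmediate(SDNode *N, unsigned vecWidthInBits) {
      uint64_t Index = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
      MVT VecVT = N->getOperand(0).getSimpleValueType();
      unsigned EltBits = VecVT.getVectorElementType().getSizeInBits();
      return (unsigned)(Index * EltBits / vecWidthInBits);
    }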
@@ -511,7 +561,7 @@ namespace llvm {
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
virtual EVT
- getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
+ getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
MachineFunction &MF) const;
@@ -563,7 +613,7 @@ namespace llvm {
virtual const char *getTargetNodeName(unsigned Opcode) const;
/// getSetCCResultType - Return the value type to use for ISD::SETCC.
- virtual EVT getSetCCResultType(EVT VT) const;
+ virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
/// computeMaskedBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
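The added LLVMContext parameter lets the hook synthesize new types rather than only return cached ones. A minimal sketch of why that matters here, assuming an AVX-512-style vXi1 mask result for vector compares (the policy shown is an assumption, not this patch's code):

    EVT getSetCCResultType(LLVMContext &Context, EVT VT) const {
      if (VT.isVector()) // build a fresh vXi1 mask type via the context
        return EVT::getVectorVT(Context, MVT::i1, VT.getVectorNumElements());
      return MVT::i8;    // scalar setcc on x86 traditionally yields i8
    }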
@@ -610,7 +660,7 @@ namespace llvm {
/// error, this returns a register number of 0.
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const;
+ MVT VT) const;
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
@@ -634,6 +684,8 @@ namespace llvm {
virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
+ virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;
+
/// isZExtFree - Return true if any actual instruction that defines a
/// value of type Ty1 implicit zero-extends the value to Ty2 in the result
/// register. This does not necessarily include registers defined in
@@ -646,11 +698,11 @@ namespace llvm {
virtual bool isZExtFree(EVT VT1, EVT VT2) const;
virtual bool isZExtFree(SDValue Val, EVT VT2) const;
- /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
- /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
- /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
- /// is expanded to mul + add.
- virtual bool isFMAFasterThanMulAndAdd(EVT) const { return true; }
+ /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
+ /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
+ /// expanded to FMAs when this method returns true, otherwise fmuladd is
+ /// expanded to fmul + fadd.
+ virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
/// isNarrowingProfitable - Return true if it's profitable to narrow
/// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
@@ -723,6 +775,8 @@ namespace llvm {
SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
SelectionDAG &DAG) const;
+ virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const LLVM_OVERRIDE;
+
/// \brief Reset the operation actions based on target options.
virtual void resetOperationActions();
@@ -734,7 +788,6 @@ namespace llvm {
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
const X86Subtarget *Subtarget;
- const X86RegisterInfo *RegInfo;
const DataLayout *TD;
/// Used to store the TargetOptions so that we don't waste time resetting
@@ -760,16 +813,16 @@ namespace llvm {
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerMemArgument(SDValue Chain,
CallingConv::ID CallConv,
const SmallVectorImpl<ISD::InputArg> &ArgInfo,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
const CCValAssign &VA, MachineFrameInfo *MFI,
unsigned i) const;
SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const;
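The DebugLoc -> SDLoc change in these signatures mostly affects call sites: SDLoc is constructed from the node or value being lowered and carries IR ordering as well as the debug location. A small sketch of the idiom, using a hypothetical helper:

    SDValue lowerExample(SDValue Op, SelectionDAG &DAG) { // hypothetical
      SDLoc dl(Op); // previously: DebugLoc dl = Op.getDebugLoc();
      return DAG.getNode(ISD::ADD, dl, Op.getValueType(),
                         Op.getOperand(0), Op.getOperand(1));
    }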
@@ -791,7 +844,7 @@ namespace llvm {
bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
SDValue Chain, bool IsTailCall, bool Is64Bit,
- int FPDiff, DebugLoc dl) const;
+ int FPDiff, SDLoc dl) const;
unsigned GetAlignedArgumentStackSize(unsigned StackSize,
SelectionDAG &DAG) const;
@@ -800,37 +853,32 @@ namespace llvm {
bool isSigned,
bool isReplace) const;
- SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
- SelectionDAG &DAG) const;
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
+ SDValue LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
int64_t Offset, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerBITCAST(SDValue op, SelectionDAG &DAG) const;
SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerANY_EXTEND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerToBT(SDValue And, ISD::CondCode CC,
- DebugLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
@@ -847,25 +895,13 @@ namespace llvm {
SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
-
- // Utility functions to help LowerVECTOR_SHUFFLE & LowerBUILD_VECTOR
- SDValue LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const;
- SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const;
- SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const;
-
- SDValue LowerVectorAllZeroTest(SDValue Op, SelectionDAG &DAG) const;
-
- SDValue LowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const;
virtual SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
LowerCall(CallLoweringInfo &CLI,
@@ -876,7 +912,7 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const;
virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;
@@ -891,6 +927,8 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const;
+ virtual const uint16_t *getScratchRegisters(CallingConv::ID CC) const;
+
/// Utility function to emit atomic-load-arith operations (and, or, xor,
/// nand, max, min, umax, umin). It takes the corresponding instruction to
/// expand, the associated machine basic block, and the associated X86
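For the new getScratchRegisters hook, a hedged sketch of a typical override — a null-terminated list of registers the target may clobber around certain call sequences; the choice of R11 is an assumption, not taken from this diff:

    const uint16_t *
    X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
      // Null-terminated; R11 is a plausible x86-64 scratch register.
      static const uint16_t ScratchRegs[] = { X86::R11, 0 };
      return ScratchRegs;
    }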