Diffstat (limited to 'include/llvm/Target/TargetLowering.h')
-rw-r--r--  include/llvm/Target/TargetLowering.h | 282
1 file changed, 144 insertions(+), 138 deletions(-)
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 5efebe6..2b6e4fa 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -24,6 +24,7 @@
 #include "llvm/CallingConv.h"
 #include "llvm/InlineAsm.h"
+#include "llvm/Attributes.h"
 #include "llvm/CodeGen/SelectionDAGNodes.h"
 #include "llvm/CodeGen/RuntimeLibcalls.h"
 #include "llvm/ADT/APFloat.h"
@@ -32,6 +33,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/Support/DebugLoc.h"
+#include "llvm/Target/TargetCallingConv.h"
 #include "llvm/Target/TargetMachine.h"
 #include <climits>
 #include <map>
@@ -42,6 +44,7 @@ namespace llvm {
   class CallInst;
   class Function;
   class FastISel;
+  class FunctionLoweringInfo;
   class MachineBasicBlock;
   class MachineFunction;
   class MachineFrameInfo;
@@ -114,7 +117,7 @@ public:
   /// isSelectExpensive - Return true if the select operation is expensive for
   /// this target.
   bool isSelectExpensive() const { return SelectIsExpensive; }
-  
+
   /// isIntDivCheap() - Return true if integer divide is usually cheaper than
   /// a sequence of several shifts, adds, and multiplies for this target.
   bool isIntDivCheap() const { return IntDivIsCheap; }
@@ -131,10 +134,10 @@
   virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;

-  /// getCmpLibcallReturnType - Return the ValueType for comparison 
+  /// getCmpLibcallReturnType - Return the ValueType for comparison
   /// libcalls. Comparions libcalls include floating point comparion calls,
   /// and Ordered/Unordered check calls on floating point numbers.
-  virtual 
+  virtual
   MVT::SimpleValueType getCmpLibcallReturnType() const;

   /// getBooleanContents - For targets without i1 registers, this gives the
@@ -208,7 +211,7 @@ public:
       ValueTypeActions[I] = Action;
     }
   };
-  
+
   const ValueTypeActionImpl &getValueTypeActions() const {
     return ValueTypeActions;
   }
@@ -229,7 +232,7 @@
   /// returns the integer type to transform to.
   EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
     if (VT.isSimple()) {
-      assert((unsigned)VT.getSimpleVT().SimpleTy < 
+      assert((unsigned)VT.getSimpleVT().SimpleTy <
              array_lengthof(TransformToType));
       EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy];
       assert(getTypeAction(Context, NVT) != Promote &&
@@ -256,7 +259,7 @@
         return EVT::getIntegerVT(Context, VT.getSizeInBits() / 2);
       else
         // Promote to a power of two size, avoiding multi-step promotion.
-        return getTypeAction(Context, NVT) == Promote ? 
+        return getTypeAction(Context, NVT) == Promote ?
           getTypeToTransformTo(Context, NVT) : NVT;
     }
     assert(0 && "Unsupported extended type!");
@@ -302,11 +305,11 @@
   /// intrinsic will need to map to a MemIntrinsicNode (touches memory). If
   /// this is the case, it returns true and store the intrinsic
   /// information into the IntrinsicInfo that was passed to the function.
-  struct IntrinsicInfo { 
+  struct IntrinsicInfo {
     unsigned     opc;       // target opcode
     EVT          memVT;     // memory VT
     const Value* ptrVal;    // value representing memory location
-    int          offset;    // offset off of ptrVal 
+    int          offset;    // offset off of ptrVal
     unsigned     align;     // alignment
     bool         vol;       // is volatile?
     bool         readMem;   // reads memory?
@@ -324,7 +327,7 @@
   virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const {
     return false;
   }
-  
+
   /// isShuffleMaskLegal - Targets can use this to indicate that they only
   /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
   /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
@@ -446,7 +449,7 @@ public:
            "Table isn't big enough!");
     unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
     return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
-  } 
+  }

   /// isIndexedStoreLegal - Return true if the specified indexed load is legal
   /// on this target.
@@ -492,7 +495,7 @@
     assert((VT.isInteger() || VT.isFloatingPoint()) &&
            "Cannot autopromote this type, add it with AddPromotedToType.");
-    
+
     EVT NVT = VT;
     do {
       NVT = (MVT::SimpleValueType)(NVT.getSimpleVT().SimpleTy+1);
@@ -516,14 +519,14 @@
   /// function arguments in the caller parameter area. This is the actual
   /// alignment, not its logarithm.
   virtual unsigned getByValTypeAlignment(const Type *Ty) const;
-  
+
   /// getRegisterType - Return the type of registers that this ValueType will
   /// eventually require.
   EVT getRegisterType(MVT VT) const {
     assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
     return RegisterTypeForVT[VT.SimpleTy];
   }
-  
+
   /// getRegisterType - Return the type of registers that this ValueType will
   /// eventually require.
   EVT getRegisterType(LLVMContext &Context, EVT VT) const {
@@ -606,7 +609,7 @@
   /// of the specified type. This is used, for example, in situations where an
   /// array copy/move/set is converted to a sequence of store operations. It's
   /// use helps to ensure that such replacements don't generate code that causes
-  /// an alignment error (trap) on the target machine. 
+  /// an alignment error (trap) on the target machine.
   /// @brief Determine if the target supports unaligned memory accesses.
   virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
     return false;
@@ -637,7 +640,7 @@
                                   MachineFunction &MF) const {
     return MVT::Other;
   }
-  
+
   /// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
   /// to implement llvm.setjmp.
   bool usesUnderscoreSetJmp() const {
@@ -683,17 +686,10 @@
     return JumpBufAlignment;
   }

-  /// getIfCvtBlockLimit - returns the target specific if-conversion block size
-  /// limit. Any block whose size is greater should not be predicated.
-  unsigned getIfCvtBlockSizeLimit() const {
-    return IfCvtBlockSizeLimit;
-  }
-
-  /// getIfCvtDupBlockLimit - returns the target specific size limit for a
-  /// block to be considered for duplication. Any block whose size is greater
-  /// should not be duplicated to facilitate its predication.
-  unsigned getIfCvtDupBlockSizeLimit() const {
-    return IfCvtDupBlockSizeLimit;
+  /// getMinStackArgumentAlignment - return the minimum stack alignment of an
+  /// argument.
+  unsigned getMinStackArgumentAlignment() const {
+    return MinStackArgumentAlignment;
   }

   /// getPrefLoopAlignment - return the preferred loop alignment.
@@ -701,7 +697,14 @@
   unsigned getPrefLoopAlignment() const {
     return PrefLoopAlignment;
   }
-  
+
+  /// getShouldFoldAtomicFences - return whether the combiner should fold
+  /// fence MEMBARRIER instructions into the atomic intrinsic instructions.
+  ///
+  bool getShouldFoldAtomicFences() const {
+    return ShouldFoldAtomicFences;
+  }
+
   /// getPreIndexedAddressParts - returns true by value, base pointer and
   /// offset pointer and addressing mode by reference if the node's address
   /// can be legally represented as pre-indexed load / store address.
@@ -711,7 +714,7 @@ public:
                                          SelectionDAG &DAG) const {
     return false;
   }
-  
+
   /// getPostIndexedAddressParts - returns true by value, base pointer and
   /// offset pointer and addressing mode by reference if this node can be
   /// combined with a load / store to form a post-indexed load / store.
@@ -721,12 +724,12 @@
                                           SelectionDAG &DAG) const {
     return false;
   }
-  
+
   /// getJumpTableEncoding - Return the entry encoding for a jump table in the
   /// current function. The returned value is a member of the
   /// MachineJumpTableInfo::JTEntryKind enum.
   virtual unsigned getJumpTableEncoding() const;
-  
+
   virtual const MCExpr *
   LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                             const MachineBasicBlock *MBB, unsigned uid,
@@ -734,7 +737,7 @@
     assert(0 && "Need to implement this hook if target has custom JTIs");
     return 0;
   }
-  
+
   /// getPICJumpTableRelocaBase - Returns relocation base for the given PIC
   /// jumptable.
   virtual SDValue getPICJumpTableRelocBase(SDValue Table,
@@ -746,7 +749,7 @@
   virtual const MCExpr *
   getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
                                MCContext &Ctx) const;
-  
+
   /// isOffsetFoldingLegal - Return true if folding a constant offset
   /// with the given GlobalAddress is legal. It is frequently not legal in
   /// PIC relocation models.
@@ -755,36 +758,42 @@
   /// getFunctionAlignment - Return the Log2 alignment of this function.
   virtual unsigned getFunctionAlignment(const Function *) const = 0;

+  /// getStackCookieLocation - Return true if the target stores stack
+  /// protector cookies at a fixed offset in some non-standard address
+  /// space, and populates the address space and offset as
+  /// appropriate.
+  virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const {
+    return false;
+  }
+
   //===--------------------------------------------------------------------===//
   // TargetLowering Optimization Methods
   //
-  
+
   /// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two
   /// SDValues for returning information from TargetLowering to its clients
-  /// that want to combine 
+  /// that want to combine
   struct TargetLoweringOpt {
     SelectionDAG &DAG;
     bool LegalTys;
     bool LegalOps;
-    bool ShrinkOps;
     SDValue Old;
     SDValue New;

     explicit TargetLoweringOpt(SelectionDAG &InDAG,
-                               bool LT, bool LO,
-                               bool Shrink = false) :
-      DAG(InDAG), LegalTys(LT), LegalOps(LO), ShrinkOps(Shrink) {}
+                               bool LT, bool LO) :
+      DAG(InDAG), LegalTys(LT), LegalOps(LO) {}

     bool LegalTypes() const { return LegalTys; }
     bool LegalOperations() const { return LegalOps; }
-    
-    bool CombineTo(SDValue O, SDValue N) { 
-      Old = O; 
-      New = N; 
+
+    bool CombineTo(SDValue O, SDValue N) {
+      Old = O;
+      New = N;
       return true;
     }
-    
-    /// ShrinkDemandedConstant - Check to see if the specified operand of the 
+
+    /// ShrinkDemandedConstant - Check to see if the specified operand of the
     /// specified instruction is a constant integer. If so, check to see if
     /// there are any bits set in the constant that are not demanded. If so,
     /// shrink the constant and return true.
@@ -797,25 +806,25 @@
     bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
                           DebugLoc dl);
   };
-  
+
   /// SimplifyDemandedBits - Look at Op. At this point, we know that only the
   /// DemandedMask bits of the result of Op are ever used downstream. If we can
   /// use this information to simplify Op, create a new simplified DAG node and
-  /// return true, returning the original and new nodes in Old and New. 
-  /// Otherwise, analyze the expression and return a mask of KnownOne and 
-  /// KnownZero bits for the expression (used to simplify the caller). 
-  /// The KnownZero/One bits may only be accurate for those bits in the 
+  /// return true, returning the original and new nodes in Old and New.
+  /// Otherwise, analyze the expression and return a mask of KnownOne and
+  /// KnownZero bits for the expression (used to simplify the caller).
+  /// The KnownZero/One bits may only be accurate for those bits in the
   /// DemandedMask.
-  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask, 
+  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
                             APInt &KnownZero, APInt &KnownOne,
                             TargetLoweringOpt &TLO, unsigned Depth = 0) const;
-  
+
   /// computeMaskedBitsForTargetNode - Determine which of the bits specified in
-  /// Mask are known to be either zero or one and return them in the 
+  /// Mask are known to be either zero or one and return them in the
   /// KnownZero/KnownOne bitsets.
   virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                               const APInt &Mask,
-                                              APInt &KnownZero, 
+                                              APInt &KnownZero,
                                               APInt &KnownOne,
                                               const SelectionDAG &DAG,
                                               unsigned Depth = 0) const;
@@ -825,7 +834,7 @@
   /// DAG Combiner.
   virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                    unsigned Depth = 0) const;
-  
+
   struct DAGCombinerInfo {
     void *DC;  // The DAG Combiner object.
     bool BeforeLegalize;
@@ -833,15 +842,15 @@
     bool CalledByLegalizer;
   public:
     SelectionDAG &DAG;
-    
+
     DAGCombinerInfo(SelectionDAG &dag, bool bl, bool blo, bool cl, void *dc)
      : DC(dc), BeforeLegalize(bl), BeforeLegalizeOps(blo),
        CalledByLegalizer(cl), DAG(dag) {}
-    
+
     bool isBeforeLegalize() const { return BeforeLegalize; }
     bool isBeforeLegalizeOps() const { return BeforeLegalizeOps; }
     bool isCalledByLegalizer() const { return CalledByLegalizer; }
-    
+
     void AddToWorklist(SDNode *N);
     SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
                       bool AddTo = true);
@@ -851,7 +860,7 @@
     void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
   };

-  /// SimplifySetCC - Try to simplify a setcc built with the specified operands 
+  /// SimplifySetCC - Try to simplify a setcc built with the specified operands
   /// and cc. If it is unable to simplify it, return a null SDValue.
   SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                         ISD::CondCode Cond, bool foldBooleans,
@@ -892,7 +901,7 @@
   virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
     return false;
   }
-  
+
   //===--------------------------------------------------------------------===//
   // TargetLowering Configuration Methods - These methods should be invoked by
   // the derived class constructor to configure this object for the target.
@@ -932,7 +941,7 @@ protected:
   void setStackPointerRegisterToSaveRestore(unsigned R) {
     StackPointerRegisterToSaveRestore = R;
   }
-  
+
   /// setExceptionPointerRegister - If set to a physical register, this sets
   /// the register that receives the exception address on entry to a landing
   /// pad.
@@ -955,12 +964,12 @@
   /// expensive, and if possible, should be replaced by an alternate sequence
   /// of instructions not containing an integer divide.
   void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
-  
+
   /// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
   /// srl/add/sra for a signed divide by power of two, and let the target handle
   /// it.
   void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }
-  
+
   /// addRegisterClass - Add the specified register class as an available
   /// regclass for the specified value type. This indicates the selector can
   /// handle values of that class natively.
@@ -983,7 +992,7 @@
     assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
     OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
   }
-  
+
   /// setLoadExtAction - Indicate that the specified load with extension does
   /// not work with the specified type and indicate what to do about it.
   void setLoadExtAction(unsigned ExtType, MVT VT,
@@ -993,7 +1002,7 @@
            "Table isn't big enough!");
     LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
   }
-  
+
   /// setTruncStoreAction - Indicate that the specified truncating store does
   /// not work with the specified type and indicate what to do about it.
   void setTruncStoreAction(MVT ValVT, MVT MemVT,
@@ -1018,7 +1027,7 @@
     IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
     IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
   }
-  
+
   /// setIndexedStoreAction - Indicate that the specified indexed store does or
   /// does not work with the specified type and indicate what to do about
   /// it. NOTE: All indexed mode stores are initialized to Expand in
@@ -1033,7 +1042,7 @@
     IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
     IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
   }
-  
+
   /// setCondCodeAction - Indicate that the specified condition code is or isn't
   /// supported on the target and indicate what to do about it.
   void setCondCodeAction(ISD::CondCode CC, MVT VT,
@@ -1060,7 +1069,7 @@
     assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
     TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
   }
-  
+
   /// setJumpBufSize - Set the target's required jmp_buf buffer size (in
   /// bytes); default is 200
   void setJumpBufSize(unsigned Size) {
@@ -1073,25 +1082,24 @@
     JumpBufAlignment = Align;
   }

-  /// setIfCvtBlockSizeLimit - Set the target's if-conversion block size
-  /// limit (in number of instructions); default is 2.
-  void setIfCvtBlockSizeLimit(unsigned Limit) {
-    IfCvtBlockSizeLimit = Limit;
-  }
-
-  /// setIfCvtDupBlockSizeLimit - Set the target's block size limit (in number
-  /// of instructions) to be considered for code duplication during
-  /// if-conversion; default is 2.
-  void setIfCvtDupBlockSizeLimit(unsigned Limit) {
-    IfCvtDupBlockSizeLimit = Limit;
-  }
-
   /// setPrefLoopAlignment - Set the target's preferred loop alignment. Default
   /// alignment is zero, it means the target does not care about loop alignment.
   void setPrefLoopAlignment(unsigned Align) {
     PrefLoopAlignment = Align;
   }
-  
+
+  /// setMinStackArgumentAlignment - Set the minimum stack alignment of an
+  /// argument.
+  void setMinStackArgumentAlignment(unsigned Align) {
+    MinStackArgumentAlignment = Align;
+  }
+
+  /// setShouldFoldAtomicFences - Set if the target's implementation of the
+  /// atomic operation intrinsics includes locking. Default is false.
+  void setShouldFoldAtomicFences(bool fold) {
+    ShouldFoldAtomicFences = fold;
+  }
+
 public:
   //===--------------------------------------------------------------------===//
   // Lowering methods - These methods must be implemented by targets so that
@@ -1151,6 +1159,7 @@
     LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
               bool isVarArg, bool &isTailCall,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
+              const SmallVectorImpl<SDValue> &OutVals,
               const SmallVectorImpl<ISD::InputArg> &Ins,
               DebugLoc dl, SelectionDAG &DAG,
               SmallVectorImpl<SDValue> &InVals) const {
@@ -1163,9 +1172,8 @@
   /// registers. If false is returned, an sret-demotion is performed.
   ///
   virtual bool CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
-               const SmallVectorImpl<EVT> &OutTys,
-               const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
-               SelectionDAG &DAG) const
+               const SmallVectorImpl<ISD::OutputArg> &Outs,
+               LLVMContext &Context) const
   {
     // Return true by default to get preexisting behavior.
     return true;
@@ -1179,6 +1187,7 @@
   virtual SDValue
     LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                 const SmallVectorImpl<ISD::OutputArg> &Outs,
+                const SmallVectorImpl<SDValue> &OutVals,
                 DebugLoc dl, SelectionDAG &DAG) const {
     assert(0 && "Not Implemented");
     return SDValue();    // this is here to silence compiler errors
@@ -1200,7 +1209,7 @@
                                   SmallVectorImpl<SDValue> &Results,
                                   SelectionDAG &DAG) const;

-  /// LowerOperation - This callback is invoked for operations that are 
+  /// LowerOperation - This callback is invoked for operations that are
   /// unsupported by the target, which are registered to use 'custom' lowering,
   /// and whose defined values are all legal.
   /// If the target has no operations that require custom lowering, it need not
@@ -1227,23 +1236,14 @@

   /// createFastISel - This method returns a target specific FastISel object,
   /// or null if the target does not support "fast" ISel.
-  virtual FastISel *
-  createFastISel(MachineFunction &,
-                 DenseMap<const Value *, unsigned> &,
-                 DenseMap<const BasicBlock *, MachineBasicBlock *> &,
-                 DenseMap<const AllocaInst *, int> &,
-                 std::vector<std::pair<MachineInstr*, unsigned> > &
-#ifndef NDEBUG
-                 , SmallSet<const Instruction *, 8> &CatchInfoLost
-#endif
-                 ) const {
+  virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const {
     return 0;
   }

   //===--------------------------------------------------------------------===//
   // Inline Asm Support hooks
   //
-  
+
   /// ExpandInlineAsm - This hook allows the target to expand an inline asm
   /// call to be explicit llvm code if it wants to. This is useful for
   /// turning simple inline asms into LLVM intrinsics, which gives the
@@ -1251,7 +1251,7 @@
   virtual bool ExpandInlineAsm(CallInst *CI) const {
     return false;
   }
-  
+
   enum ConstraintType {
     C_Register,            // Constraint represents specific register(s).
     C_RegisterClass,       // Constraint represents any of register(s) in class.
@@ -1259,7 +1259,7 @@
     C_Other,               // Something else.
     C_Unknown              // Unsupported constraint.
   };
-  
+
   /// AsmOperandInfo - This contains information for each constraint that we are
   /// lowering.
   struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
@@ -1271,25 +1271,25 @@
     /// ConstraintType - Information about the constraint code, e.g. Register,
     /// RegisterClass, Memory, Other, Unknown.
     TargetLowering::ConstraintType ConstraintType;
-    
+
     /// CallOperandval - If this is the result output operand or a
     /// clobber, this is null, otherwise it is the incoming operand to the
     /// CallInst. This gets modified as the asm is processed.
     Value *CallOperandVal;
-    
+
     /// ConstraintVT - The ValueType for the operand value.
     EVT ConstraintVT;
-    
+
     /// isMatchingInputConstraint - Return true of this is an input operand that
     /// is a matching constraint like "4".
     bool isMatchingInputConstraint() const;
-    
+
     /// getMatchedOperand - If this is an input matching constraint, this method
     /// returns the output operand it matches.
     unsigned getMatchedOperand() const;
-    
+
     AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
-      : InlineAsm::ConstraintInfo(info), 
+      : InlineAsm::ConstraintInfo(info),
         ConstraintType(TargetLowering::C_Unknown),
         CallOperandVal(0), ConstraintVT(MVT::Other) {
     }
@@ -1299,21 +1299,19 @@
   /// type to use for the specific AsmOperandInfo, setting
   /// OpInfo.ConstraintCode and OpInfo.ConstraintType. If the actual operand
   /// being passed in is available, it can be passed in as Op, otherwise an
-  /// empty SDValue can be passed. If hasMemory is true it means one of the asm
-  /// constraint of the inline asm instruction being processed is 'm'.
+  /// empty SDValue can be passed.
   virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                       SDValue Op,
-                                      bool hasMemory,
                                       SelectionDAG *DAG = 0) const;
-  
+
   /// getConstraintType - Given a constraint, return the type of constraint it
   /// is for this target.
   virtual ConstraintType getConstraintType(const std::string &Constraint) const;
-  
+
   /// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
   /// return a list of registers that can be used to satisfy the constraint.
   /// This should only be used for C_RegisterClass constraints.
-  virtual std::vector<unsigned> 
+  virtual std::vector<unsigned>
   getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                     EVT VT) const;

@@ -1327,29 +1325,26 @@
   ///
   /// This should only be used for C_Register constraints. On error,
   /// this returns a register number of 0 and a null register class pointer..
-  virtual std::pair<unsigned, const TargetRegisterClass*> 
+  virtual std::pair<unsigned, const TargetRegisterClass*>
     getRegForInlineAsmConstraint(const std::string &Constraint,
                                  EVT VT) const;
-  
+
   /// LowerXConstraint - try to replace an X constraint, which matches anything,
   /// with another that has more specific requirements based on the type of the
   /// corresponding operand. This returns null if there is no replacement to
   /// make.
   virtual const char *LowerXConstraint(EVT ConstraintVT) const;
-  
+
   /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
-  /// vector. If it is invalid, don't add anything to Ops. If hasMemory is true
-  /// it means one of the asm constraint of the inline asm instruction being
-  /// processed is 'm'.
+  /// vector. If it is invalid, don't add anything to Ops.
   virtual void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter,
-                                            bool hasMemory,
                                             std::vector<SDValue> &Ops,
                                             SelectionDAG &DAG) const;
-  
+
   //===--------------------------------------------------------------------===//
   // Instruction Emitting Hooks
   //
-  
+
   // EmitInstrWithCustomInserter - This method should be implemented by targets
   // that mark instructions with the 'usesCustomInserter' flag. These
   // instructions are special in various ways, which require special support to
@@ -1378,7 +1373,7 @@
     int64_t      Scale;
     AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
   };
-  
+
   /// isLegalAddressingMode - Return true if the addressing mode represented by
   /// AM is legal for this target, for a load/store of the specified type.
   /// The type may be VoidTy, in which case only return true if the addressing
@@ -1431,9 +1426,9 @@ public:
   //===--------------------------------------------------------------------===//
   // Div utility functions
   //
-  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, 
+  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG,
                     std::vector<SDNode*>* Created) const;
-  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, 
+  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG,
                     std::vector<SDNode*>* Created) const;

@@ -1470,7 +1465,7 @@
   void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
     LibcallCallingConvs[Call] = CC;
   }
-  
+
   /// getLibcallCallingConv - Get the CallingConv that should be used for the
   /// specified libcall.
   CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
@@ -1499,12 +1494,12 @@
   /// a real cost model is in place. If we ever optimize for size, this will be
   /// set to true unconditionally.
   bool IntDivIsCheap;
-  
+
   /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
   /// srl/add/sra for a signed divide by power of two, and let the target handle
   /// it.
   bool Pow2DivIsCheap;
-  
+
   /// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
   /// llvm.setjmp. Defaults to false.
   bool UseUnderscoreSetJmp;
@@ -1524,26 +1519,28 @@
   /// SchedPreferenceInfo - The target scheduling preference: shortest possible
   /// total cycles or lowest register usage.
   Sched::Preference SchedPreferenceInfo;
-  
+
   /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
   unsigned JumpBufSize;
-  
+
   /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
   /// buffers
   unsigned JumpBufAlignment;

-  /// IfCvtBlockSizeLimit - The maximum allowed size for a block to be
-  /// if-converted.
-  unsigned IfCvtBlockSizeLimit;
-
-  /// IfCvtDupBlockSizeLimit - The maximum allowed size for a block to be
-  /// duplicated during if-conversion.
-  unsigned IfCvtDupBlockSizeLimit;
+  /// MinStackArgumentAlignment - The minimum alignment that any argument
+  /// on the stack needs to have.
+  ///
+  unsigned MinStackArgumentAlignment;

   /// PrefLoopAlignment - The perferred loop alignment.
   ///
   unsigned PrefLoopAlignment;

+  /// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions should
+  /// be folded into the enclosed atomic intrinsic instruction by the
+  /// combiner.
+  bool ShouldFoldAtomicFences;
+
   /// StackPointerRegisterToSaveRestore - If set to a physical register, this
   /// specifies the register that llvm.savestack/llvm.restorestack should save
   /// and restore.
@@ -1583,12 +1580,12 @@
   /// operations that are not should be described. Note that operations on
   /// non-legal value types are not described here.
   uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
-  
+
   /// LoadExtActions - For each load extension type and each value type,
   /// keep a LegalizeAction that indicates how instruction selection should deal
   /// with a load of a specific value type and extension type.
   uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE];
-  
+
   /// TruncStoreActions - For each value type pair keep a LegalizeAction that
   /// indicates whether a truncating store of a specific value type and
   /// truncating type is legal.
@@ -1600,7 +1597,7 @@
   /// value_type for the reference. The second dimension represents the various
   /// modes for load store.
   uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
-  
+
   /// CondCodeActions - For each condition code (ISD::CondCode) keep a
   /// LegalizeAction that indicates how instruction selection should
   /// deal with the condition code.
@@ -1615,7 +1612,7 @@
   /// which sets a bit in this array.
   unsigned char
   TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
-  
+
   /// PromoteToType - For operations that must be promoted to a specific type,
   /// this holds the destination type. This map should be sparse, so don't hold
   /// it as an array.
@@ -1676,6 +1673,15 @@ protected:
   /// optimization.
   bool benefitFromCodePlacementOpt;
 };
+
+/// GetReturnInfo - Given an LLVM IR type and return type attributes,
+/// compute the return value EVTs and flags, and optionally also
+/// the offsets, if the return value is being lowered to memory.
+void GetReturnInfo(const Type* ReturnType, Attributes attr,
+                   SmallVectorImpl<ISD::OutputArg> &Outs,
+                   const TargetLowering &TLI,
+                   SmallVectorImpl<uint64_t> *Offsets = 0);
+
 } // end llvm namespace

 #endif
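
The most invasive interface change in this diff is the createFastISel hook: the five DenseMap/vector parameters (plus the NDEBUG-only CatchInfoLost set) collapse into a single FunctionLoweringInfo reference. A minimal sketch of how a backend adapts, assuming a hypothetical XYZ target and a target-local factory function (both names are placeholders, not part of the patch):

    // XYZISelLowering.cpp (hypothetical target, illustration only)
    #include "llvm/CodeGen/FastISel.h"
    #include "llvm/CodeGen/FunctionLoweringInfo.h"
    using namespace llvm;

    namespace llvm { namespace XYZ {
      // Assumed target-local factory, declared the way real targets
      // (e.g. X86) declare theirs in their own headers.
      FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
    }}

    FastISel *
    XYZTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
      // Everything previously passed piecemeal (value maps, the basic-block
      // map, static alloca slots, PHI fixups, caught-exception bookkeeping)
      // now travels inside funcInfo.
      return XYZ::createFastISel(funcInfo);
    }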
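The two new configuration knobs, setMinStackArgumentAlignment and setShouldFoldAtomicFences, are meant to be called from a derived class constructor like the existing setters. A sketch under the same hypothetical-target assumption; the alignment value and the TargetLoweringObjectFileELF argument are invented for illustration:

    // Hypothetical target constructor (illustration only).
    #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"

    XYZTargetLowering::XYZTargetLowering(TargetMachine &TM)
      : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
      // Outgoing stack arguments are assumed to need at least 4-byte
      // alignment on this imaginary target.
      setMinStackArgumentAlignment(4);
      // The target's atomic expansions already imply a full barrier, so the
      // combiner may fold away the MEMBARRIER fences bracketing them.
      setShouldFoldAtomicFences(true);
    }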
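The new getStackCookieLocation hook lets a target report a non-standard home for the stack-protector cookie. As one plausible illustration (the values below are hard-coded assumptions, not taken from this patch): an x86-64 Linux-style target keeps the cookie in a TLS block reached through %fs, which LLVM models as address space 257:

    bool XYZTargetLowering::getStackCookieLocation(unsigned &AddressSpace,
                                                   unsigned &Offset) const {
      // Illustrative layout: glibc on x86-64 keeps the cookie at %fs:0x28,
      // and LLVM addresses the FS segment as address space 257.
      AddressSpace = 257;
      Offset = 0x28;
      return true;  // stack protector loads can read the cookie directly
    }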
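Finally, GetReturnInfo is now exposed at namespace scope, and CanLowerReturn takes the ISD::OutputArg list plus an LLVMContext instead of split type/flag vectors and a SelectionDAG, so the sret-demotion decision can be made before any DAG exists. A sketch of the calling pattern (the helper name is invented; TLI is a TargetLowering reference):

    #include "llvm/Function.h"
    #include "llvm/Target/TargetLowering.h"
    using namespace llvm;

    static bool returnFitsInRegisters(const Function &F,
                                      const TargetLowering &TLI) {
      SmallVector<ISD::OutputArg, 4> Outs;
      // Expand the IR return type into one OutputArg per legal piece,
      // honoring return attributes such as signext/zeroext.
      GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                    Outs, TLI);
      // No SelectionDAG is needed any more; false means the target wants an
      // sret demotion for this return value.
      return TLI.CanLowerReturn(F.getCallingConv(), F.isVarArg(),
                                Outs, F.getContext());
    }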