Diffstat (limited to 'include/llvm/CodeGen')
21 files changed, 301 insertions, 184 deletions
diff --git a/include/llvm/CodeGen/Analysis.h b/include/llvm/CodeGen/Analysis.h index c4b94ed..82d1e8a 100644 --- a/include/llvm/CodeGen/Analysis.h +++ b/include/llvm/CodeGen/Analysis.h @@ -64,7 +64,7 @@ inline unsigned ComputeLinearIndex(Type *Ty, /// If Offsets is non-null, it points to a vector to be filled in /// with the in-memory offsets of each of the individual values. /// -void ComputeValueVTs(const TargetLowering &TLI, Type *Ty, +void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl<EVT> &ValueVTs, SmallVectorImpl<uint64_t> *Offsets = nullptr, uint64_t StartingOffset = 0); diff --git a/include/llvm/CodeGen/BasicTTIImpl.h b/include/llvm/CodeGen/BasicTTIImpl.h index 3e464f4..9ba2516 100644 --- a/include/llvm/CodeGen/BasicTTIImpl.h +++ b/include/llvm/CodeGen/BasicTTIImpl.h @@ -91,8 +91,10 @@ private: } protected: - explicit BasicTTIImplBase(const TargetMachine *TM) - : BaseT(TM->getDataLayout()) {} + explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL) + : BaseT(DL) {} + + using TargetTransformInfoImplBase::DL; public: // Provide value semantics. MSVC requires that we spell all of these out. @@ -100,14 +102,6 @@ public: : BaseT(static_cast<const BaseT &>(Arg)) {} BasicTTIImplBase(BasicTTIImplBase &&Arg) : BaseT(std::move(static_cast<BaseT &>(Arg))) {} - BasicTTIImplBase &operator=(const BasicTTIImplBase &RHS) { - BaseT::operator=(static_cast<const BaseT &>(RHS)); - return *this; - } - BasicTTIImplBase &operator=(BasicTTIImplBase &&RHS) { - BaseT::operator=(std::move(static_cast<BaseT &>(RHS))); - return *this; - } /// \name Scalar TTI Implementations /// @{ @@ -132,7 +126,7 @@ public: AM.BaseOffs = BaseOffset; AM.HasBaseReg = HasBaseReg; AM.Scale = Scale; - return getTLI()->isLegalAddressingMode(AM, Ty, AddrSpace); + return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace); } int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, @@ -142,7 +136,7 @@ public: AM.BaseOffs = BaseOffset; AM.HasBaseReg = HasBaseReg; AM.Scale = Scale; - return getTLI()->getScalingFactorCost(AM, Ty, AddrSpace); + return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace); } bool isTruncateFree(Type *Ty1, Type *Ty2) { @@ -154,7 +148,7 @@ public: } bool isTypeLegal(Type *Ty) { - EVT VT = getTLI()->getValueType(Ty); + EVT VT = getTLI()->getValueType(DL, Ty); return getTLI()->isTypeLegal(VT); } @@ -192,7 +186,7 @@ public: bool haveFastSqrt(Type *Ty) { const TargetLoweringBase *TLI = getTLI(); - EVT VT = TLI->getValueType(Ty); + EVT VT = TLI->getValueType(DL, Ty); return TLI->isTypeLegal(VT) && TLI->isOperationLegalOrCustom(ISD::FSQRT, VT); } @@ -282,7 +276,7 @@ public: /// \name Vector TTI Implementations /// @{ - unsigned getNumberOfRegisters(bool Vector) { return 1; } + unsigned getNumberOfRegisters(bool Vector) { return Vector ? 
0 : 1; } unsigned getRegisterBitWidth(bool Vector) { return 32; } @@ -299,7 +293,7 @@ public: int ISD = TLI->InstructionOpcodeToISD(Opcode); assert(ISD && "Invalid opcode"); - std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty); + std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty); bool IsFloat = Ty->getScalarType()->isFloatingPointTy(); // Assume that floating point arithmetic operations cost twice as much as @@ -349,9 +343,8 @@ public: const TargetLoweringBase *TLI = getTLI(); int ISD = TLI->InstructionOpcodeToISD(Opcode); assert(ISD && "Invalid opcode"); - - std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(Src); - std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(Dst); + std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, Src); + std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(DL, Dst); // Check for NOOP conversions. if (SrcLT.first == DstLT.first && @@ -455,8 +448,7 @@ public: if (CondTy->isVectorTy()) ISD = ISD::VSELECT; } - - std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy); + std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); if (!(ValTy->isVectorTy() && !LT.second.isVector()) && !TLI->isOperationExpand(ISD, LT.second)) { @@ -485,7 +477,7 @@ public: unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) { std::pair<unsigned, MVT> LT = - getTLI()->getTypeLegalizationCost(Val->getScalarType()); + getTLI()->getTypeLegalizationCost(DL, Val->getScalarType()); return LT.first; } @@ -493,7 +485,7 @@ public: unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment, unsigned AddressSpace) { assert(!Src->isVoidTy() && "Invalid type"); - std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Src); + std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Src); // Assuming that all loads of legal types cost 1. unsigned Cost = LT.first; @@ -504,7 +496,7 @@ public: // itself. Unless the corresponding extending load or truncating store is // legal, then this will scalarize. TargetLowering::LegalizeAction LA = TargetLowering::Expand; - EVT MemVT = getTLI()->getValueType(Src, true); + EVT MemVT = getTLI()->getValueType(DL, Src, true); if (MemVT.isSimple() && MemVT != MVT::Other) { if (Opcode == Instruction::Store) LA = getTLI()->getTruncStoreAction(LT.second, MemVT.getSimpleVT()); @@ -700,7 +692,7 @@ public: } const TargetLoweringBase *TLI = getTLI(); - std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(RetTy); + std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy); if (TLI->isOperationLegalOrPromote(ISD, LT.second)) { // The operation is legal. Assume it costs 1. 
@@ -771,7 +763,7 @@ public: } unsigned getNumberOfParts(Type *Tp) { - std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Tp); + std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Tp); return LT.first; } @@ -816,18 +808,6 @@ public: BasicTTIImpl(BasicTTIImpl &&Arg) : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)), TLI(std::move(Arg.TLI)) {} - BasicTTIImpl &operator=(const BasicTTIImpl &RHS) { - BaseT::operator=(static_cast<const BaseT &>(RHS)); - ST = RHS.ST; - TLI = RHS.TLI; - return *this; - } - BasicTTIImpl &operator=(BasicTTIImpl &&RHS) { - BaseT::operator=(std::move(static_cast<BaseT &>(RHS))); - ST = std::move(RHS.ST); - TLI = std::move(RHS.TLI); - return *this; - } }; } diff --git a/include/llvm/CodeGen/CommandFlags.h b/include/llvm/CodeGen/CommandFlags.h index 554511d..4b2e0b0 100644 --- a/include/llvm/CodeGen/CommandFlags.h +++ b/include/llvm/CodeGen/CommandFlags.h @@ -206,6 +206,10 @@ cl::opt<std::string> StartAfter("start-after", cl::value_desc("pass-name"), cl::init("")); +cl::opt<std::string> + RunPass("run-pass", cl::desc("Run compiler only for one specific pass"), + cl::value_desc("pass-name"), cl::init("")); + cl::opt<bool> DataSections("data-sections", cl::desc("Emit data into separate sections"), cl::init(false)); diff --git a/include/llvm/CodeGen/ISDOpcodes.h b/include/llvm/CodeGen/ISDOpcodes.h index c7237fd..fa44301 100644 --- a/include/llvm/CodeGen/ISDOpcodes.h +++ b/include/llvm/CodeGen/ISDOpcodes.h @@ -72,10 +72,13 @@ namespace ISD { /// the parent's frame or return address, and so on. FRAMEADDR, RETURNADDR, - /// FRAME_ALLOC_RECOVER - Represents the llvm.framerecover - /// intrinsic. Materializes the offset from the frame pointer of another - /// function to the result of llvm.frameallocate. - FRAME_ALLOC_RECOVER, + /// LOCAL_RECOVER - Represents the llvm.localrecover intrinsic. + /// Materializes the offset from the local object pointer of another + /// function to a particular local object passed to llvm.localescape. The + /// operand is the MCSymbol label used to represent this offset, since + /// typically the offset is not known until after code generation of the + /// parent. + LOCAL_RECOVER, /// READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on /// the DAG, which implements the named register global variables extension. @@ -725,7 +728,7 @@ namespace ISD { /// which do not reference a specific memory location should be less than /// this value. Those that do must not be less than this value, and can /// be used with SelectionDAG::getMemIntrinsicNode. 
- static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END+200; + static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END+300; //===--------------------------------------------------------------------===// /// MemIndexedMode enum - This enum defines the load / store indexed diff --git a/include/llvm/CodeGen/LiveIntervalUnion.h b/include/llvm/CodeGen/LiveIntervalUnion.h index 967f0cb..f0f1637 100644 --- a/include/llvm/CodeGen/LiveIntervalUnion.h +++ b/include/llvm/CodeGen/LiveIntervalUnion.h @@ -203,6 +203,11 @@ public: assert(idx < Size && "idx out of bounds"); return LIUs[idx]; } + + const LiveIntervalUnion& operator[](unsigned Idx) const { + assert(Idx < Size && "Idx out of bounds"); + return LIUs[Idx]; + } }; }; diff --git a/include/llvm/CodeGen/LiveRegMatrix.h b/include/llvm/CodeGen/LiveRegMatrix.h index 878b4d9..e169058 100644 --- a/include/llvm/CodeGen/LiveRegMatrix.h +++ b/include/llvm/CodeGen/LiveRegMatrix.h @@ -32,13 +32,11 @@ namespace llvm { class LiveInterval; class LiveIntervalAnalysis; -class MachineRegisterInfo; class TargetRegisterInfo; class VirtRegMap; class LiveRegMatrix : public MachineFunctionPass { const TargetRegisterInfo *TRI; - MachineRegisterInfo *MRI; LiveIntervals *LIS; VirtRegMap *VRM; @@ -114,6 +112,9 @@ public: /// the assignment and updates VirtRegMap accordingly. void unassign(LiveInterval &VirtReg); + /// Returns true if the given \p PhysReg has any live intervals assigned. + bool isPhysRegUsed(unsigned PhysReg) const; + //===--------------------------------------------------------------------===// // Low-level interface. //===--------------------------------------------------------------------===// diff --git a/include/llvm/CodeGen/MIRYamlMapping.h b/include/llvm/CodeGen/MIRYamlMapping.h index a6ffeb3..9798e5c 100644 --- a/include/llvm/CodeGen/MIRYamlMapping.h +++ b/include/llvm/CodeGen/MIRYamlMapping.h @@ -81,15 +81,30 @@ LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(llvm::yaml::FlowStringValue) namespace llvm { namespace yaml { +struct VirtualRegisterDefinition { + unsigned ID; + StringValue Class; + // TODO: Serialize the virtual register hints. +}; + +template <> struct MappingTraits<VirtualRegisterDefinition> { + static void mapping(IO &YamlIO, VirtualRegisterDefinition &Reg) { + YamlIO.mapRequired("id", Reg.ID); + YamlIO.mapRequired("class", Reg.Class); + } + + static const bool flow = true; +}; + struct MachineBasicBlock { unsigned ID; - std::string Name; + StringValue Name; unsigned Alignment = 0; bool IsLandingPad = false; bool AddressTaken = false; - // TODO: Serialize the successor weights and liveins. + // TODO: Serialize the successor weights. std::vector<FlowStringValue> Successors; - + std::vector<FlowStringValue> LiveIns; std::vector<StringValue> Instructions; }; @@ -97,23 +112,153 @@ template <> struct MappingTraits<MachineBasicBlock> { static void mapping(IO &YamlIO, MachineBasicBlock &MBB) { YamlIO.mapRequired("id", MBB.ID); YamlIO.mapOptional("name", MBB.Name, - std::string()); // Don't print out an empty name. + StringValue()); // Don't print out an empty name. YamlIO.mapOptional("alignment", MBB.Alignment); YamlIO.mapOptional("isLandingPad", MBB.IsLandingPad); YamlIO.mapOptional("addressTaken", MBB.AddressTaken); YamlIO.mapOptional("successors", MBB.Successors); + YamlIO.mapOptional("liveins", MBB.LiveIns); YamlIO.mapOptional("instructions", MBB.Instructions); } }; +/// Serializable representation of stack object from the MachineFrameInfo class. 
+/// +/// The flags 'isImmutable' and 'isAliased' aren't serialized, as they are +/// determined by the object's type and frame information flags. +/// Dead stack objects aren't serialized. +/// +/// TODO: Determine isPreallocated flag by mapping between objects and local +/// objects (Serialize local objects). +struct MachineStackObject { + enum ObjectType { DefaultType, SpillSlot, VariableSized }; + // TODO: Serialize LLVM alloca reference. + unsigned ID; + ObjectType Type = DefaultType; + int64_t Offset = 0; + uint64_t Size = 0; + unsigned Alignment = 0; +}; + +template <> struct ScalarEnumerationTraits<MachineStackObject::ObjectType> { + static void enumeration(yaml::IO &IO, MachineStackObject::ObjectType &Type) { + IO.enumCase(Type, "default", MachineStackObject::DefaultType); + IO.enumCase(Type, "spill-slot", MachineStackObject::SpillSlot); + IO.enumCase(Type, "variable-sized", MachineStackObject::VariableSized); + } +}; + +template <> struct MappingTraits<MachineStackObject> { + static void mapping(yaml::IO &YamlIO, MachineStackObject &Object) { + YamlIO.mapRequired("id", Object.ID); + YamlIO.mapOptional( + "type", Object.Type, + MachineStackObject::DefaultType); // Don't print the default type. + YamlIO.mapOptional("offset", Object.Offset); + if (Object.Type != MachineStackObject::VariableSized) + YamlIO.mapRequired("size", Object.Size); + YamlIO.mapOptional("alignment", Object.Alignment); + } + + static const bool flow = true; +}; + +/// Serializable representation of the fixed stack object from the +/// MachineFrameInfo class. +struct FixedMachineStackObject { + enum ObjectType { DefaultType, SpillSlot }; + unsigned ID; + ObjectType Type = DefaultType; + int64_t Offset = 0; + uint64_t Size = 0; + unsigned Alignment = 0; + bool IsImmutable = false; + bool IsAliased = false; +}; + +template <> +struct ScalarEnumerationTraits<FixedMachineStackObject::ObjectType> { + static void enumeration(yaml::IO &IO, + FixedMachineStackObject::ObjectType &Type) { + IO.enumCase(Type, "default", FixedMachineStackObject::DefaultType); + IO.enumCase(Type, "spill-slot", FixedMachineStackObject::SpillSlot); + } +}; + +template <> struct MappingTraits<FixedMachineStackObject> { + static void mapping(yaml::IO &YamlIO, FixedMachineStackObject &Object) { + YamlIO.mapRequired("id", Object.ID); + YamlIO.mapOptional( + "type", Object.Type, + FixedMachineStackObject::DefaultType); // Don't print the default type. + YamlIO.mapOptional("offset", Object.Offset); + YamlIO.mapOptional("size", Object.Size); + YamlIO.mapOptional("alignment", Object.Alignment); + if (Object.Type != FixedMachineStackObject::SpillSlot) { + YamlIO.mapOptional("isImmutable", Object.IsImmutable); + YamlIO.mapOptional("isAliased", Object.IsAliased); + } + } + + static const bool flow = true; +}; + } // end namespace yaml } // end namespace llvm +LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::VirtualRegisterDefinition) LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineBasicBlock) +LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineStackObject) +LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::FixedMachineStackObject) namespace llvm { namespace yaml { +/// Serializable representation of MachineFrameInfo. +/// +/// Doesn't serialize attributes like 'StackAlignment', 'IsStackRealignable' and +/// 'RealignOption' as they are determined by the target and LLVM function +/// attributes. +/// It also doesn't serialize attributes like 'NumFixedObject' and +/// 'HasVarSizedObjects' as they are determined by the frame objects themselves. 
+struct MachineFrameInfo { + bool IsFrameAddressTaken = false; + bool IsReturnAddressTaken = false; + bool HasStackMap = false; + bool HasPatchPoint = false; + uint64_t StackSize = 0; + int OffsetAdjustment = 0; + unsigned MaxAlignment = 0; + bool AdjustsStack = false; + bool HasCalls = false; + // TODO: Serialize StackProtectorIdx and FunctionContextIdx + unsigned MaxCallFrameSize = 0; + // TODO: Serialize callee saved info. + // TODO: Serialize local frame objects. + bool HasOpaqueSPAdjustment = false; + bool HasVAStart = false; + bool HasMustTailInVarArgFunc = false; + // TODO: Serialize save and restore MBB references. +}; + +template <> struct MappingTraits<MachineFrameInfo> { + static void mapping(IO &YamlIO, MachineFrameInfo &MFI) { + YamlIO.mapOptional("isFrameAddressTaken", MFI.IsFrameAddressTaken); + YamlIO.mapOptional("isReturnAddressTaken", MFI.IsReturnAddressTaken); + YamlIO.mapOptional("hasStackMap", MFI.HasStackMap); + YamlIO.mapOptional("hasPatchPoint", MFI.HasPatchPoint); + YamlIO.mapOptional("stackSize", MFI.StackSize); + YamlIO.mapOptional("offsetAdjustment", MFI.OffsetAdjustment); + YamlIO.mapOptional("maxAlignment", MFI.MaxAlignment); + YamlIO.mapOptional("adjustsStack", MFI.AdjustsStack); + YamlIO.mapOptional("hasCalls", MFI.HasCalls); + YamlIO.mapOptional("maxCallFrameSize", MFI.MaxCallFrameSize); + YamlIO.mapOptional("hasOpaqueSPAdjustment", MFI.HasOpaqueSPAdjustment); + YamlIO.mapOptional("hasVAStart", MFI.HasVAStart); + YamlIO.mapOptional("hasMustTailInVarArgFunc", MFI.HasMustTailInVarArgFunc); + } +}; + struct MachineFunction { StringRef Name; unsigned Alignment = 0; @@ -123,9 +268,13 @@ struct MachineFunction { bool IsSSA = false; bool TracksRegLiveness = false; bool TracksSubRegLiveness = false; - // TODO: Serialize virtual register definitions. + std::vector<VirtualRegisterDefinition> VirtualRegisters; // TODO: Serialize the various register masks. // TODO: Serialize live in registers. + // Frame information + MachineFrameInfo FrameInfo; + std::vector<FixedMachineStackObject> FixedStackObjects; + std::vector<MachineStackObject> StackObjects; std::vector<MachineBasicBlock> BasicBlocks; }; @@ -139,6 +288,10 @@ template <> struct MappingTraits<MachineFunction> { YamlIO.mapOptional("isSSA", MF.IsSSA); YamlIO.mapOptional("tracksRegLiveness", MF.TracksRegLiveness); YamlIO.mapOptional("tracksSubRegLiveness", MF.TracksSubRegLiveness); + YamlIO.mapOptional("registers", MF.VirtualRegisters); + YamlIO.mapOptional("frameInfo", MF.FrameInfo); + YamlIO.mapOptional("fixedStack", MF.FixedStackObjects); + YamlIO.mapOptional("stack", MF.StackObjects); YamlIO.mapOptional("body", MF.BasicBlocks); } }; diff --git a/include/llvm/CodeGen/MachineConstantPool.h b/include/llvm/CodeGen/MachineConstantPool.h index c619afb..6284003 100644 --- a/include/llvm/CodeGen/MachineConstantPool.h +++ b/include/llvm/CodeGen/MachineConstantPool.h @@ -135,17 +135,18 @@ public: /// address of the function constant pool values. /// @brief The machine constant pool. class MachineConstantPool { - const TargetMachine &TM; ///< The target machine. unsigned PoolAlignment; ///< The alignment for the pool. std::vector<MachineConstantPoolEntry> Constants; ///< The pool of constants. /// MachineConstantPoolValues that use an existing MachineConstantPoolEntry. DenseSet<MachineConstantPoolValue*> MachineCPVsSharingEntries; + const DataLayout &DL; + + const DataLayout &getDataLayout() const { return DL; } - const DataLayout *getDataLayout() const; public: /// @brief The only constructor. 
- explicit MachineConstantPool(const TargetMachine &TM) - : TM(TM), PoolAlignment(1) {} + explicit MachineConstantPool(const DataLayout &DL) + : PoolAlignment(1), DL(DL) {} ~MachineConstantPool(); /// getConstantPoolAlignment - Return the alignment required by diff --git a/include/llvm/CodeGen/MachineDominators.h b/include/llvm/CodeGen/MachineDominators.h index 4428fa6..735dd06 100644 --- a/include/llvm/CodeGen/MachineDominators.h +++ b/include/llvm/CodeGen/MachineDominators.h @@ -29,8 +29,8 @@ inline void DominatorTreeBase<MachineBasicBlock>::addRoot(MachineBasicBlock* MBB this->Roots.push_back(MBB); } -EXTERN_TEMPLATE_INSTANTIATION(class DomTreeNodeBase<MachineBasicBlock>); -EXTERN_TEMPLATE_INSTANTIATION(class DominatorTreeBase<MachineBasicBlock>); +extern template class DomTreeNodeBase<MachineBasicBlock>; +extern template class DominatorTreeBase<MachineBasicBlock>; typedef DomTreeNodeBase<MachineBasicBlock> MachineDomTreeNode; diff --git a/include/llvm/CodeGen/MachineFrameInfo.h b/include/llvm/CodeGen/MachineFrameInfo.h index 0f5a4b1..cbc4e66 100644 --- a/include/llvm/CodeGen/MachineFrameInfo.h +++ b/include/llvm/CodeGen/MachineFrameInfo.h @@ -229,9 +229,9 @@ class MachineFrameInfo { /// Whether the "realign-stack" option is on. bool RealignOption; - /// True if the function includes inline assembly that adjusts the stack - /// pointer. - bool HasInlineAsmWithSPAdjust; + /// True if the function dynamically adjusts the stack pointer through some + /// opaque mechanism like inline assembly or Win32 EH. + bool HasOpaqueSPAdjustment; /// True if the function contains a call to the llvm.vastart intrinsic. bool HasVAStart; @@ -269,7 +269,7 @@ public: LocalFrameSize = 0; LocalFrameMaxAlign = 0; UseLocalStackAllocationBlock = false; - HasInlineAsmWithSPAdjust = false; + HasOpaqueSPAdjustment = false; HasVAStart = false; HasMustTailInVarArgFunc = false; Save = nullptr; @@ -468,9 +468,9 @@ public: bool hasCalls() const { return HasCalls; } void setHasCalls(bool V) { HasCalls = V; } - /// Returns true if the function contains any stack-adjusting inline assembly. - bool hasInlineAsmWithSPAdjust() const { return HasInlineAsmWithSPAdjust; } - void setHasInlineAsmWithSPAdjust(bool B) { HasInlineAsmWithSPAdjust = B; } + /// Returns true if the function contains opaque dynamic stack adjustments. + bool hasOpaqueSPAdjustment() const { return HasOpaqueSPAdjustment; } + void setHasOpaqueSPAdjustment(bool B) { HasOpaqueSPAdjustment = B; } /// Returns true if the function calls the llvm.va_start intrinsic. bool hasVAStart() const { return HasVAStart; } @@ -541,6 +541,14 @@ public: return Objects[ObjectIdx+NumFixedObjects].Size == ~0ULL; } + /// Returns true if the specified index corresponds to a variable sized + /// object. + bool isVariableSizedObjectIndex(int ObjectIdx) const { + assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() && + "Invalid Object Idx!"); + return Objects[ObjectIdx + NumFixedObjects].Size == 0; + } + /// Create a new statically sized stack object, returning /// a nonnegative identifier to represent it. int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS, diff --git a/include/llvm/CodeGen/MachineFunction.h b/include/llvm/CodeGen/MachineFunction.h index 94610ca..c15ee1c 100644 --- a/include/llvm/CodeGen/MachineFunction.h +++ b/include/llvm/CodeGen/MachineFunction.h @@ -155,6 +155,9 @@ public: MachineModuleInfo &getMMI() const { return MMI; } MCContext &getContext() const { return Ctx; } + /// Return the DataLayout attached to the Module associated to this MF. 
+ const DataLayout &getDataLayout() const; + /// getFunction - Return the LLVM function that this machine code represents /// const Function *getFunction() const { return Fn; } diff --git a/include/llvm/CodeGen/MachineLoopInfo.h b/include/llvm/CodeGen/MachineLoopInfo.h index 438ef2e..4868b73 100644 --- a/include/llvm/CodeGen/MachineLoopInfo.h +++ b/include/llvm/CodeGen/MachineLoopInfo.h @@ -37,10 +37,8 @@ namespace llvm { // Implementation in LoopInfoImpl.h -#ifdef __GNUC__ class MachineLoop; -__extension__ extern template class LoopBase<MachineBasicBlock, MachineLoop>; -#endif +extern template class LoopBase<MachineBasicBlock, MachineLoop>; class MachineLoop : public LoopBase<MachineBasicBlock, MachineLoop> { public: @@ -65,10 +63,7 @@ private: }; // Implementation in LoopInfoImpl.h -#ifdef __GNUC__ -__extension__ extern template -class LoopInfoBase<MachineBasicBlock, MachineLoop>; -#endif +extern template class LoopInfoBase<MachineBasicBlock, MachineLoop>; class MachineLoopInfo : public MachineFunctionPass { LoopInfoBase<MachineBasicBlock, MachineLoop> LI; diff --git a/include/llvm/CodeGen/MachineModuleInfo.h b/include/llvm/CodeGen/MachineModuleInfo.h index ccaa83a..4cdfe24 100644 --- a/include/llvm/CodeGen/MachineModuleInfo.h +++ b/include/llvm/CodeGen/MachineModuleInfo.h @@ -320,6 +320,7 @@ public: /// information. void addPersonality(MachineBasicBlock *LandingPad, const Function *Personality); + void addPersonality(const Function *Personality); void addWinEHState(MachineBasicBlock *LandingPad, int State); diff --git a/include/llvm/CodeGen/MachineRegionInfo.h b/include/llvm/CodeGen/MachineRegionInfo.h index cf49c29..df9823f 100644 --- a/include/llvm/CodeGen/MachineRegionInfo.h +++ b/include/llvm/CodeGen/MachineRegionInfo.h @@ -172,10 +172,9 @@ template <> struct GraphTraits<MachineRegionInfoPass*> } }; -EXTERN_TEMPLATE_INSTANTIATION(class RegionBase<RegionTraits<MachineFunction>>); -EXTERN_TEMPLATE_INSTANTIATION(class RegionNodeBase<RegionTraits<MachineFunction>>); -EXTERN_TEMPLATE_INSTANTIATION(class RegionInfoBase<RegionTraits<MachineFunction>>); - +extern template class RegionBase<RegionTraits<MachineFunction>>; +extern template class RegionNodeBase<RegionTraits<MachineFunction>>; +extern template class RegionInfoBase<RegionTraits<MachineFunction>>; } #endif diff --git a/include/llvm/CodeGen/MachineRegisterInfo.h b/include/llvm/CodeGen/MachineRegisterInfo.h index e5b837a..67583be 100644 --- a/include/llvm/CodeGen/MachineRegisterInfo.h +++ b/include/llvm/CodeGen/MachineRegisterInfo.h @@ -95,20 +95,8 @@ private: return MO->Contents.Reg.Next; } - /// UsedRegUnits - This is a bit vector that is computed and set by the - /// register allocator, and must be kept up to date by passes that run after - /// register allocation (though most don't modify this). This is used - /// so that the code generator knows which callee save registers to save and - /// for other target specific uses. - /// This vector has bits set for register units that are modified in the - /// current function. It doesn't include registers clobbered by function - /// calls with register mask operands. - BitVector UsedRegUnits; - /// UsedPhysRegMask - Additional used physregs including aliases. /// This bit vector represents all the registers clobbered by function calls. - /// It can model things that UsedRegUnits can't, such as function calls that - /// clobber ymm7 but preserve the low half in xmm7. BitVector UsedPhysRegMask; /// ReservedRegs - This is a bit vector of reserved registers. 
The target @@ -647,40 +635,11 @@ public: /// deleted during LiveDebugVariables analysis. void markUsesInDebugValueAsUndef(unsigned Reg) const; - //===--------------------------------------------------------------------===// - // Physical Register Use Info - //===--------------------------------------------------------------------===// - - /// isPhysRegUsed - Return true if the specified register is used in this - /// function. Also check for clobbered aliases and registers clobbered by - /// function calls with register mask operands. - /// - /// This only works after register allocation. It is primarily used by - /// PrologEpilogInserter to determine which callee-saved registers need - /// spilling. - bool isPhysRegUsed(unsigned Reg) const { - if (UsedPhysRegMask.test(Reg)) - return true; - for (MCRegUnitIterator Units(Reg, getTargetRegisterInfo()); - Units.isValid(); ++Units) - if (UsedRegUnits.test(*Units)) - return true; - return false; - } - - /// Mark the specified register unit as used in this function. - /// This should only be called during and after register allocation. - void setRegUnitUsed(unsigned RegUnit) { - UsedRegUnits.set(RegUnit); - } - - /// setPhysRegUsed - Mark the specified register used in this function. - /// This should only be called during and after register allocation. - void setPhysRegUsed(unsigned Reg) { - for (MCRegUnitIterator Units(Reg, getTargetRegisterInfo()); - Units.isValid(); ++Units) - UsedRegUnits.set(*Units); - } + /// Return true if the specified register is modified in this function. + /// This checks that no defining machine operands exist for the register or + /// any of its aliases. Definitions found on functions marked noreturn are + /// ignored. + bool isPhysRegModified(unsigned PhysReg) const; /// addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used. /// This corresponds to the bit mask attached to register mask operands. @@ -688,16 +647,6 @@ public: UsedPhysRegMask.setBitsNotInMask(RegMask); } - /// setPhysRegUnused - Mark the specified register unused in this function. - /// This should only be called during and after register allocation. - void setPhysRegUnused(unsigned Reg) { - UsedPhysRegMask.reset(Reg); - for (MCRegUnitIterator Units(Reg, getTargetRegisterInfo()); - Units.isValid(); ++Units) - UsedRegUnits.reset(*Units); - } - - //===--------------------------------------------------------------------===// // Reserved Register Info //===--------------------------------------------------------------------===// diff --git a/include/llvm/CodeGen/Passes.h b/include/llvm/CodeGen/Passes.h index 538c995..5d82921 100644 --- a/include/llvm/CodeGen/Passes.h +++ b/include/llvm/CodeGen/Passes.h @@ -101,7 +101,7 @@ public: private: PassManagerBase *PM; - AnalysisID StartAfter; + AnalysisID StartBefore, StartAfter; AnalysisID StopAfter; bool Started; bool Stopped; @@ -142,16 +142,24 @@ public: CodeGenOpt::Level getOptLevel() const { return TM->getOptLevel(); } - /// setStartStopPasses - Set the StartAfter and StopAfter passes to allow - /// running only a portion of the normal code-gen pass sequence. If the - /// Start pass ID is zero, then compilation will begin at the normal point; - /// otherwise, clear the Started flag to indicate that passes should not be - /// added until the starting pass is seen. If the Stop pass ID is zero, - /// then compilation will continue to the end. 
- void setStartStopPasses(AnalysisID Start, AnalysisID Stop) { - StartAfter = Start; - StopAfter = Stop; - Started = (StartAfter == nullptr); + /// Set the StartAfter, StartBefore and StopAfter passes to allow running only + /// a portion of the normal code-gen pass sequence. + /// + /// If the StartAfter and StartBefore pass ID is zero, then compilation will + /// begin at the normal point; otherwise, clear the Started flag to indicate + /// that passes should not be added until the starting pass is seen. If the + /// Stop pass ID is zero, then compilation will continue to the end. + /// + /// This function expects that at least one of the StartAfter or the + /// StartBefore pass IDs is null. + void setStartStopPasses(AnalysisID StartBefore, AnalysisID StartAfter, + AnalysisID StopAfter) { + if (StartAfter) + assert(!StartBefore && "Start after and start before passes are given"); + this->StartBefore = StartBefore; + this->StartAfter = StartAfter; + this->StopAfter = StopAfter; + Started = (StartAfter == nullptr) && (StartBefore == nullptr); } void setDisableVerify(bool Disable) { setOpt(DisableVerify, Disable); } @@ -597,7 +605,7 @@ namespace llvm { /// createSjLjEHPreparePass - This pass adapts exception handling code to use /// the GCC-style builtin setjmp/longjmp (sjlj) to handling EH control flow. /// - FunctionPass *createSjLjEHPreparePass(const TargetMachine *TM); + FunctionPass *createSjLjEHPreparePass(); /// LocalStackSlotAllocation - This pass assigns local frame indices to stack /// slots relative to one another and allocates base registers to access them diff --git a/include/llvm/CodeGen/RegisterPressure.h b/include/llvm/CodeGen/RegisterPressure.h index fcb6fee..9d8843d 100644 --- a/include/llvm/CodeGen/RegisterPressure.h +++ b/include/llvm/CodeGen/RegisterPressure.h @@ -135,6 +135,8 @@ public: void addPressureChange(unsigned RegUnit, bool IsDec, const MachineRegisterInfo *MRI); + + LLVM_DUMP_METHOD void dump(const TargetRegisterInfo &TRI) const; }; /// Array of PressureDiffs. diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h index c2b1243..1ee9238 100644 --- a/include/llvm/CodeGen/SelectionDAG.h +++ b/include/llvm/CodeGen/SelectionDAG.h @@ -281,6 +281,7 @@ public: void clear(); MachineFunction &getMachineFunction() const { return *MF; } + const DataLayout &getDataLayout() const { return MF->getDataLayout(); } const TargetMachine &getTarget() const { return TM; } const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); } const TargetLowering &getTargetLoweringInfo() const { return *TLI; } @@ -322,6 +323,14 @@ public: return AllNodes.size(); } + iterator_range<allnodes_iterator> allnodes() { + return iterator_range<allnodes_iterator>(allnodes_begin(), allnodes_end()); + } + iterator_range<allnodes_const_iterator> allnodes() const { + return iterator_range<allnodes_const_iterator>(allnodes_begin(), + allnodes_end()); + } + /// Return the root tag of the SelectionDAG. const SDValue &getRoot() const { return Root; } diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h index 6191190..4821d1a 100644 --- a/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/include/llvm/CodeGen/SelectionDAGNodes.h @@ -140,7 +140,7 @@ public: } // Return true if this node is an operand of N. - bool isOperandOf(SDNode *N) const; + bool isOperandOf(const SDNode *N) const; /// Return the ValueType of the referenced return value. 
inline EVT getValueType() const; @@ -357,9 +357,6 @@ private: /// The number of entries in the Operand/Value list. unsigned short NumOperands, NumValues; - /// Source line information. - DebugLoc debugLoc; - // The ordering of the SDNodes. It roughly corresponds to the ordering of the // original LLVM instructions. // This is used for turning off scheduling, because we'll forgo @@ -367,6 +364,9 @@ private: // this ordering. unsigned IROrder; + /// Source line information. + DebugLoc debugLoc; + /// Return a pointer to the specified value type. static const EVT *getValueTypeList(EVT VT); @@ -532,10 +532,10 @@ public: bool hasAnyUseOfValue(unsigned Value) const; /// Return true if this node is the only use of N. - bool isOnlyUserOf(SDNode *N) const; + bool isOnlyUserOf(const SDNode *N) const; /// Return true if this node is an operand of N. - bool isOperandOf(SDNode *N) const; + bool isOperandOf(const SDNode *N) const; /// Return true if this node is a predecessor of N. /// NOTE: Implemented on top of hasPredecessor and every bit as @@ -732,7 +732,7 @@ protected: SubclassData(0), NodeId(-1), OperandList(Ops.size() ? new SDUse[Ops.size()] : nullptr), ValueList(VTs.VTs), UseList(nullptr), NumOperands(Ops.size()), - NumValues(VTs.NumVTs), debugLoc(std::move(dl)), IROrder(Order) { + NumValues(VTs.NumVTs), IROrder(Order), debugLoc(std::move(dl)) { assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor"); assert(NumOperands == Ops.size() && "NumOperands wasn't wide enough for its operands!"); @@ -752,7 +752,7 @@ protected: : NodeType(Opc), OperandsNeedDelete(false), HasDebugValue(false), SubclassData(0), NodeId(-1), OperandList(nullptr), ValueList(VTs.VTs), UseList(nullptr), NumOperands(0), NumValues(VTs.NumVTs), - debugLoc(std::move(dl)), IROrder(Order) { + IROrder(Order), debugLoc(std::move(dl)) { assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor"); assert(NumValues == VTs.NumVTs && "NumValues wasn't wide enough for its operands!"); diff --git a/include/llvm/CodeGen/StackMaps.h b/include/llvm/CodeGen/StackMaps.h index 46a773f..fdc1a91 100644 --- a/include/llvm/CodeGen/StackMaps.h +++ b/include/llvm/CodeGen/StackMaps.h @@ -1,5 +1,4 @@ //===------------------- StackMaps.h - StackMaps ----------------*- C++ -*-===// - // // The LLVM Compiler Infrastructure // @@ -42,10 +41,12 @@ class PatchPointOpers { public: /// Enumerate the meta operands. enum { IDPos, NBytesPos, TargetPos, NArgPos, CCPos, MetaEnd }; + private: const MachineInstr *MI; bool HasDef; bool IsAnyReg; + public: explicit PatchPointOpers(const MachineInstr *MI); @@ -66,8 +67,8 @@ public: /// Get the operand index of the variable list of non-argument operands. /// These hold the "live state". unsigned getVarIdx() const { - return getMetaIdx() + MetaEnd - + MI->getOperand(getMetaIdx(NArgPos)).getImm(); + return getMetaIdx() + MetaEnd + + MI->getOperand(getMetaIdx(NArgPos)).getImm(); } /// Get the index at which stack map locations will be recorded. @@ -98,15 +99,10 @@ private: // These values are relative offests from the start of the statepoint meta // arguments (i.e. the end of the call arguments). - enum { - CCOffset = 1, - FlagsOffset = 3, - NumVMSArgsOffset = 5 - }; + enum { CCOffset = 1, FlagsOffset = 3, NumVMSArgsOffset = 5 }; public: - explicit StatepointOpers(const MachineInstr *MI): - MI(MI) { } + explicit StatepointOpers(const MachineInstr *MI) : MI(MI) {} /// Get starting index of non call related arguments /// (calling convention, statepoint flags, vm state and gc state). 
@@ -134,31 +130,32 @@ private: class StackMaps { public: struct Location { - enum LocationType { Unprocessed, Register, Direct, Indirect, Constant, - ConstantIndex }; - LocationType LocType; + enum LocationType { + Unprocessed, + Register, + Direct, + Indirect, + Constant, + ConstantIndex + }; + LocationType Type; unsigned Size; unsigned Reg; int64_t Offset; - Location() : LocType(Unprocessed), Size(0), Reg(0), Offset(0) {} - Location(LocationType LocType, unsigned Size, unsigned Reg, int64_t Offset) - : LocType(LocType), Size(Size), Reg(Reg), Offset(Offset) {} + Location() : Type(Unprocessed), Size(0), Reg(0), Offset(0) {} + Location(LocationType Type, unsigned Size, unsigned Reg, int64_t Offset) + : Type(Type), Size(Size), Reg(Reg), Offset(Offset) {} }; struct LiveOutReg { unsigned short Reg; - unsigned short RegNo; + unsigned short DwarfRegNum; unsigned short Size; - LiveOutReg() : Reg(0), RegNo(0), Size(0) {} - LiveOutReg(unsigned short Reg, unsigned short RegNo, unsigned short Size) - : Reg(Reg), RegNo(RegNo), Size(Size) {} - - void MarkInvalid() { Reg = 0; } - - // Only sort by the dwarf register number. - bool operator< (const LiveOutReg &LO) const { return RegNo < LO.RegNo; } - static bool IsInvalid(const LiveOutReg &LO) { return LO.Reg == 0; } + LiveOutReg() : Reg(0), DwarfRegNum(0), Size(0) {} + LiveOutReg(unsigned short Reg, unsigned short DwarfRegNum, + unsigned short Size) + : Reg(Reg), DwarfRegNum(DwarfRegNum), Size(Size) {} }; // OpTypes are used to encode information about the following logical @@ -205,8 +202,8 @@ private: CallsiteInfo() : CSOffsetExpr(nullptr), ID(0) {} CallsiteInfo(const MCExpr *CSOffsetExpr, uint64_t ID, LocationVec &&Locations, LiveOutVec &&LiveOuts) - : CSOffsetExpr(CSOffsetExpr), ID(ID), Locations(std::move(Locations)), - LiveOuts(std::move(LiveOuts)) {} + : CSOffsetExpr(CSOffsetExpr), ID(ID), Locations(std::move(Locations)), + LiveOuts(std::move(LiveOuts)) {} }; typedef std::vector<CallsiteInfo> CallsiteInfoList; @@ -218,8 +215,8 @@ private: MachineInstr::const_mop_iterator parseOperand(MachineInstr::const_mop_iterator MOI, - MachineInstr::const_mop_iterator MOE, - LocationVec &Locs, LiveOutVec &LiveOuts) const; + MachineInstr::const_mop_iterator MOE, LocationVec &Locs, + LiveOutVec &LiveOuts) const; /// \brief Create a live-out register record for the given register @p Reg. LiveOutReg createLiveOutReg(unsigned Reg, @@ -254,7 +251,6 @@ private: void print(raw_ostream &OS); void debug() { print(dbgs()); } }; - } #endif diff --git a/include/llvm/CodeGen/WinEHFuncInfo.h b/include/llvm/CodeGen/WinEHFuncInfo.h index 291f390..75638a0 100644 --- a/include/llvm/CodeGen/WinEHFuncInfo.h +++ b/include/llvm/CodeGen/WinEHFuncInfo.h @@ -91,7 +91,7 @@ private: // When the parseEHActions function is called to populate a vector of // instances of this class, the ExceptionObjectVar field will be nullptr // and the ExceptionObjectIndex will be the index of the exception object in - // the parent function's frameescape block. + // the parent function's localescape block. const Value *ExceptionObjectVar; int ExceptionObjectIndex; TinyPtrVector<BasicBlock *> ReturnTargets; @@ -148,7 +148,7 @@ struct WinEHFuncInfo { int UnwindHelpFrameOffset = -1; unsigned NumIPToStateFuncsVisited = 0; - /// frameescape index of the 32-bit EH registration node. Set by + /// localescape index of the 32-bit EH registration node. Set by /// WinEHStatePass and used indirectly by SEH filter functions of the parent. int EHRegNodeEscapeIndex = INT_MAX; |
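A recurring theme in this diff is threading the DataLayout explicitly through the CodeGen interfaces (ComputeValueVTs, getValueType, getTypeLegalizationCost, isLegalAddressingMode, ...) instead of fetching it from the TargetMachine or TargetLowering. The following is a minimal sketch of what an updated call site could look like; the helper name and the idea of summing legalized parts are purely illustrative, only the new signatures come from the patch, and the caller is assumed to already have a SelectionDAG, a TargetLowering and an IR Type in scope.

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Type.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

// Count how many legal parts a value of type Ty legalizes into, passing the
// DataLayout explicitly as the updated interfaces now require.
static unsigned countLegalizedParts(const SelectionDAG &DAG,
                                    const TargetLowering &TLI, Type *Ty) {
  SmallVector<EVT, 4> ValueVTs;
  // New signature: the DataLayout is passed in rather than looked up through
  // the TargetLowering; SelectionDAG::getDataLayout() is added by this patch.
  ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs);

  unsigned Parts = 0;
  for (EVT VT : ValueVTs)
    // getTypeLegalizationCost likewise takes the DataLayout first now.
    Parts += TLI.getTypeLegalizationCost(DAG.getDataLayout(),
                                         VT.getTypeForEVT(Ty->getContext()))
                 .first;
  return Parts;
}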
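The new SelectionDAG::allnodes() accessors return an iterator_range, so node walks can use a range-based for loop instead of the explicit allnodes_begin()/allnodes_end() pair. A small sketch follows; the counting helper is made up for illustration, but allnodes() and FIRST_TARGET_MEMORY_OPCODE are the entities touched by this patch.

#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Count nodes whose opcode falls in the target memory opcode range.
static unsigned countTargetMemoryNodes(const SelectionDAG &DAG) {
  unsigned Count = 0;
  for (const SDNode &N : DAG.allnodes())
    if (N.getOpcode() >= unsigned(ISD::FIRST_TARGET_MEMORY_OPCODE))
      ++Count;
  return Count;
}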
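The CommandFlags.h and Passes.h changes add a -run-pass option and a StartBefore pass ID alongside the existing StartAfter/StopAfter pair. The sketch below shows one plausible way a driver could wire the flag to TargetPassConfig::setStartStopPasses; the wiring, helper name, and error handling are assumptions, not taken from the patch.

#include "llvm/CodeGen/Passes.h"
#include "llvm/PassInfo.h"
#include "llvm/PassRegistry.h"
#include <string>

using namespace llvm;

// Configure PassConfig so that only the named pass runs: start right before
// it and stop right after it (assumed -run-pass semantics).
static bool configureRunPass(TargetPassConfig &PassConfig,
                             const std::string &PassName) {
  const PassInfo *PI =
      PassRegistry::getPassRegistry()->getPassInfo(PassName);
  if (!PI)
    return false;
  AnalysisID ID = PI->getTypeInfo();
  PassConfig.setStartStopPasses(/*StartBefore=*/ID, /*StartAfter=*/nullptr,
                                /*StopAfter=*/ID);
  return true;
}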
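The MIRYamlMapping.h additions make frame information, stack objects, and virtual register definitions serializable through the usual YAMLTraits machinery. The following rough sketch shows how a yaml::Output could emit the new fields; the field values are invented for illustration, and only the struct members and mapping keys come from the patch.

#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static void emitSampleMF(raw_ostream &OS) {
  yaml::MachineFunction MF;
  MF.Name = "foo";
  MF.FrameInfo.StackSize = 16;  // serialized under 'frameInfo: stackSize'
  MF.FrameInfo.HasCalls = true; // serialized under 'frameInfo: hasCalls'

  yaml::MachineStackObject Obj;
  Obj.ID = 0;
  Obj.Type = yaml::MachineStackObject::SpillSlot; // printed as 'spill-slot'
  Obj.Size = 8;
  Obj.Alignment = 8;
  MF.StackObjects.push_back(Obj); // serialized under the 'stack' key

  // The MappingTraits specializations defined above drive the output format.
  yaml::Output Out(OS);
  Out << MF;
}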