author    | rdivacky <rdivacky@FreeBSD.org> | 2010-04-02 08:54:30 +0000
committer | rdivacky <rdivacky@FreeBSD.org> | 2010-04-02 08:54:30 +0000
commit    | 20e856b2a58d12231aa42d5d13888b15ac03e5a4 (patch)
tree      | cf5763d092b81cecc168fa28032247ee495d06e2 /include/llvm
parent    | 2f2afc1aae898651e26987a5c71f3febb19bca98 (diff)
download  | FreeBSD-src-20e856b2a58d12231aa42d5d13888b15ac03e5a4.zip FreeBSD-src-20e856b2a58d12231aa42d5d13888b15ac03e5a4.tar.gz
Update LLVM to r100181.
Diffstat (limited to 'include/llvm')
56 files changed, 1168 insertions, 685 deletions
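One of the smaller API additions in this drop is in include/llvm/ADT/Statistic.h (see the hunk below), which gains explicit entry points for enabling and printing statistics: EnableStatistics(), PrintStatistics(), and PrintStatistics(raw_ostream &). A minimal usage sketch against an LLVM tree containing this patch; the DEBUG_TYPE string and the NumWidgets counter are made up for illustration:

```cpp
// Minimal sketch, assuming an LLVM tree containing this patch. The DEBUG_TYPE
// value and the NumWidgets counter are illustrative, not part of the patch.
#define DEBUG_TYPE "demo"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(NumWidgets, "Number of widgets processed");

int main() {
  EnableStatistics();          // new entry point: enable collection programmatically
  for (int i = 0; i != 3; ++i)
    ++NumWidgets;              // bump the counter as a pass normally would
  PrintStatistics(errs());     // new overload: print to a caller-chosen stream
  return 0;
}
```

Per the comment in the hunk, the no-argument PrintStatistics() writes to the file returned by CreateInfoOutputFile() instead of a caller-supplied stream.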
diff --git a/include/llvm/ADT/PointerUnion.h b/include/llvm/ADT/PointerUnion.h index 49c8940..3a514b5 100644 --- a/include/llvm/ADT/PointerUnion.h +++ b/include/llvm/ADT/PointerUnion.h @@ -124,7 +124,7 @@ namespace llvm { } void *getOpaqueValue() const { return Val.getOpaqueValue(); } - static PointerUnion getFromOpaqueValue(void *VP) { + static inline PointerUnion getFromOpaqueValue(void *VP) { PointerUnion V; V.Val = ValTy::getFromOpaqueValue(VP); return V; @@ -227,7 +227,7 @@ namespace llvm { } void *getOpaqueValue() const { return Val.getOpaqueValue(); } - static PointerUnion3 getFromOpaqueValue(void *VP) { + static inline PointerUnion3 getFromOpaqueValue(void *VP) { PointerUnion3 V; V.Val = ValTy::getFromOpaqueValue(VP); return V; @@ -338,7 +338,7 @@ namespace llvm { } void *getOpaqueValue() const { return Val.getOpaqueValue(); } - static PointerUnion4 getFromOpaqueValue(void *VP) { + static inline PointerUnion4 getFromOpaqueValue(void *VP) { PointerUnion4 V; V.Val = ValTy::getFromOpaqueValue(VP); return V; diff --git a/include/llvm/ADT/SmallVector.h b/include/llvm/ADT/SmallVector.h index c2afb7e..2d79a02 100644 --- a/include/llvm/ADT/SmallVector.h +++ b/include/llvm/ADT/SmallVector.h @@ -239,11 +239,20 @@ public: /// starting with "Dest", constructing elements into it as needed. template<typename It1, typename It2> static void uninitialized_copy(It1 I, It1 E, It2 Dest) { - // Use memcpy for PODs: std::uninitialized_copy optimizes to memmove, memcpy - // is better. - memcpy(&*Dest, &*I, (E-I)*sizeof(T)); + // Arbitrary iterator types; just use the basic implementation. + std::uninitialized_copy(I, E, Dest); } - + + /// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory + /// starting with "Dest", constructing elements into it as needed. + template<typename T1, typename T2> + static void uninitialized_copy(T1 *I, T1 *E, T2 *Dest) { + // Use memcpy for PODs iterated by pointers (which includes SmallVector + // iterators): std::uninitialized_copy optimizes to memmove, but we can + // use memcpy here. + memcpy(Dest, I, (E-I)*sizeof(T)); + } + /// grow - double the size of the allocated memory, guaranteeing space for at /// least one more element or MinSize if specified. void grow(size_t MinSize = 0) { @@ -501,10 +510,13 @@ public: this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten); // Replace the overwritten part. - std::copy(From, From+NumOverwritten, I); + for (; NumOverwritten > 0; --NumOverwritten) { + *I = *From; + ++I; ++From; + } // Insert the non-overwritten middle part. - this->uninitialized_copy(From+NumOverwritten, To, OldEnd); + this->uninitialized_copy(From, To, OldEnd); return I; } diff --git a/include/llvm/ADT/Statistic.h b/include/llvm/ADT/Statistic.h index 1a4833c..c593c58 100644 --- a/include/llvm/ADT/Statistic.h +++ b/include/llvm/ADT/Statistic.h @@ -29,6 +29,7 @@ #include "llvm/System/Atomic.h" namespace llvm { +class raw_ostream; class Statistic { public: @@ -113,6 +114,15 @@ protected: #define STATISTIC(VARNAME, DESC) \ static llvm::Statistic VARNAME = { DEBUG_TYPE, DESC, 0, 0 } +/// \brief Enable the collection and printing of statistics. +void EnableStatistics(); + +/// \brief Print statistics to the file returned by CreateInfoOutputFile(). +void PrintStatistics(); + +/// \brief Print statistics to the given output stream. 
+void PrintStatistics(raw_ostream &OS); + } // End llvm namespace #endif diff --git a/include/llvm/ADT/StringMap.h b/include/llvm/ADT/StringMap.h index 86e8546..4821938 100644 --- a/include/llvm/ADT/StringMap.h +++ b/include/llvm/ADT/StringMap.h @@ -216,6 +216,14 @@ public: static const StringMapEntry &GetStringMapEntryFromValue(const ValueTy &V) { return GetStringMapEntryFromValue(const_cast<ValueTy&>(V)); } + + /// GetStringMapEntryFromKeyData - Given key data that is known to be embedded + /// into a StringMapEntry, return the StringMapEntry itself. + static StringMapEntry &GetStringMapEntryFromKeyData(const char *KeyData) { + char *Ptr = const_cast<char*>(KeyData) - sizeof(StringMapEntry<ValueTy>); + return *reinterpret_cast<StringMapEntry*>(Ptr); + } + /// Destroy - Destroy this StringMapEntry, releasing memory back to the /// specified allocator. diff --git a/include/llvm/Analysis/DebugInfo.h b/include/llvm/Analysis/DebugInfo.h index 9b1d1b30..4e8c4c8 100644 --- a/include/llvm/Analysis/DebugInfo.h +++ b/include/llvm/Analysis/DebugInfo.h @@ -395,8 +395,21 @@ namespace llvm { } unsigned isArtificial() const { return getUnsignedField(14); } - StringRef getFilename() const { return getCompileUnit().getFilename();} - StringRef getDirectory() const { return getCompileUnit().getDirectory();} + StringRef getFilename() const { + if (getVersion() == llvm::LLVMDebugVersion7) + return getCompileUnit().getFilename(); + + DIFile F = getFieldAs<DIFile>(6); + return F.getFilename(); + } + + StringRef getDirectory() const { + if (getVersion() == llvm::LLVMDebugVersion7) + return getCompileUnit().getFilename(); + + DIFile F = getFieldAs<DIFile>(6); + return F.getDirectory(); + } /// Verify - Verify that a subprogram descriptor is well formed. bool Verify() const; diff --git a/include/llvm/Analysis/Dominators.h b/include/llvm/Analysis/Dominators.h index 1e94f30..f810310 100644 --- a/include/llvm/Analysis/Dominators.h +++ b/include/llvm/Analysis/Dominators.h @@ -116,12 +116,12 @@ public: return true; SmallPtrSet<NodeT *, 4> OtherChildren; - for(iterator I = Other->begin(), E = Other->end(); I != E; ++I) { + for (iterator I = Other->begin(), E = Other->end(); I != E; ++I) { NodeT *Nd = (*I)->getBlock(); OtherChildren.insert(Nd); } - for(iterator I = begin(), E = end(); I != E; ++I) { + for (iterator I = begin(), E = end(); I != E; ++I) { NodeT *N = (*I)->getBlock(); if (OtherChildren.count(N) == 0) return true; @@ -240,8 +240,9 @@ protected: template<class N, class GraphT> void Split(DominatorTreeBase<typename GraphT::NodeType>& DT, typename GraphT::NodeType* NewBB) { - assert(std::distance(GraphT::child_begin(NewBB), GraphT::child_end(NewBB)) == 1 - && "NewBB should have a single successor!"); + assert(std::distance(GraphT::child_begin(NewBB), + GraphT::child_end(NewBB)) == 1 && + "NewBB should have a single successor!"); typename GraphT::NodeType* NewBBSucc = *GraphT::child_begin(NewBB); std::vector<typename GraphT::NodeType*> PredBlocks; @@ -374,8 +375,8 @@ public: /// isReachableFromEntry - Return true if A is dominated by the entry /// block of the function containing it. bool isReachableFromEntry(NodeT* A) { - assert (!this->isPostDominator() - && "This is not implemented for post dominators"); + assert(!this->isPostDominator() && + "This is not implemented for post dominators"); return dominates(&A->getParent()->front(), A); } @@ -393,8 +394,9 @@ public: // Compare the result of the tree walk and the dfs numbers, if expensive // checks are enabled. 
#ifdef XDEBUG - assert(!DFSInfoValid - || (dominatedBySlowTreeWalk(A, B) == B->DominatedBy(A))); + assert((!DFSInfoValid || + (dominatedBySlowTreeWalk(A, B) == B->DominatedBy(A))) && + "Tree walk disagrees with dfs numbers!"); #endif if (DFSInfoValid) @@ -430,16 +432,16 @@ public: /// findNearestCommonDominator - Find nearest common dominator basic block /// for basic block A and B. If there is no such block then return NULL. NodeT *findNearestCommonDominator(NodeT *A, NodeT *B) { - - assert (!this->isPostDominator() - && "This is not implemented for post dominators"); - assert (A->getParent() == B->getParent() - && "Two blocks are not in same function"); - - // If either A or B is a entry block then it is nearest common dominator. - NodeT &Entry = A->getParent()->front(); - if (A == &Entry || B == &Entry) - return &Entry; + assert(A->getParent() == B->getParent() && + "Two blocks are not in same function"); + + // If either A or B is a entry block then it is nearest common dominator + // (for forward-dominators). + if (!this->isPostDominator()) { + NodeT &Entry = A->getParent()->front(); + if (A == &Entry || B == &Entry) + return &Entry; + } // If B dominates A then B is nearest common dominator. if (dominates(B, A)) @@ -463,7 +465,7 @@ public: // Walk NodeB immediate dominators chain and find common dominator node. DomTreeNodeBase<NodeT> *IDomB = NodeB->getIDom(); - while(IDomB) { + while (IDomB) { if (NodeADoms.count(IDomB) != 0) return IDomB->getBlock(); @@ -508,8 +510,8 @@ public: /// children list. Deletes dominator node associated with basic block BB. void eraseNode(NodeT *BB) { DomTreeNodeBase<NodeT> *Node = getNode(BB); - assert (Node && "Removing node that isn't in dominator tree."); - assert (Node->getChildren().empty() && "Node is not a leaf node."); + assert(Node && "Removing node that isn't in dominator tree."); + assert(Node->getChildren().empty() && "Node is not a leaf node."); // Remove node from immediate dominator's children list. DomTreeNodeBase<NodeT> *IDom = Node->getIDom(); @@ -952,7 +954,7 @@ public: return true; } - if(!tmpSet.empty()) + if (!tmpSet.empty()) // There are nodes that are in DS2 but not in DS1. return true; diff --git a/include/llvm/CodeGen/AsmPrinter.h b/include/llvm/CodeGen/AsmPrinter.h index 2cd477e..ffeb44d 100644 --- a/include/llvm/CodeGen/AsmPrinter.h +++ b/include/llvm/CodeGen/AsmPrinter.h @@ -47,7 +47,6 @@ namespace llvm { class MCSection; class MCStreamer; class MCSymbol; - class MDNode; class DwarfWriter; class Mangler; class MCAsmInfo; @@ -138,9 +137,6 @@ namespace llvm { mutable unsigned Counter; mutable unsigned SetCounter; - // Private state for processDebugLoc() - mutable const MDNode *PrevDLT; - protected: explicit AsmPrinter(formatted_raw_ostream &o, TargetMachine &TM, MCStreamer &Streamer); diff --git a/include/llvm/CodeGen/DwarfWriter.h b/include/llvm/CodeGen/DwarfWriter.h index 3c7f802..494400e 100644 --- a/include/llvm/CodeGen/DwarfWriter.h +++ b/include/llvm/CodeGen/DwarfWriter.h @@ -83,19 +83,11 @@ public: /// void EndFunction(const MachineFunction *MF); - /// RecordSourceLine - Register a source line with debug info. Returns the - /// unique label that was emitted and which provides correspondence to - /// the source line list. - MCSymbol *RecordSourceLine(unsigned Line, unsigned Col, MDNode *Scope); - - /// getRecordSourceLineCount - Count source lines. - unsigned getRecordSourceLineCount(); - /// ShouldEmitDwarfDebug - Returns true if Dwarf debugging declarations should /// be emitted. 
bool ShouldEmitDwarfDebug() const; - void BeginScope(const MachineInstr *MI, MCSymbol *Label); + void BeginScope(const MachineInstr *MI); void EndScope(const MachineInstr *MI); }; diff --git a/include/llvm/CodeGen/LiveInterval.h b/include/llvm/CodeGen/LiveInterval.h index eb5901c..70e79ce 100644 --- a/include/llvm/CodeGen/LiveInterval.h +++ b/include/llvm/CodeGen/LiveInterval.h @@ -67,7 +67,7 @@ namespace llvm { } cr; public: - + typedef SpecificBumpPtrAllocator<VNInfo> Allocator; typedef SmallVector<SlotIndex, 4> KillSet; /// The ID number of this value. @@ -330,12 +330,7 @@ namespace llvm { } void clear() { - while (!valnos.empty()) { - VNInfo *VNI = valnos.back(); - valnos.pop_back(); - VNI->~VNInfo(); - } - + valnos.clear(); ranges.clear(); } @@ -370,10 +365,8 @@ namespace llvm { /// getNextValue - Create a new value number and return it. MIIdx specifies /// the instruction that defines the value number. VNInfo *getNextValue(SlotIndex def, MachineInstr *CopyMI, - bool isDefAccurate, BumpPtrAllocator &VNInfoAllocator){ - VNInfo *VNI = - static_cast<VNInfo*>(VNInfoAllocator.Allocate((unsigned)sizeof(VNInfo), - alignof<VNInfo>())); + bool isDefAccurate, VNInfo::Allocator &VNInfoAllocator) { + VNInfo *VNI = VNInfoAllocator.Allocate(); new (VNI) VNInfo((unsigned)valnos.size(), def, CopyMI); VNI->setIsDefAccurate(isDefAccurate); valnos.push_back(VNI); @@ -383,11 +376,8 @@ namespace llvm { /// Create a copy of the given value. The new value will be identical except /// for the Value number. VNInfo *createValueCopy(const VNInfo *orig, - BumpPtrAllocator &VNInfoAllocator) { - VNInfo *VNI = - static_cast<VNInfo*>(VNInfoAllocator.Allocate((unsigned)sizeof(VNInfo), - alignof<VNInfo>())); - + VNInfo::Allocator &VNInfoAllocator) { + VNInfo *VNI = VNInfoAllocator.Allocate(); new (VNI) VNInfo((unsigned)valnos.size(), *orig); valnos.push_back(VNI); return VNI; @@ -427,14 +417,14 @@ namespace llvm { /// VNInfoAllocator since it will create a new val#. void MergeInClobberRanges(LiveIntervals &li_, const LiveInterval &Clobbers, - BumpPtrAllocator &VNInfoAllocator); + VNInfo::Allocator &VNInfoAllocator); /// MergeInClobberRange - Same as MergeInClobberRanges except it merge in a /// single LiveRange only. void MergeInClobberRange(LiveIntervals &li_, SlotIndex Start, SlotIndex End, - BumpPtrAllocator &VNInfoAllocator); + VNInfo::Allocator &VNInfoAllocator); /// MergeValueInAsValue - Merge all of the live ranges of a specific val# /// in RHS into this live interval as the specified value number. @@ -454,7 +444,7 @@ namespace llvm { /// Copy - Copy the specified live interval. This copies all the fields /// except for the register of the interval. void Copy(const LiveInterval &RHS, MachineRegisterInfo *MRI, - BumpPtrAllocator &VNInfoAllocator); + VNInfo::Allocator &VNInfoAllocator); bool empty() const { return ranges.empty(); } diff --git a/include/llvm/CodeGen/LiveIntervalAnalysis.h b/include/llvm/CodeGen/LiveIntervalAnalysis.h index 1a2cc25..8ddcac7 100644 --- a/include/llvm/CodeGen/LiveIntervalAnalysis.h +++ b/include/llvm/CodeGen/LiveIntervalAnalysis.h @@ -55,7 +55,7 @@ namespace llvm { /// Special pool allocator for VNInfo's (LiveInterval val#). 
/// - BumpPtrAllocator VNInfoAllocator; + VNInfo::Allocator VNInfoAllocator; typedef DenseMap<unsigned, LiveInterval*> Reg2IntervalMap; Reg2IntervalMap r2iMap_; @@ -221,7 +221,7 @@ namespace llvm { indexes_->renumberIndexes(); } - BumpPtrAllocator& getVNInfoAllocator() { return VNInfoAllocator; } + VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; } /// getVNInfoSourceReg - Helper function that parses the specified VNInfo /// copy field and returns the source register that defines it. diff --git a/include/llvm/CodeGen/LiveStackAnalysis.h b/include/llvm/CodeGen/LiveStackAnalysis.h index e01d1ae..c6af6a1 100644 --- a/include/llvm/CodeGen/LiveStackAnalysis.h +++ b/include/llvm/CodeGen/LiveStackAnalysis.h @@ -27,7 +27,7 @@ namespace llvm { class LiveStacks : public MachineFunctionPass { /// Special pool allocator for VNInfo's (LiveInterval val#). /// - BumpPtrAllocator VNInfoAllocator; + VNInfo::Allocator VNInfoAllocator; /// S2IMap - Stack slot indices to live interval mapping. /// @@ -91,7 +91,7 @@ namespace llvm { return I->second; } - BumpPtrAllocator& getVNInfoAllocator() { return VNInfoAllocator; } + VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; } virtual void getAnalysisUsage(AnalysisUsage &AU) const; virtual void releaseMemory(); diff --git a/include/llvm/CodeGen/MachineOperand.h b/include/llvm/CodeGen/MachineOperand.h index e522947..b5f6bcd 100644 --- a/include/llvm/CodeGen/MachineOperand.h +++ b/include/llvm/CodeGen/MachineOperand.h @@ -285,6 +285,11 @@ public: IsEarlyClobber = Val; } + void setIsDebug(bool Val = true) { + assert(isReg() && IsDef && "Wrong MachineOperand accessor"); + IsDebug = Val; + } + //===--------------------------------------------------------------------===// // Accessors for various operand types. //===--------------------------------------------------------------------===// diff --git a/include/llvm/CodeGen/RuntimeLibcalls.h b/include/llvm/CodeGen/RuntimeLibcalls.h index 4ac3160..42ae563 100644 --- a/include/llvm/CodeGen/RuntimeLibcalls.h +++ b/include/llvm/CodeGen/RuntimeLibcalls.h @@ -169,6 +169,8 @@ namespace RTLIB { FPTOSINT_F32_I32, FPTOSINT_F32_I64, FPTOSINT_F32_I128, + FPTOSINT_F64_I8, + FPTOSINT_F64_I16, FPTOSINT_F64_I32, FPTOSINT_F64_I64, FPTOSINT_F64_I128, @@ -183,6 +185,8 @@ namespace RTLIB { FPTOUINT_F32_I32, FPTOUINT_F32_I64, FPTOUINT_F32_I128, + FPTOUINT_F64_I8, + FPTOUINT_F64_I16, FPTOUINT_F64_I32, FPTOUINT_F64_I64, FPTOUINT_F64_I128, diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h index c8d29aa..610edb6 100644 --- a/include/llvm/CodeGen/SelectionDAG.h +++ b/include/llvm/CodeGen/SelectionDAG.h @@ -34,6 +34,7 @@ class FunctionLoweringInfo; class MachineConstantPoolValue; class MachineFunction; class MachineModuleInfo; +class MDNode; class SDNodeOrdering; class SDDbgValue; class TargetLowering; @@ -60,42 +61,40 @@ private: /// SDDbgInfo - Keeps track of dbg_value information through SDISel. We do /// not build SDNodes for these so as not to perturb the generated code; -/// instead the info is kept off to the side in this structure. SDNodes may -/// have an associated dbg_value entry in DbgValMap. Debug info that is not -/// associated with any SDNode is held in DbgConstMap. It is possible for -/// optimizations to change a variable to a constant, in which case the -/// corresponding debug info is moved from the variable to the constant table -/// (NYI). +/// instead the info is kept off to the side in this structure. 
Each SDNode may +/// have one or more associated dbg_value entries. This information is kept in +/// DbgValMap. class SDDbgInfo { - DenseMap<const SDNode*, SDDbgValue*> DbgVblMap; - SmallVector<SDDbgValue*, 4> DbgConstMap; + SmallVector<SDDbgValue*, 32> DbgValues; + DenseMap<const SDNode*, SmallVector<SDDbgValue*, 2> > DbgValMap; void operator=(const SDDbgInfo&); // Do not implement. SDDbgInfo(const SDDbgInfo&); // Do not implement. public: SDDbgInfo() {} - void add(const SDNode *Node, SDDbgValue *V) { - DbgVblMap[Node] = V; + void add(SDDbgValue *V, const SDNode *Node = 0) { + if (Node) + DbgValMap[Node].push_back(V); + DbgValues.push_back(V); } - void add(SDDbgValue *V) { DbgConstMap.push_back(V); } - void remove(const SDNode *Node) { - DenseMap<const SDNode*, SDDbgValue*>::iterator Itr = - DbgVblMap.find(Node); - if (Itr != DbgVblMap.end()) - DbgVblMap.erase(Itr); - } - // No need to remove a constant. + void clear() { - DbgVblMap.clear(); - DbgConstMap.clear(); + DbgValMap.clear(); + DbgValues.clear(); } - SDDbgValue *getSDDbgValue(const SDNode *Node) { - return DbgVblMap[Node]; + + bool empty() const { + return DbgValues.empty(); + } + + SmallVector<SDDbgValue*,2> &getSDDbgValues(const SDNode *Node) { + return DbgValMap[Node]; } - typedef SmallVector<SDDbgValue*, 4>::iterator ConstDbgIterator; - ConstDbgIterator DbgConstBegin() { return DbgConstMap.begin(); } - ConstDbgIterator DbgConstEnd() { return DbgConstMap.end(); } + + typedef SmallVector<SDDbgValue*,32>::iterator DbgIterator; + DbgIterator DbgBegin() { return DbgValues.begin(); } + DbgIterator DbgEnd() { return DbgValues.end(); } }; enum CombineLevel { @@ -769,6 +768,15 @@ public: SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTs, const SDValue *Ops, unsigned NumOps); + /// getDbgValue - Creates a SDDbgValue node. + /// + SDDbgValue *getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off, + DebugLoc DL, unsigned O); + SDDbgValue *getDbgValue(MDNode *MDPtr, Value *C, uint64_t Off, + DebugLoc DL, unsigned O); + SDDbgValue *getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off, + DebugLoc DL, unsigned O); + /// DAGUpdateListener - Clients of various APIs that cause global effects on /// the DAG can optionally implement this interface. This allows the clients /// to handle the various sorts of updates that happen. @@ -871,19 +879,21 @@ public: /// GetOrdering - Get the order for the SDNode. unsigned GetOrdering(const SDNode *SD) const; - /// AssignDbgInfo - Assign debug info to the SDNode. - void AssignDbgInfo(SDNode *SD, SDDbgValue *db); + /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the + /// value is produced by SD. + void AddDbgValue(SDDbgValue *DB, SDNode *SD = 0); - /// RememberDbgInfo - Remember debug info with no associated SDNode. - void RememberDbgInfo(SDDbgValue *db); + /// GetDbgValues - Get the debug values which reference the given SDNode. + SmallVector<SDDbgValue*,2> &GetDbgValues(const SDNode* SD) { + return DbgInfo->getSDDbgValues(SD); + } - /// GetDbgInfo - Get the debug info for the SDNode. - SDDbgValue *GetDbgInfo(const SDNode* SD); + /// hasDebugValues - Return true if there are any SDDbgValue nodes associated + /// with this SelectionDAG. 
+ bool hasDebugValues() const { return !DbgInfo->empty(); } - SDDbgInfo::ConstDbgIterator DbgConstBegin() { - return DbgInfo->DbgConstBegin(); - } - SDDbgInfo::ConstDbgIterator DbgConstEnd() { return DbgInfo->DbgConstEnd(); } + SDDbgInfo::DbgIterator DbgBegin() { return DbgInfo->DbgBegin(); } + SDDbgInfo::DbgIterator DbgEnd() { return DbgInfo->DbgEnd(); } void dump() const; diff --git a/include/llvm/CodeGen/SelectionDAGISel.h b/include/llvm/CodeGen/SelectionDAGISel.h index a1576be..def09c7 100644 --- a/include/llvm/CodeGen/SelectionDAGISel.h +++ b/include/llvm/CodeGen/SelectionDAGISel.h @@ -136,6 +136,8 @@ public: OPC_EmitRegister, OPC_EmitConvertToTarget, OPC_EmitMergeInputChains, + OPC_EmitMergeInputChains1_0, + OPC_EmitMergeInputChains1_1, OPC_EmitCopyToReg, OPC_EmitNodeXForm, OPC_EmitNode, diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h index c16a48a..4dcf013 100644 --- a/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/include/llvm/CodeGen/SelectionDAGNodes.h @@ -616,7 +616,7 @@ namespace ISD { /// which do not reference a specific memory location should be less than /// this value. Those that do must not be less than this value, and can /// be used with SelectionDAG::getMemIntrinsicNode. - static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END+80; + static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END+100; /// Node predicates diff --git a/include/llvm/Function.h b/include/llvm/Function.h index 3882233..2b19fa5 100644 --- a/include/llvm/Function.h +++ b/include/llvm/Function.h @@ -409,8 +409,11 @@ public: void dropAllReferences(); /// hasAddressTaken - returns true if there are any uses of this function - /// other than direct calls or invokes to it. - bool hasAddressTaken() const; + /// other than direct calls or invokes to it. Optionally passes back the + /// offending user for diagnostic purposes. + /// + bool hasAddressTaken(const User** = 0) const; + private: // Shadow Value::setValueSubclassData with a private forwarding method so that // subclasses cannot accidentally use it. diff --git a/include/llvm/InlineAsm.h b/include/llvm/InlineAsm.h index 4490ce5..2ac0fca 100644 --- a/include/llvm/InlineAsm.h +++ b/include/llvm/InlineAsm.h @@ -24,8 +24,17 @@ namespace llvm { class PointerType; class FunctionType; class Module; +struct InlineAsmKeyType; +template<class ValType, class TypeClass, class ConstantClass, bool HasLargeKey> +class ConstantUniqueMap; +template<class ConstantClass, class TypeClass, class ValType> +struct ConstantCreator; class InlineAsm : public Value { + friend struct ConstantCreator<InlineAsm, PointerType, InlineAsmKeyType>; + friend class ConstantUniqueMap<InlineAsmKeyType, PointerType, InlineAsm, + false>; + InlineAsm(const InlineAsm &); // do not implement void operator=(const InlineAsm&); // do not implement @@ -33,10 +42,14 @@ class InlineAsm : public Value { bool HasSideEffects; bool IsAlignStack; - InlineAsm(const FunctionType *Ty, StringRef AsmString, - StringRef Constraints, bool hasSideEffects, - bool isAlignStack = false); + InlineAsm(const PointerType *Ty, const std::string &AsmString, + const std::string &Constraints, bool hasSideEffects, + bool isAlignStack); virtual ~InlineAsm(); + + /// When the ConstantUniqueMap merges two types and makes two InlineAsms + /// identical, it destroys one of them with this method. + void destroyConstant(); public: /// InlineAsm::get - Return the specified uniqued inline asm string. 
diff --git a/include/llvm/Instruction.h b/include/llvm/Instruction.h index cf9dc44..13331e6 100644 --- a/include/llvm/Instruction.h +++ b/include/llvm/Instruction.h @@ -17,6 +17,7 @@ #include "llvm/User.h" #include "llvm/ADT/ilist_node.h" +#include "llvm/Support/DebugLoc.h" namespace llvm { @@ -31,6 +32,7 @@ class Instruction : public User, public ilist_node<Instruction> { Instruction(const Instruction &); // Do not implement BasicBlock *Parent; + NewDebugLoc DbgLoc; // 'dbg' Metadata cache. enum { /// HasMetadataBit - This is a bit stored in the SubClassData field which @@ -123,7 +125,13 @@ public: /// hasMetadata() - Return true if this instruction has any metadata attached /// to it. bool hasMetadata() const { - return (getSubclassDataFromValue() & HasMetadataBit) != 0; + return !DbgLoc.isUnknown() || hasMetadataHashEntry(); + } + + /// hasMetadataOtherThanDebugLoc - Return true if this instruction has + /// metadata attached to it other than a debug location. + bool hasMetadataOtherThanDebugLoc() const { + return hasMetadataHashEntry(); } /// getMetadata - Get the metadata of given kind attached to this Instruction. @@ -148,17 +156,49 @@ public: getAllMetadataImpl(MDs); } + /// getAllMetadataOtherThanDebugLoc - This does the same thing as + /// getAllMetadata, except that it filters out the debug location. + void getAllMetadataOtherThanDebugLoc(SmallVectorImpl<std::pair<unsigned, + MDNode*> > &MDs) const { + if (hasMetadataOtherThanDebugLoc()) + getAllMetadataOtherThanDebugLocImpl(MDs); + } + /// setMetadata - Set the metadata of the specified kind to the specified /// node. This updates/replaces metadata if already present, or removes it if /// Node is null. void setMetadata(unsigned KindID, MDNode *Node); void setMetadata(const char *Kind, MDNode *Node); + /// setDbgMetadata - This is just an optimized helper function that is + /// equivalent to setMetadata("dbg", Node); + void setDbgMetadata(MDNode *Node); + + /// getDbgMetadata - This is just an optimized helper function that is + /// equivalent to calling getMetadata("dbg"). + MDNode *getDbgMetadata() const { + return DbgLoc.getAsMDNode(getContext()); + } + + /// setDebugLoc - Set the debug location information for this instruction. + void setDebugLoc(const NewDebugLoc &Loc) { DbgLoc = Loc; } + + /// getDebugLoc - Return the debug location for this node as a DebugLoc. + const NewDebugLoc &getDebugLoc() const { return DbgLoc; } + private: + /// hasMetadataHashEntry - Return true if we have an entry in the on-the-side + /// metadata hash. + bool hasMetadataHashEntry() const { + return (getSubclassDataFromValue() & HasMetadataBit) != 0; + } + // These are all implemented in Metadata.cpp. MDNode *getMetadataImpl(unsigned KindID) const; MDNode *getMetadataImpl(const char *Kind) const; void getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned,MDNode*> > &)const; + void getAllMetadataOtherThanDebugLocImpl(SmallVectorImpl<std::pair<unsigned, + MDNode*> > &) const; void removeAllMetadata(); public: //===--------------------------------------------------------------------===// @@ -315,7 +355,7 @@ private: return Value::getSubclassDataFromValue(); } - void setHasMetadata(bool V) { + void setHasMetadataHashEntry(bool V) { setValueSubclassData((getSubclassDataFromValue() & ~HasMetadataBit) | (V ? 
HasMetadataBit : 0)); } diff --git a/include/llvm/Instructions.h b/include/llvm/Instructions.h index b1f1996..413a595 100644 --- a/include/llvm/Instructions.h +++ b/include/llvm/Instructions.h @@ -971,6 +971,13 @@ public: unsigned getParamAlignment(unsigned i) const { return AttributeList.getParamAlignment(i); } + + /// @brief Return true if the call should not be inlined. + bool isNoInline() const { return paramHasAttr(~0, Attribute::NoInline); } + void setIsNoInline(bool Value) { + if (Value) addAttribute(~0, Attribute::NoInline); + else removeAttribute(~0, Attribute::NoInline); + } /// @brief Determine if the call does not access memory. bool doesNotAccessMemory() const { @@ -2456,6 +2463,13 @@ public: return AttributeList.getParamAlignment(i); } + /// @brief Return true if the call should not be inlined. + bool isNoInline() const { return paramHasAttr(~0, Attribute::NoInline); } + void setIsNoInline(bool Value) { + if (Value) addAttribute(~0, Attribute::NoInline); + else removeAttribute(~0, Attribute::NoInline); + } + /// @brief Determine if the call does not access memory. bool doesNotAccessMemory() const { return paramHasAttr(~0, Attribute::ReadNone); @@ -2508,32 +2522,31 @@ public: /// indirect function invocation. /// Function *getCalledFunction() const { - return dyn_cast<Function>(getOperand(0)); + return dyn_cast<Function>(Op<-3>()); } /// getCalledValue - Get a pointer to the function that is invoked by this /// instruction - const Value *getCalledValue() const { return getOperand(0); } - Value *getCalledValue() { return getOperand(0); } + const Value *getCalledValue() const { return Op<-3>(); } + Value *getCalledValue() { return Op<-3>(); } /// setCalledFunction - Set the function called. void setCalledFunction(Value* Fn) { - Op<0>() = Fn; + Op<-3>() = Fn; } // get*Dest - Return the destination basic blocks... 
BasicBlock *getNormalDest() const { - return cast<BasicBlock>(getOperand(1)); + return cast<BasicBlock>(Op<-2>()); } BasicBlock *getUnwindDest() const { - return cast<BasicBlock>(getOperand(2)); + return cast<BasicBlock>(Op<-1>()); } void setNormalDest(BasicBlock *B) { - setOperand(1, (Value*)B); + Op<-2>() = reinterpret_cast<Value*>(B); } - void setUnwindDest(BasicBlock *B) { - setOperand(2, (Value*)B); + Op<-1>() = reinterpret_cast<Value*>(B); } BasicBlock *getSuccessor(unsigned i) const { @@ -2543,7 +2556,7 @@ public: void setSuccessor(unsigned idx, BasicBlock *NewSucc) { assert(idx < 2 && "Successor # out of range for invoke!"); - setOperand(idx+1, (Value*)NewSucc); + *(&Op<-2>() + idx) = reinterpret_cast<Value*>(NewSucc); } unsigned getNumSuccessors() const { return 2; } @@ -2556,6 +2569,7 @@ public: static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } + private: virtual BasicBlock *getSuccessorV(unsigned idx) const; virtual unsigned getNumSuccessorsV() const; diff --git a/include/llvm/Intrinsics.td b/include/llvm/Intrinsics.td index 54c7b1f..d66e80f 100644 --- a/include/llvm/Intrinsics.td +++ b/include/llvm/Intrinsics.td @@ -176,19 +176,19 @@ class GCCBuiltin<string name> { //===--------------- Variable Argument Handling Intrinsics ----------------===// // -def int_vastart : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [], "llvm.va_start">; -def int_vacopy : Intrinsic<[llvm_void_ty], [llvm_ptr_ty, llvm_ptr_ty], [], +def int_vastart : Intrinsic<[], [llvm_ptr_ty], [], "llvm.va_start">; +def int_vacopy : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], [], "llvm.va_copy">; -def int_vaend : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [], "llvm.va_end">; +def int_vaend : Intrinsic<[], [llvm_ptr_ty], [], "llvm.va_end">; //===------------------- Garbage Collection Intrinsics --------------------===// // -def int_gcroot : Intrinsic<[llvm_void_ty], +def int_gcroot : Intrinsic<[], [llvm_ptrptr_ty, llvm_ptr_ty]>; def int_gcread : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_ptrptr_ty], [IntrReadArgMem]>; -def int_gcwrite : Intrinsic<[llvm_void_ty], +def int_gcwrite : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_ptrptr_ty], [IntrWriteArgMem, NoCapture<1>, NoCapture<2>]>; @@ -201,37 +201,37 @@ def int_frameaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>; // model their dependencies on allocas. def int_stacksave : Intrinsic<[llvm_ptr_ty]>, GCCBuiltin<"__builtin_stack_save">; -def int_stackrestore : Intrinsic<[llvm_void_ty], [llvm_ptr_ty]>, +def int_stackrestore : Intrinsic<[], [llvm_ptr_ty]>, GCCBuiltin<"__builtin_stack_restore">; // IntrWriteArgMem is more pessimistic than strictly necessary for prefetch, // however it does conveniently prevent the prefetch from being reordered // with respect to nearby accesses to the same memory. -def int_prefetch : Intrinsic<[llvm_void_ty], +def int_prefetch : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [IntrWriteArgMem, NoCapture<0>]>; -def int_pcmarker : Intrinsic<[llvm_void_ty], [llvm_i32_ty]>; +def int_pcmarker : Intrinsic<[], [llvm_i32_ty]>; def int_readcyclecounter : Intrinsic<[llvm_i64_ty]>; // Stack Protector Intrinsic - The stackprotector intrinsic writes the stack // guard to the correct place on the stack frame. 
-def int_stackprotector : Intrinsic<[llvm_void_ty], +def int_stackprotector : Intrinsic<[], [llvm_ptr_ty, llvm_ptrptr_ty], [IntrWriteMem]>; //===------------------- Standard C Library Intrinsics --------------------===// // -def int_memcpy : Intrinsic<[llvm_void_ty], +def int_memcpy : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_anyint_ty, llvm_i32_ty], [IntrWriteArgMem, NoCapture<0>, NoCapture<1>]>; -def int_memmove : Intrinsic<[llvm_void_ty], +def int_memmove : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_anyint_ty, llvm_i32_ty], [IntrWriteArgMem, NoCapture<0>, NoCapture<1>]>; -def int_memset : Intrinsic<[llvm_void_ty], +def int_memset : Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty, llvm_anyint_ty, llvm_i32_ty], [IntrWriteArgMem, NoCapture<0>]>; @@ -255,9 +255,9 @@ let Properties = [IntrReadMem] in { // NOTE: these are internal interfaces. def int_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>; -def int_longjmp : Intrinsic<[llvm_void_ty], [llvm_ptr_ty, llvm_i32_ty]>; +def int_longjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty]>; def int_sigsetjmp : Intrinsic<[llvm_i32_ty] , [llvm_ptr_ty, llvm_i32_ty]>; -def int_siglongjmp : Intrinsic<[llvm_void_ty], [llvm_ptr_ty, llvm_i32_ty]>; +def int_siglongjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty]>; // Internal interface for object size checking def int_objectsize : Intrinsic<[llvm_anyint_ty], [llvm_ptr_ty, llvm_i1_ty], @@ -282,9 +282,9 @@ let Properties = [IntrNoMem] in { // optimizers can change them aggressively. Special handling needed in a few // places. let Properties = [IntrNoMem] in { - def int_dbg_declare : Intrinsic<[llvm_void_ty], + def int_dbg_declare : Intrinsic<[], [llvm_metadata_ty, llvm_metadata_ty]>; - def int_dbg_value : Intrinsic<[llvm_void_ty], + def int_dbg_value : Intrinsic<[], [llvm_metadata_ty, llvm_i64_ty, llvm_metadata_ty]>; } @@ -297,24 +297,24 @@ def int_eh_selector : Intrinsic<[llvm_i32_ty], def int_eh_typeid_for : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>; -def int_eh_return_i32 : Intrinsic<[llvm_void_ty], [llvm_i32_ty, llvm_ptr_ty]>; -def int_eh_return_i64 : Intrinsic<[llvm_void_ty], [llvm_i64_ty, llvm_ptr_ty]>; +def int_eh_return_i32 : Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty]>; +def int_eh_return_i64 : Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty]>; -def int_eh_unwind_init: Intrinsic<[llvm_void_ty]>, +def int_eh_unwind_init: Intrinsic<[]>, GCCBuiltin<"__builtin_unwind_init">; def int_eh_dwarf_cfa : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty]>; let Properties = [IntrNoMem] in { def int_eh_sjlj_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>; - def int_eh_sjlj_longjmp : Intrinsic<[llvm_void_ty], [llvm_ptr_ty]>; + def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty]>; def int_eh_sjlj_lsda : Intrinsic<[llvm_ptr_ty]>; - def int_eh_sjlj_callsite: Intrinsic<[llvm_void_ty], [llvm_i32_ty]>; + def int_eh_sjlj_callsite: Intrinsic<[], [llvm_i32_ty]>; } //===---------------- Generic Variable Attribute Intrinsics----------------===// // -def int_var_annotation : Intrinsic<[llvm_void_ty], +def int_var_annotation : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty], [], "llvm.var.annotation">; @@ -361,7 +361,7 @@ def int_umul_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty], //===------------------------- Atomic Intrinsics --------------------------===// // -def int_memory_barrier : Intrinsic<[llvm_void_ty], +def int_memory_barrier : Intrinsic<[], [llvm_i1_ty, llvm_i1_ty, llvm_i1_ty, llvm_i1_ty, llvm_i1_ty], []>, GCCBuiltin<"__builtin_llvm_memory_barrier">; @@ -429,16 +429,16 @@ def int_atomic_load_umax : 
Intrinsic<[llvm_anyint_ty], //===------------------------- Memory Use Markers -------------------------===// // -def int_lifetime_start : Intrinsic<[llvm_void_ty], +def int_lifetime_start : Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty], [IntrWriteArgMem, NoCapture<1>]>; -def int_lifetime_end : Intrinsic<[llvm_void_ty], +def int_lifetime_end : Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty], [IntrWriteArgMem, NoCapture<1>]>; def int_invariant_start : Intrinsic<[llvm_descriptor_ty], [llvm_i64_ty, llvm_ptr_ty], [IntrReadArgMem, NoCapture<1>]>; -def int_invariant_end : Intrinsic<[llvm_void_ty], +def int_invariant_end : Intrinsic<[], [llvm_descriptor_ty, llvm_i64_ty, llvm_ptr_ty], [IntrWriteArgMem, NoCapture<2>]>; @@ -447,7 +447,7 @@ def int_invariant_end : Intrinsic<[llvm_void_ty], // def int_flt_rounds : Intrinsic<[llvm_i32_ty]>, GCCBuiltin<"__builtin_flt_rounds">; -def int_trap : Intrinsic<[llvm_void_ty]>, +def int_trap : Intrinsic<[]>, GCCBuiltin<"__builtin_trap">; // Intrisics to support half precision floating point format diff --git a/include/llvm/IntrinsicsARM.td b/include/llvm/IntrinsicsARM.td index c408a2f..40333ca 100644 --- a/include/llvm/IntrinsicsARM.td +++ b/include/llvm/IntrinsicsARM.td @@ -344,31 +344,31 @@ let TargetPrefix = "arm" in { [IntrReadArgMem]>; // Interleaving vector stores from N-element structures. - def int_arm_neon_vst1 : Intrinsic<[llvm_void_ty], + def int_arm_neon_vst1 : Intrinsic<[], [llvm_ptr_ty, llvm_anyvector_ty], [IntrWriteArgMem]>; - def int_arm_neon_vst2 : Intrinsic<[llvm_void_ty], + def int_arm_neon_vst2 : Intrinsic<[], [llvm_ptr_ty, llvm_anyvector_ty, LLVMMatchType<0>], [IntrWriteArgMem]>; - def int_arm_neon_vst3 : Intrinsic<[llvm_void_ty], + def int_arm_neon_vst3 : Intrinsic<[], [llvm_ptr_ty, llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>], [IntrWriteArgMem]>; - def int_arm_neon_vst4 : Intrinsic<[llvm_void_ty], + def int_arm_neon_vst4 : Intrinsic<[], [llvm_ptr_ty, llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [IntrWriteArgMem]>; // Vector store N-element structure from one lane. - def int_arm_neon_vst2lane : Intrinsic<[llvm_void_ty], + def int_arm_neon_vst2lane : Intrinsic<[], [llvm_ptr_ty, llvm_anyvector_ty, LLVMMatchType<0>, llvm_i32_ty], [IntrWriteArgMem]>; - def int_arm_neon_vst3lane : Intrinsic<[llvm_void_ty], + def int_arm_neon_vst3lane : Intrinsic<[], [llvm_ptr_ty, llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], [IntrWriteArgMem]>; - def int_arm_neon_vst4lane : Intrinsic<[llvm_void_ty], + def int_arm_neon_vst4lane : Intrinsic<[], [llvm_ptr_ty, llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], diff --git a/include/llvm/IntrinsicsPowerPC.td b/include/llvm/IntrinsicsPowerPC.td index ffb870d..4e959f3 100644 --- a/include/llvm/IntrinsicsPowerPC.td +++ b/include/llvm/IntrinsicsPowerPC.td @@ -18,17 +18,17 @@ // Non-altivec intrinsics. let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.". // dcba/dcbf/dcbi/dcbst/dcbt/dcbz/dcbzl(PPC970) instructions. 
- def int_ppc_dcba : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>; - def int_ppc_dcbf : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>; - def int_ppc_dcbi : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>; - def int_ppc_dcbst : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>; - def int_ppc_dcbt : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>; - def int_ppc_dcbtst: Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>; - def int_ppc_dcbz : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>; - def int_ppc_dcbzl : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>; + def int_ppc_dcba : Intrinsic<[], [llvm_ptr_ty], [IntrWriteMem]>; + def int_ppc_dcbf : Intrinsic<[], [llvm_ptr_ty], [IntrWriteMem]>; + def int_ppc_dcbi : Intrinsic<[], [llvm_ptr_ty], [IntrWriteMem]>; + def int_ppc_dcbst : Intrinsic<[], [llvm_ptr_ty], [IntrWriteMem]>; + def int_ppc_dcbt : Intrinsic<[], [llvm_ptr_ty], [IntrWriteMem]>; + def int_ppc_dcbtst: Intrinsic<[], [llvm_ptr_ty], [IntrWriteMem]>; + def int_ppc_dcbz : Intrinsic<[], [llvm_ptr_ty], [IntrWriteMem]>; + def int_ppc_dcbzl : Intrinsic<[], [llvm_ptr_ty], [IntrWriteMem]>; // sync instruction - def int_ppc_sync : Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>; + def int_ppc_sync : Intrinsic<[], [], [IntrWriteMem]>; } @@ -86,23 +86,23 @@ class PowerPC_Vec_WWW_Intrinsic<string GCCIntSuffix> let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.". // Data Stream Control. def int_ppc_altivec_dss : GCCBuiltin<"__builtin_altivec_dss">, - Intrinsic<[llvm_void_ty], [llvm_i32_ty], [IntrWriteMem]>; + Intrinsic<[], [llvm_i32_ty], [IntrWriteMem]>; def int_ppc_altivec_dssall : GCCBuiltin<"__builtin_altivec_dssall">, - Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>; + Intrinsic<[], [], [IntrWriteMem]>; def int_ppc_altivec_dst : GCCBuiltin<"__builtin_altivec_dst">, - Intrinsic<[llvm_void_ty], + Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [IntrWriteMem]>; def int_ppc_altivec_dstt : GCCBuiltin<"__builtin_altivec_dstt">, - Intrinsic<[llvm_void_ty], + Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [IntrWriteMem]>; def int_ppc_altivec_dstst : GCCBuiltin<"__builtin_altivec_dstst">, - Intrinsic<[llvm_void_ty], + Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [IntrWriteMem]>; def int_ppc_altivec_dststt : GCCBuiltin<"__builtin_altivec_dststt">, - Intrinsic<[llvm_void_ty], + Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [IntrWriteMem]>; @@ -110,7 +110,7 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.". def int_ppc_altivec_mfvscr : GCCBuiltin<"__builtin_altivec_mfvscr">, Intrinsic<[llvm_v8i16_ty], [], [IntrReadMem]>; def int_ppc_altivec_mtvscr : GCCBuiltin<"__builtin_altivec_mtvscr">, - Intrinsic<[llvm_void_ty], [llvm_v4i32_ty], [IntrWriteMem]>; + Intrinsic<[], [llvm_v4i32_ty], [IntrWriteMem]>; // Loads. These don't map directly to GCC builtins because they represent the @@ -129,19 +129,19 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.". // Stores. These don't map directly to GCC builtins because they represent the // source address with a single pointer. 
def int_ppc_altivec_stvx : - Intrinsic<[llvm_void_ty], [llvm_v4i32_ty, llvm_ptr_ty], + Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty], [IntrWriteMem]>; def int_ppc_altivec_stvxl : - Intrinsic<[llvm_void_ty], [llvm_v4i32_ty, llvm_ptr_ty], + Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty], [IntrWriteMem]>; def int_ppc_altivec_stvebx : - Intrinsic<[llvm_void_ty], [llvm_v16i8_ty, llvm_ptr_ty], + Intrinsic<[], [llvm_v16i8_ty, llvm_ptr_ty], [IntrWriteMem]>; def int_ppc_altivec_stvehx : - Intrinsic<[llvm_void_ty], [llvm_v8i16_ty, llvm_ptr_ty], + Intrinsic<[], [llvm_v8i16_ty, llvm_ptr_ty], [IntrWriteMem]>; def int_ppc_altivec_stvewx : - Intrinsic<[llvm_void_ty], [llvm_v4i32_ty, llvm_ptr_ty], + Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty], [IntrWriteMem]>; // Comparisons setting a vector. diff --git a/include/llvm/IntrinsicsX86.td b/include/llvm/IntrinsicsX86.td index 67abd95..25169b4 100644 --- a/include/llvm/IntrinsicsX86.td +++ b/include/llvm/IntrinsicsX86.td @@ -142,25 +142,25 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". // SIMD store ops let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_sse_storeu_ps : GCCBuiltin<"__builtin_ia32_storeups">, - Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + Intrinsic<[], [llvm_ptr_ty, llvm_v4f32_ty], [IntrWriteMem]>; } // Cacheability support ops let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_sse_movnt_ps : GCCBuiltin<"__builtin_ia32_movntps">, - Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + Intrinsic<[], [llvm_ptr_ty, llvm_v4f32_ty], [IntrWriteMem]>; def int_x86_sse_sfence : GCCBuiltin<"__builtin_ia32_sfence">, - Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>; + Intrinsic<[], [], [IntrWriteMem]>; } // Control register. let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_sse_stmxcsr : - Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>; + Intrinsic<[], [llvm_ptr_ty], [IntrWriteMem]>; def int_x86_sse_ldmxcsr : - Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>; + Intrinsic<[], [llvm_ptr_ty], [IntrWriteMem]>; } // Misc. @@ -458,26 +458,26 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". // SIMD store ops let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_sse2_storeu_pd : GCCBuiltin<"__builtin_ia32_storeupd">, - Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + Intrinsic<[], [llvm_ptr_ty, llvm_v2f64_ty], [IntrWriteMem]>; def int_x86_sse2_storeu_dq : GCCBuiltin<"__builtin_ia32_storedqu">, - Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + Intrinsic<[], [llvm_ptr_ty, llvm_v16i8_ty], [IntrWriteMem]>; def int_x86_sse2_storel_dq : GCCBuiltin<"__builtin_ia32_storelv4si">, - Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + Intrinsic<[], [llvm_ptr_ty, llvm_v4i32_ty], [IntrWriteMem]>; } // Cacheability support ops let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_sse2_movnt_dq : GCCBuiltin<"__builtin_ia32_movntdq">, - Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + Intrinsic<[], [llvm_ptr_ty, llvm_v2i64_ty], [IntrWriteMem]>; def int_x86_sse2_movnt_pd : GCCBuiltin<"__builtin_ia32_movntpd">, - Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + Intrinsic<[], [llvm_ptr_ty, llvm_v2f64_ty], [IntrWriteMem]>; def int_x86_sse2_movnt_i : GCCBuiltin<"__builtin_ia32_movnti">, - Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrWriteMem]>; } @@ -497,14 +497,14 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
def int_x86_sse2_pmovmskb_128 : GCCBuiltin<"__builtin_ia32_pmovmskb128">, Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>; def int_x86_sse2_maskmov_dqu : GCCBuiltin<"__builtin_ia32_maskmovdqu">, - Intrinsic<[llvm_void_ty], [llvm_v16i8_ty, + Intrinsic<[], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_ptr_ty], [IntrWriteMem]>; def int_x86_sse2_clflush : GCCBuiltin<"__builtin_ia32_clflush">, - Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>; + Intrinsic<[], [llvm_ptr_ty], [IntrWriteMem]>; def int_x86_sse2_lfence : GCCBuiltin<"__builtin_ia32_lfence">, - Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>; + Intrinsic<[], [], [IntrWriteMem]>; def int_x86_sse2_mfence : GCCBuiltin<"__builtin_ia32_mfence">, - Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>; + Intrinsic<[], [], [IntrWriteMem]>; } //===----------------------------------------------------------------------===// @@ -545,10 +545,10 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". // Thread synchronization ops. let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_sse3_monitor : GCCBuiltin<"__builtin_ia32_monitor">, - Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [IntrWriteMem]>; def int_x86_sse3_mwait : GCCBuiltin<"__builtin_ia32_mwait">, - Intrinsic<[llvm_void_ty], [llvm_i32_ty, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [IntrWriteMem]>; } @@ -779,6 +779,29 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". [IntrNoMem, Commutative]>; } +// Advanced Encryption Standard (AES) Instructions +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_aesni_aesimc : GCCBuiltin<"__builtin_ia32_aesimc128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_aesni_aesenc : GCCBuiltin<"__builtin_ia32_aesenc128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_aesni_aesenclast : GCCBuiltin<"__builtin_ia32_aesenclast128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_aesni_aesdec : GCCBuiltin<"__builtin_ia32_aesdec128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_aesni_aesdeclast : GCCBuiltin<"__builtin_ia32_aesdeclast128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_aesni_aeskeygenassist : + GCCBuiltin<"__builtin_ia32_aeskeygenassist">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], + [IntrNoMem]>; +} + // Vector pack let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_sse41_packusdw : GCCBuiltin<"__builtin_ia32_packusdw128">, @@ -791,9 +814,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_sse41_pmuldq : GCCBuiltin<"__builtin_ia32_pmuldq128">, Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem, Commutative]>; - def int_x86_sse41_pmulld : GCCBuiltin<"__builtin_ia32_pmulld128">, - Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], - [IntrNoMem, Commutative]>; } // Vector extract @@ -973,9 +993,9 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". // Empty MMX state op. let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
def int_x86_mmx_emms : GCCBuiltin<"__builtin_ia32_emms">, - Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>; + Intrinsic<[], [], [IntrWriteMem]>; def int_x86_mmx_femms : GCCBuiltin<"__builtin_ia32_femms">, - Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>; + Intrinsic<[], [], [IntrWriteMem]>; } // Integer arithmetic ops. @@ -1151,7 +1171,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". // Misc. let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_mmx_maskmovq : GCCBuiltin<"__builtin_ia32_maskmovq">, - Intrinsic<[llvm_void_ty], + Intrinsic<[], [llvm_v8i8_ty, llvm_v8i8_ty, llvm_ptr_ty], [IntrWriteMem]>; @@ -1159,6 +1179,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". Intrinsic<[llvm_i32_ty], [llvm_v8i8_ty], [IntrNoMem]>; def int_x86_mmx_movnt_dq : GCCBuiltin<"__builtin_ia32_movntq">, - Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + Intrinsic<[], [llvm_ptr_ty, llvm_v1i64_ty], [IntrWriteMem]>; } diff --git a/include/llvm/LLVMContext.h b/include/llvm/LLVMContext.h index 6d36d5e..ea7f4a2 100644 --- a/include/llvm/LLVMContext.h +++ b/include/llvm/LLVMContext.h @@ -36,6 +36,12 @@ public: LLVMContext(); ~LLVMContext(); + // Pinned metadata names, which always have the same value. This is a + // compile-time performance optimization, not a correctness optimization. + enum { + MD_dbg = 1 // "dbg" -> 1. + }; + /// getMDKindID - Return a unique non-zero ID for the specified metadata kind. /// This ID is uniqued across modules in the current LLVMContext. unsigned getMDKindID(StringRef Name) const; diff --git a/include/llvm/MC/MCAsmLayout.h b/include/llvm/MC/MCAsmLayout.h index 27bdbe9..ebf0520 100644 --- a/include/llvm/MC/MCAsmLayout.h +++ b/include/llvm/MC/MCAsmLayout.h @@ -12,6 +12,9 @@ namespace llvm { class MCAssembler; +class MCFragment; +class MCSectionData; +class MCSymbolData; /// Encapsulates the layout of an assembly file at a particular point in time. /// @@ -29,6 +32,64 @@ public: /// Get the assembler object this is a layout for. MCAssembler &getAssembler() const { return Assembler; } + + /// \brief Update the layout because a fragment has been resized. The + /// fragments size should have already been updated, the \arg SlideAmount is + /// the delta from the old size. + void UpdateForSlide(MCFragment *F, int SlideAmount); + + /// @name Fragment Layout Data + /// @{ + + /// \brief Get the effective size of the given fragment, as computed in the + /// current layout. + uint64_t getFragmentEffectiveSize(const MCFragment *F) const; + + /// \brief Set the effective size of the given fragment. + void setFragmentEffectiveSize(MCFragment *F, uint64_t Value); + + /// \brief Get the offset of the given fragment inside its containing section. + uint64_t getFragmentOffset(const MCFragment *F) const; + + /// \brief Set the offset of the given fragment inside its containing section. + void setFragmentOffset(MCFragment *F, uint64_t Value); + + /// @} + /// @name Section Layout Data + /// @{ + + /// \brief Get the computed address of the given section. + uint64_t getSectionAddress(const MCSectionData *SD) const; + + /// \brief Set the computed address of the given section. + void setSectionAddress(MCSectionData *SD, uint64_t Value); + + /// \brief Get the data size of the given section, as emitted to the object + /// file. This may include additional padding, or be 0 for virtual sections. + uint64_t getSectionFileSize(const MCSectionData *SD) const; + + /// \brief Set the data size of the given section. 
+ void setSectionFileSize(MCSectionData *SD, uint64_t Value); + + /// \brief Get the actual data size of the given section. + uint64_t getSectionSize(const MCSectionData *SD) const; + + /// \brief Set the actual data size of the given section. + void setSectionSize(MCSectionData *SD, uint64_t Value); + + /// @} + /// @name Utility Functions + /// @{ + + /// \brief Get the address of the given fragment, as computed in the current + /// layout. + uint64_t getFragmentAddress(const MCFragment *F) const; + + /// \brief Get the address of the given symbol, as computed in the current + /// layout. + uint64_t getSymbolAddress(const MCSymbolData *SD) const; + + /// @} }; } // end namespace llvm diff --git a/include/llvm/MC/MCAssembler.h b/include/llvm/MC/MCAssembler.h index 363c7d9..c1b60f0 100644 --- a/include/llvm/MC/MCAssembler.h +++ b/include/llvm/MC/MCAssembler.h @@ -16,6 +16,7 @@ #include "llvm/ADT/ilist_node.h" #include "llvm/Support/Casting.h" #include "llvm/MC/MCFixup.h" +#include "llvm/MC/MCInst.h" #include "llvm/System/DataTypes.h" #include <vector> // FIXME: Shouldn't be needed. @@ -37,6 +38,8 @@ class TargetAsmBackend; /// MCAsmFixup - Represent a fixed size region of bytes inside some fragment /// which needs to be rewritten. This region will either be rewritten by the /// assembler or cause a relocation entry to be generated. +// +// FIXME: This should probably just be merged with MCFixup. class MCAsmFixup { public: /// Offset - The offset inside the fragment which needs to be rewritten. @@ -54,14 +57,17 @@ public: }; class MCFragment : public ilist_node<MCFragment> { + friend class MCAsmLayout; + MCFragment(const MCFragment&); // DO NOT IMPLEMENT void operator=(const MCFragment&); // DO NOT IMPLEMENT public: enum FragmentType { - FT_Data, FT_Align, + FT_Data, FT_Fill, + FT_Inst, FT_Org, FT_ZeroFill }; @@ -81,8 +87,13 @@ private: /// initialized. uint64_t Offset; - /// FileSize - The file size of this section. This is ~0 until initialized. - uint64_t FileSize; + /// EffectiveSize - The compute size of this section. This is ~0 until + /// initialized. + uint64_t EffectiveSize; + + /// Ordinal - The global index of this fragment. This is the index across all + /// sections, not just the parent section. + unsigned Ordinal; /// @} @@ -99,35 +110,8 @@ public: MCSectionData *getParent() const { return Parent; } void setParent(MCSectionData *Value) { Parent = Value; } - // FIXME: This should be abstract, fix sentinel. - virtual uint64_t getMaxFileSize() const { - assert(0 && "Invalid getMaxFileSize call!"); - return 0; - } - - /// @name Assembler Backend Support - /// @{ - // - // FIXME: This could all be kept private to the assembler implementation. 
- - uint64_t getAddress() const; - - uint64_t getFileSize() const { - assert(FileSize != ~UINT64_C(0) && "File size not set!"); - return FileSize; - } - void setFileSize(uint64_t Value) { - assert(Value <= getMaxFileSize() && "Invalid file size!"); - FileSize = Value; - } - - uint64_t getOffset() const { - assert(Offset != ~UINT64_C(0) && "File offset not set!"); - return Offset; - } - void setOffset(uint64_t Value) { Offset = Value; } - - /// @} + unsigned getOrdinal() const { return Ordinal; } + void setOrdinal(unsigned Value) { Ordinal = Value; } static bool classof(const MCFragment *O) { return true; } @@ -150,15 +134,10 @@ public: /// @name Accessors /// @{ - uint64_t getMaxFileSize() const { - return Contents.size(); - } - SmallString<32> &getContents() { return Contents; } const SmallString<32> &getContents() const { return Contents; } /// @} - /// @name Fixup Access /// @{ @@ -190,6 +169,68 @@ public: virtual void dump(); }; +// FIXME: This current incarnation of MCInstFragment doesn't make much sense, as +// it is almost entirely a duplicate of MCDataFragment. If we decide to stick +// with this approach (as opposed to making MCInstFragment a very light weight +// object with just the MCInst and a code size, then we should just change +// MCDataFragment to have an optional MCInst at its end. +class MCInstFragment : public MCFragment { + /// Inst - The instruction this is a fragment for. + MCInst Inst; + + /// InstSize - The size of the currently encoded instruction. + SmallString<8> Code; + + /// Fixups - The list of fixups in this fragment. + SmallVector<MCAsmFixup, 1> Fixups; + +public: + typedef SmallVectorImpl<MCAsmFixup>::const_iterator const_fixup_iterator; + typedef SmallVectorImpl<MCAsmFixup>::iterator fixup_iterator; + +public: + MCInstFragment(MCInst _Inst, MCSectionData *SD = 0) + : MCFragment(FT_Inst, SD), Inst(_Inst) { + } + + /// @name Accessors + /// @{ + + SmallVectorImpl<char> &getCode() { return Code; } + const SmallVectorImpl<char> &getCode() const { return Code; } + + unsigned getInstSize() const { return Code.size(); } + + MCInst &getInst() { return Inst; } + const MCInst &getInst() const { return Inst; } + + void setInst(MCInst Value) { Inst = Value; } + + /// @} + /// @name Fixup Access + /// @{ + + SmallVectorImpl<MCAsmFixup> &getFixups() { return Fixups; } + const SmallVectorImpl<MCAsmFixup> &getFixups() const { return Fixups; } + + fixup_iterator fixup_begin() { return Fixups.begin(); } + const_fixup_iterator fixup_begin() const { return Fixups.begin(); } + + fixup_iterator fixup_end() {return Fixups.end();} + const_fixup_iterator fixup_end() const {return Fixups.end();} + + size_t fixup_size() const { return Fixups.size(); } + + /// @} + + static bool classof(const MCFragment *F) { + return F->getKind() == MCFragment::FT_Inst; + } + static bool classof(const MCInstFragment *) { return true; } + + virtual void dump(); +}; + class MCAlignFragment : public MCFragment { /// Alignment - The alignment to ensure, in bytes. 
unsigned Alignment; @@ -219,10 +260,6 @@ public: /// @name Accessors /// @{ - uint64_t getMaxFileSize() const { - return std::max(Alignment - 1, MaxBytesToEmit); - } - unsigned getAlignment() const { return Alignment; } int64_t getValue() const { return Value; } @@ -262,10 +299,6 @@ public: /// @name Accessors /// @{ - uint64_t getMaxFileSize() const { - return ValueSize * Count; - } - int64_t getValue() const { return Value; } unsigned getValueSize() const { return ValueSize; } @@ -297,11 +330,6 @@ public: /// @name Accessors /// @{ - uint64_t getMaxFileSize() const { - // FIXME: This doesn't make much sense. - return ~UINT64_C(0); - } - const MCExpr &getOffset() const { return *Offset; } uint8_t getValue() const { return Value; } @@ -333,11 +361,6 @@ public: /// @name Accessors /// @{ - uint64_t getMaxFileSize() const { - // FIXME: This also doesn't make much sense, this method is misnamed. - return ~UINT64_C(0); - } - uint64_t getSize() const { return Size; } unsigned getAlignment() const { return Alignment; } @@ -356,6 +379,8 @@ public: // we anticipate the fast path being through an MCAssembler, the only reason to // keep it out is for API abstraction. class MCSectionData : public ilist_node<MCSectionData> { + friend class MCAsmLayout; + MCSectionData(const MCSectionData&); // DO NOT IMPLEMENT void operator=(const MCSectionData&); // DO NOT IMPLEMENT @@ -372,6 +397,9 @@ private: iplist<MCFragment> Fragments; const MCSection *Section; + /// Ordinal - The section index in the assemblers section list. + unsigned Ordinal; + /// Alignment - The maximum alignment seen in this section. unsigned Alignment; @@ -407,6 +435,12 @@ public: unsigned getAlignment() const { return Alignment; } void setAlignment(unsigned Value) { Alignment = Value; } + bool hasInstructions() const { return HasInstructions; } + void setHasInstructions(bool Value) { HasInstructions = Value; } + + unsigned getOrdinal() const { return Ordinal; } + void setOrdinal(unsigned Value) { Ordinal = Value; } + /// @name Fragment Access /// @{ @@ -429,36 +463,9 @@ public: bool empty() const { return Fragments.empty(); } - /// @} - /// @name Assembler Backend Support - /// @{ - // - // FIXME: This could all be kept private to the assembler implementation. - - uint64_t getAddress() const { - assert(Address != ~UINT64_C(0) && "Address not set!"); - return Address; - } - void setAddress(uint64_t Value) { Address = Value; } - - uint64_t getSize() const { - assert(Size != ~UINT64_C(0) && "File size not set!"); - return Size; - } - void setSize(uint64_t Value) { Size = Value; } - - uint64_t getFileSize() const { - assert(FileSize != ~UINT64_C(0) && "File size not set!"); - return FileSize; - } - void setFileSize(uint64_t Value) { FileSize = Value; } - - bool hasInstructions() const { return HasInstructions; } - void setHasInstructions(bool Value) { HasInstructions = Value; } + void dump(); /// @} - - void dump(); }; // FIXME: Same concerns as with SectionData. 
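
// A minimal sketch of what replaces the MCSectionData accessors removed above:
// section sizes are now computed into, and read back from, the MCAsmLayout
// instead of being stored on the section data. sectionDataSize is a
// hypothetical helper.
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"

static uint64_t sectionDataSize(const llvm::MCAsmLayout &Layout,
                                const llvm::MCSectionData &SD) {
  return Layout.getSectionSize(&SD);  // previously SD.getSize()
}
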
@@ -515,11 +522,6 @@ public: uint64_t getOffset() const { return Offset; } void setOffset(uint64_t Value) { Offset = Value; } - uint64_t getAddress() const { - assert(getFragment() && "Invalid getAddress() on undefined symbol!"); - return getFragment()->getAddress() + getOffset(); - } - /// @} /// @name Symbol Attributes /// @{ @@ -578,6 +580,8 @@ struct IndirectSymbolData { }; class MCAssembler { + friend class MCAsmLayout; + public: typedef iplist<MCSectionData> SectionDataListType; typedef iplist<MCSymbolData> SymbolDataListType; @@ -620,6 +624,7 @@ private: std::vector<IndirectSymbolData> IndirectSymbols; + unsigned RelaxAll : 1; unsigned SubsectionsViaSymbols : 1; private: @@ -637,35 +642,49 @@ private: /// \arg Value result is fixed, otherwise the value may change due to /// relocation. bool EvaluateFixup(const MCAsmLayout &Layout, - MCAsmFixup &Fixup, MCDataFragment *DF, + const MCAsmFixup &Fixup, const MCFragment *DF, MCValue &Target, uint64_t &Value) const; /// Check whether a fixup can be satisfied, or whether it needs to be relaxed /// (increased in size, in order to hold its value correctly). - bool FixupNeedsRelaxation(MCAsmFixup &Fixup, MCDataFragment *DF); + bool FixupNeedsRelaxation(const MCAsmFixup &Fixup, const MCFragment *DF, + const MCAsmLayout &Layout) const; + + /// Check whether the given fragment needs relaxation. + bool FragmentNeedsRelaxation(const MCInstFragment *IF, + const MCAsmLayout &Layout) const; - /// LayoutSection - Assign offsets and sizes to the fragments in the section - /// \arg SD, and update the section size. The section file offset should - /// already have been computed. - void LayoutSection(MCSectionData &SD); + /// LayoutSection - Assign the section the given \arg StartAddress, and then + /// assign offsets and sizes to the fragments in the section \arg SD, and + /// update the section size. + /// + /// \return The address at the end of the section, for use in laying out the + /// succeeding section. + uint64_t LayoutSection(MCSectionData &SD, MCAsmLayout &Layout, + uint64_t StartAddress); /// LayoutOnce - Perform one layout iteration and return true if any offsets /// were adjusted. - bool LayoutOnce(); + bool LayoutOnce(MCAsmLayout &Layout); + + /// FinishLayout - Finalize a layout, including fragment lowering. + void FinishLayout(MCAsmLayout &Layout); public: /// Find the symbol which defines the atom containing given address, inside /// the given section, or null if there is no such symbol. // - // FIXME: Eliminate this, it is very slow. - const MCSymbolData *getAtomForAddress(const MCSectionData *Section, + // FIXME-PERF: Eliminate this, it is very slow. + const MCSymbolData *getAtomForAddress(const MCAsmLayout &Layout, + const MCSectionData *Section, uint64_t Address) const; /// Find the symbol which defines the atom containing the given symbol, or /// null if there is no such symbol. // - // FIXME: Eliminate this, it is very slow. - const MCSymbolData *getAtom(const MCSymbolData *Symbol) const; + // FIXME-PERF: Eliminate this, it is very slow. + const MCSymbolData *getAtom(const MCAsmLayout &Layout, + const MCSymbolData *Symbol) const; /// Check whether a particular symbol is visible to the linker and is required /// in the symbol table, or whether it can be discarded by the assembler. This @@ -676,7 +695,8 @@ public: /// Emit the section contents using the given object writer. // // FIXME: Should MCAssembler always have a reference to the object writer? 
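
// A minimal sketch of the symbol-side changes in this hunk:
// MCSymbolData::getAddress() is gone, and atom lookup now requires the layout.
// symbolAtomAndAddress is a hypothetical helper.
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"

static const llvm::MCSymbolData *
symbolAtomAndAddress(const llvm::MCAssembler &Asm,
                     const llvm::MCAsmLayout &Layout,
                     const llvm::MCSymbolData *SD, uint64_t &Address) {
  Address = Layout.getSymbolAddress(SD);  // previously SD->getAddress()
  return Asm.getAtom(Layout, SD);         // getAtom() now takes the layout
}
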
- void WriteSectionData(const MCSectionData *Section, MCObjectWriter *OW) const; + void WriteSectionData(const MCSectionData *Section, const MCAsmLayout &Layout, + MCObjectWriter *OW) const; public: /// Construct a new assembler instance. @@ -708,6 +728,9 @@ public: SubsectionsViaSymbols = Value; } + bool getRelaxAll() const { return RelaxAll; } + void setRelaxAll(bool Value) { RelaxAll = Value; } + /// @name Section List Access /// @{ diff --git a/include/llvm/MC/MCContext.h b/include/llvm/MC/MCContext.h index c5814b3..968d55f 100644 --- a/include/llvm/MC/MCContext.h +++ b/include/llvm/MC/MCContext.h @@ -65,19 +65,8 @@ namespace llvm { /// reference and return it. /// /// @param Name - The symbol name, which must be unique across all symbols. - MCSymbol *GetOrCreateSymbol(StringRef Name, bool isTemporary = false); - MCSymbol *GetOrCreateSymbol(const Twine &Name, bool isTemporary = false); - - /// GetOrCreateTemporarySymbol - Create a new assembler temporary symbol - /// with the specified @p Name if it doesn't exist or return the existing - /// one if it does. - /// - /// @param Name - The symbol name, for debugging purposes only, temporary - /// symbols do not surive assembly. - MCSymbol *GetOrCreateTemporarySymbol(StringRef Name) { - return GetOrCreateSymbol(Name, true); - } - MCSymbol *GetOrCreateTemporarySymbol(const Twine &Name); + MCSymbol *GetOrCreateSymbol(StringRef Name); + MCSymbol *GetOrCreateSymbol(const Twine &Name); /// LookupSymbol - Get the symbol for \p Name, or null. MCSymbol *LookupSymbol(StringRef Name) const; diff --git a/include/llvm/MC/MCExpr.h b/include/llvm/MC/MCExpr.h index 6efec52..bd0684d 100644 --- a/include/llvm/MC/MCExpr.h +++ b/include/llvm/MC/MCExpr.h @@ -160,11 +160,6 @@ public: static const MCSymbolRefExpr *Create(StringRef Name, VariantKind Kind, MCContext &Ctx); - /// CreateTemp - Create a reference to an assembler temporary label with the - /// specified name. - static const MCSymbolRefExpr *CreateTemp(StringRef Name, VariantKind Kind, - MCContext &Ctx); - /// @} /// @name Accessors /// @{ diff --git a/include/llvm/MC/MCInst.h b/include/llvm/MC/MCInst.h index 29b38dd..dc630fe 100644 --- a/include/llvm/MC/MCInst.h +++ b/include/llvm/MC/MCInst.h @@ -17,11 +17,13 @@ #define LLVM_MC_MCINST_H #include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" #include "llvm/System/DataTypes.h" namespace llvm { class raw_ostream; class MCAsmInfo; +class MCInstPrinter; class MCExpr; /// MCOperand - Instances of this class represent operands of the MCInst class. @@ -125,6 +127,13 @@ public: void print(raw_ostream &OS, const MCAsmInfo *MAI) const; void dump() const; + + /// \brief Dump the MCInst as prettily as possible using the additional MC + /// structures, if given. Operators are separated by the \arg Separator + /// string. + void dump_pretty(raw_ostream &OS, const MCAsmInfo *MAI = 0, + const MCInstPrinter *Printer = 0, + StringRef Separator = " ") const; }; diff --git a/include/llvm/MC/MCObjectWriter.h b/include/llvm/MC/MCObjectWriter.h index d4fab0e..f70a3d1 100644 --- a/include/llvm/MC/MCObjectWriter.h +++ b/include/llvm/MC/MCObjectWriter.h @@ -16,8 +16,9 @@ namespace llvm { class MCAsmFixup; +class MCAsmLayout; class MCAssembler; -class MCDataFragment; +class MCFragment; class MCValue; class raw_ostream; @@ -69,7 +70,8 @@ public: /// information about the relocation so that it can be emitted during /// WriteObject(). 
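
// Usage sketch for the dump_pretty() hook added to MCInst above. Both the
// MCAsmInfo and MCInstPrinter arguments are optional; showInst is a
// hypothetical helper.
#include "llvm/MC/MCInst.h"
#include "llvm/Support/raw_ostream.h"

static void showInst(const llvm::MCInst &Inst) {
  Inst.dump_pretty(llvm::errs());              // default " " operand separator
  Inst.dump_pretty(llvm::errs(), 0, 0, ", ");  // comma-separated operands
}
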
virtual void RecordRelocation(const MCAssembler &Asm, - const MCDataFragment &Fragment, + const MCAsmLayout &Layout, + const MCFragment *Fragment, const MCAsmFixup &Fixup, MCValue Target, uint64_t &FixedValue) = 0; @@ -78,7 +80,8 @@ public: /// This routine is called by the assembler after layout and relaxation is /// complete, fixups have been evaluate and applied, and relocations /// generated. - virtual void WriteObject(const MCAssembler &Asm) = 0; + virtual void WriteObject(const MCAssembler &Asm, + const MCAsmLayout &Layout) = 0; /// @} /// @name Binary Output diff --git a/include/llvm/MC/MCSection.h b/include/llvm/MC/MCSection.h index 3d8815a..4a1c46c 100644 --- a/include/llvm/MC/MCSection.h +++ b/include/llvm/MC/MCSection.h @@ -42,9 +42,8 @@ namespace llvm { }; class MCSectionCOFF : public MCSection { - // FIXME: This memory is leaked because MCSectionCOFF is bump pointer - // allocated and this never gets freed. - std::string Name; + // The memory for this string is stored in the same MCContext as *this. + StringRef Name; /// IsDirective - This is true if the section name is a directive, not /// something that should be printed with ".section". @@ -61,7 +60,7 @@ namespace llvm { static MCSectionCOFF *Create(StringRef Name, bool IsDirective, SectionKind K, MCContext &Ctx); - const std::string &getName() const { return Name; } + StringRef getName() const { return Name; } bool isDirective() const { return IsDirective; } virtual void PrintSwitchToSection(const MCAsmInfo &MAI, diff --git a/include/llvm/MC/MCStreamer.h b/include/llvm/MC/MCStreamer.h index 4b088a5..bdcfdb2 100644 --- a/include/llvm/MC/MCStreamer.h +++ b/include/llvm/MC/MCStreamer.h @@ -88,7 +88,7 @@ class TargetAsmBackend; /// @name Symbol & Section Management /// @{ - /// getCurrentSection - Return the current seciton that the streamer is + /// getCurrentSection - Return the current section that the streamer is /// emitting code to. const MCSection *getCurrentSection() const { return CurSection; } @@ -308,7 +308,8 @@ class TargetAsmBackend; /// createMachOStream - Create a machine code streamer which will generative /// Mach-O format object files. MCStreamer *createMachOStreamer(MCContext &Ctx, TargetAsmBackend &TAB, - raw_ostream &OS, MCCodeEmitter *CE); + raw_ostream &OS, MCCodeEmitter *CE, + bool RelaxAll = false); } // end namespace llvm diff --git a/include/llvm/MC/MachObjectWriter.h b/include/llvm/MC/MachObjectWriter.h index 3e3305f..844025d 100644 --- a/include/llvm/MC/MachObjectWriter.h +++ b/include/llvm/MC/MachObjectWriter.h @@ -17,7 +17,7 @@ namespace llvm { class MCAsmFixup; class MCAssembler; -class MCDataFragment; +class MCFragment; class MCValue; class raw_ostream; @@ -31,11 +31,12 @@ public: virtual void ExecutePostLayoutBinding(MCAssembler &Asm); virtual void RecordRelocation(const MCAssembler &Asm, - const MCDataFragment &Fragment, + const MCAsmLayout &Layout, + const MCFragment *Fragment, const MCAsmFixup &Fixup, MCValue Target, uint64_t &FixedValue); - virtual void WriteObject(const MCAssembler &Asm); + virtual void WriteObject(const MCAssembler &Asm, const MCAsmLayout &Layout); }; } // End llvm namespace diff --git a/include/llvm/PassManagers.h b/include/llvm/PassManagers.h index d5685c6..ed1e80e 100644 --- a/include/llvm/PassManagers.h +++ b/include/llvm/PassManagers.h @@ -413,7 +413,6 @@ private: /// It batches all function passes and basic block pass managers together and /// sequence them to process one function at a time before processing next /// function. 
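
// Usage sketch for the RelaxAll parameter added to createMachOStreamer()
// above. Ctx, TAB, OS and CE are assumed to be set up by the caller;
// makeRelaxedStreamer is a hypothetical helper.
#include "llvm/MC/MCStreamer.h"

static llvm::MCStreamer *makeRelaxedStreamer(llvm::MCContext &Ctx,
                                             llvm::TargetAsmBackend &TAB,
                                             llvm::raw_ostream &OS,
                                             llvm::MCCodeEmitter *CE) {
  // true requests that every relaxable instruction be emitted in its relaxed
  // form (see the matching setRelaxAll() added to MCAssembler).
  return llvm::createMachOStreamer(Ctx, TAB, OS, CE, /*RelaxAll=*/true);
}
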
- class FPPassManager : public ModulePass, public PMDataManager { public: static char ID; @@ -462,8 +461,7 @@ public: } }; -extern Timer *StartPassTimer(Pass *); -extern void StopPassTimer(Pass *, Timer *); +Timer *getPassTimer(Pass *); } diff --git a/include/llvm/Support/Allocator.h b/include/llvm/Support/Allocator.h index b1f59dc..bd38180 100644 --- a/include/llvm/Support/Allocator.h +++ b/include/llvm/Support/Allocator.h @@ -133,6 +133,7 @@ class BumpPtrAllocator { static MallocSlabAllocator DefaultSlabAllocator; + template<typename T> friend class SpecificBumpPtrAllocator; public: BumpPtrAllocator(size_t size = 4096, size_t threshold = 4096, SlabAllocator &allocator = DefaultSlabAllocator); @@ -176,6 +177,45 @@ public: void PrintStats() const; }; +/// SpecificBumpPtrAllocator - Same as BumpPtrAllocator but allows only +/// elements of one type to be allocated. This allows calling the destructor +/// in DestroyAll() and when the allocator is destroyed. +template <typename T> +class SpecificBumpPtrAllocator { + BumpPtrAllocator Allocator; +public: + SpecificBumpPtrAllocator(size_t size = 4096, size_t threshold = 4096, + SlabAllocator &allocator = BumpPtrAllocator::DefaultSlabAllocator) + : Allocator(size, threshold, allocator) {} + + ~SpecificBumpPtrAllocator() { + DestroyAll(); + } + + /// Call the destructor of each allocated object and deallocate all but the + /// current slab and reset the current pointer to the beginning of it, freeing + /// all memory allocated so far. + void DestroyAll() { + MemSlab *Slab = Allocator.CurSlab; + while (Slab) { + char *End = Slab == Allocator.CurSlab ? Allocator.CurPtr : + (char *)Slab + Slab->Size; + for (char *Ptr = (char*)Slab+1; Ptr < End; Ptr += sizeof(T)) { + Ptr = Allocator.AlignPtr(Ptr, alignof<T>()); + if (Ptr + sizeof(T) <= End) + reinterpret_cast<T*>(Ptr)->~T(); + } + Slab = Slab->NextPtr; + } + Allocator.Reset(); + } + + /// Allocate space for a specific count of elements. 
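
// A usage sketch for SpecificBumpPtrAllocator, using the Allocate() helper
// declared just below. Node is a hypothetical element type; objects are
// placement-constructed into the allocator's memory and destroyed in bulk.
#include "llvm/Support/Allocator.h"
#include <new>

struct Node { int Value; explicit Node(int V) : Value(V) {} };

static void buildNodes() {
  llvm::SpecificBumpPtrAllocator<Node> Alloc;
  for (int i = 0; i != 16; ++i)
    new (Alloc.Allocate()) Node(i);  // Allocate() returns raw storage for one Node
  // Alloc's destructor (or an explicit DestroyAll()) runs ~Node() on each one.
}
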
+ T *Allocate(size_t num = 1) { + return Allocator.Allocate<T>(num); + } +}; + } // end namespace llvm inline void *operator new(size_t Size, llvm::BumpPtrAllocator &Allocator) { diff --git a/include/llvm/Support/CFG.h b/include/llvm/Support/CFG.h index 3875f0b..57699c7 100644 --- a/include/llvm/Support/CFG.h +++ b/include/llvm/Support/CFG.h @@ -67,15 +67,15 @@ public: typedef PredIterator<BasicBlock, Value::use_iterator> pred_iterator; typedef PredIterator<const BasicBlock, - Value::use_const_iterator> pred_const_iterator; + Value::const_use_iterator> const_pred_iterator; inline pred_iterator pred_begin(BasicBlock *BB) { return pred_iterator(BB); } -inline pred_const_iterator pred_begin(const BasicBlock *BB) { - return pred_const_iterator(BB); +inline const_pred_iterator pred_begin(const BasicBlock *BB) { + return const_pred_iterator(BB); } inline pred_iterator pred_end(BasicBlock *BB) { return pred_iterator(BB, true);} -inline pred_const_iterator pred_end(const BasicBlock *BB) { - return pred_const_iterator(BB, true); +inline const_pred_iterator pred_end(const BasicBlock *BB) { + return const_pred_iterator(BB, true); } @@ -268,7 +268,7 @@ template <> struct GraphTraits<Inverse<BasicBlock*> > { template <> struct GraphTraits<Inverse<const BasicBlock*> > { typedef const BasicBlock NodeType; - typedef pred_const_iterator ChildIteratorType; + typedef const_pred_iterator ChildIteratorType; static NodeType *getEntryNode(Inverse<const BasicBlock*> G) { return G.Graph; } diff --git a/include/llvm/Support/CallSite.h b/include/llvm/Support/CallSite.h index 285b558..9f527e2 100644 --- a/include/llvm/Support/CallSite.h +++ b/include/llvm/Support/CallSite.h @@ -8,15 +8,18 @@ //===----------------------------------------------------------------------===// // // This file defines the CallSite class, which is a handy wrapper for code that -// wants to treat Call and Invoke instructions in a generic way. +// wants to treat Call and Invoke instructions in a generic way. When in non- +// mutation context (e.g. an analysis) ImmutableCallSite should be used. +// Finally, when some degree of customization is necessary between these two +// extremes, CallSiteBase<> can be supplied with fine-tuned parameters. // -// NOTE: This class is supposed to have "value semantics". So it should be -// passed by value, not by reference; it should not be "new"ed or "delete"d. It -// is efficiently copyable, assignable and constructable, with cost equivalent -// to copying a pointer (notice that it has only a single data member). -// The internal representation carries a flag which indicates which of the two -// variants is enclosed. This allows for cheaper checks when various accessors -// of CallSite are employed. +// NOTE: These classes are supposed to have "value semantics". So they should be +// passed by value, not by reference; they should not be "new"ed or "delete"d. +// They are efficiently copyable, assignable and constructable, with cost +// equivalent to copying a pointer (notice that they have only a single data +// member). The internal representation carries a flag which indicates which of +// the two variants is enclosed. This allows for cheaper checks when various +// accessors of CallSite are employed. 
// //===----------------------------------------------------------------------===// @@ -34,68 +37,42 @@ namespace llvm { class CallInst; class InvokeInst; -class CallSite { - PointerIntPair<Instruction*, 1, bool> I; +template <typename FunTy = const Function, + typename ValTy = const Value, + typename UserTy = const User, + typename InstrTy = const Instruction, + typename CallTy = const CallInst, + typename InvokeTy = const InvokeInst, + typename IterTy = User::const_op_iterator> +class CallSiteBase { +protected: + PointerIntPair<InstrTy*, 1, bool> I; public: - CallSite() : I(0, false) {} - CallSite(CallInst *CI) : I(reinterpret_cast<Instruction*>(CI), true) {} - CallSite(InvokeInst *II) : I(reinterpret_cast<Instruction*>(II), false) {} - CallSite(Instruction *C); - - bool operator==(const CallSite &CS) const { return I == CS.I; } - bool operator!=(const CallSite &CS) const { return I != CS.I; } + CallSiteBase() : I(0, false) {} + CallSiteBase(CallTy *CI) : I(reinterpret_cast<InstrTy*>(CI), true) {} + CallSiteBase(InvokeTy *II) : I(reinterpret_cast<InstrTy*>(II), false) {} + CallSiteBase(ValTy *II) { *this = get(II); } + CallSiteBase(InstrTy *II) { + assert(II && "Null instruction given?"); + *this = get(II); + assert(I.getPointer()); + } - /// CallSite::get - This static method is sort of like a constructor. It will - /// create an appropriate call site for a Call or Invoke instruction, but it - /// can also create a null initialized CallSite object for something which is - /// NOT a call site. + /// CallSiteBase::get - This static method is sort of like a constructor. It + /// will create an appropriate call site for a Call or Invoke instruction, but + /// it can also create a null initialized CallSiteBase object for something + /// which is NOT a call site. /// - static CallSite get(Value *V) { - if (Instruction *I = dyn_cast<Instruction>(V)) { - if (I->getOpcode() == Instruction::Call) - return CallSite(reinterpret_cast<CallInst*>(I)); - else if (I->getOpcode() == Instruction::Invoke) - return CallSite(reinterpret_cast<InvokeInst*>(I)); + static CallSiteBase get(ValTy *V) { + if (InstrTy *II = dyn_cast<InstrTy>(V)) { + if (II->getOpcode() == Instruction::Call) + return CallSiteBase(reinterpret_cast<CallTy*>(II)); + else if (II->getOpcode() == Instruction::Invoke) + return CallSiteBase(reinterpret_cast<InvokeTy*>(II)); } - return CallSite(); + return CallSiteBase(); } - /// getCallingConv/setCallingConv - get or set the calling convention of the - /// call. - CallingConv::ID getCallingConv() const; - void setCallingConv(CallingConv::ID CC); - - /// getAttributes/setAttributes - get or set the parameter attributes of - /// the call. - const AttrListPtr &getAttributes() const; - void setAttributes(const AttrListPtr &PAL); - - /// paramHasAttr - whether the call or the callee has the given attribute. - bool paramHasAttr(uint16_t i, Attributes attr) const; - - /// @brief Extract the alignment for a call or parameter (0=unknown). - uint16_t getParamAlignment(uint16_t i) const; - - /// @brief Determine if the call does not access memory. - bool doesNotAccessMemory() const; - void setDoesNotAccessMemory(bool doesNotAccessMemory = true); - - /// @brief Determine if the call does not access or only reads memory. - bool onlyReadsMemory() const; - void setOnlyReadsMemory(bool onlyReadsMemory = true); - - /// @brief Determine if the call cannot return. - bool doesNotReturn() const; - void setDoesNotReturn(bool doesNotReturn = true); - - /// @brief Determine if the call cannot unwind. 
- bool doesNotThrow() const; - void setDoesNotThrow(bool doesNotThrow = true); - - /// getType - Return the type of the instruction that generated this call site - /// - const Type *getType() const { return getInstruction()->getType(); } - /// isCall - true if a CallInst is enclosed. /// Note that !isCall() does not mean it is an InvokeInst enclosed, /// it also could signify a NULL Instruction pointer. @@ -105,37 +82,39 @@ public: /// bool isInvoke() const { return getInstruction() && !I.getInt(); } - /// getInstruction - Return the instruction this call site corresponds to - /// - Instruction *getInstruction() const { return I.getPointer(); } - - /// getCaller - Return the caller function for this call site - /// - Function *getCaller() const { return getInstruction() - ->getParent()->getParent(); } + InstrTy *getInstruction() const { return I.getPointer(); } + InstrTy *operator->() const { return I.getPointer(); } + operator bool() const { return I.getPointer(); } /// getCalledValue - Return the pointer to function that is being called... /// - Value *getCalledValue() const { + ValTy *getCalledValue() const { assert(getInstruction() && "Not a call or invoke instruction!"); - return getInstruction()->getOperand(0); + return *getCallee(); } /// getCalledFunction - Return the function being called if this is a direct /// call, otherwise return null (if it's an indirect call). /// - Function *getCalledFunction() const { - return dyn_cast<Function>(getCalledValue()); + FunTy *getCalledFunction() const { + return dyn_cast<FunTy>(getCalledValue()); } /// setCalledFunction - Set the callee to the specified value... /// void setCalledFunction(Value *V) { assert(getInstruction() && "Not a call or invoke instruction!"); - getInstruction()->setOperand(0, V); + *getCallee() = V; + } + + /// isCallee - Determine whether the passed iterator points to the + /// callee operand's Use. + /// + bool isCallee(value_use_iterator<UserTy> UI) const { + return getCallee() == &UI.getUse(); } - Value *getArgument(unsigned ArgNo) const { + ValTy *getArgument(unsigned ArgNo) const { assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!"); return *(arg_begin()+ArgNo); } @@ -146,51 +125,145 @@ public: getInstruction()->setOperand(getArgumentOffset() + ArgNo, newVal); } - /// Given an operand number, returns the argument that corresponds to it. - /// OperandNo must be a valid operand number that actually corresponds to an - /// argument. - unsigned getArgumentNo(unsigned OperandNo) const { - assert(OperandNo >= getArgumentOffset() && "Operand number passed was not " - "a valid argument"); - return OperandNo - getArgumentOffset(); + /// Given a value use iterator, returns the argument that corresponds to it. + /// Iterator must actually correspond to an argument. + unsigned getArgumentNo(value_use_iterator<UserTy> I) const { + assert(getInstruction() && "Not a call or invoke instruction!"); + assert(arg_begin() <= &I.getUse() && &I.getUse() < arg_end() + && "Argument # out of range!"); + return &I.getUse() - arg_begin(); } - /// hasArgument - Returns true if this CallSite passes the given Value* as an - /// argument to the called function. - bool hasArgument(const Value *Arg) const; - /// arg_iterator - The type of iterator to use when looping over actual /// arguments at this call site... - typedef User::op_iterator arg_iterator; + typedef IterTy arg_iterator; /// arg_begin/arg_end - Return iterators corresponding to the actual argument /// list for a call site. 
- arg_iterator arg_begin() const { + IterTy arg_begin() const { assert(getInstruction() && "Not a call or invoke instruction!"); // Skip non-arguments - return getInstruction()->op_begin() + getArgumentOffset(); + return (*this)->op_begin() + getArgumentOffset(); } - arg_iterator arg_end() const { return getInstruction()->op_end(); } + IterTy arg_end() const { return (*this)->op_end() - getArgumentEndOffset(); } bool arg_empty() const { return arg_end() == arg_begin(); } unsigned arg_size() const { return unsigned(arg_end() - arg_begin()); } - - bool operator<(const CallSite &CS) const { - return getInstruction() < CS.getInstruction(); + +private: + /// Returns the operand number of the first argument + unsigned getArgumentOffset() const { + if (isCall()) + return 1; // Skip Function (ATM) + else + return 0; // Args are at the front } - bool isCallee(Value::use_iterator UI) const { - return getInstruction()->op_begin() == &UI.getUse(); + unsigned getArgumentEndOffset() const { + if (isCall()) + return 0; // Unchanged (ATM) + else + return 3; // Skip BB, BB, Function } -private: - /// Returns the operand number of the first argument - unsigned getArgumentOffset() const { + IterTy getCallee() const { + // FIXME: this is slow, since we do not have the fast versions + // of the op_*() functions here. See CallSite::getCallee. + // if (isCall()) - return 1; // Skip Function + return getInstruction()->op_begin(); // Unchanged (ATM) else - return 3; // Skip Function, BB, BB + return getInstruction()->op_end() - 3; // Skip BB, BB, Function + } +}; + +/// ImmutableCallSite - establish a view to a call site for examination +class ImmutableCallSite : public CallSiteBase<> { + typedef CallSiteBase<> _Base; +public: + ImmutableCallSite(const Value* V) : _Base(V) {} + ImmutableCallSite(const CallInst *CI) : _Base(CI) {} + ImmutableCallSite(const InvokeInst *II) : _Base(II) {} + ImmutableCallSite(const Instruction *II) : _Base(II) {} +}; + +class CallSite : public CallSiteBase<Function, Value, User, Instruction, + CallInst, InvokeInst, User::op_iterator> { + typedef CallSiteBase<Function, Value, User, Instruction, + CallInst, InvokeInst, User::op_iterator> _Base; +public: + CallSite() {} + CallSite(_Base B) : _Base(B) {} + CallSite(CallInst *CI) : _Base(CI) {} + CallSite(InvokeInst *II) : _Base(II) {} + CallSite(Instruction *II) : _Base(II) {} + + bool operator==(const CallSite &CS) const { return I == CS.I; } + bool operator!=(const CallSite &CS) const { return I != CS.I; } + + /// CallSite::get - This static method is sort of like a constructor. It will + /// create an appropriate call site for a Call or Invoke instruction, but it + /// can also create a null initialized CallSite object for something which is + /// NOT a call site. + /// + static CallSite get(Value *V) { + return _Base::get(V); } + + /// getCallingConv/setCallingConv - get or set the calling convention of the + /// call. + CallingConv::ID getCallingConv() const; + void setCallingConv(CallingConv::ID CC); + + /// getAttributes/setAttributes - get or set the parameter attributes of + /// the call. + const AttrListPtr &getAttributes() const; + void setAttributes(const AttrListPtr &PAL); + + /// paramHasAttr - whether the call or the callee has the given attribute. + bool paramHasAttr(uint16_t i, Attributes attr) const; + + /// @brief Extract the alignment for a call or parameter (0=unknown). + uint16_t getParamAlignment(uint16_t i) const; + + /// @brief Return true if the call should not be inlined. 
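
// A small sketch of the read-only view introduced above: ImmutableCallSite
// lets analysis code inspect a call or invoke without a mutable Instruction.
// describeCall is a hypothetical helper, and I is assumed to really be a call
// site (the constructor asserts this).
#include "llvm/Function.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/raw_ostream.h"

static void describeCall(const llvm::Instruction *I) {
  llvm::ImmutableCallSite CS(I);
  if (const llvm::Function *Callee = CS.getCalledFunction())
    llvm::errs() << "direct call to " << Callee->getName() << " with "
                 << CS.arg_size() << " arguments\n";
}
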
+ bool isNoInline() const; + void setIsNoInline(bool Value = true); + + /// @brief Determine if the call does not access memory. + bool doesNotAccessMemory() const; + void setDoesNotAccessMemory(bool doesNotAccessMemory = true); + + /// @brief Determine if the call does not access or only reads memory. + bool onlyReadsMemory() const; + void setOnlyReadsMemory(bool onlyReadsMemory = true); + + /// @brief Determine if the call cannot return. + bool doesNotReturn() const; + void setDoesNotReturn(bool doesNotReturn = true); + + /// @brief Determine if the call cannot unwind. + bool doesNotThrow() const; + void setDoesNotThrow(bool doesNotThrow = true); + + /// getType - Return the type of the instruction that generated this call site + /// + const Type *getType() const { return (*this)->getType(); } + + /// getCaller - Return the caller function for this call site + /// + Function *getCaller() const { return (*this)->getParent()->getParent(); } + + /// hasArgument - Returns true if this CallSite passes the given Value* as an + /// argument to the called function. + bool hasArgument(const Value *Arg) const; + + bool operator<(const CallSite &CS) const { + return getInstruction() < CS.getInstruction(); + } + +private: + User::op_iterator getCallee() const; }; } // End llvm namespace diff --git a/include/llvm/Support/Casting.h b/include/llvm/Support/Casting.h index 17bcb59..dccbfad 100644 --- a/include/llvm/Support/Casting.h +++ b/include/llvm/Support/Casting.h @@ -50,9 +50,11 @@ template<typename From> struct simplify_type<const From> { // if (isa<Type*>(myVal)) { ... } // template <typename To, typename From> -inline bool isa_impl(const From &Val) { - return To::classof(&Val); -} +struct isa_impl { + static inline bool doit(const From &Val) { + return To::classof(&Val); + } +}; template<typename To, typename From, typename SimpleType> struct isa_impl_wrap { @@ -68,7 +70,7 @@ template<typename To, typename FromTy> struct isa_impl_wrap<To, const FromTy, const FromTy> { // When From == SimpleType, we are as simple as we are going to get. static bool doit(const FromTy &Val) { - return isa_impl<To,FromTy>(Val); + return isa_impl<To,FromTy>::doit(Val); } }; @@ -251,10 +253,12 @@ struct foo { }*/ }; -template <> inline bool isa_impl<foo,bar>(const bar &Val) { - dbgs() << "Classof: " << &Val << "\n"; - return true; -} +template <> struct isa_impl<foo,bar> { + static inline bool doit(const bar &Val) { + dbgs() << "Classof: " << &Val << "\n"; + return true; + } +}; bar *fub(); diff --git a/include/llvm/Support/DebugLoc.h b/include/llvm/Support/DebugLoc.h index 32631fc..ede1ed3 100644 --- a/include/llvm/Support/DebugLoc.h +++ b/include/llvm/Support/DebugLoc.h @@ -1,4 +1,4 @@ -//===---- llvm/DebugLoc.h - Debug Location Information ----------*- C++ -*-===// +//===---- llvm/Support/DebugLoc.h - Debug Location Information --*- C++ -*-===// // // The LLVM Compiler Infrastructure // @@ -20,12 +20,71 @@ namespace llvm { class MDNode; + class LLVMContext; + + /// DebugLoc - Debug location id. This is carried by Instruction, SDNode, + /// and MachineInstr to compactly encode file/line/scope information for an + /// operation. + class NewDebugLoc { + /// LineCol - This 32-bit value encodes the line and column number for the + /// location, encoded as 24-bits for line and 8 bits for col. A value of 0 + /// for either means unknown. + unsigned LineCol; + + /// ScopeIdx - This is an opaque ID# for Scope/InlinedAt information, + /// decoded by LLVMContext. 0 is unknown. 
+ int ScopeIdx; + public: + NewDebugLoc() : LineCol(0), ScopeIdx(0) {} // Defaults to unknown. + + /// get - Get a new DebugLoc that corresponds to the specified line/col + /// scope/inline location. + static NewDebugLoc get(unsigned Line, unsigned Col, + MDNode *Scope, MDNode *InlinedAt = 0); + + /// getFromDILocation - Translate the DILocation quad into a NewDebugLoc. + static NewDebugLoc getFromDILocation(MDNode *N); + + /// isUnknown - Return true if this is an unknown location. + bool isUnknown() const { return ScopeIdx == 0; } + + unsigned getLine() const { + return (LineCol << 8) >> 8; // Mask out column. + } + + unsigned getCol() const { + return LineCol >> 24; + } + + /// getScope - This returns the scope pointer for this DebugLoc, or null if + /// invalid. + MDNode *getScope(const LLVMContext &Ctx) const; + + /// getInlinedAt - This returns the InlinedAt pointer for this DebugLoc, or + /// null if invalid or not present. + MDNode *getInlinedAt(const LLVMContext &Ctx) const; + + /// getScopeAndInlinedAt - Return both the Scope and the InlinedAt values. + void getScopeAndInlinedAt(MDNode *&Scope, MDNode *&IA, + const LLVMContext &Ctx) const; + + + /// getAsMDNode - This method converts the compressed DebugLoc node into a + /// DILocation compatible MDNode. + MDNode *getAsMDNode(const LLVMContext &Ctx) const; + + bool operator==(const NewDebugLoc &DL) const { + return LineCol == DL.LineCol && ScopeIdx == DL.ScopeIdx; + } + bool operator!=(const NewDebugLoc &DL) const { return !(*this == DL); } + }; + + /// DebugLoc - Debug location id. This is carried by SDNode and MachineInstr /// to index into a vector of unique debug location tuples. class DebugLoc { unsigned Idx; - public: DebugLoc() : Idx(~0U) {} // Defaults to invalid. @@ -42,7 +101,7 @@ namespace llvm { bool operator!=(const DebugLoc &DL) const { return !(*this == DL); } }; - /// DebugLocTracker - This class tracks debug location information. + /// DebugLocTracker - This class tracks debug location information. /// struct DebugLocTracker { /// DebugLocations - A vector of unique DebugLocTuple's. diff --git a/include/llvm/Support/FileUtilities.h b/include/llvm/Support/FileUtilities.h index cc8f953..d0dd4a7 100644 --- a/include/llvm/Support/FileUtilities.h +++ b/include/llvm/Support/FileUtilities.h @@ -40,6 +40,8 @@ namespace llvm { sys::Path Filename; bool DeleteIt; public: + FileRemover() : DeleteIt(false) {} + explicit FileRemover(const sys::Path &filename, bool deleteIt = true) : Filename(filename), DeleteIt(deleteIt) {} @@ -50,6 +52,17 @@ namespace llvm { } } + /// setFile - Give ownership of the file to the FileRemover so it will + /// be removed when the object is destroyed. If the FileRemover already + /// had ownership of a file, remove it first. + void setFile(const sys::Path &filename, bool deleteIt = true) { + if (DeleteIt) + Filename.eraseFromDisk(); + + Filename = filename; + DeleteIt = deleteIt; + } + /// releaseFile - Take ownership of the file away from the FileRemover so it /// will not be removed when the object is destroyed. void releaseFile() { DeleteIt = false; } diff --git a/include/llvm/Support/IRBuilder.h b/include/llvm/Support/IRBuilder.h index 1f4e598..c352625 100644 --- a/include/llvm/Support/IRBuilder.h +++ b/include/llvm/Support/IRBuilder.h @@ -40,8 +40,7 @@ protected: /// IRBuilderBase - Common base class shared among various IRBuilders. 
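
// A sketch of feeding the new compact debug location into an IRBuilder.
// tagWithLocation is a hypothetical helper; Scope is an assumed MDNode*
// describing the enclosing lexical scope, and 42/7 are example coordinates.
#include "llvm/Support/DebugLoc.h"
#include "llvm/Support/IRBuilder.h"

static void tagWithLocation(llvm::IRBuilder<> &Builder, llvm::MDNode *Scope) {
  llvm::NewDebugLoc Loc = llvm::NewDebugLoc::get(/*Line=*/42, /*Col=*/7, Scope);
  // Instructions created from here on pick up Loc via SetInstDebugLocation().
  Builder.SetCurrentDebugLocation(Loc);
}
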
class IRBuilderBase { - unsigned DbgMDKind; - MDNode *CurDbgLocation; + NewDebugLoc CurDbgLocation; protected: BasicBlock *BB; BasicBlock::iterator InsertPt; @@ -49,7 +48,7 @@ protected: public: IRBuilderBase(LLVMContext &context) - : DbgMDKind(0), CurDbgLocation(0), Context(context) { + : Context(context) { ClearInsertionPoint(); } @@ -65,6 +64,7 @@ public: BasicBlock *GetInsertBlock() const { return BB; } BasicBlock::iterator GetInsertPoint() const { return InsertPt; } + LLVMContext &getContext() const { return Context; } /// SetInsertPoint - This specifies that created instructions should be /// appended to the end of the specified block. @@ -82,12 +82,20 @@ public: /// SetCurrentDebugLocation - Set location information used by debugging /// information. - void SetCurrentDebugLocation(MDNode *L); - MDNode *getCurrentDebugLocation() const { return CurDbgLocation; } + void SetCurrentDebugLocation(const NewDebugLoc &L) { + CurDbgLocation = L; + } + + /// getCurrentDebugLocation - Get location information used by debugging + /// information. + const NewDebugLoc &getCurrentDebugLocation() const { return CurDbgLocation; } /// SetInstDebugLocation - If this builder has a current debug location, set /// it on the specified instruction. - void SetInstDebugLocation(Instruction *I) const; + void SetInstDebugLocation(Instruction *I) const { + if (!CurDbgLocation.isUnknown()) + I->setDebugLoc(CurDbgLocation); + } //===--------------------------------------------------------------------===// // Miscellaneous creation methods. @@ -208,7 +216,7 @@ public: template<typename InstTy> InstTy *Insert(InstTy *I, const Twine &Name = "") const { this->InsertHelper(I, Name, BB, InsertPt); - if (getCurrentDebugLocation() != 0) + if (!getCurrentDebugLocation().isUnknown()) this->SetInstDebugLocation(I); return I; } diff --git a/include/llvm/Support/MathExtras.h b/include/llvm/Support/MathExtras.h index fa12416..9c5f32c 100644 --- a/include/llvm/Support/MathExtras.h +++ b/include/llvm/Support/MathExtras.h @@ -32,35 +32,43 @@ inline uint32_t Lo_32(uint64_t Value) { return static_cast<uint32_t>(Value); } -/// is?Type - these functions produce optimal testing for integer data types. -inline bool isInt8 (int64_t Value) { - return static_cast<int8_t>(Value) == Value; -} -inline bool isUInt8 (int64_t Value) { - return static_cast<uint8_t>(Value) == Value; -} -inline bool isInt16 (int64_t Value) { - return static_cast<int16_t>(Value) == Value; -} -inline bool isUInt16(int64_t Value) { - return static_cast<uint16_t>(Value) == Value; -} -inline bool isInt32 (int64_t Value) { - return static_cast<int32_t>(Value) == Value; -} -inline bool isUInt32(int64_t Value) { - return static_cast<uint32_t>(Value) == Value; -} - +/// isInt - Checks if an integer fits into the given bit width. template<unsigned N> inline bool isInt(int64_t x) { return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1))); } +// Template specializations to get better code for common cases. +template<> +inline bool isInt<8>(int64_t x) { + return static_cast<int8_t>(x) == x; +} +template<> +inline bool isInt<16>(int64_t x) { + return static_cast<int16_t>(x) == x; +} +template<> +inline bool isInt<32>(int64_t x) { + return static_cast<int32_t>(x) == x; +} +/// isUInt - Checks if an unsigned integer fits into the given bit width. template<unsigned N> -inline bool isUint(uint64_t x) { +inline bool isUInt(uint64_t x) { return N >= 64 || x < (UINT64_C(1)<<N); } +// Template specializations to get better code for common cases. 
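
// Usage sketch for the templated width checks above (note that isUint<N> is
// now spelled isUInt<N>). fitsImm16/fitsUImm8 are hypothetical helpers for
// arbitrary 16-bit signed / 8-bit unsigned fields.
#include "llvm/Support/MathExtras.h"

static bool fitsImm16(int64_t V)  { return llvm::isInt<16>(V); }
static bool fitsUImm8(uint64_t V) { return llvm::isUInt<8>(V); }
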
+template<> +inline bool isUInt<8>(uint64_t x) { + return static_cast<uint8_t>(x) == x; +} +template<> +inline bool isUInt<16>(uint64_t x) { + return static_cast<uint16_t>(x) == x; +} +template<> +inline bool isUInt<32>(uint64_t x) { + return static_cast<uint32_t>(x) == x; +} /// isMask_32 - This function returns true if the argument is a sequence of ones /// starting at the least significant bit with the remainder zero (32 bit diff --git a/include/llvm/Support/Timer.h b/include/llvm/Support/Timer.h index 8a0f55d..00dfeaa 100644 --- a/include/llvm/Support/Timer.h +++ b/include/llvm/Support/Timer.h @@ -16,16 +16,63 @@ #define LLVM_SUPPORT_TIMER_H #include "llvm/System/DataTypes.h" -#include "llvm/System/Mutex.h" +#include "llvm/ADT/StringRef.h" +#include <cassert> #include <string> #include <vector> -#include <cassert> +#include <utility> namespace llvm { +class Timer; class TimerGroup; class raw_ostream; +class TimeRecord { + double WallTime; // Wall clock time elapsed in seconds + double UserTime; // User time elapsed + double SystemTime; // System time elapsed + ssize_t MemUsed; // Memory allocated (in bytes) +public: + TimeRecord() : WallTime(0), UserTime(0), SystemTime(0), MemUsed(0) {} + + /// getCurrentTime - Get the current time and memory usage. If Start is true + /// we get the memory usage before the time, otherwise we get time before + /// memory usage. This matters if the time to get the memory usage is + /// significant and shouldn't be counted as part of a duration. + static TimeRecord getCurrentTime(bool Start = true); + + double getProcessTime() const { return UserTime+SystemTime; } + double getUserTime() const { return UserTime; } + double getSystemTime() const { return SystemTime; } + double getWallTime() const { return WallTime; } + ssize_t getMemUsed() const { return MemUsed; } + + + // operator< - Allow sorting. + bool operator<(const TimeRecord &T) const { + // Sort by Wall Time elapsed, as it is the only thing really accurate + return WallTime < T.WallTime; + } + + void operator+=(const TimeRecord &RHS) { + WallTime += RHS.WallTime; + UserTime += RHS.UserTime; + SystemTime += RHS.SystemTime; + MemUsed += RHS.MemUsed; + } + void operator-=(const TimeRecord &RHS) { + WallTime -= RHS.WallTime; + UserTime -= RHS.UserTime; + SystemTime -= RHS.SystemTime; + MemUsed -= RHS.MemUsed; + } + + /// print - Print the current timer to standard error, and reset the "Started" + /// flag. + void print(const TimeRecord &Total, raw_ostream &OS) const; +}; + /// Timer - This class is used to track the amount of time spent between /// invocations of its startTimer()/stopTimer() methods. Given appropriate OS /// support it can also keep track of the RSS of the program at various points. @@ -35,65 +82,32 @@ class raw_ostream; /// if they are never started. /// class Timer { - double Elapsed; // Wall clock time elapsed in seconds - double UserTime; // User time elapsed - double SystemTime; // System time elapsed - ssize_t MemUsed; // Memory allocated (in bytes) - size_t PeakMem; // Peak memory used - size_t PeakMemBase; // Temporary for peak calculation... - std::string Name; // The name of this time variable + TimeRecord Time; + std::string Name; // The name of this time variable. bool Started; // Has this time variable ever been started? TimerGroup *TG; // The TimerGroup this Timer is in. - mutable sys::SmartMutex<true> Lock; // Mutex for the contents of this Timer. + + Timer **Prev, *Next; // Doubly linked list of timers in the group. 
public: - explicit Timer(const std::string &N); - Timer(const std::string &N, TimerGroup &tg); - Timer(const Timer &T); - ~Timer(); - - double getProcessTime() const { return UserTime+SystemTime; } - double getWallTime() const { return Elapsed; } - ssize_t getMemUsed() const { return MemUsed; } - size_t getPeakMem() const { return PeakMem; } - std::string getName() const { return Name; } - + explicit Timer(StringRef N) : TG(0) { init(N); } + Timer(StringRef N, TimerGroup &tg) : TG(0) { init(N, tg); } + Timer(const Timer &RHS) : TG(0) { + assert(RHS.TG == 0 && "Can only copy uninitialized timers"); + } const Timer &operator=(const Timer &T) { - if (&T < this) { - T.Lock.acquire(); - Lock.acquire(); - } else { - Lock.acquire(); - T.Lock.acquire(); - } - - Elapsed = T.Elapsed; - UserTime = T.UserTime; - SystemTime = T.SystemTime; - MemUsed = T.MemUsed; - PeakMem = T.PeakMem; - PeakMemBase = T.PeakMemBase; - Name = T.Name; - Started = T.Started; - assert(TG == T.TG && "Can only assign timers in the same TimerGroup!"); - - if (&T < this) { - T.Lock.release(); - Lock.release(); - } else { - Lock.release(); - T.Lock.release(); - } - + assert(TG == 0 && T.TG == 0 && "Can only assign uninit timers"); return *this; } + ~Timer(); - // operator< - Allow sorting... - bool operator<(const Timer &T) const { - // Sort by Wall Time elapsed, as it is the only thing really accurate - return Elapsed < T.Elapsed; - } - bool operator>(const Timer &T) const { return T.operator<(*this); } - + // Create an uninitialized timer, client must use 'init'. + explicit Timer() : TG(0) {} + void init(StringRef N); + void init(StringRef N, TimerGroup &tg); + + const std::string &getName() const { return Name; } + bool isInitialized() const { return TG != 0; } + /// startTimer - Start the timer running. Time between calls to /// startTimer/stopTimer is counted by the Timer class. Note that these calls /// must be correctly paired. @@ -104,25 +118,8 @@ public: /// void stopTimer(); - /// addPeakMemoryMeasurement - This method should be called whenever memory - /// usage needs to be checked. It adds a peak memory measurement to the - /// currently active timers, which will be printed when the timer group prints - /// - static void addPeakMemoryMeasurement(); - - /// print - Print the current timer to standard error, and reset the "Started" - /// flag. - void print(const Timer &Total, raw_ostream &OS); - private: friend class TimerGroup; - - // Copy ctor, initialize with no TG member. - Timer(bool, const Timer &T); - - /// sum - Add the time accumulated in the specified timer into this timer. - /// - void sum(const Timer &T); }; @@ -139,12 +136,10 @@ public: T->startTimer(); } explicit TimeRegion(Timer *t) : T(t) { - if (T) - T->startTimer(); + if (T) T->startTimer(); } ~TimeRegion() { - if (T) - T->stopTimer(); + if (T) T->stopTimer(); } }; @@ -155,9 +150,8 @@ public: /// is primarily used for debugging and for hunting performance problems. /// struct NamedRegionTimer : public TimeRegion { - explicit NamedRegionTimer(const std::string &Name); - explicit NamedRegionTimer(const std::string &Name, - const std::string &GroupName); + explicit NamedRegionTimer(StringRef Name); + explicit NamedRegionTimer(StringRef Name, StringRef GroupName); }; @@ -168,20 +162,29 @@ struct NamedRegionTimer : public TimeRegion { /// class TimerGroup { std::string Name; - unsigned NumTimers; - std::vector<Timer> TimersToPrint; + Timer *FirstTimer; // First timer in the group. 
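
// A usage sketch for the reworked timer API above: timers now take StringRef
// names and can be tied to a TimerGroup, and TimeRegion scopes the
// startTimer()/stopTimer() pair. The group/timer names and timedWork are
// hypothetical.
#include "llvm/Support/Timer.h"

static void timedWork() {
  static llvm::TimerGroup Group("frontend");
  static llvm::Timer ParseTimer("parsing", Group);
  llvm::TimeRegion R(ParseTimer);  // startTimer() now, stopTimer() at scope exit
  // ... the work being measured ...
}
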
+ std::vector<std::pair<TimeRecord, std::string> > TimersToPrint; + + TimerGroup **Prev, *Next; // Doubly linked list of TimerGroup's. + TimerGroup(const TimerGroup &TG); // DO NOT IMPLEMENT + void operator=(const TimerGroup &TG); // DO NOT IMPLEMENT public: - explicit TimerGroup(const std::string &name) : Name(name), NumTimers(0) {} - ~TimerGroup() { - assert(NumTimers == 0 && - "TimerGroup destroyed before all contained timers!"); - } + explicit TimerGroup(StringRef name); + ~TimerGroup(); + + void setName(StringRef name) { Name.assign(name.begin(), name.end()); } + /// print - Print any started timers in this group and zero them. + void print(raw_ostream &OS); + + /// printAll - This static method prints all timers and clears them all out. + static void printAll(raw_ostream &OS); + private: friend class Timer; - void addTimer(); - void removeTimer(); - void addTimerToPrint(const Timer &T); + void addTimer(Timer &T); + void removeTimer(Timer &T); + void PrintQueuedTimers(raw_ostream &OS); }; } // End llvm namespace diff --git a/include/llvm/Support/ValueHandle.h b/include/llvm/Support/ValueHandle.h index 82c3cae..130a620 100644 --- a/include/llvm/Support/ValueHandle.h +++ b/include/llvm/Support/ValueHandle.h @@ -284,8 +284,7 @@ class TrackingVH : public ValueHandleBase { Value *VP = ValueHandleBase::getValPtr(); // Null is always ok. - if (!VP) - return; + if (!VP) return; // Check that this value is valid (i.e., it hasn't been deleted). We // explicitly delay this check until access to avoid requiring clients to be @@ -302,7 +301,7 @@ class TrackingVH : public ValueHandleBase { ValueTy *getValPtr() const { CheckValidity(); - return static_cast<ValueTy*>(ValueHandleBase::getValPtr()); + return (ValueTy*)ValueHandleBase::getValPtr(); } void setValPtr(ValueTy *P) { CheckValidity(); diff --git a/include/llvm/Support/raw_ostream.h b/include/llvm/Support/raw_ostream.h index 0f227cc..e0de80f 100644 --- a/include/llvm/Support/raw_ostream.h +++ b/include/llvm/Support/raw_ostream.h @@ -89,6 +89,7 @@ public: /// has_error - Return the value of the flag in this raw_ostream indicating /// whether an output error has been encountered. + /// This doesn't implicitly flush any pending output. bool has_error() const { return Error; } diff --git a/include/llvm/System/Memory.h b/include/llvm/System/Memory.h index 69251dd..01bcab1 100644 --- a/include/llvm/System/Memory.h +++ b/include/llvm/System/Memory.h @@ -27,7 +27,7 @@ namespace sys { /// @brief Memory block abstraction. class MemoryBlock { public: - MemoryBlock() { } + MemoryBlock() : Address(0), Size(0) { } MemoryBlock(void *addr, size_t size) : Address(addr), Size(size) { } void *base() const { return Address; } size_t size() const { return Size; } diff --git a/include/llvm/Target/TargetAsmBackend.h b/include/llvm/Target/TargetAsmBackend.h index bb501cc..f350ecc 100644 --- a/include/llvm/Target/TargetAsmBackend.h +++ b/include/llvm/Target/TargetAsmBackend.h @@ -15,8 +15,12 @@ namespace llvm { class MCAsmFixup; class MCDataFragment; +class MCInst; +class MCInstFragment; class MCObjectWriter; class MCSection; +template<typename T> +class SmallVectorImpl; class Target; class raw_ostream; @@ -95,6 +99,27 @@ public: /// fixup kind as appropriate. virtual void ApplyFixup(const MCAsmFixup &Fixup, MCDataFragment &Fragment, uint64_t Value) const = 0; + + /// MayNeedRelaxation - Check whether the given instruction may need + /// relaxation. + /// + /// \arg Inst - The instruction to test. 
+ /// \arg Fixups - The actual fixups this instruction encoded to, for potential + /// use by the target backend. + virtual bool MayNeedRelaxation(const MCInst &Inst, + const SmallVectorImpl<MCAsmFixup> &Fixups) const = 0; + + /// RelaxInstruction - Relax the instruction in the given fragment to the next + /// wider instruction. + virtual void RelaxInstruction(const MCInstFragment *IF, + MCInst &Res) const = 0; + + /// WriteNopData - Write an (optimal) nop sequence of Count bytes to the given + /// output. If the target cannot generate such a sequence, it should return an + /// error. + /// + /// \return - True on success. + virtual bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const = 0; }; } // End llvm namespace diff --git a/include/llvm/Target/TargetInstrDesc.h b/include/llvm/Target/TargetInstrDesc.h index 9efb683..adc37e1 100644 --- a/include/llvm/Target/TargetInstrDesc.h +++ b/include/llvm/Target/TargetInstrDesc.h @@ -204,6 +204,16 @@ public: return ImplicitUses; } + /// getNumImplicitUses - Return the number of implicit uses this instruction + /// has. + unsigned getNumImplicitUses() const { + if (ImplicitUses == 0) return 0; + unsigned i = 0; + for (; ImplicitUses[i]; ++i) /*empty*/; + return i; + } + + /// getImplicitDefs - Return a list of registers that are potentially /// written by any instance of this machine instruction. For example, on X86, /// many instructions implicitly set the flags register. In this case, they @@ -218,6 +228,15 @@ public: return ImplicitDefs; } + /// getNumImplicitDefs - Return the number of implicit defs this instruction + /// has. + unsigned getNumImplicitDefs() const { + if (ImplicitDefs == 0) return 0; + unsigned i = 0; + for (; ImplicitDefs[i]; ++i) /*empty*/; + return i; + } + /// hasImplicitUseOfPhysReg - Return true if this instruction implicitly /// uses the specified physical register. bool hasImplicitUseOfPhysReg(unsigned Reg) const { diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h index da0f686..b0534dd 100644 --- a/include/llvm/Target/TargetLowering.h +++ b/include/llvm/Target/TargetLowering.h @@ -469,29 +469,6 @@ public: getIndexedStoreAction(IdxMode, VT) == Custom); } - /// getConvertAction - Return how the conversion should be treated: - /// either it is legal, needs to be promoted to a larger size, needs to be - /// expanded to some other code sequence, or the target has a custom expander - /// for it. - LegalizeAction - getConvertAction(EVT FromVT, EVT ToVT) const { - assert((unsigned)FromVT.getSimpleVT().SimpleTy < - array_lengthof(ConvertActions) && - (unsigned)ToVT.getSimpleVT().SimpleTy < - sizeof(ConvertActions[0])*4 && - "Table isn't big enough!"); - return (LegalizeAction)((ConvertActions[FromVT.getSimpleVT().SimpleTy] >> - (2*ToVT.getSimpleVT().SimpleTy)) & 3); - } - - /// isConvertLegal - Return true if the specified conversion is legal - /// on this target. - bool isConvertLegal(EVT FromVT, EVT ToVT) const { - return isTypeLegal(FromVT) && isTypeLegal(ToVT) && - (getConvertAction(FromVT, ToVT) == Legal || - getConvertAction(FromVT, ToVT) == Custom); - } - /// getCondCodeAction - Return how the condition code should be treated: /// either it is legal, needs to be expanded to some other code sequence, /// or the target has a custom expander for it. @@ -545,7 +522,7 @@ public: /// counterpart (e.g. structs), otherwise it will assert. EVT getValueType(const Type *Ty, bool AllowUnknown = false) const { EVT VT = EVT::getEVT(Ty, AllowUnknown); - return VT == MVT:: iPTR ? 
PointerTy : VT; + return VT == MVT::iPTR ? PointerTy : VT; } /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate @@ -657,11 +634,14 @@ public: /// getOptimalMemOpType - Returns the target specific optimal type for load /// and store operations as a result of memset, memcpy, and memmove lowering. - /// It returns EVT::Other if SelectionDAG should be responsible for - /// determining it. - virtual EVT getOptimalMemOpType(uint64_t Size, unsigned Align, - bool isSrcConst, bool isSrcStr, - SelectionDAG &DAG) const { + /// If DstAlign is zero that means it's safe to destination alignment can + /// satisfy any constraint. Similarly if SrcAlign is zero it means there isn't + /// a need to check it against alignment requirement, probably because the + /// source does not need to be loaded. It returns EVT::Other if SelectionDAG + /// should be responsible for determining it. + virtual EVT getOptimalMemOpType(uint64_t Size, + unsigned DstAlign, unsigned SrcAlign, + bool SafeToUseFP, SelectionDAG &DAG) const { return MVT::Other; } @@ -990,7 +970,7 @@ protected: } /// setLoadExtAction - Indicate that the specified load with extension does - /// not work with the with specified type and indicate what to do about it. + /// not work with the specified type and indicate what to do about it. void setLoadExtAction(unsigned ExtType, MVT VT, LegalizeAction Action) { assert((unsigned)VT.SimpleTy*2 < 63 && @@ -1001,7 +981,7 @@ protected: } /// setTruncStoreAction - Indicate that the specified truncating store does - /// not work with the with specified type and indicate what to do about it. + /// not work with the specified type and indicate what to do about it. void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action) { assert((unsigned)ValVT.SimpleTy < array_lengthof(TruncStoreActions) && @@ -1012,7 +992,7 @@ protected: } /// setIndexedLoadAction - Indicate that the specified indexed load does or - /// does not work with the with specified type and indicate what to do abort + /// does not work with the specified type and indicate what to do abort /// it. NOTE: All indexed mode loads are initialized to Expand in /// TargetLowering.cpp void setIndexedLoadAction(unsigned IdxMode, MVT VT, @@ -1024,7 +1004,7 @@ protected: } /// setIndexedStoreAction - Indicate that the specified indexed store does or - /// does not work with the with specified type and indicate what to do about + /// does not work with the specified type and indicate what to do about /// it. NOTE: All indexed mode stores are initialized to Expand in /// TargetLowering.cpp void setIndexedStoreAction(unsigned IdxMode, MVT VT, @@ -1035,17 +1015,6 @@ protected: IndexedModeActions[(unsigned)VT.SimpleTy][1][IdxMode] = (uint8_t)Action; } - /// setConvertAction - Indicate that the specified conversion does or does - /// not work with the with specified type and indicate what to do about it. - void setConvertAction(MVT FromVT, MVT ToVT, - LegalizeAction Action) { - assert((unsigned)FromVT.SimpleTy < array_lengthof(ConvertActions) && - (unsigned)ToVT.SimpleTy < MVT::LAST_VALUETYPE && - "Table isn't big enough!"); - ConvertActions[FromVT.SimpleTy] &= ~(uint64_t(3UL) << ToVT.SimpleTy*2); - ConvertActions[FromVT.SimpleTy] |= (uint64_t)Action << ToVT.SimpleTy*2; - } - /// setCondCodeAction - Indicate that the specified condition code is or isn't /// supported on the target and indicate what to do about it. 
void setCondCodeAction(ISD::CondCode CC, MVT VT, @@ -1674,13 +1643,6 @@ private: /// represents the various modes for load store. uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][2][ISD::LAST_INDEXED_MODE]; - /// ConvertActions - For each conversion from source type to destination type, - /// keep a LegalizeAction that indicates how instruction selection should - /// deal with the conversion. - /// Currently, this is used only for floating->floating conversions - /// (FP_EXTEND and FP_ROUND). - uint64_t ConvertActions[MVT::LAST_VALUETYPE]; - /// CondCodeActions - For each condition code (ISD::CondCode) keep a /// LegalizeAction that indicates how instruction selection should /// deal with the condition code. diff --git a/include/llvm/Target/TargetMachine.h b/include/llvm/Target/TargetMachine.h index b239a30..d1d665f 100644 --- a/include/llvm/Target/TargetMachine.h +++ b/include/llvm/Target/TargetMachine.h @@ -192,7 +192,7 @@ public: formatted_raw_ostream &, CodeGenFileType, CodeGenOpt::Level, - bool DisableVerify = true) { + bool = true) { return true; } @@ -205,7 +205,7 @@ public: virtual bool addPassesToEmitMachineCode(PassManagerBase &, JITCodeEmitter &, CodeGenOpt::Level, - bool DisableVerify = true) { + bool = true) { return true; } @@ -216,7 +216,7 @@ public: virtual bool addPassesToEmitWholeFile(PassManager &, formatted_raw_ostream &, CodeGenFileType, CodeGenOpt::Level, - bool DisableVerify = true) { + bool = true) { return true; } }; diff --git a/include/llvm/Target/TargetSelectionDAG.td b/include/llvm/Target/TargetSelectionDAG.td index e56d886..58ccfba 100644 --- a/include/llvm/Target/TargetSelectionDAG.td +++ b/include/llvm/Target/TargetSelectionDAG.td @@ -92,6 +92,10 @@ def SDTIntBinOp : SDTypeProfile<1, 2, [ // add, and, or, xor, udiv, etc. def SDTIntShiftOp : SDTypeProfile<1, 2, [ // shl, sra, srl SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2> ]>; +def SDTIntBinHiLoOp : SDTypeProfile<2, 2, [ // mulhi, mullo, sdivrem, udivrem + SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>,SDTCisInt<0> +]>; + def SDTFPBinOp : SDTypeProfile<1, 2, [ // fadd, fmul, etc. 
SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisFP<0> ]>; @@ -235,7 +239,6 @@ class SDNode<string opcode, SDTypeProfile typeprof, // Special TableGen-recognized dag nodes def set; def implicit; -def parallel; def node; def srcvalue; @@ -282,10 +285,14 @@ def mul : SDNode<"ISD::MUL" , SDTIntBinOp, [SDNPCommutative, SDNPAssociative]>; def mulhs : SDNode<"ISD::MULHS" , SDTIntBinOp, [SDNPCommutative]>; def mulhu : SDNode<"ISD::MULHU" , SDTIntBinOp, [SDNPCommutative]>; +def smullohi : SDNode<"ISD::SMUL_LOHI" , SDTIntBinHiLoOp, [SDNPCommutative]>; +def umullohi : SDNode<"ISD::UMUL_LOHI" , SDTIntBinHiLoOp, [SDNPCommutative]>; def sdiv : SDNode<"ISD::SDIV" , SDTIntBinOp>; def udiv : SDNode<"ISD::UDIV" , SDTIntBinOp>; def srem : SDNode<"ISD::SREM" , SDTIntBinOp>; def urem : SDNode<"ISD::UREM" , SDTIntBinOp>; +def sdivrem : SDNode<"ISD::SDIVREM" , SDTIntBinHiLoOp>; +def udivrem : SDNode<"ISD::UDIVREM" , SDTIntBinHiLoOp>; def srl : SDNode<"ISD::SRL" , SDTIntShiftOp>; def sra : SDNode<"ISD::SRA" , SDTIntShiftOp>; def shl : SDNode<"ISD::SHL" , SDTIntShiftOp>; @@ -485,22 +492,15 @@ def vtFP : PatLeaf<(vt), [{ return N->getVT().isFloatingPoint(); }]>; def immAllOnesV: PatLeaf<(build_vector), [{ return ISD::isBuildVectorAllOnes(N); }]>; -def immAllOnesV_bc: PatLeaf<(bitconvert), [{ - return ISD::isBuildVectorAllOnes(N); -}]>; def immAllZerosV: PatLeaf<(build_vector), [{ return ISD::isBuildVectorAllZeros(N); }]>; -def immAllZerosV_bc: PatLeaf<(bitconvert), [{ - return ISD::isBuildVectorAllZeros(N); -}]>; // Other helper fragments. def not : PatFrag<(ops node:$in), (xor node:$in, -1)>; def vnot : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV)>; -def vnot_conv : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV_bc)>; def ineg : PatFrag<(ops node:$in), (sub 0, node:$in)>; // load fragments. diff --git a/include/llvm/Transforms/Utils/BuildLibCalls.h b/include/llvm/Transforms/Utils/BuildLibCalls.h index ac5f07e..d278672 100644 --- a/include/llvm/Transforms/Utils/BuildLibCalls.h +++ b/include/llvm/Transforms/Utils/BuildLibCalls.h @@ -49,6 +49,12 @@ namespace llvm { Value *EmitMemCpy(Value *Dst, Value *Src, Value *Len, unsigned Align, IRBuilder<> &B, const TargetData *TD); + /// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder. + /// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src + /// are pointers. + Value *EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize, + IRBuilder<> &B, const TargetData *TD); + /// EmitMemMove - Emit a call to the memmove function to the builder. This /// always expects that the size has type 'intptr_t' and Dst/Src are pointers. Value *EmitMemMove(Value *Dst, Value *Src, Value *Len, diff --git a/include/llvm/Transforms/Utils/SSAUpdater.h b/include/llvm/Transforms/Utils/SSAUpdater.h index 927e156..b29b749 100644 --- a/include/llvm/Transforms/Utils/SSAUpdater.h +++ b/include/llvm/Transforms/Utils/SSAUpdater.h @@ -27,22 +27,28 @@ namespace llvm { /// transformation wants to rewrite a set of uses of one value with uses of a /// set of values. class SSAUpdater { +public: + class BBInfo; + +private: /// AvailableVals - This keeps track of which value to use on a per-block - /// basis. When we insert PHI nodes, we keep track of them here. We use - /// TrackingVH's for the value of the map because we RAUW PHI nodes when we - /// eliminate them, and want the TrackingVH's to track this. - //typedef DenseMap<BasicBlock*, TrackingVH<Value> > AvailableValsTy; + /// basis. When we insert PHI nodes, we keep track of them here. 
+ //typedef DenseMap<BasicBlock*, Value*> AvailableValsTy; void *AV; /// PrototypeValue is an arbitrary representative value, which we derive names /// and a type for PHI nodes. Value *PrototypeValue; - /// IncomingPredInfo - We use this as scratch space when doing our recursive - /// walk. This should only be used in GetValueInBlockInternal, normally it - /// should be empty. - //std::vector<std::pair<BasicBlock*, TrackingVH<Value> > > IncomingPredInfo; - void *IPI; + /// BBMap - The GetValueAtEndOfBlock method maintains this mapping from + /// basic blocks to BBInfo structures. + /// typedef DenseMap<BasicBlock*, BBInfo*> BBMapTy; + void *BM; + + /// Allocator - The GetValueAtEndOfBlock method uses this BumpPtrAllocator to + /// hold its internal data. The allocator and its storage is created and + /// discarded for each invocation of GetValueAtEndOfBlock. + void *BPA; /// InsertedPHIs - If this is non-null, the SSAUpdater adds all PHI nodes that /// it creates to the vector. @@ -99,6 +105,14 @@ public: private: Value *GetValueAtEndOfBlockInternal(BasicBlock *BB); + void FindPHIPlacement(BasicBlock *BB, BBInfo *Info, bool &Changed, + unsigned Counter); + void FindAvailableVal(BasicBlock *BB, BBInfo *Info, unsigned Counter); + void FindExistingPHI(BasicBlock *BB); + bool CheckIfPHIMatches(PHINode *PHI); + void RecordMatchingPHI(PHINode *PHI); + void ClearPHITags(PHINode *PHI); + void operator=(const SSAUpdater&); // DO NOT IMPLEMENT SSAUpdater(const SSAUpdater&); // DO NOT IMPLEMENT }; diff --git a/include/llvm/Type.h b/include/llvm/Type.h index d09913a..df3a16c 100644 --- a/include/llvm/Type.h +++ b/include/llvm/Type.h @@ -548,9 +548,11 @@ template <> struct GraphTraits<const Type*> { } }; -template <> inline bool isa_impl<PointerType, Type>(const Type &Ty) { - return Ty.getTypeID() == Type::PointerTyID; -} +template <> struct isa_impl<PointerType, Type> { + static inline bool doit(const Type &Ty) { + return Ty.getTypeID() == Type::PointerTyID; + } +}; raw_ostream &operator<<(raw_ostream &OS, const Type &T); diff --git a/include/llvm/Value.h b/include/llvm/Value.h index d06cbc0..bc25a0f 100644 --- a/include/llvm/Value.h +++ b/include/llvm/Value.h @@ -157,13 +157,13 @@ public: // Methods for handling the chain of uses of this Value. // typedef value_use_iterator<User> use_iterator; - typedef value_use_iterator<const User> use_const_iterator; + typedef value_use_iterator<const User> const_use_iterator; bool use_empty() const { return UseList == 0; } use_iterator use_begin() { return use_iterator(UseList); } - use_const_iterator use_begin() const { return use_const_iterator(UseList); } + const_use_iterator use_begin() const { return const_use_iterator(UseList); } use_iterator use_end() { return use_iterator(0); } - use_const_iterator use_end() const { return use_const_iterator(0); } + const_use_iterator use_end() const { return const_use_iterator(0); } User *use_back() { return *use_begin(); } const User *use_back() const { return *use_begin(); } @@ -172,7 +172,7 @@ public: /// traversing the whole use list. /// bool hasOneUse() const { - use_const_iterator I = use_begin(), E = use_end(); + const_use_iterator I = use_begin(), E = use_end(); if (I == E) return false; return ++I == E; } @@ -324,39 +324,67 @@ void Use::set(Value *V) { // isa - Provide some specializations of isa so that we don't have to include // the subtype header files to test to see if the value is a subclass... 
// -template <> inline bool isa_impl<Constant, Value>(const Value &Val) { - return Val.getValueID() >= Value::ConstantFirstVal && - Val.getValueID() <= Value::ConstantLastVal; -} -template <> inline bool isa_impl<Argument, Value>(const Value &Val) { - return Val.getValueID() == Value::ArgumentVal; -} -template <> inline bool isa_impl<InlineAsm, Value>(const Value &Val) { - return Val.getValueID() == Value::InlineAsmVal; -} -template <> inline bool isa_impl<Instruction, Value>(const Value &Val) { - return Val.getValueID() >= Value::InstructionVal; -} -template <> inline bool isa_impl<BasicBlock, Value>(const Value &Val) { - return Val.getValueID() == Value::BasicBlockVal; -} -template <> inline bool isa_impl<Function, Value>(const Value &Val) { - return Val.getValueID() == Value::FunctionVal; -} -template <> inline bool isa_impl<GlobalVariable, Value>(const Value &Val) { - return Val.getValueID() == Value::GlobalVariableVal; -} -template <> inline bool isa_impl<GlobalAlias, Value>(const Value &Val) { - return Val.getValueID() == Value::GlobalAliasVal; -} -template <> inline bool isa_impl<GlobalValue, Value>(const Value &Val) { - return isa<GlobalVariable>(Val) || isa<Function>(Val) || - isa<GlobalAlias>(Val); -} -template <> inline bool isa_impl<MDNode, Value>(const Value &Val) { - return Val.getValueID() == Value::MDNodeVal; -} - +template <> struct isa_impl<Constant, Value> { + static inline bool doit(const Value &Val) { + return Val.getValueID() >= Value::ConstantFirstVal && + Val.getValueID() <= Value::ConstantLastVal; + } +}; + +template <> struct isa_impl<Argument, Value> { + static inline bool doit (const Value &Val) { + return Val.getValueID() == Value::ArgumentVal; + } +}; + +template <> struct isa_impl<InlineAsm, Value> { + static inline bool doit(const Value &Val) { + return Val.getValueID() == Value::InlineAsmVal; + } +}; + +template <> struct isa_impl<Instruction, Value> { + static inline bool doit(const Value &Val) { + return Val.getValueID() >= Value::InstructionVal; + } +}; + +template <> struct isa_impl<BasicBlock, Value> { + static inline bool doit(const Value &Val) { + return Val.getValueID() == Value::BasicBlockVal; + } +}; + +template <> struct isa_impl<Function, Value> { + static inline bool doit(const Value &Val) { + return Val.getValueID() == Value::FunctionVal; + } +}; + +template <> struct isa_impl<GlobalVariable, Value> { + static inline bool doit(const Value &Val) { + return Val.getValueID() == Value::GlobalVariableVal; + } +}; + +template <> struct isa_impl<GlobalAlias, Value> { + static inline bool doit(const Value &Val) { + return Val.getValueID() == Value::GlobalAliasVal; + } +}; + +template <> struct isa_impl<GlobalValue, Value> { + static inline bool doit(const Value &Val) { + return isa<GlobalVariable>(Val) || isa<Function>(Val) || + isa<GlobalAlias>(Val); + } +}; + +template <> struct isa_impl<MDNode, Value> { + static inline bool doit(const Value &Val) { + return Val.getValueID() == Value::MDNodeVal; + } +}; // Value* is only 4-byte aligned. template<> |
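
The Type.h and Value.h hunks above replace specialized isa_impl function templates with specialized trait structs that expose a static doit() member; unlike a function template, the struct form can also be partially specialized later. The following standalone sketch shows the shape of that idiom with invented placeholder types (Shape, Circle and a hand-rolled isa<> front end); it illustrates the pattern only and is not LLVM's actual casting machinery.

// Standalone sketch of the isa_impl trait idiom the diff moves to.
// All names here (Shape, Circle, the TypeID values) are invented.
#include <cassert>

struct Shape  { int TypeID; };
struct Circle : Shape { Circle() { TypeID = 1; } };

// Primary template: a (To, From) pair answers isa<> through a static doit().
template <typename To, typename From> struct isa_impl {
  static inline bool doit(const From &) { return false; }
};

// Explicit specialization, mirroring the new "struct isa_impl<X, Y>" form.
template <> struct isa_impl<Circle, Shape> {
  static inline bool doit(const Shape &S) { return S.TypeID == 1; }
};

// A minimal isa<> front end that dispatches to the trait struct.
template <typename To, typename From>
bool isa(const From &V) { return isa_impl<To, From>::doit(V); }

int main() {
  Circle C;
  Shape &S = C;
  assert(isa<Circle>(S));        // resolved via isa_impl<Circle, Shape>::doit
  Shape Plain; Plain.TypeID = 0;
  assert(!isa<Circle>(Plain));   // same specialization runs and rejects it
  return 0;
}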
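
The TargetInstrDesc.h hunk adds getNumImplicitUses() and getNumImplicitDefs(), which count entries in a 0-terminated register list. A minimal sketch of that scan, assuming a made-up list rather than any real target's implicit-register tables:

// Counting a 0-terminated register list, as the new accessors do.
// The register numbers below are invented; 0 terminates the list.
#include <cassert>

static unsigned countRegList(const unsigned short *List) {
  if (List == 0) return 0;        // no implicit operands at all
  unsigned i = 0;
  for (; List[i]; ++i) /*empty*/; // scan until the 0 terminator
  return i;
}

int main() {
  const unsigned short ImplicitDefs[] = { 19, 20, 0 }; // hypothetical regs
  assert(countRegList(ImplicitDefs) == 2);
  assert(countRegList(0) == 0);
  return 0;
}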
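
The TargetLowering.h hunk changes getOptimalMemOpType() to take separate DstAlign and SrcAlign arguments, where zero means the corresponding alignment imposes no constraint. The standalone helper below models only that zero-means-unconstrained convention, with an invented unit-size policy; the real virtual hook also receives a SafeToUseFP flag and the SelectionDAG, and returns an EVT chosen by each target.

// Hypothetical sketch of the kind of decision the reworked hook makes:
// pick the widest copy unit that the size and the (possibly
// unconstrained) alignments allow. Policy and unit sizes are invented.
#include <stdint.h>
#include <cassert>

enum MemOpUnit { U8 = 1, U16 = 2, U32 = 4, U64 = 8 };

static MemOpUnit pickMemOpUnit(uint64_t Size, unsigned DstAlign,
                               unsigned SrcAlign) {
  // Align == 0 means "no constraint", so treat it as maximally aligned.
  unsigned Align = 8;
  if (DstAlign && DstAlign < Align) Align = DstAlign;
  if (SrcAlign && SrcAlign < Align) Align = SrcAlign;
  for (unsigned U = 8; U > 1; U /= 2)
    if (Size >= U && Align >= U)
      return (MemOpUnit)U;
  return U8;
}

int main() {
  assert(pickMemOpUnit(16, 0, 0) == U64); // unconstrained: widest unit
  assert(pickMemOpUnit(16, 4, 0) == U32); // destination limited to 4 bytes
  assert(pickMemOpUnit(3, 0, 0)  == U16); // size-limited
  return 0;
}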
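
BuildLibCalls.h gains EmitMemCpyChk(), which emits a call to the fortified __memcpy_chk routine that takes the destination object size as an extra argument. The model below is only an approximation of the runtime contract such a call relies on (copy if the length fits the object, otherwise abort); it is neither the libc implementation nor the IR-building helper itself.

// Rough, illustrative model of a checked copy: refuse lengths that would
// overflow the known destination object size.
#include <cassert>
#include <cstdlib>
#include <cstring>

static void *checked_memcpy_model(void *Dst, const void *Src,
                                  std::size_t Len, std::size_t ObjSize) {
  if (Len > ObjSize)            // would overflow the destination object
    std::abort();
  return std::memcpy(Dst, Src, Len);
}

int main() {
  char Buf[8];
  const char Msg[] = "1234567";                              // 8 bytes with NUL
  checked_memcpy_model(Buf, Msg, sizeof(Msg), sizeof(Buf));  // fits: copies
  assert(Buf[0] == '1');
  return 0;
}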