author | rdivacky <rdivacky@FreeBSD.org> | 2010-07-13 17:19:57 +0000 |
---|---|---|
committer | rdivacky <rdivacky@FreeBSD.org> | 2010-07-13 17:19:57 +0000 |
commit | 9112829d76cbb8e0c8ef51bbc2d7d1be48cd7b74 (patch) | |
tree | 9de1c5f67a98cd0e73c60838396486c984f63ac2 /include/llvm/Analysis | |
parent | 1e3dec662ea18131c495db50caccc57f77b7a5fe (diff) | |
download | FreeBSD-src-9112829d76cbb8e0c8ef51bbc2d7d1be48cd7b74.zip FreeBSD-src-9112829d76cbb8e0c8ef51bbc2d7d1be48cd7b74.tar.gz |
Update LLVM to r108243.
Diffstat (limited to 'include/llvm/Analysis')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | include/llvm/Analysis/AliasAnalysis.h | 21 |
| -rw-r--r-- | include/llvm/Analysis/CFGPrinter.h | 6 |
| -rw-r--r-- | include/llvm/Analysis/CaptureTracking.h | 6 |
| -rw-r--r-- | include/llvm/Analysis/CodeMetrics.h | 72 |
| -rw-r--r-- | include/llvm/Analysis/DebugInfo.h | 9 |
| -rw-r--r-- | include/llvm/Analysis/DominatorInternals.h | 18 |
| -rw-r--r-- | include/llvm/Analysis/Dominators.h | 22 |
| -rw-r--r-- | include/llvm/Analysis/InlineCost.h | 45 |
| -rw-r--r-- | include/llvm/Analysis/IntervalIterator.h | 22 |
| -rw-r--r-- | include/llvm/Analysis/Loads.h | 51 |
| -rw-r--r-- | include/llvm/Analysis/LoopInfo.h | 59 |
| -rw-r--r-- | include/llvm/Analysis/MemoryBuiltins.h | 4 |
| -rw-r--r-- | include/llvm/Analysis/ScalarEvolution.h | 8 |
| -rw-r--r-- | include/llvm/Analysis/ScalarEvolutionExpander.h | 15 |
| -rw-r--r-- | include/llvm/Analysis/ValueTracking.h | 2 |

15 files changed, 232 insertions, 128 deletions

```diff
diff --git a/include/llvm/Analysis/AliasAnalysis.h b/include/llvm/Analysis/AliasAnalysis.h
index 9f41135..e611a35 100644
--- a/include/llvm/Analysis/AliasAnalysis.h
+++ b/include/llvm/Analysis/AliasAnalysis.h
@@ -165,27 +165,6 @@ public:
     /// ModRefInfo - Whether the pointer is loaded or stored to/from.
     ///
     ModRefResult ModRefInfo;
-
-    /// AccessType - Specific fine-grained access information for the argument.
-    /// If none of these classifications is general enough, the
-    /// getModRefBehavior method should not return AccessesArguments*. If a
-    /// record is not returned for a particular argument, the argument is never
-    /// dead and never dereferenced.
-    enum AccessType {
-      /// ScalarAccess - The pointer is dereferenced.
-      ///
-      ScalarAccess,
-
-      /// ArrayAccess - The pointer is indexed through as an array of elements.
-      ///
-      ArrayAccess,
-
-      /// ElementAccess ?? P->F only?
-
-      /// CallsThrough - Indirect calls are made through the specified function
-      /// pointer.
-      CallsThrough
-    };
   };
 
   /// getModRefBehavior - Return the behavior when calling the given call site.
diff --git a/include/llvm/Analysis/CFGPrinter.h b/include/llvm/Analysis/CFGPrinter.h
index 6ad2e5a..ac8f596 100644
--- a/include/llvm/Analysis/CFGPrinter.h
+++ b/include/llvm/Analysis/CFGPrinter.h
@@ -1,4 +1,4 @@
-//===-- CFGPrinter.h - CFG printer external interface ------------*- C++ -*-===//
+//===-- CFGPrinter.h - CFG printer external interface -----------*- C++ -*-===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -43,8 +43,8 @@ struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits {
     return OS.str();
   }
 
-  static std::string getCompleteNodeLabel(const BasicBlock *Node, 
-                                          const Function *Graph) {
+  static std::string getCompleteNodeLabel(const BasicBlock *Node,
+                                          const Function *Graph) {
     std::string Str;
     raw_string_ostream OS(Str);
diff --git a/include/llvm/Analysis/CaptureTracking.h b/include/llvm/Analysis/CaptureTracking.h
index 493ecf5..b3390f4 100644
--- a/include/llvm/Analysis/CaptureTracking.h
+++ b/include/llvm/Analysis/CaptureTracking.h
@@ -21,9 +21,9 @@ namespace llvm {
   /// by the enclosing function (which is required to exist). This routine can
   /// be expensive, so consider caching the results. The boolean ReturnCaptures
   /// specifies whether returning the value (or part of it) from the function
-  /// counts as capturing it or not. The boolean StoreCaptures specified whether
-  /// storing the value (or part of it) into memory anywhere automatically
-  /// counts as capturing it or not.
+  /// counts as capturing it or not. The boolean StoreCaptures specified
+  /// whether storing the value (or part of it) into memory anywhere
+  /// automatically counts as capturing it or not.
   bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures,
                             bool StoreCaptures);
diff --git a/include/llvm/Analysis/CodeMetrics.h b/include/llvm/Analysis/CodeMetrics.h
new file mode 100644
index 0000000..58096f1
--- /dev/null
+++ b/include/llvm/Analysis/CodeMetrics.h
@@ -0,0 +1,72 @@
+//===- CodeMetrics.h - Measures the weight of a function---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements various weight measurements for a function, helping
+// the Inliner and PartialSpecialization decide whether to duplicate its
+// contents.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CODEMETRICS_H
+#define LLVM_ANALYSIS_CODEMETRICS_H
+
+namespace llvm {
+  // CodeMetrics - Calculate size and a few similar metrics for a set of
+  // basic blocks.
+  struct CodeMetrics {
+    /// NeverInline - True if this callee should never be inlined into a
+    /// caller.
+//    bool NeverInline;
+
+    // True if this function contains a call to setjmp or _setjmp
+    bool callsSetJmp;
+
+    // True if this function calls itself
+    bool isRecursive;
+
+    // True if this function contains one or more indirect branches
+    bool containsIndirectBr;
+
+    /// usesDynamicAlloca - True if this function calls alloca (in the C sense).
+    bool usesDynamicAlloca;
+
+    /// NumInsts, NumBlocks - Keep track of how large each function is, which
+    /// is used to estimate the code size cost of inlining it.
+    unsigned NumInsts, NumBlocks;
+
+    /// NumBBInsts - Keeps track of basic block code size estimates.
+    DenseMap<const BasicBlock *, unsigned> NumBBInsts;
+
+    /// NumCalls - Keep track of the number of calls to 'big' functions.
+    unsigned NumCalls;
+
+    /// NumVectorInsts - Keep track of how many instructions produce vector
+    /// values. The inliner is being more aggressive with inlining vector
+    /// kernels.
+    unsigned NumVectorInsts;
+
+    /// NumRets - Keep track of how many Ret instructions the block contains.
+    unsigned NumRets;
+
+    CodeMetrics() : callsSetJmp(false), isRecursive(false),
+                    containsIndirectBr(false), usesDynamicAlloca(false),
+                    NumInsts(0), NumBlocks(0), NumCalls(0), NumVectorInsts(0),
+                    NumRets(0) {}
+
+    /// analyzeBasicBlock - Add information about the specified basic block
+    /// to the current structure.
+    void analyzeBasicBlock(const BasicBlock *BB);
+
+    /// analyzeFunction - Add information about the specified function
+    /// to the current structure.
+    void analyzeFunction(Function *F);
+  };
+}
+
+#endif
diff --git a/include/llvm/Analysis/DebugInfo.h b/include/llvm/Analysis/DebugInfo.h
index 473b127..a85b6bc 100644
--- a/include/llvm/Analysis/DebugInfo.h
+++ b/include/llvm/Analysis/DebugInfo.h
@@ -56,6 +56,7 @@ namespace llvm {
     }
 
     GlobalVariable *getGlobalVariableField(unsigned Elt) const;
+    Function *getFunctionField(unsigned Elt) const;
 
   public:
     explicit DIDescriptor() : DbgNode(0) {}
@@ -409,6 +410,8 @@ namespace llvm {
     /// describes - Return true if this subprogram provides debugging
     /// information for the function F.
     bool describes(const Function *F);
+
+    Function *getFunction() const { return getFunctionField(16); }
   };
 
   /// DIGlobalVariable - This is a wrapper for a global variable.
@@ -577,7 +580,8 @@ namespace llvm {
                                         unsigned RunTimeVer = 0);
 
     /// CreateFile - Create a new descriptor for the specified file.
-    DIFile CreateFile(StringRef Filename, StringRef Directory, DICompileUnit CU);
+    DIFile CreateFile(StringRef Filename, StringRef Directory,
+                      DICompileUnit CU);
 
     /// CreateEnumerator - Create a single enumerator value.
     DIEnumerator CreateEnumerator(StringRef Name, uint64_t Val);
@@ -658,7 +662,8 @@ namespace llvm {
                                 unsigned VIndex = 0,
                                 DIType = DIType(),
                                 bool isArtificial = 0,
-                                bool isOptimized = false);
+                                bool isOptimized = false,
+                                Function *Fn = 0);
 
     /// CreateSubprogramDefinition - Create new subprogram descriptor for the
     /// given declaration.
diff --git a/include/llvm/Analysis/DominatorInternals.h b/include/llvm/Analysis/DominatorInternals.h
index 8cea96d..0419688 100644
--- a/include/llvm/Analysis/DominatorInternals.h
+++ b/include/llvm/Analysis/DominatorInternals.h
@@ -152,8 +152,9 @@ void Compress(DominatorTreeBase<typename GraphT::NodeType>& DT,
 }
 
 template<class GraphT>
-typename GraphT::NodeType* Eval(DominatorTreeBase<typename GraphT::NodeType>& DT,
-                                typename GraphT::NodeType *V) {
+typename GraphT::NodeType*
+Eval(DominatorTreeBase<typename GraphT::NodeType>& DT,
+     typename GraphT::NodeType *V) {
   typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInfo =
                                                                      DT.Info[V];
 #if !BALANCE_IDOM_TREE
@@ -265,14 +266,17 @@ void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
     // initialize the semi dominator to point to the parent node
     WInfo.Semi = WInfo.Parent;
-    for (typename GraphTraits<Inverse<NodeT> >::ChildIteratorType CI =
-         GraphTraits<Inverse<NodeT> >::child_begin(W),
-         E = GraphTraits<Inverse<NodeT> >::child_end(W); CI != E; ++CI)
-      if (DT.Info.count(*CI)) {  // Only if this predecessor is reachable!
-        unsigned SemiU = DT.Info[Eval<GraphT>(DT, *CI)].Semi;
+    typedef GraphTraits<Inverse<NodeT> > InvTraits;
+    for (typename InvTraits::ChildIteratorType CI =
+         InvTraits::child_begin(W),
+         E = InvTraits::child_end(W); CI != E; ++CI) {
+      typename InvTraits::NodeType *N = *CI;
+      if (DT.Info.count(N)) {  // Only if this predecessor is reachable!
+        unsigned SemiU = DT.Info[Eval<GraphT>(DT, N)].Semi;
         if (SemiU < WInfo.Semi)
           WInfo.Semi = SemiU;
       }
+    }
 
     DT.Info[DT.Vertex[WInfo.Semi]].Bucket.push_back(W);
diff --git a/include/llvm/Analysis/Dominators.h b/include/llvm/Analysis/Dominators.h
index f810310..1979d3f 100644
--- a/include/llvm/Analysis/Dominators.h
+++ b/include/llvm/Analysis/Dominators.h
@@ -246,22 +246,25 @@ protected:
     typename GraphT::NodeType* NewBBSucc = *GraphT::child_begin(NewBB);
 
     std::vector<typename GraphT::NodeType*> PredBlocks;
-    for (typename GraphTraits<Inverse<N> >::ChildIteratorType PI =
-         GraphTraits<Inverse<N> >::child_begin(NewBB),
-         PE = GraphTraits<Inverse<N> >::child_end(NewBB); PI != PE; ++PI)
+    typedef GraphTraits<Inverse<N> > InvTraits;
+    for (typename InvTraits::ChildIteratorType PI =
+         InvTraits::child_begin(NewBB),
+         PE = InvTraits::child_end(NewBB); PI != PE; ++PI)
       PredBlocks.push_back(*PI);
 
-    assert(!PredBlocks.empty() && "No predblocks??");
+    assert(!PredBlocks.empty() && "No predblocks?");
 
     bool NewBBDominatesNewBBSucc = true;
-    for (typename GraphTraits<Inverse<N> >::ChildIteratorType PI =
-         GraphTraits<Inverse<N> >::child_begin(NewBBSucc),
-         E = GraphTraits<Inverse<N> >::child_end(NewBBSucc); PI != E; ++PI)
-      if (*PI != NewBB && !DT.dominates(NewBBSucc, *PI) &&
-          DT.isReachableFromEntry(*PI)) {
+    for (typename InvTraits::ChildIteratorType PI =
+         InvTraits::child_begin(NewBBSucc),
+         E = InvTraits::child_end(NewBBSucc); PI != E; ++PI) {
+      typename InvTraits::NodeType *ND = *PI;
+      if (ND != NewBB && !DT.dominates(NewBBSucc, ND) &&
+          DT.isReachableFromEntry(ND)) {
         NewBBDominatesNewBBSucc = false;
         break;
       }
+    }
 
     // Find NewBB's immediate dominator and create new dominator tree node for
     // NewBB.
@@ -704,7 +707,6 @@ public:
   }
 
   ~DominatorTree() {
-    DT->releaseMemory();
     delete DT;
   }
diff --git a/include/llvm/Analysis/InlineCost.h b/include/llvm/Analysis/InlineCost.h
index cac7cfe..462bddd 100644
--- a/include/llvm/Analysis/InlineCost.h
+++ b/include/llvm/Analysis/InlineCost.h
@@ -19,6 +19,7 @@
 #include <vector>
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/ValueMap.h"
+#include "llvm/Analysis/CodeMetrics.h"
 
 namespace llvm {
 
@@ -29,46 +30,6 @@ namespace llvm {
   template<class PtrType, unsigned SmallSize>
   class SmallPtrSet;
 
-  // CodeMetrics - Calculate size and a few similar metrics for a set of
-  // basic blocks.
-  struct CodeMetrics {
-    /// NeverInline - True if this callee should never be inlined into a
-    /// caller.
-    bool NeverInline;
-
-    /// usesDynamicAlloca - True if this function calls alloca (in the C sense).
-    bool usesDynamicAlloca;
-
-    /// NumInsts, NumBlocks - Keep track of how large each function is, which
-    /// is used to estimate the code size cost of inlining it.
-    unsigned NumInsts, NumBlocks;
-
-    /// NumBBInsts - Keeps track of basic block code size estimates.
-    DenseMap<const BasicBlock *, unsigned> NumBBInsts;
-
-    /// NumCalls - Keep track of the number of calls to 'big' functions.
-    unsigned NumCalls;
-
-    /// NumVectorInsts - Keep track of how many instructions produce vector
-    /// values. The inliner is being more aggressive with inlining vector
-    /// kernels.
-    unsigned NumVectorInsts;
-
-    /// NumRets - Keep track of how many Ret instructions the block contains.
-    unsigned NumRets;
-
-    CodeMetrics() : NeverInline(false), usesDynamicAlloca(false), NumInsts(0),
-                    NumBlocks(0), NumCalls(0), NumVectorInsts(0), NumRets(0) {}
-
-    /// analyzeBasicBlock - Add information about the specified basic block
-    /// to the current structure.
-    void analyzeBasicBlock(const BasicBlock *BB);
-
-    /// analyzeFunction - Add information about the specified function
-    /// to the current structure.
-    void analyzeFunction(Function *F);
-  };
-
   namespace InlineConstants {
     // Various magic constants used to adjust heuristics.
     const int InstrCost = 5;
@@ -163,6 +124,10 @@
     /// analyzeFunction - Add information about the specified function
     /// to the current structure.
     void analyzeFunction(Function *F);
+
+    /// NeverInline - Returns true if the function should never be
+    /// inlined into any caller.
+    bool NeverInline();
   };
 
   // The Function* for a function can be changed (by ArgumentPromotion);
diff --git a/include/llvm/Analysis/IntervalIterator.h b/include/llvm/Analysis/IntervalIterator.h
index d842840..82b3294 100644
--- a/include/llvm/Analysis/IntervalIterator.h
+++ b/include/llvm/Analysis/IntervalIterator.h
@@ -36,9 +36,9 @@
 #include "llvm/Analysis/IntervalPartition.h"
 #include "llvm/Function.h"
 #include "llvm/Support/CFG.h"
-#include <stack>
-#include <set>
 #include <algorithm>
+#include <set>
+#include <vector>
 
 namespace llvm {
 
@@ -88,7 +88,7 @@ inline void addNodeToInterval(Interval *Int, Interval *I) {
 template<class NodeTy, class OrigContainer_t, class GT = GraphTraits<NodeTy*>,
          class IGT = GraphTraits<Inverse<NodeTy*> > >
 class IntervalIterator {
-  std::stack<std::pair<Interval*, typename Interval::succ_iterator> > IntStack;
+  std::vector<std::pair<Interval*, typename Interval::succ_iterator> > IntStack;
   std::set<BasicBlock*> Visited;
   OrigContainer_t *OrigContainer;
   bool IOwnMem;     // If True, delete intervals when done with them
@@ -116,15 +116,15 @@
     if (IOwnMem)
       while (!IntStack.empty()) {
         delete operator*();
-        IntStack.pop();
+        IntStack.pop_back();
       }
   }
 
   inline bool operator==(const _Self& x) const { return IntStack == x.IntStack;}
   inline bool operator!=(const _Self& x) const { return !operator==(x); }
 
-  inline const Interval *operator*() const { return IntStack.top().first; }
-  inline Interval *operator*() { return IntStack.top().first; }
+  inline const Interval *operator*() const { return IntStack.back().first; }
+  inline Interval *operator*() { return IntStack.back().first; }
   inline const Interval *operator->() const { return operator*(); }
   inline Interval *operator->() { return operator*(); }
@@ -133,8 +133,8 @@
     do {
       // All of the intervals on the stack have been visited. Try visiting
      // their successors now.
-      Interval::succ_iterator &SuccIt = IntStack.top().second,
-                                EndIt = succ_end(IntStack.top().first);
+      Interval::succ_iterator &SuccIt = IntStack.back().second,
+                                EndIt = succ_end(IntStack.back().first);
       while (SuccIt != EndIt) {   // Loop over all interval succs
         bool Done = ProcessInterval(getSourceGraphNode(OrigContainer, *SuccIt));
         ++SuccIt;  // Increment iterator
@@ -142,10 +142,10 @@
       }
 
       // Free interval memory... if necessary
-      if (IOwnMem) delete IntStack.top().first;
+      if (IOwnMem) delete IntStack.back().first;
 
       // We ran out of successors for this interval... pop off the stack
-      IntStack.pop();
+      IntStack.pop_back();
     } while (!IntStack.empty());
 
     return *this;
@@ -175,7 +175,7 @@ private:
          E = GT::child_end(Node); I != E; ++I)
       ProcessNode(Int, getSourceGraphNode(OrigContainer, *I));
 
-    IntStack.push(std::make_pair(Int, succ_begin(Int)));
+    IntStack.push_back(std::make_pair(Int, succ_begin(Int)));
     return true;
   }
diff --git a/include/llvm/Analysis/Loads.h b/include/llvm/Analysis/Loads.h
new file mode 100644
index 0000000..1574262
--- /dev/null
+++ b/include/llvm/Analysis/Loads.h
@@ -0,0 +1,51 @@
+//===- Loads.h - Local load analysis --------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares simple local analyses for load instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOADS_H
+#define LLVM_ANALYSIS_LOADS_H
+
+#include "llvm/BasicBlock.h"
+
+namespace llvm {
+
+class AliasAnalysis;
+class TargetData;
+
+/// isSafeToLoadUnconditionally - Return true if we know that executing a load
+/// from this value cannot trap. If it is not obviously safe to load from the
+/// specified pointer, we do a quick local scan of the basic block containing
+/// ScanFrom, to determine if the address is already accessed.
+bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
+                                 unsigned Align, const TargetData *TD = 0);
+
+/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at
+/// the instruction before ScanFrom) checking to see if we have the value at
+/// the memory address *Ptr locally available within a small number of
+/// instructions. If the value is available, return it.
+///
+/// If not, return the iterator for the last validated instruction that the
+/// value would be live through. If we scanned the entire block and didn't
+/// find something that invalidates *Ptr or provides it, ScanFrom would be
+/// left at begin() and this returns null. ScanFrom could also be left
+///
+/// MaxInstsToScan specifies the maximum instructions to scan in the block.
+/// If it is set to 0, it will scan the whole block. You can also optionally
+/// specify an alias analysis implementation, which makes this more precise.
+Value *FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
+                                BasicBlock::iterator &ScanFrom,
+                                unsigned MaxInstsToScan = 6,
+                                AliasAnalysis *AA = 0);
+
+}
+
+#endif
diff --git a/include/llvm/Analysis/LoopInfo.h b/include/llvm/Analysis/LoopInfo.h
index 2babc25..9455fd8 100644
--- a/include/llvm/Analysis/LoopInfo.h
+++ b/include/llvm/Analysis/LoopInfo.h
@@ -256,6 +256,27 @@ public:
   ///
   BlockT *getLoopPreheader() const {
     // Keep track of nodes outside the loop branching to the header...
+    BlockT *Out = getLoopPredecessor();
+    if (!Out) return 0;
+
+    // Make sure there is only one exit out of the preheader.
+    typedef GraphTraits<BlockT*> BlockTraits;
+    typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
+    ++SI;
+    if (SI != BlockTraits::child_end(Out))
+      return 0;  // Multiple exits from the block, must not be a preheader.
+
+    // The predecessor has exactly one successor, so it is a preheader.
+    return Out;
+  }
+
+  /// getLoopPredecessor - If the given loop's header has exactly one unique
+  /// predecessor outside the loop, return it. Otherwise return null.
+  /// This is less strict that the loop "preheader" concept, which requires
+  /// the predecessor to have exactly one successor.
+  ///
+  BlockT *getLoopPredecessor() const {
+    // Keep track of nodes outside the loop branching to the header...
     BlockT *Out = 0;
 
     // Loop over the predecessors of the header node...
@@ -264,22 +285,17 @@
     typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
     for (typename InvBlockTraits::ChildIteratorType PI =
          InvBlockTraits::child_begin(Header),
-         PE = InvBlockTraits::child_end(Header); PI != PE; ++PI)
-      if (!contains(*PI)) {     // If the block is not in the loop...
-        if (Out && Out != *PI)
+         PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
+      typename InvBlockTraits::NodeType *N = *PI;
+      if (!contains(N)) {     // If the block is not in the loop...
+        if (Out && Out != N)
           return 0;             // Multiple predecessors outside the loop
-        Out = *PI;
+        Out = N;
       }
+    }
 
     // Make sure there is only one exit out of the preheader.
     assert(Out && "Header of loop has no predecessors from outside loop?");
-    typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
-    ++SI;
-    if (SI != BlockTraits::child_end(Out))
-      return 0;  // Multiple exits from the block, must not be a preheader.
-
-    // If there is exactly one preheader, return it. If there was zero, then
-    // Out is still null.
     return Out;
   }
@@ -293,11 +309,13 @@
     typename InvBlockTraits::ChildIteratorType PE =
                                             InvBlockTraits::child_end(Header);
     BlockT *Latch = 0;
-    for (; PI != PE; ++PI)
-      if (contains(*PI)) {
+    for (; PI != PE; ++PI) {
+      typename InvBlockTraits::NodeType *N = *PI;
+      if (contains(N)) {
         if (Latch) return 0;
-        Latch = *PI;
+        Latch = N;
       }
+    }
 
     return Latch;
   }
@@ -409,10 +427,11 @@
     for (typename InvBlockTraits::ChildIteratorType PI =
          InvBlockTraits::child_begin(BB),
          PE = InvBlockTraits::child_end(BB); PI != PE; ++PI) {
-      if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), *PI))
+      typename InvBlockTraits::NodeType *N = *PI;
+      if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), N))
         HasInsideLoopPreds = true;
       else
-        OutsideLoopPreds.push_back(*PI);
+        OutsideLoopPreds.push_back(N);
     }
 
     if (BB == getHeader()) {
@@ -743,9 +762,11 @@
     typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
     for (typename InvBlockTraits::ChildIteratorType I =
          InvBlockTraits::child_begin(BB), E = InvBlockTraits::child_end(BB);
-         I != E; ++I)
-      if (DT.dominates(BB, *I))   // If BB dominates its predecessor...
-        TodoStack.push_back(*I);
+         I != E; ++I) {
+      typename InvBlockTraits::NodeType *N = *I;
+      if (DT.dominates(BB, N))   // If BB dominates its predecessor...
+        TodoStack.push_back(N);
+    }
 
     if (TodoStack.empty()) return 0;  // No backedges to this block...
diff --git a/include/llvm/Analysis/MemoryBuiltins.h b/include/llvm/Analysis/MemoryBuiltins.h
index a7f42c9..a4f9162 100644
--- a/include/llvm/Analysis/MemoryBuiltins.h
+++ b/include/llvm/Analysis/MemoryBuiltins.h
@@ -72,8 +72,8 @@ Value *getMallocArraySize(CallInst *CI, const TargetData *TD,
 //  free Call Utility Functions.
 //
 
-/// isFreeCall - Returns true if the value is a call to the builtin free()
-bool isFreeCall(const Value *I);
+/// isFreeCall - Returns non-null if the value is a call to the builtin free()
+const CallInst *isFreeCall(const Value *I);
 
 } // End llvm namespace
diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index d3a8d8f..8da3af0 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -343,10 +343,6 @@ namespace llvm {
     BackedgeTakenInfo HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                        const Loop *L, bool isSigned);
 
-    /// getLoopPredecessor - If the given loop's header has exactly one unique
-    /// predecessor outside the loop, return it. Otherwise return null.
-    BasicBlock *getLoopPredecessor(const Loop *L);
-
     /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
     /// (which may not be an immediate predecessor) which has exactly one
     /// successor from which BB is reachable, or null if no such block is
@@ -530,10 +526,6 @@
     /// widening.
     const SCEV *getTruncateOrNoop(const SCEV *V, const Type *Ty);
 
-    /// getIntegerSCEV - Given a SCEVable type, create a constant for the
-    /// specified signed integer value and return a SCEV for the constant.
-    const SCEV *getIntegerSCEV(int64_t Val, const Type *Ty);
-
    /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
     /// the types using zero-extension, and then perform a umax operation
     /// with them.
diff --git a/include/llvm/Analysis/ScalarEvolutionExpander.h b/include/llvm/Analysis/ScalarEvolutionExpander.h
index baf6946..9501555 100644
--- a/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -32,6 +32,7 @@ namespace llvm {
     std::map<std::pair<const SCEV *, Instruction *>, AssertingVH<Value> >
       InsertedExpressions;
     std::set<Value*> InsertedValues;
+    std::set<Value*> InsertedPostIncValues;
 
     /// PostIncLoops - Addrecs referring to any of the given loops are expanded
     /// in post-inc mode. For example, expanding {1,+,1}<L> in post-inc mode
@@ -102,6 +103,10 @@
     /// clearPostInc - Disable all post-inc expansion.
     void clearPostInc() {
       PostIncLoops.clear();
+
+      // When we change the post-inc loop set, cached expansions may no
+      // longer be valid.
+      InsertedPostIncValues.clear();
     }
 
     /// disableCanonicalMode - Disable the behavior of expanding expressions in
@@ -123,6 +128,14 @@
     /// of work to avoid inserting an obviously redundant operation.
     Value *InsertBinop(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS);
 
+    /// ReuseOrCreateCast - Arange for there to be a cast of V to Ty at IP,
+    /// reusing an existing cast if a suitable one exists, moving an existing
+    /// cast if a suitable one exists but isn't in the right place, or
+    /// or creating a new one.
+    Value *ReuseOrCreateCast(Value *V, const Type *Ty,
+                             Instruction::CastOps Op,
+                             BasicBlock::iterator IP);
+
     /// InsertNoopCastOfTo - Insert a cast of V to the specified type,
     /// which must be possible with a noop cast, doing what we can to
     /// share the casts.
@@ -146,7 +159,7 @@
     /// inserted by the code rewriter. If so, the client should not modify the
     /// instruction.
     bool isInsertedInstruction(Instruction *I) const {
-      return InsertedValues.count(I);
+      return InsertedValues.count(I) || InsertedPostIncValues.count(I);
     }
 
     Value *visitConstant(const SCEVConstant *S) {
diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h
index d580897..b9634f0 100644
--- a/include/llvm/Analysis/ValueTracking.h
+++ b/include/llvm/Analysis/ValueTracking.h
@@ -97,7 +97,7 @@ namespace llvm {
 
 
 
-  /// FindScalarValue - Given an aggregrate and an sequence of indices, see if
+  /// FindInsertedValue - Given an aggregrate and an sequence of indices, see if
   /// the scalar value indexed is already around as a register, for example if
   /// it were inserted directly into the aggregrate.
   ///
```
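The most visible API change in this batch is that the CodeMetrics size-estimation struct moved out of InlineCost.h into the new CodeMetrics.h, and its NeverInline flag gave way to the finer-grained callsSetJmp, isRecursive, and containsIndirectBr flags (InlineCostAnalyzer instead gained a NeverInline() query). The following is a minimal sketch of how a client pass might use the relocated struct; it relies only on the declarations visible in the diff above, and the helper name isSmallEnoughToDuplicate and its threshold parameter are illustrative, not part of this commit.

```cpp
// Sketch only: uses CodeMetrics as declared in the new
// include/llvm/Analysis/CodeMetrics.h from this update. The helper below is
// hypothetical and not part of the FreeBSD or LLVM sources.
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Function.h"

using namespace llvm;

static bool isSmallEnoughToDuplicate(Function *F, unsigned InstrThreshold) {
  CodeMetrics Metrics;
  Metrics.analyzeFunction(F);   // accumulates per-block stats over all of F

  // The new flags replace the old CodeMetrics::NeverInline bit: functions that
  // call setjmp, recurse, or contain indirect branches are rejected outright.
  if (Metrics.callsSetJmp || Metrics.isRecursive || Metrics.containsIndirectBr)
    return false;

  return Metrics.NumInsts < InstrThreshold;
}
```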
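The LoopInfo hunks split a weaker getLoopPredecessor() query out of getLoopPreheader(): a predecessor only has to be the single out-of-loop block branching to the header, while a preheader must additionally have the header as its sole successor (ScalarEvolution's private getLoopPredecessor helper is removed in the same update). A short sketch of the distinction, assuming only the two accessors shown in the diff; the helper name findInsertionBlock is hypothetical.

```cpp
// Sketch only: contrasts Loop::getLoopPreheader() with the
// Loop::getLoopPredecessor() accessor added in this update.
#include "llvm/Analysis/LoopInfo.h"

using namespace llvm;

static BasicBlock *findInsertionBlock(Loop *L) {
  // Strong form: unique outside predecessor whose only successor is the
  // header, so code placed there runs exactly when the loop is entered.
  if (BasicBlock *Preheader = L->getLoopPreheader())
    return Preheader;

  // Weak form: still a unique outside predecessor, but it may branch
  // elsewhere too, so hoisted code could run on paths that skip the loop.
  return L->getLoopPredecessor();
}
```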