Diffstat (limited to 'contrib/llvm/lib/Transforms/IPO')
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp           |  14
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp |   2
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp           | 227
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp               | 715
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp            | 102
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp            |  58
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/Inliner.cpp                 | 149
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/Internalize.cpp             |   7
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp      |  27
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/PruneEH.cpp                 |   3
10 files changed, 828 insertions(+), 476 deletions(-)
diff --git a/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp b/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
index c3ecb7a..d8fae8a 100644
--- a/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
@@ -140,18 +140,24 @@ bool ConstantMerge::runOnModule(Module &M) {
UsedGlobals.count(GV))
continue;
+ // This transformation is legal for weak ODR globals in the sense it
+ // doesn't change semantics, but we really don't want to perform it
+ // anyway; it's likely to pessimize code generation, and some tools
+ // (like the Darwin linker in cases involving CFString) don't expect it.
+ if (GV->isWeakForLinker())
+ continue;
+
Constant *Init = GV->getInitializer();
// Check to see if the initializer is already known.
PointerIntPair<Constant*, 1, bool> Pair(Init, hasKnownAlignment(GV));
GlobalVariable *&Slot = CMap[Pair];
- // If this is the first constant we find or if the old on is local,
- // replace with the current one. It the current is externally visible
+ // If this is the first constant we find or if the old one is local,
+ // replace with the current one. If the current is externally visible
// it cannot be replaced, but can be the canonical constant we merge with.
- if (Slot == 0 || IsBetterCannonical(*GV, *Slot)) {
+ if (Slot == 0 || IsBetterCannonical(*GV, *Slot))
Slot = GV;
- }
}
// Second: identify all globals that can be merged together, filling in
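
For reference, the merge-eligibility logic after this hunk reduces to the sketch below; a minimal sketch, assuming ConstantMerge's own helpers (hasKnownAlignment, IsBetterCannonical) and its CMap are in scope:

    // Globals with weak-style linkage (linkonce, weak, etc.) are never merge
    // candidates; everything else is keyed on (initializer, known-alignment)
    // and the most-visible definition so far wins the canonical slot.
    if (!GV->isWeakForLinker()) {
      PointerIntPair<Constant*, 1, bool> Key(GV->getInitializer(),
                                             hasKnownAlignment(GV));
      GlobalVariable *&Slot = CMap[Key];
      if (Slot == 0 || IsBetterCannonical(*GV, *Slot))
        Slot = GV;
    }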
diff --git a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 4bb6f7a..95aef27 100644
--- a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -74,7 +74,7 @@ namespace {
std::string getDescription() const {
return std::string((IsArg ? "Argument #" : "Return value #"))
- + utostr(Idx) + " of function " + F->getNameStr();
+ + utostr(Idx) + " of function " + F->getName().str();
}
};
diff --git a/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 0edf342..f3f6228 100644
--- a/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -27,6 +27,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
+#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/UniqueVector.h"
@@ -225,31 +226,247 @@ bool FunctionAttrs::AddReadAttrs(const CallGraphSCC &SCC) {
return MadeChange;
}
+namespace {
+ // For a given pointer Argument, this retains a list of Arguments of functions
+ // in the same SCC that the pointer data flows into. We use this to build an
+ // SCC of the arguments.
+ struct ArgumentGraphNode {
+ Argument *Definition;
+ SmallVector<ArgumentGraphNode*, 4> Uses;
+ };
+
+ class ArgumentGraph {
+    // We store pointers to ArgumentGraphNode objects, so it's important
+    // that they not move around upon insert.
+ typedef std::map<Argument*, ArgumentGraphNode> ArgumentMapTy;
+
+ ArgumentMapTy ArgumentMap;
+
+ // There is no root node for the argument graph, in fact:
+ // void f(int *x, int *y) { if (...) f(x, y); }
+ // is an example where the graph is disconnected. The SCCIterator requires a
+ // single entry point, so we maintain a fake ("synthetic") root node that
+ // uses every node. Because the graph is directed and nothing points into
+ // the root, it will not participate in any SCCs (except for its own).
+ ArgumentGraphNode SyntheticRoot;
+
+ public:
+ ArgumentGraph() { SyntheticRoot.Definition = 0; }
+
+ typedef SmallVectorImpl<ArgumentGraphNode*>::iterator iterator;
+
+ iterator begin() { return SyntheticRoot.Uses.begin(); }
+ iterator end() { return SyntheticRoot.Uses.end(); }
+ ArgumentGraphNode *getEntryNode() { return &SyntheticRoot; }
+
+ ArgumentGraphNode *operator[](Argument *A) {
+ ArgumentGraphNode &Node = ArgumentMap[A];
+ Node.Definition = A;
+ SyntheticRoot.Uses.push_back(&Node);
+ return &Node;
+ }
+ };
+
+ // This tracker checks whether callees are in the SCC, and if so it does not
+ // consider that a capture, instead adding it to the "Uses" list and
+ // continuing with the analysis.
+ struct ArgumentUsesTracker : public CaptureTracker {
+ ArgumentUsesTracker(const SmallPtrSet<Function*, 8> &SCCNodes)
+ : Captured(false), SCCNodes(SCCNodes) {}
+
+ void tooManyUses() { Captured = true; }
+
+ bool shouldExplore(Use *U) { return true; }
+
+ bool captured(Use *U) {
+ CallSite CS(U->getUser());
+ if (!CS.getInstruction()) { Captured = true; return true; }
+
+ Function *F = CS.getCalledFunction();
+ if (!F || !SCCNodes.count(F)) { Captured = true; return true; }
+
+ Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+ for (CallSite::arg_iterator PI = CS.arg_begin(), PE = CS.arg_end();
+ PI != PE; ++PI, ++AI) {
+ if (AI == AE) {
+ assert(F->isVarArg() && "More params than args in non-varargs call");
+ Captured = true;
+ return true;
+ }
+ if (PI == U) {
+ Uses.push_back(AI);
+ break;
+ }
+ }
+ assert(!Uses.empty() && "Capturing call-site captured nothing?");
+ return false;
+ }
+
+ bool Captured; // True only if certainly captured (used outside our SCC).
+ SmallVector<Argument*, 4> Uses; // Uses within our SCC.
+
+ const SmallPtrSet<Function*, 8> &SCCNodes;
+ };
+}
+
+namespace llvm {
+ template<> struct GraphTraits<ArgumentGraphNode*> {
+ typedef ArgumentGraphNode NodeType;
+ typedef SmallVectorImpl<ArgumentGraphNode*>::iterator ChildIteratorType;
+
+ static inline NodeType *getEntryNode(NodeType *A) { return A; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->Uses.begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->Uses.end();
+ }
+ };
+ template<> struct GraphTraits<ArgumentGraph*>
+ : public GraphTraits<ArgumentGraphNode*> {
+ static NodeType *getEntryNode(ArgumentGraph *AG) {
+ return AG->getEntryNode();
+ }
+ static ChildIteratorType nodes_begin(ArgumentGraph *AG) {
+ return AG->begin();
+ }
+ static ChildIteratorType nodes_end(ArgumentGraph *AG) {
+ return AG->end();
+ }
+ };
+}
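
The two GraphTraits specializations are what let the generic scc_iterator walk the argument graph: the ArgumentGraph overload supplies the synthetic root as the entry node, and the node overload exposes Uses as the child edges. A worked example of why the synthetic root is needed (comments only, illustrative):

    // void f(int *x) { if (...) f(x); }   // argument SCC {f.x}
    // void g(int *y) { if (...) g(y); }   // argument SCC {g.y}
    //
    // Neither argument's data flows into the other, so the graph has two
    // disconnected components. scc_begin() needs a single entry node; the
    // synthetic root has an edge to every node, making both components
    // reachable, and since nothing points back at it, it only ever forms
    // its own singleton SCC (which consumers skip via Definition == 0).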
+
/// AddNoCaptureAttrs - Deduce nocapture attributes for the SCC.
bool FunctionAttrs::AddNoCaptureAttrs(const CallGraphSCC &SCC) {
bool Changed = false;
+ SmallPtrSet<Function*, 8> SCCNodes;
+
+ // Fill SCCNodes with the elements of the SCC. Used for quickly
+ // looking up whether a given CallGraphNode is in this SCC.
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
+ Function *F = (*I)->getFunction();
+ if (F && !F->isDeclaration() && !F->mayBeOverridden())
+ SCCNodes.insert(F);
+ }
+
+ ArgumentGraph AG;
+
// Check each function in turn, determining which pointer arguments are not
// captured.
for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
Function *F = (*I)->getFunction();
if (F == 0)
- // External node - skip it;
+ // External node - only a problem for arguments that we pass to it.
continue;
// Definitions with weak linkage may be overridden at linktime with
- // something that writes memory, so treat them like declarations.
+ // something that captures pointers, so treat them like declarations.
if (F->isDeclaration() || F->mayBeOverridden())
continue;
+ // Functions that are readonly (or readnone) and nounwind and don't return
+ // a value can't capture arguments. Don't analyze them.
+ if (F->onlyReadsMemory() && F->doesNotThrow() &&
+ F->getReturnType()->isVoidTy()) {
+ for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end();
+ A != E; ++A) {
+ if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) {
+ A->addAttr(Attribute::NoCapture);
+ ++NumNoCapture;
+ Changed = true;
+ }
+ }
+ continue;
+ }
+
for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A!=E; ++A)
- if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr() &&
- !PointerMayBeCaptured(A, true, /*StoreCaptures=*/false)) {
- A->addAttr(Attribute::NoCapture);
+ if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) {
+ ArgumentUsesTracker Tracker(SCCNodes);
+ PointerMayBeCaptured(A, &Tracker);
+ if (!Tracker.Captured) {
+ if (Tracker.Uses.empty()) {
+ // If it's trivially not captured, mark it nocapture now.
+ A->addAttr(Attribute::NoCapture);
+ ++NumNoCapture;
+ Changed = true;
+ } else {
+ // If it's not trivially captured and not trivially not captured,
+ // then it must be calling into another function in our SCC. Save
+ // its particulars for Argument-SCC analysis later.
+ ArgumentGraphNode *Node = AG[A];
+ for (SmallVectorImpl<Argument*>::iterator UI = Tracker.Uses.begin(),
+ UE = Tracker.Uses.end(); UI != UE; ++UI)
+ Node->Uses.push_back(AG[*UI]);
+ }
+ }
+ // Otherwise, it's captured. Don't bother doing SCC analysis on it.
+ }
+ }
+
+ // The graph we've collected is partial because we stopped scanning for
+ // argument uses once we solved the argument trivially. These partial nodes
+ // show up as ArgumentGraphNode objects with an empty Uses list, and for
+ // these nodes the final decision about whether they capture has already been
+ // made. If the definition doesn't have a 'nocapture' attribute by now, it
+ // captures.
+
+ for (scc_iterator<ArgumentGraph*> I = scc_begin(&AG), E = scc_end(&AG);
+ I != E; ++I) {
+ std::vector<ArgumentGraphNode*> &ArgumentSCC = *I;
+ if (ArgumentSCC.size() == 1) {
+ if (!ArgumentSCC[0]->Definition) continue; // synthetic root node
+
+      // e.g. "void f(int* x) { if (...) f(x); }"
+ if (ArgumentSCC[0]->Uses.size() == 1 &&
+ ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) {
+ ArgumentSCC[0]->Definition->addAttr(Attribute::NoCapture);
++NumNoCapture;
Changed = true;
}
+ continue;
+ }
+
+ bool SCCCaptured = false;
+ for (std::vector<ArgumentGraphNode*>::iterator I = ArgumentSCC.begin(),
+ E = ArgumentSCC.end(); I != E && !SCCCaptured; ++I) {
+ ArgumentGraphNode *Node = *I;
+ if (Node->Uses.empty()) {
+ if (!Node->Definition->hasNoCaptureAttr())
+ SCCCaptured = true;
+ }
+ }
+ if (SCCCaptured) continue;
+
+ SmallPtrSet<Argument*, 8> ArgumentSCCNodes;
+ // Fill ArgumentSCCNodes with the elements of the ArgumentSCC. Used for
+ // quickly looking up whether a given Argument is in this ArgumentSCC.
+ for (std::vector<ArgumentGraphNode*>::iterator I = ArgumentSCC.begin(),
+ E = ArgumentSCC.end(); I != E; ++I) {
+ ArgumentSCCNodes.insert((*I)->Definition);
+ }
+
+ for (std::vector<ArgumentGraphNode*>::iterator I = ArgumentSCC.begin(),
+ E = ArgumentSCC.end(); I != E && !SCCCaptured; ++I) {
+ ArgumentGraphNode *N = *I;
+ for (SmallVectorImpl<ArgumentGraphNode*>::iterator UI = N->Uses.begin(),
+ UE = N->Uses.end(); UI != UE; ++UI) {
+ Argument *A = (*UI)->Definition;
+ if (A->hasNoCaptureAttr() || ArgumentSCCNodes.count(A))
+ continue;
+ SCCCaptured = true;
+ break;
+ }
+ }
+ if (SCCCaptured) continue;
+
+ for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
+ Argument *A = ArgumentSCC[i]->Definition;
+ A->addAttr(Attribute::NoCapture);
+ ++NumNoCapture;
+ Changed = true;
+ }
}
return Changed;
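
The practical effect of the argument-SCC analysis: pointer arguments that are only ever forwarded between functions of the same SCC are now provable nocapture, where the old per-argument PointerMayBeCaptured call had to give up. An illustrative input (C-like, in comments):

    // void f(int *x) { if (*x) g(x); }  // x flows only to g's parameter
    // void g(int *y) { if (*y) f(y); }  // y flows only back to f's parameter
    //
    // ArgumentUsesTracker records the cross-edges f.x -> g.y and g.y -> f.x
    // instead of declaring a capture; the SCC {f.x, g.y} has no member that
    // escapes, so both arguments receive the nocapture attribute.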
diff --git a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 3552d03..1522aa4 100644
--- a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -26,6 +26,7 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -61,6 +62,7 @@ namespace {
struct GlobalStatus;
struct GlobalOpt : public ModulePass {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<TargetLibraryInfo>();
}
static char ID; // Pass identification, replacement for typeid
GlobalOpt() : ModulePass(ID) {
@@ -80,11 +82,17 @@ namespace {
const SmallPtrSet<const PHINode*, 16> &PHIUsers,
const GlobalStatus &GS);
bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);
+
+ TargetData *TD;
+ TargetLibraryInfo *TLI;
};
}
char GlobalOpt::ID = 0;
-INITIALIZE_PASS(GlobalOpt, "globalopt",
+INITIALIZE_PASS_BEGIN(GlobalOpt, "globalopt",
+ "Global Variable Optimizer", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_END(GlobalOpt, "globalopt",
"Global Variable Optimizer", false, false)
ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); }
@@ -143,18 +151,31 @@ struct GlobalStatus {
/// HasPHIUser - Set to true if this global has a user that is a PHI node.
bool HasPHIUser;
+ /// AtomicOrdering - Set to the strongest atomic ordering requirement.
+ AtomicOrdering Ordering;
+
GlobalStatus() : isCompared(false), isLoaded(false), StoredType(NotStored),
StoredOnceValue(0), AccessingFunction(0),
- HasMultipleAccessingFunctions(false), HasNonInstructionUser(false),
- HasPHIUser(false) {}
+ HasMultipleAccessingFunctions(false),
+ HasNonInstructionUser(false), HasPHIUser(false),
+ Ordering(NotAtomic) {}
};
}
-// SafeToDestroyConstant - It is safe to destroy a constant iff it is only used
-// by constants itself. Note that constants cannot be cyclic, so this test is
-// pretty easy to implement recursively.
-//
+/// StrongerOrdering - Return the stronger of the two orderings. If the two
+/// orderings are acquire and release, then return AcquireRelease.
+///
+static AtomicOrdering StrongerOrdering(AtomicOrdering X, AtomicOrdering Y) {
+ if (X == Acquire && Y == Release) return AcquireRelease;
+ if (Y == Acquire && X == Release) return AcquireRelease;
+ return (AtomicOrdering)std::max(X, Y);
+}
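
In the AtomicOrdering enumeration of this era (NotAtomic < Unordered < Monotonic < Acquire < Release < AcquireRelease < SequentiallyConsistent), every pair except Acquire/Release is totally ordered by strength, so std::max is correct once that one incomparable pair is special-cased. For example:

    assert(StrongerOrdering(NotAtomic, Monotonic) == Monotonic);
    assert(StrongerOrdering(Acquire, Release) == AcquireRelease); // special case
    assert(StrongerOrdering(Release, SequentiallyConsistent)
             == SequentiallyConsistent);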
+
+/// SafeToDestroyConstant - It is safe to destroy a constant iff it is only
+/// used by other constants. Note that constants cannot be cyclic, so this
+/// test is pretty easy to implement recursively.
+///
static bool SafeToDestroyConstant(const Constant *C) {
if (isa<GlobalValue>(C)) return false;
@@ -195,14 +216,16 @@ static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
}
if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
GS.isLoaded = true;
- // Don't hack on volatile/atomic loads.
- if (!LI->isSimple()) return true;
+ // Don't hack on volatile loads.
+ if (LI->isVolatile()) return true;
+ GS.Ordering = StrongerOrdering(GS.Ordering, LI->getOrdering());
} else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
// Don't allow a store OF the address, only stores TO the address.
if (SI->getOperand(0) == V) return true;
- // Don't hack on volatile/atomic stores.
- if (!SI->isSimple()) return true;
+ // Don't hack on volatile stores.
+ if (SI->isVolatile()) return true;
+ GS.Ordering = StrongerOrdering(GS.Ordering, SI->getOrdering());
// If this is a direct store to the global (i.e., the global is a scalar
// value, not an aggregate), keep more specific information about
@@ -271,43 +294,12 @@ static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
return false;
}
-static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx) {
- ConstantInt *CI = dyn_cast<ConstantInt>(Idx);
- if (!CI) return 0;
- unsigned IdxV = CI->getZExtValue();
-
- if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Agg)) {
- if (IdxV < CS->getNumOperands()) return CS->getOperand(IdxV);
- } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Agg)) {
- if (IdxV < CA->getNumOperands()) return CA->getOperand(IdxV);
- } else if (ConstantVector *CP = dyn_cast<ConstantVector>(Agg)) {
- if (IdxV < CP->getNumOperands()) return CP->getOperand(IdxV);
- } else if (isa<ConstantAggregateZero>(Agg)) {
- if (StructType *STy = dyn_cast<StructType>(Agg->getType())) {
- if (IdxV < STy->getNumElements())
- return Constant::getNullValue(STy->getElementType(IdxV));
- } else if (SequentialType *STy =
- dyn_cast<SequentialType>(Agg->getType())) {
- return Constant::getNullValue(STy->getElementType());
- }
- } else if (isa<UndefValue>(Agg)) {
- if (StructType *STy = dyn_cast<StructType>(Agg->getType())) {
- if (IdxV < STy->getNumElements())
- return UndefValue::get(STy->getElementType(IdxV));
- } else if (SequentialType *STy =
- dyn_cast<SequentialType>(Agg->getType())) {
- return UndefValue::get(STy->getElementType());
- }
- }
- return 0;
-}
-
-
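
The helper deleted above is subsumed by Constant::getAggregateElement, which the SRAGlobal hunks below switch to; a minimal sketch of the replacement, assuming a Constant *Init of aggregate type:

    // Old: getAggregateConstantElement(Init,
    //          ConstantInt::get(Type::getInt32Ty(Ctx), i));
    // New:
    Constant *In = Init->getAggregateElement(i);
    // Handles ConstantStruct/Array/Vector directly, synthesizes the element
    // for ConstantAggregateZero (null) and UndefValue (undef), and returns
    // 0 if the index is out of range for the type.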
/// CleanupConstantGlobalUsers - We just marked GV constant. Loop over all
/// users of the global, cleaning up the obvious ones. This is largely just a
/// quick scan over the use list to clean up the easy and obvious cruft. This
/// returns true if it made a change.
-static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) {
+static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
+ TargetData *TD, TargetLibraryInfo *TLI) {
bool Changed = false;
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
User *U = *UI++;
@@ -328,11 +320,11 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) {
Constant *SubInit = 0;
if (Init)
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
- Changed |= CleanupConstantGlobalUsers(CE, SubInit);
+ Changed |= CleanupConstantGlobalUsers(CE, SubInit, TD, TLI);
} else if (CE->getOpcode() == Instruction::BitCast &&
CE->getType()->isPointerTy()) {
// Pointer cast, delete any stores and memsets to the global.
- Changed |= CleanupConstantGlobalUsers(CE, 0);
+ Changed |= CleanupConstantGlobalUsers(CE, 0, TD, TLI);
}
if (CE->use_empty()) {
@@ -346,11 +338,17 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) {
Constant *SubInit = 0;
if (!isa<ConstantExpr>(GEP->getOperand(0))) {
ConstantExpr *CE =
- dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP));
+ dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, TD, TLI));
if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
+
+ // If the initializer is an all-null value and we have an inbounds GEP,
+ // we already know what the result of any load from that GEP is.
+ // TODO: Handle splats.
+ if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
+ SubInit = Constant::getNullValue(GEP->getType()->getElementType());
}
- Changed |= CleanupConstantGlobalUsers(GEP, SubInit);
+ Changed |= CleanupConstantGlobalUsers(GEP, SubInit, TD, TLI);
if (GEP->use_empty()) {
GEP->eraseFromParent();
@@ -368,7 +366,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) {
if (SafeToDestroyConstant(C)) {
C->destroyConstant();
// This could have invalidated UI, start over from scratch.
- CleanupConstantGlobalUsers(V, Init);
+ CleanupConstantGlobalUsers(V, Init, TD, TLI);
return true;
}
}
@@ -514,8 +512,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
NewGlobals.reserve(STy->getNumElements());
const StructLayout &Layout = *TD.getStructLayout(STy);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- Constant *In = getAggregateConstantElement(Init,
- ConstantInt::get(Type::getInt32Ty(STy->getContext()), i));
+ Constant *In = Init->getAggregateElement(i);
assert(In && "Couldn't get element of initializer?");
GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
GlobalVariable::InternalLinkage,
@@ -547,8 +544,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType());
unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
for (unsigned i = 0, e = NumElements; i != e; ++i) {
- Constant *In = getAggregateConstantElement(Init,
- ConstantInt::get(Type::getInt32Ty(Init->getContext()), i));
+ Constant *In = Init->getAggregateElement(i);
assert(In && "Couldn't get element of initializer?");
GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
@@ -770,7 +766,9 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
/// value stored into it. If there are uses of the loaded value that would trap
/// if the loaded value is dynamically null, then we know that they cannot be
/// reachable with a null value, so we can optimize away the load.
-static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV) {
+static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
+ TargetData *TD,
+ TargetLibraryInfo *TLI) {
bool Changed = false;
// Keep track of whether we are able to remove all the uses of the global
@@ -813,7 +811,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV) {
// nor is the global.
if (AllNonStoreUsesGone) {
DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n");
- CleanupConstantGlobalUsers(GV, 0);
+ CleanupConstantGlobalUsers(GV, 0, TD, TLI);
if (GV->use_empty()) {
GV->eraseFromParent();
++NumDeleted;
@@ -825,10 +823,11 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV) {
/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
-static void ConstantPropUsersOf(Value *V) {
+static void ConstantPropUsersOf(Value *V,
+ TargetData *TD, TargetLibraryInfo *TLI) {
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
if (Instruction *I = dyn_cast<Instruction>(*UI++))
- if (Constant *NewC = ConstantFoldInstruction(I)) {
+ if (Constant *NewC = ConstantFoldInstruction(I, TD, TLI)) {
I->replaceAllUsesWith(NewC);
// Advance UI to the next non-I use to avoid invalidating it!
@@ -848,7 +847,8 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
CallInst *CI,
Type *AllocTy,
ConstantInt *NElements,
- TargetData* TD) {
+ TargetData *TD,
+ TargetLibraryInfo *TLI) {
DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n');
Type *GlobalType;
@@ -906,7 +906,8 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
while (!GV->use_empty()) {
if (StoreInst *SI = dyn_cast<StoreInst>(GV->use_back())) {
// The global is initialized when the store to it occurs.
- new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, SI);
+ new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
+ SI->getOrdering(), SI->getSynchScope(), SI);
SI->eraseFromParent();
continue;
}
@@ -921,7 +922,10 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
ICmpInst *ICI = cast<ICmpInst>(LoadUse.getUser());
// Replace the cmp X, 0 with a use of the bool value.
- Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", ICI);
+ // Sink the load to where the compare was, if atomic rules allow us to.
+ Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
+ LI->getOrdering(), LI->getSynchScope(),
+ LI->isUnordered() ? (Instruction*)ICI : LI);
InitBoolUsed = true;
switch (ICI->getPredicate()) {
default: llvm_unreachable("Unknown ICmp Predicate!");
@@ -962,9 +966,9 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
// To further other optimizations, loop over all users of NewGV and try to
// constant prop them. This will promote GEP instructions with constant
// indices into GEP constant-exprs, which will allow global-opt to hack on it.
- ConstantPropUsersOf(NewGV);
+ ConstantPropUsersOf(NewGV, TD, TLI);
if (RepValue != NewGV)
- ConstantPropUsersOf(RepValue);
+ ConstantPropUsersOf(RepValue, TD, TLI);
return NewGV;
}
@@ -1203,7 +1207,6 @@ static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
} else {
llvm_unreachable("Unknown usable value");
- Result = 0;
}
return FieldVals[FieldNo] = Result;
@@ -1293,9 +1296,9 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
/// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break
/// it up into multiple allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
- Value* NElems, TargetData *TD) {
+ Value *NElems, TargetData *TD) {
DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n');
- Type* MAT = getMallocAllocatedType(CI);
+ Type *MAT = getMallocAllocatedType(CI);
StructType *STy = cast<StructType>(MAT);
// There is guaranteed to be at least one use of the malloc (storing
@@ -1482,8 +1485,10 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
CallInst *CI,
Type *AllocTy,
+ AtomicOrdering Ordering,
Module::global_iterator &GVI,
- TargetData *TD) {
+ TargetData *TD,
+ TargetLibraryInfo *TLI) {
if (!TD)
return false;
@@ -1502,7 +1507,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// We can't optimize this if the malloc itself is used in a complex way,
// for example, being stored into multiple globals. This allows the
- // malloc to be stored into the specified global, loaded setcc'd, and
+ // malloc to be stored into the specified global, loaded icmp'd, and
// GEP'd. These are all things we could transform to using the global
// for.
SmallPtrSet<const PHINode*, 8> PHIs;
@@ -1523,7 +1528,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// (2048 bytes currently), as we don't want to introduce a 16M global or
// something.
if (NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) {
- GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD);
+ GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD, TLI);
return true;
}
@@ -1531,6 +1536,9 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// into multiple malloc'd arrays, one for each field. This is basically
// SRoA for malloc'd memory.
+ if (Ordering != NotAtomic)
+ return false;
+
// If this is an allocation of a fixed size array of structs, analyze as a
// variable size array. malloc [100 x struct],1 -> malloc struct, 100
if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
@@ -1563,7 +1571,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
extractMallocCallFromBitCast(Malloc) : cast<CallInst>(Malloc);
}
- GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, true),TD);
+ GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, true), TD);
return true;
}
@@ -1573,8 +1581,9 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge
// that only one value (besides its initializer) is ever stored to the global.
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
+ AtomicOrdering Ordering,
Module::global_iterator &GVI,
- TargetData *TD) {
+ TargetData *TD, TargetLibraryInfo *TLI) {
// Ignore no-op GEPs and bitcasts.
StoredOnceVal = StoredOnceVal->stripPointerCasts();
@@ -1589,12 +1598,13 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());
// Optimize away any trapping uses of the loaded value.
- if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC))
+ if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, TD, TLI))
return true;
} else if (CallInst *CI = extractMallocCall(StoredOnceVal)) {
- Type* MallocType = getMallocAllocatedType(CI);
- if (MallocType && TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
- GVI, TD))
+ Type *MallocType = getMallocAllocatedType(CI);
+ if (MallocType &&
+ TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI,
+ TD, TLI))
return true;
}
}
@@ -1670,7 +1680,8 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
assert(LI->getOperand(0) == GV && "Not a copy!");
// Insert a new load, to preserve the saved value.
- StoreVal = new LoadInst(NewGV, LI->getName()+".b", LI);
+ StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0,
+ LI->getOrdering(), LI->getSynchScope(), LI);
} else {
assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
"This is not a form that we understand!");
@@ -1678,11 +1689,13 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
}
}
- new StoreInst(StoreVal, NewGV, SI);
+ new StoreInst(StoreVal, NewGV, false, 0,
+ SI->getOrdering(), SI->getSynchScope(), SI);
} else {
// Change the load into a load of bool then a select.
LoadInst *LI = cast<LoadInst>(UI);
- LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", LI);
+ LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0,
+ LI->getOrdering(), LI->getSynchScope(), LI);
Value *NSI;
if (IsOneZero)
NSI = new ZExtInst(NLI, LI->getType(), "", LI);
@@ -1699,8 +1712,8 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
}
-/// ProcessInternalGlobal - Analyze the specified global variable and optimize
-/// it if possible. If we make a change, return true.
+/// ProcessGlobal - Analyze the specified global variable and optimize it if
+/// possible. If we make a change, return true.
bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
Module::global_iterator &GVI) {
if (!GV->hasLocalLinkage())
@@ -1737,7 +1750,7 @@ bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
/// it if possible. If we make a change, return true.
bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
Module::global_iterator &GVI,
- const SmallPtrSet<const PHINode*, 16> &PHIUsers,
+ const SmallPtrSet<const PHINode*, 16> &PHIUsers,
const GlobalStatus &GS) {
// If this is a first class global and has only one accessing function
// and this function is main (which we know is not recursive), we can make
@@ -1755,11 +1768,11 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GS.AccessingFunction->hasExternalLinkage() &&
GV->getType()->getAddressSpace() == 0) {
DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV);
- Instruction& FirstI = const_cast<Instruction&>(*GS.AccessingFunction
+ Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
->getEntryBlock().begin());
- Type* ElemTy = GV->getType()->getElementType();
+ Type *ElemTy = GV->getType()->getElementType();
// FIXME: Pass Global's alignment when globals have alignment
- AllocaInst* Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
+ AllocaInst *Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
if (!isa<UndefValue>(GV->getInitializer()))
new StoreInst(GV->getInitializer(), Alloca, &FirstI);
@@ -1776,7 +1789,8 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
// Delete any stores we can find to the global. We may not be able to
// make it completely dead though.
- bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer());
+ bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(),
+ TD, TLI);
// If the global is dead now, delete it.
if (GV->use_empty()) {
@@ -1791,7 +1805,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GV->setConstant(true);
// Clean up any obviously simplifiable users now.
- CleanupConstantGlobalUsers(GV, GV->getInitializer());
+ CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
// If the global is dead now, just nuke it.
if (GV->use_empty()) {
@@ -1820,7 +1834,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GV->setInitializer(SOVConstant);
// Clean up any obviously simplifiable users now.
- CleanupConstantGlobalUsers(GV, GV->getInitializer());
+ CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
if (GV->use_empty()) {
DEBUG(dbgs() << " *** Substituting initializer allowed us to "
@@ -1836,8 +1850,8 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
// Try to optimize globals based on the knowledge that only one value
// (besides its initializer) is ever stored to the global.
- if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GVI,
- getAnalysisIfAvailable<TargetData>()))
+ if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, GVI,
+ TD, TLI))
return true;
// Otherwise, if the global was not a boolean, we can shrink it to be a
@@ -1890,7 +1904,7 @@ bool GlobalOpt::OptimizeFunctions(Module &M) {
if (!F->hasName() && !F->isDeclaration())
F->setLinkage(GlobalValue::InternalLinkage);
F->removeDeadConstantUsers();
- if (F->use_empty() && (F->hasLocalLinkage() || F->hasLinkOnceLinkage())) {
+ if (F->isDefTriviallyDead()) {
F->eraseFromParent();
Changed = true;
++NumFnDeleted;
@@ -1930,8 +1944,7 @@ bool GlobalOpt::OptimizeGlobalVars(Module &M) {
// Simplify the initializer.
if (GV->hasInitializer())
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
- TargetData *TD = getAnalysisIfAvailable<TargetData>();
- Constant *New = ConstantFoldConstantExpression(CE, TD);
+ Constant *New = ConstantFoldConstantExpression(CE, TD, TLI);
if (New && New != CE)
GV->setInitializer(New);
}
@@ -2052,16 +2065,10 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
}
-static Constant *getVal(DenseMap<Value*, Constant*> &ComputedValues, Value *V) {
- if (Constant *CV = dyn_cast<Constant>(V)) return CV;
- Constant *R = ComputedValues[V];
- assert(R && "Reference to an uncomputed value!");
- return R;
-}
-
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
- SmallPtrSet<Constant*, 8> &SimpleConstants);
+ SmallPtrSet<Constant*, 8> &SimpleConstants,
+ const TargetData *TD);
/// isSimpleEnoughValueToCommit - Return true if the specified constant can be
@@ -2073,7 +2080,8 @@ isSimpleEnoughValueToCommit(Constant *C,
/// in SimpleConstants to avoid having to rescan the same constants all the
/// time.
static bool isSimpleEnoughValueToCommitHelper(Constant *C,
- SmallPtrSet<Constant*, 8> &SimpleConstants) {
+ SmallPtrSet<Constant*, 8> &SimpleConstants,
+ const TargetData *TD) {
// Simple integer, undef, constant aggregate zero, global addresses, etc are
// all supported.
if (C->getNumOperands() == 0 || isa<BlockAddress>(C) ||
@@ -2085,7 +2093,7 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,
isa<ConstantVector>(C)) {
for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
Constant *Op = cast<Constant>(C->getOperand(i));
- if (!isSimpleEnoughValueToCommit(Op, SimpleConstants))
+ if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, TD))
return false;
}
return true;
@@ -2097,34 +2105,42 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,
ConstantExpr *CE = cast<ConstantExpr>(C);
switch (CE->getOpcode()) {
case Instruction::BitCast:
+ // Bitcast is fine if the casted value is fine.
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
+
case Instruction::IntToPtr:
case Instruction::PtrToInt:
- // These casts are always fine if the casted value is.
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants);
+ // int <=> ptr is fine if the int type is the same size as the
+ // pointer type.
+ if (!TD || TD->getTypeSizeInBits(CE->getType()) !=
+ TD->getTypeSizeInBits(CE->getOperand(0)->getType()))
+ return false;
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
// GEP is fine if it is simple + constant offset.
case Instruction::GetElementPtr:
for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
if (!isa<ConstantInt>(CE->getOperand(i)))
return false;
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants);
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
case Instruction::Add:
// We allow simple+cst.
if (!isa<ConstantInt>(CE->getOperand(1)))
return false;
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants);
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
}
return false;
}
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
- SmallPtrSet<Constant*, 8> &SimpleConstants) {
+ SmallPtrSet<Constant*, 8> &SimpleConstants,
+ const TargetData *TD) {
// If we already checked this constant, we win.
if (!SimpleConstants.insert(C)) return true;
// Check the constant.
- return isSimpleEnoughValueToCommitHelper(C, SimpleConstants);
+ return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, TD);
}
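
The new TargetData parameter feeds exactly one rule: an IntToPtr or PtrToInt cast is only committable when the integer type and the pointer type have the same bit width, since anything else implies an implicit truncation or extension the static evaluator cannot commit. On a target with 64-bit pointers, for example:

    //   ptrtoint i8* @g to i64   -- same width; simple if the operand is
    //   inttoptr i64 %x to i8*      simple (recurses on the operand)
    //   ptrtoint i8* @g to i32   -- truncates; rejected
    //   inttoptr i32 %x to i8*   -- extends; rejected
    //   (and with TD == 0 the sizes are unknown, so both are rejected)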
@@ -2191,23 +2207,11 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
return Val;
}
- std::vector<Constant*> Elts;
+ SmallVector<Constant*, 32> Elts;
if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
-
// Break up the constant into its elements.
- if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
- for (User::op_iterator i = CS->op_begin(), e = CS->op_end(); i != e; ++i)
- Elts.push_back(cast<Constant>(*i));
- } else if (isa<ConstantAggregateZero>(Init)) {
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
- Elts.push_back(Constant::getNullValue(STy->getElementType(i)));
- } else if (isa<UndefValue>(Init)) {
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
- Elts.push_back(UndefValue::get(STy->getElementType(i)));
- } else {
- llvm_unreachable("This code is out of sync with "
- " ConstantFoldLoadThroughGEPConstantExpr");
- }
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
+ Elts.push_back(Init->getAggregateElement(i));
// Replace the element that we are supposed to.
ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
@@ -2226,22 +2230,11 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
if (ArrayType *ATy = dyn_cast<ArrayType>(InitTy))
NumElts = ATy->getNumElements();
else
- NumElts = cast<VectorType>(InitTy)->getNumElements();
+ NumElts = InitTy->getVectorNumElements();
// Break up the array into elements.
- if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
- for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i)
- Elts.push_back(cast<Constant>(*i));
- } else if (ConstantVector *CV = dyn_cast<ConstantVector>(Init)) {
- for (User::op_iterator i = CV->op_begin(), e = CV->op_end(); i != e; ++i)
- Elts.push_back(cast<Constant>(*i));
- } else if (isa<ConstantAggregateZero>(Init)) {
- Elts.assign(NumElts, Constant::getNullValue(InitTy->getElementType()));
- } else {
- assert(isa<UndefValue>(Init) && "This code is out of sync with "
- " ConstantFoldLoadThroughGEPConstantExpr");
- Elts.assign(NumElts, UndefValue::get(InitTy->getElementType()));
- }
+ for (uint64_t i = 0, e = NumElts; i != e; ++i)
+ Elts.push_back(Init->getAggregateElement(i));
assert(CI->getZExtValue() < NumElts);
Elts[CI->getZExtValue()] =
@@ -2266,15 +2259,109 @@ static void CommitValueTo(Constant *Val, Constant *Addr) {
GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2));
}
+namespace {
+
+/// Evaluator - This class evaluates LLVM IR, producing the Constant
+/// representing each SSA instruction. Changes to global variables are stored
+/// in a mapping that can be iterated over after the evaluation is complete.
+/// Once an evaluation call fails, the evaluation object should not be reused.
+class Evaluator {
+public:
+ Evaluator(const TargetData *TD, const TargetLibraryInfo *TLI)
+ : TD(TD), TLI(TLI) {
+ ValueStack.push_back(new DenseMap<Value*, Constant*>);
+ }
+
+ ~Evaluator() {
+ DeleteContainerPointers(ValueStack);
+ while (!AllocaTmps.empty()) {
+ GlobalVariable *Tmp = AllocaTmps.back();
+ AllocaTmps.pop_back();
+
+ // If there are still users of the alloca, the program is doing something
+ // silly, e.g. storing the address of the alloca somewhere and using it
+ // later. Since this is undefined, we'll just make it be null.
+ if (!Tmp->use_empty())
+ Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
+ delete Tmp;
+ }
+ }
+
+ /// EvaluateFunction - Evaluate a call to function F, returning true if
+ /// successful, false if we can't evaluate it. ActualArgs contains the formal
+ /// arguments for the function.
+ bool EvaluateFunction(Function *F, Constant *&RetVal,
+ const SmallVectorImpl<Constant*> &ActualArgs);
+
+ /// EvaluateBlock - Evaluate all instructions in block BB, returning true if
+ /// successful, false if we can't evaluate it. NewBB returns the next BB that
+ /// control flows into, or null upon return.
+ bool EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB);
+
+ Constant *getVal(Value *V) {
+ if (Constant *CV = dyn_cast<Constant>(V)) return CV;
+ Constant *R = ValueStack.back()->lookup(V);
+ assert(R && "Reference to an uncomputed value!");
+ return R;
+ }
+
+ void setVal(Value *V, Constant *C) {
+ ValueStack.back()->operator[](V) = C;
+ }
+
+ const DenseMap<Constant*, Constant*> &getMutatedMemory() const {
+ return MutatedMemory;
+ }
+
+ const SmallPtrSet<GlobalVariable*, 8> &getInvariants() const {
+ return Invariants;
+ }
+
+private:
+ Constant *ComputeLoadResult(Constant *P);
+
+ /// ValueStack - As we compute SSA register values, we store their contents
+ /// here. The back of the vector contains the current function and the stack
+ /// contains the values in the calling frames.
+ SmallVector<DenseMap<Value*, Constant*>*, 4> ValueStack;
+
+ /// CallStack - This is used to detect recursion. In pathological situations
+ /// we could hit exponential behavior, but at least there is nothing
+ /// unbounded.
+ SmallVector<Function*, 4> CallStack;
+
+ /// MutatedMemory - For each store we execute, we update this map. Loads
+ /// check this to get the most up-to-date value. If evaluation is successful,
+ /// this state is committed to the process.
+ DenseMap<Constant*, Constant*> MutatedMemory;
+
+ /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable
+ /// to represent its body. This vector is needed so we can delete the
+ /// temporary globals when we are done.
+ SmallVector<GlobalVariable*, 32> AllocaTmps;
+
+ /// Invariants - These global variables have been marked invariant by the
+ /// static constructor.
+ SmallPtrSet<GlobalVariable*, 8> Invariants;
+
+ /// SimpleConstants - These are constants we have checked and know to be
+ /// simple enough to live in a static initializer of a global.
+ SmallPtrSet<Constant*, 8> SimpleConstants;
+
+ const TargetData *TD;
+ const TargetLibraryInfo *TLI;
+};
+
+} // anonymous namespace
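
Typical use of the new class mirrors what EvaluateStaticConstructor does further down; a minimal sketch, assuming TD and TLI are at hand and CommitValueTo is in scope:

    Evaluator Eval(TD, TLI);
    Constant *RetValDummy;
    if (Eval.EvaluateFunction(F, RetValDummy, SmallVector<Constant*, 0>())) {
      // Evaluation succeeded: replay every mutated location into the
      // corresponding global initializer.
      for (DenseMap<Constant*, Constant*>::const_iterator
             I = Eval.getMutatedMemory().begin(),
             E = Eval.getMutatedMemory().end(); I != E; ++I)
        CommitValueTo(I->second, I->first);
    }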
+
/// ComputeLoadResult - Return the value that would be computed by a load from
/// P after the stores reflected by 'memory' have been performed. If we can't
/// decide, return null.
-static Constant *ComputeLoadResult(Constant *P,
- const DenseMap<Constant*, Constant*> &Memory) {
+Constant *Evaluator::ComputeLoadResult(Constant *P) {
// If this memory location has been recently stored, use the stored value: it
// is the most up-to-date.
- DenseMap<Constant*, Constant*>::const_iterator I = Memory.find(P);
- if (I != Memory.end()) return I->second;
+ DenseMap<Constant*, Constant*>::const_iterator I = MutatedMemory.find(P);
+ if (I != MutatedMemory.end()) return I->second;
// Access it.
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
@@ -2295,56 +2382,29 @@ static Constant *ComputeLoadResult(Constant *P,
return 0; // don't know how to evaluate.
}
-/// EvaluateFunction - Evaluate a call to function F, returning true if
-/// successful, false if we can't evaluate it. ActualArgs contains the formal
-/// arguments for the function.
-static bool EvaluateFunction(Function *F, Constant *&RetVal,
- const SmallVectorImpl<Constant*> &ActualArgs,
- std::vector<Function*> &CallStack,
- DenseMap<Constant*, Constant*> &MutatedMemory,
- std::vector<GlobalVariable*> &AllocaTmps,
- SmallPtrSet<Constant*, 8> &SimpleConstants,
- const TargetData *TD) {
- // Check to see if this function is already executing (recursion). If so,
- // bail out. TODO: we might want to accept limited recursion.
- if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
- return false;
-
- CallStack.push_back(F);
-
- /// Values - As we compute SSA register values, we store their contents here.
- DenseMap<Value*, Constant*> Values;
-
- // Initialize arguments to the incoming values specified.
- unsigned ArgNo = 0;
- for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
- ++AI, ++ArgNo)
- Values[AI] = ActualArgs[ArgNo];
-
- /// ExecutedBlocks - We only handle non-looping, non-recursive code. As such,
- /// we can only evaluate any one basic block at most once. This set keeps
- /// track of what we have executed so we can detect recursive cases etc.
- SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;
-
- // CurInst - The current instruction we're evaluating.
- BasicBlock::iterator CurInst = F->begin()->begin();
-
+/// EvaluateBlock - Evaluate all instructions in block BB, returning true if
+/// successful, false if we can't evaluate it. NewBB returns the next BB that
+/// control flows into, or null upon return.
+bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
+ BasicBlock *&NextBB) {
// This is the main evaluation loop.
while (1) {
Constant *InstResult = 0;
if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
if (!SI->isSimple()) return false; // no volatile/atomic accesses.
- Constant *Ptr = getVal(Values, SI->getOperand(1));
+ Constant *Ptr = getVal(SI->getOperand(1));
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
+ Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
if (!isSimpleEnoughPointerToCommit(Ptr))
// If this is too complex for us to commit, reject it.
return false;
- Constant *Val = getVal(Values, SI->getOperand(0));
+ Constant *Val = getVal(SI->getOperand(0));
// If this might be too difficult for the backend to handle (e.g. the addr
// of one global variable divided by another) then we can't commit it.
- if (!isSimpleEnoughValueToCommit(Val, SimpleConstants))
+ if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, TD))
return false;
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
@@ -2354,7 +2414,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
// stored value.
Ptr = CE->getOperand(0);
- Type *NewTy=cast<PointerType>(Ptr->getType())->getElementType();
+ Type *NewTy = cast<PointerType>(Ptr->getType())->getElementType();
// In order to push the bitcast onto the stored value, a bitcast
// from NewTy to Val's type must be legal. If it's not, we can try
@@ -2366,16 +2426,18 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
if (StructType *STy = dyn_cast<StructType>(NewTy)) {
NewTy = STy->getTypeAtIndex(0U);
- IntegerType *IdxTy =IntegerType::get(NewTy->getContext(), 32);
+ IntegerType *IdxTy = IntegerType::get(NewTy->getContext(), 32);
Constant *IdxZero = ConstantInt::get(IdxTy, 0, false);
Constant * const IdxList[] = {IdxZero, IdxZero};
Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList);
-
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
+ Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
+
// If we can't improve the situation by introspecting NewTy,
// we have to give up.
} else {
- return 0;
+ return false;
}
}
@@ -2387,33 +2449,35 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
MutatedMemory[Ptr] = Val;
} else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) {
InstResult = ConstantExpr::get(BO->getOpcode(),
- getVal(Values, BO->getOperand(0)),
- getVal(Values, BO->getOperand(1)));
+ getVal(BO->getOperand(0)),
+ getVal(BO->getOperand(1)));
} else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) {
InstResult = ConstantExpr::getCompare(CI->getPredicate(),
- getVal(Values, CI->getOperand(0)),
- getVal(Values, CI->getOperand(1)));
+ getVal(CI->getOperand(0)),
+ getVal(CI->getOperand(1)));
} else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) {
InstResult = ConstantExpr::getCast(CI->getOpcode(),
- getVal(Values, CI->getOperand(0)),
+ getVal(CI->getOperand(0)),
CI->getType());
} else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
- InstResult = ConstantExpr::getSelect(getVal(Values, SI->getOperand(0)),
- getVal(Values, SI->getOperand(1)),
- getVal(Values, SI->getOperand(2)));
+ InstResult = ConstantExpr::getSelect(getVal(SI->getOperand(0)),
+ getVal(SI->getOperand(1)),
+ getVal(SI->getOperand(2)));
} else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
- Constant *P = getVal(Values, GEP->getOperand(0));
+ Constant *P = getVal(GEP->getOperand(0));
SmallVector<Constant*, 8> GEPOps;
for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
i != e; ++i)
- GEPOps.push_back(getVal(Values, *i));
+ GEPOps.push_back(getVal(*i));
InstResult =
ConstantExpr::getGetElementPtr(P, GEPOps,
cast<GEPOperator>(GEP)->isInBounds());
} else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
if (!LI->isSimple()) return false; // no volatile/atomic accesses.
- InstResult = ComputeLoadResult(getVal(Values, LI->getOperand(0)),
- MutatedMemory);
+ Constant *Ptr = getVal(LI->getOperand(0));
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
+ Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
+ InstResult = ComputeLoadResult(Ptr);
if (InstResult == 0) return false; // Could not evaluate load.
} else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
if (AI->isArrayAllocation()) return false; // Cannot handle array allocs.
@@ -2423,25 +2487,53 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
UndefValue::get(Ty),
AI->getName()));
InstResult = AllocaTmps.back();
- } else if (CallInst *CI = dyn_cast<CallInst>(CurInst)) {
+ } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) {
+ CallSite CS(CurInst);
// Debug info can safely be ignored here.
- if (isa<DbgInfoIntrinsic>(CI)) {
+ if (isa<DbgInfoIntrinsic>(CS.getInstruction())) {
++CurInst;
continue;
}
// Cannot handle inline asm.
- if (isa<InlineAsm>(CI->getCalledValue())) return false;
-
- if (MemSetInst *MSI = dyn_cast<MemSetInst>(CI)) {
- if (MSI->isVolatile()) return false;
- Constant *Ptr = getVal(Values, MSI->getDest());
- Constant *Val = getVal(Values, MSI->getValue());
- Constant *DestVal = ComputeLoadResult(getVal(Values, Ptr),
- MutatedMemory);
- if (Val->isNullValue() && DestVal && DestVal->isNullValue()) {
- // This memset is a no-op.
+ if (isa<InlineAsm>(CS.getCalledValue())) return false;
+
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
+ if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) {
+ if (MSI->isVolatile()) return false;
+ Constant *Ptr = getVal(MSI->getDest());
+ Constant *Val = getVal(MSI->getValue());
+ Constant *DestVal = ComputeLoadResult(getVal(Ptr));
+ if (Val->isNullValue() && DestVal && DestVal->isNullValue()) {
+ // This memset is a no-op.
+ ++CurInst;
+ continue;
+ }
+ }
+
+ if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
+ II->getIntrinsicID() == Intrinsic::lifetime_end) {
+ ++CurInst;
+ continue;
+ }
+
+ if (II->getIntrinsicID() == Intrinsic::invariant_start) {
+ // We don't insert an entry into Values, as it doesn't have a
+ // meaningful return value.
+ if (!II->use_empty())
+ return false;
+ ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0));
+ Value *PtrArg = getVal(II->getArgOperand(1));
+ Value *Ptr = PtrArg->stripPointerCasts();
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
+ Type *ElemTy = cast<PointerType>(GV->getType())->getElementType();
+ if (!Size->isAllOnesValue() &&
+ Size->getValue().getLimitedValue() >=
+ TD->getTypeStoreSize(ElemTy))
+ Invariants.insert(GV);
+ }
+ // Continue even if we do nothing.
++CurInst;
continue;
}
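
The invariant_start handling added here is what later lets GlobalOpt treat a global as constant: if the static constructor asserts invariance over at least the global's full store size, the global lands in the Invariants set. Illustratively (IR of this era, in comments):

    //   %p = bitcast i32* @g to i8*
    //   call {}* @llvm.invariant.start(i64 4, i8* %p)
    //
    // With TD->getTypeStoreSize(i32) == 4 the size argument covers all of
    // @g, so @g is inserted into Invariants. A smaller size covers only part
    // of the global and marks nothing, while an invariant_start whose result
    // is actually used makes the evaluator give up entirely.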
@@ -2449,19 +2541,17 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
}
// Resolve function pointers.
- Function *Callee = dyn_cast<Function>(getVal(Values,
- CI->getCalledValue()));
- if (!Callee) return false; // Cannot resolve.
+ Function *Callee = dyn_cast<Function>(getVal(CS.getCalledValue()));
+ if (!Callee || Callee->mayBeOverridden())
+ return false; // Cannot resolve.
SmallVector<Constant*, 8> Formals;
- CallSite CS(CI);
- for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end();
- i != e; ++i)
- Formals.push_back(getVal(Values, *i));
+ for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end(); i != e; ++i)
+ Formals.push_back(getVal(*i));
if (Callee->isDeclaration()) {
// If this is a function we can constant fold, do it.
- if (Constant *C = ConstantFoldCall(Callee, Formals)) {
+ if (Constant *C = ConstantFoldCall(Callee, Formals, TLI)) {
InstResult = C;
} else {
return false;
@@ -2472,62 +2562,43 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
Constant *RetVal;
// Execute the call, if successful, use the return value.
- if (!EvaluateFunction(Callee, RetVal, Formals, CallStack,
- MutatedMemory, AllocaTmps, SimpleConstants, TD))
+ ValueStack.push_back(new DenseMap<Value*, Constant*>);
+ if (!EvaluateFunction(Callee, RetVal, Formals))
return false;
+ delete ValueStack.pop_back_val();
InstResult = RetVal;
}
} else if (isa<TerminatorInst>(CurInst)) {
- BasicBlock *NewBB = 0;
if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
if (BI->isUnconditional()) {
- NewBB = BI->getSuccessor(0);
+ NextBB = BI->getSuccessor(0);
} else {
ConstantInt *Cond =
- dyn_cast<ConstantInt>(getVal(Values, BI->getCondition()));
+ dyn_cast<ConstantInt>(getVal(BI->getCondition()));
if (!Cond) return false; // Cannot determine.
- NewBB = BI->getSuccessor(!Cond->getZExtValue());
+ NextBB = BI->getSuccessor(!Cond->getZExtValue());
}
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
ConstantInt *Val =
- dyn_cast<ConstantInt>(getVal(Values, SI->getCondition()));
+ dyn_cast<ConstantInt>(getVal(SI->getCondition()));
if (!Val) return false; // Cannot determine.
- NewBB = SI->getSuccessor(SI->findCaseValue(Val));
+ NextBB = SI->findCaseValue(Val).getCaseSuccessor();
} else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) {
- Value *Val = getVal(Values, IBI->getAddress())->stripPointerCasts();
+ Value *Val = getVal(IBI->getAddress())->stripPointerCasts();
if (BlockAddress *BA = dyn_cast<BlockAddress>(Val))
- NewBB = BA->getBasicBlock();
+ NextBB = BA->getBasicBlock();
else
return false; // Cannot determine.
- } else if (ReturnInst *RI = dyn_cast<ReturnInst>(CurInst)) {
- if (RI->getNumOperands())
- RetVal = getVal(Values, RI->getOperand(0));
-
- CallStack.pop_back(); // return from fn.
- return true; // We succeeded at evaluating this ctor!
+ } else if (isa<ReturnInst>(CurInst)) {
+ NextBB = 0;
} else {
// invoke, unwind, resume, unreachable.
return false; // Cannot handle this terminator.
}
- // Okay, we succeeded in evaluating this control flow. See if we have
- // executed the new block before. If so, we have a looping function,
- // which we cannot evaluate in reasonable time.
- if (!ExecutedBlocks.insert(NewBB))
- return false; // looped!
-
- // Okay, we have never been in this block before. Check to see if there
- // are any PHI nodes. If so, evaluate them with information about where
- // we came from.
- BasicBlock *OldBB = CurInst->getParent();
- CurInst = NewBB->begin();
- PHINode *PN;
- for (; (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
- Values[PN] = getVal(Values, PN->getIncomingValueForBlock(OldBB));
-
- // Do NOT increment CurInst. We know that the terminator had no value.
- continue;
+ // We succeeded at evaluating this block!
+ return true;
} else {
// Did not know how to evaluate this!
return false;
@@ -2535,9 +2606,15 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
if (!CurInst->use_empty()) {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
- InstResult = ConstantFoldConstantExpression(CE, TD);
+ InstResult = ConstantFoldConstantExpression(CE, TD, TLI);
- Values[CurInst] = InstResult;
+ setVal(CurInst, InstResult);
+ }
+
+ // If we just processed an invoke, we finished evaluating the block.
+ if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) {
+ NextBB = II->getNormalDest();
+ return true;
}
// Advance program counter.
@@ -2545,64 +2622,96 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
}
}
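
The interpreter is now split in two: EvaluateBlock executes straight-line instructions until a terminator, and EvaluateFunction (rewritten below) drives it block by block, resolving PHIs and detecting loops. The contract between the halves, as comments:

    // EvaluateBlock(CurInst, NextBB) returns:
    //   true,  NextBB != 0  -- a branch/switch/indirectbr (or the normal
    //                          edge of an invoke) was resolved to NextBB
    //   true,  NextBB == 0  -- a return was reached; the caller reads the
    //                          return value off the block's terminator
    //   false               -- the block could not be evaluated; give up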
-/// EvaluateStaticConstructor - Evaluate static constructors in the function, if
-/// we can. Return true if we can, false otherwise.
-static bool EvaluateStaticConstructor(Function *F, const TargetData *TD) {
- /// MutatedMemory - For each store we execute, we update this map. Loads
- /// check this to get the most up-to-date value. If evaluation is successful,
- /// this state is committed to the process.
- DenseMap<Constant*, Constant*> MutatedMemory;
+/// EvaluateFunction - Evaluate a call to function F, returning true if
+/// successful, false if we can't evaluate it. ActualArgs contains the formal
+/// arguments for the function.
+bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
+ const SmallVectorImpl<Constant*> &ActualArgs) {
+ // Check to see if this function is already executing (recursion). If so,
+ // bail out. TODO: we might want to accept limited recursion.
+ if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
+ return false;
- /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable
- /// to represent its body. This vector is needed so we can delete the
- /// temporary globals when we are done.
- std::vector<GlobalVariable*> AllocaTmps;
+ CallStack.push_back(F);
- /// CallStack - This is used to detect recursion. In pathological situations
- /// we could hit exponential behavior, but at least there is nothing
- /// unbounded.
- std::vector<Function*> CallStack;
+ // Initialize arguments to the incoming values specified.
+ unsigned ArgNo = 0;
+ for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
+ ++AI, ++ArgNo)
+ setVal(AI, ActualArgs[ArgNo]);
- /// SimpleConstants - These are constants we have checked and know to be
- /// simple enough to live in a static initializer of a global.
- SmallPtrSet<Constant*, 8> SimpleConstants;
-
+ // ExecutedBlocks - We only handle non-looping, non-recursive code. As such,
+ // we can only evaluate any one basic block at most once. This set keeps
+ // track of what we have executed so we can detect recursive cases etc.
+ SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;
+
+ // CurBB - The current basic block we're evaluating.
+ BasicBlock *CurBB = F->begin();
+
+ BasicBlock::iterator CurInst = CurBB->begin();
+
+ while (1) {
+ BasicBlock *NextBB = 0; // Initialized to avoid compiler warnings.
+ if (!EvaluateBlock(CurInst, NextBB))
+ return false;
+
+ if (NextBB == 0) {
+ // Successfully running until there's no next block means that we found
+ // the return. Fill in the return value and pop the call stack.
+ ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator());
+ if (RI->getNumOperands())
+ RetVal = getVal(RI->getOperand(0));
+ CallStack.pop_back();
+ return true;
+ }
+
+ // Okay, we succeeded in evaluating this control flow. See if we have
+ // executed the new block before. If so, we have a looping function,
+ // which we cannot evaluate in reasonable time.
+ if (!ExecutedBlocks.insert(NextBB))
+ return false; // looped!
+
+ // Okay, we have never been in this block before. Check to see if there
+ // are any PHI nodes. If so, evaluate them with information about where
+ // we came from.
+ PHINode *PN = 0;
+ for (CurInst = NextBB->begin();
+ (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
+ setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB)));
+
+ // Advance to the next block.
+ CurBB = NextBB;
+ }
+}
+
+/// EvaluateStaticConstructor - Evaluate static constructors in the function, if
+/// we can. Return true if we can, false otherwise.
+static bool EvaluateStaticConstructor(Function *F, const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
// Call the function.
+ Evaluator Eval(TD, TLI);
Constant *RetValDummy;
- bool EvalSuccess = EvaluateFunction(F, RetValDummy,
- SmallVector<Constant*, 0>(), CallStack,
- MutatedMemory, AllocaTmps,
- SimpleConstants, TD);
+ bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
+ SmallVector<Constant*, 0>());
if (EvalSuccess) {
// We succeeded at evaluation: commit the result.
DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
- << F->getName() << "' to " << MutatedMemory.size()
+ << F->getName() << "' to " << Eval.getMutatedMemory().size()
<< " stores.\n");
- for (DenseMap<Constant*, Constant*>::iterator I = MutatedMemory.begin(),
- E = MutatedMemory.end(); I != E; ++I)
+ for (DenseMap<Constant*, Constant*>::const_iterator I =
+ Eval.getMutatedMemory().begin(), E = Eval.getMutatedMemory().end();
+ I != E; ++I)
CommitValueTo(I->second, I->first);
- }
-
- // At this point, we are done interpreting. If we created any 'alloca'
- // temporaries, release them now.
- while (!AllocaTmps.empty()) {
- GlobalVariable *Tmp = AllocaTmps.back();
- AllocaTmps.pop_back();
-
- // If there are still users of the alloca, the program is doing something
- // silly, e.g. storing the address of the alloca somewhere and using it
- // later. Since this is undefined, we'll just make it be null.
- if (!Tmp->use_empty())
- Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
- delete Tmp;
+ for (SmallPtrSet<GlobalVariable*, 8>::const_iterator I =
+ Eval.getInvariants().begin(), E = Eval.getInvariants().end();
+ I != E; ++I)
+ (*I)->setConstant(true);
}
return EvalSuccess;
}
-
-
/// OptimizeGlobalCtorsList - Simplify and evaluate global ctors if possible.
/// Return true if anything changed.
bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
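The ExecutedBlocks guard in the new EvaluateFunction relies on SmallPtrSet::insert returning false when the element was already present (the 3.1-era API; later LLVM returns a pair). The idiom in isolation, with hypothetical names:

    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/BasicBlock.h"

    // Returns false on a second visit to BB, which EvaluateFunction treats
    // as a loop and therefore as a failure to evaluate the constructor.
    static bool markVisited(llvm::SmallPtrSet<llvm::BasicBlock*, 32> &Seen,
                            llvm::BasicBlock *BB) {
      return Seen.insert(BB); // false => already executed
    }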
@@ -2610,7 +2719,6 @@ bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
bool MadeChange = false;
if (Ctors.empty()) return false;
- const TargetData *TD = getAnalysisIfAvailable<TargetData>();
// Loop over global ctors, optimizing them when we can.
for (unsigned i = 0; i != Ctors.size(); ++i) {
Function *F = Ctors[i];
@@ -2628,7 +2736,7 @@ bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
if (F->empty()) continue;
// If we can evaluate the ctor at compile time, do.
- if (EvaluateStaticConstructor(F, TD)) {
+ if (EvaluateStaticConstructor(F, TD, TLI)) {
Ctors.erase(Ctors.begin()+i);
MadeChange = true;
--i;
@@ -2700,12 +2808,15 @@ bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
return Changed;
}
-static Function *FindCXAAtExit(Module &M) {
- Function *Fn = M.getFunction("__cxa_atexit");
+static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) {
+ if (!TLI->has(LibFunc::cxa_atexit))
+ return 0;
+
+ Function *Fn = M.getFunction(TLI->getName(LibFunc::cxa_atexit));
if (!Fn)
return 0;
-
+
FunctionType *FTy = Fn->getFunctionType();
// Checking that the function has the right return type, the right number of
@@ -2724,7 +2835,8 @@ static Function *FindCXAAtExit(Module &M) {
/// destructor and can therefore be eliminated.
/// Note that we assume that other optimization passes have already simplified
/// the code so we only look for a function with a single basic block, where
-/// the only allowed instructions are 'ret' or 'call' to empty C++ dtor.
+/// the only allowed instructions are 'ret', 'call' to an empty C++ dtor, and
+/// other side-effect-free instructions.
static bool cxxDtorIsEmpty(const Function &Fn,
SmallPtrSet<const Function *, 8> &CalledFunctions) {
// FIXME: We could eliminate C++ destructors if they're readonly/readnone and
@@ -2757,9 +2869,9 @@ static bool cxxDtorIsEmpty(const Function &Fn,
if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions))
return false;
} else if (isa<ReturnInst>(*I))
- return true;
- else
- return false;
+ return true; // We're done.
+ else if (I->mayHaveSideEffects())
+ return false; // Destructor with side effects, bail.
}
return false;
@@ -2815,10 +2927,13 @@ bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
bool GlobalOpt::runOnModule(Module &M) {
bool Changed = false;
+ TD = getAnalysisIfAvailable<TargetData>();
+ TLI = &getAnalysis<TargetLibraryInfo>();
+
// Try to find the llvm.globalctors list.
GlobalVariable *GlobalCtors = FindGlobalCtors(M);
- Function *CXAAtExitFn = FindCXAAtExit(M);
+ Function *CXAAtExitFn = FindCXAAtExit(M, TLI);
bool LocalChange = true;
while (LocalChange) {
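The FindCXAAtExit rewrite shows the gating pattern this change adopts: ask TargetLibraryInfo whether the target provides a library function before resolving it by name. A hedged sketch of the same pattern for an arbitrary libcall (helper name hypothetical, 3.1-era headers assumed):

    #include "llvm/Module.h"
    #include "llvm/Target/TargetLibraryInfo.h"
    using namespace llvm;

    // Look up a library function only if TLI believes the target has it,
    // mirroring the LibFunc::cxa_atexit gate added above.
    static Function *findLibFunction(Module &M, const TargetLibraryInfo &TLI,
                                     LibFunc::Func F) {
      if (!TLI.has(F))
        return 0;
      return M.getFunction(TLI.getName(F));
    }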
diff --git a/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp b/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
index c0426da..664ddf6 100644
--- a/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
@@ -32,34 +32,21 @@ namespace {
// AlwaysInliner only inlines functions that are marked as "always inline".
class AlwaysInliner : public Inliner {
- // Functions that are never inlined
- SmallPtrSet<const Function*, 16> NeverInline;
- InlineCostAnalyzer CA;
public:
// Use extremely low threshold.
- AlwaysInliner() : Inliner(ID, -2000000000) {
+ AlwaysInliner() : Inliner(ID, -2000000000, /*InsertLifetime*/true) {
initializeAlwaysInlinerPass(*PassRegistry::getPassRegistry());
}
- static char ID; // Pass identification, replacement for typeid
- InlineCost getInlineCost(CallSite CS) {
- return CA.getInlineCost(CS, NeverInline);
- }
- float getInlineFudgeFactor(CallSite CS) {
- return CA.getInlineFudgeFactor(CS);
- }
- void resetCachedCostInfo(Function *Caller) {
- CA.resetCachedCostInfo(Caller);
- }
- void growCachedCostInfo(Function* Caller, Function* Callee) {
- CA.growCachedCostInfo(Caller, Callee);
+ AlwaysInliner(bool InsertLifetime) : Inliner(ID, -2000000000,
+ InsertLifetime) {
+ initializeAlwaysInlinerPass(*PassRegistry::getPassRegistry());
}
+ static char ID; // Pass identification, replacement for typeid
+ virtual InlineCost getInlineCost(CallSite CS);
virtual bool doFinalization(CallGraph &CG) {
- return removeDeadFunctions(CG, &NeverInline);
+ return removeDeadFunctions(CG, /*AlwaysInlineOnly=*/true);
}
virtual bool doInitialization(CallGraph &CG);
- void releaseMemory() {
- CA.clear();
- }
};
}
@@ -72,17 +59,74 @@ INITIALIZE_PASS_END(AlwaysInliner, "always-inline",
Pass *llvm::createAlwaysInlinerPass() { return new AlwaysInliner(); }
-// doInitialization - Initializes the vector of functions that have not
-// been annotated with the "always inline" attribute.
-bool AlwaysInliner::doInitialization(CallGraph &CG) {
- CA.setTargetData(getAnalysisIfAvailable<TargetData>());
+Pass *llvm::createAlwaysInlinerPass(bool InsertLifetime) {
+ return new AlwaysInliner(InsertLifetime);
+}
+
+/// \brief Minimal filter to detect invalid constructs for inlining.
+static bool isInlineViable(Function &F) {
+ bool ReturnsTwice = F.hasFnAttr(Attribute::ReturnsTwice);
+ for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
+ // Disallow inlining of functions which contain an indirect branch.
+ if (isa<IndirectBrInst>(BI->getTerminator()))
+ return false;
- Module &M = CG.getModule();
+ for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
+ ++II) {
+ CallSite CS(II);
+ if (!CS)
+ continue;
- for (Module::iterator I = M.begin(), E = M.end();
- I != E; ++I)
- if (!I->isDeclaration() && !I->hasFnAttr(Attribute::AlwaysInline))
- NeverInline.insert(I);
+ // Disallow recursive calls.
+ if (&F == CS.getCalledFunction())
+ return false;
+ // Disallow calls which expose returns-twice to a function not previously
+ // attributed as such.
+ if (!ReturnsTwice && CS.isCall() &&
+ cast<CallInst>(CS.getInstruction())->canReturnTwice())
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/// \brief Get the inline cost for the always-inliner.
+///
+/// The always inliner *only* handles functions which are marked with the
+/// attribute to force inlining. As such, it is dramatically simpler and avoids
+/// using the powerful (but expensive) inline cost analysis. Instead it uses
+/// a very simple and boring direct walk of the instructions looking for
+/// impossible-to-inline constructs.
+///
+/// Note, it would be possible to go to some lengths to cache the information
+/// computed here, but as we only expect to do this for relatively few and
+/// small functions which have the explicit attribute to force inlining, it is
+/// likely not worth it in practice.
+InlineCost AlwaysInliner::getInlineCost(CallSite CS) {
+ Function *Callee = CS.getCalledFunction();
+ // We assume indirect calls aren't calling an always-inline function.
+ if (!Callee) return InlineCost::getNever();
+
+ // We can't inline calls to external functions.
+ // FIXME: We shouldn't even get here.
+ if (Callee->isDeclaration()) return InlineCost::getNever();
+
+ // Return never for anything not marked as always inline.
+ if (!Callee->hasFnAttr(Attribute::AlwaysInline))
+ return InlineCost::getNever();
+
+ // Do some minimal analysis to preclude non-viable functions.
+ if (!isInlineViable(*Callee))
+ return InlineCost::getNever();
+
+ // Otherwise, force inlining.
+ return InlineCost::getAlways();
+}
+
+// doInitialization - The always inliner keeps no per-module state, so there
+// is nothing to initialize.
+bool AlwaysInliner::doInitialization(CallGraph &CG) {
return false;
}
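With the new constructor parameter, clients choose whether the always-inliner emits lifetime intrinsics for inlined allocas. A usage sketch (driver function hypothetical):

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/IPO.h"

    static void scheduleAlwaysInliner(llvm::PassManager &PM,
                                      bool InsertLifetime) {
      // The flag flows through Inliner(ID, Threshold, InsertLifetime)
      // down to InlineFunction(CS, IFI, InsertLifetime).
      PM.add(llvm::createAlwaysInlinerPass(InsertLifetime));
    }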
diff --git a/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp b/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
index 84dd4fd..50038d8 100644
--- a/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
@@ -23,40 +23,26 @@
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/InlinerPass.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/ADT/SmallPtrSet.h"
using namespace llvm;
namespace {
class SimpleInliner : public Inliner {
- // Functions that are never inlined
- SmallPtrSet<const Function*, 16> NeverInline;
InlineCostAnalyzer CA;
public:
SimpleInliner() : Inliner(ID) {
initializeSimpleInlinerPass(*PassRegistry::getPassRegistry());
}
- SimpleInliner(int Threshold) : Inliner(ID, Threshold) {
+ SimpleInliner(int Threshold) : Inliner(ID, Threshold,
+ /*InsertLifetime*/true) {
initializeSimpleInlinerPass(*PassRegistry::getPassRegistry());
}
static char ID; // Pass identification, replacement for typeid
InlineCost getInlineCost(CallSite CS) {
- return CA.getInlineCost(CS, NeverInline);
- }
- float getInlineFudgeFactor(CallSite CS) {
- return CA.getInlineFudgeFactor(CS);
- }
- void resetCachedCostInfo(Function *Caller) {
- CA.resetCachedCostInfo(Caller);
- }
- void growCachedCostInfo(Function* Caller, Function* Callee) {
- CA.growCachedCostInfo(Caller, Callee);
+ return CA.getInlineCost(CS, getInlineThreshold(CS));
}
virtual bool doInitialization(CallGraph &CG);
- void releaseMemory() {
- CA.clear();
- }
};
}
@@ -77,44 +63,6 @@ Pass *llvm::createFunctionInliningPass(int Threshold) {
// annotated with the noinline attribute.
bool SimpleInliner::doInitialization(CallGraph &CG) {
CA.setTargetData(getAnalysisIfAvailable<TargetData>());
-
- Module &M = CG.getModule();
-
- for (Module::iterator I = M.begin(), E = M.end();
- I != E; ++I)
- if (!I->isDeclaration() && I->hasFnAttr(Attribute::NoInline))
- NeverInline.insert(I);
-
- // Get llvm.noinline
- GlobalVariable *GV = M.getNamedGlobal("llvm.noinline");
-
- if (GV == 0)
- return false;
-
- // Don't crash on invalid code
- if (!GV->hasDefinitiveInitializer())
- return false;
-
- const ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
-
- if (InitList == 0)
- return false;
-
- // Iterate over each element and add to the NeverInline set
- for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
-
- // Get Source
- const Constant *Elt = InitList->getOperand(i);
-
- if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Elt))
- if (CE->getOpcode() == Instruction::BitCast)
- Elt = CE->getOperand(0);
-
- // Insert into set of functions to never inline
- if (const Function *F = dyn_cast<Function>(Elt))
- NeverInline.insert(F);
- }
-
return false;
}
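The simple inliner now hands its per-call-site threshold straight to the cost analysis instead of maintaining fudge factors and a cost cache. Constructing it with an explicit threshold is unchanged; a sketch (225 matches the -inline-threshold default quoted below):

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/IPO.h"

    static void scheduleSimpleInliner(llvm::PassManager &PM) {
      // The pass then evaluates each call site via
      // CA.getInlineCost(CS, getInlineThreshold(CS)).
      PM.add(llvm::createFunctionInliningPass(225));
    }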
diff --git a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
index f00935b..dc9cbfb 100644
--- a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -36,6 +36,11 @@ STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");
+// This weirdly named statistic tracks the number of times that, when
+// attempting to inline a function A into B, we analyze the callers of B to
+// see whether inlining B into them would be more profitable, in which case
+// the inlining of A is blocked.
+STATISTIC(NumCallerCallersAnalyzed, "Number of caller-callers analyzed");
+
static cl::opt<int>
InlineLimit("inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
cl::desc("Control the amount of inlining to perform (default = 225)"));
@@ -48,11 +53,12 @@ HintThreshold("inlinehint-threshold", cl::Hidden, cl::init(325),
const int OptSizeThreshold = 75;
Inliner::Inliner(char &ID)
- : CallGraphSCCPass(ID), InlineThreshold(InlineLimit) {}
+ : CallGraphSCCPass(ID), InlineThreshold(InlineLimit), InsertLifetime(true) {}
-Inliner::Inliner(char &ID, int Threshold)
+Inliner::Inliner(char &ID, int Threshold, bool InsertLifetime)
: CallGraphSCCPass(ID), InlineThreshold(InlineLimit.getNumOccurrences() > 0 ?
- InlineLimit : Threshold) {}
+ InlineLimit : Threshold),
+ InsertLifetime(InsertLifetime) {}
/// getAnalysisUsage - For this class, we declare that we require and preserve
/// the call graph. If the derived class implements this method, it should
@@ -75,13 +81,13 @@ InlinedArrayAllocasTy;
/// any new allocas to the set if not possible.
static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
InlinedArrayAllocasTy &InlinedArrayAllocas,
- int InlineHistory) {
+ int InlineHistory, bool InsertLifetime) {
Function *Callee = CS.getCalledFunction();
Function *Caller = CS.getCaller();
// Try to inline the function. Get the list of static allocas that were
// inlined.
- if (!InlineFunction(CS, IFI))
+ if (!InlineFunction(CS, IFI, InsertLifetime))
return false;
// If the inlined function had a higher stack protection level than the
@@ -230,29 +236,37 @@ bool Inliner::shouldInline(CallSite CS) {
return false;
}
- int Cost = IC.getValue();
Function *Caller = CS.getCaller();
- int CurrentThreshold = getInlineThreshold(CS);
- float FudgeFactor = getInlineFudgeFactor(CS);
- int AdjThreshold = (int)(CurrentThreshold * FudgeFactor);
- if (Cost >= AdjThreshold) {
- DEBUG(dbgs() << " NOT Inlining: cost=" << Cost
- << ", thres=" << AdjThreshold
+ if (!IC) {
+ DEBUG(dbgs() << " NOT Inlining: cost=" << IC.getCost()
+ << ", thres=" << (IC.getCostDelta() + IC.getCost())
<< ", Call: " << *CS.getInstruction() << "\n");
return false;
}
- // Try to detect the case where the current inlining candidate caller
- // (call it B) is a static function and is an inlining candidate elsewhere,
- // and the current candidate callee (call it C) is large enough that
- // inlining it into B would make B too big to inline later. In these
- // circumstances it may be best not to inline C into B, but to inline B
- // into its callers.
- if (Caller->hasLocalLinkage()) {
+ // Try to detect the case where the current inlining candidate caller (call
+ // it B) is a static or linkonce-ODR function and is an inlining candidate
+ // elsewhere, and the current candidate callee (call it C) is large enough
+ // that inlining it into B would make B too big to inline later. In these
+ // circumstances it may be best not to inline C into B, but to inline B into
+ // its callers.
+ //
+ // This only applies to static and linkonce-ODR functions because those are
+ // expected to be available for inlining in the translation units where they
+ // are used. Thus we will always have the opportunity to make local inlining
+ // decisions. Importantly the linkonce-ODR linkage covers inline functions
+ // and templates in C++.
+ //
+ // FIXME: All of this logic should be sunk into getInlineCost. It relies on
+ // the internal implementation of the inline cost metrics rather than
+ // treating them as truly abstract units etc.
+ if (Caller->hasLocalLinkage() ||
+ Caller->getLinkage() == GlobalValue::LinkOnceODRLinkage) {
int TotalSecondaryCost = 0;
- bool outerCallsFound = false;
+ // The candidate cost to be imposed upon the current function.
+ int CandidateCost = IC.getCost() - (InlineConstants::CallPenalty + 1);
// This bool tracks what happens if we do NOT inline C into B.
- bool callerWillBeRemoved = true;
+ bool callerWillBeRemoved = Caller->hasLocalLinkage();
// This bool tracks what happens if we DO inline C into B.
bool inliningPreventsSomeOuterInline = false;
for (Value::use_iterator I = Caller->use_begin(), E =Caller->use_end();
@@ -268,26 +282,20 @@ bool Inliner::shouldInline(CallSite CS) {
}
InlineCost IC2 = getInlineCost(CS2);
- if (IC2.isNever())
+ ++NumCallerCallersAnalyzed;
+ if (!IC2) {
callerWillBeRemoved = false;
- if (IC2.isAlways() || IC2.isNever())
+ continue;
+ }
+ if (IC2.isAlways())
continue;
- outerCallsFound = true;
- int Cost2 = IC2.getValue();
- int CurrentThreshold2 = getInlineThreshold(CS2);
- float FudgeFactor2 = getInlineFudgeFactor(CS2);
-
- if (Cost2 >= (int)(CurrentThreshold2 * FudgeFactor2))
- callerWillBeRemoved = false;
-
- // See if we have this case. We subtract off the penalty
- // for the call instruction, which we would be deleting.
- if (Cost2 < (int)(CurrentThreshold2 * FudgeFactor2) &&
- Cost2 + Cost - (InlineConstants::CallPenalty + 1) >=
- (int)(CurrentThreshold2 * FudgeFactor2)) {
+ // See if inlining of the original callsite would erase the cost delta of
+ // this callsite. We subtract off the penalty for the call instruction,
+ // which we would be deleting.
+ if (IC2.getCostDelta() <= CandidateCost) {
inliningPreventsSomeOuterInline = true;
- TotalSecondaryCost += Cost2;
+ TotalSecondaryCost += IC2.getCost();
}
}
// If all outer calls to Caller would get inlined, the cost for the last
@@ -297,17 +305,16 @@ bool Inliner::shouldInline(CallSite CS) {
if (callerWillBeRemoved && Caller->use_begin() != Caller->use_end())
TotalSecondaryCost += InlineConstants::LastCallToStaticBonus;
- if (outerCallsFound && inliningPreventsSomeOuterInline &&
- TotalSecondaryCost < Cost) {
- DEBUG(dbgs() << " NOT Inlining: " << *CS.getInstruction() <<
- " Cost = " << Cost <<
+ if (inliningPreventsSomeOuterInline && TotalSecondaryCost < IC.getCost()) {
+ DEBUG(dbgs() << " NOT Inlining: " << *CS.getInstruction() <<
+ " Cost = " << IC.getCost() <<
", outer Cost = " << TotalSecondaryCost << '\n');
return false;
}
}
- DEBUG(dbgs() << " Inlining: cost=" << Cost
- << ", thres=" << AdjThreshold
+ DEBUG(dbgs() << " Inlining: cost=" << IC.getCost()
+ << ", thres=" << (IC.getCostDelta() + IC.getCost())
<< ", Call: " << *CS.getInstruction() << '\n');
return true;
}
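The outer-inline heuristic above reduces to one comparison: if an outer call site's cost delta is no larger than the candidate's cost minus the call penalty being deleted, inlining the candidate would price the outer inline out. Restated in isolation (helper name hypothetical):

    #include "llvm/Analysis/InlineCost.h"

    // True if inlining the current candidate (cost IC) into the caller
    // would consume the headroom an outer call site IC2 depends on.
    static bool preventsOuterInline(const llvm::InlineCost &IC,
                                    const llvm::InlineCost &IC2) {
      int CandidateCost =
          IC.getCost() - (llvm::InlineConstants::CallPenalty + 1);
      return IC2.getCostDelta() <= CandidateCost;
    }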
@@ -326,7 +333,6 @@ static bool InlineHistoryIncludes(Function *F, int InlineHistoryID,
return false;
}
-
bool Inliner::runOnSCC(CallGraphSCC &SCC) {
CallGraph &CG = getAnalysis<CallGraph>();
const TargetData *TD = getAnalysisIfAvailable<TargetData>();
@@ -415,8 +421,6 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
CG[Caller]->removeCallEdgeFor(CS);
CS.getInstruction()->eraseFromParent();
++NumCallsDeleted;
- // Update the cached cost info with the missing call
- growCachedCostInfo(Caller, NULL);
} else {
// We can only inline direct calls to non-declarations.
if (Callee == 0 || Callee->isDeclaration()) continue;
@@ -439,7 +443,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
// Attempt to inline the function.
if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
- InlineHistoryID))
+ InlineHistoryID, InsertLifetime))
continue;
++NumInlined;
@@ -457,9 +461,6 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
}
}
-
- // Update the cached cost info with the inlined call.
- growCachedCostInfo(Caller, Callee);
}
// If we inlined or deleted the last possible call site to the function,
@@ -479,8 +480,6 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
// Remove any call graph edges from the callee to its callees.
CalleeNode->removeAllCalledFunctions();
- resetCachedCostInfo(Callee);
-
// Removing the node for callee from the call graph and delete it.
delete CG.removeFunctionFromModule(CalleeNode);
++NumDeleted;
@@ -514,29 +513,28 @@ bool Inliner::doFinalization(CallGraph &CG) {
/// removeDeadFunctions - Remove dead functions that are not included in
/// the DNR (Do Not Remove) list.
-bool Inliner::removeDeadFunctions(CallGraph &CG,
- SmallPtrSet<const Function *, 16> *DNR) {
- SmallPtrSet<CallGraphNode*, 16> FunctionsToRemove;
+bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
+ SmallVector<CallGraphNode*, 16> FunctionsToRemove;
// Scan for all of the functions, looking for ones that should now be removed
// from the program. Insert the dead ones in the FunctionsToRemove set.
for (CallGraph::iterator I = CG.begin(), E = CG.end(); I != E; ++I) {
CallGraphNode *CGN = I->second;
- if (CGN->getFunction() == 0)
- continue;
-
Function *F = CGN->getFunction();
-
+ if (!F || F->isDeclaration())
+ continue;
+
+ // Handle the case when this function is called and we only want to care
+ // about always-inline functions. This is a bit of a hack to share code
+ // between here and the InlineAlways pass.
+ if (AlwaysInlineOnly && !F->hasFnAttr(Attribute::AlwaysInline))
+ continue;
+
// If the only remaining users of the function are dead constants, remove
// them.
F->removeDeadConstantUsers();
- if (DNR && DNR->count(F))
- continue;
- if (!F->hasLinkOnceLinkage() && !F->hasLocalLinkage() &&
- !F->hasAvailableExternallyLinkage())
- continue;
- if (!F->use_empty())
+ if (!F->isDefTriviallyDead())
continue;
// Remove any call graph edges from the function to its callees.
@@ -548,24 +546,27 @@ bool Inliner::removeDeadFunctions(CallGraph &CG,
CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);
// Removing the node for callee from the call graph and delete it.
- FunctionsToRemove.insert(CGN);
+ FunctionsToRemove.push_back(CGN);
}
+ if (FunctionsToRemove.empty())
+ return false;
// Now that we know which functions to delete, do so. We didn't want to do
// this inline, because that would invalidate our CallGraph::iterator
// objects. :(
//
- // Note that it doesn't matter that we are iterating over a non-stable set
+ // Note that it doesn't matter that we are iterating over a non-stable order
// here to do this; it doesn't matter which order the functions are deleted
// in.
- bool Changed = false;
- for (SmallPtrSet<CallGraphNode*, 16>::iterator I = FunctionsToRemove.begin(),
- E = FunctionsToRemove.end(); I != E; ++I) {
- resetCachedCostInfo((*I)->getFunction());
+ array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
+ FunctionsToRemove.erase(std::unique(FunctionsToRemove.begin(),
+ FunctionsToRemove.end()),
+ FunctionsToRemove.end());
+ for (SmallVectorImpl<CallGraphNode *>::iterator I = FunctionsToRemove.begin(),
+ E = FunctionsToRemove.end();
+ I != E; ++I) {
delete CG.removeFunctionFromModule(*I);
++NumDeleted;
- Changed = true;
}
-
- return Changed;
+ return true;
}
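Switching FunctionsToRemove from a SmallPtrSet to a SmallVector trades automatic uniquing for deterministic iteration order, so duplicates are stripped explicitly before deletion. The same sort/unique/erase idiom in plain C++ (standalone sketch):

    #include <algorithm>
    #include <vector>

    // Sort so duplicates become adjacent, compact them away with
    // std::unique, then erase the leftover tail -- the same sequence as
    // array_pod_sort + std::unique + erase above.
    template <typename T>
    static void sortAndUniquify(std::vector<T *> &V) {
      std::sort(V.begin(), V.end());
      V.erase(std::unique(V.begin(), V.end()), V.end());
    }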
diff --git a/contrib/llvm/lib/Transforms/IPO/Internalize.cpp b/contrib/llvm/lib/Transforms/IPO/Internalize.cpp
index 7cb1d18..cd29e7a 100644
--- a/contrib/llvm/lib/Transforms/IPO/Internalize.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/Internalize.cpp
@@ -122,6 +122,9 @@ bool InternalizePass::runOnModule(Module &M) {
bool Changed = false;
+ // Never internalize functions which code-gen might insert.
+ ExternalNames.insert("__stack_chk_fail");
+
// Mark all functions not in the api as internal.
// FIXME: maybe use private linkage?
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
@@ -148,9 +151,11 @@ bool InternalizePass::runOnModule(Module &M) {
// won't find them. (see MachineModuleInfo.)
ExternalNames.insert("llvm.global_ctors");
ExternalNames.insert("llvm.global_dtors");
- ExternalNames.insert("llvm.noinline");
ExternalNames.insert("llvm.global.annotations");
+ // Never internalize symbols code-gen inserts.
+ ExternalNames.insert("__stack_chk_guard");
+
// Mark all global variables with initializers that are not in the api as
// internal as well.
// FIXME: maybe use private linkage?
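Beyond the hard-coded code-gen symbols above, clients of the internalize pass keep their own API surface external by passing an export list when creating the pass. A sketch (driver function hypothetical, 3.1-era API assumed):

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/IPO.h"
    #include <vector>

    static void scheduleInternalize(llvm::PassManager &PM) {
      // Everything not named here -- and not in ExternalNames, e.g.
      // __stack_chk_fail -- becomes internal.
      std::vector<const char *> Keep;
      Keep.push_back("main");
      PM.add(llvm::createInternalizePass(Keep));
    }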
diff --git a/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp b/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
index 8fdfd72..a1b0a45 100644
--- a/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -22,14 +22,19 @@
#include "llvm/PassManager.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/Verifier.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Vectorize.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ManagedStatic.h"
using namespace llvm;
+static cl::opt<bool>
+RunVectorization("vectorize", cl::desc("Run vectorization passes"));
+
PassManagerBuilder::PassManagerBuilder() {
OptLevel = 2;
SizeLevel = 0;
@@ -38,6 +43,7 @@ PassManagerBuilder::PassManagerBuilder() {
DisableSimplifyLibCalls = false;
DisableUnitAtATime = false;
DisableUnrollLoops = false;
+ Vectorize = RunVectorization;
}
PassManagerBuilder::~PassManagerBuilder() {
@@ -101,6 +107,7 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
MPM.add(Inliner);
Inliner = 0;
}
+ addExtensionsToPM(EP_EnabledOnOptLevel0, MPM);
return;
}
@@ -110,6 +117,8 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
addInitialAliasAnalysisPasses(MPM);
if (!DisableUnitAtATime) {
+ addExtensionsToPM(EP_ModuleOptimizerEarly, MPM);
+
MPM.add(createGlobalOptimizerPass()); // Optimize out global vars
MPM.add(createIPSCCPPass()); // IP SCCP
@@ -170,6 +179,13 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
addExtensionsToPM(EP_ScalarOptimizerLate, MPM);
+ if (Vectorize) {
+ MPM.add(createBBVectorizePass());
+ MPM.add(createInstructionCombiningPass());
+ if (OptLevel > 1)
+ MPM.add(createGVNPass()); // Remove redundancies
+ }
+
MPM.add(createAggressiveDCEPass()); // Delete dead instructions
MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
MPM.add(createInstructionCombiningPass()); // Clean up after everything.
@@ -186,11 +202,13 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
if (OptLevel > 1)
MPM.add(createConstantMergePass()); // Merge dup global constants
}
+ addExtensionsToPM(EP_OptimizerLast, MPM);
}
void PassManagerBuilder::populateLTOPassManager(PassManagerBase &PM,
bool Internalize,
- bool RunInliner) {
+ bool RunInliner,
+ bool DisableGVNLoadPRE) {
// Provide AliasAnalysis services for optimizations.
addInitialAliasAnalysisPasses(PM);
@@ -246,9 +264,9 @@ void PassManagerBuilder::populateLTOPassManager(PassManagerBase &PM,
PM.add(createFunctionAttrsPass()); // Add nocapture.
PM.add(createGlobalsModRefPass()); // IP alias analysis.
- PM.add(createLICMPass()); // Hoist loop invariants.
- PM.add(createGVNPass()); // Remove redundancies.
- PM.add(createMemCpyOptPass()); // Remove dead memcpys.
+ PM.add(createLICMPass()); // Hoist loop invariants.
+ PM.add(createGVNPass(DisableGVNLoadPRE)); // Remove redundancies.
+ PM.add(createMemCpyOptPass()); // Remove dead memcpys.
// Nuke dead stores.
PM.add(createDeadStoreEliminationPass());
@@ -340,4 +358,3 @@ void LLVMPassManagerBuilderPopulateLTOPassManager(LLVMPassManagerBuilderRef PMB,
PassManagerBase *LPM = unwrap(PM);
Builder->populateLTOPassManager(*LPM, Internalize, RunInliner);
}
-
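Taken together, Vectorize defaults to the new -vectorize flag and injects BBVectorize plus instcombine/GVN cleanup into the -O pipeline, while LTO clients gain a DisableGVNLoadPRE parameter. An end-to-end usage sketch (function name hypothetical):

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/IPO/PassManagerBuilder.h"
    using namespace llvm;

    static void buildPipelines(PassManager &MPM, PassManager &LTOPM) {
      PassManagerBuilder Builder;
      Builder.OptLevel = 2;
      Builder.Vectorize = true; // same effect as passing -vectorize
      Builder.populateModulePassManager(MPM);
      // The new third parameter keeps GVN but disables its load PRE.
      Builder.populateLTOPassManager(LTOPM, /*Internalize=*/true,
                                     /*RunInliner=*/true,
                                     /*DisableGVNLoadPRE=*/true);
    }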
diff --git a/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp b/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
index cbb80f0..c8cc8fd 100644
--- a/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
@@ -101,8 +101,7 @@ bool PruneEH::runOnSCC(CallGraphSCC &SCC) {
// Check to see if this function performs an unwind or calls an
// unwinding function.
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
- if (CheckUnwind && (isa<UnwindInst>(BB->getTerminator()) ||
- isa<ResumeInst>(BB->getTerminator()))) {
+ if (CheckUnwind && isa<ResumeInst>(BB->getTerminator())) {
// Uses unwind / resume!
SCCMightUnwind = true;
} else if (CheckReturn && isa<ReturnInst>(BB->getTerminator())) {