Diffstat (limited to 'contrib/llvm/lib/Analysis')
 contrib/llvm/lib/Analysis/AliasAnalysis.cpp                | 233
 contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp         |  20
 contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp       |  50
 contrib/llvm/lib/Analysis/AliasDebugger.cpp                |  17
 contrib/llvm/lib/Analysis/AliasSetTracker.cpp              | 157
 contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp           | 584
 contrib/llvm/lib/Analysis/CFGPrinter.cpp                   |  30
 contrib/llvm/lib/Analysis/CMakeLists.txt                   |   3
 contrib/llvm/lib/Analysis/CaptureTracking.cpp              |   2
 contrib/llvm/lib/Analysis/ConstantFolding.cpp              |   4
 contrib/llvm/lib/Analysis/DbgInfoPrinter.cpp               |   6
 contrib/llvm/lib/Analysis/DebugInfo.cpp                    | 192
 contrib/llvm/lib/Analysis/DomPrinter.cpp                   |  65
 contrib/llvm/lib/Analysis/IPA/CallGraph.cpp                |  15
 contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp         |  12
 contrib/llvm/lib/Analysis/IPA/FindUsedTypes.cpp            |   4
 contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp            |  81
 contrib/llvm/lib/Analysis/IVUsers.cpp                      |  39
 contrib/llvm/lib/Analysis/InlineCost.cpp                   |   8
 contrib/llvm/lib/Analysis/InstCount.cpp                    |   6
 contrib/llvm/lib/Analysis/IntervalPartition.cpp            |   6
 contrib/llvm/lib/Analysis/LazyValueInfo.cpp                | 583
 contrib/llvm/lib/Analysis/LibCallAliasAnalysis.cpp         |  14
 contrib/llvm/lib/Analysis/LibCallSemantics.cpp             |   3
 contrib/llvm/lib/Analysis/Lint.cpp                         |  11
 contrib/llvm/lib/Analysis/LiveValues.cpp                   |   6
 contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp       |   4
 contrib/llvm/lib/Analysis/LoopInfo.cpp                     |  45
 contrib/llvm/lib/Analysis/LoopPass.cpp                     |  16
 contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp     |  44
 contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp       |   7
 contrib/llvm/lib/Analysis/PointerTracking.cpp              |  55
 contrib/llvm/lib/Analysis/PostDominators.cpp               |   8
 contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp         |  14
 contrib/llvm/lib/Analysis/ProfileInfo.cpp                  |  13
 contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp        |  14
 contrib/llvm/lib/Analysis/ProfileVerifierPass.cpp          |   8
 contrib/llvm/lib/Analysis/RegionInfo.cpp                   | 749
 contrib/llvm/lib/Analysis/RegionPrinter.cpp                | 186
 contrib/llvm/lib/Analysis/ScalarEvolution.cpp              | 743
 contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp |  17
 contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp      |  74
 contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp | 114
 contrib/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp       | 191
 contrib/llvm/lib/Analysis/ValueTracking.cpp                | 238
 45 files changed, 3338 insertions(+), 1353 deletions(-)
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysis.cpp b/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
index 503fbbd..1f2528f 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -65,10 +65,127 @@ void AliasAnalysis::copyValue(Value *From, Value *To) {
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(CallSite CS1, CallSite CS2) {
- // FIXME: we can do better.
+AliasAnalysis::getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size) {
+ // Don't assert AA because BasicAA calls us in order to make use of the
+ // logic here.
+
+ ModRefBehavior MRB = getModRefBehavior(CS);
+ if (MRB == DoesNotAccessMemory)
+ return NoModRef;
+
+ ModRefResult Mask = ModRef;
+ if (MRB == OnlyReadsMemory)
+ Mask = Ref;
+ else if (MRB == AliasAnalysis::AccessesArguments) {
+ bool doesAlias = false;
+ for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
+ AI != AE; ++AI)
+ if (!isNoAlias(*AI, ~0U, P, Size)) {
+ doesAlias = true;
+ break;
+ }
+
+ if (!doesAlias)
+ return NoModRef;
+ }
+
+ // If P points to a constant memory location, the call definitely could not
+ // modify the memory location.
+ if ((Mask & Mod) && pointsToConstantMemory(P))
+ Mask = ModRefResult(Mask & ~Mod);
+
+ // If this is BasicAA, don't forward.
+ if (!AA) return Mask;
+
+ // Otherwise, fall back to the next AA in the chain. But we can merge
+ // in any mask we've managed to compute.
+ return ModRefResult(AA->getModRefInfo(CS, P, Size) & Mask);
+}
+
+AliasAnalysis::ModRefResult
+AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
+ // Don't assert AA because BasicAA calls us in order to make use of the
+ // logic here.
+
+ // If CS1 or CS2 are readnone, they don't interact.
+ ModRefBehavior CS1B = getModRefBehavior(CS1);
+ if (CS1B == DoesNotAccessMemory) return NoModRef;
+
+ ModRefBehavior CS2B = getModRefBehavior(CS2);
+ if (CS2B == DoesNotAccessMemory) return NoModRef;
+
+ // If they both only read from memory, there is no dependence.
+ if (CS1B == OnlyReadsMemory && CS2B == OnlyReadsMemory)
+ return NoModRef;
+
+ AliasAnalysis::ModRefResult Mask = ModRef;
+
+ // If CS1 only reads memory, the only dependence on CS2 can be
+ // from CS1 reading memory written by CS2.
+ if (CS1B == OnlyReadsMemory)
+ Mask = ModRefResult(Mask & Ref);
+
+ // If CS2 only accesses memory through arguments, accumulate the mod/ref
+ // information from CS1's references to the memory referenced by
+ // CS2's arguments.
+ if (CS2B == AccessesArguments) {
+ AliasAnalysis::ModRefResult R = NoModRef;
+ for (ImmutableCallSite::arg_iterator
+ I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
+ R = ModRefResult((R | getModRefInfo(CS1, *I, UnknownSize)) & Mask);
+ if (R == Mask)
+ break;
+ }
+ return R;
+ }
+
+ // If CS1 only accesses memory through arguments, check if CS2 references
+ // any of the memory referenced by CS1's arguments. If not, return NoModRef.
+ if (CS1B == AccessesArguments) {
+ AliasAnalysis::ModRefResult R = NoModRef;
+ for (ImmutableCallSite::arg_iterator
+ I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I)
+ if (getModRefInfo(CS2, *I, UnknownSize) != NoModRef) {
+ R = Mask;
+ break;
+ }
+ if (R == NoModRef)
+ return R;
+ }
+
+ // If this is BasicAA, don't forward.
+ if (!AA) return Mask;
+
+ // Otherwise, fall back to the next AA in the chain. But we can merge
+ // in any mask we've managed to compute.
+ return ModRefResult(AA->getModRefInfo(CS1, CS2) & Mask);
+}
+
+AliasAnalysis::ModRefBehavior
+AliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
+ // Don't assert AA because BasicAA calls us in order to make use of the
+ // logic here.
+
+ ModRefBehavior Min = UnknownModRefBehavior;
+
+ // Call back into the alias analysis with the other form of getModRefBehavior
+ // to see if it can give a better response.
+ if (const Function *F = CS.getCalledFunction())
+ Min = getModRefBehavior(F);
+
+ // If this is BasicAA, don't forward.
+ if (!AA) return Min;
+
+ // Otherwise, fall back to the next AA in the chain. But we can merge
+ // in any result we've managed to compute.
+ return std::min(AA->getModRefBehavior(CS), Min);
+}
+
+AliasAnalysis::ModRefBehavior
+AliasAnalysis::getModRefBehavior(const Function *F) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- return AA->getModRefInfo(CS1, CS2);
+ return AA->getModRefBehavior(F);
}
@@ -77,87 +194,63 @@ AliasAnalysis::getModRefInfo(CallSite CS1, CallSite CS2) {
//===----------------------------------------------------------------------===//
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(LoadInst *L, Value *P, unsigned Size) {
- return alias(L->getOperand(0), getTypeStoreSize(L->getType()),
- P, Size) ? Ref : NoModRef;
+AliasAnalysis::getModRefInfo(const LoadInst *L, const Value *P, unsigned Size) {
+ // Be conservative in the face of volatile.
+ if (L->isVolatile())
+ return ModRef;
+
+ // If the load address doesn't alias the given address, it doesn't read
+ // or write the specified memory.
+ if (!alias(L->getOperand(0), getTypeStoreSize(L->getType()), P, Size))
+ return NoModRef;
+
+ // Otherwise, a load just reads.
+ return Ref;
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(StoreInst *S, Value *P, unsigned Size) {
- // If the stored address cannot alias the pointer in question, then the
- // pointer cannot be modified by the store.
+AliasAnalysis::getModRefInfo(const StoreInst *S, const Value *P, unsigned Size) {
+ // Be conservative in the face of volatile.
+ if (S->isVolatile())
+ return ModRef;
+
+ // If the store address cannot alias the pointer in question, then the
+ // specified memory cannot be modified by the store.
if (!alias(S->getOperand(1),
getTypeStoreSize(S->getOperand(0)->getType()), P, Size))
return NoModRef;
// If the pointer is a pointer to constant memory, then it could not have been
// modified by this store.
- return pointsToConstantMemory(P) ? NoModRef : Mod;
-}
-
-AliasAnalysis::ModRefBehavior
-AliasAnalysis::getModRefBehavior(CallSite CS,
- std::vector<PointerAccessInfo> *Info) {
- if (CS.doesNotAccessMemory())
- // Can't do better than this.
- return DoesNotAccessMemory;
- ModRefBehavior MRB = getModRefBehavior(CS.getCalledFunction(), Info);
- if (MRB != DoesNotAccessMemory && CS.onlyReadsMemory())
- return OnlyReadsMemory;
- return MRB;
-}
-
-AliasAnalysis::ModRefBehavior
-AliasAnalysis::getModRefBehavior(Function *F,
- std::vector<PointerAccessInfo> *Info) {
- if (F) {
- if (F->doesNotAccessMemory())
- // Can't do better than this.
- return DoesNotAccessMemory;
- if (F->onlyReadsMemory())
- return OnlyReadsMemory;
- if (unsigned id = F->getIntrinsicID())
- return getModRefBehavior(id);
- }
- return UnknownModRefBehavior;
-}
+ if (pointsToConstantMemory(P))
+ return NoModRef;
-AliasAnalysis::ModRefBehavior AliasAnalysis::getModRefBehavior(unsigned iid) {
-#define GET_INTRINSIC_MODREF_BEHAVIOR
-#include "llvm/Intrinsics.gen"
-#undef GET_INTRINSIC_MODREF_BEHAVIOR
+ // Otherwise, a store just writes.
+ return Mod;
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
- ModRefBehavior MRB = getModRefBehavior(CS);
- if (MRB == DoesNotAccessMemory)
+AliasAnalysis::getModRefInfo(const VAArgInst *V, const Value *P, unsigned Size) {
+ // If the va_arg address cannot alias the pointer in question, then the
+ // specified memory cannot be accessed by the va_arg.
+ if (!alias(V->getOperand(0), UnknownSize, P, Size))
return NoModRef;
-
- ModRefResult Mask = ModRef;
- if (MRB == OnlyReadsMemory)
- Mask = Ref;
- else if (MRB == AliasAnalysis::AccessesArguments) {
- bool doesAlias = false;
- for (CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
- AI != AE; ++AI)
- if (!isNoAlias(*AI, ~0U, P, Size)) {
- doesAlias = true;
- break;
- }
- if (!doesAlias)
- return NoModRef;
- }
+ // If the pointer is a pointer to constant memory, then it could not have been
+ // modified by this va_arg.
+ if (pointsToConstantMemory(P))
+ return NoModRef;
- if (!AA) return Mask;
+ // Otherwise, a va_arg reads and writes.
+ return ModRef;
+}
- // If P points to a constant memory location, the call definitely could not
- // modify the memory location.
- if ((Mask & Mod) && AA->pointsToConstantMemory(P))
- Mask = ModRefResult(Mask & ~Mod);
- return ModRefResult(Mask & AA->getModRefInfo(CS, P, Size));
+AliasAnalysis::ModRefBehavior
+AliasAnalysis::getIntrinsicModRefBehavior(unsigned iid) {
+#define GET_INTRINSIC_MODREF_BEHAVIOR
+#include "llvm/Intrinsics.gen"
+#undef GET_INTRINSIC_MODREF_BEHAVIOR
}
// AliasAnalysis destructor: DO NOT move this to the header file for
@@ -206,12 +299,12 @@ bool AliasAnalysis::canInstructionRangeModify(const Instruction &I1,
const Value *Ptr, unsigned Size) {
assert(I1.getParent() == I2.getParent() &&
"Instructions not in same basic block!");
- BasicBlock::iterator I = const_cast<Instruction*>(&I1);
- BasicBlock::iterator E = const_cast<Instruction*>(&I2);
+ BasicBlock::const_iterator I = &I1;
+ BasicBlock::const_iterator E = &I2;
++E; // Convert from inclusive to exclusive range.
for (; I != E; ++I) // Check every instruction in range
- if (getModRefInfo(I, const_cast<Value*>(Ptr), Size) & Mod)
+ if (getModRefInfo(I, Ptr, Size) & Mod)
return true;
return false;
}
@@ -220,7 +313,7 @@ bool AliasAnalysis::canInstructionRangeModify(const Instruction &I1,
/// function.
bool llvm::isNoAliasCall(const Value *V) {
if (isa<CallInst>(V) || isa<InvokeInst>(V))
- return CallSite(const_cast<Instruction*>(cast<Instruction>(V)))
+ return ImmutableCallSite(cast<Instruction>(V))
.paramHasAttr(0, Attribute::NoAlias);
return false;
}
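
The refactored getModRefInfo above works as a chain: each implementation computes the tightest Mask it can prove locally, then intersects it with whatever the next analysis in the chain returns. Because ModRefResult is a two-bit value, "merge in any mask we've managed to compute" is a single bitwise AND, so each link can only remove Mod or Ref bits, never add them. A minimal sketch of that invariant, assuming the encoding used by the AliasAnalysis header of this era (NoModRef = 0, Ref = 1, Mod = 2, ModRef = Ref | Mod):

    #include <cassert>

    enum ModRefResult { NoModRef = 0, Ref = 1, Mod = 2, ModRef = Ref | Mod };

    int main() {
      // The local analysis proved the call cannot write (Mask = Ref); the
      // next analysis in the chain only knows ModRef. ANDing keeps Ref.
      ModRefResult Mask = Ref;
      assert(ModRefResult(ModRef & Mask) == Ref);
      // If any link proves NoModRef (0), the intersection stays NoModRef.
      assert(ModRefResult(NoModRef & Mask) == NoModRef);
      return 0;
    }
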
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp b/contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp
index 1053955..b178041 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp
@@ -34,7 +34,7 @@ namespace {
Module *M;
public:
static char ID; // Class identification, replacement for typeinfo
- AliasAnalysisCounter() : ModulePass(&ID) {
+ AliasAnalysisCounter() : ModulePass(ID) {
No = May = Must = 0;
NoMR = JustRef = JustMod = MR = 0;
}
@@ -87,8 +87,8 @@ namespace {
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&AliasAnalysis::ID))
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
@@ -103,17 +103,18 @@ namespace {
AliasResult alias(const Value *V1, unsigned V1Size,
const Value *V2, unsigned V2Size);
- ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
- ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
+ ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size);
+ ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) {
return AliasAnalysis::getModRefInfo(CS1,CS2);
}
};
}
char AliasAnalysisCounter::ID = 0;
-static RegisterPass<AliasAnalysisCounter>
-X("count-aa", "Count Alias Analysis Query Responses", false, true);
-static RegisterAnalysisGroup<AliasAnalysis> Y(X);
+INITIALIZE_AG_PASS(AliasAnalysisCounter, AliasAnalysis, "count-aa",
+ "Count Alias Analysis Query Responses", false, true, false);
ModulePass *llvm::createAliasAnalysisCounterPass() {
return new AliasAnalysisCounter();
@@ -146,7 +147,8 @@ AliasAnalysisCounter::alias(const Value *V1, unsigned V1Size,
}
AliasAnalysis::ModRefResult
-AliasAnalysisCounter::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
+AliasAnalysisCounter::getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size) {
ModRefResult R = getAnalysis<AliasAnalysis>().getModRefInfo(CS, P, Size);
const char *MRString;
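
AliasAnalysisCounter is a forwarding wrapper: it delegates each query to the chained AliasAnalysis, tallies the result, and returns the answer unchanged. A minimal sketch of that pattern with toy types (not the LLVM interface):

    #include <cstdio>

    enum ModRefResult { NoModRef, Ref, Mod, ModRef };

    struct CountingWrapper {
      unsigned Counts[4] = {0, 0, 0, 0};
      ModRefResult (*Chained)(int CS, int P); // stand-in for the next AA

      ModRefResult getModRefInfo(int CS, int P) {
        ModRefResult R = Chained(CS, P); // forward the query unchanged
        ++Counts[R];                     // tally by result kind
        return R;                        // the caller sees the same answer
      }
    };

    static ModRefResult alwaysRef(int, int) { return Ref; }

    int main() {
      CountingWrapper W;
      W.Chained = alwaysRef;
      W.getModRefInfo(1, 2);
      printf("Ref answers: %u\n", W.Counts[Ref]); // prints: Ref answers: 1
      return 0;
    }
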
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp b/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
index 37ee9fc..ce363cb 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -50,7 +50,7 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
- AAEval() : FunctionPass(&ID) {}
+ AAEval() : FunctionPass(ID) {}
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>();
@@ -74,8 +74,8 @@ namespace {
}
char AAEval::ID = 0;
-static RegisterPass<AAEval>
-X("aa-eval", "Exhaustive Alias Analysis Precision Evaluator", false, true);
+INITIALIZE_PASS(AAEval, "aa-eval",
+ "Exhaustive Alias Analysis Precision Evaluator", false, true);
FunctionPass *llvm::createAAEvalPass() { return new AAEval(); }
@@ -107,6 +107,15 @@ PrintModRefResults(const char *Msg, bool P, Instruction *I, Value *Ptr,
}
}
+static inline void
+PrintModRefResults(const char *Msg, bool P, CallSite CSA, CallSite CSB,
+ Module *M) {
+ if (P) {
+ errs() << " " << Msg << ": " << *CSA.getInstruction()
+ << " <-> " << *CSB.getInstruction() << '\n';
+ }
+}
+
static inline bool isInterestingPointer(Value *V) {
return V->getType()->isPointerTy()
&& !isa<ConstantPointerNull>(V);
@@ -126,8 +135,7 @@ bool AAEval::runOnFunction(Function &F) {
if (I->getType()->isPointerTy()) // Add all pointer instructions.
Pointers.insert(&*I);
Instruction &Inst = *I;
- CallSite CS = CallSite::get(&Inst);
- if (CS) {
+ if (CallSite CS = cast<Value>(&Inst)) {
Value *Callee = CS.getCalledValue();
// Skip actual functions for direct function calls.
if (!isa<Function>(Callee) && isInterestingPointer(Callee))
@@ -137,6 +145,7 @@ bool AAEval::runOnFunction(Function &F) {
AI != AE; ++AI)
if (isInterestingPointer(*AI))
Pointers.insert(*AI);
+ CallSites.insert(CS);
} else {
// Consider all operands.
for (Instruction::op_iterator OI = Inst.op_begin(), OE = Inst.op_end();
@@ -144,8 +153,6 @@ bool AAEval::runOnFunction(Function &F) {
if (isInterestingPointer(*OI))
Pointers.insert(*OI);
}
-
- if (CS.getInstruction()) CallSites.insert(CS);
}
if (PrintNoAlias || PrintMayAlias || PrintMustAlias ||
@@ -197,13 +204,13 @@ bool AAEval::runOnFunction(Function &F) {
PrintModRefResults("NoModRef", PrintNoModRef, I, *V, F.getParent());
++NoModRef; break;
case AliasAnalysis::Mod:
- PrintModRefResults(" Mod", PrintMod, I, *V, F.getParent());
+ PrintModRefResults("Just Mod", PrintMod, I, *V, F.getParent());
++Mod; break;
case AliasAnalysis::Ref:
- PrintModRefResults(" Ref", PrintRef, I, *V, F.getParent());
+ PrintModRefResults("Just Ref", PrintRef, I, *V, F.getParent());
++Ref; break;
case AliasAnalysis::ModRef:
- PrintModRefResults(" ModRef", PrintModRef, I, *V, F.getParent());
+ PrintModRefResults("Both ModRef", PrintModRef, I, *V, F.getParent());
++ModRef; break;
default:
errs() << "Unknown alias query result!\n";
@@ -211,6 +218,29 @@ bool AAEval::runOnFunction(Function &F) {
}
}
+ // Mod/ref alias analysis: compare all pairs of calls
+ for (SetVector<CallSite>::iterator C = CallSites.begin(),
+ Ce = CallSites.end(); C != Ce; ++C) {
+ for (SetVector<CallSite>::iterator D = CallSites.begin(); D != Ce; ++D) {
+ if (D == C)
+ continue;
+ switch (AA.getModRefInfo(*C, *D)) {
+ case AliasAnalysis::NoModRef:
+ PrintModRefResults("NoModRef", PrintNoModRef, *C, *D, F.getParent());
+ ++NoModRef; break;
+ case AliasAnalysis::Mod:
+ PrintModRefResults("Just Mod", PrintMod, *C, *D, F.getParent());
+ ++Mod; break;
+ case AliasAnalysis::Ref:
+ PrintModRefResults("Just Ref", PrintRef, *C, *D, F.getParent());
+ ++Ref; break;
+ case AliasAnalysis::ModRef:
+ PrintModRefResults("Both ModRef", PrintModRef, *C, *D, F.getParent());
+ ++ModRef; break;
+ }
+ }
+ }
+
return false;
}
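
Note that the new call-site loop visits ordered pairs: both getModRefInfo(*C, *D) and getModRefInfo(*D, *C) are queried, because the relation is not symmetric (the result describes what the first call does to memory the second call accesses). A function with N interesting call sites therefore issues N*(N-1) queries, as this toy enumeration shows:

    #include <cstdio>
    #include <vector>

    int main() {
      // Four stand-in "call sites"; the double loop with the D == C skip
      // issues N*(N-1) = 12 ordered-pair queries.
      std::vector<int> CallSites = {0, 1, 2, 3};
      unsigned Queries = 0;
      for (int C : CallSites)
        for (int D : CallSites) {
          if (D == C) continue;        // skip comparing a call with itself
          ++Queries;                   // one getModRefInfo(C, D) query
        }
      printf("%u queries\n", Queries); // prints: 12 queries
      return 0;
    }
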
diff --git a/contrib/llvm/lib/Analysis/AliasDebugger.cpp b/contrib/llvm/lib/Analysis/AliasDebugger.cpp
index bc2d9c55..b9fe646 100644
--- a/contrib/llvm/lib/Analysis/AliasDebugger.cpp
+++ b/contrib/llvm/lib/Analysis/AliasDebugger.cpp
@@ -39,7 +39,7 @@ namespace {
public:
static char ID; // Class identification, replacement for typeinfo
- AliasDebugger() : ModulePass(&ID) {}
+ AliasDebugger() : ModulePass(ID) {}
bool runOnModule(Module &M) {
InitializeAliasAnalysis(this); // set up super class
@@ -83,8 +83,8 @@ namespace {
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&AliasAnalysis::ID))
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
@@ -99,12 +99,14 @@ namespace {
return AliasAnalysis::alias(V1, V1Size, V2, V2Size);
}
- ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size) {
+ ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size) {
assert(Vals.find(P) != Vals.end() && "Never seen value in AA before");
return AliasAnalysis::getModRefInfo(CS, P, Size);
}
- ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
+ ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) {
return AliasAnalysis::getModRefInfo(CS1,CS2);
}
@@ -126,9 +128,8 @@ namespace {
}
char AliasDebugger::ID = 0;
-static RegisterPass<AliasDebugger>
-X("debug-aa", "AA use debugger", false, true);
-static RegisterAnalysisGroup<AliasAnalysis> Y(X);
+INITIALIZE_AG_PASS(AliasDebugger, AliasAnalysis, "debug-aa",
+ "AA use debugger", false, true, false);
Pass *llvm::createAliasDebugger() { return new AliasDebugger(); }
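
The getAdjustedAnalysisPointer override that recurs through these passes exists because each pass inherits both a Pass base class and AliasAnalysis; under multiple inheritance the AliasAnalysis subobject generally sits at a nonzero offset, so the raw pass pointer must be adjusted before it can serve as an AliasAnalysis*. A standalone illustration with toy types:

    #include <cstdio>

    struct PassLike { virtual ~PassLike() {} int PassData; };
    struct AnalysisLike { virtual ~AnalysisLike() {} int AnalysisData; };
    struct BothLike : PassLike, AnalysisLike {};

    int main() {
      BothLike P;
      // static_cast adjusts the pointer by the subobject offset; handing
      // out the unadjusted 'this' (what getAdjustedAnalysisPointer guards
      // against) would yield an invalid AnalysisLike*.
      printf("as pass:     %p\n", (void *)&P);
      printf("as analysis: %p\n", (void *)static_cast<AnalysisLike *>(&P));
      return 0;
    }
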
diff --git a/contrib/llvm/lib/Analysis/AliasSetTracker.cpp b/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
index 02aff50..e74543b 100644
--- a/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
+++ b/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
@@ -22,7 +22,6 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstIterator.h"
-#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -35,6 +34,7 @@ void AliasSet::mergeSetIn(AliasSet &AS, AliasSetTracker &AST) {
// Update the alias and access types of this set...
AccessTy |= AS.AccessTy;
AliasTy |= AS.AliasTy;
+ Volatile |= AS.Volatile;
if (AliasTy == MustAlias) {
// Check that these two merged sets really are must aliases. Since both
@@ -111,11 +111,11 @@ void AliasSet::addPointer(AliasSetTracker &AST, PointerRec &Entry,
*PtrListEnd = &Entry;
PtrListEnd = Entry.setPrevInList(PtrListEnd);
assert(*PtrListEnd == 0 && "End of list is not null?");
- addRef(); // Entry points to alias set...
+ addRef(); // Entry points to alias set.
}
void AliasSet::addCallSite(CallSite CS, AliasAnalysis &AA) {
- CallSites.push_back(CS);
+ CallSites.push_back(CS.getInstruction());
AliasAnalysis::ModRefBehavior Behavior = AA.getModRefBehavior(CS);
if (Behavior == AliasAnalysis::DoesNotAccessMemory)
@@ -140,7 +140,7 @@ bool AliasSet::aliasesPointer(const Value *Ptr, unsigned Size,
assert(CallSites.empty() && "Illegal must alias set!");
// If this is a set of MustAliases, only check to see if the pointer aliases
- // SOME value in the set...
+ // SOME value in the set.
PointerRec *SomePtr = getSomePointer();
assert(SomePtr && "Empty must-alias set??");
return AA.alias(SomePtr->getValue(), SomePtr->getSize(), Ptr, Size);
@@ -155,8 +155,7 @@ bool AliasSet::aliasesPointer(const Value *Ptr, unsigned Size,
// Check the call sites list and invoke list...
if (!CallSites.empty()) {
for (unsigned i = 0, e = CallSites.size(); i != e; ++i)
- if (AA.getModRefInfo(CallSites[i], const_cast<Value*>(Ptr), Size)
- != AliasAnalysis::NoModRef)
+ if (AA.getModRefInfo(CallSites[i], Ptr, Size) != AliasAnalysis::NoModRef)
return true;
}
@@ -167,10 +166,11 @@ bool AliasSet::aliasesCallSite(CallSite CS, AliasAnalysis &AA) const {
if (AA.doesNotAccessMemory(CS))
return false;
- for (unsigned i = 0, e = CallSites.size(); i != e; ++i)
- if (AA.getModRefInfo(CallSites[i], CS) != AliasAnalysis::NoModRef ||
- AA.getModRefInfo(CS, CallSites[i]) != AliasAnalysis::NoModRef)
+ for (unsigned i = 0, e = CallSites.size(); i != e; ++i) {
+ if (AA.getModRefInfo(getCallSite(i), CS) != AliasAnalysis::NoModRef ||
+ AA.getModRefInfo(CS, getCallSite(i)) != AliasAnalysis::NoModRef)
return true;
+ }
for (iterator I = begin(), E = end(); I != E; ++I)
if (AA.getModRefInfo(CS, I.getPointer(), I.getSize()) !=
@@ -200,14 +200,15 @@ void AliasSetTracker::clear() {
AliasSet *AliasSetTracker::findAliasSetForPointer(const Value *Ptr,
unsigned Size) {
AliasSet *FoundSet = 0;
- for (iterator I = begin(), E = end(); I != E; ++I)
- if (!I->Forward && I->aliasesPointer(Ptr, Size, AA)) {
- if (FoundSet == 0) { // If this is the first alias set ptr can go into.
- FoundSet = I; // Remember it.
- } else { // Otherwise, we must merge the sets.
- FoundSet->mergeSetIn(*I, *this); // Merge in contents.
- }
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ if (I->Forward || !I->aliasesPointer(Ptr, Size, AA)) continue;
+
+ if (FoundSet == 0) { // If this is the first alias set ptr can go into.
+ FoundSet = I; // Remember it.
+ } else { // Otherwise, we must merge the sets.
+ FoundSet->mergeSetIn(*I, *this); // Merge in contents.
}
+ }
return FoundSet;
}
@@ -226,15 +227,15 @@ bool AliasSetTracker::containsPointer(Value *Ptr, unsigned Size) const {
AliasSet *AliasSetTracker::findAliasSetForCallSite(CallSite CS) {
AliasSet *FoundSet = 0;
- for (iterator I = begin(), E = end(); I != E; ++I)
- if (!I->Forward && I->aliasesCallSite(CS, AA)) {
- if (FoundSet == 0) { // If this is the first alias set ptr can go into.
- FoundSet = I; // Remember it.
- } else if (!I->Forward) { // Otherwise, we must merge the sets.
- FoundSet->mergeSetIn(*I, *this); // Merge in contents.
- }
- }
-
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ if (I->Forward || !I->aliasesCallSite(CS, AA))
+ continue;
+
+ if (FoundSet == 0) // If this is the first alias set ptr can go into.
+ FoundSet = I; // Remember it.
+ else if (!I->Forward) // Otherwise, we must merge the sets.
+ FoundSet->mergeSetIn(*I, *this); // Merge in contents.
+ }
return FoundSet;
}
@@ -247,22 +248,24 @@ AliasSet &AliasSetTracker::getAliasSetForPointer(Value *Pointer, unsigned Size,
bool *New) {
AliasSet::PointerRec &Entry = getEntryFor(Pointer);
- // Check to see if the pointer is already known...
+ // Check to see if the pointer is already known.
if (Entry.hasAliasSet()) {
Entry.updateSize(Size);
// Return the set!
return *Entry.getAliasSet(*this)->getForwardedTarget(*this);
- } else if (AliasSet *AS = findAliasSetForPointer(Pointer, Size)) {
- // Add it to the alias set it aliases...
+ }
+
+ if (AliasSet *AS = findAliasSetForPointer(Pointer, Size)) {
+ // Add it to the alias set it aliases.
AS->addPointer(*this, Entry, Size);
return *AS;
- } else {
- if (New) *New = true;
- // Otherwise create a new alias set to hold the loaded pointer...
- AliasSets.push_back(new AliasSet());
- AliasSets.back().addPointer(*this, Entry, Size);
- return AliasSets.back();
}
+
+ if (New) *New = true;
+ // Otherwise create a new alias set to hold the loaded pointer.
+ AliasSets.push_back(new AliasSet());
+ AliasSets.back().addPointer(*this, Entry, Size);
+ return AliasSets.back();
}
bool AliasSetTracker::add(Value *Ptr, unsigned Size) {
@@ -305,28 +308,27 @@ bool AliasSetTracker::add(CallSite CS) {
return true; // doesn't alias anything
AliasSet *AS = findAliasSetForCallSite(CS);
- if (!AS) {
- AliasSets.push_back(new AliasSet());
- AS = &AliasSets.back();
- AS->addCallSite(CS, AA);
- return true;
- } else {
+ if (AS) {
AS->addCallSite(CS, AA);
return false;
}
+ AliasSets.push_back(new AliasSet());
+ AS = &AliasSets.back();
+ AS->addCallSite(CS, AA);
+ return true;
}
bool AliasSetTracker::add(Instruction *I) {
- // Dispatch to one of the other add methods...
+ // Dispatch to one of the other add methods.
if (LoadInst *LI = dyn_cast<LoadInst>(I))
return add(LI);
- else if (StoreInst *SI = dyn_cast<StoreInst>(I))
+ if (StoreInst *SI = dyn_cast<StoreInst>(I))
return add(SI);
- else if (CallInst *CI = dyn_cast<CallInst>(I))
+ if (CallInst *CI = dyn_cast<CallInst>(I))
return add(CI);
- else if (InvokeInst *II = dyn_cast<InvokeInst>(I))
+ if (InvokeInst *II = dyn_cast<InvokeInst>(I))
return add(II);
- else if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
+ if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
return add(VAAI);
return true;
}
@@ -343,23 +345,23 @@ void AliasSetTracker::add(const AliasSetTracker &AST) {
// Loop over all of the alias sets in AST, adding the pointers contained
// therein into the current alias sets. This can cause alias sets to be
// merged together in the current AST.
- for (const_iterator I = AST.begin(), E = AST.end(); I != E; ++I)
- if (!I->Forward) { // Ignore forwarding alias sets
- AliasSet &AS = const_cast<AliasSet&>(*I);
-
- // If there are any call sites in the alias set, add them to this AST.
- for (unsigned i = 0, e = AS.CallSites.size(); i != e; ++i)
- add(AS.CallSites[i]);
-
- // Loop over all of the pointers in this alias set...
- AliasSet::iterator I = AS.begin(), E = AS.end();
- bool X;
- for (; I != E; ++I) {
- AliasSet &NewAS = addPointer(I.getPointer(), I.getSize(),
- (AliasSet::AccessType)AS.AccessTy, X);
- if (AS.isVolatile()) NewAS.setVolatile();
- }
+ for (const_iterator I = AST.begin(), E = AST.end(); I != E; ++I) {
+ if (I->Forward) continue; // Ignore forwarding alias sets
+
+ AliasSet &AS = const_cast<AliasSet&>(*I);
+
+ // If there are any call sites in the alias set, add them to this AST.
+ for (unsigned i = 0, e = AS.CallSites.size(); i != e; ++i)
+ add(AS.CallSites[i]);
+
+ // Loop over all of the pointers in this alias set.
+ bool X;
+ for (AliasSet::iterator ASI = AS.begin(), E = AS.end(); ASI != E; ++ASI) {
+ AliasSet &NewAS = addPointer(ASI.getPointer(), ASI.getSize(),
+ (AliasSet::AccessType)AS.AccessTy, X);
+ if (AS.isVolatile()) NewAS.setVolatile();
}
+ }
}
/// remove - Remove the specified (potentially non-empty) alias set from the
@@ -435,11 +437,11 @@ bool AliasSetTracker::remove(Instruction *I) {
// Dispatch to one of the other remove methods...
if (LoadInst *LI = dyn_cast<LoadInst>(I))
return remove(LI);
- else if (StoreInst *SI = dyn_cast<StoreInst>(I))
+ if (StoreInst *SI = dyn_cast<StoreInst>(I))
return remove(SI);
- else if (CallInst *CI = dyn_cast<CallInst>(I))
+ if (CallInst *CI = dyn_cast<CallInst>(I))
return remove(CI);
- else if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
+ if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
return remove(VAAI);
return true;
}
@@ -455,12 +457,17 @@ void AliasSetTracker::deleteValue(Value *PtrVal) {
AA.deleteValue(PtrVal);
// If this is a call instruction, remove the callsite from the appropriate
- // AliasSet.
- CallSite CS = CallSite::get(PtrVal);
- if (CS.getInstruction())
- if (!AA.doesNotAccessMemory(CS))
- if (AliasSet *AS = findAliasSetForCallSite(CS))
- AS->removeCallSite(CS);
+ // AliasSet (if present).
+ if (CallSite CS = PtrVal) {
+ if (!AA.doesNotAccessMemory(CS)) {
+ // Scan all the alias sets to see if this call site is contained.
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ if (I->Forward) continue;
+
+ I->removeCallSite(CS);
+ }
+ }
+ }
// First, look up the PointerRec for this pointer.
PointerMapType::iterator I = PointerMap.find(PtrVal);
@@ -510,7 +517,7 @@ void AliasSetTracker::copyValue(Value *From, Value *To) {
//===----------------------------------------------------------------------===//
void AliasSet::print(raw_ostream &OS) const {
- OS << " AliasSet[" << format("0x%p", (void*)this) << "," << RefCount << "] ";
+ OS << " AliasSet[" << (void*)this << ", " << RefCount << "] ";
OS << (AliasTy == MustAlias ? "must" : "may") << " alias, ";
switch (AccessTy) {
case NoModRef: OS << "No access "; break;
@@ -536,7 +543,7 @@ void AliasSet::print(raw_ostream &OS) const {
OS << "\n " << CallSites.size() << " Call Sites: ";
for (unsigned i = 0, e = CallSites.size(); i != e; ++i) {
if (i) OS << ", ";
- WriteAsOperand(OS, CallSites[i].getCalledValue());
+ WriteAsOperand(OS, CallSites[i]);
}
}
OS << "\n";
@@ -580,7 +587,7 @@ namespace {
AliasSetTracker *Tracker;
public:
static char ID; // Pass identification, replacement for typeid
- AliasSetPrinter() : FunctionPass(&ID) {}
+ AliasSetPrinter() : FunctionPass(ID) {}
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
@@ -600,5 +607,5 @@ namespace {
}
char AliasSetPrinter::ID = 0;
-static RegisterPass<AliasSetPrinter>
-X("print-alias-sets", "Alias Set Printer", false, true);
+INITIALIZE_PASS(AliasSetPrinter, "print-alias-sets",
+ "Alias Set Printer", false, true);
diff --git a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 4f53a6d..113c72b 100644
--- a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -18,6 +18,7 @@
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
+#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
@@ -30,6 +31,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/GetElementPtrTypeIterator.h"
#include <algorithm>
using namespace llvm;
@@ -137,8 +139,8 @@ namespace {
///
struct NoAA : public ImmutablePass, public AliasAnalysis {
static char ID; // Class identification, replacement for typeinfo
- NoAA() : ImmutablePass(&ID) {}
- explicit NoAA(void *PID) : ImmutablePass(PID) { }
+ NoAA() : ImmutablePass(ID) {}
+ explicit NoAA(char &PID) : ImmutablePass(PID) { }
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
}
@@ -152,16 +154,20 @@ namespace {
return MayAlias;
}
- virtual void getArgumentAccesses(Function *F, CallSite CS,
- std::vector<PointerAccessInfo> &Info) {
- llvm_unreachable("This method may not be called on this function!");
+ virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
+ return UnknownModRefBehavior;
+ }
+ virtual ModRefBehavior getModRefBehavior(const Function *F) {
+ return UnknownModRefBehavior;
}
virtual bool pointsToConstantMemory(const Value *P) { return false; }
- virtual ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size) {
+ virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size) {
return ModRef;
}
- virtual ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
+ virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) {
return ModRef;
}
@@ -169,11 +175,11 @@ namespace {
virtual void copyValue(Value *From, Value *To) {}
/// getAdjustedAnalysisPointer - This method is used when a pass implements
- /// an analysis interface through multiple inheritance. If needed, it should
- /// override this to adjust the this pointer as needed for the specified pass
- /// info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&AliasAnalysis::ID))
+ /// an analysis interface through multiple inheritance. If needed, it
+ /// should override this to adjust the this pointer as needed for the
+ /// specified pass info.
+ virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ if (ID == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
@@ -182,15 +188,279 @@ namespace {
// Register this pass...
char NoAA::ID = 0;
-static RegisterPass<NoAA>
-U("no-aa", "No Alias Analysis (always returns 'may' alias)", true, true);
-
-// Declare that we implement the AliasAnalysis interface
-static RegisterAnalysisGroup<AliasAnalysis> V(U);
+INITIALIZE_AG_PASS(NoAA, AliasAnalysis, "no-aa",
+ "No Alias Analysis (always returns 'may' alias)",
+ true, true, false);
ImmutablePass *llvm::createNoAAPass() { return new NoAA(); }
//===----------------------------------------------------------------------===//
+// GetElementPtr Instruction Decomposition and Analysis
+//===----------------------------------------------------------------------===//
+
+namespace {
+ enum ExtensionKind {
+ EK_NotExtended,
+ EK_SignExt,
+ EK_ZeroExt
+ };
+
+ struct VariableGEPIndex {
+ const Value *V;
+ ExtensionKind Extension;
+ int64_t Scale;
+ };
+}
+
+
+/// GetLinearExpression - Analyze the specified value as a linear expression:
+/// "A*V + B", where A and B are constant integers. Return the scale and offset
+/// values as APInts and return V as a Value*, and return whether we looked
+/// through any sign or zero extends. The incoming Value is known to have
+/// IntegerType and it may already be sign or zero extended.
+///
+/// Note that this looks through extends, so the high bits may not be
+/// represented in the result.
+static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
+ ExtensionKind &Extension,
+ const TargetData &TD, unsigned Depth) {
+ assert(V->getType()->isIntegerTy() && "Not an integer value");
+
+ // Limit our recursion depth.
+ if (Depth == 6) {
+ Scale = 1;
+ Offset = 0;
+ return V;
+ }
+
+ if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
+ if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
+ switch (BOp->getOpcode()) {
+ default: break;
+ case Instruction::Or:
+ // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
+ // analyze it.
+ if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), &TD))
+ break;
+ // FALL THROUGH.
+ case Instruction::Add:
+ V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
+ TD, Depth+1);
+ Offset += RHSC->getValue();
+ return V;
+ case Instruction::Mul:
+ V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
+ TD, Depth+1);
+ Offset *= RHSC->getValue();
+ Scale *= RHSC->getValue();
+ return V;
+ case Instruction::Shl:
+ V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
+ TD, Depth+1);
+ Offset <<= RHSC->getValue().getLimitedValue();
+ Scale <<= RHSC->getValue().getLimitedValue();
+ return V;
+ }
+ }
+ }
+
+ // Since GEP indices are sign extended anyway, we don't care about the high
+ // bits of a sign or zero extended value - just scales and offsets. The
+ // extensions have to be consistent though.
+ if ((isa<SExtInst>(V) && Extension != EK_ZeroExt) ||
+ (isa<ZExtInst>(V) && Extension != EK_SignExt)) {
+ Value *CastOp = cast<CastInst>(V)->getOperand(0);
+ unsigned OldWidth = Scale.getBitWidth();
+ unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
+ Scale.trunc(SmallWidth);
+ Offset.trunc(SmallWidth);
+ Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;
+
+ Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension,
+ TD, Depth+1);
+ Scale.zext(OldWidth);
+ Offset.zext(OldWidth);
+
+ return Result;
+ }
+
+ Scale = 1;
+ Offset = 0;
+ return V;
+}
+
+/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
+/// into a base pointer with a constant offset and a number of scaled symbolic
+/// offsets.
+///
+/// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
+/// the VarIndices vector) are Value*'s that are known to be scaled by the
+/// specified amount, but which may have other unrepresented high bits. As such,
+/// the gep cannot necessarily be reconstructed from its decomposed form.
+///
+/// When TargetData is around, this function is capable of analyzing everything
+/// that Value::getUnderlyingObject() can look through. When not, it just looks
+/// through pointer casts.
+///
+static const Value *
+DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
+ SmallVectorImpl<VariableGEPIndex> &VarIndices,
+ const TargetData *TD) {
+ // Limit recursion depth to limit compile time in crazy cases.
+ unsigned MaxLookup = 6;
+
+ BaseOffs = 0;
+ do {
+ // See if this is a bitcast or GEP.
+ const Operator *Op = dyn_cast<Operator>(V);
+ if (Op == 0) {
+ // The only non-operator case we can handle are GlobalAliases.
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ if (!GA->mayBeOverridden()) {
+ V = GA->getAliasee();
+ continue;
+ }
+ }
+ return V;
+ }
+
+ if (Op->getOpcode() == Instruction::BitCast) {
+ V = Op->getOperand(0);
+ continue;
+ }
+
+ const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
+ if (GEPOp == 0)
+ return V;
+
+ // Don't attempt to analyze GEPs over unsized objects.
+ if (!cast<PointerType>(GEPOp->getOperand(0)->getType())
+ ->getElementType()->isSized())
+ return V;
+
+ // If we are lacking TargetData information, we can't compute the offsets of
+ // elements computed by GEPs. However, we can handle bitcast equivalent
+ // GEPs.
+ if (TD == 0) {
+ if (!GEPOp->hasAllZeroIndices())
+ return V;
+ V = GEPOp->getOperand(0);
+ continue;
+ }
+
+ // Walk the indices of the GEP, accumulating them into BaseOffs/VarIndices.
+ gep_type_iterator GTI = gep_type_begin(GEPOp);
+ for (User::const_op_iterator I = GEPOp->op_begin()+1,
+ E = GEPOp->op_end(); I != E; ++I) {
+ Value *Index = *I;
+ // Compute the (potentially symbolic) offset in bytes for this index.
+ if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
+ // For a struct, add the member offset.
+ unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
+ if (FieldNo == 0) continue;
+
+ BaseOffs += TD->getStructLayout(STy)->getElementOffset(FieldNo);
+ continue;
+ }
+
+ // For an array/pointer, add the element offset, explicitly scaled.
+ if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
+ if (CIdx->isZero()) continue;
+ BaseOffs += TD->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
+ continue;
+ }
+
+ uint64_t Scale = TD->getTypeAllocSize(*GTI);
+ ExtensionKind Extension = EK_NotExtended;
+
+ // If the integer type is smaller than the pointer size, it is implicitly
+ // sign extended to pointer size.
+ unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
+ if (TD->getPointerSizeInBits() > Width)
+ Extension = EK_SignExt;
+
+ // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
+ APInt IndexScale(Width, 0), IndexOffset(Width, 0);
+ Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
+ *TD, 0);
+
+ // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
+ // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
+ BaseOffs += IndexOffset.getZExtValue()*Scale;
+ Scale *= IndexScale.getZExtValue();
+
+
+ // If we already had an occurrence of this index variable, merge this
+ // scale into it. For example, we want to handle:
+ // A[x][x] -> x*16 + x*4 -> x*20
+ // This also ensures that 'x' only appears in the index list once.
+ for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
+ if (VarIndices[i].V == Index &&
+ VarIndices[i].Extension == Extension) {
+ Scale += VarIndices[i].Scale;
+ VarIndices.erase(VarIndices.begin()+i);
+ break;
+ }
+ }
+
+ // Make sure that we have a scale that makes sense for this target's
+ // pointer size.
+ if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
+ Scale <<= ShiftBits;
+ Scale >>= ShiftBits;
+ }
+
+ if (Scale) {
+ VariableGEPIndex Entry = {Index, Extension, Scale};
+ VarIndices.push_back(Entry);
+ }
+ }
+
+ // Analyze the base pointer next.
+ V = GEPOp->getOperand(0);
+ } while (--MaxLookup);
+
+ // If the chain of expressions is too deep, just return early.
+ return V;
+}
+
+/// GetIndexDifference - Dest and Src are the variable indices from two
+/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
+/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
+/// difference between the two pointers.
+static void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
+ const SmallVectorImpl<VariableGEPIndex> &Src) {
+ if (Src.empty()) return;
+
+ for (unsigned i = 0, e = Src.size(); i != e; ++i) {
+ const Value *V = Src[i].V;
+ ExtensionKind Extension = Src[i].Extension;
+ int64_t Scale = Src[i].Scale;
+
+ // Find V in Dest. This is N^2, but pointer indices almost never have more
+ // than a few variable indexes.
+ for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
+ if (Dest[j].V != V || Dest[j].Extension != Extension) continue;
+
+ // If we found it, subtract off Scale V's from the entry in Dest. If it
+ // goes to zero, remove the entry.
+ if (Dest[j].Scale != Scale)
+ Dest[j].Scale -= Scale;
+ else
+ Dest.erase(Dest.begin()+j);
+ Scale = 0;
+ break;
+ }
+
+ // If we didn't consume this entry, add it to the end of the Dest list.
+ if (Scale) {
+ VariableGEPIndex Entry = { V, Extension, -Scale };
+ Dest.push_back(Entry);
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//
@@ -220,10 +490,10 @@ namespace {
/// derives from the NoAA class.
struct BasicAliasAnalysis : public NoAA {
static char ID; // Class identification, replacement for typeinfo
- BasicAliasAnalysis() : NoAA(&ID) {}
+ BasicAliasAnalysis() : NoAA(ID) {}
- AliasResult alias(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size) {
+ virtual AliasResult alias(const Value *V1, unsigned V1Size,
+ const Value *V2, unsigned V2Size) {
assert(Visited.empty() && "Visited must be cleared after use!");
assert(notDifferentParent(V1, V2) &&
"BasicAliasAnalysis doesn't support interprocedural queries.");
@@ -232,19 +502,33 @@ namespace {
return Alias;
}
- ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
- ModRefResult getModRefInfo(CallSite CS1, CallSite CS2);
+ virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size);
+
+ virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) {
+ // The AliasAnalysis base class has some smarts, let's use them.
+ return AliasAnalysis::getModRefInfo(CS1, CS2);
+ }
/// pointsToConstantMemory - Chase pointers until we find a (constant
/// global) or not.
- bool pointsToConstantMemory(const Value *P);
+ virtual bool pointsToConstantMemory(const Value *P);
+
+ /// getModRefBehavior - Return the behavior when calling the given
+ /// call site.
+ virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+
+ /// getModRefBehavior - Return the behavior when calling the given function.
+ /// For use when the call site is not known.
+ virtual ModRefBehavior getModRefBehavior(const Function *F);
/// getAdjustedAnalysisPointer - This method is used when a pass implements
- /// an analysis interface through multiple inheritance. If needed, it should
- /// override this to adjust the this pointer as needed for the specified pass
- /// info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&AliasAnalysis::ID))
+ /// an analysis interface through multiple inheritance. If needed, it
+ /// should override this to adjust the this pointer as needed for the
+ /// specified pass info.
+ virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ if (ID == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
@@ -275,11 +559,9 @@ namespace {
// Register this pass...
char BasicAliasAnalysis::ID = 0;
-static RegisterPass<BasicAliasAnalysis>
-X("basicaa", "Basic Alias Analysis (default AA impl)", false, true);
-
-// Declare that we implement the AliasAnalysis interface
-static RegisterAnalysisGroup<AliasAnalysis, true> Y(X);
+INITIALIZE_AG_PASS(BasicAliasAnalysis, AliasAnalysis, "basicaa",
+ "Basic Alias Analysis (default AA impl)",
+ false, true, true);
ImmutablePass *llvm::createBasicAliasAnalysisPass() {
return new BasicAliasAnalysis();
@@ -295,16 +577,50 @@ bool BasicAliasAnalysis::pointsToConstantMemory(const Value *P) {
// global to be marked constant in some modules and non-constant in others.
// GV may even be a declaration, not a definition.
return GV->isConstant();
- return false;
+
+ return NoAA::pointsToConstantMemory(P);
}
+/// getModRefBehavior - Return the behavior when calling the given call site.
+AliasAnalysis::ModRefBehavior
+BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
+ if (CS.doesNotAccessMemory())
+ // Can't do better than this.
+ return DoesNotAccessMemory;
+
+ ModRefBehavior Min = UnknownModRefBehavior;
+
+ // If the callsite knows it only reads memory, don't return worse
+ // than that.
+ if (CS.onlyReadsMemory())
+ Min = OnlyReadsMemory;
+
+ // The AliasAnalysis base class has some smarts, let's use them.
+ return std::min(AliasAnalysis::getModRefBehavior(CS), Min);
+}
+
+/// getModRefBehavior - Return the behavior when calling the given function.
+/// For use when the call site is not known.
+AliasAnalysis::ModRefBehavior
+BasicAliasAnalysis::getModRefBehavior(const Function *F) {
+ if (F->doesNotAccessMemory())
+ // Can't do better than this.
+ return DoesNotAccessMemory;
+ if (F->onlyReadsMemory())
+ return OnlyReadsMemory;
+ if (unsigned id = F->getIntrinsicID())
+ return getIntrinsicModRefBehavior(id);
+
+ return NoAA::getModRefBehavior(F);
+}
/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
/// simple "address taken" analysis on local objects.
AliasAnalysis::ModRefResult
-BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
+BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size) {
assert(notDifferentParent(CS.getInstruction(), P) &&
"AliasAnalysis query involving multiple functions!");
@@ -316,7 +632,7 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
// the current function not to the current function, and a tail callee
// may reference them.
if (isa<AllocaInst>(Object))
- if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
+ if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
if (CI->isTailCall())
return NoModRef;
@@ -327,7 +643,7 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
isNonEscapingLocalObject(Object)) {
bool PassedAsArg = false;
unsigned ArgNo = 0;
- for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
+ for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
CI != CE; ++CI, ++ArgNo) {
// Only look at the no-capture pointer arguments.
if (!(*CI)->getType()->isPointerTy() ||
@@ -338,7 +654,7 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
// is impossible to alias the pointer we're checking. If not, we have to
// assume that the call could touch the pointer, even though it doesn't
// escape.
- if (!isNoAlias(cast<Value>(CI), ~0U, P, ~0U)) {
+ if (!isNoAlias(cast<Value>(CI), UnknownSize, P, UnknownSize)) {
PassedAsArg = true;
break;
}
@@ -349,127 +665,76 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
}
// Finally, handle specific knowledge of intrinsics.
- IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
- if (II == 0)
- return AliasAnalysis::getModRefInfo(CS, P, Size);
-
- switch (II->getIntrinsicID()) {
- default: break;
- case Intrinsic::memcpy:
- case Intrinsic::memmove: {
- unsigned Len = ~0U;
- if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
- Len = LenCI->getZExtValue();
- Value *Dest = II->getArgOperand(0);
- Value *Src = II->getArgOperand(1);
- if (isNoAlias(Dest, Len, P, Size)) {
- if (isNoAlias(Src, Len, P, Size))
- return NoModRef;
- return Ref;
- }
- break;
- }
- case Intrinsic::memset:
- // Since memset is 'accesses arguments' only, the AliasAnalysis base class
- // will handle it for the variable length case.
- if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
- unsigned Len = LenCI->getZExtValue();
+ const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
+ if (II != 0)
+ switch (II->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove: {
+ unsigned Len = UnknownSize;
+ if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
+ Len = LenCI->getZExtValue();
Value *Dest = II->getArgOperand(0);
- if (isNoAlias(Dest, Len, P, Size))
+ Value *Src = II->getArgOperand(1);
+ if (isNoAlias(Dest, Len, P, Size)) {
+ if (isNoAlias(Src, Len, P, Size))
+ return NoModRef;
+ return Ref;
+ }
+ break;
+ }
+ case Intrinsic::memset:
+ // Since memset is 'accesses arguments' only, the AliasAnalysis base class
+ // will handle it for the variable length case.
+ if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
+ unsigned Len = LenCI->getZExtValue();
+ Value *Dest = II->getArgOperand(0);
+ if (isNoAlias(Dest, Len, P, Size))
+ return NoModRef;
+ }
+ break;
+ case Intrinsic::atomic_cmp_swap:
+ case Intrinsic::atomic_swap:
+ case Intrinsic::atomic_load_add:
+ case Intrinsic::atomic_load_sub:
+ case Intrinsic::atomic_load_and:
+ case Intrinsic::atomic_load_nand:
+ case Intrinsic::atomic_load_or:
+ case Intrinsic::atomic_load_xor:
+ case Intrinsic::atomic_load_max:
+ case Intrinsic::atomic_load_min:
+ case Intrinsic::atomic_load_umax:
+ case Intrinsic::atomic_load_umin:
+ if (TD) {
+ Value *Op1 = II->getArgOperand(0);
+ unsigned Op1Size = TD->getTypeStoreSize(Op1->getType());
+ if (isNoAlias(Op1, Op1Size, P, Size))
+ return NoModRef;
+ }
+ break;
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::invariant_start: {
+ unsigned PtrSize =
+ cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
+ if (isNoAlias(II->getArgOperand(1), PtrSize, P, Size))
return NoModRef;
+ break;
}
- break;
- case Intrinsic::atomic_cmp_swap:
- case Intrinsic::atomic_swap:
- case Intrinsic::atomic_load_add:
- case Intrinsic::atomic_load_sub:
- case Intrinsic::atomic_load_and:
- case Intrinsic::atomic_load_nand:
- case Intrinsic::atomic_load_or:
- case Intrinsic::atomic_load_xor:
- case Intrinsic::atomic_load_max:
- case Intrinsic::atomic_load_min:
- case Intrinsic::atomic_load_umax:
- case Intrinsic::atomic_load_umin:
- if (TD) {
- Value *Op1 = II->getArgOperand(0);
- unsigned Op1Size = TD->getTypeStoreSize(Op1->getType());
- if (isNoAlias(Op1, Op1Size, P, Size))
+ case Intrinsic::invariant_end: {
+ unsigned PtrSize =
+ cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
+ if (isNoAlias(II->getArgOperand(2), PtrSize, P, Size))
return NoModRef;
+ break;
+ }
}
- break;
- case Intrinsic::lifetime_start:
- case Intrinsic::lifetime_end:
- case Intrinsic::invariant_start: {
- unsigned PtrSize = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
- if (isNoAlias(II->getArgOperand(1), PtrSize, P, Size))
- return NoModRef;
- break;
- }
- case Intrinsic::invariant_end: {
- unsigned PtrSize = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
- if (isNoAlias(II->getArgOperand(2), PtrSize, P, Size))
- return NoModRef;
- break;
- }
- }
// The AliasAnalysis base class has some smarts, let's use them.
return AliasAnalysis::getModRefInfo(CS, P, Size);
}
-AliasAnalysis::ModRefResult
-BasicAliasAnalysis::getModRefInfo(CallSite CS1, CallSite CS2) {
- // If CS1 or CS2 are readnone, they don't interact.
- ModRefBehavior CS1B = AliasAnalysis::getModRefBehavior(CS1);
- if (CS1B == DoesNotAccessMemory) return NoModRef;
-
- ModRefBehavior CS2B = AliasAnalysis::getModRefBehavior(CS2);
- if (CS2B == DoesNotAccessMemory) return NoModRef;
-
- // If they both only read from memory, just return ref.
- if (CS1B == OnlyReadsMemory && CS2B == OnlyReadsMemory)
- return Ref;
-
- // Otherwise, fall back to NoAA (mod+ref).
- return NoAA::getModRefInfo(CS1, CS2);
-}
-
-/// GetIndiceDifference - Dest and Src are the variable indices from two
-/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
-/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
-/// difference between the two pointers.
-static void GetIndiceDifference(
- SmallVectorImpl<std::pair<const Value*, int64_t> > &Dest,
- const SmallVectorImpl<std::pair<const Value*, int64_t> > &Src) {
- if (Src.empty()) return;
-
- for (unsigned i = 0, e = Src.size(); i != e; ++i) {
- const Value *V = Src[i].first;
- int64_t Scale = Src[i].second;
-
- // Find V in Dest. This is N^2, but pointer indices almost never have more
- // than a few variable indexes.
- for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
- if (Dest[j].first != V) continue;
-
- // If we found it, subtract off Scale V's from the entry in Dest. If it
- // goes to zero, remove the entry.
- if (Dest[j].second != Scale)
- Dest[j].second -= Scale;
- else
- Dest.erase(Dest.begin()+j);
- Scale = 0;
- break;
- }
-
- // If we didn't consume this entry, add it to the end of the Dest list.
- if (Scale)
- Dest.push_back(std::make_pair(V, -Scale));
- }
-}
-
/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
/// anything about V2. UnderlyingV1 is GEP1->getUnderlyingObject(),
@@ -488,13 +753,14 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
return MayAlias;
int64_t GEP1BaseOffset;
- SmallVector<std::pair<const Value*, int64_t>, 4> GEP1VariableIndices;
+ SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;
// If we have two gep instructions with must-alias'ing base pointers, figure
// out if the indexes to the GEP tell us anything about the derived pointer.
if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
// Do the base pointers alias?
- AliasResult BaseAlias = aliasCheck(UnderlyingV1, ~0U, UnderlyingV2, ~0U);
+ AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize,
+ UnderlyingV2, UnknownSize);
// If we get a No or May, then return it immediately, no amount of analysis
// will improve this situation.
@@ -507,7 +773,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
int64_t GEP2BaseOffset;
- SmallVector<std::pair<const Value*, int64_t>, 4> GEP2VariableIndices;
+ SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
const Value *GEP2BasePtr =
DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
@@ -523,7 +789,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
// Subtract the GEP2 pointer from the GEP1 pointer to find out their
// symbolic difference.
GEP1BaseOffset -= GEP2BaseOffset;
- GetIndiceDifference(GEP1VariableIndices, GEP2VariableIndices);
+ GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);
} else {
// Check to see if these two pointers are related by the getelementptr
@@ -531,10 +797,10 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
// pointer, we know they cannot alias.
// If both accesses are unknown size, we can't do anything useful here.
- if (V1Size == ~0U && V2Size == ~0U)
+ if (V1Size == UnknownSize && V2Size == UnknownSize)
return MayAlias;
- AliasResult R = aliasCheck(UnderlyingV1, ~0U, V2, V2Size);
+ AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, V2, V2Size);
if (R != MustAlias)
// If V2 may alias GEP base pointer, conservatively returns MayAlias.
// If V2 is known not to alias GEP base pointer, then the two values
@@ -578,8 +844,8 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
// provides an offset of 4 bytes (assuming a <= 4 byte access).
for (unsigned i = 0, e = GEP1VariableIndices.size();
i != e && GEP1BaseOffset;++i)
- if (int64_t RemovedOffset = GEP1BaseOffset/GEP1VariableIndices[i].second)
- GEP1BaseOffset -= RemovedOffset*GEP1VariableIndices[i].second;
+ if (int64_t RemovedOffset = GEP1BaseOffset/GEP1VariableIndices[i].Scale)
+ GEP1BaseOffset -= RemovedOffset*GEP1VariableIndices[i].Scale;
// If our known offset is bigger than the access size, we know we don't have
// an alias.
@@ -782,8 +1048,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,
// If the size of one access is larger than the entire object on the other
// side, then we know such behavior is undefined and can assume no alias.
if (TD)
- if ((V1Size != ~0U && isObjectSmallerThan(O2, V1Size, *TD)) ||
- (V2Size != ~0U && isObjectSmallerThan(O1, V2Size, *TD)))
+ if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD)) ||
+ (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD)))
return NoAlias;
// FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
@@ -810,7 +1076,7 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,
if (const SelectInst *S1 = dyn_cast<SelectInst>(V1))
return aliasSelect(S1, V1Size, V2, V2Size);
- return MayAlias;
+ return NoAA::alias(V1, V1Size, V2, V2Size);
}
// Make sure that anything that uses AliasAnalysis pulls in this file.
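
Two conventions change in aliasCheck: the bare ~0U sentinel becomes the named
UnknownSize constant, and the final fallback chains to NoAA::alias rather than
returning MayAlias directly. The size rule these guards protect is simple:
accessing more bytes than the entire object on the other side contains is
undefined behavior, so NoAlias is safe. A minimal sketch, assuming UnknownSize
keeps the old ~0U encoding:

    #include <cassert>

    static const unsigned UnknownSize = ~0U; // sentinel for "size not known"

    bool provesNoAlias(unsigned AccessSize, unsigned OtherObjectSize) {
      // A known access larger than the whole other object cannot overlap it
      // in a defined program.
      return AccessSize != UnknownSize && OtherObjectSize < AccessSize;
    }

    int main() {
      assert(provesNoAlias(16, 8));           // 16-byte access, 8-byte object
      assert(!provesNoAlias(UnknownSize, 8)); // unknown sizes prove nothing
    }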
diff --git a/contrib/llvm/lib/Analysis/CFGPrinter.cpp b/contrib/llvm/lib/Analysis/CFGPrinter.cpp
index e06704b..617a362 100644
--- a/contrib/llvm/lib/Analysis/CFGPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/CFGPrinter.cpp
@@ -25,7 +25,7 @@ using namespace llvm;
namespace {
struct CFGViewer : public FunctionPass {
     static char ID; // Pass identification, replacement for typeid
- CFGViewer() : FunctionPass(&ID) {}
+ CFGViewer() : FunctionPass(ID) {}
virtual bool runOnFunction(Function &F) {
F.viewCFG();
@@ -41,13 +41,12 @@ namespace {
}
char CFGViewer::ID = 0;
-static RegisterPass<CFGViewer>
-V0("view-cfg", "View CFG of function", false, true);
+INITIALIZE_PASS(CFGViewer, "view-cfg", "View CFG of function", false, true);
namespace {
struct CFGOnlyViewer : public FunctionPass {
     static char ID; // Pass identification, replacement for typeid
- CFGOnlyViewer() : FunctionPass(&ID) {}
+ CFGOnlyViewer() : FunctionPass(ID) {}
virtual bool runOnFunction(Function &F) {
F.viewCFGOnly();
@@ -63,15 +62,14 @@ namespace {
}
char CFGOnlyViewer::ID = 0;
-static RegisterPass<CFGOnlyViewer>
-V1("view-cfg-only",
- "View CFG of function (with no function bodies)", false, true);
+INITIALIZE_PASS(CFGOnlyViewer, "view-cfg-only",
+ "View CFG of function (with no function bodies)", false, true);
namespace {
struct CFGPrinter : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- CFGPrinter() : FunctionPass(&ID) {}
- explicit CFGPrinter(void *pid) : FunctionPass(pid) {}
+ CFGPrinter() : FunctionPass(ID) {}
+ explicit CFGPrinter(char &pid) : FunctionPass(pid) {}
virtual bool runOnFunction(Function &F) {
std::string Filename = "cfg." + F.getNameStr() + ".dot";
@@ -97,14 +95,14 @@ namespace {
}
char CFGPrinter::ID = 0;
-static RegisterPass<CFGPrinter>
-P1("dot-cfg", "Print CFG of function to 'dot' file", false, true);
+INITIALIZE_PASS(CFGPrinter, "dot-cfg", "Print CFG of function to 'dot' file",
+ false, true);
namespace {
struct CFGOnlyPrinter : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- CFGOnlyPrinter() : FunctionPass(&ID) {}
- explicit CFGOnlyPrinter(void *pid) : FunctionPass(pid) {}
+ CFGOnlyPrinter() : FunctionPass(ID) {}
+ explicit CFGOnlyPrinter(char &pid) : FunctionPass(pid) {}
virtual bool runOnFunction(Function &F) {
std::string Filename = "cfg." + F.getNameStr() + ".dot";
errs() << "Writing '" << Filename << "'...";
@@ -128,9 +126,9 @@ namespace {
}
char CFGOnlyPrinter::ID = 0;
-static RegisterPass<CFGOnlyPrinter>
-P2("dot-cfg-only",
- "Print CFG of function to 'dot' file (with no function bodies)", false, true);
+INITIALIZE_PASS(CFGOnlyPrinter, "dot-cfg-only",
+ "Print CFG of function to 'dot' file (with no function bodies)",
+ false, true);
/// viewCFG - This function is meant for use from the debugger. You can just
/// say 'call F->viewCFG()' and a ghostview window should pop up from the
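
Every hunk in this file replaces FunctionPass(&ID) with FunctionPass(ID) and
RegisterPass<> with INITIALIZE_PASS: pass identity is now handed over as a
char reference, with the address of the static char still serving as the
unique key. The mechanism in miniature, with illustrative names:

    #include <cassert>

    typedef const void *AnalysisID; // identity: address of a static char

    struct PassBase {
      AnalysisID PID;
      explicit PassBase(char &ID) : PID(&ID) {} // reference in, address kept
    };

    struct ViewerLike : PassBase {
      static char ID;
      ViewerLike() : PassBase(ID) {}
    };
    char ViewerLike::ID = 0;

    struct PrinterLike : PassBase {
      static char ID;
      PrinterLike() : PassBase(ID) {}
    };
    char PrinterLike::ID = 0;

    int main() {
      // Distinct pass classes get distinct, link-time-unique identifiers.
      assert(ViewerLike().PID != PrinterLike().PID);
      assert(ViewerLike().PID == &ViewerLike::ID);
    }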
diff --git a/contrib/llvm/lib/Analysis/CMakeLists.txt b/contrib/llvm/lib/Analysis/CMakeLists.txt
index d9b670d..6a2ab68 100644
--- a/contrib/llvm/lib/Analysis/CMakeLists.txt
+++ b/contrib/llvm/lib/Analysis/CMakeLists.txt
@@ -38,12 +38,15 @@ add_llvm_library(LLVMAnalysis
ProfileInfoLoader.cpp
ProfileInfoLoaderPass.cpp
ProfileVerifierPass.cpp
+ RegionInfo.cpp
+ RegionPrinter.cpp
ScalarEvolution.cpp
ScalarEvolutionAliasAnalysis.cpp
ScalarEvolutionExpander.cpp
ScalarEvolutionNormalization.cpp
SparsePropagation.cpp
Trace.cpp
+ TypeBasedAliasAnalysis.cpp
ValueTracking.cpp
)
diff --git a/contrib/llvm/lib/Analysis/CaptureTracking.cpp b/contrib/llvm/lib/Analysis/CaptureTracking.cpp
index 0478258..90eae20 100644
--- a/contrib/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/contrib/llvm/lib/Analysis/CaptureTracking.cpp
@@ -69,7 +69,7 @@ bool llvm::PointerMayBeCaptured(const Value *V,
switch (I->getOpcode()) {
case Instruction::Call:
case Instruction::Invoke: {
- CallSite CS = CallSite::get(I);
+ CallSite CS(I);
// Not captured if the callee is readonly, doesn't return a copy through
// its return value and doesn't unwind (a readonly function can leak bits
// by throwing an exception or not depending on the input value).
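
CallSite::get(I) gives way to a constructor, CallSite(I), that yields a null
call site when I is neither a call nor an invoke; callers then test the site
directly, as the CallGraph hunks later in this diff do with "if (CS && ...)".
A toy model of that explicit-test idiom (not the LLVM class itself):

    #include <cassert>

    enum Opcode { Call, Invoke, Load };

    struct Instruction { Opcode Op; };

    // Wraps an instruction iff it is a call or invoke; tests false otherwise.
    struct CallSiteLike {
      Instruction *I;
      explicit CallSiteLike(Instruction *In)
        : I(In && (In->Op == Call || In->Op == Invoke) ? In : 0) {}
      operator bool() const { return I != 0; }
    };

    int main() {
      Instruction C = { Call }, Ld = { Load };
      assert(CallSiteLike(&C));   // calls are call sites
      assert(!CallSiteLike(&Ld)); // loads are not
    }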
diff --git a/contrib/llvm/lib/Analysis/ConstantFolding.cpp b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
index 13d8f4d..0bf7967 100644
--- a/contrib/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
@@ -778,9 +778,9 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
case Instruction::ICmp:
case Instruction::FCmp: assert(0 && "Invalid for compares");
case Instruction::Call:
- if (Function *F = dyn_cast<Function>(Ops[CallInst::ArgOffset ? 0:NumOps-1]))
+ if (Function *F = dyn_cast<Function>(Ops[NumOps - 1]))
if (canConstantFoldCallTo(F))
- return ConstantFoldCall(F, Ops+CallInst::ArgOffset, NumOps-1);
+ return ConstantFoldCall(F, Ops, NumOps - 1);
return 0;
case Instruction::PtrToInt:
// If the input is a inttoptr, eliminate the pair. This requires knowing
diff --git a/contrib/llvm/lib/Analysis/DbgInfoPrinter.cpp b/contrib/llvm/lib/Analysis/DbgInfoPrinter.cpp
index 3532b05..0567750 100644
--- a/contrib/llvm/lib/Analysis/DbgInfoPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/DbgInfoPrinter.cpp
@@ -40,7 +40,7 @@ namespace {
void printVariableDeclaration(const Value *V);
public:
static char ID; // Pass identification
- PrintDbgInfo() : FunctionPass(&ID), Out(outs()) {}
+ PrintDbgInfo() : FunctionPass(ID), Out(errs()) {}
virtual bool runOnFunction(Function &F);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
@@ -48,8 +48,8 @@ namespace {
}
};
char PrintDbgInfo::ID = 0;
- static RegisterPass<PrintDbgInfo> X("print-dbginfo",
- "Print debug info in human readable form");
+ INITIALIZE_PASS(PrintDbgInfo, "print-dbginfo",
+ "Print debug info in human readable form", false, false);
}
FunctionPass *llvm::createDbgInfoPrinterPass() { return new PrintDbgInfo(); }
diff --git a/contrib/llvm/lib/Analysis/DebugInfo.cpp b/contrib/llvm/lib/Analysis/DebugInfo.cpp
index c8d0d22..5ca89c6 100644
--- a/contrib/llvm/lib/Analysis/DebugInfo.cpp
+++ b/contrib/llvm/lib/Analysis/DebugInfo.cpp
@@ -13,7 +13,6 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/Target/TargetMachine.h" // FIXME: LAYERING VIOLATION!
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Intrinsics.h"
@@ -22,6 +21,8 @@
#include "llvm/Module.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/raw_ostream.h"
@@ -32,7 +33,22 @@ using namespace llvm::dwarf;
// DIDescriptor
//===----------------------------------------------------------------------===//
-StringRef
+DIDescriptor::DIDescriptor(const DIFile F) : DbgNode(F.DbgNode) {
+}
+
+DIDescriptor::DIDescriptor(const DISubprogram F) : DbgNode(F.DbgNode) {
+}
+
+DIDescriptor::DIDescriptor(const DILexicalBlock F) : DbgNode(F.DbgNode) {
+}
+
+DIDescriptor::DIDescriptor(const DIVariable F) : DbgNode(F.DbgNode) {
+}
+
+DIDescriptor::DIDescriptor(const DIType F) : DbgNode(F.DbgNode) {
+}
+
+StringRef
DIDescriptor::getStringField(unsigned Elt) const {
if (DbgNode == 0)
return StringRef();
@@ -60,7 +76,8 @@ DIDescriptor DIDescriptor::getDescriptorField(unsigned Elt) const {
return DIDescriptor();
if (Elt < DbgNode->getNumOperands())
- return DIDescriptor(dyn_cast_or_null<const MDNode>(DbgNode->getOperand(Elt)));
+ return
+ DIDescriptor(dyn_cast_or_null<const MDNode>(DbgNode->getOperand(Elt)));
return DIDescriptor();
}
@@ -73,6 +90,15 @@ GlobalVariable *DIDescriptor::getGlobalVariableField(unsigned Elt) const {
return 0;
}
+Constant *DIDescriptor::getConstantField(unsigned Elt) const {
+ if (DbgNode == 0)
+ return 0;
+
+ if (Elt < DbgNode->getNumOperands())
+ return dyn_cast_or_null<Constant>(DbgNode->getOperand(Elt));
+ return 0;
+}
+
Function *DIDescriptor::getFunctionField(unsigned Elt) const {
if (DbgNode == 0)
return 0;
@@ -109,6 +135,7 @@ bool DIDescriptor::isDerivedType() const {
case dwarf::DW_TAG_restrict_type:
case dwarf::DW_TAG_member:
case dwarf::DW_TAG_inheritance:
+ case dwarf::DW_TAG_friend:
return true;
default:
// CompositeTypes are currently modelled as DerivedTypes.
@@ -161,7 +188,8 @@ bool DIDescriptor::isSubprogram() const {
/// isGlobalVariable - Return true if the specified tag is legal for
/// DIGlobalVariable.
bool DIDescriptor::isGlobalVariable() const {
- return DbgNode && getTag() == dwarf::DW_TAG_variable;
+ return DbgNode && (getTag() == dwarf::DW_TAG_variable ||
+ getTag() == dwarf::DW_TAG_constant);
}
/// isGlobal - Return true if the specified tag is legal for DIGlobal.
@@ -233,9 +261,8 @@ unsigned DIArray::getNumElements() const {
}
/// replaceAllUsesWith - Replace all uses of debug info referenced by
-/// this descriptor. After this completes, the current debug info value
-/// is erased.
-void DIDerivedType::replaceAllUsesWith(DIDescriptor &D) {
+/// this descriptor.
+void DIType::replaceAllUsesWith(DIDescriptor &D) {
if (!DbgNode)
return;
@@ -249,7 +276,7 @@ void DIDerivedType::replaceAllUsesWith(DIDescriptor &D) {
const MDNode *DN = D;
const Value *V = cast_or_null<Value>(DN);
Node->replaceAllUsesWith(const_cast<Value*>(V));
- Node->destroy();
+ MDNode::deleteTemporary(Node);
}
}
@@ -277,6 +304,16 @@ bool DIType::Verify() const {
return true;
}
+/// Verify - Verify that a basic type descriptor is well formed.
+bool DIBasicType::Verify() const {
+ return isBasicType();
+}
+
+/// Verify - Verify that a derived type descriptor is well formed.
+bool DIDerivedType::Verify() const {
+ return isDerivedType();
+}
+
/// Verify - Verify that a composite type descriptor is well formed.
bool DICompositeType::Verify() const {
if (!DbgNode)
@@ -327,7 +364,7 @@ bool DIGlobalVariable::Verify() const {
if (!Ty.Verify())
return false;
- if (!getGlobal())
+ if (!getGlobal() && !getConstant())
return false;
return true;
@@ -355,7 +392,7 @@ bool DIVariable::Verify() const {
bool DILocation::Verify() const {
if (!DbgNode)
return false;
-
+
return DbgNode->getNumOperands() == 4;
}
@@ -378,7 +415,7 @@ uint64_t DIDerivedType::getOriginalTypeSize() const {
Tag == dwarf::DW_TAG_const_type || Tag == dwarf::DW_TAG_volatile_type ||
Tag == dwarf::DW_TAG_restrict_type) {
DIType BaseType = getTypeDerivedFrom();
- // If this type is not derived from any type then take conservative
+ // If this type is not derived from any type then take conservative
// approach.
if (!BaseType.isValid())
return getSizeInBits();
@@ -387,17 +424,17 @@ uint64_t DIDerivedType::getOriginalTypeSize() const {
else
return BaseType.getSizeInBits();
}
-
+
return getSizeInBits();
}
-/// isInlinedFnArgument - Return trule if this variable provides debugging
+/// isInlinedFnArgument - Return true if this variable provides debugging
 /// information for an inlined function's arguments.
bool DIVariable::isInlinedFnArgument(const Function *CurFn) {
assert(CurFn && "Invalid function");
if (!getContext().isSubprogram())
return false;
- // This variable is not inlined function argument if its scope
+  // This variable is not an inlined function argument if its scope
// does not describe current function.
return !(DISubprogram(getContext()).describes(CurFn));
}
@@ -416,7 +453,7 @@ bool DISubprogram::describes(const Function *F) {
return false;
}
-unsigned DISubprogram::isOptimized() const {
+unsigned DISubprogram::isOptimized() const {
assert (DbgNode && "Invalid subprogram descriptor!");
if (DbgNode->getNumOperands() == 16)
return getUnsignedField(15);
@@ -426,7 +463,7 @@ unsigned DISubprogram::isOptimized() const {
StringRef DIScope::getFilename() const {
if (!DbgNode)
return StringRef();
- if (isLexicalBlock())
+ if (isLexicalBlock())
return DILexicalBlock(DbgNode).getFilename();
if (isSubprogram())
return DISubprogram(DbgNode).getFilename();
@@ -445,7 +482,7 @@ StringRef DIScope::getFilename() const {
StringRef DIScope::getDirectory() const {
if (!DbgNode)
return StringRef();
- if (isLexicalBlock())
+ if (isLexicalBlock())
return DILexicalBlock(DbgNode).getDirectory();
if (isSubprogram())
return DISubprogram(DbgNode).getDirectory();
@@ -899,7 +936,26 @@ DICompositeType DIFactory::CreateCompositeType(unsigned Tag,
ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang),
ContainingType
};
- return DICompositeType(MDNode::get(VMContext, &Elts[0], 13));
+
+ MDNode *Node = MDNode::get(VMContext, &Elts[0], 13);
+ // Create a named metadata so that we do not lose this enum info.
+ if (Tag == dwarf::DW_TAG_enumeration_type) {
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.enum");
+ NMD->addOperand(Node);
+ }
+ return DICompositeType(Node);
+}
+
+
+/// CreateTemporaryType - Create a temporary forward-declared type.
+DIType DIFactory::CreateTemporaryType() {
+ // Give the temporary MDNode a tag. It doesn't matter what tag we
+ // use here as long as DIType accepts it.
+ Value *Elts[] = {
+ GetTagConstant(DW_TAG_base_type)
+ };
+ MDNode *Node = MDNode::getTemporary(VMContext, Elts, array_lengthof(Elts));
+ return DIType(Node);
}
@@ -915,8 +971,8 @@ DICompositeType DIFactory::CreateCompositeTypeEx(unsigned Tag,
unsigned Flags,
DIType DerivedFrom,
DIArray Elements,
- unsigned RuntimeLang) {
-
+ unsigned RuntimeLang,
+ MDNode *ContainingType) {
Value *Elts[] = {
GetTagConstant(Tag),
Context,
@@ -929,9 +985,16 @@ DICompositeType DIFactory::CreateCompositeTypeEx(unsigned Tag,
ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
DerivedFrom,
Elements,
- ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang)
+ ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang),
+ ContainingType
};
- return DICompositeType(MDNode::get(VMContext, &Elts[0], 12));
+ MDNode *Node = MDNode::get(VMContext, &Elts[0], 13);
+ // Create a named metadata so that we do not lose this enum info.
+ if (Tag == dwarf::DW_TAG_enumeration_type) {
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.enum");
+ NMD->addOperand(Node);
+ }
+ return DICompositeType(Node);
}
@@ -980,8 +1043,8 @@ DISubprogram DIFactory::CreateSubprogram(DIDescriptor Context,
}
/// CreateSubprogramDefinition - Create new subprogram descriptor for the
-/// given declaration.
-DISubprogram DIFactory::CreateSubprogramDefinition(DISubprogram &SPDeclaration) {
+/// given declaration.
+DISubprogram DIFactory::CreateSubprogramDefinition(DISubprogram &SPDeclaration){
if (SPDeclaration.isDefinition())
return DISubprogram(SPDeclaration);
@@ -1046,6 +1109,38 @@ DIFactory::CreateGlobalVariable(DIDescriptor Context, StringRef Name,
return DIGlobalVariable(Node);
}
+/// CreateGlobalVariable - Create a new descriptor for the specified constant.
+DIGlobalVariable
+DIFactory::CreateGlobalVariable(DIDescriptor Context, StringRef Name,
+ StringRef DisplayName,
+ StringRef LinkageName,
+ DIFile F,
+ unsigned LineNo, DIType Ty,bool isLocalToUnit,
+ bool isDefinition, llvm::Constant *Val) {
+ Value *Elts[] = {
+ GetTagConstant(dwarf::DW_TAG_variable),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Context,
+ MDString::get(VMContext, Name),
+ MDString::get(VMContext, DisplayName),
+ MDString::get(VMContext, LinkageName),
+ F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
+ Ty,
+ ConstantInt::get(Type::getInt1Ty(VMContext), isLocalToUnit),
+ ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition),
+ Val
+ };
+
+ Value *const *Vs = &Elts[0];
+  MDNode *Node = MDNode::get(VMContext, Vs, 12);
+
+ // Create a named metadata so that we do not lose this mdnode.
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.gv");
+ NMD->addOperand(Node);
+
+ return DIGlobalVariable(Node);
+}
/// CreateVariable - Create a new descriptor for the specified variable.
DIVariable DIFactory::CreateVariable(unsigned Tag, DIDescriptor Context,
@@ -1073,10 +1168,10 @@ DIVariable DIFactory::CreateVariable(unsigned Tag, DIDescriptor Context,
char One = '\1';
if (FName.startswith(StringRef(&One, 1)))
FName = FName.substr(1);
- NamedMDNode *FnLocals = M.getNamedMetadata(Twine("llvm.dbg.lv.", FName));
- if (!FnLocals)
- FnLocals = NamedMDNode::Create(VMContext, Twine("llvm.dbg.lv.", FName),
- NULL, 0, &M);
+
+ SmallString<32> Out;
+ NamedMDNode *FnLocals =
+ M.getOrInsertNamedMetadata(Twine("llvm.dbg.lv.", FName).toStringRef(Out));
FnLocals->addOperand(Node);
}
return DIVariable(Node);
@@ -1089,7 +1184,7 @@ DIVariable DIFactory::CreateComplexVariable(unsigned Tag, DIDescriptor Context,
const std::string &Name,
DIFile F,
unsigned LineNo,
- DIType Ty,
+ DIType Ty,
SmallVector<Value *, 9> &addr) {
SmallVector<Value *, 9> Elts;
Elts.push_back(GetTagConstant(Tag));
@@ -1107,14 +1202,19 @@ DIVariable DIFactory::CreateComplexVariable(unsigned Tag, DIDescriptor Context,
/// CreateBlock - This creates a descriptor for a lexical block with the
/// specified parent VMContext.
DILexicalBlock DIFactory::CreateLexicalBlock(DIDescriptor Context,
- unsigned LineNo, unsigned Col) {
+ DIFile F, unsigned LineNo,
+ unsigned Col) {
+  // Defeat MDNode uniquing for lexical blocks.
+ static unsigned int unique_id = 0;
Value *Elts[] = {
GetTagConstant(dwarf::DW_TAG_lexical_block),
Context,
ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
- ConstantInt::get(Type::getInt32Ty(VMContext), Col)
+ ConstantInt::get(Type::getInt32Ty(VMContext), Col),
+ F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), unique_id++)
};
- return DILexicalBlock(MDNode::get(VMContext, &Elts[0], 4));
+ return DILexicalBlock(MDNode::get(VMContext, &Elts[0], 6));
}
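
MDNodes are uniqued by operand list, so two lexical blocks opened at the same
line and column would previously have collapsed into a single node; the added
file operand and running counter keep them distinct. A standalone illustration
of uniquing and of how an extra discriminator operand defeats it (the node
factory here is hypothetical):

    #include <cassert>
    #include <map>
    #include <vector>

    // Toy uniquing factory: identical operand vectors map to one node id.
    static int getNode(const std::vector<int> &Ops) {
      static std::map<std::vector<int>, int> Uniquer;
      static int NextId = 0;
      std::map<std::vector<int>, int>::iterator I = Uniquer.find(Ops);
      if (I != Uniquer.end()) return I->second;
      return Uniquer[Ops] = NextId++;
    }

    int main() {
      static unsigned unique_id = 0;
      std::vector<int> A, B;
      A.push_back(10); A.push_back(2);  // line 10, column 2
      B = A;                            // a second block at the same spot
      assert(getNode(A) == getNode(B)); // uniqued: the blocks collapse

      A.push_back(unique_id++);         // append a discriminator...
      B.push_back(unique_id++);
      assert(getNode(A) != getNode(B)); // ...and they stay distinct
    }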
/// CreateNameSpace - This creates new descriptor for a namespace
@@ -1174,7 +1274,7 @@ Instruction *DIFactory::InsertDeclare(Value *Storage, DIVariable D,
// If this block already has a terminator then insert this intrinsic
// before the terminator.
- if (TerminatorInst *T = InsertAtEnd->getTerminator())
+ if (TerminatorInst *T = InsertAtEnd->getTerminator())
return CallInst::Create(DeclareFn, Args, Args+2, "", T);
else
return CallInst::Create(DeclareFn, Args, Args+2, "", InsertAtEnd);}
@@ -1203,7 +1303,7 @@ Instruction *DIFactory::InsertDbgValueIntrinsic(Value *V, uint64_t Offset,
if (!ValueFn)
ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value);
- Value *Args[] = { MDNode::get(V->getContext(), &V, 1),
+ Value *Args[] = { MDNode::get(V->getContext(), &V, 1),
ConstantInt::get(Type::getInt64Ty(V->getContext()), Offset),
D };
return CallInst::Create(ValueFn, Args, Args+3, "", InsertAtEnd);
@@ -1221,21 +1321,21 @@ void DebugInfoFinder::processModule(Module &M) {
++BI) {
if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(BI))
processDeclare(DDI);
-
+
DebugLoc Loc = BI->getDebugLoc();
if (Loc.isUnknown())
continue;
-
+
LLVMContext &Ctx = BI->getContext();
DIDescriptor Scope(Loc.getScope(Ctx));
-
+
if (Scope.isCompileUnit())
addCompileUnit(DICompileUnit(Scope));
else if (Scope.isSubprogram())
processSubprogram(DISubprogram(Scope));
else if (Scope.isLexicalBlock())
processLexicalBlock(DILexicalBlock(Scope));
-
+
if (MDNode *IA = Loc.getInlinedAt(Ctx))
processLocation(DILocation(IA));
}
@@ -1380,7 +1480,7 @@ static Value *findDbgGlobalDeclare(GlobalVariable *V) {
return 0;
for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
- DIDescriptor DIG(cast_or_null<MDNode>(NMD->getOperand(i)));
+ DIDescriptor DIG(cast<MDNode>(NMD->getOperand(i)));
if (!DIG.isGlobalVariable())
continue;
if (DIGlobalVariable(DIG).getGlobal() == V)
@@ -1393,16 +1493,16 @@ static Value *findDbgGlobalDeclare(GlobalVariable *V) {
/// It looks through pointer casts too.
static const DbgDeclareInst *findDbgDeclare(const Value *V) {
V = V->stripPointerCasts();
-
+
if (!isa<Instruction>(V) && !isa<Argument>(V))
return 0;
-
+
const Function *F = NULL;
if (const Instruction *I = dyn_cast<Instruction>(V))
F = I->getParent()->getParent();
else if (const Argument *A = dyn_cast<Argument>(V))
F = A->getParent();
-
+
for (Function::const_iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
for (BasicBlock::const_iterator BI = (*FI).begin(), BE = (*FI).end();
BI != BE; ++BI)
@@ -1460,10 +1560,10 @@ DISubprogram llvm::getDISubprogram(const MDNode *Scope) {
DIDescriptor D(Scope);
if (D.isSubprogram())
return DISubprogram(Scope);
-
+
if (D.isLexicalBlock())
return getDISubprogram(DILexicalBlock(Scope).getContext());
-
+
return DISubprogram();
}
@@ -1471,9 +1571,9 @@ DISubprogram llvm::getDISubprogram(const MDNode *Scope) {
DICompositeType llvm::getDICompositeType(DIType T) {
if (T.isCompositeType())
return DICompositeType(T);
-
+
if (T.isDerivedType())
return getDICompositeType(DIDerivedType(T).getTypeDerivedFrom());
-
+
return DICompositeType();
}
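
CreateTemporaryType pairs with the retargeted DIType::replaceAllUsesWith
above: emit a placeholder node for a forward-declared type, let clients refer
to it, then splice in the real definition and delete the temporary (that is
what MDNode::deleteTemporary does in the RAUW hunk). The shape of the
two-phase pattern, reduced to plain pointers with illustrative types:

    #include <cassert>
    #include <vector>

    struct Node { int Tag; };

    int main() {
      // Phase 1: a forward declaration gets a temporary placeholder.
      Node *Temp = new Node();
      Temp->Tag = 0;
      std::vector<Node*> Uses(3, Temp); // clients point at the placeholder

      // Phase 2: the real definition arrives; retarget every use, then
      // delete the temporary.
      Node Real = { 42 };
      for (size_t i = 0; i != Uses.size(); ++i)
        if (Uses[i] == Temp)
          Uses[i] = &Real;
      delete Temp;

      assert(Uses[0]->Tag == 42 && Uses[2]->Tag == 42);
    }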
diff --git a/contrib/llvm/lib/Analysis/DomPrinter.cpp b/contrib/llvm/lib/Analysis/DomPrinter.cpp
index d95c376..9f34094 100644
--- a/contrib/llvm/lib/Analysis/DomPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/DomPrinter.cpp
@@ -86,99 +86,100 @@ namespace {
struct DomViewer
: public DOTGraphTraitsViewer<DominatorTree, false> {
static char ID;
- DomViewer() : DOTGraphTraitsViewer<DominatorTree, false>("dom", &ID){}
+ DomViewer() : DOTGraphTraitsViewer<DominatorTree, false>("dom", ID){}
};
struct DomOnlyViewer
: public DOTGraphTraitsViewer<DominatorTree, true> {
static char ID;
- DomOnlyViewer() : DOTGraphTraitsViewer<DominatorTree, true>("domonly", &ID){}
+ DomOnlyViewer() : DOTGraphTraitsViewer<DominatorTree, true>("domonly", ID){}
};
struct PostDomViewer
: public DOTGraphTraitsViewer<PostDominatorTree, false> {
static char ID;
PostDomViewer() :
- DOTGraphTraitsViewer<PostDominatorTree, false>("postdom", &ID){}
+ DOTGraphTraitsViewer<PostDominatorTree, false>("postdom", ID){}
};
struct PostDomOnlyViewer
: public DOTGraphTraitsViewer<PostDominatorTree, true> {
static char ID;
PostDomOnlyViewer() :
- DOTGraphTraitsViewer<PostDominatorTree, true>("postdomonly", &ID){}
+ DOTGraphTraitsViewer<PostDominatorTree, true>("postdomonly", ID){}
};
} // end anonymous namespace
char DomViewer::ID = 0;
-RegisterPass<DomViewer> A("view-dom",
- "View dominance tree of function");
+INITIALIZE_PASS(DomViewer, "view-dom",
+ "View dominance tree of function", false, false);
char DomOnlyViewer::ID = 0;
-RegisterPass<DomOnlyViewer> B("view-dom-only",
- "View dominance tree of function "
- "(with no function bodies)");
+INITIALIZE_PASS(DomOnlyViewer, "view-dom-only",
+ "View dominance tree of function (with no function bodies)",
+ false, false);
char PostDomViewer::ID = 0;
-RegisterPass<PostDomViewer> C("view-postdom",
- "View postdominance tree of function");
+INITIALIZE_PASS(PostDomViewer, "view-postdom",
+ "View postdominance tree of function", false, false);
char PostDomOnlyViewer::ID = 0;
-RegisterPass<PostDomOnlyViewer> D("view-postdom-only",
- "View postdominance tree of function "
- "(with no function bodies)");
+INITIALIZE_PASS(PostDomOnlyViewer, "view-postdom-only",
+ "View postdominance tree of function "
+ "(with no function bodies)",
+ false, false);
namespace {
struct DomPrinter
: public DOTGraphTraitsPrinter<DominatorTree, false> {
static char ID;
- DomPrinter() : DOTGraphTraitsPrinter<DominatorTree, false>("dom", &ID) {}
+ DomPrinter() : DOTGraphTraitsPrinter<DominatorTree, false>("dom", ID) {}
};
struct DomOnlyPrinter
: public DOTGraphTraitsPrinter<DominatorTree, true> {
static char ID;
- DomOnlyPrinter() : DOTGraphTraitsPrinter<DominatorTree, true>("domonly", &ID) {}
+ DomOnlyPrinter() : DOTGraphTraitsPrinter<DominatorTree, true>("domonly", ID) {}
};
struct PostDomPrinter
: public DOTGraphTraitsPrinter<PostDominatorTree, false> {
static char ID;
PostDomPrinter() :
- DOTGraphTraitsPrinter<PostDominatorTree, false>("postdom", &ID) {}
+ DOTGraphTraitsPrinter<PostDominatorTree, false>("postdom", ID) {}
};
struct PostDomOnlyPrinter
: public DOTGraphTraitsPrinter<PostDominatorTree, true> {
static char ID;
PostDomOnlyPrinter() :
- DOTGraphTraitsPrinter<PostDominatorTree, true>("postdomonly", &ID) {}
+ DOTGraphTraitsPrinter<PostDominatorTree, true>("postdomonly", ID) {}
};
} // end anonymous namespace
char DomPrinter::ID = 0;
-RegisterPass<DomPrinter> E("dot-dom",
- "Print dominance tree of function "
- "to 'dot' file");
+INITIALIZE_PASS(DomPrinter, "dot-dom",
+ "Print dominance tree of function to 'dot' file",
+ false, false);
char DomOnlyPrinter::ID = 0;
-RegisterPass<DomOnlyPrinter> F("dot-dom-only",
- "Print dominance tree of function "
- "to 'dot' file "
- "(with no function bodies)");
+INITIALIZE_PASS(DomOnlyPrinter, "dot-dom-only",
+ "Print dominance tree of function to 'dot' file "
+ "(with no function bodies)",
+ false, false);
char PostDomPrinter::ID = 0;
-RegisterPass<PostDomPrinter> G("dot-postdom",
- "Print postdominance tree of function "
- "to 'dot' file");
+INITIALIZE_PASS(PostDomPrinter, "dot-postdom",
+ "Print postdominance tree of function to 'dot' file",
+ false, false);
char PostDomOnlyPrinter::ID = 0;
-RegisterPass<PostDomOnlyPrinter> H("dot-postdom-only",
- "Print postdominance tree of function "
- "to 'dot' file "
- "(with no function bodies)");
+INITIALIZE_PASS(PostDomOnlyPrinter, "dot-postdom-only",
+ "Print postdominance tree of function to 'dot' file "
+ "(with no function bodies)",
+ false, false);
// Create methods available outside of this file, to use them
// "include/llvm/LinkAllPasses.h". Otherwise the pass would be deleted by
diff --git a/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp b/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
index 65c7c6e..b363528 100644
--- a/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
@@ -42,7 +42,7 @@ class BasicCallGraph : public ModulePass, public CallGraph {
public:
static char ID; // Class identification, replacement for typeinfo
- BasicCallGraph() : ModulePass(&ID), Root(0),
+ BasicCallGraph() : ModulePass(ID), Root(0),
ExternalCallingNode(0), CallsExternalNode(0) {}
// runOnModule - Compute the call graph for the specified module.
@@ -86,8 +86,8 @@ public:
/// an analysis interface through multiple inheritance. If needed, it should
/// override this to adjust the this pointer as needed for the specified pass
/// info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&CallGraph::ID))
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &CallGraph::ID)
return (CallGraph*)this;
return this;
}
@@ -145,8 +145,8 @@ private:
for (Function::iterator BB = F->begin(), BBE = F->end(); BB != BBE; ++BB)
for (BasicBlock::iterator II = BB->begin(), IE = BB->end();
II != IE; ++II) {
- CallSite CS = CallSite::get(II);
- if (CS.getInstruction() && !isa<DbgInfoIntrinsic>(II)) {
+ CallSite CS(cast<Value>(II));
+ if (CS && !isa<DbgInfoIntrinsic>(II)) {
const Function *Callee = CS.getCalledFunction();
if (Callee)
Node->addCalledFunction(CS, getOrInsertFunction(Callee));
@@ -172,9 +172,8 @@ private:
} //End anonymous namespace
static RegisterAnalysisGroup<CallGraph> X("Call Graph");
-static RegisterPass<BasicCallGraph>
-Y("basiccg", "Basic CallGraph Construction", false, true);
-static RegisterAnalysisGroup<CallGraph, true> Z(Y);
+INITIALIZE_AG_PASS(BasicCallGraph, CallGraph, "basiccg",
+ "Basic CallGraph Construction", false, true, true);
char CallGraph::ID = 0;
char BasicCallGraph::ID = 0;
diff --git a/contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp b/contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
index 0c01ee5..b7a27cb 100644
--- a/contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
@@ -45,7 +45,7 @@ class CGPassManager : public ModulePass, public PMDataManager {
public:
static char ID;
explicit CGPassManager(int Depth)
- : ModulePass(&ID), PMDataManager(Depth) { }
+ : ModulePass(ID), PMDataManager(Depth) { }
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the module, and if so, return true.
@@ -209,7 +209,7 @@ bool CGPassManager::RefreshCallGraph(CallGraphSCC &CurSCC,
// If the call edge is not from a call or invoke, then the function
// pass RAUW'd a call with another value. This can happen when
       // constant folding of well-known functions happens, etc.
- CallSite::get(I->first).getInstruction() == 0) {
+ !CallSite(I->first)) {
assert(!CheckingMode &&
"CallGraphSCCPass did not update the CallGraph correctly!");
@@ -245,8 +245,8 @@ bool CGPassManager::RefreshCallGraph(CallGraphSCC &CurSCC,
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
- CallSite CS = CallSite::get(I);
- if (!CS.getInstruction() || isa<DbgInfoIntrinsic>(I)) continue;
+ CallSite CS(cast<Value>(I));
+ if (!CS || isa<DbgInfoIntrinsic>(I)) continue;
// If this call site already existed in the callgraph, just verify it
// matches up to expectations and remove it from CallSites.
@@ -582,9 +582,9 @@ namespace {
public:
static char ID;
- PrintCallGraphPass() : CallGraphSCCPass(&ID), Out(dbgs()) {}
+ PrintCallGraphPass() : CallGraphSCCPass(ID), Out(dbgs()) {}
PrintCallGraphPass(const std::string &B, raw_ostream &o)
- : CallGraphSCCPass(&ID), Banner(B), Out(o) {}
+ : CallGraphSCCPass(ID), Banner(B), Out(o) {}
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
diff --git a/contrib/llvm/lib/Analysis/IPA/FindUsedTypes.cpp b/contrib/llvm/lib/Analysis/IPA/FindUsedTypes.cpp
index c4fb0b9..8eed9d6 100644
--- a/contrib/llvm/lib/Analysis/IPA/FindUsedTypes.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/FindUsedTypes.cpp
@@ -23,8 +23,8 @@
using namespace llvm;
char FindUsedTypes::ID = 0;
-static RegisterPass<FindUsedTypes>
-X("print-used-types", "Find Used Types", false, true);
+INITIALIZE_PASS(FindUsedTypes, "print-used-types",
+ "Find Used Types", false, true);
// IncorporateType - Incorporate one type and all of its subtypes into the
// collection of used types.
diff --git a/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp b/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
index f13deea..6759b0a 100644
--- a/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
@@ -47,14 +47,15 @@ namespace {
/// GlobalInfo - Maintain mod/ref info for all of the globals without
/// addresses taken that are read or written (transitively) by this
/// function.
- std::map<GlobalValue*, unsigned> GlobalInfo;
+ std::map<const GlobalValue*, unsigned> GlobalInfo;
/// MayReadAnyGlobal - May read global variables, but it is not known which.
bool MayReadAnyGlobal;
- unsigned getInfoForGlobal(GlobalValue *GV) const {
+ unsigned getInfoForGlobal(const GlobalValue *GV) const {
unsigned Effect = MayReadAnyGlobal ? AliasAnalysis::Ref : 0;
- std::map<GlobalValue*, unsigned>::const_iterator I = GlobalInfo.find(GV);
+ std::map<const GlobalValue*, unsigned>::const_iterator I =
+ GlobalInfo.find(GV);
if (I != GlobalInfo.end())
Effect |= I->second;
return Effect;
@@ -71,23 +72,23 @@ namespace {
class GlobalsModRef : public ModulePass, public AliasAnalysis {
/// NonAddressTakenGlobals - The globals that do not have their addresses
/// taken.
- std::set<GlobalValue*> NonAddressTakenGlobals;
+ std::set<const GlobalValue*> NonAddressTakenGlobals;
/// IndirectGlobals - The memory pointed to by this global is known to be
/// 'owned' by the global.
- std::set<GlobalValue*> IndirectGlobals;
+ std::set<const GlobalValue*> IndirectGlobals;
/// AllocsForIndirectGlobals - If an instruction allocates memory for an
/// indirect global, this map indicates which one.
- std::map<Value*, GlobalValue*> AllocsForIndirectGlobals;
+ std::map<const Value*, const GlobalValue*> AllocsForIndirectGlobals;
/// FunctionInfo - For each function, keep track of what globals are
/// modified or read.
- std::map<Function*, FunctionRecord> FunctionInfo;
+ std::map<const Function*, FunctionRecord> FunctionInfo;
public:
static char ID;
- GlobalsModRef() : ModulePass(&ID) {}
+ GlobalsModRef() : ModulePass(ID) {}
bool runOnModule(Module &M) {
InitializeAliasAnalysis(this); // set up super class
@@ -107,39 +108,39 @@ namespace {
//
AliasResult alias(const Value *V1, unsigned V1Size,
const Value *V2, unsigned V2Size);
- ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
- ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
- return AliasAnalysis::getModRefInfo(CS1,CS2);
+ ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size);
+ ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) {
+ return AliasAnalysis::getModRefInfo(CS1, CS2);
}
/// getModRefBehavior - Return the behavior of the specified function if
/// called from the specified call site. The call site may be null in which
/// case the most generic behavior of this function should be returned.
- ModRefBehavior getModRefBehavior(Function *F,
- std::vector<PointerAccessInfo> *Info) {
+ ModRefBehavior getModRefBehavior(const Function *F) {
if (FunctionRecord *FR = getFunctionInfo(F)) {
if (FR->FunctionEffect == 0)
return DoesNotAccessMemory;
else if ((FR->FunctionEffect & Mod) == 0)
return OnlyReadsMemory;
}
- return AliasAnalysis::getModRefBehavior(F, Info);
+ return AliasAnalysis::getModRefBehavior(F);
}
/// getModRefBehavior - Return the behavior of the specified function if
/// called from the specified call site. The call site may be null in which
/// case the most generic behavior of this function should be returned.
- ModRefBehavior getModRefBehavior(CallSite CS,
- std::vector<PointerAccessInfo> *Info) {
- Function* F = CS.getCalledFunction();
- if (!F) return AliasAnalysis::getModRefBehavior(CS, Info);
+ ModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
+ const Function* F = CS.getCalledFunction();
+ if (!F) return AliasAnalysis::getModRefBehavior(CS);
if (FunctionRecord *FR = getFunctionInfo(F)) {
if (FR->FunctionEffect == 0)
return DoesNotAccessMemory;
else if ((FR->FunctionEffect & Mod) == 0)
return OnlyReadsMemory;
}
- return AliasAnalysis::getModRefBehavior(CS, Info);
+ return AliasAnalysis::getModRefBehavior(CS);
}
virtual void deleteValue(Value *V);
@@ -149,8 +150,8 @@ namespace {
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&AliasAnalysis::ID))
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
@@ -158,8 +159,9 @@ namespace {
private:
/// getFunctionInfo - Return the function info for the function, or null if
/// we don't have anything useful to say about it.
- FunctionRecord *getFunctionInfo(Function *F) {
- std::map<Function*, FunctionRecord>::iterator I = FunctionInfo.find(F);
+ FunctionRecord *getFunctionInfo(const Function *F) {
+ std::map<const Function*, FunctionRecord>::iterator I =
+ FunctionInfo.find(F);
if (I != FunctionInfo.end())
return &I->second;
return 0;
@@ -175,9 +177,9 @@ namespace {
}
char GlobalsModRef::ID = 0;
-static RegisterPass<GlobalsModRef>
-X("globalsmodref-aa", "Simple mod/ref analysis for globals", false, true);
-static RegisterAnalysisGroup<AliasAnalysis> Y(X);
+INITIALIZE_AG_PASS(GlobalsModRef, AliasAnalysis,
+ "globalsmodref-aa", "Simple mod/ref analysis for globals",
+ false, true, false);
Pass *llvm::createGlobalsModRefPass() { return new GlobalsModRef(); }
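
The const-correctness changes above leave FunctionRecord's merge logic intact:
getInfoForGlobal folds the per-global mod/ref bits together with the
function-wide MayReadAnyGlobal flag, which contributes a blanket Ref bit for
every global. In isolation the merge is just this (stand-in types, not the
LLVM ones):

    #include <cassert>
    #include <map>
    #include <string>

    enum { NoModRef = 0, Ref = 1, Mod = 2, ModRef = 3 };

    struct FunctionRecordLike {
      std::map<std::string, unsigned> GlobalInfo; // per-global mod/ref bits
      bool MayReadAnyGlobal;                      // blanket Ref over globals

      unsigned getInfoForGlobal(const std::string &GV) const {
        unsigned Effect = MayReadAnyGlobal ? Ref : NoModRef;
        std::map<std::string, unsigned>::const_iterator I =
          GlobalInfo.find(GV);
        if (I != GlobalInfo.end())
          Effect |= I->second;
        return Effect;
      }
    };

    int main() {
      FunctionRecordLike FR;
      FR.MayReadAnyGlobal = true;
      FR.GlobalInfo["g"] = Mod;
      assert(FR.getInfoForGlobal("g") == ModRef); // Mod bit plus blanket Ref
      assert(FR.getInfoForGlobal("h") == Ref);    // untracked global: Ref
    }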
@@ -409,7 +411,7 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {
FunctionEffect |= CalleeFR->FunctionEffect;
// Incorporate callee's effects on globals into our info.
- for (std::map<GlobalValue*, unsigned>::iterator GI =
+ for (std::map<const GlobalValue*, unsigned>::iterator GI =
CalleeFR->GlobalInfo.begin(), E = CalleeFR->GlobalInfo.end();
GI != E; ++GI)
FR.GlobalInfo[GI->first] |= GI->second;
@@ -477,13 +479,13 @@ AliasAnalysis::AliasResult
GlobalsModRef::alias(const Value *V1, unsigned V1Size,
const Value *V2, unsigned V2Size) {
// Get the base object these pointers point to.
- Value *UV1 = const_cast<Value*>(V1->getUnderlyingObject());
- Value *UV2 = const_cast<Value*>(V2->getUnderlyingObject());
+ const Value *UV1 = V1->getUnderlyingObject();
+ const Value *UV2 = V2->getUnderlyingObject();
// If either of the underlying values is a global, they may be non-addr-taken
// globals, which we can answer queries about.
- GlobalValue *GV1 = dyn_cast<GlobalValue>(UV1);
- GlobalValue *GV2 = dyn_cast<GlobalValue>(UV2);
+ const GlobalValue *GV1 = dyn_cast<GlobalValue>(UV1);
+ const GlobalValue *GV2 = dyn_cast<GlobalValue>(UV2);
if (GV1 || GV2) {
// If the global's address is taken, pretend we don't know it's a pointer to
// the global.
@@ -503,12 +505,12 @@ GlobalsModRef::alias(const Value *V1, unsigned V1Size,
// so, we may be able to handle this. First check to see if the base pointer
// is a direct load from an indirect global.
GV1 = GV2 = 0;
- if (LoadInst *LI = dyn_cast<LoadInst>(UV1))
+ if (const LoadInst *LI = dyn_cast<LoadInst>(UV1))
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getOperand(0)))
if (IndirectGlobals.count(GV))
GV1 = GV;
- if (LoadInst *LI = dyn_cast<LoadInst>(UV2))
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getOperand(0)))
+ if (const LoadInst *LI = dyn_cast<LoadInst>(UV2))
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getOperand(0)))
if (IndirectGlobals.count(GV))
GV2 = GV;
@@ -530,16 +532,17 @@ GlobalsModRef::alias(const Value *V1, unsigned V1Size,
}
AliasAnalysis::ModRefResult
-GlobalsModRef::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
+GlobalsModRef::getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size) {
unsigned Known = ModRef;
// If we are asking for mod/ref info of a direct call with a pointer to a
// global we are tracking, return information if we have it.
- if (GlobalValue *GV = dyn_cast<GlobalValue>(P->getUnderlyingObject()))
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(P->getUnderlyingObject()))
if (GV->hasLocalLinkage())
- if (Function *F = CS.getCalledFunction())
+ if (const Function *F = CS.getCalledFunction())
if (NonAddressTakenGlobals.count(GV))
- if (FunctionRecord *FR = getFunctionInfo(F))
+ if (const FunctionRecord *FR = getFunctionInfo(F))
Known = FR->getInfoForGlobal(GV);
if (Known == NoModRef)
@@ -558,7 +561,7 @@ void GlobalsModRef::deleteValue(Value *V) {
// any AllocRelatedValues for it.
if (IndirectGlobals.erase(GV)) {
// Remove any entries in AllocsForIndirectGlobals for this global.
- for (std::map<Value*, GlobalValue*>::iterator
+ for (std::map<const Value*, const GlobalValue*>::iterator
I = AllocsForIndirectGlobals.begin(),
E = AllocsForIndirectGlobals.end(); I != E; ) {
if (I->second == GV) {
diff --git a/contrib/llvm/lib/Analysis/IVUsers.cpp b/contrib/llvm/lib/Analysis/IVUsers.cpp
index 2c997da..cdf667a 100644
--- a/contrib/llvm/lib/Analysis/IVUsers.cpp
+++ b/contrib/llvm/lib/Analysis/IVUsers.cpp
@@ -21,7 +21,6 @@
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
-#include "llvm/Assembly/AsmAnnotationWriter.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -29,8 +28,7 @@
using namespace llvm;
char IVUsers::ID = 0;
-static RegisterPass<IVUsers>
-X("iv-users", "Induction Variable Users", false, true);
+INITIALIZE_PASS(IVUsers, "iv-users", "Induction Variable Users", false, true);
Pass *llvm::createIVUsersPass() {
return new IVUsers();
@@ -39,27 +37,31 @@ Pass *llvm::createIVUsersPass() {
/// isInteresting - Test whether the given expression is "interesting" when
/// used by the given expression, within the context of analyzing the
/// given loop.
-static bool isInteresting(const SCEV *S, const Instruction *I, const Loop *L) {
- // Anything loop-invariant is interesting.
- if (!isa<SCEVUnknown>(S) && S->isLoopInvariant(L))
- return true;
-
+static bool isInteresting(const SCEV *S, const Instruction *I, const Loop *L,
+ ScalarEvolution *SE) {
// An addrec is interesting if it's affine or if it has an interesting start.
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
// Keep things simple. Don't touch loop-variant strides.
if (AR->getLoop() == L)
return AR->isAffine() || !L->contains(I);
- // Otherwise recurse to see if the start value is interesting.
- return isInteresting(AR->getStart(), I, L);
+    // Otherwise recurse to see if the start value is interesting, and check
+    // that the step value is not interesting, since we don't yet know how to
+ // do effective SCEV expansions for addrecs with interesting steps.
+ return isInteresting(AR->getStart(), I, L, SE) &&
+ !isInteresting(AR->getStepRecurrence(*SE), I, L, SE);
}
- // An add is interesting if any of its operands is.
+ // An add is interesting if exactly one of its operands is interesting.
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
+ bool AnyInterestingYet = false;
for (SCEVAddExpr::op_iterator OI = Add->op_begin(), OE = Add->op_end();
OI != OE; ++OI)
- if (isInteresting(*OI, I, L))
- return true;
- return false;
+ if (isInteresting(*OI, I, L, SE)) {
+ if (AnyInterestingYet)
+ return false;
+ AnyInterestingYet = true;
+ }
+ return AnyInterestingYet;
}
// Nothing else is interesting here.
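
The add case tightens from "any operand interesting" to "exactly one operand
interesting". Modeled over a plain vector of per-operand answers (the
recursion into SCEV operands is elided):

    #include <cassert>
    #include <vector>

    // Interesting iff exactly one operand is interesting, mirroring the
    // AnyInterestingYet loop added above.
    static bool addIsInteresting(const std::vector<bool> &OpInteresting) {
      bool AnyInterestingYet = false;
      for (size_t i = 0; i != OpInteresting.size(); ++i)
        if (OpInteresting[i]) {
          if (AnyInterestingYet)
            return false; // a second interesting operand disqualifies the add
          AnyInterestingYet = true;
        }
      return AnyInterestingYet;
    }

    int main() {
      std::vector<bool> One(3, false); One[1] = true;
      std::vector<bool> Two(3, false); Two[0] = Two[2] = true;
      assert(addIsInteresting(One));
      assert(!addIsInteresting(Two));
      assert(!addIsInteresting(std::vector<bool>(3, false)));
    }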
@@ -85,7 +87,7 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
// If we've come to an uninteresting expression, stop the traversal and
// call this a user.
- if (!isInteresting(ISE, I, L))
+ if (!isInteresting(ISE, I, L, SE))
return false;
SmallPtrSet<Instruction *, 4> UniqueUsers;
@@ -141,7 +143,7 @@ IVStrideUse &IVUsers::AddUser(Instruction *User, Value *Operand) {
}
IVUsers::IVUsers()
- : LoopPass(&ID) {
+ : LoopPass(ID) {
}
void IVUsers::getAnalysisUsage(AnalysisUsage &AU) const {
@@ -176,9 +178,6 @@ void IVUsers::print(raw_ostream &OS, const Module *M) const {
}
OS << ":\n";
- // Use a default AssemblyAnnotationWriter to suppress the default info
- // comments, which aren't relevant here.
- AssemblyAnnotationWriter Annotator;
for (ilist<IVStrideUse>::const_iterator UI = IVUses.begin(),
E = IVUses.end(); UI != E; ++UI) {
OS << " ";
@@ -192,7 +191,7 @@ void IVUsers::print(raw_ostream &OS, const Module *M) const {
OS << ")";
}
OS << " in ";
- UI->getUser()->print(OS, &Annotator);
+ UI->getUser()->print(OS);
OS << '\n';
}
}
diff --git a/contrib/llvm/lib/Analysis/InlineCost.cpp b/contrib/llvm/lib/Analysis/InlineCost.cpp
index b1df517..3e550f3 100644
--- a/contrib/llvm/lib/Analysis/InlineCost.cpp
+++ b/contrib/llvm/lib/Analysis/InlineCost.cpp
@@ -152,14 +152,14 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
if (isa<DbgInfoIntrinsic>(II))
continue; // Debug intrinsics don't count as size.
-
- CallSite CS = CallSite::get(const_cast<Instruction*>(&*II));
-
+
+ ImmutableCallSite CS(cast<Instruction>(II));
+
// If this function contains a call to setjmp or _setjmp, never inline
// it. This is a hack because we depend on the user marking their local
// variables as volatile if they are live across a setjmp call, and they
// probably won't do this in callers.
- if (Function *F = CS.getCalledFunction()) {
+ if (const Function *F = CS.getCalledFunction()) {
if (F->isDeclaration() &&
(F->getName() == "setjmp" || F->getName() == "_setjmp"))
callsSetJmp = true;
diff --git a/contrib/llvm/lib/Analysis/InstCount.cpp b/contrib/llvm/lib/Analysis/InstCount.cpp
index bb2cf53..dcbcac0 100644
--- a/contrib/llvm/lib/Analysis/InstCount.cpp
+++ b/contrib/llvm/lib/Analysis/InstCount.cpp
@@ -51,7 +51,7 @@ namespace {
}
public:
static char ID; // Pass identification, replacement for typeid
- InstCount() : FunctionPass(&ID) {}
+ InstCount() : FunctionPass(ID) {}
virtual bool runOnFunction(Function &F);
@@ -64,8 +64,8 @@ namespace {
}
char InstCount::ID = 0;
-static RegisterPass<InstCount>
-X("instcount", "Counts the various types of Instructions", false, true);
+INITIALIZE_PASS(InstCount, "instcount",
+ "Counts the various types of Instructions", false, true);
FunctionPass *llvm::createInstCountPass() { return new InstCount(); }
diff --git a/contrib/llvm/lib/Analysis/IntervalPartition.cpp b/contrib/llvm/lib/Analysis/IntervalPartition.cpp
index 1f17b77..1c9e148 100644
--- a/contrib/llvm/lib/Analysis/IntervalPartition.cpp
+++ b/contrib/llvm/lib/Analysis/IntervalPartition.cpp
@@ -16,8 +16,8 @@
using namespace llvm;
char IntervalPartition::ID = 0;
-static RegisterPass<IntervalPartition>
-X("intervals", "Interval Partition Construction", true, true);
+INITIALIZE_PASS(IntervalPartition, "intervals",
+ "Interval Partition Construction", true, true);
//===----------------------------------------------------------------------===//
// IntervalPartition Implementation
@@ -91,7 +91,7 @@ bool IntervalPartition::runOnFunction(Function &F) {
// distinguish it from a copy constructor. Always pass in false for now.
//
IntervalPartition::IntervalPartition(IntervalPartition &IP, bool)
- : FunctionPass(&ID) {
+ : FunctionPass(ID) {
assert(IP.getRootInterval() && "Cannot operate on empty IntervalPartitions!");
   // Pass false to intervals_begin because we take ownership of its memory
diff --git a/contrib/llvm/lib/Analysis/LazyValueInfo.cpp b/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
index ff9026b..e32dbc4 100644
--- a/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -19,16 +19,18 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CFG.h"
+#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ValueHandle.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
char LazyValueInfo::ID = 0;
-static RegisterPass<LazyValueInfo>
-X("lazy-value-info", "Lazy Value Information Analysis", false, true);
+INITIALIZE_PASS(LazyValueInfo, "lazy-value-info",
+ "Lazy Value Information Analysis", false, true);
namespace llvm {
FunctionPass *createLazyValueInfoPass() { return new LazyValueInfo(); }
@@ -50,12 +52,15 @@ class LVILatticeVal {
enum LatticeValueTy {
/// undefined - This LLVM Value has no known value yet.
undefined,
+
/// constant - This LLVM Value has a specific constant value.
constant,
-
/// notconstant - This LLVM value is known to not have the specified value.
notconstant,
+    /// constantrange - This LLVM Value is known to fall within the
+    /// specified range of values.
+ constantrange,
+
/// overdefined - This instruction is not known to be constant, and we know
/// it has a value.
overdefined
@@ -63,42 +68,62 @@ class LVILatticeVal {
/// Val: This stores the current lattice value along with the Constant* for
/// the constant if this is a 'constant' or 'notconstant' value.
- PointerIntPair<Constant *, 2, LatticeValueTy> Val;
+ LatticeValueTy Tag;
+ Constant *Val;
+ ConstantRange Range;
public:
- LVILatticeVal() : Val(0, undefined) {}
+ LVILatticeVal() : Tag(undefined), Val(0), Range(1, true) {}
static LVILatticeVal get(Constant *C) {
LVILatticeVal Res;
- Res.markConstant(C);
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
+ Res.markConstantRange(ConstantRange(CI->getValue(), CI->getValue()+1));
+ else if (!isa<UndefValue>(C))
+ Res.markConstant(C);
return Res;
}
static LVILatticeVal getNot(Constant *C) {
LVILatticeVal Res;
- Res.markNotConstant(C);
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
+ Res.markConstantRange(ConstantRange(CI->getValue()+1, CI->getValue()));
+ else
+ Res.markNotConstant(C);
+ return Res;
+ }
+ static LVILatticeVal getRange(ConstantRange CR) {
+ LVILatticeVal Res;
+ Res.markConstantRange(CR);
return Res;
}
- bool isUndefined() const { return Val.getInt() == undefined; }
- bool isConstant() const { return Val.getInt() == constant; }
- bool isNotConstant() const { return Val.getInt() == notconstant; }
- bool isOverdefined() const { return Val.getInt() == overdefined; }
+ bool isUndefined() const { return Tag == undefined; }
+ bool isConstant() const { return Tag == constant; }
+ bool isNotConstant() const { return Tag == notconstant; }
+ bool isConstantRange() const { return Tag == constantrange; }
+ bool isOverdefined() const { return Tag == overdefined; }
Constant *getConstant() const {
assert(isConstant() && "Cannot get the constant of a non-constant!");
- return Val.getPointer();
+ return Val;
}
Constant *getNotConstant() const {
assert(isNotConstant() && "Cannot get the constant of a non-notconstant!");
- return Val.getPointer();
+ return Val;
+ }
+
+ ConstantRange getConstantRange() const {
+ assert(isConstantRange() &&
+ "Cannot get the constant-range of a non-constant-range!");
+ return Range;
}
/// markOverdefined - Return true if this is a change in status.
bool markOverdefined() {
if (isOverdefined())
return false;
- Val.setInt(overdefined);
+ Tag = overdefined;
return true;
}
@@ -110,9 +135,9 @@ public:
}
assert(isUndefined());
- Val.setInt(constant);
+ Tag = constant;
assert(V && "Marking constant with NULL");
- Val.setPointer(V);
+ Val = V;
return true;
}
@@ -128,9 +153,29 @@ public:
else
assert(isUndefined());
- Val.setInt(notconstant);
+ Tag = notconstant;
assert(V && "Marking constant with NULL");
- Val.setPointer(V);
+ Val = V;
+ return true;
+ }
+
+ /// markConstantRange - Return true if this is a change in status.
+ bool markConstantRange(const ConstantRange NewR) {
+ if (isConstantRange()) {
+ if (NewR.isEmptySet())
+ return markOverdefined();
+
+      bool changed = Range != NewR;
+ Range = NewR;
+ return changed;
+ }
+
+ assert(isUndefined());
+ if (NewR.isEmptySet())
+ return markOverdefined();
+
+ Tag = constantrange;
+ Range = NewR;
return true;
}
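
With the new constantrange state, get() and getNot() encode integer constants
as ranges: a constant C becomes the half-open interval [C, C+1), and "not C"
becomes the wrapped interval [C+1, C), which covers everything except C. A
sketch of that encoding over 8-bit values, with a naive membership test
standing in for ConstantRange:

    #include <cassert>

    // Half-open range [Lo, Up) over uint8_t; Lo > Up encodes a wrapped range.
    struct Range8 {
      unsigned char Lo, Up;
      bool contains(unsigned char V) const {
        if (Lo <= Up) return Lo <= V && V < Up; // ordinary range
        return V >= Lo || V < Up;               // wrapped range
      }
    };

    int main() {
      unsigned char C = 42;
      Range8 Eq  = { C, (unsigned char)(C + 1) }; // get(C):    [C, C+1)
      Range8 Neq = { (unsigned char)(C + 1), C }; // getNot(C): [C+1, C)
      assert(Eq.contains(42) && !Eq.contains(43));
      assert(!Neq.contains(42) && Neq.contains(0) && Neq.contains(255));
    }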
@@ -147,20 +192,39 @@ public:
isa<ConstantExpr>(RHS.getNotConstant()))
return markOverdefined();
return false;
- }
- if (isConstant()) {
+ } else if (isConstant()) {
if (getConstant() == RHS.getNotConstant() ||
isa<ConstantExpr>(RHS.getNotConstant()) ||
isa<ConstantExpr>(getConstant()))
return markOverdefined();
return markNotConstant(RHS.getNotConstant());
+ } else if (isConstantRange()) {
+ return markOverdefined();
}
assert(isUndefined() && "Unexpected lattice");
return markNotConstant(RHS.getNotConstant());
}
+ if (RHS.isConstantRange()) {
+ if (isConstantRange()) {
+ ConstantRange NewR = Range.unionWith(RHS.getConstantRange());
+ if (NewR.isFullSet())
+ return markOverdefined();
+ else
+ return markConstantRange(NewR);
+ } else if (!isUndefined()) {
+ return markOverdefined();
+ }
+
+ assert(isUndefined() && "Unexpected lattice");
+ return markConstantRange(RHS.getConstantRange());
+ }
+
// RHS must be a constant, we must be undef, constant, or notconstant.
+ assert(!isConstantRange() &&
+ "Constant and ConstantRange cannot be merged.");
+
if (isUndefined())
return markConstant(RHS.getConstant());
@@ -191,6 +255,9 @@ raw_ostream &operator<<(raw_ostream &OS, const LVILatticeVal &Val) {
if (Val.isNotConstant())
return OS << "notconstant<" << *Val.getNotConstant() << '>';
+ else if (Val.isConstantRange())
+ return OS << "constantrange<" << Val.getConstantRange().getLower() << ", "
+ << Val.getConstantRange().getUpper() << '>';
return OS << "constant<" << *Val.getConstant() << '>';
}
}
@@ -206,17 +273,41 @@ namespace {
public:
/// BlockCacheEntryTy - This is a computed lattice value at the end of the
/// specified basic block for a Value* that depends on context.
- typedef std::pair<BasicBlock*, LVILatticeVal> BlockCacheEntryTy;
+ typedef std::pair<AssertingVH<BasicBlock>, LVILatticeVal> BlockCacheEntryTy;
/// ValueCacheEntryTy - This is all of the cached block information for
     /// exactly one Value*. The entries are keyed by BasicBlock*, so a
     /// lookup is a simple map find.
- typedef std::vector<BlockCacheEntryTy> ValueCacheEntryTy;
+ typedef std::map<AssertingVH<BasicBlock>, LVILatticeVal> ValueCacheEntryTy;
private:
+  /// LVIValueHandle - A callback value handle that updates the cache when
+ /// values are erased.
+ struct LVIValueHandle : public CallbackVH {
+ LazyValueInfoCache *Parent;
+
+ LVIValueHandle(Value *V, LazyValueInfoCache *P)
+ : CallbackVH(V), Parent(P) { }
+
+ void deleted();
+ void allUsesReplacedWith(Value* V) {
+ deleted();
+ }
+
+ LVIValueHandle &operator=(Value *V) {
+ return *this = LVIValueHandle(V, Parent);
+ }
+ };
+
/// ValueCache - This is all of the cached information for all values,
/// mapped from Value* to key information.
- DenseMap<Value*, ValueCacheEntryTy> ValueCache;
+ std::map<LVIValueHandle, ValueCacheEntryTy> ValueCache;
+
+ /// OverDefinedCache - This tracks, on a per-block basis, the set of
+ /// values that are over-defined at the end of that block. This is required
+ /// for cache updating.
+ std::set<std::pair<AssertingVH<BasicBlock>, Value*> > OverDefinedCache;
+
public:
/// getValueInBlock - This is the query interface to determine the lattice
@@ -226,29 +317,23 @@ namespace {
/// getValueOnEdge - This is the query interface to determine the lattice
/// value for the specified Value* that is true on the specified edge.
LVILatticeVal getValueOnEdge(Value *V, BasicBlock *FromBB,BasicBlock *ToBB);
- };
-} // end anonymous namespace
-
-namespace {
- struct BlockCacheEntryComparator {
- static int Compare(const void *LHSv, const void *RHSv) {
- const LazyValueInfoCache::BlockCacheEntryTy *LHS =
- static_cast<const LazyValueInfoCache::BlockCacheEntryTy *>(LHSv);
- const LazyValueInfoCache::BlockCacheEntryTy *RHS =
- static_cast<const LazyValueInfoCache::BlockCacheEntryTy *>(RHSv);
- if (LHS->first < RHS->first)
- return -1;
- if (LHS->first > RHS->first)
- return 1;
- return 0;
- }
- bool operator()(const LazyValueInfoCache::BlockCacheEntryTy &LHS,
- const LazyValueInfoCache::BlockCacheEntryTy &RHS) const {
- return LHS.first < RHS.first;
+ /// threadEdge - This is the update interface to inform the cache that an
+ /// edge from PredBB to OldSucc has been threaded to be from PredBB to
+ /// NewSucc.
+ void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);
+
+ /// eraseBlock - This is part of the update interface to inform the cache
+ /// that a block has been deleted.
+ void eraseBlock(BasicBlock *BB);
+
+ /// clear - Empty the cache.
+ void clear() {
+ ValueCache.clear();
+ OverDefinedCache.clear();
}
};
-}
+} // end anonymous namespace
//===----------------------------------------------------------------------===//
// LVIQuery Impl
@@ -267,78 +352,87 @@ namespace {
/// This is the current value being queried for.
Value *Val;
+    /// This is a reference to the owning cache, used for recursive queries.
+ LazyValueInfoCache &Parent;
+
/// This is all of the cached information about this value.
ValueCacheEntryTy &Cache;
+ /// This tracks, for each block, what values are overdefined.
+ std::set<std::pair<AssertingVH<BasicBlock>, Value*> > &OverDefinedCache;
+
/// NewBlocks - This is a mapping of the new BasicBlocks which have been
/// added to cache but that are not in sorted order.
- DenseMap<BasicBlock*, LVILatticeVal> NewBlockInfo;
+ DenseSet<BasicBlock*> NewBlockInfo;
+
public:
- LVIQuery(Value *V, ValueCacheEntryTy &VC) : Val(V), Cache(VC) {
+ LVIQuery(Value *V, LazyValueInfoCache &P,
+ ValueCacheEntryTy &VC,
+ std::set<std::pair<AssertingVH<BasicBlock>, Value*> > &ODC)
+ : Val(V), Parent(P), Cache(VC), OverDefinedCache(ODC) {
}
~LVIQuery() {
// When the query is done, insert the newly discovered facts into the
// cache in sorted order.
if (NewBlockInfo.empty()) return;
-
- // Grow the cache to exactly fit the new data.
- Cache.reserve(Cache.size() + NewBlockInfo.size());
- // If we only have one new entry, insert it instead of doing a full-on
- // sort.
- if (NewBlockInfo.size() == 1) {
- BlockCacheEntryTy Entry = *NewBlockInfo.begin();
- ValueCacheEntryTy::iterator I =
- std::lower_bound(Cache.begin(), Cache.end(), Entry,
- BlockCacheEntryComparator());
- assert((I == Cache.end() || I->first != Entry.first) &&
- "Entry already in map!");
-
- Cache.insert(I, Entry);
- return;
+ for (DenseSet<BasicBlock*>::iterator I = NewBlockInfo.begin(),
+ E = NewBlockInfo.end(); I != E; ++I) {
+ if (Cache[*I].isOverdefined())
+ OverDefinedCache.insert(std::make_pair(*I, Val));
}
-
- // TODO: If we only have two new elements, INSERT them both.
-
- Cache.insert(Cache.end(), NewBlockInfo.begin(), NewBlockInfo.end());
- array_pod_sort(Cache.begin(), Cache.end(),
- BlockCacheEntryComparator::Compare);
-
}
LVILatticeVal getBlockValue(BasicBlock *BB);
LVILatticeVal getEdgeValue(BasicBlock *FromBB, BasicBlock *ToBB);
private:
- LVILatticeVal &getCachedEntryForBlock(BasicBlock *BB);
+ LVILatticeVal getCachedEntryForBlock(BasicBlock *BB);
};
} // end anonymous namespace
-/// getCachedEntryForBlock - See if we already have a value for this block. If
-/// so, return it, otherwise create a new entry in the NewBlockInfo map to use.
-LVILatticeVal &LVIQuery::getCachedEntryForBlock(BasicBlock *BB) {
-
- // Do a binary search to see if we already have an entry for this block in
- // the cache set. If so, find it.
- if (!Cache.empty()) {
- ValueCacheEntryTy::iterator Entry =
- std::lower_bound(Cache.begin(), Cache.end(),
- BlockCacheEntryTy(BB, LVILatticeVal()),
- BlockCacheEntryComparator());
- if (Entry != Cache.end() && Entry->first == BB)
- return Entry->second;
+void LazyValueInfoCache::LVIValueHandle::deleted() {
+ for (std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator
+ I = Parent->OverDefinedCache.begin(),
+ E = Parent->OverDefinedCache.end();
+ I != E; ) {
+ std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator tmp = I;
+ ++I;
+ if (tmp->second == getValPtr())
+ Parent->OverDefinedCache.erase(tmp);
}
- // Otherwise, check to see if it's in NewBlockInfo or create a new entry if
- // not.
- return NewBlockInfo[BB];
+ // This erasure deallocates *this, so it MUST happen after we're done
+ // using any and all members of *this.
+ Parent->ValueCache.erase(*this);
+}
+
+void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
+ for (std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator
+ I = OverDefinedCache.begin(), E = OverDefinedCache.end(); I != E; ) {
+ std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator tmp = I;
+ ++I;
+ if (tmp->first == BB)
+ OverDefinedCache.erase(tmp);
+ }
+
+ for (std::map<LVIValueHandle, ValueCacheEntryTy>::iterator
+ I = ValueCache.begin(), E = ValueCache.end(); I != E; ++I)
+ I->second.erase(BB);
+}
+
+/// getCachedEntryForBlock - See if we already have a value for this block. If
+/// so, return it; otherwise create a new entry in the Cache map to use.
+LVILatticeVal LVIQuery::getCachedEntryForBlock(BasicBlock *BB) {
+ NewBlockInfo.insert(BB);
+ return Cache[BB];
}
LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) {
// See if we already have a value for this block.
- LVILatticeVal &BBLV = getCachedEntryForBlock(BB);
+ LVILatticeVal BBLV = getCachedEntryForBlock(BB);
// If we've already computed this block's value, return it.
if (!BBLV.isUndefined()) {
@@ -350,13 +444,28 @@ LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) {
// lattice value to overdefined, so that cycles will terminate and be
// conservatively correct.
BBLV.markOverdefined();
+ Cache[BB] = BBLV;
- // If V is live into BB, see if our predecessors know anything about it.
Instruction *BBI = dyn_cast<Instruction>(Val);
if (BBI == 0 || BBI->getParent() != BB) {
LVILatticeVal Result; // Start Undefined.
- unsigned NumPreds = 0;
+ // If this is a pointer, and there's a load from that pointer in this BB,
+ // then we know that the pointer can't be NULL.
+ bool NotNull = false;
+ if (Val->getType()->isPointerTy()) {
+      for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
+           BI != BE; ++BI) {
+ LoadInst *L = dyn_cast<LoadInst>(BI);
+ if (L && L->getPointerAddressSpace() == 0 &&
+ L->getPointerOperand()->getUnderlyingObject() ==
+ Val->getUnderlyingObject()) {
+ NotNull = true;
+ break;
+ }
+ }
+ }
+
+ unsigned NumPreds = 0;
// Loop over all of our predecessors, merging what we know from them into
// result.
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
@@ -367,11 +476,19 @@ LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) {
if (Result.isOverdefined()) {
DEBUG(dbgs() << " compute BB '" << BB->getName()
<< "' - overdefined because of pred.\n");
+      // If we previously determined that this is a pointer that can't be null,
+ // then return that rather than giving up entirely.
+ if (NotNull) {
+ const PointerType *PTy = cast<PointerType>(Val->getType());
+ Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
+ }
+
return Result;
}
++NumPreds;
}
+
// If this is the entry block, we must be asking about an argument. The
// value is overdefined.
if (NumPreds == 0 && BB == &BB->getParent()->front()) {
@@ -382,24 +499,123 @@ LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) {
// Return the merged value, which is more precise than 'overdefined'.
assert(!Result.isOverdefined());
- return getCachedEntryForBlock(BB) = Result;
+ return Cache[BB] = Result;
}
// If this value is defined by an instruction in this block, we have to
// process it here somehow or return overdefined.
if (PHINode *PN = dyn_cast<PHINode>(BBI)) {
- (void)PN;
- // TODO: PHI Translation in preds.
- } else {
+ LVILatticeVal Result; // Start Undefined.
+ // Loop over all of our predecessors, merging what we know from them into
+ // result.
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ Value* PhiVal = PN->getIncomingValueForBlock(*PI);
+ Result.mergeIn(Parent.getValueOnEdge(PhiVal, *PI, BB));
+
+      // If we hit overdefined, exit early. The cache entry is already set
+ // to overdefined.
+ if (Result.isOverdefined()) {
+ DEBUG(dbgs() << " compute BB '" << BB->getName()
+ << "' - overdefined because of pred.\n");
+ return Result;
+ }
+ }
+
+ // Return the merged value, which is more precise than 'overdefined'.
+ assert(!Result.isOverdefined());
+ return Cache[BB] = Result;
}
-
- DEBUG(dbgs() << " compute BB '" << BB->getName()
- << "' - overdefined because inst def found.\n");
+ assert(Cache[BB].isOverdefined() && "Recursive query changed our cache?");
+
+ // We can only analyze the definitions of certain classes of instructions
+ // (integral binops and casts at the moment), so bail if this isn't one.
LVILatticeVal Result;
- Result.markOverdefined();
- return getCachedEntryForBlock(BB) = Result;
+ if ((!isa<BinaryOperator>(BBI) && !isa<CastInst>(BBI)) ||
+ !BBI->getType()->isIntegerTy()) {
+ DEBUG(dbgs() << " compute BB '" << BB->getName()
+ << "' - overdefined because inst def found.\n");
+ Result.markOverdefined();
+ return Result;
+ }
+
+ // FIXME: We're currently limited to binops with a constant RHS. This should
+ // be improved.
+ BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI);
+ if (BO && !isa<ConstantInt>(BO->getOperand(1))) {
+ DEBUG(dbgs() << " compute BB '" << BB->getName()
+ << "' - overdefined because inst def found.\n");
+
+ Result.markOverdefined();
+ return Result;
+ }
+
+ // Figure out the range of the LHS. If that fails, bail.
+ LVILatticeVal LHSVal = Parent.getValueInBlock(BBI->getOperand(0), BB);
+ if (!LHSVal.isConstantRange()) {
+ Result.markOverdefined();
+ return Result;
+ }
+
+ ConstantInt *RHS = 0;
+ ConstantRange LHSRange = LHSVal.getConstantRange();
+ ConstantRange RHSRange(1);
+ const IntegerType *ResultTy = cast<IntegerType>(BBI->getType());
+ if (isa<BinaryOperator>(BBI)) {
+ RHS = dyn_cast<ConstantInt>(BBI->getOperand(1));
+ if (!RHS) {
+ Result.markOverdefined();
+ return Result;
+ }
+
+ RHSRange = ConstantRange(RHS->getValue(), RHS->getValue()+1);
+ }
+
+ // NOTE: We're currently limited by the set of operations that ConstantRange
+  // can evaluate symbolically.  Enhancing that set will allow us to analyze
+ // more definitions.
+ switch (BBI->getOpcode()) {
+ case Instruction::Add:
+ Result.markConstantRange(LHSRange.add(RHSRange));
+ break;
+ case Instruction::Sub:
+ Result.markConstantRange(LHSRange.sub(RHSRange));
+ break;
+ case Instruction::Mul:
+ Result.markConstantRange(LHSRange.multiply(RHSRange));
+ break;
+ case Instruction::UDiv:
+ Result.markConstantRange(LHSRange.udiv(RHSRange));
+ break;
+ case Instruction::Shl:
+ Result.markConstantRange(LHSRange.shl(RHSRange));
+ break;
+ case Instruction::LShr:
+ Result.markConstantRange(LHSRange.lshr(RHSRange));
+ break;
+ case Instruction::Trunc:
+ Result.markConstantRange(LHSRange.truncate(ResultTy->getBitWidth()));
+ break;
+ case Instruction::SExt:
+ Result.markConstantRange(LHSRange.signExtend(ResultTy->getBitWidth()));
+ break;
+ case Instruction::ZExt:
+ Result.markConstantRange(LHSRange.zeroExtend(ResultTy->getBitWidth()));
+ break;
+ case Instruction::BitCast:
+ Result.markConstantRange(LHSRange);
+ break;
+
+ // Unhandled instructions are overdefined.
+ default:
+ DEBUG(dbgs() << " compute BB '" << BB->getName()
+ << "' - overdefined because inst def found.\n");
+ Result.markOverdefined();
+ break;
+ }
+
+ return Cache[BB] = Result;
}
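As a concrete illustration of the switch above, a self-contained toy interval model (unlike llvm::ConstantRange it ignores bit width and wrap-around) showing how the Add case shifts a range when the RHS is the singleton [C, C+1):

#include <cassert>
#include <cstdint>

// Half-open interval [Lower, Upper); a simplified stand-in for ConstantRange.
struct Interval { int64_t Lower, Upper; };

// Mirrors the Instruction::Add case with RHSRange == [C, C+1).
Interval addConstant(Interval LHS, int64_t C) {
  Interval R = { LHS.Lower + C, LHS.Upper + C };
  return R;
}

int main() {
  Interval In = { 0, 10 };            // x is in [0, 10)
  Interval Out = addConstant(In, 3);  // so x+3 is in [3, 13)
  assert(Out.Lower == 3 && Out.Upper == 13);
  return 0;
}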
@@ -420,28 +636,57 @@ LVILatticeVal LVIQuery::getEdgeValue(BasicBlock *BBFrom, BasicBlock *BBTo) {
// it is.
if (BI->getCondition() == Val)
return LVILatticeVal::get(ConstantInt::get(
- Type::getInt1Ty(Val->getContext()), isTrueDest));
+ Type::getInt1Ty(Val->getContext()), isTrueDest));
// If the condition of the branch is an equality comparison, we may be
// able to infer the value.
- if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition()))
- if (ICI->isEquality() && ICI->getOperand(0) == Val &&
- isa<Constant>(ICI->getOperand(1))) {
+ ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition());
+ if (ICI && ICI->getOperand(0) == Val &&
+ isa<Constant>(ICI->getOperand(1))) {
+ if (ICI->isEquality()) {
// We know that V has the RHS constant if this is a true SETEQ or
// false SETNE.
if (isTrueDest == (ICI->getPredicate() == ICmpInst::ICMP_EQ))
return LVILatticeVal::get(cast<Constant>(ICI->getOperand(1)));
return LVILatticeVal::getNot(cast<Constant>(ICI->getOperand(1)));
}
+
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
+ // Calculate the range of values that would satisfy the comparison.
+ ConstantRange CmpRange(CI->getValue(), CI->getValue()+1);
+ ConstantRange TrueValues =
+ ConstantRange::makeICmpRegion(ICI->getPredicate(), CmpRange);
+
+ // If we're interested in the false dest, invert the condition.
+ if (!isTrueDest) TrueValues = TrueValues.inverse();
+
+ // Figure out the possible values of the query BEFORE this branch.
+ LVILatticeVal InBlock = getBlockValue(BBFrom);
+ if (!InBlock.isConstantRange())
+ return LVILatticeVal::getRange(TrueValues);
+
+ // Find all potential values that satisfy both the input and output
+ // conditions.
+ ConstantRange PossibleValues =
+ TrueValues.intersectWith(InBlock.getConstantRange());
+
+ return LVILatticeVal::getRange(PossibleValues);
+ }
+ }
}
}
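The same refinement in a hedged, self-contained form: compute the set that satisfies the comparison on the taken edge, then intersect it with what was already known. ConstantRange::makeICmpRegion covers all predicates with full wrap-around; this toy version covers unsigned less-than only:

#include <algorithm>
#include <cstdint>

struct Interval { uint64_t Lower, Upper; }; // half-open [Lower, Upper)

// Values of x satisfying "x <u C" on the true edge; the false edge gets an
// approximation of the complement (the top value is truncated by one).
Interval ultRegion(uint64_t C, bool IsTrueDest) {
  Interval T = { 0, C };
  Interval F = { C, UINT64_MAX };
  return IsTrueDest ? T : F;
}

// Mirrors TrueValues.intersectWith(InBlock.getConstantRange()).
Interval intersect(Interval A, Interval B) {
  uint64_t Lo = std::max(A.Lower, B.Lower);
  uint64_t Hi = std::min(A.Upper, B.Upper);
  Interval R = { Lo, Lo < Hi ? Hi : Lo }; // empty when the inputs are disjoint
  return R;
}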
// If the edge was formed by a switch on the value, then we may know exactly
// what it is.
if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
- // If BBTo is the default destination of the switch, we don't know anything.
- // Given a more powerful range analysis we could know stuff.
- if (SI->getCondition() == Val && SI->getDefaultDest() != BBTo) {
+ if (SI->getCondition() == Val) {
+ // We don't know anything in the default case.
+ if (SI->getDefaultDest() == BBTo) {
+ LVILatticeVal Result;
+ Result.markOverdefined();
+ return Result;
+ }
+
// We only know something if there is exactly one value that goes from
// BBFrom to BBTo.
unsigned NumEdges = 0;
@@ -474,7 +719,9 @@ LVILatticeVal LazyValueInfoCache::getValueInBlock(Value *V, BasicBlock *BB) {
DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
<< BB->getName() << "'\n");
- LVILatticeVal Result = LVIQuery(V, ValueCache[V]).getBlockValue(BB);
+ LVILatticeVal Result = LVIQuery(V, *this,
+ ValueCache[LVIValueHandle(V, this)],
+ OverDefinedCache).getBlockValue(BB);
DEBUG(dbgs() << " Result = " << Result << "\n");
return Result;
@@ -488,24 +735,80 @@ getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB) {
DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
<< FromBB->getName() << "' to '" << ToBB->getName() << "'\n");
+
LVILatticeVal Result =
- LVIQuery(V, ValueCache[V]).getEdgeValue(FromBB, ToBB);
+ LVIQuery(V, *this, ValueCache[LVIValueHandle(V, this)],
+ OverDefinedCache).getEdgeValue(FromBB, ToBB);
DEBUG(dbgs() << " Result = " << Result << "\n");
return Result;
}
+void LazyValueInfoCache::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
+ BasicBlock *NewSucc) {
+ // When an edge in the graph has been threaded, values that we could not
+  // determine a value for before (i.e. were marked overdefined) may now be
+  // solvable.  We do NOT try to proactively update these values.  Instead,
+  // we clear their entries from the cache, and allow lazy updating to recompute
+  // them when needed.
+
+  // The updating process is fairly simple: we need to drop cached info
+ // for all values that were marked overdefined in OldSucc, and for those same
+ // values in any successor of OldSucc (except NewSucc) in which they were
+ // also marked overdefined.
+ std::vector<BasicBlock*> worklist;
+ worklist.push_back(OldSucc);
+
+ DenseSet<Value*> ClearSet;
+ for (std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator
+ I = OverDefinedCache.begin(), E = OverDefinedCache.end(); I != E; ++I) {
+ if (I->first == OldSucc)
+ ClearSet.insert(I->second);
+ }
+
+ // Use a worklist to perform a depth-first search of OldSucc's successors.
+  // NOTE: We do not need a visited list since any block we have already
+  // visited will have had its overdefined markers cleared, so we will
+  // never loop back through its successors.
+ while (!worklist.empty()) {
+ BasicBlock *ToUpdate = worklist.back();
+ worklist.pop_back();
+
+ // Skip blocks only accessible through NewSucc.
+ if (ToUpdate == NewSucc) continue;
+
+ bool changed = false;
+ for (DenseSet<Value*>::iterator I = ClearSet.begin(),E = ClearSet.end();
+ I != E; ++I) {
+ // If a value was marked overdefined in OldSucc, and is here too...
+ std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator OI =
+ OverDefinedCache.find(std::make_pair(ToUpdate, *I));
+ if (OI == OverDefinedCache.end()) continue;
+
+ // Remove it from the caches.
+ ValueCacheEntryTy &Entry = ValueCache[LVIValueHandle(*I, this)];
+ ValueCacheEntryTy::iterator CI = Entry.find(ToUpdate);
+
+ assert(CI != Entry.end() && "Couldn't find entry to update?");
+ Entry.erase(CI);
+ OverDefinedCache.erase(OI);
+
+ // If we removed anything, then we potentially need to update
+      // the block's successors too.
+ changed = true;
+ }
+
+ if (!changed) continue;
+
+ worklist.insert(worklist.end(), succ_begin(ToUpdate), succ_end(ToUpdate));
+ }
+}
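The invalidation walk, reduced to a toy graph (block names invented; the real code clears per-value markers while this sketch keeps one per block). Erasing a marker is exactly what licenses pushing a block's successors, which is why no separate visited set is needed:

#include <map>
#include <set>
#include <string>
#include <vector>

typedef std::map<std::string, std::vector<std::string> > Graph;

void invalidateFrom(const Graph &Succs, std::set<std::string> &OverDefined,
                    const std::string &OldSucc, const std::string &NewSucc) {
  std::vector<std::string> Worklist;
  Worklist.push_back(OldSucc);
  while (!Worklist.empty()) {
    std::string BB = Worklist.back();
    Worklist.pop_back();
    if (BB == NewSucc) continue;           // only reachable via the new edge
    if (!OverDefined.erase(BB)) continue;  // nothing cached here: stop
    Graph::const_iterator It = Succs.find(BB);
    if (It != Succs.end())
      Worklist.insert(Worklist.end(), It->second.begin(), It->second.end());
  }
}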
+
//===----------------------------------------------------------------------===//
// LazyValueInfo Impl
//===----------------------------------------------------------------------===//
-bool LazyValueInfo::runOnFunction(Function &F) {
- TD = getAnalysisIfAvailable<TargetData>();
- // Fully lazy.
- return false;
-}
-
/// getCache - This lazily constructs the LazyValueInfoCache.
static LazyValueInfoCache &getCache(void *&PImpl) {
if (!PImpl)
@@ -513,6 +816,15 @@ static LazyValueInfoCache &getCache(void *&PImpl) {
return *static_cast<LazyValueInfoCache*>(PImpl);
}
+bool LazyValueInfo::runOnFunction(Function &F) {
+ if (PImpl)
+ getCache(PImpl).clear();
+
+ TD = getAnalysisIfAvailable<TargetData>();
+ // Fully lazy.
+ return false;
+}
+
void LazyValueInfo::releaseMemory() {
// If the cache was allocated, free it.
if (PImpl) {
@@ -526,6 +838,11 @@ Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB) {
if (Result.isConstant())
return Result.getConstant();
+ else if (Result.isConstantRange()) {
+ ConstantRange CR = Result.getConstantRange();
+ if (const APInt *SingleVal = CR.getSingleElement())
+ return ConstantInt::get(V->getContext(), *SingleVal);
+ }
return 0;
}
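The single-element case deserves a concrete restatement: a half-open range [C, C+1) contains exactly the value C, which is how a constant-range result can still fold to a constant. A toy version of the check (ConstantRange::getSingleElement returns an APInt* instead):

#include <cstdint>

bool getSingleElement(uint64_t Lo, uint64_t Hi, uint64_t &Out) {
  if (Hi != Lo + 1)
    return false; // empty, or more than one value
  Out = Lo;
  return true;
}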
@@ -537,6 +854,11 @@ Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
if (Result.isConstant())
return Result.getConstant();
+ else if (Result.isConstantRange()) {
+ ConstantRange CR = Result.getConstantRange();
+ if (const APInt *SingleVal = CR.getSingleElement())
+ return ConstantInt::get(V->getContext(), *SingleVal);
+ }
return 0;
}
@@ -557,6 +879,36 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
return Unknown;
}
+ if (Result.isConstantRange()) {
+ ConstantInt *CI = dyn_cast<ConstantInt>(C);
+ if (!CI) return Unknown;
+
+ ConstantRange CR = Result.getConstantRange();
+ if (Pred == ICmpInst::ICMP_EQ) {
+ if (!CR.contains(CI->getValue()))
+ return False;
+
+ if (CR.isSingleElement() && CR.contains(CI->getValue()))
+ return True;
+ } else if (Pred == ICmpInst::ICMP_NE) {
+ if (!CR.contains(CI->getValue()))
+ return True;
+
+ if (CR.isSingleElement() && CR.contains(CI->getValue()))
+ return False;
+ }
+
+ // Handle more complex predicates.
+ ConstantRange RHS(CI->getValue(), CI->getValue()+1);
+ ConstantRange TrueValues = ConstantRange::makeICmpRegion(Pred, RHS);
+ if (CR.intersectWith(TrueValues).isEmptySet())
+ return False;
+ else if (TrueValues.contains(CR))
+ return True;
+
+ return Unknown;
+ }
+
if (Result.isNotConstant()) {
// If this is an equality comparison, we can try to fold it knowing that
// "V != C1".
@@ -579,4 +931,11 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
return Unknown;
}
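The equality cases above reduce to a small three-valued function over the known range; a self-contained sketch with a half-open [Lo, Hi) (the general predicates go through makeICmpRegion as shown):

#include <cstdint>

enum Tristate { TriFalse, TriTrue, TriUnknown };

Tristate evalEquality(uint64_t Lo, uint64_t Hi, uint64_t C, bool IsEQ) {
  bool Contains = (Lo <= C && C < Hi);
  bool Single = (Hi == Lo + 1);
  if (!Contains) return IsEQ ? TriFalse : TriTrue;  // C is impossible
  if (Single)    return IsEQ ? TriTrue : TriFalse;  // the value must be C
  return TriUnknown;                                // both outcomes possible
}

// e.g. with x in [0, 10): "x == 12" is TriFalse and "x != 12" is TriTrue;
// with x in [5, 6): "x == 5" is TriTrue.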
+void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
+ BasicBlock* NewSucc) {
+ if (PImpl) getCache(PImpl).threadEdge(PredBB, OldSucc, NewSucc);
+}
+void LazyValueInfo::eraseBlock(BasicBlock *BB) {
+ if (PImpl) getCache(PImpl).eraseBlock(BB);
+}
diff --git a/contrib/llvm/lib/Analysis/LibCallAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/LibCallAliasAnalysis.cpp
index 7419659..7f51202 100644
--- a/contrib/llvm/lib/Analysis/LibCallAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/LibCallAliasAnalysis.cpp
@@ -20,11 +20,8 @@ using namespace llvm;
// Register this pass...
char LibCallAliasAnalysis::ID = 0;
-static RegisterPass<LibCallAliasAnalysis>
-X("libcall-aa", "LibCall Alias Analysis", false, true);
-
-// Declare that we implement the AliasAnalysis interface
-static RegisterAnalysisGroup<AliasAnalysis> Y(X);
+INITIALIZE_AG_PASS(LibCallAliasAnalysis, AliasAnalysis, "libcall-aa",
+ "LibCall Alias Analysis", false, true, false);
FunctionPass *llvm::createLibCallAliasAnalysisPass(LibCallInfo *LCI) {
return new LibCallAliasAnalysis(LCI);
@@ -46,7 +43,7 @@ void LibCallAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
/// vs the specified pointer/size.
AliasAnalysis::ModRefResult
LibCallAliasAnalysis::AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
- CallSite CS, Value *P,
+ ImmutableCallSite CS, const Value *P,
unsigned Size) {
// If we have a function, check to see what kind of mod/ref effects it
// has. Start by including any info globally known about the function.
@@ -120,13 +117,14 @@ LibCallAliasAnalysis::AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
// specified memory object.
//
AliasAnalysis::ModRefResult
-LibCallAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
+LibCallAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size) {
ModRefResult MRInfo = ModRef;
// If this is a direct call to a function that LCI knows about, get the
// information about the runtime function.
if (LCI) {
- if (Function *F = CS.getCalledFunction()) {
+ if (const Function *F = CS.getCalledFunction()) {
if (const LibCallFunctionInfo *FI = LCI->getFunctionInfo(F)) {
MRInfo = ModRefResult(MRInfo & AnalyzeLibCallDetails(FI, CS, P, Size));
if (MRInfo == NoModRef) return NoModRef;
diff --git a/contrib/llvm/lib/Analysis/LibCallSemantics.cpp b/contrib/llvm/lib/Analysis/LibCallSemantics.cpp
index e0060c3..81b0f46 100644
--- a/contrib/llvm/lib/Analysis/LibCallSemantics.cpp
+++ b/contrib/llvm/lib/Analysis/LibCallSemantics.cpp
@@ -40,7 +40,8 @@ const LibCallLocationInfo &LibCallInfo::getLocationInfo(unsigned LocID) const {
/// getFunctionInfo - Return the LibCallFunctionInfo object corresponding to
/// the specified function if we have it. If not, return null.
-const LibCallFunctionInfo *LibCallInfo::getFunctionInfo(Function *F) const {
+const LibCallFunctionInfo *
+LibCallInfo::getFunctionInfo(const Function *F) const {
StringMap<const LibCallFunctionInfo*> *Map = getMap(Impl);
/// If this is the first time we are querying for this info, lazily construct
diff --git a/contrib/llvm/lib/Analysis/Lint.cpp b/contrib/llvm/lib/Analysis/Lint.cpp
index 9f1b30d..a9d9724 100644
--- a/contrib/llvm/lib/Analysis/Lint.cpp
+++ b/contrib/llvm/lib/Analysis/Lint.cpp
@@ -108,7 +108,7 @@ namespace {
raw_string_ostream MessagesStr;
static char ID; // Pass identification, replacement for typeid
- Lint() : FunctionPass(&ID), MessagesStr(Messages) {}
+ Lint() : FunctionPass(ID), MessagesStr(Messages) {}
virtual bool runOnFunction(Function &F);
@@ -167,8 +167,7 @@ namespace {
}
char Lint::ID = 0;
-static RegisterPass<Lint>
-X("lint", "Statically lint-checks LLVM IR", false, true);
+INITIALIZE_PASS(Lint, "lint", "Statically lint-checks LLVM IR", false, true);
// Assert - We know that cond should be true, if not print an error message.
#define Assert(C, M) \
@@ -247,8 +246,7 @@ void Lint::visitCallSite(CallSite CS) {
// where nothing is known.
if (Formal->hasNoAliasAttr() && Actual->getType()->isPointerTy())
for (CallSite::arg_iterator BI = CS.arg_begin(); BI != AE; ++BI) {
- Assert1(AI == BI ||
- AA->alias(*AI, ~0u, *BI, ~0u) != AliasAnalysis::MustAlias,
+ Assert1(AI == BI || AA->alias(*AI, *BI) != AliasAnalysis::MustAlias,
"Unusual: noalias argument aliases another argument", &I);
}
@@ -520,6 +518,9 @@ void Lint::visitVAArgInst(VAArgInst &I) {
void Lint::visitIndirectBrInst(IndirectBrInst &I) {
visitMemoryReference(I, I.getAddress(), ~0u, 0, 0, MemRef::Branchee);
+
+ Assert1(I.getNumDestinations() != 0,
+ "Undefined behavior: indirectbr with no destinations", &I);
}
void Lint::visitExtractElementInst(ExtractElementInst &I) {
diff --git a/contrib/llvm/lib/Analysis/LiveValues.cpp b/contrib/llvm/lib/Analysis/LiveValues.cpp
index 23964ff..0225f4f 100644
--- a/contrib/llvm/lib/Analysis/LiveValues.cpp
+++ b/contrib/llvm/lib/Analysis/LiveValues.cpp
@@ -22,10 +22,10 @@ namespace llvm {
}
char LiveValues::ID = 0;
-static RegisterPass<LiveValues>
-X("live-values", "Value Liveness Analysis", false, true);
+INITIALIZE_PASS(LiveValues, "live-values",
+ "Value Liveness Analysis", false, true);
-LiveValues::LiveValues() : FunctionPass(&ID) {}
+LiveValues::LiveValues() : FunctionPass(ID) {}
void LiveValues::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<DominatorTree>();
diff --git a/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp
index e101947..82c02dc 100644
--- a/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp
@@ -46,8 +46,8 @@ LoopPass *llvm::createLoopDependenceAnalysisPass() {
return new LoopDependenceAnalysis();
}
-static RegisterPass<LoopDependenceAnalysis>
-R("lda", "Loop Dependence Analysis", false, true);
+INITIALIZE_PASS(LoopDependenceAnalysis, "lda",
+ "Loop Dependence Analysis", false, true);
char LoopDependenceAnalysis::ID = 0;
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Analysis/LoopInfo.cpp b/contrib/llvm/lib/Analysis/LoopInfo.cpp
index 818d0a9..46219d1 100644
--- a/contrib/llvm/lib/Analysis/LoopInfo.cpp
+++ b/contrib/llvm/lib/Analysis/LoopInfo.cpp
@@ -38,8 +38,7 @@ VerifyLoopInfoX("verify-loop-info", cl::location(VerifyLoopInfo),
cl::desc("Verify loop info (time consuming)"));
char LoopInfo::ID = 0;
-static RegisterPass<LoopInfo>
-X("loops", "Natural Loop Information", true, true);
+INITIALIZE_PASS(LoopInfo, "loops", "Natural Loop Information", true, true);
//===----------------------------------------------------------------------===//
// Loop implementation
@@ -124,14 +123,13 @@ PHINode *Loop::getCanonicalInductionVariable() const {
BasicBlock *H = getHeader();
BasicBlock *Incoming = 0, *Backedge = 0;
- typedef GraphTraits<Inverse<BasicBlock*> > InvBlockTraits;
- InvBlockTraits::ChildIteratorType PI = InvBlockTraits::child_begin(H);
- assert(PI != InvBlockTraits::child_end(H) &&
+ pred_iterator PI = pred_begin(H);
+ assert(PI != pred_end(H) &&
"Loop must have at least one backedge!");
Backedge = *PI++;
- if (PI == InvBlockTraits::child_end(H)) return 0; // dead loop
+ if (PI == pred_end(H)) return 0; // dead loop
Incoming = *PI++;
- if (PI != InvBlockTraits::child_end(H)) return 0; // multiple backedges?
+ if (PI != pred_end(H)) return 0; // multiple backedges?
if (contains(Incoming)) {
if (contains(Backedge))
@@ -157,18 +155,6 @@ PHINode *Loop::getCanonicalInductionVariable() const {
return 0;
}
-/// getCanonicalInductionVariableIncrement - Return the LLVM value that holds
-/// the canonical induction variable value for the "next" iteration of the
-/// loop. This always succeeds if getCanonicalInductionVariable succeeds.
-///
-Instruction *Loop::getCanonicalInductionVariableIncrement() const {
- if (PHINode *PN = getCanonicalInductionVariable()) {
- bool P1InLoop = contains(PN->getIncomingBlock(1));
- return cast<Instruction>(PN->getIncomingValue(P1InLoop));
- }
- return 0;
-}
-
/// getTripCount - Return a loop-invariant LLVM value indicating the number of
/// times the loop will be executed. Note that this means that the backedge
/// of the loop executes N-1 times. If the trip-count cannot be determined,
@@ -180,12 +166,12 @@ Instruction *Loop::getCanonicalInductionVariableIncrement() const {
Value *Loop::getTripCount() const {
// Canonical loops will end with a 'cmp ne I, V', where I is the incremented
// canonical induction variable and V is the trip count of the loop.
- Instruction *Inc = getCanonicalInductionVariableIncrement();
- if (Inc == 0) return 0;
- PHINode *IV = cast<PHINode>(Inc->getOperand(0));
+ PHINode *IV = getCanonicalInductionVariable();
+ if (IV == 0 || IV->getNumIncomingValues() != 2) return 0;
- BasicBlock *BackedgeBlock =
- IV->getIncomingBlock(contains(IV->getIncomingBlock(1)));
+ bool P0InLoop = contains(IV->getIncomingBlock(0));
+ Value *Inc = IV->getIncomingValue(!P0InLoop);
+ BasicBlock *BackedgeBlock = IV->getIncomingBlock(!P0InLoop);
if (BranchInst *BI = dyn_cast<BranchInst>(BackedgeBlock->getTerminator()))
if (BI->isConditional()) {
@@ -341,16 +327,12 @@ Loop::getUniqueExitBlocks(SmallVectorImpl<BasicBlock *> &ExitBlocks) const {
BasicBlock *current = *BI;
switchExitBlocks.clear();
- typedef GraphTraits<BasicBlock *> BlockTraits;
- typedef GraphTraits<Inverse<BasicBlock *> > InvBlockTraits;
- for (BlockTraits::ChildIteratorType I =
- BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
- I != E; ++I) {
+ for (succ_iterator I = succ_begin(*BI), E = succ_end(*BI); I != E; ++I) {
// If block is inside the loop then it is not a exit block.
if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I))
continue;
- InvBlockTraits::ChildIteratorType PI = InvBlockTraits::child_begin(*I);
+ pred_iterator PI = pred_begin(*I);
BasicBlock *firstPred = *PI;
// If current basic block is this exit block's first predecessor
@@ -363,8 +345,7 @@ Loop::getUniqueExitBlocks(SmallVectorImpl<BasicBlock *> &ExitBlocks) const {
// If a terminator has more then two successors, for example SwitchInst,
// then it is possible that there are multiple edges from current block
// to one exit block.
- if (std::distance(BlockTraits::child_begin(current),
- BlockTraits::child_end(current)) <= 2) {
+ if (std::distance(succ_begin(current), succ_end(current)) <= 2) {
ExitBlocks.push_back(*I);
continue;
}
diff --git a/contrib/llvm/lib/Analysis/LoopPass.cpp b/contrib/llvm/lib/Analysis/LoopPass.cpp
index 2727d2f..15d4db8 100644
--- a/contrib/llvm/lib/Analysis/LoopPass.cpp
+++ b/contrib/llvm/lib/Analysis/LoopPass.cpp
@@ -30,9 +30,9 @@ private:
public:
static char ID;
- PrintLoopPass() : LoopPass(&ID), Out(dbgs()) {}
+ PrintLoopPass() : LoopPass(ID), Out(dbgs()) {}
PrintLoopPass(const std::string &B, raw_ostream &o)
- : LoopPass(&ID), Banner(B), Out(o) {}
+ : LoopPass(ID), Banner(B), Out(o) {}
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
@@ -59,7 +59,7 @@ char PrintLoopPass::ID = 0;
char LPPassManager::ID = 0;
LPPassManager::LPPassManager(int Depth)
- : FunctionPass(&ID), PMDataManager(Depth) {
+ : FunctionPass(ID), PMDataManager(Depth) {
skipThisLoop = false;
redoThisLoop = false;
LI = NULL;
@@ -183,7 +183,7 @@ void LPPassManager::redoLoop(Loop *L) {
void LPPassManager::cloneBasicBlockSimpleAnalysis(BasicBlock *From,
BasicBlock *To, Loop *L) {
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
- LoopPass *LP = (LoopPass *)getContainedPass(Index);
+ LoopPass *LP = getContainedPass(Index);
LP->cloneBasicBlockAnalysis(From, To, L);
}
}
@@ -198,7 +198,7 @@ void LPPassManager::deleteSimpleAnalysisValue(Value *V, Loop *L) {
}
}
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
- LoopPass *LP = (LoopPass *)getContainedPass(Index);
+ LoopPass *LP = getContainedPass(Index);
LP->deleteAnalysisValue(V, L);
}
}
@@ -240,7 +240,7 @@ bool LPPassManager::runOnFunction(Function &F) {
I != E; ++I) {
Loop *L = *I;
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
- LoopPass *P = (LoopPass*)getContainedPass(Index);
+ LoopPass *P = getContainedPass(Index);
Changed |= P->doInitialization(L, *this);
}
}
@@ -254,7 +254,7 @@ bool LPPassManager::runOnFunction(Function &F) {
// Run all passes on the current Loop.
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
- LoopPass *P = (LoopPass*)getContainedPass(Index);
+ LoopPass *P = getContainedPass(Index);
dumpPassInfo(P, EXECUTION_MSG, ON_LOOP_MSG,
CurrentLoop->getHeader()->getName());
@@ -320,7 +320,7 @@ bool LPPassManager::runOnFunction(Function &F) {
// Finalization
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
- LoopPass *P = (LoopPass *)getContainedPass(Index);
+ LoopPass *P = getContainedPass(Index);
Changed |= P->doFinalization();
}
diff --git a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 1f54d74..d18d5ce 100644
--- a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -46,11 +46,11 @@ STATISTIC(NumCacheCompleteNonLocalPtr,
char MemoryDependenceAnalysis::ID = 0;
// Register this pass...
-static RegisterPass<MemoryDependenceAnalysis> X("memdep",
- "Memory Dependence Analysis", false, true);
+INITIALIZE_PASS(MemoryDependenceAnalysis, "memdep",
+ "Memory Dependence Analysis", false, true);
MemoryDependenceAnalysis::MemoryDependenceAnalysis()
-: FunctionPass(&ID), PredCache(0) {
+: FunctionPass(ID), PredCache(0) {
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
}
@@ -120,33 +120,21 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
Pointer = CI->getArgOperand(0);
// calls to free() erase the entire structure
PointerSize = ~0ULL;
- } else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
+ } else if (CallSite InstCS = cast<Value>(Inst)) {
// Debug intrinsics don't cause dependences.
if (isa<DbgInfoIntrinsic>(Inst)) continue;
- CallSite InstCS = CallSite::get(Inst);
// If these two calls do not interfere, look past it.
switch (AA->getModRefInfo(CS, InstCS)) {
case AliasAnalysis::NoModRef:
- // If the two calls don't interact (e.g. InstCS is readnone) keep
- // scanning.
+ // If the two calls are the same, return InstCS as a Def, so that
+      // CS can be recognized as redundant and eliminated.
+ if (isReadOnlyCall && InstCS.onlyReadsMemory() &&
+ CS.getInstruction()->isIdenticalToWhenDefined(Inst))
+ return MemDepResult::getDef(Inst);
+
+ // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
+ // keep scanning.
continue;
- case AliasAnalysis::Ref:
- // If the two calls read the same memory locations and CS is a readonly
- // function, then we have two cases: 1) the calls may not interfere with
- // each other at all. 2) the calls may produce the same value. In case
- // #1 we want to ignore the values, in case #2, we want to return Inst
- // as a Def dependence. This allows us to CSE in cases like:
- // X = strlen(P);
- // memchr(...);
- // Y = strlen(P); // Y = X
- if (isReadOnlyCall) {
- if (CS.getCalledFunction() != 0 &&
- CS.getCalledFunction() == InstCS.getCalledFunction())
- return MemDepResult::getDef(Inst);
- // Ignore unrelated read/read call dependences.
- continue;
- }
- // FALL THROUGH
default:
return MemDepResult::getClobber(Inst);
}
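A toy model of the new read-only test (the struct fields are invented stand-ins for onlyReadsMemory and isIdenticalToWhenDefined): two calls may share a result when they are identical and neither writes memory, which is what lets a repeated strlen(P) collapse to a single call:

#include <string>
#include <vector>

struct Call {
  std::string Callee;
  std::vector<std::string> Args;
  bool OnlyReadsMemory;
};

// The later call can be treated as a Def of the earlier one: same callee,
// same arguments, and neither call writes memory.
bool isRedundant(const Call &Earlier, const Call &Later) {
  return Earlier.OnlyReadsMemory && Later.OnlyReadsMemory &&
         Earlier.Callee == Later.Callee && Earlier.Args == Later.Args;
}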
@@ -196,8 +184,7 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
// FIXME: This only considers queries directly on the invariant-tagged
// pointer, not on query pointers that are indexed off of them. It'd
// be nice to handle that at some point.
- AliasAnalysis::AliasResult R =
- AA->alias(II->getArgOperand(2), ~0U, MemPtr, ~0U);
+ AliasAnalysis::AliasResult R = AA->alias(II->getArgOperand(2), MemPtr);
if (R == AliasAnalysis::MustAlias) {
InvariantTag = II->getArgOperand(0);
continue;
@@ -209,8 +196,7 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
// FIXME: This only considers queries directly on the invariant-tagged
// pointer, not on query pointers that are indexed off of them. It'd
// be nice to handle that at some point.
- AliasAnalysis::AliasResult R =
- AA->alias(II->getArgOperand(1), ~0U, MemPtr, ~0U);
+ AliasAnalysis::AliasResult R = AA->alias(II->getArgOperand(1), MemPtr);
if (R == AliasAnalysis::MustAlias)
return MemDepResult::getDef(II);
}
@@ -387,7 +373,7 @@ MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
MemSize = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
break;
default:
- CallSite QueryCS = CallSite::get(QueryInst);
+ CallSite QueryCS(QueryInst);
bool isReadOnly = AA->onlyReadsMemory(QueryCS);
LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
QueryParent);
diff --git a/contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp b/contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp
index 556d4c8..2cc1c2a 100644
--- a/contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp
@@ -30,7 +30,7 @@ namespace {
DebugInfoFinder Finder;
public:
static char ID; // Pass identification, replacement for typeid
- ModuleDebugInfoPrinter() : ModulePass(&ID) {}
+ ModuleDebugInfoPrinter() : ModulePass(ID) {}
virtual bool runOnModule(Module &M);
@@ -42,9 +42,8 @@ namespace {
}
char ModuleDebugInfoPrinter::ID = 0;
-static RegisterPass<ModuleDebugInfoPrinter>
-X("module-debuginfo",
- "Decodes module-level debug info", false, true);
+INITIALIZE_PASS(ModuleDebugInfoPrinter, "module-debuginfo",
+ "Decodes module-level debug info", false, true);
ModulePass *llvm::createModuleDebugInfoPrinterPass() {
return new ModuleDebugInfoPrinter();
diff --git a/contrib/llvm/lib/Analysis/PointerTracking.cpp b/contrib/llvm/lib/Analysis/PointerTracking.cpp
index 14df0b7..07f4682 100644
--- a/contrib/llvm/lib/Analysis/PointerTracking.cpp
+++ b/contrib/llvm/lib/Analysis/PointerTracking.cpp
@@ -28,7 +28,7 @@
using namespace llvm;
char PointerTracking::ID = 0;
-PointerTracking::PointerTracking() : FunctionPass(&ID) {}
+PointerTracking::PointerTracking() : FunctionPass(ID) {}
bool PointerTracking::runOnFunction(Function &F) {
predCache.clear();
@@ -144,6 +144,55 @@ const SCEV *PointerTracking::computeAllocationCount(Value *P,
return SE->getCouldNotCompute();
}
+Value *PointerTracking::computeAllocationCountValue(Value *P,
+                                                    const Type *&Ty) const {
+ Value *V = P->stripPointerCasts();
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
+ Ty = AI->getAllocatedType();
+ // arraySize elements of type Ty.
+ return AI->getArraySize();
+ }
+
+ if (CallInst *CI = extractMallocCall(V)) {
+ Ty = getMallocAllocatedType(CI);
+ if (!Ty)
+ return 0;
+ Value *arraySize = getMallocArraySize(CI, TD);
+ if (!arraySize) {
+ Ty = Type::getInt8Ty(P->getContext());
+ return CI->getArgOperand(0);
+ }
+ // arraySize elements of type Ty.
+ return arraySize;
+ }
+
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
+ if (GV->hasDefinitiveInitializer()) {
+ Constant *C = GV->getInitializer();
+ if (const ArrayType *ATy = dyn_cast<ArrayType>(C->getType())) {
+ Ty = ATy->getElementType();
+ return ConstantInt::get(Type::getInt32Ty(P->getContext()),
+ ATy->getNumElements());
+ }
+ }
+ Ty = cast<PointerType>(GV->getType())->getElementType();
+ return ConstantInt::get(Type::getInt32Ty(P->getContext()), 1);
+    // TODO: implement more tracking for globals
+ }
+
+ if (CallInst *CI = dyn_cast<CallInst>(V)) {
+ CallSite CS(CI);
+ Function *F = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
+ if (F == reallocFunc) {
+ Ty = Type::getInt8Ty(P->getContext());
+ // realloc allocates arg1 bytes.
+ return CS.getArgument(1);
+ }
+ }
+
+ return 0;
+}
+
// Calculates the number of elements of type Ty allocated for P.
const SCEV *PointerTracking::computeAllocationCountForType(Value *P,
const Type *Ty)
@@ -263,5 +312,5 @@ void PointerTracking::print(raw_ostream &OS, const Module* M) const {
}
}
-static RegisterPass<PointerTracking> X("pointertracking",
- "Track pointer bounds", false, true);
+INITIALIZE_PASS(PointerTracking, "pointertracking",
+ "Track pointer bounds", false, true);
diff --git a/contrib/llvm/lib/Analysis/PostDominators.cpp b/contrib/llvm/lib/Analysis/PostDominators.cpp
index 7354afa..cbe8d18 100644
--- a/contrib/llvm/lib/Analysis/PostDominators.cpp
+++ b/contrib/llvm/lib/Analysis/PostDominators.cpp
@@ -28,8 +28,8 @@ using namespace llvm;
char PostDominatorTree::ID = 0;
char PostDominanceFrontier::ID = 0;
-static RegisterPass<PostDominatorTree>
-F("postdomtree", "Post-Dominator Tree Construction", true, true);
+INITIALIZE_PASS(PostDominatorTree, "postdomtree",
+ "Post-Dominator Tree Construction", true, true);
bool PostDominatorTree::runOnFunction(Function &F) {
DT->recalculate(F);
@@ -53,8 +53,8 @@ FunctionPass* llvm::createPostDomTree() {
// PostDominanceFrontier Implementation
//===----------------------------------------------------------------------===//
-static RegisterPass<PostDominanceFrontier>
-H("postdomfrontier", "Post-Dominance Frontier Construction", true, true);
+INITIALIZE_PASS(PostDominanceFrontier, "postdomfrontier",
+ "Post-Dominance Frontier Construction", true, true);
const DominanceFrontier::DomSetType &
PostDominanceFrontier::calculate(const PostDominatorTree &DT,
diff --git a/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp b/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp
index da4ce47..ecc0a18 100644
--- a/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp
@@ -39,7 +39,7 @@ namespace {
public:
static char ID; // Class identification, replacement for typeinfo
explicit ProfileEstimatorPass(const double execcount = 0)
- : FunctionPass(&ID), ExecCount(execcount) {
+ : FunctionPass(ID), ExecCount(execcount) {
if (execcount == 0) ExecCount = LoopWeight;
}
@@ -59,8 +59,8 @@ namespace {
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&ProfileInfo::ID))
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &ProfileInfo::ID)
return (ProfileInfo*)this;
return this;
}
@@ -72,13 +72,11 @@ namespace {
} // End of anonymous namespace
char ProfileEstimatorPass::ID = 0;
-static RegisterPass<ProfileEstimatorPass>
-X("profile-estimator", "Estimate profiling information", false, true);
-
-static RegisterAnalysisGroup<ProfileInfo> Y(X);
+INITIALIZE_AG_PASS(ProfileEstimatorPass, ProfileInfo, "profile-estimator",
+ "Estimate profiling information", false, true, false);
namespace llvm {
- const PassInfo *ProfileEstimatorPassID = &X;
+ char &ProfileEstimatorPassID = ProfileEstimatorPass::ID;
FunctionPass *createProfileEstimatorPass() {
return new ProfileEstimatorPass();
diff --git a/contrib/llvm/lib/Analysis/ProfileInfo.cpp b/contrib/llvm/lib/Analysis/ProfileInfo.cpp
index 8d2712f..fc7f286 100644
--- a/contrib/llvm/lib/Analysis/ProfileInfo.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileInfo.cpp
@@ -1076,14 +1076,14 @@ raw_ostream& operator<<(raw_ostream &O, std::pair<const MachineBasicBlock *, con
namespace {
struct NoProfileInfo : public ImmutablePass, public ProfileInfo {
static char ID; // Class identification, replacement for typeinfo
- NoProfileInfo() : ImmutablePass(&ID) {}
+ NoProfileInfo() : ImmutablePass(ID) {}
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&ProfileInfo::ID))
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &ProfileInfo::ID)
return (ProfileInfo*)this;
return this;
}
@@ -1096,10 +1096,7 @@ namespace {
char NoProfileInfo::ID = 0;
// Register this pass...
-static RegisterPass<NoProfileInfo>
-X("no-profile", "No Profile Information", false, true);
-
-// Declare that we implement the ProfileInfo interface
-static RegisterAnalysisGroup<ProfileInfo, true> Y(X);
+INITIALIZE_AG_PASS(NoProfileInfo, ProfileInfo, "no-profile",
+ "No Profile Information", false, true, true);
ImmutablePass *llvm::createNoProfileInfoPass() { return new NoProfileInfo(); }
diff --git a/contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp b/contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp
index 8ea4ecf..d325b57 100644
--- a/contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp
@@ -45,7 +45,7 @@ namespace {
public:
static char ID; // Class identification, replacement for typeinfo
explicit LoaderPass(const std::string &filename = "")
- : ModulePass(&ID), Filename(filename) {
+ : ModulePass(ID), Filename(filename) {
if (filename.empty()) Filename = ProfileInfoFilename;
}
@@ -67,8 +67,8 @@ namespace {
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&ProfileInfo::ID))
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &ProfileInfo::ID)
return (ProfileInfo*)this;
return this;
}
@@ -79,12 +79,10 @@ namespace {
} // End of anonymous namespace
char LoaderPass::ID = 0;
-static RegisterPass<LoaderPass>
-X("profile-loader", "Load profile information from llvmprof.out", false, true);
+INITIALIZE_AG_PASS(LoaderPass, ProfileInfo, "profile-loader",
+ "Load profile information from llvmprof.out", false, true, false);
-static RegisterAnalysisGroup<ProfileInfo> Y(X);
-
-const PassInfo *llvm::ProfileLoaderPassID = &X;
+char &llvm::ProfileLoaderPassID = LoaderPass::ID;
ModulePass *llvm::createProfileLoaderPass() { return new LoaderPass(); }
diff --git a/contrib/llvm/lib/Analysis/ProfileVerifierPass.cpp b/contrib/llvm/lib/Analysis/ProfileVerifierPass.cpp
index 5d87e14..3f01b2d 100644
--- a/contrib/llvm/lib/Analysis/ProfileVerifierPass.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileVerifierPass.cpp
@@ -59,10 +59,10 @@ namespace llvm {
public:
static char ID; // Class identification, replacement for typeinfo
- explicit ProfileVerifierPassT () : FunctionPass(&ID) {
+ explicit ProfileVerifierPassT () : FunctionPass(ID) {
DisableAssertions = ProfileVerifierDisableAssertions;
}
- explicit ProfileVerifierPassT (bool da) : FunctionPass(&ID),
+ explicit ProfileVerifierPassT (bool da) : FunctionPass(ID),
DisableAssertions(da) {
}
@@ -366,8 +366,8 @@ namespace llvm {
char ProfileVerifierPassT<FType, BType>::ID = 0;
}
-static RegisterPass<ProfileVerifierPass>
-X("profile-verifier", "Verify profiling information", false, true);
+INITIALIZE_PASS(ProfileVerifierPass, "profile-verifier",
+ "Verify profiling information", false, true);
namespace llvm {
FunctionPass *createProfileVerifierPass() {
diff --git a/contrib/llvm/lib/Analysis/RegionInfo.cpp b/contrib/llvm/lib/Analysis/RegionInfo.cpp
new file mode 100644
index 0000000..abc057a
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/RegionInfo.cpp
@@ -0,0 +1,749 @@
+//===- RegionInfo.cpp - SESE region detection analysis --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Detects single entry single exit regions in the control flow graph.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/Analysis/RegionIterator.h"
+
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Analysis/LoopInfo.h"
+
+#define DEBUG_TYPE "region"
+#include "llvm/Support/Debug.h"
+
+#include <set>
+#include <algorithm>
+
+using namespace llvm;
+
+// Always verify if expensive checking is enabled.
+#ifdef XDEBUG
+static bool VerifyRegionInfo = true;
+#else
+static bool VerifyRegionInfo = false;
+#endif
+
+static cl::opt<bool,true>
+VerifyRegionInfoX("verify-region-info", cl::location(VerifyRegionInfo),
+ cl::desc("Verify region info (time consuming)"));
+
+STATISTIC(numRegions, "The # of regions");
+STATISTIC(numSimpleRegions, "The # of simple regions");
+
+//===----------------------------------------------------------------------===//
+/// PrintStyle - Print regions in different ways.
+enum PrintStyle { PrintNone, PrintBB, PrintRN };
+
+cl::opt<enum PrintStyle> printStyle("print-region-style", cl::Hidden,
+ cl::desc("style of printing regions"),
+ cl::values(
+ clEnumValN(PrintNone, "none", "print no details"),
+ clEnumValN(PrintBB, "bb", "print regions in detail with block_iterator"),
+ clEnumValN(PrintRN, "rn", "print regions in detail with element_iterator"),
+ clEnumValEnd));
+//===----------------------------------------------------------------------===//
+/// Region Implementation
+Region::Region(BasicBlock *Entry, BasicBlock *Exit, RegionInfo* RInfo,
+ DominatorTree *dt, Region *Parent)
+ : RegionNode(Parent, Entry, 1), RI(RInfo), DT(dt), exit(Exit) {}
+
+Region::~Region() {
+ // Free the cached nodes.
+ for (BBNodeMapT::iterator it = BBNodeMap.begin(),
+ ie = BBNodeMap.end(); it != ie; ++it)
+ delete it->second;
+
+ // Only clean the cache for this Region. Caches of child Regions will be
+ // cleaned when the child Regions are deleted.
+ BBNodeMap.clear();
+
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ delete *I;
+}
+
+bool Region::contains(const BasicBlock *B) const {
+ BasicBlock *BB = const_cast<BasicBlock*>(B);
+
+ assert(DT->getNode(BB) && "BB not part of the dominance tree");
+
+ BasicBlock *entry = getEntry(), *exit = getExit();
+
+ // Toplevel region.
+ if (!exit)
+ return true;
+
+ return (DT->dominates(entry, BB)
+ && !(DT->dominates(exit, BB) && DT->dominates(entry, exit)));
+}
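The containment test is just a predicate over three dominance facts; restated as a sketch for readability:

// BB is inside region (entry, exit) iff entry dominates BB, unless BB
// already lies beyond the exit, i.e. exit dominates BB while entry also
// dominates exit.
static bool containsBB(bool EntryDomBB, bool ExitDomBB, bool EntryDomExit) {
  return EntryDomBB && !(ExitDomBB && EntryDomExit);
}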
+
+bool Region::contains(const Loop *L) const {
+  // BBs that are not part of any loop are elements of the Loop
+ // described by the NULL pointer. This loop is not part of any region,
+ // except if the region describes the whole function.
+ if (L == 0)
+ return getExit() == 0;
+
+ if (!contains(L->getHeader()))
+ return false;
+
+ SmallVector<BasicBlock *, 8> ExitingBlocks;
+ L->getExitingBlocks(ExitingBlocks);
+
+ for (SmallVectorImpl<BasicBlock*>::iterator BI = ExitingBlocks.begin(),
+ BE = ExitingBlocks.end(); BI != BE; ++BI)
+ if (!contains(*BI))
+ return false;
+
+ return true;
+}
+
+Loop *Region::outermostLoopInRegion(Loop *L) const {
+ if (!contains(L))
+ return 0;
+
+ while (L && contains(L->getParentLoop())) {
+ L = L->getParentLoop();
+ }
+
+ return L;
+}
+
+Loop *Region::outermostLoopInRegion(LoopInfo *LI, BasicBlock* BB) const {
+ assert(LI && BB && "LI and BB cannot be null!");
+ Loop *L = LI->getLoopFor(BB);
+ return outermostLoopInRegion(L);
+}
+
+bool Region::isSimple() const {
+ bool isSimple = true;
+ bool found = false;
+
+ BasicBlock *entry = getEntry(), *exit = getExit();
+
+ // TopLevelRegion
+ if (!exit)
+ return false;
+
+ for (pred_iterator PI = pred_begin(entry), PE = pred_end(entry); PI != PE;
+ ++PI) {
+ BasicBlock *Pred = *PI;
+ if (DT->getNode(Pred) && !contains(Pred)) {
+ if (found) {
+ isSimple = false;
+ break;
+ }
+ found = true;
+ }
+ }
+
+ found = false;
+
+ for (pred_iterator PI = pred_begin(exit), PE = pred_end(exit); PI != PE;
+ ++PI)
+ if (contains(*PI)) {
+ if (found) {
+ isSimple = false;
+ break;
+ }
+ found = true;
+ }
+
+ return isSimple;
+}
+
+std::string Region::getNameStr() const {
+ std::string exitName;
+ std::string entryName;
+
+ if (getEntry()->getName().empty()) {
+ raw_string_ostream OS(entryName);
+
+ WriteAsOperand(OS, getEntry(), false);
+ entryName = OS.str();
+ } else
+ entryName = getEntry()->getNameStr();
+
+ if (getExit()) {
+ if (getExit()->getName().empty()) {
+ raw_string_ostream OS(exitName);
+
+ WriteAsOperand(OS, getExit(), false);
+ exitName = OS.str();
+ } else
+ exitName = getExit()->getNameStr();
+ } else
+ exitName = "<Function Return>";
+
+ return entryName + " => " + exitName;
+}
+
+void Region::verifyBBInRegion(BasicBlock *BB) const {
+ if (!contains(BB))
+ llvm_unreachable("Broken region found!");
+
+ BasicBlock *entry = getEntry(), *exit = getExit();
+
+ for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
+ if (!contains(*SI) && exit != *SI)
+ llvm_unreachable("Broken region found!");
+
+ if (entry != BB)
+ for (pred_iterator SI = pred_begin(BB), SE = pred_end(BB); SI != SE; ++SI)
+ if (!contains(*SI))
+ llvm_unreachable("Broken region found!");
+}
+
+void Region::verifyWalk(BasicBlock *BB, std::set<BasicBlock*> *visited) const {
+ BasicBlock *exit = getExit();
+
+ visited->insert(BB);
+
+ verifyBBInRegion(BB);
+
+ for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
+ if (*SI != exit && visited->find(*SI) == visited->end())
+ verifyWalk(*SI, visited);
+}
+
+void Region::verifyRegion() const {
+  // Only do verification when the user asks for it; otherwise this
+  // expensive check would be invoked every time the PassManager runs.
+ if (!VerifyRegionInfo) return;
+
+ std::set<BasicBlock*> visited;
+ verifyWalk(getEntry(), &visited);
+}
+
+void Region::verifyRegionNest() const {
+ for (Region::const_iterator RI = begin(), RE = end(); RI != RE; ++RI)
+ (*RI)->verifyRegionNest();
+
+ verifyRegion();
+}
+
+Region::block_iterator Region::block_begin() {
+ return GraphTraits<FlatIt<Region*> >::nodes_begin(this);
+}
+
+Region::block_iterator Region::block_end() {
+ return GraphTraits<FlatIt<Region*> >::nodes_end(this);
+}
+
+Region::const_block_iterator Region::block_begin() const {
+ return GraphTraits<FlatIt<const Region*> >::nodes_begin(this);
+}
+
+Region::const_block_iterator Region::block_end() const {
+ return GraphTraits<FlatIt<const Region*> >::nodes_end(this);
+}
+
+Region::element_iterator Region::element_begin() {
+ return GraphTraits<Region*>::nodes_begin(this);
+}
+
+Region::element_iterator Region::element_end() {
+ return GraphTraits<Region*>::nodes_end(this);
+}
+
+Region::const_element_iterator Region::element_begin() const {
+ return GraphTraits<const Region*>::nodes_begin(this);
+}
+
+Region::const_element_iterator Region::element_end() const {
+ return GraphTraits<const Region*>::nodes_end(this);
+}
+
+Region* Region::getSubRegionNode(BasicBlock *BB) const {
+ Region *R = RI->getRegionFor(BB);
+
+ if (!R || R == this)
+ return 0;
+
+  // If BB's region lies outside this region, our code is broken.
+ assert(contains(R) && "BB not in current region!");
+
+ while (contains(R->getParent()) && R->getParent() != this)
+ R = R->getParent();
+
+ if (R->getEntry() != BB)
+ return 0;
+
+ return R;
+}
+
+RegionNode* Region::getBBNode(BasicBlock *BB) const {
+ assert(contains(BB) && "Can get BB node out of this region!");
+
+ BBNodeMapT::const_iterator at = BBNodeMap.find(BB);
+
+ if (at != BBNodeMap.end())
+ return at->second;
+
+ RegionNode *NewNode = new RegionNode(const_cast<Region*>(this), BB);
+ BBNodeMap.insert(std::make_pair(BB, NewNode));
+ return NewNode;
+}
+
+RegionNode* Region::getNode(BasicBlock *BB) const {
+ assert(contains(BB) && "Can get BB node out of this region!");
+ if (Region* Child = getSubRegionNode(BB))
+ return Child->getNode();
+
+ return getBBNode(BB);
+}
+
+void Region::transferChildrenTo(Region *To) {
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ (*I)->parent = To;
+ To->children.push_back(*I);
+ }
+ children.clear();
+}
+
+void Region::addSubRegion(Region *SubRegion) {
+ assert(SubRegion->parent == 0 && "SubRegion already has a parent!");
+ SubRegion->parent = this;
+ // Set up the region node.
+ assert(std::find(children.begin(), children.end(), SubRegion) == children.end()
+ && "Node already exist!");
+ children.push_back(SubRegion);
+}
+
+
+Region *Region::removeSubRegion(Region *Child) {
+ assert(Child->parent == this && "Child is not a child of this region!");
+ Child->parent = 0;
+ RegionSet::iterator I = std::find(children.begin(), children.end(), Child);
+  assert(I != children.end() && "Region does not exist. Unable to remove.");
+  children.erase(I);
+ return Child;
+}
+
+unsigned Region::getDepth() const {
+ unsigned Depth = 0;
+
+ for (Region *R = parent; R != 0; R = R->parent)
+ ++Depth;
+
+ return Depth;
+}
+
+void Region::print(raw_ostream &OS, bool print_tree, unsigned level) const {
+ if (print_tree)
+ OS.indent(level*2) << "[" << level << "] " << getNameStr();
+ else
+ OS.indent(level*2) << getNameStr();
+
+ OS << "\n";
+
+
+ if (printStyle != PrintNone) {
+ OS.indent(level*2) << "{\n";
+ OS.indent(level*2 + 2);
+
+ if (printStyle == PrintBB) {
+ for (const_block_iterator I = block_begin(), E = block_end(); I!=E; ++I)
+ OS << **I << ", "; // TODO: remove the last ","
+ } else if (printStyle == PrintRN) {
+      for (const_element_iterator I = element_begin(), E = element_end();
+           I != E; ++I)
+        OS << **I << ", "; // TODO: remove the last ","
+ }
+
+ OS << "\n";
+ }
+
+ if (print_tree)
+ for (const_iterator RI = begin(), RE = end(); RI != RE; ++RI)
+ (*RI)->print(OS, print_tree, level+1);
+
+ if (printStyle != PrintNone)
+ OS.indent(level*2) << "} \n";
+}
+
+void Region::dump() const {
+ print(dbgs(), true, getDepth());
+}
+
+void Region::clearNodeCache() {
+ BBNodeMap.clear();
+ for (Region::iterator RI = begin(), RE = end(); RI != RE; ++RI)
+ (*RI)->clearNodeCache();
+}
+
+//===----------------------------------------------------------------------===//
+// RegionInfo implementation
+//
+
+bool RegionInfo::isCommonDomFrontier(BasicBlock *BB, BasicBlock *entry,
+ BasicBlock *exit) const {
+ for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
+ BasicBlock *P = *PI;
+ if (DT->dominates(entry, P) && !DT->dominates(exit, P))
+ return false;
+ }
+ return true;
+}
+
+bool RegionInfo::isRegion(BasicBlock *entry, BasicBlock *exit) const {
+ assert(entry && exit && "entry and exit must not be null!");
+ typedef DominanceFrontier::DomSetType DST;
+
+ DST *entrySuccs = &DF->find(entry)->second;
+
+ // Exit is the header of a loop that contains the entry. In this case,
+ // the dominance frontier must only contain the exit.
+ if (!DT->dominates(entry, exit)) {
+ for (DST::iterator SI = entrySuccs->begin(), SE = entrySuccs->end();
+ SI != SE; ++SI)
+ if (*SI != exit && *SI != entry)
+ return false;
+
+ return true;
+ }
+
+ DST *exitSuccs = &DF->find(exit)->second;
+
+ // Do not allow edges leaving the region.
+ for (DST::iterator SI = entrySuccs->begin(), SE = entrySuccs->end();
+ SI != SE; ++SI) {
+ if (*SI == exit || *SI == entry)
+ continue;
+ if (exitSuccs->find(*SI) == exitSuccs->end())
+ return false;
+ if (!isCommonDomFrontier(*SI, entry, exit))
+ return false;
+ }
+
+ // Do not allow edges pointing into the region.
+ for (DST::iterator SI = exitSuccs->begin(), SE = exitSuccs->end();
+ SI != SE; ++SI)
+ if (DT->properlyDominates(entry, *SI) && *SI != exit)
+ return false;
+
+ return true;
+}
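
In words: once entry dominates exit, the pair (entry, exit) is accepted iff no edge leaves the region (every block in entry's dominance frontier, other than entry and exit, also lies in exit's frontier and passes the common-frontier check) and no edge re-enters it (no block in exit's frontier, other than exit, is properly dominated by entry). A hedged sketch of the two set conditions, with std::set standing in for DominanceFrontier::DomSetType and the common-frontier refinement elided:

#include <set>

using Block = int;

static bool isRegionSketch(const std::set<Block> &entryDF,
                           const std::set<Block> &exitDF,
                           Block entry, Block exit,
                           bool (*properlyDominates)(Block, Block)) {
  // Condition 1: no edges may leave the region.
  for (Block B : entryDF)
    if (B != entry && B != exit && !exitDF.count(B))
      return false;
  // Condition 2: no edges may point back into the region.
  for (Block B : exitDF)
    if (B != exit && properlyDominates(entry, B))
      return false;
  return true;
}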
+
+void RegionInfo::insertShortCut(BasicBlock *entry, BasicBlock *exit,
+ BBtoBBMap *ShortCut) const {
+ assert(entry && exit && "entry and exit must not be null!");
+
+ BBtoBBMap::iterator e = ShortCut->find(exit);
+
+ if (e == ShortCut->end())
+ // No further region at exit available.
+ (*ShortCut)[entry] = exit;
+ else {
+    // We found a region e that starts at exit. Therefore (entry, e->second)
+    // is also a region, one larger than (entry, exit). Insert the
+    // larger one.
+ BasicBlock *BB = e->second;
+ (*ShortCut)[entry] = BB;
+ }
+}
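
A compact sketch of the chaining this produces: once a region is known to start at exit, a later entry is mapped past it, so one lookup can skip several regions (block names are illustrative):

#include <map>

using Block = const char *;

static void insertShortCutSketch(Block entry, Block exit,
                                 std::map<Block, Block> &ShortCut) {
  std::map<Block, Block>::iterator e = ShortCut.find(exit);
  // Either record (entry -> exit), or chain through the region that
  // already starts at exit and record the larger (entry -> e->second).
  ShortCut[entry] = (e == ShortCut.end()) ? exit : e->second;
}

For example, after ShortCut["C"] = "E", calling insertShortCutSketch("B", "C", ShortCut) records ShortCut["B"] == "E".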
+
+DomTreeNode* RegionInfo::getNextPostDom(DomTreeNode* N,
+ BBtoBBMap *ShortCut) const {
+ BBtoBBMap::iterator e = ShortCut->find(N->getBlock());
+
+ if (e == ShortCut->end())
+ return N->getIDom();
+
+ return PDT->getNode(e->second)->getIDom();
+}
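
The shortcut map pays off here: a lookup either steps to the immediate post-dominator or jumps over an entire known region in one go. A standalone sketch, with PDNode and `nodeOf` as illustrative stand-ins for DomTreeNode and PostDominatorTree::getNode:

#include <map>

struct PDNode {
  PDNode *IDom;   // Immediate post-dominator.
  const char *BB; // The block this node represents.
};

static PDNode *getNextPostDomSketch(
    PDNode *N, const std::map<const char *, const char *> &ShortCut,
    PDNode *(*nodeOf)(const char *)) {
  std::map<const char *, const char *>::const_iterator e =
      ShortCut.find(N->BB);
  if (e == ShortCut.end())
    return N->IDom;               // Ordinary step up the post-dominator tree.
  return nodeOf(e->second)->IDom; // Skip the whole region starting at N->BB.
}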
+
+bool RegionInfo::isTrivialRegion(BasicBlock *entry, BasicBlock *exit) const {
+ assert(entry && exit && "entry and exit must not be null!");
+
+ unsigned num_successors = succ_end(entry) - succ_begin(entry);
+
+  if (num_successors == 1 && exit == *(succ_begin(entry)))
+ return true;
+
+ return false;
+}
+
+void RegionInfo::updateStatistics(Region *R) {
+ ++numRegions;
+
+ // TODO: Slow. Should only be enabled if -stats is used.
+ if (R->isSimple()) ++numSimpleRegions;
+}
+
+Region *RegionInfo::createRegion(BasicBlock *entry, BasicBlock *exit) {
+ assert(entry && exit && "entry and exit must not be null!");
+
+ if (isTrivialRegion(entry, exit))
+ return 0;
+
+ Region *region = new Region(entry, exit, this, DT);
+ BBtoRegion.insert(std::make_pair(entry, region));
+
+ #ifdef XDEBUG
+ region->verifyRegion();
+ #else
+ DEBUG(region->verifyRegion());
+ #endif
+
+ updateStatistics(region);
+ return region;
+}
+
+void RegionInfo::findRegionsWithEntry(BasicBlock *entry, BBtoBBMap *ShortCut) {
+ assert(entry);
+
+ DomTreeNode *N = PDT->getNode(entry);
+
+ if (!N)
+ return;
+
+  Region *lastRegion = 0;
+ BasicBlock *lastExit = entry;
+
+ // As only a BasicBlock that postdominates entry can finish a region, walk the
+ // post dominance tree upwards.
+ while ((N = getNextPostDom(N, ShortCut))) {
+ BasicBlock *exit = N->getBlock();
+
+ if (!exit)
+ break;
+
+ if (isRegion(entry, exit)) {
+ Region *newRegion = createRegion(entry, exit);
+
+ if (lastRegion)
+ newRegion->addSubRegion(lastRegion);
+
+ lastRegion = newRegion;
+ lastExit = exit;
+ }
+
+ // This can never be a region, so stop the search.
+ if (!DT->dominates(entry, exit))
+ break;
+ }
+
+ // Tried to create regions from entry to lastExit. Next time take a
+ // shortcut from entry to lastExit.
+ if (lastExit != entry)
+ insertShortCut(entry, lastExit, ShortCut);
+}
+
+void RegionInfo::scanForRegions(Function &F, BBtoBBMap *ShortCut) {
+ BasicBlock *entry = &(F.getEntryBlock());
+ DomTreeNode *N = DT->getNode(entry);
+
+ // Iterate over the dominance tree in post order to start with the small
+ // regions from the bottom of the dominance tree. If the small regions are
+ // detected first, detection of bigger regions is faster, as we can jump
+ // over the small regions.
+ for (po_iterator<DomTreeNode*> FI = po_begin(N), FE = po_end(N); FI != FE;
+ ++FI) {
+ findRegionsWithEntry(FI->getBlock(), ShortCut);
+ }
+}
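
Post-order is what makes the shortcut scheme effective: every dominator-tree child is scanned before its ancestors, so by the time a potential large-region entry is visited, the shortcuts for the small regions below it already exist. A generic sketch of the traversal order (not the LLVM po_iterator itself):

#include <vector>

struct TreeNode { std::vector<TreeNode *> Kids; };

template <typename Fn>
static void postOrderVisit(TreeNode *N, Fn Visit) {
  for (TreeNode *K : N->Kids)
    postOrderVisit(K, Visit); // Subtree (small regions) first...
  Visit(N);                   // ...the node (larger regions) afterwards.
}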
+
+Region *RegionInfo::getTopMostParent(Region *region) {
+ while (region->parent)
+ region = region->getParent();
+
+ return region;
+}
+
+void RegionInfo::buildRegionsTree(DomTreeNode *N, Region *region) {
+ BasicBlock *BB = N->getBlock();
+
+ // Passed region exit
+ while (BB == region->getExit())
+ region = region->getParent();
+
+ BBtoRegionMap::iterator it = BBtoRegion.find(BB);
+
+ // This basic block is a start block of a region. It is already in the
+ // BBtoRegion relation. Only the child basic blocks have to be updated.
+ if (it != BBtoRegion.end()) {
+    Region *newRegion = it->second;
+ region->addSubRegion(getTopMostParent(newRegion));
+ region = newRegion;
+ } else {
+ BBtoRegion[BB] = region;
+ }
+
+ for (DomTreeNode::iterator CI = N->begin(), CE = N->end(); CI != CE; ++CI)
+ buildRegionsTree(*CI, region);
+}
+
+void RegionInfo::releaseMemory() {
+ BBtoRegion.clear();
+ if (TopLevelRegion)
+ delete TopLevelRegion;
+ TopLevelRegion = 0;
+}
+
+RegionInfo::RegionInfo() : FunctionPass(ID) {
+ TopLevelRegion = 0;
+}
+
+RegionInfo::~RegionInfo() {
+ releaseMemory();
+}
+
+void RegionInfo::Calculate(Function &F) {
+  // ShortCut is a map that stores, for every BB, the exit of the largest
+  // region starting at BB. These regions can be treated as single BBs.
+  // This improves performance on linear CFGs.
+ BBtoBBMap ShortCut;
+
+ scanForRegions(F, &ShortCut);
+ BasicBlock *BB = &F.getEntryBlock();
+ buildRegionsTree(DT->getNode(BB), TopLevelRegion);
+}
+
+bool RegionInfo::runOnFunction(Function &F) {
+ releaseMemory();
+
+ DT = &getAnalysis<DominatorTree>();
+ PDT = &getAnalysis<PostDominatorTree>();
+ DF = &getAnalysis<DominanceFrontier>();
+
+ TopLevelRegion = new Region(&F.getEntryBlock(), 0, this, DT, 0);
+ updateStatistics(TopLevelRegion);
+
+ Calculate(F);
+
+ return false;
+}
+
+void RegionInfo::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequiredTransitive<DominatorTree>();
+ AU.addRequired<PostDominatorTree>();
+ AU.addRequired<DominanceFrontier>();
+}
+
+void RegionInfo::print(raw_ostream &OS, const Module *) const {
+ OS << "Region tree:\n";
+ TopLevelRegion->print(OS, true, 0);
+ OS << "End region tree\n";
+}
+
+void RegionInfo::verifyAnalysis() const {
+  // Only do verification when the user requests it; otherwise this expensive
+  // check will be invoked by PMDataManager::verifyPreservedAnalysis whenever
+  // a region pass (marked PreservedAll) finishes.
+ if (!VerifyRegionInfo) return;
+
+ TopLevelRegion->verifyRegionNest();
+}
+
+// Region pass manager support.
+Region *RegionInfo::getRegionFor(BasicBlock *BB) const {
+  BBtoRegionMap::const_iterator I = BBtoRegion.find(BB);
+ return I != BBtoRegion.end() ? I->second : 0;
+}
+
+Region *RegionInfo::operator[](BasicBlock *BB) const {
+ return getRegionFor(BB);
+}
+
+BasicBlock *RegionInfo::getMaxRegionExit(BasicBlock *BB) const {
+ BasicBlock *Exit = NULL;
+
+ while (true) {
+ // Get largest region that starts at BB.
+ Region *R = getRegionFor(BB);
+ while (R && R->getParent() && R->getParent()->getEntry() == BB)
+ R = R->getParent();
+
+ // Get the single exit of BB.
+ if (R && R->getEntry() == BB)
+ Exit = R->getExit();
+ else if (++succ_begin(BB) == succ_end(BB))
+ Exit = *succ_begin(BB);
+ else // No single exit exists.
+ return Exit;
+
+ // Get largest region that starts at Exit.
+ Region *ExitR = getRegionFor(Exit);
+ while (ExitR && ExitR->getParent()
+ && ExitR->getParent()->getEntry() == Exit)
+ ExitR = ExitR->getParent();
+
+ for (pred_iterator PI = pred_begin(Exit), PE = pred_end(Exit); PI != PE;
+ ++PI)
+ if (!R->contains(*PI) && !ExitR->contains(*PI))
+ break;
+
+ // This stops infinite cycles.
+ if (DT->dominates(Exit, BB))
+ break;
+
+ BB = Exit;
+ }
+
+ return Exit;
+}
+
+Region*
+RegionInfo::getCommonRegion(Region *A, Region *B) const {
+  assert(A && B && "One of the Regions is NULL");
+
+ if (A->contains(B)) return A;
+
+ while (!B->contains(A))
+ B = B->getParent();
+
+ return B;
+}
+
+Region*
+RegionInfo::getCommonRegion(SmallVectorImpl<Region*> &Regions) const {
+ Region* ret = Regions.back();
+ Regions.pop_back();
+
+ for (SmallVectorImpl<Region*>::const_iterator I = Regions.begin(),
+ E = Regions.end(); I != E; ++I)
+ ret = getCommonRegion(ret, *I);
+
+ return ret;
+}
+
+Region*
+RegionInfo::getCommonRegion(SmallVectorImpl<BasicBlock*> &BBs) const {
+ Region* ret = getRegionFor(BBs.back());
+ BBs.pop_back();
+
+ for (SmallVectorImpl<BasicBlock*>::const_iterator I = BBs.begin(),
+ E = BBs.end(); I != E; ++I)
+ ret = getCommonRegion(ret, getRegionFor(*I));
+
+ return ret;
+}
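
All three overloads reduce to the pairwise ascent: climb one region's parent chain until it contains the other. A standalone sketch, with an illustrative Reg in place of Region:

struct Reg {
  Reg *Parent;
  bool contains(const Reg *R) const { // Is R within this region's subtree?
    for (; R; R = R->Parent)
      if (R == this)
        return true;
    return false;
  }
};

static Reg *commonRegionSketch(Reg *A, Reg *B) {
  if (A->contains(B))
    return A;
  while (!B->contains(A))
    B = B->Parent; // Terminates: the top-level region contains everything.
  return B;
}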
+
+char RegionInfo::ID = 0;
+INITIALIZE_PASS(RegionInfo, "regions",
+ "Detect single entry single exit regions", true, true);
+
+// Create methods available outside of this file, to use them
+// "include/llvm/LinkAllPasses.h". Otherwise the pass would be deleted by
+// the link time optimization.
+
+namespace llvm {
+ FunctionPass *createRegionInfoPass() {
+ return new RegionInfo();
+ }
+}
+
diff --git a/contrib/llvm/lib/Analysis/RegionPrinter.cpp b/contrib/llvm/lib/Analysis/RegionPrinter.cpp
new file mode 100644
index 0000000..fee5c1b
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/RegionPrinter.cpp
@@ -0,0 +1,186 @@
+//===- RegionPrinter.cpp - Print region tree pass -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Print out the region tree of a function using dotty/graphviz.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/Analysis/RegionIterator.h"
+#include "llvm/Analysis/RegionPrinter.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/DOTGraphTraitsPass.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+/// onlySimpleRegions - Show only the simple regions in the RegionViewer.
+static cl::opt<bool>
+onlySimpleRegions("only-simple-regions",
+ cl::desc("Show only simple regions in the graphviz viewer"),
+ cl::Hidden,
+ cl::init(false));
+
+namespace llvm {
+template<>
+struct DOTGraphTraits<RegionNode*> : public DefaultDOTGraphTraits {
+
+ DOTGraphTraits (bool isSimple=false)
+ : DefaultDOTGraphTraits(isSimple) {}
+
+ std::string getNodeLabel(RegionNode *Node, RegionNode *Graph) {
+
+ if (!Node->isSubRegion()) {
+ BasicBlock *BB = Node->getNodeAs<BasicBlock>();
+
+ if (isSimple())
+ return DOTGraphTraits<const Function*>
+ ::getSimpleNodeLabel(BB, BB->getParent());
+ else
+ return DOTGraphTraits<const Function*>
+ ::getCompleteNodeLabel(BB, BB->getParent());
+ }
+
+ return "Not implemented";
+ }
+};
+
+template<>
+struct DOTGraphTraits<RegionInfo*> : public DOTGraphTraits<RegionNode*> {
+
+ DOTGraphTraits (bool isSimple=false)
+ : DOTGraphTraits<RegionNode*>(isSimple) {}
+
+ static std::string getGraphName(RegionInfo *DT) {
+ return "Region Graph";
+ }
+
+ std::string getNodeLabel(RegionNode *Node, RegionInfo *G) {
+ return DOTGraphTraits<RegionNode*>::getNodeLabel(Node,
+ G->getTopLevelRegion());
+ }
+
+ // Print the cluster of the subregions. This groups the single basic blocks
+ // and adds a different background color for each group.
+ static void printRegionCluster(const Region *R, GraphWriter<RegionInfo*> &GW,
+ unsigned depth = 0) {
+ raw_ostream &O = GW.getOStream();
+ O.indent(2 * depth) << "subgraph cluster_" << static_cast<const void*>(R)
+ << " {\n";
+ O.indent(2 * (depth + 1)) << "label = \"\";\n";
+
+ if (!onlySimpleRegions || R->isSimple()) {
+ O.indent(2 * (depth + 1)) << "style = filled;\n";
+ O.indent(2 * (depth + 1)) << "color = "
+ << ((R->getDepth() * 2 % 12) + 1) << "\n";
+
+ } else {
+ O.indent(2 * (depth + 1)) << "style = solid;\n";
+ O.indent(2 * (depth + 1)) << "color = "
+ << ((R->getDepth() * 2 % 12) + 2) << "\n";
+ }
+
+ for (Region::const_iterator RI = R->begin(), RE = R->end(); RI != RE; ++RI)
+ printRegionCluster(*RI, GW, depth + 1);
+
+ RegionInfo *RI = R->getRegionInfo();
+
+ for (Region::const_block_iterator BI = R->block_begin(),
+ BE = R->block_end(); BI != BE; ++BI) {
+ BasicBlock *BB = (*BI)->getNodeAs<BasicBlock>();
+ if (RI->getRegionFor(BB) == R)
+ O.indent(2 * (depth + 1)) << "Node"
+ << static_cast<const void*>(RI->getTopLevelRegion()->getBBNode(BB))
+ << ";\n";
+ }
+
+ O.indent(2 * depth) << "}\n";
+ }
+
+ static void addCustomGraphFeatures(const RegionInfo* RI,
+ GraphWriter<RegionInfo*> &GW) {
+ raw_ostream &O = GW.getOStream();
+ O << "\tcolorscheme = \"paired12\"\n";
+ printRegionCluster(RI->getTopLevelRegion(), GW, 4);
+ }
+};
+} //end namespace llvm
+
+namespace {
+
+struct RegionViewer
+ : public DOTGraphTraitsViewer<RegionInfo, false> {
+ static char ID;
+ RegionViewer() : DOTGraphTraitsViewer<RegionInfo, false>("reg", ID){}
+};
+
+char RegionViewer::ID = 0;
+INITIALIZE_PASS(RegionViewer, "view-regions", "View regions of function",
+ true, true);
+
+struct RegionOnlyViewer
+ : public DOTGraphTraitsViewer<RegionInfo, true> {
+ static char ID;
+ RegionOnlyViewer() : DOTGraphTraitsViewer<RegionInfo, true>("regonly", ID){}
+};
+
+char RegionOnlyViewer::ID = 0;
+INITIALIZE_PASS(RegionOnlyViewer, "view-regions-only",
+ "View regions of function (with no function bodies)",
+ true, true);
+
+struct RegionPrinter
+ : public DOTGraphTraitsPrinter<RegionInfo, false> {
+ static char ID;
+ RegionPrinter() :
+ DOTGraphTraitsPrinter<RegionInfo, false>("reg", ID) {}
+};
+} //end anonymous namespace
+
+char RegionPrinter::ID = 0;
+INITIALIZE_PASS(RegionPrinter, "dot-regions",
+ "Print regions of function to 'dot' file", true, true);
+
+namespace {
+
+struct RegionOnlyPrinter
+ : public DOTGraphTraitsPrinter<RegionInfo, true> {
+ static char ID;
+ RegionOnlyPrinter() :
+ DOTGraphTraitsPrinter<RegionInfo, true>("reg", ID) {}
+};
+
+}
+
+char RegionOnlyPrinter::ID = 0;
+INITIALIZE_PASS(RegionOnlyPrinter, "dot-regions-only",
+ "Print regions of function to 'dot' file "
+ "(with no function bodies)",
+ true, true);
+
+FunctionPass* llvm::createRegionViewerPass() {
+ return new RegionViewer();
+}
+
+FunctionPass* llvm::createRegionOnlyViewerPass() {
+ return new RegionOnlyViewer();
+}
+
+FunctionPass* llvm::createRegionPrinterPass() {
+ return new RegionPrinter();
+}
+
+FunctionPass* llvm::createRegionOnlyPrinterPass() {
+ return new RegionOnlyPrinter();
+}
+
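With this patch applied, these passes should be reachable from the opt tool by the names registered above, e.g. `opt -dot-regions input.ll` to emit a .dot file per function and `opt -view-regions input.ll` to display the graph directly (the viewer variant presumably requires Graphviz to be installed); the "reg"/"regonly" strings passed to the DOTGraphTraits printers determine the output file prefixes.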
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
index 413b3b4..b892d85 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -103,8 +103,8 @@ MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
"derived loop"),
cl::init(100));
-static RegisterPass<ScalarEvolution>
-R("scalar-evolution", "Scalar Evolution Analysis", false, true);
+INITIALIZE_PASS(ScalarEvolution, "scalar-evolution",
+ "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;
//===----------------------------------------------------------------------===//
@@ -251,28 +251,59 @@ void SCEVCommutativeExpr::print(raw_ostream &OS) const {
OS << "(";
for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
OS << **I;
- if (next(I) != E)
+ if (llvm::next(I) != E)
OS << OpStr;
}
OS << ")";
}
bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
- for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
- if (!getOperand(i)->dominates(BB, DT))
+ for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
+ if (!(*I)->dominates(BB, DT))
return false;
- }
return true;
}
bool SCEVNAryExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
- for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
- if (!getOperand(i)->properlyDominates(BB, DT))
+ for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
+ if (!(*I)->properlyDominates(BB, DT))
return false;
- }
return true;
}
+bool SCEVNAryExpr::isLoopInvariant(const Loop *L) const {
+ for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
+ if (!(*I)->isLoopInvariant(L))
+ return false;
+ return true;
+}
+
+// hasComputableLoopEvolution - N-ary expressions have computable loop
+// evolutions iff they have at least one operand that varies with the loop
+// and every varying operand has a computable evolution.
+bool SCEVNAryExpr::hasComputableLoopEvolution(const Loop *L) const {
+ bool HasVarying = false;
+ for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
+ const SCEV *S = *I;
+ if (!S->isLoopInvariant(L)) {
+ if (S->hasComputableLoopEvolution(L))
+ HasVarying = true;
+ else
+ return false;
+ }
+ }
+ return HasVarying;
+}
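
For example, inside loop L the sum (%n + {0,+,1}<L>) with loop-invariant %n has a computable evolution: one operand varies, and that operand's evolution is computable. A sum whose varying operand is an opaque SCEVUnknown defined in L does not, and an all-invariant sum returns false as well, since nothing varies.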
+
+bool SCEVNAryExpr::hasOperand(const SCEV *O) const {
+ for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
+ const SCEV *S = *I;
+ if (O == S || S->hasOperand(O))
+ return true;
+ }
+ return false;
+}
+
bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}
@@ -303,10 +334,14 @@ bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
if (QueryLoop->contains(L))
return false;
+ // This recurrence is invariant w.r.t. QueryLoop if L contains QueryLoop.
+ if (L->contains(QueryLoop))
+ return true;
+
// This recurrence is variant w.r.t. QueryLoop if any of its operands
// are variant.
- for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (!getOperand(i)->isLoopInvariant(QueryLoop))
+ for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
+ if (!(*I)->isLoopInvariant(QueryLoop))
return false;
// Otherwise it's loop-invariant.
@@ -337,12 +372,36 @@ void SCEVAddRecExpr::print(raw_ostream &OS) const {
OS << ">";
}
+void SCEVUnknown::deleted() {
+ // Clear this SCEVUnknown from ValuesAtScopes.
+ SE->ValuesAtScopes.erase(this);
+
+ // Remove this SCEVUnknown from the uniquing map.
+ SE->UniqueSCEVs.RemoveNode(this);
+
+ // Release the value.
+ setValPtr(0);
+}
+
+void SCEVUnknown::allUsesReplacedWith(Value *New) {
+ // Clear this SCEVUnknown from ValuesAtScopes.
+ SE->ValuesAtScopes.erase(this);
+
+ // Remove this SCEVUnknown from the uniquing map.
+ SE->UniqueSCEVs.RemoveNode(this);
+
+ // Update this SCEVUnknown to point to the new value. This is needed
+ // because there may still be outstanding SCEVs which still point to
+ // this SCEVUnknown.
+ setValPtr(New);
+}
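
Both hooks come from LLVM's CallbackVH machinery (llvm/Support/ValueHandle.h): the handle is notified when its Value is deleted or replaced, which is what lets ScalarEvolution evict stale cache entries here. A hedged sketch of the general pattern, with a hypothetical cache in place of the SCEV maps:

#include "llvm/Support/ValueHandle.h"

namespace {
class CacheEntryVH : public llvm::CallbackVH {
  virtual void deleted() {
    // Erase the (hypothetical) cache entry keyed by this value, then
    // clear the handle so it no longer tracks anything.
    setValPtr(0);
  }
  virtual void allUsesReplacedWith(llvm::Value *New) {
    // Retarget the cache entry, then follow the replacement value.
    setValPtr(New);
  }
public:
  explicit CacheEntryVH(llvm::Value *V) : llvm::CallbackVH(V) {}
};
}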
+
bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
// All non-instruction values are loop invariant. All instructions are loop
// invariant if they are not contained in the specified loop.
// Instructions are never considered invariant in the function body
// (null loop) because they are defined within the "loop".
- if (Instruction *I = dyn_cast<Instruction>(V))
+ if (Instruction *I = dyn_cast<Instruction>(getValue()))
return L && !L->contains(I);
return true;
}
@@ -360,11 +419,11 @@ bool SCEVUnknown::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
}
const Type *SCEVUnknown::getType() const {
- return V->getType();
+ return getValue()->getType();
}
bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
- if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+ if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
if (CE->getOpcode() == Instruction::GetElementPtr &&
@@ -381,7 +440,7 @@ bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
}
bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
- if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+ if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
if (CE->getOpcode() == Instruction::GetElementPtr &&
@@ -406,7 +465,7 @@ bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
}
bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
- if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+ if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
if (CE->getOpcode() == Instruction::GetElementPtr &&
@@ -448,166 +507,183 @@ void SCEVUnknown::print(raw_ostream &OS) const {
}
// Otherwise just print it normally.
- WriteAsOperand(OS, V, false);
+ WriteAsOperand(OS, getValue(), false);
}
//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//
-static bool CompareTypes(const Type *A, const Type *B) {
- if (A->getTypeID() != B->getTypeID())
- return A->getTypeID() < B->getTypeID();
- if (const IntegerType *AI = dyn_cast<IntegerType>(A)) {
- const IntegerType *BI = cast<IntegerType>(B);
- return AI->getBitWidth() < BI->getBitWidth();
- }
- if (const PointerType *AI = dyn_cast<PointerType>(A)) {
- const PointerType *BI = cast<PointerType>(B);
- return CompareTypes(AI->getElementType(), BI->getElementType());
- }
- if (const ArrayType *AI = dyn_cast<ArrayType>(A)) {
- const ArrayType *BI = cast<ArrayType>(B);
- if (AI->getNumElements() != BI->getNumElements())
- return AI->getNumElements() < BI->getNumElements();
- return CompareTypes(AI->getElementType(), BI->getElementType());
- }
- if (const VectorType *AI = dyn_cast<VectorType>(A)) {
- const VectorType *BI = cast<VectorType>(B);
- if (AI->getNumElements() != BI->getNumElements())
- return AI->getNumElements() < BI->getNumElements();
- return CompareTypes(AI->getElementType(), BI->getElementType());
- }
- if (const StructType *AI = dyn_cast<StructType>(A)) {
- const StructType *BI = cast<StructType>(B);
- if (AI->getNumElements() != BI->getNumElements())
- return AI->getNumElements() < BI->getNumElements();
- for (unsigned i = 0, e = AI->getNumElements(); i != e; ++i)
- if (CompareTypes(AI->getElementType(i), BI->getElementType(i)) ||
- CompareTypes(BI->getElementType(i), AI->getElementType(i)))
- return CompareTypes(AI->getElementType(i), BI->getElementType(i));
- }
- return false;
-}
-
namespace {
/// SCEVComplexityCompare - Return true if the complexity of the LHS is less
/// than the complexity of the RHS. This comparator is used to canonicalize
/// expressions.
class SCEVComplexityCompare {
- LoopInfo *LI;
+ const LoopInfo *const LI;
public:
- explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}
+ explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}
+    // Return true if LHS is less than RHS; false if LHS is at least RHS.
bool operator()(const SCEV *LHS, const SCEV *RHS) const {
+ return compare(LHS, RHS) < 0;
+ }
+
+ // Return negative, zero, or positive, if LHS is less than, equal to, or
+ // greater than RHS, respectively. A three-way result allows recursive
+ // comparisons to be more efficient.
+ int compare(const SCEV *LHS, const SCEV *RHS) const {
// Fast-path: SCEVs are uniqued so we can do a quick equality check.
if (LHS == RHS)
- return false;
+ return 0;
// Primarily, sort the SCEVs by their getSCEVType().
- if (LHS->getSCEVType() != RHS->getSCEVType())
- return LHS->getSCEVType() < RHS->getSCEVType();
+ unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
+ if (LType != RType)
+ return (int)LType - (int)RType;
// Aside from the getSCEVType() ordering, the particular ordering
// isn't very important except that it's beneficial to be consistent,
// so that (a + b) and (b + a) don't end up as different expressions.
-
- // Sort SCEVUnknown values with some loose heuristics. TODO: This is
- // not as complete as it could be.
- if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
+ switch (LType) {
+ case scUnknown: {
+ const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
+ // Sort SCEVUnknown values with some loose heuristics. TODO: This is
+ // not as complete as it could be.
+ const Value *LV = LU->getValue(), *RV = RU->getValue();
+
// Order pointer values after integer values. This helps SCEVExpander
// form GEPs.
- if (LU->getType()->isPointerTy() && !RU->getType()->isPointerTy())
- return false;
- if (RU->getType()->isPointerTy() && !LU->getType()->isPointerTy())
- return true;
+ bool LIsPointer = LV->getType()->isPointerTy(),
+ RIsPointer = RV->getType()->isPointerTy();
+ if (LIsPointer != RIsPointer)
+ return (int)LIsPointer - (int)RIsPointer;
// Compare getValueID values.
- if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
- return LU->getValue()->getValueID() < RU->getValue()->getValueID();
+ unsigned LID = LV->getValueID(),
+ RID = RV->getValueID();
+ if (LID != RID)
+ return (int)LID - (int)RID;
// Sort arguments by their position.
- if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
- const Argument *RA = cast<Argument>(RU->getValue());
- return LA->getArgNo() < RA->getArgNo();
+ if (const Argument *LA = dyn_cast<Argument>(LV)) {
+ const Argument *RA = cast<Argument>(RV);
+ unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
+ return (int)LArgNo - (int)RArgNo;
}
- // For instructions, compare their loop depth, and their opcode.
- // This is pretty loose.
- if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
- Instruction *RV = cast<Instruction>(RU->getValue());
+ // For instructions, compare their loop depth, and their operand
+ // count. This is pretty loose.
+ if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
+ const Instruction *RInst = cast<Instruction>(RV);
// Compare loop depths.
- if (LI->getLoopDepth(LV->getParent()) !=
- LI->getLoopDepth(RV->getParent()))
- return LI->getLoopDepth(LV->getParent()) <
- LI->getLoopDepth(RV->getParent());
-
- // Compare opcodes.
- if (LV->getOpcode() != RV->getOpcode())
- return LV->getOpcode() < RV->getOpcode();
+ const BasicBlock *LParent = LInst->getParent(),
+ *RParent = RInst->getParent();
+ if (LParent != RParent) {
+ unsigned LDepth = LI->getLoopDepth(LParent),
+ RDepth = LI->getLoopDepth(RParent);
+ if (LDepth != RDepth)
+ return (int)LDepth - (int)RDepth;
+ }
// Compare the number of operands.
- if (LV->getNumOperands() != RV->getNumOperands())
- return LV->getNumOperands() < RV->getNumOperands();
+ unsigned LNumOps = LInst->getNumOperands(),
+ RNumOps = RInst->getNumOperands();
+ return (int)LNumOps - (int)RNumOps;
}
- return false;
+ return 0;
}
- // Compare constant values.
- if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
+ case scConstant: {
+ const SCEVConstant *LC = cast<SCEVConstant>(LHS);
const SCEVConstant *RC = cast<SCEVConstant>(RHS);
- if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
- return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
- return LC->getValue()->getValue().ult(RC->getValue()->getValue());
+
+ // Compare constant values.
+ const APInt &LA = LC->getValue()->getValue();
+ const APInt &RA = RC->getValue()->getValue();
+ unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
+ if (LBitWidth != RBitWidth)
+ return (int)LBitWidth - (int)RBitWidth;
+ return LA.ult(RA) ? -1 : 1;
}
- // Compare addrec loop depths.
- if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
+ case scAddRecExpr: {
+ const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
- if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
- return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
+
+ // Compare addrec loop depths.
+ const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
+ if (LLoop != RLoop) {
+ unsigned LDepth = LLoop->getLoopDepth(),
+ RDepth = RLoop->getLoopDepth();
+ if (LDepth != RDepth)
+ return (int)LDepth - (int)RDepth;
+ }
+
+ // Addrec complexity grows with operand count.
+ unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
+ if (LNumOps != RNumOps)
+ return (int)LNumOps - (int)RNumOps;
+
+ // Lexicographically compare.
+ for (unsigned i = 0; i != LNumOps; ++i) {
+ long X = compare(LA->getOperand(i), RA->getOperand(i));
+ if (X != 0)
+ return X;
+ }
+
+ return 0;
}
- // Lexicographically compare n-ary expressions.
- if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
+ case scAddExpr:
+ case scMulExpr:
+ case scSMaxExpr:
+ case scUMaxExpr: {
+ const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
- for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
- if (i >= RC->getNumOperands())
- return false;
- if (operator()(LC->getOperand(i), RC->getOperand(i)))
- return true;
- if (operator()(RC->getOperand(i), LC->getOperand(i)))
- return false;
+
+ // Lexicographically compare n-ary expressions.
+ unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
+ for (unsigned i = 0; i != LNumOps; ++i) {
+ if (i >= RNumOps)
+ return 1;
+ long X = compare(LC->getOperand(i), RC->getOperand(i));
+ if (X != 0)
+ return X;
}
- return LC->getNumOperands() < RC->getNumOperands();
+ return (int)LNumOps - (int)RNumOps;
}
- // Lexicographically compare udiv expressions.
- if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
+ case scUDivExpr: {
+ const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
- if (operator()(LC->getLHS(), RC->getLHS()))
- return true;
- if (operator()(RC->getLHS(), LC->getLHS()))
- return false;
- if (operator()(LC->getRHS(), RC->getRHS()))
- return true;
- if (operator()(RC->getRHS(), LC->getRHS()))
- return false;
- return false;
+
+ // Lexicographically compare udiv expressions.
+ long X = compare(LC->getLHS(), RC->getLHS());
+ if (X != 0)
+ return X;
+ return compare(LC->getRHS(), RC->getRHS());
}
- // Compare cast expressions by operand.
- if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
+ case scTruncate:
+ case scZeroExtend:
+ case scSignExtend: {
+ const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
- return operator()(LC->getOperand(), RC->getOperand());
+
+ // Compare cast expressions by operand.
+ return compare(LC->getOperand(), RC->getOperand());
+ }
+
+ default:
+ break;
}
llvm_unreachable("Unknown SCEV kind!");
- return false;
+ return 0;
}
};
}
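
The payoff of the three-way rewrite shows up in the lexicographic cases: the old bool comparator had to be invoked twice per element ("is a < b, else is b < a") to detect equality, doubling the recursive traversals. A standalone illustration of the one-pass scheme:

#include <cstddef>
#include <vector>

static int cmp3(int a, int b) { return (a > b) - (a < b); }

static int compareSeq(const std::vector<int> &L, const std::vector<int> &R) {
  for (std::size_t i = 0; i != L.size(); ++i) {
    if (i >= R.size())
      return 1;                 // L is a proper extension of R.
    if (int X = cmp3(L[i], R[i]))
      return X;                 // First difference decides; one visit each.
  }
  return (int)L.size() - (int)R.size();
}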
@@ -628,8 +704,9 @@ static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
if (Ops.size() == 2) {
// This is the common case, which also happens to be trivially simple.
// Special case it.
- if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
- std::swap(Ops[0], Ops[1]);
+ const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
+ if (SCEVComplexityCompare(LI)(RHS, LHS))
+ std::swap(LHS, RHS);
return;
}
@@ -845,6 +922,13 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
return getAddRecExpr(Operands, AddRec->getLoop());
}
+ // As a special case, fold trunc(undef) to undef. We don't want to
+ // know too much about SCEVUnknowns, but this special case is handy
+ // and harmless.
+ if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
+ if (isa<UndefValue>(U->getValue()))
+ return getSCEV(UndefValue::get(Ty));
+
// The cast wasn't folded; create an explicit cast node. We can reuse
// the existing insert position since if we get here, we won't have
// made any changes which would invalidate it.
@@ -1163,6 +1247,13 @@ const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
return getAddRecExpr(Ops, AR->getLoop());
}
+ // As a special case, fold anyext(undef) to undef. We don't want to
+ // know too much about SCEVUnknowns, but this special case is handy
+ // and harmless.
+ if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
+ if (isa<UndefValue>(U->getValue()))
+ return getSCEV(UndefValue::get(Ty));
+
// If the expression is obviously signed, use the sext cast value.
if (isa<SCEVSMaxExpr>(Op))
return SExt;
@@ -1287,8 +1378,9 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// If HasNSW is true and all the operands are non-negative, infer HasNUW.
if (!HasNUW && HasNSW) {
bool All = true;
- for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (!isKnownNonNegative(Ops[i])) {
+ for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
+ E = Ops.end(); I != E; ++I)
+ if (!isKnownNonNegative(*I)) {
All = false;
break;
}
@@ -1321,22 +1413,29 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
if (Ops.size() == 1) return Ops[0];
}
- // Okay, check to see if the same value occurs in the operand list twice. If
- // so, merge them together into an multiply expression. Since we sorted the
- // list, these values are required to be adjacent.
+ // Okay, check to see if the same value occurs in the operand list more than
+  // once. If so, merge them together into a multiply expression. Since we
+ // sorted the list, these values are required to be adjacent.
const Type *Ty = Ops[0]->getType();
- for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
+ bool FoundMatch = false;
+ for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
- // Found a match, merge the two values into a multiply, and add any
- // remaining values to the result.
- const SCEV *Two = getConstant(Ty, 2);
- const SCEV *Mul = getMulExpr(Ops[i], Two);
- if (Ops.size() == 2)
+ // Scan ahead to count how many equal operands there are.
+ unsigned Count = 2;
+ while (i+Count != e && Ops[i+Count] == Ops[i])
+ ++Count;
+ // Merge the values into a multiply.
+ const SCEV *Scale = getConstant(Ty, Count);
+ const SCEV *Mul = getMulExpr(Scale, Ops[i]);
+ if (Ops.size() == Count)
return Mul;
- Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
- Ops.push_back(Mul);
- return getAddExpr(Ops, HasNUW, HasNSW);
+ Ops[i] = Mul;
+ Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
+ --i; e -= Count - 1;
+ FoundMatch = true;
}
+ if (FoundMatch)
+ return getAddExpr(Ops, HasNUW, HasNSW);
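
Worked example: on the sorted operand list (X, Y, Y, Y) the scan finds Count == 3 at the first Y and rewrites the sum to (X + 3*Y) in one step; the previous pairwise code would have produced (X + 2*Y + Y) and re-run getAddExpr once for every additional duplicate.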
// Check for truncates. If all the operands are truncated from the same
// type, see if factoring out the truncate would permit the result to be
@@ -1433,7 +1532,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// re-generate the operands list. Group the operands by constant scale,
// to avoid multiplying by the same constant scale multiple times.
std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
- for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
+ for (SmallVector<const SCEV *, 8>::const_iterator I = NewOps.begin(),
E = NewOps.end(); I != E; ++I)
MulOpLists[M.find(*I)->second].push_back(*I);
// Re-generate the operands list.
@@ -1460,20 +1559,23 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
+ if (isa<SCEVConstant>(MulOpSCEV))
+ continue;
for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
- if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
+ if (MulOpSCEV == Ops[AddOp]) {
// Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
if (Mul->getNumOperands() != 2) {
// If the multiply has more than two operands, we must get the
// Y*Z term.
- SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
- MulOps.erase(MulOps.begin()+MulOp);
+ SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
+ Mul->op_begin()+MulOp);
+ MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
InnerMul = getMulExpr(MulOps);
}
const SCEV *One = getConstant(Ty, 1);
- const SCEV *AddOne = getAddExpr(InnerMul, One);
- const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
+ const SCEV *AddOne = getAddExpr(One, InnerMul);
+ const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
if (Ops.size() == 2) return OuterMul;
if (AddOp < Idx) {
Ops.erase(Ops.begin()+AddOp);
@@ -1500,15 +1602,15 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
if (Mul->getNumOperands() != 2) {
SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
- Mul->op_end());
- MulOps.erase(MulOps.begin()+MulOp);
+ Mul->op_begin()+MulOp);
+ MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
InnerMul1 = getMulExpr(MulOps);
}
const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
if (OtherMul->getNumOperands() != 2) {
SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
- OtherMul->op_end());
- MulOps.erase(MulOps.begin()+OMulOp);
+ OtherMul->op_begin()+OMulOp);
+ MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
InnerMul2 = getMulExpr(MulOps);
}
const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
@@ -1574,30 +1676,31 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// there are multiple AddRec's with the same loop induction variable being
// added together. If so, we can fold them.
for (unsigned OtherIdx = Idx+1;
- OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
- if (OtherIdx != Idx) {
- const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
- if (AddRecLoop == OtherAddRec->getLoop()) {
- // Other + {A,+,B} + {C,+,D} --> Other + {A+C,+,B+D}
- SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
- AddRec->op_end());
- for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
- if (i >= NewOps.size()) {
- NewOps.append(OtherAddRec->op_begin()+i,
- OtherAddRec->op_end());
- break;
+ OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+ ++OtherIdx)
+ if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
+ // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
+ SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
+ AddRec->op_end());
+ for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+ ++OtherIdx)
+ if (const SCEVAddRecExpr *OtherAddRec =
+ dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
+ if (OtherAddRec->getLoop() == AddRecLoop) {
+ for (unsigned i = 0, e = OtherAddRec->getNumOperands();
+ i != e; ++i) {
+ if (i >= AddRecOps.size()) {
+ AddRecOps.append(OtherAddRec->op_begin()+i,
+ OtherAddRec->op_end());
+ break;
+ }
+ AddRecOps[i] = getAddExpr(AddRecOps[i],
+ OtherAddRec->getOperand(i));
+ }
+ Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
}
- NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
- }
- const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRecLoop);
-
- if (Ops.size() == 2) return NewAddRec;
-
- Ops.erase(Ops.begin()+Idx);
- Ops.erase(Ops.begin()+OtherIdx-1);
- Ops.push_back(NewAddRec);
- return getAddExpr(Ops);
- }
+ Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop);
+ return getAddExpr(Ops);
}
// Otherwise couldn't fold anything into this recurrence. Move onto the
@@ -1633,17 +1736,18 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
assert(!Ops.empty() && "Cannot get empty mul!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
+ const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
- assert(getEffectiveSCEVType(Ops[i]->getType()) ==
- getEffectiveSCEVType(Ops[0]->getType()) &&
+ assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVMulExpr operand types don't match!");
#endif
// If HasNSW is true and all the operands are non-negative, infer HasNUW.
if (!HasNUW && HasNSW) {
bool All = true;
- for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (!isKnownNonNegative(Ops[i])) {
+ for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
+ E = Ops.end(); I != E; ++I)
+ if (!isKnownNonNegative(*I)) {
All = false;
break;
}
@@ -1740,8 +1844,9 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// they are loop invariant w.r.t. the recurrence.
SmallVector<const SCEV *, 8> LIOps;
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
+ const Loop *AddRecLoop = AddRec->getLoop();
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
+ if (Ops[i]->isLoopInvariant(AddRecLoop)) {
LIOps.push_back(Ops[i]);
Ops.erase(Ops.begin()+i);
--i; --e;
@@ -1758,7 +1863,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// Build the new addrec. Propagate the NUW and NSW flags if both the
// outer mul and the inner addrec are guaranteed to have no overflow.
- const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(),
+ const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop,
HasNUW && AddRec->hasNoUnsignedWrap(),
HasNSW && AddRec->hasNoSignedWrap());
@@ -1778,28 +1883,30 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// there are multiple AddRec's with the same loop induction variable being
// multiplied together. If so, we can fold them.
for (unsigned OtherIdx = Idx+1;
- OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
- if (OtherIdx != Idx) {
- const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
- if (AddRec->getLoop() == OtherAddRec->getLoop()) {
- // F * G --> {A,+,B} * {C,+,D} --> {A*C,+,F*D + G*B + B*D}
- const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
- const SCEV *NewStart = getMulExpr(F->getStart(),
- G->getStart());
- const SCEV *B = F->getStepRecurrence(*this);
- const SCEV *D = G->getStepRecurrence(*this);
- const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
- getMulExpr(G, B),
- getMulExpr(B, D));
- const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
- F->getLoop());
- if (Ops.size() == 2) return NewAddRec;
-
- Ops.erase(Ops.begin()+Idx);
- Ops.erase(Ops.begin()+OtherIdx-1);
- Ops.push_back(NewAddRec);
- return getMulExpr(Ops);
- }
+ OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+ ++OtherIdx)
+ if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
+ // F * G, where F = {A,+,B}<L> and G = {C,+,D}<L> -->
+ // {A*C,+,F*D + G*B + B*D}<L>
+ for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+ ++OtherIdx)
+ if (const SCEVAddRecExpr *OtherAddRec =
+ dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
+ if (OtherAddRec->getLoop() == AddRecLoop) {
+ const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
+ const SCEV *NewStart = getMulExpr(F->getStart(), G->getStart());
+ const SCEV *B = F->getStepRecurrence(*this);
+ const SCEV *D = G->getStepRecurrence(*this);
+ const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
+ getMulExpr(G, B),
+ getMulExpr(B, D));
+ const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
+ F->getLoop());
+ if (Ops.size() == 2) return NewAddRec;
+ Ops[Idx] = AddRec = cast<SCEVAddRecExpr>(NewAddRec);
+ Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
+ }
+ return getMulExpr(Ops);
}
// Otherwise couldn't fold anything into this recurrence. Move onto the
@@ -1848,7 +1955,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
// TODO: Generalize this to non-constants by using known-bits information.
const Type *Ty = LHS->getType();
unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
- unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
+ unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
// For non-power-of-two values, effectively round the value up to the
// nearest power of two.
if (!RHSC->getValue()->getValue().isPowerOf2())
@@ -1955,9 +2062,9 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
bool HasNUW, bool HasNSW) {
if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
+ const Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
for (unsigned i = 1, e = Operands.size(); i != e; ++i)
- assert(getEffectiveSCEVType(Operands[i]->getType()) ==
- getEffectiveSCEVType(Operands[0]->getType()) &&
+ assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
"SCEVAddRecExpr operand types don't match!");
#endif
@@ -1975,8 +2082,9 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
// If HasNSW is true and all the operands are non-negative, infer HasNUW.
if (!HasNUW && HasNSW) {
bool All = true;
- for (unsigned i = 0, e = Operands.size(); i != e; ++i)
- if (!isKnownNonNegative(Operands[i])) {
+ for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
+ E = Operands.end(); I != E; ++I)
+ if (!isKnownNonNegative(*I)) {
All = false;
break;
}
@@ -1986,9 +2094,9 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
// Canonicalize nested AddRecs in by nesting them in order of loop depth.
if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
const Loop *NestedLoop = NestedAR->getLoop();
- if (L->contains(NestedLoop->getHeader()) ?
+ if (L->contains(NestedLoop) ?
(L->getLoopDepth() < NestedLoop->getLoopDepth()) :
- (!NestedLoop->contains(L->getHeader()) &&
+ (!NestedLoop->contains(L) &&
DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
NestedAR->op_end());
@@ -2055,9 +2163,9 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
assert(!Ops.empty() && "Cannot get empty smax!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
+ const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
- assert(getEffectiveSCEVType(Ops[i]->getType()) ==
- getEffectiveSCEVType(Ops[0]->getType()) &&
+ assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVSMaxExpr operand types don't match!");
#endif
@@ -2160,9 +2268,9 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
assert(!Ops.empty() && "Cannot get empty umax!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
+ const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
- assert(getEffectiveSCEVType(Ops[i]->getType()) ==
- getEffectiveSCEVType(Ops[0]->getType()) &&
+ assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVUMaxExpr operand types don't match!");
#endif
@@ -2326,8 +2434,14 @@ const SCEV *ScalarEvolution::getUnknown(Value *V) {
ID.AddInteger(scUnknown);
ID.AddPointer(V);
void *IP = 0;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V);
+ if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
+ assert(cast<SCEVUnknown>(S)->getValue() == V &&
+ "Stale SCEVUnknown in uniquing map!");
+ return S;
+ }
+ SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
+ FirstUnknown);
+ FirstUnknown = cast<SCEVUnknown>(S);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
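
The FirstUnknown threading builds an intrusive singly-linked list: each new SCEVUnknown stores the previous head as its next pointer, and the head is then advanced, so every SCEVUnknown can later be walked (for instance to sever its value handle) without any separate container. A sketch of the shape, with illustrative types:

struct UnknownNode {
  UnknownNode *Next;
  explicit UnknownNode(UnknownNode *Head) : Next(Head) {}
};

// Mirroring getUnknown: push-front onto the chain.
//   UnknownNode *N = new UnknownNode(FirstUnknown);
//   FirstUnknown = N;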
@@ -2391,10 +2505,15 @@ const SCEV *ScalarEvolution::getCouldNotCompute() {
const SCEV *ScalarEvolution::getSCEV(Value *V) {
assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
- std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
- if (I != Scalars.end()) return I->second;
+ ValueExprMapType::const_iterator I = ValueExprMap.find(V);
+ if (I != ValueExprMap.end()) return I->second;
const SCEV *S = createSCEV(V);
- Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
+
+ // The process of creating a SCEV for V may have caused other SCEVs
+ // to have been created, so it's necessary to insert the new entry
+ // from scratch, rather than trying to remember the insert position
+ // above.
+ ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
return S;
}
@@ -2428,6 +2547,10 @@ const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
///
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
const SCEV *RHS) {
+ // Fast path: X - X --> 0.
+ if (LHS == RHS)
+ return getConstant(LHS->getType(), 0);
+
// X - Y --> X + -Y
return getAddExpr(LHS, getNegativeSCEV(RHS));
}
@@ -2570,12 +2693,12 @@ PushDefUseChildren(Instruction *I,
// Push the def-use children onto the Worklist stack.
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
UI != UE; ++UI)
- Worklist.push_back(cast<Instruction>(UI));
+ Worklist.push_back(cast<Instruction>(*UI));
}
/// ForgetSymbolicValue - This looks up computed SCEV values for all
/// instructions that depend on the given instruction and removes them from
-/// the Scalars map if they reference SymName. This is used during PHI
+/// the ValueExprMap if they reference SymName. This is used during PHI
/// resolution.
void
ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
@@ -2588,9 +2711,9 @@ ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
Instruction *I = Worklist.pop_back_val();
if (!Visited.insert(I)) continue;
- std::map<SCEVCallbackVH, const SCEV *>::iterator It =
- Scalars.find(static_cast<Value *>(I));
- if (It != Scalars.end()) {
+ ValueExprMapType::iterator It =
+ ValueExprMap.find(static_cast<Value *>(I));
+ if (It != ValueExprMap.end()) {
// Short-circuit the def-use traversal if the symbolic name
// ceases to appear in expressions.
if (It->second != SymName && !It->second->hasOperand(SymName))
@@ -2607,7 +2730,7 @@ ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
!isa<SCEVUnknown>(It->second) ||
(I != PN && It->second == SymName)) {
ValuesAtScopes.erase(It->second);
- Scalars.erase(It);
+ ValueExprMap.erase(It);
}
}
@@ -2644,9 +2767,9 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
if (BEValueV && StartValueV) {
// While we are analyzing this PHI node, handle its value symbolically.
const SCEV *SymbolicName = getUnknown(PN);
- assert(Scalars.find(PN) == Scalars.end() &&
+ assert(ValueExprMap.find(PN) == ValueExprMap.end() &&
"PHI node already processed?");
- Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
+ ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
// Using this symbolic name for the PHI, analyze the value coming around
// the back-edge.
@@ -2707,7 +2830,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// to be symbolic. We now need to go back and purge all of the
// entries for the scalars that use the symbolic expression.
ForgetSymbolicName(PN, SymbolicName);
- Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
+ ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
return PHISCEV;
}
}
@@ -2732,7 +2855,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// to be symbolic. We now need to go back and purge all of the
// entries for the scalars that use the symbolic expression.
ForgetSymbolicName(PN, SymbolicName);
- Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
+ ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
return PHISCEV;
}
}
@@ -2777,7 +2900,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
return getUnknown(GEP);
const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
gep_type_iterator GTI = gep_type_begin(GEP);
- for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
+ for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()),
E = GEP->op_end();
I != E; ++I) {
Value *Index = *I;
@@ -3200,12 +3323,42 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
Operator *U = cast<Operator>(V);
switch (Opcode) {
- case Instruction::Add:
- return getAddExpr(getSCEV(U->getOperand(0)),
- getSCEV(U->getOperand(1)));
- case Instruction::Mul:
- return getMulExpr(getSCEV(U->getOperand(0)),
- getSCEV(U->getOperand(1)));
+ case Instruction::Add: {
+ // The simple thing to do would be to just call getSCEV on both operands
+ // and call getAddExpr with the result. However if we're looking at a
+ // bunch of things all added together, this can be quite inefficient,
+ // because it leads to N-1 getAddExpr calls for N ultimate operands.
+ // Instead, gather up all the operands and make a single getAddExpr call.
+ // LLVM IR canonical form means we need only traverse the left operands.
+ SmallVector<const SCEV *, 4> AddOps;
+ AddOps.push_back(getSCEV(U->getOperand(1)));
+ for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
+ unsigned Opcode = Op->getValueID() - Value::InstructionVal;
+ if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
+ break;
+ U = cast<Operator>(Op);
+ const SCEV *Op1 = getSCEV(U->getOperand(1));
+ if (Opcode == Instruction::Sub)
+ AddOps.push_back(getNegativeSCEV(Op1));
+ else
+ AddOps.push_back(Op1);
+ }
+ AddOps.push_back(getSCEV(U->getOperand(0)));
+ return getAddExpr(AddOps);
+ }
+ case Instruction::Mul: {
+ // See the Add code above.
+ SmallVector<const SCEV *, 4> MulOps;
+ MulOps.push_back(getSCEV(U->getOperand(1)));
+ for (Value *Op = U->getOperand(0);
+ Op->getValueID() == Instruction::Mul + Value::InstructionVal;
+ Op = U->getOperand(0)) {
+ U = cast<Operator>(Op);
+ MulOps.push_back(getSCEV(U->getOperand(1)));
+ }
+ MulOps.push_back(getSCEV(U->getOperand(0)));
+ return getMulExpr(MulOps);
+ }
case Instruction::UDiv:
return getUDivExpr(getSCEV(U->getOperand(0)),
getSCEV(U->getOperand(1)));
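
The rewritten Add and Mul cases above gather the whole left-leaning operand chain before building a single N-ary expression, turning N-1 re-canonicalizing getAddExpr/getMulExpr calls into one. A standalone sketch of the gathering, with a toy node in place of the IR and the simplifying assumption that right operands are leaves (as in a canonical left-leaning chain):

#include <vector>

struct ToyExpr {
  bool IsAdd;         // Interior add node vs. leaf.
  ToyExpr *LHS, *RHS; // Valid when IsAdd.
  int Leaf;           // Valid when !IsAdd.
};

// Collect the ultimate operands of (((a + b) + c) + d) in one pass.
static void gatherAddOperands(ToyExpr *E, std::vector<int> &Ops) {
  while (E->IsAdd) {
    Ops.push_back(E->RHS->Leaf); // Right operand: a leaf by assumption.
    E = E->LHS;                  // Keep walking the left spine.
  }
  Ops.push_back(E->Leaf);        // Leftmost operand ends the chain.
}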
@@ -3467,7 +3620,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
const SCEV *LDiff = getMinusSCEV(LA, LS);
const SCEV *RDiff = getMinusSCEV(RA, One);
if (LDiff == RDiff)
- return getAddExpr(getUMaxExpr(LS, One), LDiff);
+ return getAddExpr(getUMaxExpr(One, LS), LDiff);
}
break;
case ICmpInst::ICMP_EQ:
@@ -3482,7 +3635,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
const SCEV *LDiff = getMinusSCEV(LA, One);
const SCEV *RDiff = getMinusSCEV(RA, LS);
if (LDiff == RDiff)
- return getAddExpr(getUMaxExpr(LS, One), LDiff);
+ return getAddExpr(getUMaxExpr(One, LS), LDiff);
}
break;
default:
@@ -3579,9 +3732,9 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
Instruction *I = Worklist.pop_back_val();
if (!Visited.insert(I)) continue;
- std::map<SCEVCallbackVH, const SCEV *>::iterator It =
- Scalars.find(static_cast<Value *>(I));
- if (It != Scalars.end()) {
+ ValueExprMapType::iterator It =
+ ValueExprMap.find(static_cast<Value *>(I));
+ if (It != ValueExprMap.end()) {
// SCEVUnknown for a PHI either means that it has an unrecognized
// structure, or it's a PHI that's in the progress of being computed
// by createNodeForPHI. In the former case, additional loop trip
@@ -3590,7 +3743,7 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
// own when it gets to that point.
if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) {
ValuesAtScopes.erase(It->second);
- Scalars.erase(It);
+ ValueExprMap.erase(It);
}
if (PHINode *PN = dyn_cast<PHINode>(I))
ConstantEvolutionLoopExitValue.erase(PN);
@@ -3619,11 +3772,10 @@ void ScalarEvolution::forgetLoop(const Loop *L) {
Instruction *I = Worklist.pop_back_val();
if (!Visited.insert(I)) continue;
- std::map<SCEVCallbackVH, const SCEV *>::iterator It =
- Scalars.find(static_cast<Value *>(I));
- if (It != Scalars.end()) {
+ ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
+ if (It != ValueExprMap.end()) {
ValuesAtScopes.erase(It->second);
- Scalars.erase(It);
+ ValueExprMap.erase(It);
if (PHINode *PN = dyn_cast<PHINode>(I))
ConstantEvolutionLoopExitValue.erase(PN);
}
@@ -3648,35 +3800,14 @@ void ScalarEvolution::forgetValue(Value *V) {
I = Worklist.pop_back_val();
if (!Visited.insert(I)) continue;
- std::map<SCEVCallbackVH, const SCEV *>::iterator It =
- Scalars.find(static_cast<Value *>(I));
- if (It != Scalars.end()) {
+ ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
+ if (It != ValueExprMap.end()) {
ValuesAtScopes.erase(It->second);
- Scalars.erase(It);
+ ValueExprMap.erase(It);
if (PHINode *PN = dyn_cast<PHINode>(I))
ConstantEvolutionLoopExitValue.erase(PN);
}
- // If there's a SCEVUnknown tying this value into the SCEV
- // space, remove it from the folding set map. The SCEVUnknown
- // object and any other SCEV objects which reference it
- // (transitively) remain allocated, effectively leaked until
- // the underlying BumpPtrAllocator is freed.
- //
- // This permits SCEV pointers to be used as keys in maps
- // such as the ValuesAtScopes map.
- FoldingSetNodeID ID;
- ID.AddInteger(scUnknown);
- ID.AddPointer(I);
- void *IP;
- if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
- UniqueSCEVs.RemoveNode(S);
-
- // This isn't necessary, but we might as well remove the
- // value from the ValuesAtScopes map too.
- ValuesAtScopes.erase(S);
- }
-
PushDefUseChildren(I, Worklist);
}
}
@@ -3816,14 +3947,13 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
else
MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
} else {
- // Both conditions must be true for the loop to exit.
+ // Both conditions must be true at the same time for the loop to exit.
+ // For now, be conservative.
assert(L->contains(FBB) && "Loop block has no successor in loop!");
- if (BTI0.Exact != getCouldNotCompute() &&
- BTI1.Exact != getCouldNotCompute())
- BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
- if (BTI0.Max != getCouldNotCompute() &&
- BTI1.Max != getCouldNotCompute())
- MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
+ if (BTI0.Max == BTI1.Max)
+ MaxBECount = BTI0.Max;
+ if (BTI0.Exact == BTI1.Exact)
+ BECount = BTI0.Exact;
}
return BackedgeTakenInfo(BECount, MaxBECount);
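Editor's note: this hunk is the interesting one. When the loop exits only if both conditions hold in the same iteration, combining the two counts with umax is unsound, because the first iteration at which both hold at once need not be the later of the two individual counts (a condition need not stay true once reached). The rewritten code keeps a combined count only when the two sub-results are provably identical. A minimal standalone sketch of that merge rule (hypothetical names, not the SCEV API):

    #include <cstdint>
    #include <optional>

    // An empty optional plays the role of getCouldNotCompute().
    using Count = std::optional<uint64_t>;

    // Keep a merged trip count only when both sub-computations agree;
    // anything else stays conservatively unknown.
    static Count mergeSimultaneousExits(Count A, Count B) {
      if (A && B && *A == *B)
        return A;
      return std::nullopt;
    }

The same reasoning applies to the inverted case in the next hunk, where both conditions must be false at once.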
@@ -3851,14 +3981,13 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
else
MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
} else {
- // Both conditions must be false for the loop to exit.
+ // Both conditions must be false at the same time for the loop to exit.
+ // For now, be conservative.
assert(L->contains(TBB) && "Loop block has no successor in loop!");
- if (BTI0.Exact != getCouldNotCompute() &&
- BTI1.Exact != getCouldNotCompute())
- BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
- if (BTI0.Max != getCouldNotCompute() &&
- BTI1.Max != getCouldNotCompute())
- MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
+ if (BTI0.Max == BTI1.Max)
+ MaxBECount = BTI0.Max;
+ if (BTI0.Exact == BTI1.Exact)
+ BECount = BTI0.Exact;
}
return BackedgeTakenInfo(BECount, MaxBECount);
@@ -4203,7 +4332,7 @@ Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
const APInt &BEs,
const Loop *L) {
- std::map<PHINode*, Constant*>::iterator I =
+ std::map<PHINode*, Constant*>::const_iterator I =
ConstantEvolutionLoopExitValue.find(PN);
if (I != ConstantEvolutionLoopExitValue.end())
return I->second;
@@ -5185,7 +5314,8 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
LoopContinuePredicate->isUnconditional())
return false;
- return isImpliedCond(LoopContinuePredicate->getCondition(), Pred, LHS, RHS,
+ return isImpliedCond(Pred, LHS, RHS,
+ LoopContinuePredicate->getCondition(),
LoopContinuePredicate->getSuccessor(0) != L->getHeader());
}
@@ -5214,7 +5344,8 @@ ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
LoopEntryPredicate->isUnconditional())
continue;
- if (isImpliedCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
+ if (isImpliedCond(Pred, LHS, RHS,
+ LoopEntryPredicate->getCondition(),
LoopEntryPredicate->getSuccessor(0) != Pair.second))
return true;
}
@@ -5224,24 +5355,24 @@ ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
/// isImpliedCond - Test whether the condition described by Pred, LHS,
/// and RHS is true whenever the given Cond value evaluates to true.
-bool ScalarEvolution::isImpliedCond(Value *CondValue,
- ICmpInst::Predicate Pred,
+bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS,
+ Value *FoundCondValue,
bool Inverse) {
// Recursively handle And and Or conditions.
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
if (BO->getOpcode() == Instruction::And) {
if (!Inverse)
- return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
- isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
+ return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
+ isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
} else if (BO->getOpcode() == Instruction::Or) {
if (Inverse)
- return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
- isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
+ return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
+ isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
}
}
- ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
+ ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
if (!ICI) return false;
// Bail if the ICmp's operands' types are wider than the needed type
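Editor's note: the And/Or recursion above is driven by De Morgan's laws. When Inverse is set the whole condition is known false, so only the Or case splits (!(A || B) implies both !A and !B); when Inverse is clear only the And case splits. A toy sketch of the same shape (illustrative types, not LLVM's):

    // A condition known true (Inverse == false) lets us split an And;
    // a condition known false (Inverse == true) lets us split an Or.
    struct Cond {
      enum Kind { Leaf, And, Or } K;
      const Cond *L, *R;
      bool LeafImpliesGoal; // stands in for the final ICmp check
    };

    static bool implied(const Cond *C, bool Inverse) {
      if (C->K == Cond::And && !Inverse)
        return implied(C->L, Inverse) || implied(C->R, Inverse);
      if (C->K == Cond::Or && Inverse)
        return implied(C->L, Inverse) || implied(C->R, Inverse);
      return C->K == Cond::Leaf && C->LeafImpliesGoal;
    }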
@@ -5658,20 +5789,19 @@ void ScalarEvolution::SCEVCallbackVH::deleted() {
assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
SE->ConstantEvolutionLoopExitValue.erase(PN);
- SE->Scalars.erase(getValPtr());
+ SE->ValueExprMap.erase(getValPtr());
// this now dangles!
}
-void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
+void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
// Forget all the expressions associated with users of the old value,
// so that future queries will recompute the expressions using the new
// value.
+ Value *Old = getValPtr();
SmallVector<User *, 16> Worklist;
SmallPtrSet<User *, 8> Visited;
- Value *Old = getValPtr();
- bool DeleteOld = false;
for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
UI != UE; ++UI)
Worklist.push_back(*UI);
@@ -5679,27 +5809,22 @@ void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
User *U = Worklist.pop_back_val();
// Deleting the Old value will cause this to dangle. Postpone
// that until everything else is done.
- if (U == Old) {
- DeleteOld = true;
+ if (U == Old)
continue;
- }
if (!Visited.insert(U))
continue;
if (PHINode *PN = dyn_cast<PHINode>(U))
SE->ConstantEvolutionLoopExitValue.erase(PN);
- SE->Scalars.erase(U);
+ SE->ValueExprMap.erase(U);
for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
UI != UE; ++UI)
Worklist.push_back(*UI);
}
- // Delete the Old value if it (indirectly) references itself.
- if (DeleteOld) {
- if (PHINode *PN = dyn_cast<PHINode>(Old))
- SE->ConstantEvolutionLoopExitValue.erase(PN);
- SE->Scalars.erase(Old);
- // this now dangles!
- }
- // this may dangle!
+ // Delete the Old value.
+ if (PHINode *PN = dyn_cast<PHINode>(Old))
+ SE->ConstantEvolutionLoopExitValue.erase(PN);
+ SE->ValueExprMap.erase(Old);
+ // this now dangles!
}
ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
@@ -5710,7 +5835,7 @@ ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
//===----------------------------------------------------------------------===//
ScalarEvolution::ScalarEvolution()
- : FunctionPass(&ID) {
+ : FunctionPass(ID), FirstUnknown(0) {
}
bool ScalarEvolution::runOnFunction(Function &F) {
@@ -5722,7 +5847,13 @@ bool ScalarEvolution::runOnFunction(Function &F) {
}
void ScalarEvolution::releaseMemory() {
- Scalars.clear();
+ // Iterate through all the SCEVUnknown instances and call their
+ // destructors, so that they release their references to their values.
+ for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
+ U->~SCEVUnknown();
+ FirstUnknown = 0;
+
+ ValueExprMap.clear();
BackedgeTakenCounts.clear();
ConstantEvolutionLoopExitValue.clear();
ValuesAtScopes.clear();
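Editor's note: releaseMemory now has to run the SCEVUnknown destructors by hand. The objects live in a bump allocator that reclaims memory without invoking destructors, and each SCEVUnknown holds a reference to its Value that must be dropped first; the new FirstUnknown/Next fields form an intrusive list for exactly this walk. A sketch of the general pattern, with a toy node type:

    // Arena-allocated nodes chained through an intrusive Next pointer;
    // the owner runs destructors explicitly before freeing the arena.
    struct ArenaNode {
      ArenaNode *Next = nullptr;
      ~ArenaNode() { /* release external references here */ }
    };

    static void destroyAll(ArenaNode *&First) {
      for (ArenaNode *N = First; N;) {
        ArenaNode *Nxt = N->Next; // read the link before destroying
        N->~ArenaNode();          // memory itself stays with the arena
        N = Nxt;
      }
      First = nullptr;
    }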
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
index 58711b8..93b2a8b 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
@@ -34,14 +34,14 @@ namespace {
public:
static char ID; // Class identification, replacement for typeinfo
- ScalarEvolutionAliasAnalysis() : FunctionPass(&ID), SE(0) {}
+ ScalarEvolutionAliasAnalysis() : FunctionPass(ID), SE(0) {}
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&AliasAnalysis::ID))
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
@@ -58,11 +58,8 @@ namespace {
// Register this pass...
char ScalarEvolutionAliasAnalysis::ID = 0;
-static RegisterPass<ScalarEvolutionAliasAnalysis>
-X("scev-aa", "ScalarEvolution-based Alias Analysis", false, true);
-
-// Declare that we implement the AliasAnalysis interface
-static RegisterAnalysisGroup<AliasAnalysis> Y(X);
+INITIALIZE_AG_PASS(ScalarEvolutionAliasAnalysis, AliasAnalysis, "scev-aa",
+ "ScalarEvolution-based Alias Analysis", false, true, false);
FunctionPass *llvm::createScalarEvolutionAliasAnalysisPass() {
return new ScalarEvolutionAliasAnalysis();
@@ -158,8 +155,8 @@ ScalarEvolutionAliasAnalysis::alias(const Value *A, unsigned ASize,
Value *AO = GetBaseValue(AS);
Value *BO = GetBaseValue(BS);
if ((AO && AO != A) || (BO && BO != B))
- if (alias(AO ? AO : A, AO ? ~0u : ASize,
- BO ? BO : B, BO ? ~0u : BSize) == NoAlias)
+ if (alias(AO ? AO : A, AO ? UnknownSize : ASize,
+ BO ? BO : B, BO ? UnknownSize : BSize) == NoAlias)
return NoAlias;
// Forward the query to the next analysis.
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index d4a4b26..66a06ae 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -647,6 +647,11 @@ public:
bool operator()(std::pair<const Loop *, const SCEV *> LHS,
std::pair<const Loop *, const SCEV *> RHS) const {
+ // Keep pointer operands sorted at the end.
+ if (LHS.second->getType()->isPointerTy() !=
+ RHS.second->getType()->isPointerTy())
+ return LHS.second->getType()->isPointerTy();
+
// Compare loops with PickMostRelevantLoop.
if (LHS.first != RHS.first)
return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;
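Editor's note: in the added comparison, when the two "is pointer" flags differ the flag alone decides the order, grouping all pointer-typed operands together before the loop-relevance ordering is consulted. A minimal sketch of the partition-then-compare comparator shape (toy types):

    struct Entry { bool IsPointer; int Rank; };

    // Strict weak order: differing flags decide outright, grouping the
    // pointer-typed entries together; equal flags fall back to ranking.
    static bool orderEntries(const Entry &L, const Entry &R) {
      if (L.IsPointer != R.IsPointer)
        return L.IsPointer;
      return L.Rank < R.Rank;
    }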
@@ -699,8 +704,15 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
// The running sum expression is a pointer. Try to form a getelementptr
// at this level with that as the base.
SmallVector<const SCEV *, 4> NewOps;
- for (; I != E && I->first == CurLoop; ++I)
- NewOps.push_back(I->second);
+ for (; I != E && I->first == CurLoop; ++I) {
+ // If the operand is a SCEVUnknown that is not an instruction, peek
+ // through it, to enable more of it to be folded into the GEP.
+ const SCEV *X = I->second;
+ if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
+ if (!isa<Instruction>(U->getValue()))
+ X = SE.getSCEV(U->getValue());
+ NewOps.push_back(X);
+ }
Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
} else if (const PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
// The running sum is an integer, and there's a pointer at this level.
@@ -1047,9 +1059,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
// First check for an existing canonical IV in a suitable type.
PHINode *CanonicalIV = 0;
if (PHINode *PN = L->getCanonicalInductionVariable())
- if (SE.isSCEVable(PN->getType()) &&
- SE.getEffectiveSCEVType(PN->getType())->isIntegerTy() &&
- SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
+ if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
CanonicalIV = PN;
// Rewrite an AddRec in terms of the canonical induction variable, if
@@ -1102,21 +1112,13 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
SE.getUnknown(expand(Rest))));
}
- // {0,+,1} --> Insert a canonical induction variable into the loop!
- if (S->isAffine() && S->getOperand(1)->isOne()) {
- // If there's a canonical IV, just use it.
- if (CanonicalIV) {
- assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
- "IVs with types different from the canonical IV should "
- "already have been handled!");
- return CanonicalIV;
- }
-
+ // If we don't yet have a canonical IV, create one.
+ if (!CanonicalIV) {
// Create and insert the PHI node for the induction variable in the
// specified loop.
BasicBlock *Header = L->getHeader();
- PHINode *PN = PHINode::Create(Ty, "indvar", Header->begin());
- rememberInstruction(PN);
+ CanonicalIV = PHINode::Create(Ty, "indvar", Header->begin());
+ rememberInstruction(CanonicalIV);
Constant *One = ConstantInt::get(Ty, 1);
for (pred_iterator HPI = pred_begin(Header), HPE = pred_end(Header);
@@ -1125,40 +1127,45 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
if (L->contains(HP)) {
// Insert a unit add instruction right before the terminator
// corresponding to the back-edge.
- Instruction *Add = BinaryOperator::CreateAdd(PN, One, "indvar.next",
- HP->getTerminator());
+ Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
+ "indvar.next",
+ HP->getTerminator());
rememberInstruction(Add);
- PN->addIncoming(Add, HP);
+ CanonicalIV->addIncoming(Add, HP);
} else {
- PN->addIncoming(Constant::getNullValue(Ty), HP);
+ CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
}
}
}
+ // {0,+,1} --> Insert a canonical induction variable into the loop!
+ if (S->isAffine() && S->getOperand(1)->isOne()) {
+ assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
+ "IVs with types different from the canonical IV should "
+ "already have been handled!");
+ return CanonicalIV;
+ }
+
// {0,+,F} --> {0,+,1} * F
- // Get the canonical induction variable I for this loop.
- Value *I = CanonicalIV ?
- CanonicalIV :
- getOrInsertCanonicalInductionVariable(L, Ty);
// If this is a simple linear addrec, emit it now as a special case.
if (S->isAffine()) // {0,+,F} --> i*F
return
expand(SE.getTruncateOrNoop(
- SE.getMulExpr(SE.getUnknown(I),
+ SE.getMulExpr(SE.getUnknown(CanonicalIV),
SE.getNoopOrAnyExtend(S->getOperand(1),
- I->getType())),
+ CanonicalIV->getType())),
Ty));
// If this is a chain of recurrences, turn it into a closed form, using the
// folders, then expandCodeFor the closed form. This allows the folders to
// simplify the expression without having to build a bunch of special code
// into this folder.
- const SCEV *IH = SE.getUnknown(I); // Get I as a "symbolic" SCEV.
+ const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV.
// Promote S up to the canonical IV type, if the cast is foldable.
const SCEV *NewS = S;
- const SCEV *Ext = SE.getNoopOrAnyExtend(S, I->getType());
+ const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
if (isa<SCEVAddRecExpr>(Ext))
NewS = Ext;
@@ -1337,16 +1344,21 @@ void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
-Value *
+PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
const Type *Ty) {
assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");
+
+ // Build a SCEV for {0,+,1}<L>.
const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
SE.getConstant(Ty, 1), L);
+
+ // Emit code for it.
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
- Value *V = expandCodeFor(H, 0, L->getHeader()->begin());
+ PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
if (SaveInsertBB)
restoreInsertPoint(SaveInsertBB, SaveInsertPt);
+
return V;
}
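Editor's note: with the return type narrowed from Value* to PHINode*, callers reach PHI-specific API without a cast. A hedged usage sketch (assumes a SCEVExpander Exp and a Loop *L already set up by the caller; the choice of i64 is illustrative):

    // Ask the expander for the canonical {0,+,1} induction variable,
    // inserting one at the top of the loop header if none exists yet.
    const Type *Ty = Type::getInt64Ty(L->getHeader()->getContext());
    PHINode *IndVar = Exp.getOrInsertCanonicalInductionVariable(L, Ty);
    unsigned NumIn = IndVar->getNumIncomingValues(); // no cast needed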
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp b/contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
index 563fd2f..ac36cef 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
@@ -26,7 +26,7 @@ using namespace llvm;
/// post-inc value when we cannot) or it can end up adding extra live-ranges to
/// the loop, resulting in reg-reg copies (if we use the pre-inc value when we
/// should use the post-inc value).
-static bool IVUseShouldUsePostIncValue(Instruction *User, Instruction *IV,
+static bool IVUseShouldUsePostIncValue(Instruction *User, Value *Operand,
const Loop *L, DominatorTree *DT) {
// If the user is in the loop, use the preinc value.
if (L->contains(User)) return false;
@@ -45,20 +45,17 @@ static bool IVUseShouldUsePostIncValue(Instruction *User, Instruction *IV,
// their uses occur in the predecessor block, not the block the PHI lives in)
// should still use the post-inc value. Check for this case now.
PHINode *PN = dyn_cast<PHINode>(User);
- if (!PN) return false; // not a phi, not dominated by latch block.
+ if (!PN || !Operand) return false; // not a phi, not dominated by latch block.
- // Look at all of the uses of IV by the PHI node. If any use corresponds to
- // a block that is not dominated by the latch block, give up and use the
+ // Look at all of the uses of Operand by the PHI node. If any use corresponds
+ // to a block that is not dominated by the latch block, give up and use the
// preincremented value.
- unsigned NumUses = 0;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- if (PN->getIncomingValue(i) == IV) {
- ++NumUses;
- if (!DT->dominates(LatchBlock, PN->getIncomingBlock(i)))
- return false;
- }
+ if (PN->getIncomingValue(i) == Operand &&
+ !DT->dominates(LatchBlock, PN->getIncomingBlock(i)))
+ return false;
- // Okay, all uses of IV by PN are in predecessor blocks that really are
+ // Okay, all uses of Operand by PN are in predecessor blocks that really are
// dominated by the latch block. Use the post-incremented value.
return true;
}
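Editor's note: the situation IVUseShouldUsePostIncValue probes for is a use of the IV that only executes after the latch, where the post-incremented value is the one actually live. In source terms (plain C++ illustration):

    // The use of i after the loop observes the post-incremented value
    // (i == n on exit), so such users prefer the post-inc form.
    int countTo(int n) {
      int i;
      for (i = 0; i != n; ++i)
        ; // loop body
      return i; // post-loop use of the induction variable
    }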
@@ -72,6 +69,7 @@ const SCEV *llvm::TransformForPostIncUse(TransformKind Kind,
DominatorTree &DT) {
if (isa<SCEVConstant>(S) || isa<SCEVUnknown>(S))
return S;
+
if (const SCEVCastExpr *X = dyn_cast<SCEVCastExpr>(S)) {
const SCEV *O = X->getOperand();
const SCEV *N = TransformForPostIncUse(Kind, O, User, OperandValToReplace,
@@ -85,9 +83,69 @@ const SCEV *llvm::TransformForPostIncUse(TransformKind Kind,
}
return S;
}
+
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+ // An addrec. This is the interesting part.
+ SmallVector<const SCEV *, 8> Operands;
+ const Loop *L = AR->getLoop();
+ // The addrec conceptually uses its operands at loop entry.
+ Instruction *LUser = L->getHeader()->begin();
+ // Transform each operand.
+ for (SCEVNAryExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
+ I != E; ++I) {
+ const SCEV *O = *I;
+ const SCEV *N = TransformForPostIncUse(Kind, O, LUser, 0, Loops, SE, DT);
+ Operands.push_back(N);
+ }
+ const SCEV *Result = SE.getAddRecExpr(Operands, L);
+ switch (Kind) {
+ default: llvm_unreachable("Unexpected transform name!");
+ case NormalizeAutodetect:
+ if (IVUseShouldUsePostIncValue(User, OperandValToReplace, L, &DT)) {
+ const SCEV *TransformedStep =
+ TransformForPostIncUse(Kind, AR->getStepRecurrence(SE),
+ User, OperandValToReplace, Loops, SE, DT);
+ Result = SE.getMinusSCEV(Result, TransformedStep);
+ Loops.insert(L);
+ }
+#if 0
+ // This assert is conceptually correct, but ScalarEvolution currently
+ // sometimes fails to canonicalize two equal SCEVs to exactly the same
+ // form. It's possibly a pessimization when this happens, but it isn't a
+ // correctness problem, so disable this assert for now.
+ assert(S == TransformForPostIncUse(Denormalize, Result,
+ User, OperandValToReplace,
+ Loops, SE, DT) &&
+ "SCEV normalization is not invertible!");
+#endif
+ break;
+ case Normalize:
+ if (Loops.count(L)) {
+ const SCEV *TransformedStep =
+ TransformForPostIncUse(Kind, AR->getStepRecurrence(SE),
+ User, OperandValToReplace, Loops, SE, DT);
+ Result = SE.getMinusSCEV(Result, TransformedStep);
+ }
+#if 0
+ // See the comment on the assert above.
+ assert(S == TransformForPostIncUse(Denormalize, Result,
+ User, OperandValToReplace,
+ Loops, SE, DT) &&
+ "SCEV normalization is not invertible!");
+#endif
+ break;
+ case Denormalize:
+ if (Loops.count(L))
+ Result = cast<SCEVAddRecExpr>(Result)->getPostIncExpr(SE);
+ break;
+ }
+ return Result;
+ }
+
if (const SCEVNAryExpr *X = dyn_cast<SCEVNAryExpr>(S)) {
SmallVector<const SCEV *, 8> Operands;
bool Changed = false;
+ // Transform each operand.
for (SCEVNAryExpr::op_iterator I = X->op_begin(), E = X->op_end();
I != E; ++I) {
const SCEV *O = *I;
@@ -96,37 +154,7 @@ const SCEV *llvm::TransformForPostIncUse(TransformKind Kind,
Changed |= N != O;
Operands.push_back(N);
}
- if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
- // An addrec. This is the interesting part.
- const Loop *L = AR->getLoop();
- const SCEV *Result = SE.getAddRecExpr(Operands, L);
- switch (Kind) {
- default: llvm_unreachable("Unexpected transform name!");
- case NormalizeAutodetect:
- if (Instruction *OI = dyn_cast<Instruction>(OperandValToReplace))
- if (IVUseShouldUsePostIncValue(User, OI, L, &DT)) {
- const SCEV *TransformedStep =
- TransformForPostIncUse(Kind, AR->getStepRecurrence(SE),
- User, OperandValToReplace, Loops, SE, DT);
- Result = SE.getMinusSCEV(Result, TransformedStep);
- Loops.insert(L);
- }
- break;
- case Normalize:
- if (Loops.count(L)) {
- const SCEV *TransformedStep =
- TransformForPostIncUse(Kind, AR->getStepRecurrence(SE),
- User, OperandValToReplace, Loops, SE, DT);
- Result = SE.getMinusSCEV(Result, TransformedStep);
- }
- break;
- case Denormalize:
- if (Loops.count(L))
- Result = SE.getAddExpr(Result, AR->getStepRecurrence(SE));
- break;
- }
- return Result;
- }
+ // If any operand actually changed, return a transformed result.
if (Changed)
switch (S->getSCEVType()) {
case scAddExpr: return SE.getAddExpr(Operands);
@@ -137,6 +165,7 @@ const SCEV *llvm::TransformForPostIncUse(TransformKind Kind,
}
return S;
}
+
if (const SCEVUDivExpr *X = dyn_cast<SCEVUDivExpr>(S)) {
const SCEV *LO = X->getLHS();
const SCEV *RO = X->getRHS();
@@ -148,6 +177,7 @@ const SCEV *llvm::TransformForPostIncUse(TransformKind Kind,
return SE.getUDivExpr(LN, RN);
return S;
}
+
llvm_unreachable("Unexpected SCEV kind!");
return 0;
}
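Editor's note: a concrete instance of the three transforms, in chrec notation, for a post-increment use of the IV {0,+,1}<L> with L in Loops. Normalize subtracts the transformed step: {0,+,1} - 1 = {-1,+,1}<L>. Denormalize inverts that through getPostIncExpr, turning {-1,+,1}<L> back into {0,+,1}<L>. NormalizeAutodetect performs the same subtraction, but only after IVUseShouldUsePostIncValue confirms the user really should see the post-inc value, recording L in Loops so the transform can later be undone.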
diff --git a/contrib/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
new file mode 100644
index 0000000..bbfdcec
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -0,0 +1,191 @@
+//===- TypeBasedAliasAnalysis.cpp - Type-Based Alias Analysis -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TypeBasedAliasAnalysis pass, which implements
+// metadata-based TBAA.
+//
+// In LLVM IR, memory does not have types, so LLVM's own type system is not
+// suitable for doing TBAA. Instead, metadata is added to the IR to describe
+// a type system of a higher level language.
+//
+// This pass is language-independent. The type system is encoded in
+// metadata. This allows this pass to support typical C and C++ TBAA, but
+// it can also support custom aliasing behavior for other languages.
+//
+// This is a work-in-progress. It doesn't work yet, and the metadata
+// format isn't stable.
+//
+// TODO: getModRefBehavior. The AliasAnalysis infrastructure will need to
+// be extended.
+// TODO: AA chaining
+// TODO: struct fields
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Module.h"
+#include "llvm/Metadata.h"
+#include "llvm/Pass.h"
+using namespace llvm;
+
+namespace {
+ /// TBAANode - This is a simple wrapper around an MDNode which provides a
+ /// higher-level interface by hiding the details of how alias analysis
+ /// information is encoded in its operands.
+ class TBAANode {
+ const MDNode *Node;
+
+ public:
+ TBAANode() : Node(0) {}
+ explicit TBAANode(MDNode *N) : Node(N) {}
+
+ /// getNode - Get the MDNode for this TBAANode.
+ const MDNode *getNode() const { return Node; }
+
+ /// getParent - Get this TBAANode's Alias DAG parent.
+ TBAANode getParent() const {
+ if (Node->getNumOperands() < 2)
+ return TBAANode();
+ MDNode *P = dyn_cast<MDNode>(Node->getOperand(1));
+ if (!P)
+ return TBAANode();
+ // Ok, this node has a valid parent. Return it.
+ return TBAANode(P);
+ }
+
+ /// TypeIsImmutable - Test if this TBAANode represents a type for objects
+ /// which are not modified (by any means) in the context where this
+ /// AliasAnalysis is relevant.
+ bool TypeIsImmutable() const {
+ if (Node->getNumOperands() < 3)
+ return false;
+ ConstantInt *CI = dyn_cast<ConstantInt>(Node->getOperand(2));
+ if (!CI)
+ return false;
+ // TODO: Think about the encoding.
+ return CI->isOne();
+ }
+ };
+}
+
+namespace {
+ /// TypeBasedAliasAnalysis - This is a simple alias analysis
+ /// implementation that uses TBAA metadata to answer queries.
+ class TypeBasedAliasAnalysis : public ImmutablePass,
+ public AliasAnalysis {
+ public:
+ static char ID; // Class identification, replacement for typeinfo
+ TypeBasedAliasAnalysis() : ImmutablePass(ID) {}
+
+ /// getAdjustedAnalysisPointer - This method is used when a pass implements
+ /// an analysis interface through multiple inheritance. If needed, it
+ /// should override this to adjust the this pointer as needed for the
+ /// specified pass info.
+ virtual void *getAdjustedAnalysisPointer(const void *PI) {
+ if (PI == &AliasAnalysis::ID)
+ return (AliasAnalysis*)this;
+ return this;
+ }
+
+ private:
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ virtual AliasResult alias(const Value *V1, unsigned V1Size,
+ const Value *V2, unsigned V2Size);
+ virtual bool pointsToConstantMemory(const Value *P);
+ };
+} // End of anonymous namespace
+
+// Register this pass...
+char TypeBasedAliasAnalysis::ID = 0;
+INITIALIZE_AG_PASS(TypeBasedAliasAnalysis, AliasAnalysis, "tbaa",
+ "Type-Based Alias Analysis", false, true, false);
+
+ImmutablePass *llvm::createTypeBasedAliasAnalysisPass() {
+ return new TypeBasedAliasAnalysis();
+}
+
+void
+TypeBasedAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AliasAnalysis::getAnalysisUsage(AU);
+}
+
+AliasAnalysis::AliasResult
+TypeBasedAliasAnalysis::alias(const Value *A, unsigned ASize,
+ const Value *B, unsigned BSize) {
+ // Currently, metadata can only be attached to Instructions.
+ const Instruction *AI = dyn_cast<Instruction>(A);
+ if (!AI) return MayAlias;
+ const Instruction *BI = dyn_cast<Instruction>(B);
+ if (!BI) return MayAlias;
+
+ // Get the attached MDNodes. If either value lacks a tbaa MDNode, we must
+ // be conservative.
+ MDNode *AM =
+ AI->getMetadata(AI->getParent()->getParent()->getParent()
+ ->getMDKindID("tbaa"));
+ if (!AM) return MayAlias;
+ MDNode *BM =
+ BI->getMetadata(BI->getParent()->getParent()->getParent()
+ ->getMDKindID("tbaa"));
+ if (!BM) return MayAlias;
+
+ // Keep track of the root node for A and B.
+ TBAANode RootA, RootB;
+
+ // Climb the DAG from A to see if we reach B.
+ for (TBAANode T(AM); ; ) {
+ if (T.getNode() == BM)
+ // B is an ancestor of A.
+ return MayAlias;
+
+ RootA = T;
+ T = T.getParent();
+ if (!T.getNode())
+ break;
+ }
+
+ // Climb the DAG from B to see if we reach A.
+ for (TBAANode T(BM); ; ) {
+ if (T.getNode() == AM)
+ // A is an ancestor of B.
+ return MayAlias;
+
+ RootB = T;
+ T = T.getParent();
+ if (!T.getNode())
+ break;
+ }
+
+ // Neither node is an ancestor of the other.
+
+ // If they have the same root, then we've proved there's no alias.
+ if (RootA.getNode() == RootB.getNode())
+ return NoAlias;
+
+ // If they have different roots, they're part of different potentially
+ // unrelated type systems, so we must be conservative.
+ return MayAlias;
+}
+
+bool TypeBasedAliasAnalysis::pointsToConstantMemory(const Value *P) {
+ // Currently, metadata can only be attached to Instructions.
+ const Instruction *I = dyn_cast<Instruction>(P);
+ if (!I) return false;
+
+ MDNode *M =
+ I->getMetadata(I->getParent()->getParent()->getParent()
+ ->getMDKindID("tbaa"));
+ if (!M) return false;
+
+ // If this is an "immutable" type, we can assume the pointer is pointing
+ // to constant memory.
+ return TBAANode(M).TypeIsImmutable();
+}
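Editor's note: the two climbs in alias() above are a standard ancestor test in a parent-pointer DAG. If either tag is reachable from the other, the accesses may alias; only when both climbs bottom out at the same root without meeting can NoAlias be returned. A self-contained sketch of the same walk with a toy node type (not MDNode):

    // Each type node points at its parent; two tags may alias iff one
    // is an ancestor of the other, or they hang under different roots.
    struct TypeNode { const TypeNode *Parent; };

    static bool mayAliasTBAA(const TypeNode *A, const TypeNode *B) {
      const TypeNode *RootA = A, *RootB = B;
      for (const TypeNode *T = A; T; T = T->Parent) {
        if (T == B) return true; // B is an ancestor of A
        RootA = T;
      }
      for (const TypeNode *T = B; T; T = T->Parent) {
        if (T == A) return true; // A is an ancestor of B
        RootB = T;
      }
      // Same root, neither an ancestor: provably distinct types.
      return RootA != RootB;
    }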
diff --git a/contrib/llvm/lib/Analysis/ValueTracking.cpp b/contrib/llvm/lib/Analysis/ValueTracking.cpp
index b4c9884..181c9b0 100644
--- a/contrib/llvm/lib/Analysis/ValueTracking.cpp
+++ b/contrib/llvm/lib/Analysis/ValueTracking.cpp
@@ -880,19 +880,20 @@ bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
}
Value *Mul0 = NULL;
- Value *Mul1 = NULL;
- bool M0 = ComputeMultiple(Op0, Base, Mul0,
- LookThroughSExt, Depth+1);
- bool M1 = ComputeMultiple(Op1, Base, Mul1,
- LookThroughSExt, Depth+1);
-
- if (M0) {
- if (isa<Constant>(Op1) && isa<Constant>(Mul0)) {
- // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
- Multiple = ConstantExpr::getMul(cast<Constant>(Mul0),
- cast<Constant>(Op1));
- return true;
- }
+ if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
+ if (Constant *Op1C = dyn_cast<Constant>(Op1))
+ if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
+ if (Op1C->getType()->getPrimitiveSizeInBits() <
+ MulC->getType()->getPrimitiveSizeInBits())
+ Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
+ if (Op1C->getType()->getPrimitiveSizeInBits() >
+ MulC->getType()->getPrimitiveSizeInBits())
+ MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
+
+ // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
+ Multiple = ConstantExpr::getMul(MulC, Op1C);
+ return true;
+ }
if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
if (Mul0CI->getValue() == 1) {
@@ -902,13 +903,21 @@ bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
}
}
- if (M1) {
- if (isa<Constant>(Op0) && isa<Constant>(Mul1)) {
- // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
- Multiple = ConstantExpr::getMul(cast<Constant>(Mul1),
- cast<Constant>(Op0));
- return true;
- }
+ Value *Mul1 = NULL;
+ if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
+ if (Constant *Op0C = dyn_cast<Constant>(Op0))
+ if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
+ if (Op0C->getType()->getPrimitiveSizeInBits() <
+ MulC->getType()->getPrimitiveSizeInBits())
+ Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
+ if (Op0C->getType()->getPrimitiveSizeInBits() >
+ MulC->getType()->getPrimitiveSizeInBits())
+ MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
+
+ // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
+ Multiple = ConstantExpr::getMul(MulC, Op0C);
+ return true;
+ }
if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
if (Mul1CI->getValue() == 1) {
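Editor's note: the rewritten branches fix a latent type mismatch. The two constants handed to ConstantExpr::getMul could have different bit widths, so the narrower one is now zero-extended to the wider type first. The same guard in miniature, with fixed-width integers standing in for arbitrary-width Constants:

    #include <cstdint>

    // Zero-extend the narrower factor before multiplying so both
    // operands of the multiply share one width.
    static uint64_t mulWithMatchingWidths(uint32_t Narrow, uint64_t Wide) {
      uint64_t Extended = Narrow; // zext i32 -> i64
      return Extended * Wide;
    }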
@@ -973,195 +982,6 @@ bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
return false;
}
-
-/// GetLinearExpression - Analyze the specified value as a linear expression:
-/// "A*V + B", where A and B are constant integers. Return the scale and offset
-/// values as APInts and return V as a Value*. The incoming Value is known to
-/// have IntegerType. Note that this looks through extends, so the high bits
-/// may not be represented in the result.
-static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
- const TargetData *TD, unsigned Depth) {
- assert(V->getType()->isIntegerTy() && "Not an integer value");
-
- // Limit our recursion depth.
- if (Depth == 6) {
- Scale = 1;
- Offset = 0;
- return V;
- }
-
- if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
- if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
- switch (BOp->getOpcode()) {
- default: break;
- case Instruction::Or:
- // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
- // analyze it.
- if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), TD))
- break;
- // FALL THROUGH.
- case Instruction::Add:
- V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, TD, Depth+1);
- Offset += RHSC->getValue();
- return V;
- case Instruction::Mul:
- V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, TD, Depth+1);
- Offset *= RHSC->getValue();
- Scale *= RHSC->getValue();
- return V;
- case Instruction::Shl:
- V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, TD, Depth+1);
- Offset <<= RHSC->getValue().getLimitedValue();
- Scale <<= RHSC->getValue().getLimitedValue();
- return V;
- }
- }
- }
-
- // Since clients don't care about the high bits of the value, just scales and
- // offsets, we can look through extensions.
- if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
- Value *CastOp = cast<CastInst>(V)->getOperand(0);
- unsigned OldWidth = Scale.getBitWidth();
- unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
- Scale.trunc(SmallWidth);
- Offset.trunc(SmallWidth);
- Value *Result = GetLinearExpression(CastOp, Scale, Offset, TD, Depth+1);
- Scale.zext(OldWidth);
- Offset.zext(OldWidth);
- return Result;
- }
-
- Scale = 1;
- Offset = 0;
- return V;
-}
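Editor's note: a worked instance of the removed helper, which (per the diffstat) moves into BasicAliasAnalysis.cpp rather than disappearing. For V = (X << 2) + 12, the walk peels the add (Offset += 12), then the shl (Scale <<= 2, and Offset <<= 2 applied to the inner zeros), and returns X with Scale = 4 and Offset = 12; that is, V == 4*X + 12.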
-
-/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
-/// into a base pointer with a constant offset and a number of scaled symbolic
-/// offsets.
-///
-/// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
-/// the VarIndices vector) are Value*'s that are known to be scaled by the
-/// specified amount, but which may have other unrepresented high bits. As such,
-/// the gep cannot necessarily be reconstructed from its decomposed form.
-///
-/// When TargetData is around, this function is capable of analyzing everything
-/// that Value::getUnderlyingObject() can look through. When not, it just looks
-/// through pointer casts.
-///
-const Value *llvm::DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
- SmallVectorImpl<std::pair<const Value*, int64_t> > &VarIndices,
- const TargetData *TD) {
- // Limit recursion depth to limit compile time in crazy cases.
- unsigned MaxLookup = 6;
-
- BaseOffs = 0;
- do {
- // See if this is a bitcast or GEP.
- const Operator *Op = dyn_cast<Operator>(V);
- if (Op == 0) {
- // The only non-operator case we can handle are GlobalAliases.
- if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
- if (!GA->mayBeOverridden()) {
- V = GA->getAliasee();
- continue;
- }
- }
- return V;
- }
-
- if (Op->getOpcode() == Instruction::BitCast) {
- V = Op->getOperand(0);
- continue;
- }
-
- const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
- if (GEPOp == 0)
- return V;
-
- // Don't attempt to analyze GEPs over unsized objects.
- if (!cast<PointerType>(GEPOp->getOperand(0)->getType())
- ->getElementType()->isSized())
- return V;
-
- // If we are lacking TargetData information, we can't compute the offsets of
- // elements computed by GEPs. However, we can handle bitcast equivalent
- // GEPs.
- if (!TD) {
- if (!GEPOp->hasAllZeroIndices())
- return V;
- V = GEPOp->getOperand(0);
- continue;
- }
-
- // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
- gep_type_iterator GTI = gep_type_begin(GEPOp);
- for (User::const_op_iterator I = GEPOp->op_begin()+1,
- E = GEPOp->op_end(); I != E; ++I) {
- Value *Index = *I;
- // Compute the (potentially symbolic) offset in bytes for this index.
- if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
- // For a struct, add the member offset.
- unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
- if (FieldNo == 0) continue;
-
- BaseOffs += TD->getStructLayout(STy)->getElementOffset(FieldNo);
- continue;
- }
-
- // For an array/pointer, add the element offset, explicitly scaled.
- if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
- if (CIdx->isZero()) continue;
- BaseOffs += TD->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
- continue;
- }
-
- uint64_t Scale = TD->getTypeAllocSize(*GTI);
-
- // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
- unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
- APInt IndexScale(Width, 0), IndexOffset(Width, 0);
- Index = GetLinearExpression(Index, IndexScale, IndexOffset, TD, 0);
-
- // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
- // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
- BaseOffs += IndexOffset.getZExtValue()*Scale;
- Scale *= IndexScale.getZExtValue();
-
-
- // If we already had an occurrence of this index variable, merge this
- // scale into it. For example, we want to handle:
- // A[x][x] -> x*16 + x*4 -> x*20
- // This also ensures that 'x' only appears in the index list once.
- for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
- if (VarIndices[i].first == Index) {
- Scale += VarIndices[i].second;
- VarIndices.erase(VarIndices.begin()+i);
- break;
- }
- }
-
- // Make sure that we have a scale that makes sense for this target's
- // pointer size.
- if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
- Scale <<= ShiftBits;
- Scale >>= ShiftBits;
- }
-
- if (Scale)
- VarIndices.push_back(std::make_pair(Index, Scale));
- }
-
- // Analyze the base pointer next.
- V = GEPOp->getOperand(0);
- } while (--MaxLookup);
-
- // If the chain of expressions is too deep, just return early.
- return V;
-}
-
-
// This is the recursive version of BuildSubAggregate. It takes a few different
// arguments. Idxs is the index within the nested struct From that we are
// looking at now (which is of type IndexedType). IdxSkip is the number of