author    rdivacky <rdivacky@FreeBSD.org>  2009-12-01 11:07:05 +0000
committer rdivacky <rdivacky@FreeBSD.org>  2009-12-01 11:07:05 +0000
commit    e7908924d847e63b02bc82bfaa1709ab9c774dcd (patch)
tree      ffe0478472eaa0686f11cb02c6df7d257b8719b0 /lib/Analysis
parent    bf68f1ea49e39c4194f339ddd4421b0c3a31988b (diff)
Update LLVM to r90226.
Diffstat (limited to 'lib/Analysis')
-rw-r--r--  lib/Analysis/AliasAnalysis.cpp            |  17
-rw-r--r--  lib/Analysis/AliasDebugger.cpp            |   5
-rw-r--r--  lib/Analysis/AliasSetTracker.cpp          |   6
-rw-r--r--  lib/Analysis/BasicAliasAnalysis.cpp       | 894
-rw-r--r--  lib/Analysis/CaptureTracking.cpp          |  27
-rw-r--r--  lib/Analysis/ConstantFolding.cpp          |  14
-rw-r--r--  lib/Analysis/DebugInfo.cpp                | 139
-rw-r--r--  lib/Analysis/DomPrinter.cpp               |  51
-rw-r--r--  lib/Analysis/IPA/Andersens.cpp            |  27
-rw-r--r--  lib/Analysis/IPA/GlobalsModRef.cpp        |   1
-rw-r--r--  lib/Analysis/IVUsers.cpp                  |   8
-rw-r--r--  lib/Analysis/InstructionSimplify.cpp      |  73
-rw-r--r--  lib/Analysis/LoopInfo.cpp                 |   5
-rw-r--r--  lib/Analysis/MemoryDependenceAnalysis.cpp | 464
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp          |   4
-rw-r--r--  lib/Analysis/ValueTracking.cpp            | 229
16 files changed, 1123 insertions, 841 deletions
diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp
index 0234965..dee9b53 100644
--- a/lib/Analysis/AliasAnalysis.cpp
+++ b/lib/Analysis/AliasAnalysis.cpp
@@ -49,21 +49,11 @@ AliasAnalysis::alias(const Value *V1, unsigned V1Size,
return AA->alias(V1, V1Size, V2, V2Size);
}
-void AliasAnalysis::getMustAliases(Value *P, std::vector<Value*> &RetVals) {
- assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- return AA->getMustAliases(P, RetVals);
-}
-
bool AliasAnalysis::pointsToConstantMemory(const Value *P) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
return AA->pointsToConstantMemory(P);
}
-bool AliasAnalysis::hasNoModRefInfoForCalls() const {
- assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- return AA->hasNoModRefInfoForCalls();
-}
-
void AliasAnalysis::deleteValue(Value *V) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
AA->deleteValue(V);
@@ -137,17 +127,18 @@ AliasAnalysis::getModRefBehavior(Function *F,
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
- ModRefResult Mask = ModRef;
ModRefBehavior MRB = getModRefBehavior(CS);
if (MRB == DoesNotAccessMemory)
return NoModRef;
- else if (MRB == OnlyReadsMemory)
+
+ ModRefResult Mask = ModRef;
+ if (MRB == OnlyReadsMemory)
Mask = Ref;
else if (MRB == AliasAnalysis::AccessesArguments) {
bool doesAlias = false;
for (CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
AI != AE; ++AI)
- if (alias(*AI, ~0U, P, Size) != NoAlias) {
+ if (!isNoAlias(*AI, ~0U, P, Size)) {
doesAlias = true;
break;
}
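A minimal standalone sketch (plain C++, not the LLVM sources) of the control flow this hunk settles on: the DoesNotAccessMemory early-out is hoisted above the Mask initialization, and AnyArgMayAlias stands in for the isNoAlias() loop over the call's arguments.

#include <cstdio>

enum ModRefResult { NoModRef = 0, Ref = 1, Mod = 2, ModRef = 3 };
enum ModRefBehavior { DoesNotAccessMemory, OnlyReadsMemory,
                      AccessesArguments, UnknownModRefBehavior };

// Mirrors the rewritten AliasAnalysis::getModRefInfo(CallSite, Value*, ...).
ModRefResult getCallModRef(ModRefBehavior MRB, bool AnyArgMayAlias) {
  if (MRB == DoesNotAccessMemory)
    return NoModRef;                 // early out, before Mask is even set up
  ModRefResult Mask = ModRef;
  if (MRB == OnlyReadsMemory)
    Mask = Ref;                      // the call may read P but never write it
  else if (MRB == AccessesArguments && !AnyArgMayAlias)
    return NoModRef;                 // no argument can alias P at all
  return Mask;
}

int main() {
  std::printf("%d\n", getCallModRef(OnlyReadsMemory, true)); // prints 1 (Ref)
  return 0;
}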
diff --git a/lib/Analysis/AliasDebugger.cpp b/lib/Analysis/AliasDebugger.cpp
index cf4727f..6868e3f 100644
--- a/lib/Analysis/AliasDebugger.cpp
+++ b/lib/Analysis/AliasDebugger.cpp
@@ -90,11 +90,6 @@ namespace {
return AliasAnalysis::getModRefInfo(CS1,CS2);
}
- void getMustAliases(Value *P, std::vector<Value*> &RetVals) {
- assert(Vals.find(P) != Vals.end() && "Never seen value in AA before");
- return AliasAnalysis::getMustAliases(P, RetVals);
- }
-
bool pointsToConstantMemory(const Value *P) {
assert(Vals.find(P) != Vals.end() && "Never seen value in AA before");
return AliasAnalysis::pointsToConstantMemory(P);
diff --git a/lib/Analysis/AliasSetTracker.cpp b/lib/Analysis/AliasSetTracker.cpp
index c037c8d..6634600 100644
--- a/lib/Analysis/AliasSetTracker.cpp
+++ b/lib/Analysis/AliasSetTracker.cpp
@@ -153,9 +153,6 @@ bool AliasSet::aliasesPointer(const Value *Ptr, unsigned Size,
// Check the call sites list and invoke list...
if (!CallSites.empty()) {
- if (AA.hasNoModRefInfoForCalls())
- return true;
-
for (unsigned i = 0, e = CallSites.size(); i != e; ++i)
if (AA.getModRefInfo(CallSites[i], const_cast<Value*>(Ptr), Size)
!= AliasAnalysis::NoModRef)
@@ -169,9 +166,6 @@ bool AliasSet::aliasesCallSite(CallSite CS, AliasAnalysis &AA) const {
if (AA.doesNotAccessMemory(CS))
return false;
- if (AA.hasNoModRefInfoForCalls())
- return true;
-
for (unsigned i = 0, e = CallSites.size(); i != e; ++i)
if (AA.getModRefInfo(CallSites[i], CS) != AliasAnalysis::NoModRef ||
AA.getModRefInfo(CS, CallSites[i]) != AliasAnalysis::NoModRef)
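With the hasNoModRefInfoForCalls() bailout gone, both aliasesPointer and aliasesCallSite now always walk the tracked call sites and query the analysis pair by pair. A standalone sketch of that shape, with a hypothetical stand-in for getModRefInfo (not the LLVM API):

#include <cstdio>
#include <vector>

enum ModRefResult { NoModRef, Ref, Mod, ModRef };
struct CallSite { int Id; };

// Hypothetical stand-in for AliasAnalysis::getModRefInfo; a real analysis
// would inspect the call and the memory location.
static ModRefResult getModRefInfo(const CallSite &, const void *, unsigned) {
  return NoModRef;
}

static bool aliasesPointer(const std::vector<CallSite> &CallSites,
                           const void *Ptr, unsigned Size) {
  // No blanket "calls are opaque" bailout any more: query every call site.
  for (unsigned i = 0, e = CallSites.size(); i != e; ++i)
    if (getModRefInfo(CallSites[i], Ptr, Size) != NoModRef)
      return true;
  return false;
}

int main() {
  std::vector<CallSite> CS;
  CS.push_back(CallSite());
  int X = 0;
  std::printf("%d\n", aliasesPointer(CS, &X, sizeof X) ? 1 : 0); // prints 0
  return 0;
}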
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index b8d69f4..b2983c7 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -14,8 +14,6 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/CaptureTracking.h"
-#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
@@ -25,12 +23,13 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/Pass.h"
+#include "llvm/Analysis/CaptureTracking.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include <algorithm>
using namespace llvm;
@@ -38,26 +37,6 @@ using namespace llvm;
// Useful predicates
//===----------------------------------------------------------------------===//
-static const Value *GetGEPOperands(const Value *V,
- SmallVector<Value*, 16> &GEPOps) {
- assert(GEPOps.empty() && "Expect empty list to populate!");
- GEPOps.insert(GEPOps.end(), cast<User>(V)->op_begin()+1,
- cast<User>(V)->op_end());
-
- // Accumulate all of the chained indexes into the operand array
- V = cast<User>(V)->getOperand(0);
-
- while (const GEPOperator *G = dyn_cast<GEPOperator>(V)) {
- if (!isa<Constant>(GEPOps[0]) || isa<GlobalValue>(GEPOps[0]) ||
- !cast<Constant>(GEPOps[0])->isNullValue())
- break; // Don't handle folding arbitrary pointer offsets yet...
- GEPOps.erase(GEPOps.begin()); // Drop the zero index
- GEPOps.insert(GEPOps.begin(), G->op_begin()+1, G->op_end());
- V = G->getOperand(0);
- }
- return V;
-}
-
/// isKnownNonNull - Return true if we know that the specified value is never
/// null.
static bool isKnownNonNull(const Value *V) {
@@ -79,7 +58,12 @@ static bool isKnownNonNull(const Value *V) {
static bool isNonEscapingLocalObject(const Value *V) {
// If this is a local allocation, check to see if it escapes.
if (isa<AllocaInst>(V) || isNoAliasCall(V))
- return !PointerMayBeCaptured(V, false);
+ // Set StoreCaptures to True so that we can assume in our callers that the
+ // pointer is not the result of a load instruction. Currently
+ // PointerMayBeCaptured doesn't have any special analysis for the
+ // StoreCaptures=false case; if it did, our callers could be refined to be
+ // more precise.
+ return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
// If this is an argument that corresponds to a byval or noalias argument,
// then it has not escaped before entering the function. Check if it escapes
@@ -89,7 +73,7 @@ static bool isNonEscapingLocalObject(const Value *V) {
// Don't bother analyzing arguments already known not to escape.
if (A->hasNoCaptureAttr())
return true;
- return !PointerMayBeCaptured(V, false);
+ return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
}
return false;
}
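A standalone sketch of the isNonEscapingLocalObject() contract this hunk documents, with a deliberately conservative stand-in for PointerMayBeCaptured: StoreCaptures=true makes any store of the pointer count as an escape, which is what later lets callers assume the pointer can never reappear through a load.

#include <cstdio>

// Hypothetical stand-in for llvm::PointerMayBeCaptured; a real implementation
// walks the pointer's uses. Always answering "captured" is the safe default.
static bool PointerMayBeCaptured(const void *V, bool ReturnCaptures,
                                 bool StoreCaptures) {
  (void)V; (void)ReturnCaptures; (void)StoreCaptures;
  return true;
}

static bool isNonEscapingLocalObject(const void *V) {
  // As in the hunk: returning the pointer is not a capture here, but storing
  // it anywhere is.
  return !PointerMayBeCaptured(V, /*ReturnCaptures=*/false,
                               /*StoreCaptures=*/true);
}

int main() {
  int Local = 0;
  std::printf("%d\n", isNonEscapingLocalObject(&Local) ? 1 : 0); // prints 0
  return 0;
}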
@@ -159,7 +143,6 @@ namespace {
llvm_unreachable("This method may not be called on this function!");
}
- virtual void getMustAliases(Value *P, std::vector<Value*> &RetVals) { }
virtual bool pointsToConstantMemory(const Value *P) { return false; }
virtual ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size) {
return ModRef;
@@ -167,7 +150,6 @@ namespace {
virtual ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
return ModRef;
}
- virtual bool hasNoModRefInfoForCalls() const { return true; }
virtual void deleteValue(Value *V) {}
virtual void copyValue(Value *From, Value *To) {}
@@ -206,10 +188,6 @@ namespace {
ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
ModRefResult getModRefInfo(CallSite CS1, CallSite CS2);
- /// hasNoModRefInfoForCalls - We can provide mod/ref information against
- /// non-escaping allocations.
- virtual bool hasNoModRefInfoForCalls() const { return false; }
-
/// pointsToConstantMemory - Chase pointers until we find a (constant
/// global) or not.
bool pointsToConstantMemory(const Value *P);
@@ -218,13 +196,14 @@ namespace {
// VisitedPHIs - Track PHI nodes visited by an aliasCheck() call.
SmallPtrSet<const Value*, 16> VisitedPHIs;
- // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
- // against another.
- AliasResult aliasGEP(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size);
+ // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
+ // instruction against another.
+ AliasResult aliasGEP(const GEPOperator *V1, unsigned V1Size,
+ const Value *V2, unsigned V2Size,
+ const Value *UnderlyingV1, const Value *UnderlyingV2);
- // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction
- // against another.
+ // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI
+ // instruction against another.
AliasResult aliasPHI(const PHINode *PN, unsigned PNSize,
const Value *V2, unsigned V2Size);
@@ -234,15 +213,6 @@ namespace {
AliasResult aliasCheck(const Value *V1, unsigned V1Size,
const Value *V2, unsigned V2Size);
-
- // CheckGEPInstructions - Check two GEP instructions with known
- // must-aliasing base pointers. This checks to see if the index expressions
- // preclude the pointers from aliasing...
- AliasResult
- CheckGEPInstructions(const Type* BasePtr1Ty,
- Value **GEP1Ops, unsigned NumGEP1Ops, unsigned G1Size,
- const Type *BasePtr2Ty,
- Value **GEP2Ops, unsigned NumGEP2Ops, unsigned G2Size);
};
} // End of anonymous namespace
@@ -264,107 +234,124 @@ ImmutablePass *llvm::createBasicAliasAnalysisPass() {
bool BasicAliasAnalysis::pointsToConstantMemory(const Value *P) {
if (const GlobalVariable *GV =
dyn_cast<GlobalVariable>(P->getUnderlyingObject()))
+ // Note: this doesn't require GV to be "ODR" because it isn't legal for a
+ // global to be marked constant in some modules and non-constant in others.
+ // GV may even be a declaration, not a definition.
return GV->isConstant();
return false;
}
-// getModRefInfo - Check to see if the specified callsite can clobber the
-// specified memory object. Since we only look at local properties of this
-// function, we really can't say much about this query. We do, however, use
-// simple "address taken" analysis on local objects.
-//
+/// getModRefInfo - Check to see if the specified callsite can clobber the
+/// specified memory object. Since we only look at local properties of this
+/// function, we really can't say much about this query. We do, however, use
+/// simple "address taken" analysis on local objects.
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
- if (!isa<Constant>(P)) {
- const Value *Object = P->getUnderlyingObject();
-
- // If this is a tail call and P points to a stack location, we know that
- // the tail call cannot access or modify the local stack.
- // We cannot exclude byval arguments here; these belong to the caller of
- // the current function not to the current function, and a tail callee
- // may reference them.
- if (isa<AllocaInst>(Object))
- if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
- if (CI->isTailCall())
- return NoModRef;
-
- // If the pointer is to a locally allocated object that does not escape,
- // then the call can not mod/ref the pointer unless the call takes the
- // argument without capturing it.
- if (isNonEscapingLocalObject(Object) && CS.getInstruction() != Object) {
- bool passedAsArg = false;
- // TODO: Eventually only check 'nocapture' arguments.
- for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
- CI != CE; ++CI)
- if (isa<PointerType>((*CI)->getType()) &&
- alias(cast<Value>(CI), ~0U, P, ~0U) != NoAlias)
- passedAsArg = true;
-
- if (!passedAsArg)
+ const Value *Object = P->getUnderlyingObject();
+
+ // If this is a tail call and P points to a stack location, we know that
+ // the tail call cannot access or modify the local stack.
+ // We cannot exclude byval arguments here; these belong to the caller of
+ // the current function not to the current function, and a tail callee
+ // may reference them.
+ if (isa<AllocaInst>(Object))
+ if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
+ if (CI->isTailCall())
return NoModRef;
- }
-
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
- switch (II->getIntrinsicID()) {
- default: break;
- case Intrinsic::memcpy:
- case Intrinsic::memmove: {
- unsigned Len = ~0U;
- if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getOperand(3)))
- Len = LenCI->getZExtValue();
- Value *Dest = II->getOperand(1);
- Value *Src = II->getOperand(2);
- if (alias(Dest, Len, P, Size) == NoAlias) {
- if (alias(Src, Len, P, Size) == NoAlias)
- return NoModRef;
- return Ref;
- }
- }
- break;
- case Intrinsic::memset:
- if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getOperand(3))) {
- unsigned Len = LenCI->getZExtValue();
- Value *Dest = II->getOperand(1);
- if (alias(Dest, Len, P, Size) == NoAlias)
- return NoModRef;
- }
- break;
- case Intrinsic::atomic_cmp_swap:
- case Intrinsic::atomic_swap:
- case Intrinsic::atomic_load_add:
- case Intrinsic::atomic_load_sub:
- case Intrinsic::atomic_load_and:
- case Intrinsic::atomic_load_nand:
- case Intrinsic::atomic_load_or:
- case Intrinsic::atomic_load_xor:
- case Intrinsic::atomic_load_max:
- case Intrinsic::atomic_load_min:
- case Intrinsic::atomic_load_umax:
- case Intrinsic::atomic_load_umin:
- if (TD) {
- Value *Op1 = II->getOperand(1);
- unsigned Op1Size = TD->getTypeStoreSize(Op1->getType());
- if (alias(Op1, Op1Size, P, Size) == NoAlias)
- return NoModRef;
- }
+
+ // If the pointer is to a locally allocated object that does not escape,
+ // then the call can not mod/ref the pointer unless the call takes the pointer
+ // as an argument, and itself doesn't capture it.
+ if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
+ isNonEscapingLocalObject(Object)) {
+ bool PassedAsArg = false;
+ unsigned ArgNo = 0;
+ for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
+ CI != CE; ++CI, ++ArgNo) {
+ // Only look at the no-capture pointer arguments.
+ if (!isa<PointerType>((*CI)->getType()) ||
+ !CS.paramHasAttr(ArgNo+1, Attribute::NoCapture))
+ continue;
+
+ // If this is a no-capture pointer argument, see if we can tell that it
+ // is impossible to alias the pointer we're checking. If not, we have to
+ // assume that the call could touch the pointer, even though it doesn't
+ // escape.
+ if (!isNoAlias(cast<Value>(CI), ~0U, P, ~0U)) {
+ PassedAsArg = true;
break;
- case Intrinsic::lifetime_start:
- case Intrinsic::lifetime_end:
- case Intrinsic::invariant_start: {
- unsigned PtrSize = cast<ConstantInt>(II->getOperand(1))->getZExtValue();
- if (alias(II->getOperand(2), PtrSize, P, Size) == NoAlias)
- return NoModRef;
- }
- break;
- case Intrinsic::invariant_end: {
- unsigned PtrSize = cast<ConstantInt>(II->getOperand(2))->getZExtValue();
- if (alias(II->getOperand(3), PtrSize, P, Size) == NoAlias)
- return NoModRef;
- }
- break;
}
}
+
+ if (!PassedAsArg)
+ return NoModRef;
+ }
+
+ // Finally, handle specific knowledge of intrinsics.
+ IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
+ if (II == 0)
+ return AliasAnalysis::getModRefInfo(CS, P, Size);
+
+ switch (II->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove: {
+ unsigned Len = ~0U;
+ if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getOperand(3)))
+ Len = LenCI->getZExtValue();
+ Value *Dest = II->getOperand(1);
+ Value *Src = II->getOperand(2);
+ if (isNoAlias(Dest, Len, P, Size)) {
+ if (isNoAlias(Src, Len, P, Size))
+ return NoModRef;
+ return Ref;
+ }
+ break;
+ }
+ case Intrinsic::memset:
+ // Since memset is 'accesses arguments' only, the AliasAnalysis base class
+ // will handle it for the variable length case.
+ if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getOperand(3))) {
+ unsigned Len = LenCI->getZExtValue();
+ Value *Dest = II->getOperand(1);
+ if (isNoAlias(Dest, Len, P, Size))
+ return NoModRef;
+ }
+ break;
+ case Intrinsic::atomic_cmp_swap:
+ case Intrinsic::atomic_swap:
+ case Intrinsic::atomic_load_add:
+ case Intrinsic::atomic_load_sub:
+ case Intrinsic::atomic_load_and:
+ case Intrinsic::atomic_load_nand:
+ case Intrinsic::atomic_load_or:
+ case Intrinsic::atomic_load_xor:
+ case Intrinsic::atomic_load_max:
+ case Intrinsic::atomic_load_min:
+ case Intrinsic::atomic_load_umax:
+ case Intrinsic::atomic_load_umin:
+ if (TD) {
+ Value *Op1 = II->getOperand(1);
+ unsigned Op1Size = TD->getTypeStoreSize(Op1->getType());
+ if (isNoAlias(Op1, Op1Size, P, Size))
+ return NoModRef;
+ }
+ break;
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::invariant_start: {
+ unsigned PtrSize = cast<ConstantInt>(II->getOperand(1))->getZExtValue();
+ if (isNoAlias(II->getOperand(2), PtrSize, P, Size))
+ return NoModRef;
+ break;
+ }
+ case Intrinsic::invariant_end: {
+ unsigned PtrSize = cast<ConstantInt>(II->getOperand(2))->getZExtValue();
+ if (isNoAlias(II->getOperand(3), PtrSize, P, Size))
+ return NoModRef;
+ break;
+ }
}
// The AliasAnalysis base class has some smarts, let's use them.
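The memcpy/memmove case above reduces to a two-bit decision once the isNoAlias() queries are answered; a tiny sketch (not the LLVM code) of that rule:

#include <cstdio>

enum ModRefResult { NoModRef, Ref, Mod, ModRef };

// PMayAliasDest/PMayAliasSrc are the negations of the isNoAlias() answers.
static ModRefResult memcpyModRef(bool PMayAliasDest, bool PMayAliasSrc) {
  if (!PMayAliasDest)
    return PMayAliasSrc ? Ref : NoModRef; // P can at most be read as source
  return ModRef;                          // P may be written via the dest
}

int main() {
  std::printf("%d\n", memcpyModRef(false, true));  // 1 (Ref)
  std::printf("%d\n", memcpyModRef(false, false)); // 0 (NoModRef)
  return 0;
}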
@@ -389,141 +376,157 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS1, CallSite CS2) {
return NoAA::getModRefInfo(CS1, CS2);
}
-// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
-// against another.
-//
+/// GetIndiceDifference - Dest and Src are the variable indices from two
+/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
+/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
+/// difference between the two pointers.
+static void GetIndiceDifference(
+ SmallVectorImpl<std::pair<const Value*, int64_t> > &Dest,
+ const SmallVectorImpl<std::pair<const Value*, int64_t> > &Src) {
+ if (Src.empty()) return;
+
+ for (unsigned i = 0, e = Src.size(); i != e; ++i) {
+ const Value *V = Src[i].first;
+ int64_t Scale = Src[i].second;
+
+ // Find V in Dest. This is N^2, but pointer indices almost never have more
+ // than a few variable indexes.
+ for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
+ if (Dest[j].first != V) continue;
+
+ // If we found it, subtract off Scale V's from the entry in Dest. If it
+ // goes to zero, remove the entry.
+ if (Dest[j].second != Scale)
+ Dest[j].second -= Scale;
+ else
+ Dest.erase(Dest.begin()+j);
+ Scale = 0;
+ break;
+ }
+
+ // If we didn't consume this entry, add it to the end of the Dest list.
+ if (Scale)
+ Dest.push_back(std::make_pair(V, -Scale));
+ }
+}
+
+/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
+/// against another pointer. We know that V1 is a GEP, but we don't know
+/// anything about V2. UnderlyingV1 is GEP1->getUnderlyingObject(),
+/// UnderlyingV2 is the same for V2.
+///
AliasAnalysis::AliasResult
-BasicAliasAnalysis::aliasGEP(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size) {
+BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
+ const Value *V2, unsigned V2Size,
+ const Value *UnderlyingV1,
+ const Value *UnderlyingV2) {
+ int64_t GEP1BaseOffset;
+ SmallVector<std::pair<const Value*, int64_t>, 4> GEP1VariableIndices;
+
// If we have two gep instructions with must-alias'ing base pointers, figure
// out if the indexes to the GEP tell us anything about the derived pointer.
- // Note that we also handle chains of getelementptr instructions as well as
- // constant expression getelementptrs here.
- //
- if (isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
- const User *GEP1 = cast<User>(V1);
- const User *GEP2 = cast<User>(V2);
+ if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
+ // Do the base pointers alias?
+ AliasResult BaseAlias = aliasCheck(UnderlyingV1, ~0U, UnderlyingV2, ~0U);
- // If V1 and V2 are identical GEPs, just recurse down on both of them.
- // This allows us to analyze things like:
- // P = gep A, 0, i, 1
- // Q = gep B, 0, i, 1
- // by just analyzing A and B. This is even safe for variable indices.
- if (GEP1->getType() == GEP2->getType() &&
- GEP1->getNumOperands() == GEP2->getNumOperands() &&
- GEP1->getOperand(0)->getType() == GEP2->getOperand(0)->getType() &&
- // All operands are the same, ignoring the base.
- std::equal(GEP1->op_begin()+1, GEP1->op_end(), GEP2->op_begin()+1))
- return aliasCheck(GEP1->getOperand(0), V1Size,
- GEP2->getOperand(0), V2Size);
+ // If we get a No or May, then return it immediately, no amount of analysis
+ // will improve this situation.
+ if (BaseAlias != MustAlias) return BaseAlias;
- // Drill down into the first non-gep value, to test for must-aliasing of
- // the base pointers.
- while (isa<GEPOperator>(GEP1->getOperand(0)) &&
- GEP1->getOperand(1) ==
- Constant::getNullValue(GEP1->getOperand(1)->getType()))
- GEP1 = cast<User>(GEP1->getOperand(0));
- const Value *BasePtr1 = GEP1->getOperand(0);
-
- while (isa<GEPOperator>(GEP2->getOperand(0)) &&
- GEP2->getOperand(1) ==
- Constant::getNullValue(GEP2->getOperand(1)->getType()))
- GEP2 = cast<User>(GEP2->getOperand(0));
- const Value *BasePtr2 = GEP2->getOperand(0);
-
- // Do the base pointers alias?
- AliasResult BaseAlias = aliasCheck(BasePtr1, ~0U, BasePtr2, ~0U);
- if (BaseAlias == NoAlias) return NoAlias;
- if (BaseAlias == MustAlias) {
- // If the base pointers alias each other exactly, check to see if we can
- // figure out anything about the resultant pointers, to try to prove
- // non-aliasing.
-
- // Collect all of the chained GEP operands together into one simple place
- SmallVector<Value*, 16> GEP1Ops, GEP2Ops;
- BasePtr1 = GetGEPOperands(V1, GEP1Ops);
- BasePtr2 = GetGEPOperands(V2, GEP2Ops);
-
- // If GetGEPOperands were able to fold to the same must-aliased pointer,
- // do the comparison.
- if (BasePtr1 == BasePtr2) {
- AliasResult GAlias =
- CheckGEPInstructions(BasePtr1->getType(),
- &GEP1Ops[0], GEP1Ops.size(), V1Size,
- BasePtr2->getType(),
- &GEP2Ops[0], GEP2Ops.size(), V2Size);
- if (GAlias != MayAlias)
- return GAlias;
- }
+ // Otherwise, we have a MustAlias. Since the base pointers alias each other
+ // exactly, see if the computed offset from the common pointer tells us
+ // about the relation of the resulting pointer.
+ const Value *GEP1BasePtr =
+ DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+
+ int64_t GEP2BaseOffset;
+ SmallVector<std::pair<const Value*, int64_t>, 4> GEP2VariableIndices;
+ const Value *GEP2BasePtr =
+ DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
+
+ // If DecomposeGEPExpression isn't able to look all the way through the
+ // addressing operation, we must not have TD and this is too complex for us
+ // to handle without it.
+ if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
+ assert(TD == 0 &&
+ "DecomposeGEPExpression and getUnderlyingObject disagree!");
+ return MayAlias;
}
- }
+
+ // Subtract the GEP2 pointer from the GEP1 pointer to find out their
+ // symbolic difference.
+ GEP1BaseOffset -= GEP2BaseOffset;
+ GetIndiceDifference(GEP1VariableIndices, GEP2VariableIndices);
+
+ } else {
+ // Check to see if these two pointers are related by the getelementptr
+ // instruction. If one pointer is a GEP with a non-zero index of the other
+ // pointer, we know they cannot alias.
- // Check to see if these two pointers are related by a getelementptr
- // instruction. If one pointer is a GEP with a non-zero index of the other
- // pointer, we know they cannot alias.
- //
- if (V1Size == ~0U || V2Size == ~0U)
- return MayAlias;
+ // If both accesses are unknown size, we can't do anything useful here.
+ if (V1Size == ~0U && V2Size == ~0U)
+ return MayAlias;
- SmallVector<Value*, 16> GEPOperands;
- const Value *BasePtr = GetGEPOperands(V1, GEPOperands);
-
- AliasResult R = aliasCheck(BasePtr, ~0U, V2, V2Size);
- if (R != MustAlias)
- // If V2 may alias GEP base pointer, conservatively returns MayAlias.
- // If V2 is known not to alias GEP base pointer, then the two values
- // cannot alias per GEP semantics: "A pointer value formed from a
- // getelementptr instruction is associated with the addresses associated
- // with the first operand of the getelementptr".
- return R;
-
- // If there is at least one non-zero constant index, we know they cannot
- // alias.
- bool ConstantFound = false;
- bool AllZerosFound = true;
- for (unsigned i = 0, e = GEPOperands.size(); i != e; ++i)
- if (const Constant *C = dyn_cast<Constant>(GEPOperands[i])) {
- if (!C->isNullValue()) {
- ConstantFound = true;
- AllZerosFound = false;
- break;
- }
- } else {
- AllZerosFound = false;
+ AliasResult R = aliasCheck(UnderlyingV1, ~0U, V2, V2Size);
+ if (R != MustAlias)
+ // If V2 may alias GEP base pointer, conservatively returns MayAlias.
+ // If V2 is known not to alias GEP base pointer, then the two values
+ // cannot alias per GEP semantics: "A pointer value formed from a
+ // getelementptr instruction is associated with the addresses associated
+ // with the first operand of the getelementptr".
+ return R;
+
+ const Value *GEP1BasePtr =
+ DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+
+ // If DecomposeGEPExpression isn't able to look all the way through the
+ // addressing operation, we must not have TD and this is too complex for us
+ // to handle without it.
+ if (GEP1BasePtr != UnderlyingV1) {
+ assert(TD == 0 &&
+ "DecomposeGEPExpression and getUnderlyingObject disagree!");
+ return MayAlias;
}
-
- // If we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2 must aliases
- // the ptr, the end result is a must alias also.
- if (AllZerosFound)
+ }
+
+ // In the two-GEP case, if there is no difference in the offsets of the
+ // computed pointers, the resultant pointers are a must alias. This
+ // happens when we have two lexically identical GEPs (for example).
+ //
+ // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
+ // must aliases the GEP, the end result is a must alias also.
+ if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty())
return MustAlias;
- if (ConstantFound) {
- if (V2Size <= 1 && V1Size <= 1) // Just pointer check?
+ // If we have a known constant offset, see if this offset is larger than the
+ // access size being queried. If so, and if no variable indices can remove
+ // pieces of this constant, then we know we have a no-alias. For example,
+ // &A[100] != &A.
+
+ // In order to handle cases like &A[100][i] where i is an out of range
+ // subscript, we have to ignore all constant offset pieces that are a multiple
+ // of a scaled index. Do this by removing constant offsets that are a
+ // multiple of any of our variable indices. This allows us to transform
+ // things like &A[i][1] because i has a stride of (e.g.) 8 bytes but the 1
+ // provides an offset of 4 bytes (assuming a <= 4 byte access).
+ for (unsigned i = 0, e = GEP1VariableIndices.size();
+ i != e && GEP1BaseOffset;++i)
+ if (int64_t RemovedOffset = GEP1BaseOffset/GEP1VariableIndices[i].second)
+ GEP1BaseOffset -= RemovedOffset*GEP1VariableIndices[i].second;
+
+ // If our known offset is bigger than the access size, we know we don't have
+ // an alias.
+ if (GEP1BaseOffset) {
+ if (GEP1BaseOffset >= (int64_t)V2Size ||
+ GEP1BaseOffset <= -(int64_t)V1Size)
return NoAlias;
-
- // Otherwise we have to check to see that the distance is more than
- // the size of the argument... build an index vector that is equal to
- // the arguments provided, except substitute 0's for any variable
- // indexes we find...
- if (TD &&
- cast<PointerType>(BasePtr->getType())->getElementType()->isSized()) {
- for (unsigned i = 0; i != GEPOperands.size(); ++i)
- if (!isa<ConstantInt>(GEPOperands[i]))
- GEPOperands[i] = Constant::getNullValue(GEPOperands[i]->getType());
- int64_t Offset = TD->getIndexedOffset(BasePtr->getType(),
- &GEPOperands[0],
- GEPOperands.size());
-
- if (Offset >= (int64_t)V2Size || Offset <= -(int64_t)V1Size)
- return NoAlias;
- }
}
-
+
return MayAlias;
}
-// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select instruction
-// against another.
+/// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select
+/// instruction against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasSelect(const SelectInst *SI, unsigned SISize,
const Value *V2, unsigned V2Size) {
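The final constant-offset test in aliasGEP is a plain range-disjointness check; a one-function sketch (not the LLVM code), where Off is GEP1BaseOffset after the variable-index cleanup above:

#include <cstdint>
#include <cstdio>

// V1 begins Off bytes past V2. The accesses cannot overlap if V1 starts at
// or beyond the end of V2's access, or ends at or before its beginning.
static bool offsetProvesNoAlias(int64_t Off, uint64_t V1Size,
                                uint64_t V2Size) {
  return Off >= (int64_t)V2Size || Off <= -(int64_t)V1Size;
}

int main() {
  std::printf("%d\n", offsetProvesNoAlias(400, 4, 4) ? 1 : 0); // 1: &A[100] vs &A
  std::printf("%d\n", offsetProvesNoAlias(2, 4, 4) ? 1 : 0);   // 0: overlap
  return 0;
}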
@@ -683,22 +686,31 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,
(V2Size != ~0U && isObjectSmallerThan(O1, V2Size, *TD)))
return NoAlias;
- // If one pointer is the result of a call/invoke and the other is a
+ // If one pointer is the result of a call/invoke or load and the other is a
// non-escaping local object, then we know the object couldn't escape to a
- // point where the call could return it.
- if ((isa<CallInst>(O1) || isa<InvokeInst>(O1)) &&
- isNonEscapingLocalObject(O2) && O1 != O2)
- return NoAlias;
- if ((isa<CallInst>(O2) || isa<InvokeInst>(O2)) &&
- isNonEscapingLocalObject(O1) && O1 != O2)
- return NoAlias;
+ // point where the call could return it. The load case works because
+ // isNonEscapingLocalObject considers all stores to be escapes (it
+ // passes true for the StoreCaptures argument to PointerMayBeCaptured).
+ if (O1 != O2) {
+ if ((isa<CallInst>(O1) || isa<InvokeInst>(O1) || isa<LoadInst>(O1) ||
+ isa<Argument>(O1)) &&
+ isNonEscapingLocalObject(O2))
+ return NoAlias;
+ if ((isa<CallInst>(O2) || isa<InvokeInst>(O2) || isa<LoadInst>(O2) ||
+ isa<Argument>(O2)) &&
+ isNonEscapingLocalObject(O1))
+ return NoAlias;
+ }
+ // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
+ // GEP can't simplify, we don't even look at the PHI cases.
if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
std::swap(V1, V2);
std::swap(V1Size, V2Size);
+ std::swap(O1, O2);
}
- if (isa<GEPOperator>(V1))
- return aliasGEP(V1, V1Size, V2, V2Size);
+ if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1))
+ return aliasGEP(GV1, V1Size, V2, V2Size, O1, O2);
if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
std::swap(V1, V2);
@@ -717,351 +729,5 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,
return MayAlias;
}
-// This function is used to determine if the indices of two GEP instructions are
-// equal. V1 and V2 are the indices.
-static bool IndexOperandsEqual(Value *V1, Value *V2) {
- if (V1->getType() == V2->getType())
- return V1 == V2;
- if (Constant *C1 = dyn_cast<Constant>(V1))
- if (Constant *C2 = dyn_cast<Constant>(V2)) {
- // Sign extend the constants to long types, if necessary
- if (C1->getType() != Type::getInt64Ty(C1->getContext()))
- C1 = ConstantExpr::getSExt(C1, Type::getInt64Ty(C1->getContext()));
- if (C2->getType() != Type::getInt64Ty(C1->getContext()))
- C2 = ConstantExpr::getSExt(C2, Type::getInt64Ty(C1->getContext()));
- return C1 == C2;
- }
- return false;
-}
-
-/// CheckGEPInstructions - Check two GEP instructions with known must-aliasing
-/// base pointers. This checks to see if the index expressions preclude the
-/// pointers from aliasing...
-AliasAnalysis::AliasResult
-BasicAliasAnalysis::CheckGEPInstructions(
- const Type* BasePtr1Ty, Value **GEP1Ops, unsigned NumGEP1Ops, unsigned G1S,
- const Type *BasePtr2Ty, Value **GEP2Ops, unsigned NumGEP2Ops, unsigned G2S) {
- // We currently can't handle the case when the base pointers have different
- // primitive types. Since this is uncommon anyway, we are happy being
- // extremely conservative.
- if (BasePtr1Ty != BasePtr2Ty)
- return MayAlias;
-
- const PointerType *GEPPointerTy = cast<PointerType>(BasePtr1Ty);
-
- // Find the (possibly empty) initial sequence of equal values... which are not
- // necessarily constants.
- unsigned NumGEP1Operands = NumGEP1Ops, NumGEP2Operands = NumGEP2Ops;
- unsigned MinOperands = std::min(NumGEP1Operands, NumGEP2Operands);
- unsigned MaxOperands = std::max(NumGEP1Operands, NumGEP2Operands);
- unsigned UnequalOper = 0;
- while (UnequalOper != MinOperands &&
- IndexOperandsEqual(GEP1Ops[UnequalOper], GEP2Ops[UnequalOper])) {
- // Advance through the type as we go...
- ++UnequalOper;
- if (const CompositeType *CT = dyn_cast<CompositeType>(BasePtr1Ty))
- BasePtr1Ty = CT->getTypeAtIndex(GEP1Ops[UnequalOper-1]);
- else {
- // If all operands equal each other, then the derived pointers must
- // alias each other...
- BasePtr1Ty = 0;
- assert(UnequalOper == NumGEP1Operands && UnequalOper == NumGEP2Operands &&
- "Ran out of type nesting, but not out of operands?");
- return MustAlias;
- }
- }
-
- // If we have seen all constant operands, and run out of indexes on one of the
- // getelementptrs, check to see if the tail of the leftover one is all zeros.
- // If so, return mustalias.
- if (UnequalOper == MinOperands) {
- if (NumGEP1Ops < NumGEP2Ops) {
- std::swap(GEP1Ops, GEP2Ops);
- std::swap(NumGEP1Ops, NumGEP2Ops);
- }
-
- bool AllAreZeros = true;
- for (unsigned i = UnequalOper; i != MaxOperands; ++i)
- if (!isa<Constant>(GEP1Ops[i]) ||
- !cast<Constant>(GEP1Ops[i])->isNullValue()) {
- AllAreZeros = false;
- break;
- }
- if (AllAreZeros) return MustAlias;
- }
-
-
- // So now we know that the indexes derived from the base pointers,
- // which are known to alias, are different. We can still determine a
- // no-alias result if there are differing constant pairs in the index
- // chain. For example:
- // A[i][0] != A[j][1] iff (&A[0][1]-&A[0][0] >= std::max(G1S, G2S))
- //
- // We have to be careful here about array accesses. In particular, consider:
- // A[1][0] vs A[0][i]
- // In this case, we don't *know* that the array will be accessed in bounds:
- // the index could even be negative. Because of this, we have to
- // conservatively *give up* and return may alias. We disregard differing
- // array subscripts that are followed by a variable index without going
- // through a struct.
- //
- unsigned SizeMax = std::max(G1S, G2S);
- if (SizeMax == ~0U) return MayAlias; // Avoid frivolous work.
-
- // Scan for the first operand that is constant and unequal in the
- // two getelementptrs...
- unsigned FirstConstantOper = UnequalOper;
- for (; FirstConstantOper != MinOperands; ++FirstConstantOper) {
- const Value *G1Oper = GEP1Ops[FirstConstantOper];
- const Value *G2Oper = GEP2Ops[FirstConstantOper];
-
- if (G1Oper != G2Oper) // Found non-equal constant indexes...
- if (Constant *G1OC = dyn_cast<ConstantInt>(const_cast<Value*>(G1Oper)))
- if (Constant *G2OC = dyn_cast<ConstantInt>(const_cast<Value*>(G2Oper))){
- if (G1OC->getType() != G2OC->getType()) {
- // Sign extend both operands to long.
- const Type *Int64Ty = Type::getInt64Ty(G1OC->getContext());
- if (G1OC->getType() != Int64Ty)
- G1OC = ConstantExpr::getSExt(G1OC, Int64Ty);
- if (G2OC->getType() != Int64Ty)
- G2OC = ConstantExpr::getSExt(G2OC, Int64Ty);
- GEP1Ops[FirstConstantOper] = G1OC;
- GEP2Ops[FirstConstantOper] = G2OC;
- }
-
- if (G1OC != G2OC) {
- // Handle the "be careful" case above: if this is an array/vector
- // subscript, scan for a subsequent variable array index.
- if (const SequentialType *STy =
- dyn_cast<SequentialType>(BasePtr1Ty)) {
- const Type *NextTy = STy;
- bool isBadCase = false;
-
- for (unsigned Idx = FirstConstantOper;
- Idx != MinOperands && isa<SequentialType>(NextTy); ++Idx) {
- const Value *V1 = GEP1Ops[Idx], *V2 = GEP2Ops[Idx];
- if (!isa<Constant>(V1) || !isa<Constant>(V2)) {
- isBadCase = true;
- break;
- }
- // If the array is indexed beyond the bounds of the static type
- // at this level, it will also fall into the "be careful" case.
- // It would theoretically be possible to analyze these cases,
- // but for now just be conservatively correct.
- if (const ArrayType *ATy = dyn_cast<ArrayType>(STy))
- if (cast<ConstantInt>(G1OC)->getZExtValue() >=
- ATy->getNumElements() ||
- cast<ConstantInt>(G2OC)->getZExtValue() >=
- ATy->getNumElements()) {
- isBadCase = true;
- break;
- }
- if (const VectorType *VTy = dyn_cast<VectorType>(STy))
- if (cast<ConstantInt>(G1OC)->getZExtValue() >=
- VTy->getNumElements() ||
- cast<ConstantInt>(G2OC)->getZExtValue() >=
- VTy->getNumElements()) {
- isBadCase = true;
- break;
- }
- STy = cast<SequentialType>(NextTy);
- NextTy = cast<SequentialType>(NextTy)->getElementType();
- }
-
- if (isBadCase) G1OC = 0;
- }
-
- // Make sure they are comparable (ie, not constant expressions), and
- // make sure the GEP with the smaller leading constant is GEP1.
- if (G1OC) {
- Constant *Compare = ConstantExpr::getICmp(ICmpInst::ICMP_SGT,
- G1OC, G2OC);
- if (ConstantInt *CV = dyn_cast<ConstantInt>(Compare)) {
- if (CV->getZExtValue()) { // If they are comparable and G2 > G1
- std::swap(GEP1Ops, GEP2Ops); // Make GEP1 < GEP2
- std::swap(NumGEP1Ops, NumGEP2Ops);
- }
- break;
- }
- }
- }
- }
- BasePtr1Ty = cast<CompositeType>(BasePtr1Ty)->getTypeAtIndex(G1Oper);
- }
-
- // No shared constant operands, and we ran out of common operands. At this
- // point, the GEP instructions have run through all of their operands, and we
- // haven't found evidence that there are any deltas between the GEP's.
- // However, one GEP may have more operands than the other. If this is the
- // case, there may still be hope. Check this now.
- if (FirstConstantOper == MinOperands) {
- // Without TargetData, we won't know what the offsets are.
- if (!TD)
- return MayAlias;
-
- // Make GEP1Ops be the longer one if there is a longer one.
- if (NumGEP1Ops < NumGEP2Ops) {
- std::swap(GEP1Ops, GEP2Ops);
- std::swap(NumGEP1Ops, NumGEP2Ops);
- }
-
- // Is there anything to check?
- if (NumGEP1Ops > MinOperands) {
- for (unsigned i = FirstConstantOper; i != MaxOperands; ++i)
- if (isa<ConstantInt>(GEP1Ops[i]) &&
- !cast<ConstantInt>(GEP1Ops[i])->isZero()) {
- // Yup, there's a constant in the tail. Set all variables to
- // constants in the GEP instruction to make it suitable for
- // TargetData::getIndexedOffset.
- for (i = 0; i != MaxOperands; ++i)
- if (!isa<ConstantInt>(GEP1Ops[i]))
- GEP1Ops[i] = Constant::getNullValue(GEP1Ops[i]->getType());
- // Okay, now get the offset. This is the relative offset for the full
- // instruction.
- int64_t Offset1 = TD->getIndexedOffset(GEPPointerTy, GEP1Ops,
- NumGEP1Ops);
-
- // Now check without any constants at the end.
- int64_t Offset2 = TD->getIndexedOffset(GEPPointerTy, GEP1Ops,
- MinOperands);
-
- // Make sure we compare the absolute difference.
- if (Offset1 > Offset2)
- std::swap(Offset1, Offset2);
-
- // If the tail provided a big enough offset, return noalias!
- if ((uint64_t)(Offset2-Offset1) >= SizeMax)
- return NoAlias;
- // Otherwise break - we don't look for another constant in the tail.
- break;
- }
- }
-
- // Couldn't find anything useful.
- return MayAlias;
- }
-
- // If there are non-equal constants arguments, then we can figure
- // out a minimum known delta between the two index expressions... at
- // this point we know that the first constant index of GEP1 is less
- // than the first constant index of GEP2.
-
- // Advance BasePtr[12]Ty over this first differing constant operand.
- BasePtr2Ty = cast<CompositeType>(BasePtr1Ty)->
- getTypeAtIndex(GEP2Ops[FirstConstantOper]);
- BasePtr1Ty = cast<CompositeType>(BasePtr1Ty)->
- getTypeAtIndex(GEP1Ops[FirstConstantOper]);
-
- // We are going to be using TargetData::getIndexedOffset to determine the
- // offset that each of the GEP's is reaching. To do this, we have to convert
- // all variable references to constant references. To do this, we convert the
- // initial sequence of array subscripts into constant zeros to start with.
- const Type *ZeroIdxTy = GEPPointerTy;
- for (unsigned i = 0; i != FirstConstantOper; ++i) {
- if (!isa<StructType>(ZeroIdxTy))
- GEP1Ops[i] = GEP2Ops[i] =
- Constant::getNullValue(Type::getInt32Ty(ZeroIdxTy->getContext()));
-
- if (const CompositeType *CT = dyn_cast<CompositeType>(ZeroIdxTy))
- ZeroIdxTy = CT->getTypeAtIndex(GEP1Ops[i]);
- }
-
- // We know that GEP1Ops[FirstConstantOper] & GEP2Ops[FirstConstantOper] are ok
-
- // Loop over the rest of the operands...
- for (unsigned i = FirstConstantOper+1; i != MaxOperands; ++i) {
- const Value *Op1 = i < NumGEP1Ops ? GEP1Ops[i] : 0;
- const Value *Op2 = i < NumGEP2Ops ? GEP2Ops[i] : 0;
- // If they are equal, use a zero index...
- if (Op1 == Op2 && BasePtr1Ty == BasePtr2Ty) {
- if (!isa<ConstantInt>(Op1))
- GEP1Ops[i] = GEP2Ops[i] = Constant::getNullValue(Op1->getType());
- // Otherwise, just keep the constants we have.
- } else {
- if (Op1) {
- if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
- // If this is an array index, make sure the array element is in range.
- if (const ArrayType *AT = dyn_cast<ArrayType>(BasePtr1Ty)) {
- if (Op1C->getZExtValue() >= AT->getNumElements())
- return MayAlias; // Be conservative with out-of-range accesses
- } else if (const VectorType *VT = dyn_cast<VectorType>(BasePtr1Ty)) {
- if (Op1C->getZExtValue() >= VT->getNumElements())
- return MayAlias; // Be conservative with out-of-range accesses
- }
-
- } else {
- // GEP1 is known to produce a value less than GEP2. To be
- // conservatively correct, we must assume the largest possible
- // constant is used in this position. This cannot be the initial
- // index to the GEP instructions (because we know we have at least one
- // element before this one with the different constant arguments), so
- // we know that the current index must be into either a struct or
- // array. Because we know it's not constant, this cannot be a
- // structure index. Because of this, we can calculate the maximum
- // value possible.
- //
- if (const ArrayType *AT = dyn_cast<ArrayType>(BasePtr1Ty))
- GEP1Ops[i] =
- ConstantInt::get(Type::getInt64Ty(AT->getContext()),
- AT->getNumElements()-1);
- else if (const VectorType *VT = dyn_cast<VectorType>(BasePtr1Ty))
- GEP1Ops[i] =
- ConstantInt::get(Type::getInt64Ty(VT->getContext()),
- VT->getNumElements()-1);
- }
- }
-
- if (Op2) {
- if (const ConstantInt *Op2C = dyn_cast<ConstantInt>(Op2)) {
- // If this is an array index, make sure the array element is in range.
- if (const ArrayType *AT = dyn_cast<ArrayType>(BasePtr2Ty)) {
- if (Op2C->getZExtValue() >= AT->getNumElements())
- return MayAlias; // Be conservative with out-of-range accesses
- } else if (const VectorType *VT = dyn_cast<VectorType>(BasePtr2Ty)) {
- if (Op2C->getZExtValue() >= VT->getNumElements())
- return MayAlias; // Be conservative with out-of-range accesses
- }
- } else { // Conservatively assume the minimum value for this index
- GEP2Ops[i] = Constant::getNullValue(Op2->getType());
- }
- }
- }
-
- if (BasePtr1Ty && Op1) {
- if (const CompositeType *CT = dyn_cast<CompositeType>(BasePtr1Ty))
- BasePtr1Ty = CT->getTypeAtIndex(GEP1Ops[i]);
- else
- BasePtr1Ty = 0;
- }
-
- if (BasePtr2Ty && Op2) {
- if (const CompositeType *CT = dyn_cast<CompositeType>(BasePtr2Ty))
- BasePtr2Ty = CT->getTypeAtIndex(GEP2Ops[i]);
- else
- BasePtr2Ty = 0;
- }
- }
-
- if (TD && GEPPointerTy->getElementType()->isSized()) {
- int64_t Offset1 =
- TD->getIndexedOffset(GEPPointerTy, GEP1Ops, NumGEP1Ops);
- int64_t Offset2 =
- TD->getIndexedOffset(GEPPointerTy, GEP2Ops, NumGEP2Ops);
- assert(Offset1 != Offset2 &&
- "There is at least one different constant here!");
-
- // Make sure we compare the absolute difference.
- if (Offset1 > Offset2)
- std::swap(Offset1, Offset2);
-
- if ((uint64_t)(Offset2-Offset1) >= SizeMax) {
- //cerr << "Determined that these two GEP's don't alias ["
- // << SizeMax << " bytes]: \n" << *GEP1 << *GEP2;
- return NoAlias;
- }
- }
- return MayAlias;
-}
-
-// Make sure that anything that uses AliasAnalysis pulls in this file...
+// Make sure that anything that uses AliasAnalysis pulls in this file.
DEFINING_FILE_FOR(BasicAliasAnalysis)
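The new GetIndiceDifference is self-contained enough to exercise directly; below is a standalone copy of its logic (pairs of (index value, scale), exactly as in the hunk) with a small worked example: (8*i) minus (8*i + 4*j) leaves only -4*j.

#include <cstdio>
#include <utility>
#include <vector>

typedef std::vector<std::pair<const void*, long long> > IndexList;

// Standalone copy of the GetIndiceDifference logic from the hunk above:
// subtract Src's scaled indices from Dest, dropping entries that cancel.
static void GetIndiceDifference(IndexList &Dest, const IndexList &Src) {
  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const void *V = Src[i].first;
    long long Scale = Src[i].second;
    for (unsigned j = 0, f = Dest.size(); j != f; ++j) {
      if (Dest[j].first != V) continue;
      if (Dest[j].second != Scale)
        Dest[j].second -= Scale;    // partial cancellation
      else
        Dest.erase(Dest.begin()+j); // exact cancellation: drop the entry
      Scale = 0;
      break;
    }
    if (Scale)
      Dest.push_back(std::make_pair(V, -Scale)); // unmatched: negate and keep
  }
}

int main() {
  int i_, j_; // stand-ins for the variable index values
  IndexList GEP1, GEP2;
  GEP1.push_back(std::make_pair(&i_, 8LL)); // GEP1: 8*i
  GEP2.push_back(std::make_pair(&i_, 8LL)); // GEP2: 8*i + 4*j
  GEP2.push_back(std::make_pair(&j_, 4LL));
  GetIndiceDifference(GEP1, GEP2);
  // Prints "1 entries, scale -4":
  std::printf("%zu entries, scale %lld\n", GEP1.size(), GEP1[0].second);
  return 0;
}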
diff --git a/lib/Analysis/CaptureTracking.cpp b/lib/Analysis/CaptureTracking.cpp
index f615881..a276c64 100644
--- a/lib/Analysis/CaptureTracking.cpp
+++ b/lib/Analysis/CaptureTracking.cpp
@@ -19,6 +19,7 @@
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Instructions.h"
#include "llvm/Value.h"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CallSite.h"
@@ -28,8 +29,11 @@ using namespace llvm;
/// by the enclosing function (which is required to exist). This routine can
/// be expensive, so consider caching the results. The boolean ReturnCaptures
/// specifies whether returning the value (or part of it) from the function
+/// counts as capturing it or not. The boolean StoreCaptures specifies whether
+/// storing the value (or part of it) into memory anywhere automatically
/// counts as capturing it or not.
-bool llvm::PointerMayBeCaptured(const Value *V, bool ReturnCaptures) {
+bool llvm::PointerMayBeCaptured(const Value *V,
+ bool ReturnCaptures, bool StoreCaptures) {
assert(isa<PointerType>(V->getType()) && "Capture is for pointers only!");
SmallVector<Use*, 16> Worklist;
SmallSet<Use*, 16> Visited;
@@ -53,8 +57,7 @@ bool llvm::PointerMayBeCaptured(const Value *V, bool ReturnCaptures) {
// Not captured if the callee is readonly, doesn't return a copy through
// its return value and doesn't unwind (a readonly function can leak bits
// by throwing an exception or not depending on the input value).
- if (CS.onlyReadsMemory() && CS.doesNotThrow() &&
- I->getType() == Type::getVoidTy(V->getContext()))
+ if (CS.onlyReadsMemory() && CS.doesNotThrow() && I->getType()->isVoidTy())
break;
// Not captured if only passed via 'nocapture' arguments. Note that
@@ -82,7 +85,11 @@ bool llvm::PointerMayBeCaptured(const Value *V, bool ReturnCaptures) {
break;
case Instruction::Store:
if (V == I->getOperand(0))
- // Stored the pointer - it may be captured.
+ // Stored the pointer - conservatively assume it may be captured.
+ // TODO: If StoreCaptures is not true, we could do fancier analysis
+ // to determine whether this store is not actually an escape point.
+ // In that case, BasicAliasAnalysis should be updated as well to
+ // take advantage of this.
return true;
// Storing to the pointee does not cause the pointer to be captured.
break;
@@ -98,6 +105,18 @@ bool llvm::PointerMayBeCaptured(const Value *V, bool ReturnCaptures) {
Worklist.push_back(U);
}
break;
+ case Instruction::ICmp:
+ // Don't count comparisons of a no-alias return value against null as
+ // captures. This allows us to ignore comparisons of malloc results
+ // with null, for example.
+ if (isNoAliasCall(V->stripPointerCasts()))
+ if (ConstantPointerNull *CPN =
+ dyn_cast<ConstantPointerNull>(I->getOperand(1)))
+ if (CPN->getType()->getAddressSpace() == 0)
+ break;
+ // Otherwise, be conservative. There are crazy ways to capture pointers
+ // using comparisons.
+ return true;
default:
// Something else - be conservative and say it is captured.
return true;
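The new ICmp case targets the ubiquitous null check after an allocation; in source terms the pattern looks like the following (a sketch, not LLVM code; the IR-level compare is an icmp of the no-alias call result against null in address space 0):

#include <cstdio>
#include <cstdlib>

int main() {
  // malloc is a no-alias call; comparing its result against null lowers to
  //   %c = icmp eq i8* %P, null
  // which, after this change, no longer counts as capturing %P.
  void *P = std::malloc(16);
  if (P == NULL)
    return 1;
  std::free(P);
  std::puts("allocation survived the null check uncaptured");
  return 0;
}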
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 1cdadbf..96f738e 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -564,6 +564,7 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
// we eliminate over-indexing of the notional static type array bounds.
// This makes it easy to determine if the getelementptr is "inbounds".
// Also, this helps GlobalOpt do SROA on GlobalVariables.
+ Ptr = cast<Constant>(Ptr->stripPointerCasts());
const Type *Ty = Ptr->getType();
SmallVector<Constant*, 32> NewIdxs;
do {
@@ -671,8 +672,13 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, const TargetData *TD) {
Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE,
const TargetData *TD) {
SmallVector<Constant*, 8> Ops;
- for (User::op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i)
- Ops.push_back(cast<Constant>(*i));
+ for (User::op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i) {
+ Constant *NewC = cast<Constant>(*i);
+ // Recursively fold the ConstantExpr's operands.
+ if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC))
+ NewC = ConstantFoldConstantExpression(NewCE, TD);
+ Ops.push_back(NewC);
+ }
if (CE->isCompare())
return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
@@ -687,6 +693,10 @@ Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE,
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
///
+/// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/etc
+/// information, due to only being passed an opcode and operands. Constant
+/// folding using this function strips this information.
+///
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
Constant* const* Ops, unsigned NumOps,
const TargetData *TD) {
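The ConstantFolding change makes folding recurse into operands that are themselves constant expressions before folding the parent. A minimal standalone analogue (plain C++, hypothetical Expr type, not the LLVM API) of that recursion:

#include <cstdio>
#include <vector>

struct Expr {
  char Op;               // '+' or '*' for a compound node, 0 for a leaf
  int Val;               // the constant value of a leaf
  std::vector<Expr> Ops; // operands of a compound node
};

// Fold the operands first -- the recursion the hunk adds -- then the node.
static int fold(const Expr &E) {
  if (E.Op == 0)
    return E.Val;
  int R = (E.Op == '+') ? 0 : 1;
  for (unsigned i = 0, e = E.Ops.size(); i != e; ++i) {
    int Sub = fold(E.Ops[i]); // recursively fold the operand expression
    R = (E.Op == '+') ? R + Sub : R * Sub;
  }
  return R;
}

int main() {
  Expr Two = {0, 2, std::vector<Expr>()};
  Expr Three = {0, 3, std::vector<Expr>()};
  Expr Four = {0, 4, std::vector<Expr>()};
  Expr Mul = {'*', 0, std::vector<Expr>()};
  Mul.Ops.push_back(Three);
  Mul.Ops.push_back(Four);
  Expr Sum = {'+', 0, std::vector<Expr>()};
  Sum.Ops.push_back(Two);
  Sum.Ops.push_back(Mul);
  std::printf("%d\n", fold(Sum)); // 2 + 3*4 = 14
  return 0;
}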
diff --git a/lib/Analysis/DebugInfo.cpp b/lib/Analysis/DebugInfo.cpp
index 8f62245..41d803c 100644
--- a/lib/Analysis/DebugInfo.cpp
+++ b/lib/Analysis/DebugInfo.cpp
@@ -78,19 +78,16 @@ DIDescriptor::DIDescriptor(MDNode *N, unsigned RequiredTag) {
}
}
-const char *
+StringRef
DIDescriptor::getStringField(unsigned Elt) const {
if (DbgNode == 0)
- return NULL;
+ return StringRef();
if (Elt < DbgNode->getNumElements())
- if (MDString *MDS = dyn_cast_or_null<MDString>(DbgNode->getElement(Elt))) {
- if (MDS->getLength() == 0)
- return NULL;
- return MDS->getString().data();
- }
+ if (MDString *MDS = dyn_cast_or_null<MDString>(DbgNode->getElement(Elt)))
+ return MDS->getString();
- return NULL;
+ return StringRef();
}
uint64_t DIDescriptor::getUInt64Field(unsigned Elt) const {
@@ -310,8 +307,8 @@ void DIDerivedType::replaceAllUsesWith(DIDescriptor &D) {
bool DICompileUnit::Verify() const {
if (isNull())
return false;
- const char *N = getFilename();
- if (!N)
+ StringRef N = getFilename();
+ if (N.empty())
return false;
// It is possible that the directory and producer strings are empty.
return true;
@@ -366,7 +363,7 @@ bool DIGlobalVariable::Verify() const {
if (isNull())
return false;
- if (!getDisplayName())
+ if (getDisplayName().empty())
return false;
if (getContext().isNull())
@@ -426,15 +423,15 @@ uint64_t DIDerivedType::getOriginalTypeSize() const {
/// information for the function F.
bool DISubprogram::describes(const Function *F) {
assert (F && "Invalid function");
- const char *Name = getLinkageName();
- if (!Name)
+ StringRef Name = getLinkageName();
+ if (Name.empty())
Name = getName();
- if (strcmp(F->getName().data(), Name) == 0)
+ if (F->getName() == Name)
return true;
return false;
}
-const char *DIScope::getFilename() const {
+StringRef DIScope::getFilename() const {
if (isLexicalBlock())
return DILexicalBlock(DbgNode).getFilename();
else if (isSubprogram())
@@ -443,10 +440,10 @@ const char *DIScope::getFilename() const {
return DICompileUnit(DbgNode).getFilename();
else
assert (0 && "Invalid DIScope!");
- return NULL;
+ return StringRef();
}
-const char *DIScope::getDirectory() const {
+StringRef DIScope::getDirectory() const {
if (isLexicalBlock())
return DILexicalBlock(DbgNode).getDirectory();
else if (isSubprogram())
@@ -455,7 +452,7 @@ const char *DIScope::getDirectory() const {
return DICompileUnit(DbgNode).getDirectory();
else
assert (0 && "Invalid DIScope!");
- return NULL;
+ return StringRef();
}
//===----------------------------------------------------------------------===//
@@ -481,7 +478,8 @@ void DICompileUnit::dump() const {
void DIType::dump() const {
if (isNull()) return;
- if (const char *Res = getName())
+ StringRef Res = getName();
+ if (!Res.empty())
errs() << " [" << Res << "] ";
unsigned Tag = getTag();
@@ -538,7 +536,8 @@ void DICompositeType::dump() const {
/// dump - Print global.
void DIGlobal::dump() const {
- if (const char *Res = getName())
+ StringRef Res = getName();
+ if (!Res.empty())
errs() << " [" << Res << "] ";
unsigned Tag = getTag();
@@ -562,7 +561,8 @@ void DIGlobal::dump() const {
/// dump - Print subprogram.
void DISubprogram::dump() const {
- if (const char *Res = getName())
+ StringRef Res = getName();
+ if (!Res.empty())
errs() << " [" << Res << "] ";
unsigned Tag = getTag();
@@ -590,7 +590,8 @@ void DIGlobalVariable::dump() const {
/// dump - Print variable.
void DIVariable::dump() const {
- if (const char *Res = getName())
+ StringRef Res = getName();
+ if (!Res.empty())
errs() << " [" << Res << "] ";
getCompileUnit().dump();
@@ -651,12 +652,12 @@ DISubrange DIFactory::GetOrCreateSubrange(int64_t Lo, int64_t Hi) {
/// CreateCompileUnit - Create a new descriptor for the specified compile
/// unit. Note that this does not unique compile units within the module.
DICompileUnit DIFactory::CreateCompileUnit(unsigned LangID,
- const char * Filename,
- const char * Directory,
- const char * Producer,
+ StringRef Filename,
+ StringRef Directory,
+ StringRef Producer,
bool isMain,
bool isOptimized,
- const char *Flags,
+ StringRef Flags,
unsigned RunTimeVer) {
Value *Elts[] = {
GetTagConstant(dwarf::DW_TAG_compile_unit),
@@ -675,7 +676,7 @@ DICompileUnit DIFactory::CreateCompileUnit(unsigned LangID,
}
/// CreateEnumerator - Create a single enumerator value.
-DIEnumerator DIFactory::CreateEnumerator(const char * Name, uint64_t Val){
+DIEnumerator DIFactory::CreateEnumerator(StringRef Name, uint64_t Val){
Value *Elts[] = {
GetTagConstant(dwarf::DW_TAG_enumerator),
MDString::get(VMContext, Name),
@@ -687,7 +688,7 @@ DIEnumerator DIFactory::CreateEnumerator(const char * Name, uint64_t Val){
/// CreateBasicType - Create a basic type like int, float, etc.
DIBasicType DIFactory::CreateBasicType(DIDescriptor Context,
- const char * Name,
+ StringRef Name,
DICompileUnit CompileUnit,
unsigned LineNumber,
uint64_t SizeInBits,
@@ -712,7 +713,7 @@ DIBasicType DIFactory::CreateBasicType(DIDescriptor Context,
/// CreateBasicType - Create a basic type like int, float, etc.
DIBasicType DIFactory::CreateBasicTypeEx(DIDescriptor Context,
- const char * Name,
+ StringRef Name,
DICompileUnit CompileUnit,
unsigned LineNumber,
Constant *SizeInBits,
@@ -739,7 +740,7 @@ DIBasicType DIFactory::CreateBasicTypeEx(DIDescriptor Context,
/// pointer, typedef, etc.
DIDerivedType DIFactory::CreateDerivedType(unsigned Tag,
DIDescriptor Context,
- const char * Name,
+ StringRef Name,
DICompileUnit CompileUnit,
unsigned LineNumber,
uint64_t SizeInBits,
@@ -767,7 +768,7 @@ DIDerivedType DIFactory::CreateDerivedType(unsigned Tag,
/// pointer, typedef, etc.
DIDerivedType DIFactory::CreateDerivedTypeEx(unsigned Tag,
DIDescriptor Context,
- const char * Name,
+ StringRef Name,
DICompileUnit CompileUnit,
unsigned LineNumber,
Constant *SizeInBits,
@@ -794,7 +795,7 @@ DIDerivedType DIFactory::CreateDerivedTypeEx(unsigned Tag,
/// CreateCompositeType - Create a composite type like array, struct, etc.
DICompositeType DIFactory::CreateCompositeType(unsigned Tag,
DIDescriptor Context,
- const char * Name,
+ StringRef Name,
DICompileUnit CompileUnit,
unsigned LineNumber,
uint64_t SizeInBits,
@@ -826,7 +827,7 @@ DICompositeType DIFactory::CreateCompositeType(unsigned Tag,
/// CreateCompositeType - Create a composite type like array, struct, etc.
DICompositeType DIFactory::CreateCompositeTypeEx(unsigned Tag,
DIDescriptor Context,
- const char * Name,
+ StringRef Name,
DICompileUnit CompileUnit,
unsigned LineNumber,
Constant *SizeInBits,
@@ -859,9 +860,9 @@ DICompositeType DIFactory::CreateCompositeTypeEx(unsigned Tag,
/// See comments in DISubprogram for descriptions of these fields. This
/// method does not unique the generated descriptors.
DISubprogram DIFactory::CreateSubprogram(DIDescriptor Context,
- const char * Name,
- const char * DisplayName,
- const char * LinkageName,
+ StringRef Name,
+ StringRef DisplayName,
+ StringRef LinkageName,
DICompileUnit CompileUnit,
unsigned LineNo, DIType Type,
bool isLocalToUnit,
@@ -885,9 +886,9 @@ DISubprogram DIFactory::CreateSubprogram(DIDescriptor Context,
/// CreateGlobalVariable - Create a new descriptor for the specified global.
DIGlobalVariable
-DIFactory::CreateGlobalVariable(DIDescriptor Context, const char * Name,
- const char * DisplayName,
- const char * LinkageName,
+DIFactory::CreateGlobalVariable(DIDescriptor Context, StringRef Name,
+ StringRef DisplayName,
+ StringRef LinkageName,
DICompileUnit CompileUnit,
unsigned LineNo, DIType Type,bool isLocalToUnit,
bool isDefinition, llvm::GlobalVariable *Val) {
@@ -919,7 +920,7 @@ DIFactory::CreateGlobalVariable(DIDescriptor Context, const char * Name,
/// CreateVariable - Create a new descriptor for the specified variable.
DIVariable DIFactory::CreateVariable(unsigned Tag, DIDescriptor Context,
- const char * Name,
+ StringRef Name,
DICompileUnit CompileUnit, unsigned LineNo,
DIType Type) {
Value *Elts[] = {
@@ -976,6 +977,17 @@ DILocation DIFactory::CreateLocation(unsigned LineNo, unsigned ColumnNo,
return DILocation(MDNode::get(VMContext, &Elts[0], 4));
}
+/// CreateLocation - Creates a debug info location.
+DILocation DIFactory::CreateLocation(unsigned LineNo, unsigned ColumnNo,
+ DIScope S, MDNode *OrigLoc) {
+ Value *Elts[] = {
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
+ ConstantInt::get(Type::getInt32Ty(VMContext), ColumnNo),
+ S.getNode(),
+ OrigLoc
+ };
+ return DILocation(MDNode::get(VMContext, &Elts[0], 4));
+}
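
For orientation, a hedged sketch of the new overload in use; the factory object DIF, the scope, and the OrigLoc node are illustrative, while the signature itself is the one added above:

    // Build a location that also records where the code was inlined from.
    // OrigLoc is the MDNode of the original location, or null when the
    // code was not inlined.
    DILocation Loc = DIF.CreateLocation(LineNo, ColNo, Scope, OrigLoc);
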
//===----------------------------------------------------------------------===//
// DIFactory: Routines for inserting code into a function
@@ -1263,7 +1275,8 @@ bool getLocationInfo(const Value *V, std::string &DisplayName,
if (!DIGV) return false;
DIGlobalVariable Var(cast<MDNode>(DIGV));
- if (const char *D = Var.getDisplayName())
+ StringRef D = Var.getDisplayName();
+ if (!D.empty())
DisplayName = D;
LineNo = Var.getLineNumber();
Unit = Var.getCompileUnit();
@@ -1273,18 +1286,22 @@ bool getLocationInfo(const Value *V, std::string &DisplayName,
if (!DDI) return false;
DIVariable Var(cast<MDNode>(DDI->getVariable()));
- if (const char *D = Var.getName())
+ StringRef D = Var.getName();
+ if (!D.empty())
DisplayName = D;
LineNo = Var.getLineNumber();
Unit = Var.getCompileUnit();
TypeD = Var.getType();
}
- if (const char *T = TypeD.getName())
+ StringRef T = TypeD.getName();
+ if (!T.empty())
Type = T;
- if (const char *F = Unit.getFilename())
+ StringRef F = Unit.getFilename();
+ if (!F.empty())
File = F;
- if (const char *D = Unit.getDirectory())
+ StringRef D = Unit.getDirectory();
+ if (!D.empty())
Dir = D;
return true;
}
@@ -1398,4 +1415,36 @@ bool getLocationInfo(const Value *V, std::string &DisplayName,
return DebugLoc::get(Id);
}
+
+ /// getDISubprogram - Find subprogram that is enclosing this scope.
+ DISubprogram getDISubprogram(MDNode *Scope) {
+ DIDescriptor D(Scope);
+ if (D.isNull())
+ return DISubprogram();
+
+ if (D.isCompileUnit())
+ return DISubprogram();
+
+ if (D.isSubprogram())
+ return DISubprogram(Scope);
+
+ if (D.isLexicalBlock())
+ return getDISubprogram(DILexicalBlock(Scope).getContext().getNode());
+
+ return DISubprogram();
+ }
+
+ /// getDICompositeType - Find underlying composite type.
+ DICompositeType getDICompositeType(DIType T) {
+ if (T.isNull())
+ return DICompositeType();
+
+ if (T.isCompositeType())
+ return DICompositeType(T.getNode());
+
+ if (T.isDerivedType())
+ return getDICompositeType(DIDerivedType(T.getNode()).getTypeDerivedFrom());
+
+ return DICompositeType();
+ }
}
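
A minimal sketch of the two new helpers from a client's point of view; the ScopeNode and Ty variables are assumptions, the helpers are the ones defined above:

    // Map an arbitrary debug scope (subprogram, lexical block, ...) back to
    // its enclosing function, and strip derived types (typedefs, pointers)
    // down to the underlying composite type.
    DISubprogram SP = getDISubprogram(ScopeNode);   // ScopeNode is an MDNode*
    DICompositeType CT = getDICompositeType(Ty);    // Ty is a DIType
    bool InFunction  = !SP.isNull();
    bool IsAggregate = !CT.isNull();
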
diff --git a/lib/Analysis/DomPrinter.cpp b/lib/Analysis/DomPrinter.cpp
index f1b44d0..32b8994 100644
--- a/lib/Analysis/DomPrinter.cpp
+++ b/lib/Analysis/DomPrinter.cpp
@@ -30,46 +30,55 @@ using namespace llvm;
namespace llvm {
template<>
struct DOTGraphTraits<DomTreeNode*> : public DefaultDOTGraphTraits {
- static std::string getNodeLabel(DomTreeNode *Node, DomTreeNode *Graph,
- bool ShortNames) {
+
+  DOTGraphTraits(bool isSimple=false)
+ : DefaultDOTGraphTraits(isSimple) {}
+
+ std::string getNodeLabel(DomTreeNode *Node, DomTreeNode *Graph) {
BasicBlock *BB = Node->getBlock();
if (!BB)
return "Post dominance root node";
- return DOTGraphTraits<const Function*>::getNodeLabel(BB, BB->getParent(),
- ShortNames);
+
+ if (isSimple())
+ return DOTGraphTraits<const Function*>
+ ::getSimpleNodeLabel(BB, BB->getParent());
+ else
+ return DOTGraphTraits<const Function*>
+ ::getCompleteNodeLabel(BB, BB->getParent());
}
};
template<>
struct DOTGraphTraits<DominatorTree*> : public DOTGraphTraits<DomTreeNode*> {
+  DOTGraphTraits(bool isSimple=false)
+ : DOTGraphTraits<DomTreeNode*>(isSimple) {}
+
static std::string getGraphName(DominatorTree *DT) {
return "Dominator tree";
}
- static std::string getNodeLabel(DomTreeNode *Node,
- DominatorTree *G,
- bool ShortNames) {
- return DOTGraphTraits<DomTreeNode*>::getNodeLabel(Node, G->getRootNode(),
- ShortNames);
+ std::string getNodeLabel(DomTreeNode *Node, DominatorTree *G) {
+ return DOTGraphTraits<DomTreeNode*>::getNodeLabel(Node, G->getRootNode());
}
};
template<>
struct DOTGraphTraits<PostDominatorTree*>
: public DOTGraphTraits<DomTreeNode*> {
+
+  DOTGraphTraits(bool isSimple=false)
+ : DOTGraphTraits<DomTreeNode*>(isSimple) {}
+
static std::string getGraphName(PostDominatorTree *DT) {
return "Post dominator tree";
}
- static std::string getNodeLabel(DomTreeNode *Node,
- PostDominatorTree *G,
- bool ShortNames) {
- return DOTGraphTraits<DomTreeNode*>::getNodeLabel(Node,
- G->getRootNode(),
- ShortNames);
+
+  std::string getNodeLabel(DomTreeNode *Node, PostDominatorTree *G) {
+ return DOTGraphTraits<DomTreeNode*>::getNodeLabel(Node, G->getRootNode());
}
};
}
@@ -85,9 +94,11 @@ struct GenericGraphViewer : public FunctionPass {
virtual bool runOnFunction(Function &F) {
Analysis *Graph;
-
+ std::string Title, GraphName;
Graph = &getAnalysis<Analysis>();
- ViewGraph(Graph, Name, OnlyBBS);
+ GraphName = DOTGraphTraits<Analysis*>::getGraphName(Graph);
+ Title = GraphName + " for '" + F.getNameStr() + "' function";
+ ViewGraph(Graph, Name, OnlyBBS, Title);
return false;
}
@@ -163,8 +174,12 @@ struct GenericGraphPrinter : public FunctionPass {
raw_fd_ostream File(Filename.c_str(), ErrorInfo);
Graph = &getAnalysis<Analysis>();
+ std::string Title, GraphName;
+ GraphName = DOTGraphTraits<Analysis*>::getGraphName(Graph);
+ Title = GraphName + " for '" + F.getNameStr() + "' function";
+
if (ErrorInfo.empty())
- WriteGraph(File, Graph, OnlyBBS);
+ WriteGraph(File, Graph, OnlyBBS, Name, Title);
else
errs() << " error opening file for writing!";
errs() << "\n";
diff --git a/lib/Analysis/IPA/Andersens.cpp b/lib/Analysis/IPA/Andersens.cpp
index 40a8cd5..e12db81 100644
--- a/lib/Analysis/IPA/Andersens.cpp
+++ b/lib/Analysis/IPA/Andersens.cpp
@@ -484,7 +484,6 @@ namespace {
const Value *V2, unsigned V2Size);
virtual ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
virtual ModRefResult getModRefInfo(CallSite CS1, CallSite CS2);
- void getMustAliases(Value *P, std::vector<Value*> &RetVals);
bool pointsToConstantMemory(const Value *P);
virtual void deleteValue(Value *V) {
@@ -680,32 +679,6 @@ Andersens::getModRefInfo(CallSite CS1, CallSite CS2) {
return AliasAnalysis::getModRefInfo(CS1,CS2);
}
-/// getMustAlias - We can provide must alias information if we know that a
-/// pointer can only point to a specific function or the null pointer.
-/// Unfortunately we cannot determine must-alias information for global
-/// variables or any other memory memory objects because we do not track whether
-/// a pointer points to the beginning of an object or a field of it.
-void Andersens::getMustAliases(Value *P, std::vector<Value*> &RetVals) {
- Node *N = &GraphNodes[FindNode(getNode(P))];
- if (N->PointsTo->count() == 1) {
- Node *Pointee = &GraphNodes[N->PointsTo->find_first()];
- // If a function is the only object in the points-to set, then it must be
- // the destination. Note that we can't handle global variables here,
- // because we don't know if the pointer is actually pointing to a field of
- // the global or to the beginning of it.
- if (Value *V = Pointee->getValue()) {
- if (Function *F = dyn_cast<Function>(V))
- RetVals.push_back(F);
- } else {
- // If the object in the points-to set is the null object, then the null
- // pointer is a must alias.
- if (Pointee == &GraphNodes[NullObject])
- RetVals.push_back(Constant::getNullValue(P->getType()));
- }
- }
- AliasAnalysis::getMustAliases(P, RetVals);
-}
-
/// pointsToConstantMemory - If we can determine that this pointer only points
/// to constant memory, return true. In practice, this means that if the
/// pointer can only point to constant globals, functions, or the null pointer,
diff --git a/lib/Analysis/IPA/GlobalsModRef.cpp b/lib/Analysis/IPA/GlobalsModRef.cpp
index ddd6ff9..a979a99 100644
--- a/lib/Analysis/IPA/GlobalsModRef.cpp
+++ b/lib/Analysis/IPA/GlobalsModRef.cpp
@@ -111,7 +111,6 @@ namespace {
ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
return AliasAnalysis::getModRefInfo(CS1,CS2);
}
- bool hasNoModRefInfoForCalls() const { return false; }
/// getModRefBehavior - Return the behavior of the specified function if
/// called from the specified call site. The call site may be null in which
diff --git a/lib/Analysis/IVUsers.cpp b/lib/Analysis/IVUsers.cpp
index efe40e4..37747b6 100644
--- a/lib/Analysis/IVUsers.cpp
+++ b/lib/Analysis/IVUsers.cpp
@@ -24,7 +24,6 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/CommandLine.h"
#include <algorithm>
using namespace llvm;
@@ -32,10 +31,6 @@ char IVUsers::ID = 0;
static RegisterPass<IVUsers>
X("iv-users", "Induction Variable Users", false, true);
-static cl::opt<bool>
-SimplifyIVUsers("simplify-iv-users", cl::Hidden, cl::init(false),
- cl::desc("Restrict IV Users to loop-invariant strides"));
-
Pass *llvm::createIVUsersPass() {
return new IVUsers();
}
@@ -214,8 +209,7 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
return false; // Non-reducible symbolic expression, bail out.
// Keep things simple. Don't touch loop-variant strides.
- if (SimplifyIVUsers && !Stride->isLoopInvariant(L)
- && L->contains(I->getParent()))
+ if (!Stride->isLoopInvariant(L) && L->contains(I->getParent()))
return false;
SmallPtrSet<Instruction *, 4> UniqueUsers;
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index f9953e3..b53ac13 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -21,13 +21,41 @@
using namespace llvm;
using namespace llvm::PatternMatch;
-/// SimplifyAndInst - Given operands for an And, see if we can
+/// SimplifyAddInst - Given operands for an Add, see if we can
/// fold the result. If not, this returns null.
-Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1,
+Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const TargetData *TD) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
+ return ConstantFoldInstOperands(Instruction::Add, CLHS->getType(),
+ Ops, 2, TD);
+ }
+
+ // Canonicalize the constant to the RHS.
+ std::swap(Op0, Op1);
+ }
+
+ if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
+ // X + undef -> undef
+ if (isa<UndefValue>(Op1C))
+ return Op1C;
+
+ // X + 0 --> X
+ if (Op1C->isNullValue())
+ return Op0;
+ }
+
+ // FIXME: Could pull several more out of instcombine.
+ return 0;
+}
+
+/// SimplifyAndInst - Given operands for an And, see if we can
+/// fold the result. If not, this returns null.
+Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD) {
+ if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
+ if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
+ Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::And, CLHS->getType(),
Ops, 2, TD);
}
@@ -83,8 +111,7 @@ Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1,
/// SimplifyOrInst - Given operands for an Or, see if we can
/// fold the result. If not, this returns null.
-Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1,
- const TargetData *TD) {
+Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
@@ -142,8 +169,6 @@ Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1,
}
-
-
static const Type *GetCompareTy(Value *Op) {
return CmpInst::makeCmpResultType(Op->getType());
}
@@ -264,6 +289,34 @@ Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return 0;
}
+/// SimplifyGEPInst - Given operands for an GetElementPtrInst, see if we can
+/// fold the result. If not, this returns null.
+Value *llvm::SimplifyGEPInst(Value *const *Ops, unsigned NumOps,
+ const TargetData *TD) {
+ // getelementptr P -> P.
+ if (NumOps == 1)
+ return Ops[0];
+
+ // TODO.
+ //if (isa<UndefValue>(Ops[0]))
+ // return UndefValue::get(GEP.getType());
+
+ // getelementptr P, 0 -> P.
+ if (NumOps == 2)
+ if (ConstantInt *C = dyn_cast<ConstantInt>(Ops[1]))
+ if (C->isZero())
+ return Ops[0];
+
+ // Check to see if this is constant foldable.
+ for (unsigned i = 0; i != NumOps; ++i)
+ if (!isa<Constant>(Ops[i]))
+ return 0;
+
+ return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]),
+ (Constant *const*)Ops+1, NumOps-1);
+}
+
+
//=== Helper functions for higher up the class hierarchy.
/// SimplifyBinOp - Given operands for a BinaryOperator, see if we can
@@ -299,6 +352,10 @@ Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD) {
switch (I->getOpcode()) {
default:
return ConstantFoldInstruction(I, TD);
+ case Instruction::Add:
+ return SimplifyAddInst(I->getOperand(0), I->getOperand(1),
+ cast<BinaryOperator>(I)->hasNoSignedWrap(),
+ cast<BinaryOperator>(I)->hasNoUnsignedWrap(), TD);
case Instruction::And:
return SimplifyAndInst(I->getOperand(0), I->getOperand(1), TD);
case Instruction::Or:
@@ -309,6 +366,10 @@ Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD) {
case Instruction::FCmp:
return SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(),
I->getOperand(0), I->getOperand(1), TD);
+ case Instruction::GetElementPtr: {
+ SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
+ return SimplifyGEPInst(&Ops[0], Ops.size(), TD);
+ }
}
}
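
A short sketch of the new Add entry point from a caller's perspective; the caller is hypothetical, but the signature and the wrap-flag arguments are the ones introduced above:

    // I is some Add instruction; TD is a TargetData* and may be null.
    // Try to fold "add nsw/nuw X, Y" to an existing value without creating
    // any new instructions.
    BinaryOperator *BO = cast<BinaryOperator>(I);
    if (Value *V = SimplifyAddInst(BO->getOperand(0), BO->getOperand(1),
                                   BO->hasNoSignedWrap(),
                                   BO->hasNoUnsignedWrap(), TD)) {
      I->replaceAllUsesWith(V);
      I->eraseFromParent();
    }
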
diff --git a/lib/Analysis/LoopInfo.cpp b/lib/Analysis/LoopInfo.cpp
index 1c614b0..4de756c 100644
--- a/lib/Analysis/LoopInfo.cpp
+++ b/lib/Analysis/LoopInfo.cpp
@@ -243,6 +243,11 @@ unsigned Loop::getSmallConstantTripMultiple() const {
case BinaryOperator::Mul:
Result = dyn_cast<ConstantInt>(BO->getOperand(1));
break;
+ case BinaryOperator::Shl:
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1)))
+ if (CI->getValue().getActiveBits() <= 5)
+ return 1u << CI->getZExtValue();
+ break;
default:
break;
}
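
A worked example of the new Shl case, shown as comments over hypothetical IR:

    // Exit test of a loop that runs n*8 iterations:
    //   %bound = shl i32 %n, 3           ; trip count is n << 3
    //   %done  = icmp eq i32 %iv.next, %bound
    // getSmallConstantTripMultiple() now returns 1u << 3 == 8 here, so the
    // loop can be unrolled by up to 8 with no remainder loop. Shift amounts
    // needing more than 5 active bits (i.e. 32 or larger) are skipped and
    // handled conservatively.
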
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index 0ec0e74..ae6f970 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -20,6 +20,8 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Function.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
@@ -117,10 +119,6 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
Pointer = Inst->getOperand(1);
// calls to free() erase the entire structure
PointerSize = ~0ULL;
- } else if (isFreeCall(Inst)) {
- Pointer = Inst->getOperand(0);
- // calls to free() erase the entire structure
- PointerSize = ~0ULL;
} else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
// Debug intrinsics don't cause dependences.
if (isa<DbgInfoIntrinsic>(Inst)) continue;
@@ -174,7 +172,7 @@ MemDepResult MemoryDependenceAnalysis::
getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
BasicBlock::iterator ScanIt, BasicBlock *BB) {
- Value* invariantTag = 0;
+ Value *invariantTag = 0;
// Walk backwards through the basic block, looking for dependencies.
while (ScanIt != BB->begin()) {
@@ -185,12 +183,12 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
if (invariantTag == Inst) {
invariantTag = 0;
continue;
- } else if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst)) {
+ } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
// If we pass an invariant-end marker, then we've just entered an
// invariant region and can start ignoring dependencies.
if (II->getIntrinsicID() == Intrinsic::invariant_end) {
uint64_t invariantSize = ~0ULL;
- if (ConstantInt* CI = dyn_cast<ConstantInt>(II->getOperand(2)))
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getOperand(2)))
invariantSize = CI->getZExtValue();
AliasAnalysis::AliasResult R =
@@ -203,9 +201,9 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
// If we reach a lifetime begin or end marker, then the query ends here
// because the value is undefined.
} else if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
- II->getIntrinsicID() == Intrinsic::lifetime_end) {
+ II->getIntrinsicID() == Intrinsic::lifetime_end) {
uint64_t invariantSize = ~0ULL;
- if (ConstantInt* CI = dyn_cast<ConstantInt>(II->getOperand(1)))
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getOperand(1)))
invariantSize = CI->getZExtValue();
AliasAnalysis::AliasResult R =
@@ -371,20 +369,41 @@ MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
// calls to free() erase the entire structure, not just a field.
MemSize = ~0UL;
} else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
- CallSite QueryCS = CallSite::get(QueryInst);
- bool isReadOnly = AA->onlyReadsMemory(QueryCS);
- LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
- QueryParent);
+ int IntrinsicID = 0; // Intrinsic IDs start at 1.
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
+ IntrinsicID = II->getIntrinsicID();
+
+ switch (IntrinsicID) {
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::invariant_start:
+ MemPtr = QueryInst->getOperand(2);
+ MemSize = cast<ConstantInt>(QueryInst->getOperand(1))->getZExtValue();
+ break;
+ case Intrinsic::invariant_end:
+ MemPtr = QueryInst->getOperand(3);
+ MemSize = cast<ConstantInt>(QueryInst->getOperand(2))->getZExtValue();
+ break;
+ default:
+ CallSite QueryCS = CallSite::get(QueryInst);
+ bool isReadOnly = AA->onlyReadsMemory(QueryCS);
+ LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
+ QueryParent);
+ }
} else {
// Non-memory instruction.
LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
}
// If we need to do a pointer scan, make it happen.
- if (MemPtr)
- LocalCache = getPointerDependencyFrom(MemPtr, MemSize,
- isa<LoadInst>(QueryInst),
- ScanPos, QueryParent);
+ if (MemPtr) {
+ bool isLoad = !QueryInst->mayWriteToMemory();
+ if (IntrinsicInst *II = dyn_cast<MemoryUseIntrinsic>(QueryInst)) {
+ isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_end;
+ }
+ LocalCache = getPointerDependencyFrom(MemPtr, MemSize, isLoad, ScanPos,
+ QueryParent);
+ }
// Remember the result!
if (Instruction *I = LocalCache.getInst())
@@ -688,6 +707,274 @@ SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
}
}
+/// isPHITranslatable - Return true if the specified computation is derived from
+/// a PHI node in the current block and if it is simple enough for us to handle.
+static bool isPHITranslatable(Instruction *Inst) {
+ if (isa<PHINode>(Inst))
+ return true;
+
+ // We can handle bitcast of a PHI, but the PHI needs to be in the same block
+ // as the bitcast.
+ if (BitCastInst *BC = dyn_cast<BitCastInst>(Inst)) {
+ Instruction *OpI = dyn_cast<Instruction>(BC->getOperand(0));
+ if (OpI == 0 || OpI->getParent() != Inst->getParent())
+ return true;
+ return isPHITranslatable(OpI);
+ }
+
+ // We can translate a GEP if all of its operands defined in this block are phi
+ // translatable.
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
+ for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
+ Instruction *OpI = dyn_cast<Instruction>(GEP->getOperand(i));
+ if (OpI == 0 || OpI->getParent() != Inst->getParent())
+ continue;
+
+ if (!isPHITranslatable(OpI))
+ return false;
+ }
+ return true;
+ }
+
+ if (Inst->getOpcode() == Instruction::Add &&
+ isa<ConstantInt>(Inst->getOperand(1))) {
+ Instruction *OpI = dyn_cast<Instruction>(Inst->getOperand(0));
+ if (OpI == 0 || OpI->getParent() != Inst->getParent())
+ return true;
+ return isPHITranslatable(OpI);
+ }
+
+ // cerr << "MEMDEP: Could not PHI translate: " << *Pointer;
+ // if (isa<BitCastInst>(PtrInst) || isa<GetElementPtrInst>(PtrInst))
+ // cerr << "OP:\t\t\t\t" << *PtrInst->getOperand(0);
+
+ return false;
+}
+
+/// GetPHITranslatedValue - Given a computation that satisfied the
+/// isPHITranslatable predicate, see if we can translate the computation into
+/// the specified predecessor block. If so, return that value.
+Value *MemoryDependenceAnalysis::
+GetPHITranslatedValue(Value *InVal, BasicBlock *CurBB, BasicBlock *Pred,
+ const TargetData *TD) const {
+ // If the input value is not an instruction, or if it is not defined in CurBB,
+ // then we don't need to phi translate it.
+ Instruction *Inst = dyn_cast<Instruction>(InVal);
+ if (Inst == 0 || Inst->getParent() != CurBB)
+ return InVal;
+
+ if (PHINode *PN = dyn_cast<PHINode>(Inst))
+ return PN->getIncomingValueForBlock(Pred);
+
+ // Handle bitcast of PHI.
+ if (BitCastInst *BC = dyn_cast<BitCastInst>(Inst)) {
+ // PHI translate the input operand.
+ Value *PHIIn = GetPHITranslatedValue(BC->getOperand(0), CurBB, Pred, TD);
+ if (PHIIn == 0) return 0;
+
+ // Constants are trivial to phi translate.
+ if (Constant *C = dyn_cast<Constant>(PHIIn))
+ return ConstantExpr::getBitCast(C, BC->getType());
+
+ // Otherwise we have to see if a bitcasted version of the incoming pointer
+ // is available. If so, we can use it, otherwise we have to fail.
+ for (Value::use_iterator UI = PHIIn->use_begin(), E = PHIIn->use_end();
+ UI != E; ++UI) {
+ if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI))
+ if (BCI->getType() == BC->getType())
+ return BCI;
+ }
+ return 0;
+ }
+
+ // Handle getelementptr with at least one PHI translatable operand.
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
+ SmallVector<Value*, 8> GEPOps;
+ BasicBlock *CurBB = GEP->getParent();
+ for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
+ Value *GEPOp = GEP->getOperand(i);
+      // No PHI translation is needed for operands whose values are live-in
+      // to the predecessor block.
+ if (!isa<Instruction>(GEPOp) ||
+ cast<Instruction>(GEPOp)->getParent() != CurBB) {
+ GEPOps.push_back(GEPOp);
+ continue;
+ }
+
+ // If the operand is a phi node, do phi translation.
+ Value *InOp = GetPHITranslatedValue(GEPOp, CurBB, Pred, TD);
+ if (InOp == 0) return 0;
+
+ GEPOps.push_back(InOp);
+ }
+
+ // Simplify the GEP to handle 'gep x, 0' -> x etc.
+ if (Value *V = SimplifyGEPInst(&GEPOps[0], GEPOps.size(), TD))
+ return V;
+
+ // Scan to see if we have this GEP available.
+ Value *APHIOp = GEPOps[0];
+ for (Value::use_iterator UI = APHIOp->use_begin(), E = APHIOp->use_end();
+ UI != E; ++UI) {
+ if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI))
+ if (GEPI->getType() == GEP->getType() &&
+ GEPI->getNumOperands() == GEPOps.size() &&
+ GEPI->getParent()->getParent() == CurBB->getParent()) {
+ bool Mismatch = false;
+ for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
+ if (GEPI->getOperand(i) != GEPOps[i]) {
+ Mismatch = true;
+ break;
+ }
+ if (!Mismatch)
+ return GEPI;
+ }
+ }
+ return 0;
+ }
+
+ // Handle add with a constant RHS.
+ if (Inst->getOpcode() == Instruction::Add &&
+ isa<ConstantInt>(Inst->getOperand(1))) {
+ // PHI translate the LHS.
+ Value *LHS;
+ Constant *RHS = cast<ConstantInt>(Inst->getOperand(1));
+ Instruction *OpI = dyn_cast<Instruction>(Inst->getOperand(0));
+ bool isNSW = cast<BinaryOperator>(Inst)->hasNoSignedWrap();
+ bool isNUW = cast<BinaryOperator>(Inst)->hasNoUnsignedWrap();
+
+ if (OpI == 0 || OpI->getParent() != Inst->getParent())
+ LHS = Inst->getOperand(0);
+ else {
+ LHS = GetPHITranslatedValue(Inst->getOperand(0), CurBB, Pred, TD);
+ if (LHS == 0)
+ return 0;
+ }
+
+ // If the PHI translated LHS is an add of a constant, fold the immediates.
+ if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(LHS))
+ if (BOp->getOpcode() == Instruction::Add)
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
+ LHS = BOp->getOperand(0);
+ RHS = ConstantExpr::getAdd(RHS, CI);
+ isNSW = isNUW = false;
+ }
+
+ // See if the add simplifies away.
+ if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, TD))
+ return Res;
+
+ // Otherwise, see if we have this add available somewhere.
+ for (Value::use_iterator UI = LHS->use_begin(), E = LHS->use_end();
+ UI != E; ++UI) {
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(*UI))
+ if (BO->getOperand(0) == LHS && BO->getOperand(1) == RHS &&
+ BO->getParent()->getParent() == CurBB->getParent())
+ return BO;
+ }
+
+ return 0;
+ }
+
+ return 0;
+}
+
+/// GetAvailablePHITranslatedValue - Return the value computed by
+/// GetPHITranslatedValue if it dominates PredBB, otherwise return null.
+Value *MemoryDependenceAnalysis::
+GetAvailablePHITranslatedValue(Value *V,
+ BasicBlock *CurBB, BasicBlock *PredBB,
+ const TargetData *TD,
+ const DominatorTree &DT) const {
+ // See if PHI translation succeeds.
+ V = GetPHITranslatedValue(V, CurBB, PredBB, TD);
+ if (V == 0) return 0;
+
+ // Make sure the value is live in the predecessor.
+ if (Instruction *Inst = dyn_cast_or_null<Instruction>(V))
+ if (!DT.dominates(Inst->getParent(), PredBB))
+ return 0;
+ return V;
+}
+
+
+/// InsertPHITranslatedPointer - Insert a computation of the PHI translated
+/// version of 'InVal' for the edge PredBB->CurBB into the end of the PredBB
+/// block. All newly created instructions are added to the NewInsts list.
+///
+Value *MemoryDependenceAnalysis::
+InsertPHITranslatedPointer(Value *InVal, BasicBlock *CurBB,
+ BasicBlock *PredBB, const TargetData *TD,
+ const DominatorTree &DT,
+ SmallVectorImpl<Instruction*> &NewInsts) const {
+ // See if we have a version of this value already available and dominating
+ // PredBB. If so, there is no need to insert a new copy.
+ if (Value *Res = GetAvailablePHITranslatedValue(InVal, CurBB, PredBB, TD, DT))
+ return Res;
+
+ // If we don't have an available version of this value, it must be an
+ // instruction.
+ Instruction *Inst = cast<Instruction>(InVal);
+
+ // Handle bitcast of PHI translatable value.
+ if (BitCastInst *BC = dyn_cast<BitCastInst>(Inst)) {
+ Value *OpVal = InsertPHITranslatedPointer(BC->getOperand(0),
+ CurBB, PredBB, TD, DT, NewInsts);
+ if (OpVal == 0) return 0;
+
+ // Otherwise insert a bitcast at the end of PredBB.
+ BitCastInst *New = new BitCastInst(OpVal, InVal->getType(),
+ InVal->getName()+".phi.trans.insert",
+ PredBB->getTerminator());
+ NewInsts.push_back(New);
+ return New;
+ }
+
+ // Handle getelementptr with at least one PHI operand.
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
+ SmallVector<Value*, 8> GEPOps;
+ BasicBlock *CurBB = GEP->getParent();
+ for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
+ Value *OpVal = InsertPHITranslatedPointer(GEP->getOperand(i),
+ CurBB, PredBB, TD, DT, NewInsts);
+ if (OpVal == 0) return 0;
+ GEPOps.push_back(OpVal);
+ }
+
+ GetElementPtrInst *Result =
+ GetElementPtrInst::Create(GEPOps[0], GEPOps.begin()+1, GEPOps.end(),
+ InVal->getName()+".phi.trans.insert",
+ PredBB->getTerminator());
+ Result->setIsInBounds(GEP->isInBounds());
+ NewInsts.push_back(Result);
+ return Result;
+ }
+
+#if 0
+ // FIXME: This code works, but it is unclear that we actually want to insert
+ // a big chain of computation in order to make a value available in a block.
+ // This needs to be evaluated carefully to consider its cost trade offs.
+
+ // Handle add with a constant RHS.
+ if (Inst->getOpcode() == Instruction::Add &&
+ isa<ConstantInt>(Inst->getOperand(1))) {
+ // PHI translate the LHS.
+ Value *OpVal = InsertPHITranslatedPointer(Inst->getOperand(0),
+ CurBB, PredBB, TD, DT, NewInsts);
+ if (OpVal == 0) return 0;
+
+ BinaryOperator *Res = BinaryOperator::CreateAdd(OpVal, Inst->getOperand(1),
+ InVal->getName()+".phi.trans.insert",
+ PredBB->getTerminator());
+ Res->setHasNoSignedWrap(cast<BinaryOperator>(Inst)->hasNoSignedWrap());
+ Res->setHasNoUnsignedWrap(cast<BinaryOperator>(Inst)->hasNoUnsignedWrap());
+ NewInsts.push_back(Res);
+ return Res;
+ }
+#endif
+
+ return 0;
+}
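
To make the mechanics concrete, a hedged sketch of the translation these routines perform; the IR and the calling code are illustrative, and MD is assumed to be a MemoryDependenceAnalysis* with these members accessible:

    // In the successor block BB:
    //   %p = phi i8* [ %x, %pred1 ], [ %y, %pred2 ]
    //   %g = getelementptr i8* %p, i32 4
    // Translating %g into %pred1 substitutes %x for %p. The helper either
    // finds an equivalent GEP already dominating %pred1 or inserts one at
    // %pred1's terminator, recording anything it created in NewInsts.
    SmallVector<Instruction*, 4> NewInsts;
    Value *Addr = MD->InsertPHITranslatedPointer(G, BB, Pred1, TD, *DT,
                                                 NewInsts);
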
/// getNonLocalPointerDepFromBB - Perform a dependency query based on
/// pointer/pointeesize starting at the end of StartBB. Add any clobber/def
@@ -831,66 +1118,107 @@ getNonLocalPointerDepFromBB(Value *Pointer, uint64_t PointeeSize,
NumSortedEntries = Cache->size();
}
- // If this is directly a PHI node, just use the incoming values for each
- // pred as the phi translated version.
- if (PHINode *PtrPHI = dyn_cast<PHINode>(PtrInst)) {
- Cache = 0;
-
- for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
- BasicBlock *Pred = *PI;
- Value *PredPtr = PtrPHI->getIncomingValueForBlock(Pred);
-
- // Check to see if we have already visited this pred block with another
- // pointer. If so, we can't do this lookup. This failure can occur
- // with PHI translation when a critical edge exists and the PHI node in
- // the successor translates to a pointer value different than the
- // pointer the block was first analyzed with.
- std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
- InsertRes = Visited.insert(std::make_pair(Pred, PredPtr));
+ // If this is a computation derived from a PHI node, use the suitably
+ // translated incoming values for each pred as the phi translated version.
+ if (!isPHITranslatable(PtrInst))
+ goto PredTranslationFailure;
- if (!InsertRes.second) {
- // If the predecessor was visited with PredPtr, then we already did
- // the analysis and can ignore it.
- if (InsertRes.first->second == PredPtr)
- continue;
-
- // Otherwise, the block was previously analyzed with a different
- // pointer. We can't represent the result of this case, so we just
- // treat this as a phi translation failure.
- goto PredTranslationFailure;
- }
+ Cache = 0;
+
+ for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
+ BasicBlock *Pred = *PI;
+ // Get the PHI translated pointer in this predecessor. This can fail and
+ // return null if not translatable.
+ Value *PredPtr = GetPHITranslatedValue(PtrInst, BB, Pred, TD);
+
+ // Check to see if we have already visited this pred block with another
+ // pointer. If so, we can't do this lookup. This failure can occur
+ // with PHI translation when a critical edge exists and the PHI node in
+ // the successor translates to a pointer value different than the
+ // pointer the block was first analyzed with.
+ std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
+ InsertRes = Visited.insert(std::make_pair(Pred, PredPtr));
- // FIXME: it is entirely possible that PHI translating will end up with
- // the same value. Consider PHI translating something like:
- // X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need*
- // to recurse here, pedantically speaking.
+ if (!InsertRes.second) {
+ // If the predecessor was visited with PredPtr, then we already did
+ // the analysis and can ignore it.
+ if (InsertRes.first->second == PredPtr)
+ continue;
- // If we have a problem phi translating, fall through to the code below
- // to handle the failure condition.
- if (getNonLocalPointerDepFromBB(PredPtr, PointeeSize, isLoad, Pred,
- Result, Visited))
- goto PredTranslationFailure;
+ // Otherwise, the block was previously analyzed with a different
+ // pointer. We can't represent the result of this case, so we just
+ // treat this as a phi translation failure.
+ goto PredTranslationFailure;
}
- // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
- CacheInfo = &NonLocalPointerDeps[CacheKey];
- Cache = &CacheInfo->second;
- NumSortedEntries = Cache->size();
+ // If PHI translation was unable to find an available pointer in this
+ // predecessor, then we have to assume that the pointer is clobbered in
+ // that predecessor. We can still do PRE of the load, which would insert
+ // a computation of the pointer in this predecessor.
+ if (PredPtr == 0) {
+ // Add the entry to the Result list.
+ NonLocalDepEntry Entry(Pred,
+ MemDepResult::getClobber(Pred->getTerminator()));
+ Result.push_back(Entry);
+
+ // Add it to the cache for this CacheKey so that subsequent queries get
+ // this result.
+ Cache = &NonLocalPointerDeps[CacheKey].second;
+ MemoryDependenceAnalysis::NonLocalDepInfo::iterator It =
+ std::upper_bound(Cache->begin(), Cache->end(), Entry);
+
+ if (It != Cache->begin() && prior(It)->first == Pred)
+ --It;
+
+ if (It == Cache->end() || It->first != Pred) {
+ Cache->insert(It, Entry);
+ // Add it to the reverse map.
+ ReverseNonLocalPtrDeps[Pred->getTerminator()].insert(CacheKey);
+ } else if (!It->second.isDirty()) {
+ // noop
+ } else if (It->second.getInst() == Pred->getTerminator()) {
+ // Same instruction, clear the dirty marker.
+ It->second = Entry.second;
+ } else if (It->second.getInst() == 0) {
+ // Dirty, with no instruction, just add this.
+ It->second = Entry.second;
+ ReverseNonLocalPtrDeps[Pred->getTerminator()].insert(CacheKey);
+ } else {
+ // Otherwise, dirty with a different instruction.
+ RemoveFromReverseMap(ReverseNonLocalPtrDeps, It->second.getInst(),
+ CacheKey);
+ It->second = Entry.second;
+ ReverseNonLocalPtrDeps[Pred->getTerminator()].insert(CacheKey);
+ }
+ Cache = 0;
+ continue;
+ }
+
+ // FIXME: it is entirely possible that PHI translating will end up with
+ // the same value. Consider PHI translating something like:
+ // X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need*
+ // to recurse here, pedantically speaking.
- // Since we did phi translation, the "Cache" set won't contain all of the
- // results for the query. This is ok (we can still use it to accelerate
- // specific block queries) but we can't do the fastpath "return all
- // results from the set" Clear out the indicator for this.
- CacheInfo->first = BBSkipFirstBlockPair();
- SkipFirstBlock = false;
- continue;
+ // If we have a problem phi translating, fall through to the code below
+ // to handle the failure condition.
+ if (getNonLocalPointerDepFromBB(PredPtr, PointeeSize, isLoad, Pred,
+ Result, Visited))
+ goto PredTranslationFailure;
}
- // TODO: BITCAST, GEP.
+ // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
+ CacheInfo = &NonLocalPointerDeps[CacheKey];
+ Cache = &CacheInfo->second;
+ NumSortedEntries = Cache->size();
- // cerr << "MEMDEP: Could not PHI translate: " << *Pointer;
- // if (isa<BitCastInst>(PtrInst) || isa<GetElementPtrInst>(PtrInst))
- // cerr << "OP:\t\t\t\t" << *PtrInst->getOperand(0);
+ // Since we did phi translation, the "Cache" set won't contain all of the
+ // results for the query. This is ok (we can still use it to accelerate
+ // specific block queries) but we can't do the fastpath "return all
+ // results from the set" Clear out the indicator for this.
+ CacheInfo->first = BBSkipFirstBlockPair();
+ SkipFirstBlock = false;
+ continue;
+
PredTranslationFailure:
if (Cache == 0) {
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index ea4af40..c6835ef 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -3644,7 +3644,7 @@ EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
/// the addressed element of the initializer or null if the index expression is
/// invalid.
static Constant *
-GetAddressedElementFromGlobal(LLVMContext &Context, GlobalVariable *GV,
+GetAddressedElementFromGlobal(GlobalVariable *GV,
const std::vector<ConstantInt*> &Indices) {
Constant *Init = GV->getInitializer();
for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
@@ -3732,7 +3732,7 @@ ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
// Form the GEP offset.
Indexes[VarIdxNum] = Val;
- Constant *Result = GetAddressedElementFromGlobal(getContext(), GV, Indexes);
+ Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
if (Result == 0) break; // Cannot compute!
// Evaluate the condition for this iteration.
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index b0e6900..31d3ccc 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -325,7 +325,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
APInt Mask2(Mask.shl(ShiftAmt));
ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero,KnownOne, TD,
Depth+1);
- assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
// high bits known zero.
@@ -343,7 +343,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
APInt Mask2(Mask.shl(ShiftAmt));
ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
Depth+1);
- assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
@@ -380,7 +380,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
}
// fall through
case Instruction::Add: {
- // If one of the operands has trailing zeros, than the bits that the
+ // If one of the operands has trailing zeros, then the bits that the
// other operand has in those bit positions will be preserved in the
// result. For an add, this works with either operand. For a subtract,
// this only works if the known zeros are in the right operand.
@@ -436,7 +436,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
KnownZero |= KnownZero2 & Mask;
- assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
}
}
break;
@@ -449,7 +449,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
KnownZero |= ~LowBits & Mask;
ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
Depth+1);
- assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
break;
}
}
@@ -833,14 +833,12 @@ bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
switch (I->getOpcode()) {
default: break;
- case Instruction::SExt: {
+ case Instruction::SExt:
if (!LookThroughSExt) return false;
// otherwise fall through to ZExt
- }
- case Instruction::ZExt: {
+ case Instruction::ZExt:
return ComputeMultiple(I->getOperand(0), Base, Multiple,
LookThroughSExt, Depth+1);
- }
case Instruction::Shl:
case Instruction::Mul: {
Value *Op0 = I->getOperand(0);
@@ -950,6 +948,195 @@ bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
return false;
}
+
+/// GetLinearExpression - Analyze the specified value as a linear expression:
+/// "A*V + B", where A and B are constant integers. Return the scale and offset
+/// values as APInts and return V as a Value*. The incoming Value is known to
+/// have IntegerType. Note that this looks through extends, so the high bits
+/// may not be represented in the result.
+static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
+ const TargetData *TD, unsigned Depth) {
+ assert(isa<IntegerType>(V->getType()) && "Not an integer value");
+
+ // Limit our recursion depth.
+ if (Depth == 6) {
+ Scale = 1;
+ Offset = 0;
+ return V;
+ }
+
+ if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
+ if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
+ switch (BOp->getOpcode()) {
+ default: break;
+ case Instruction::Or:
+ // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
+ // analyze it.
+ if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), TD))
+ break;
+ // FALL THROUGH.
+ case Instruction::Add:
+ V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, TD, Depth+1);
+ Offset += RHSC->getValue();
+ return V;
+ case Instruction::Mul:
+ V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, TD, Depth+1);
+ Offset *= RHSC->getValue();
+ Scale *= RHSC->getValue();
+ return V;
+ case Instruction::Shl:
+ V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, TD, Depth+1);
+ Offset <<= RHSC->getValue().getLimitedValue();
+ Scale <<= RHSC->getValue().getLimitedValue();
+ return V;
+ }
+ }
+ }
+
+ // Since clients don't care about the high bits of the value, just scales and
+ // offsets, we can look through extensions.
+ if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
+ Value *CastOp = cast<CastInst>(V)->getOperand(0);
+ unsigned OldWidth = Scale.getBitWidth();
+ unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
+ Scale.trunc(SmallWidth);
+ Offset.trunc(SmallWidth);
+ Value *Result = GetLinearExpression(CastOp, Scale, Offset, TD, Depth+1);
+ Scale.zext(OldWidth);
+ Offset.zext(OldWidth);
+ return Result;
+ }
+
+ Scale = 1;
+ Offset = 0;
+ return V;
+}
+
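+// A worked trace of the decomposition, as comments; the input expression
+// is illustrative:
+//   GetLinearExpression on "(x << 2) | 3", where the low two bits of
+//   "x << 2" are known zero:
+//     Shl by 2: recurse on x (Scale = 1, Offset = 0), then
+//               Scale <<= 2 -> 4, Offset <<= 2 -> 0
+//     Or with 3: the set bits don't overlap, so it is treated as Add,
+//               Offset += 3 -> 3
+//   Result: V = x with Scale = 4 and Offset = 3, i.e. the value is 4*x + 3.
+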
+/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
+/// into a base pointer with a constant offset and a number of scaled symbolic
+/// offsets.
+///
+/// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
+/// the VarIndices vector) are Value*'s that are known to be scaled by the
+/// specified amount, but which may have other unrepresented high bits. As such,
+/// the gep cannot necessarily be reconstructed from its decomposed form.
+///
+/// When TargetData is around, this function is capable of analyzing everything
+/// that Value::getUnderlyingObject() can look through. When not, it just looks
+/// through pointer casts.
+///
+const Value *llvm::DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
+ SmallVectorImpl<std::pair<const Value*, int64_t> > &VarIndices,
+ const TargetData *TD) {
+ // Limit recursion depth to limit compile time in crazy cases.
+ unsigned MaxLookup = 6;
+
+ BaseOffs = 0;
+ do {
+ // See if this is a bitcast or GEP.
+ const Operator *Op = dyn_cast<Operator>(V);
+ if (Op == 0) {
+ // The only non-operator case we can handle are GlobalAliases.
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ if (!GA->mayBeOverridden()) {
+ V = GA->getAliasee();
+ continue;
+ }
+ }
+ return V;
+ }
+
+ if (Op->getOpcode() == Instruction::BitCast) {
+ V = Op->getOperand(0);
+ continue;
+ }
+
+ const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
+ if (GEPOp == 0)
+ return V;
+
+ // Don't attempt to analyze GEPs over unsized objects.
+ if (!cast<PointerType>(GEPOp->getOperand(0)->getType())
+ ->getElementType()->isSized())
+ return V;
+
+  // If we are lacking TargetData information, we can't compute the offsets of
+ // elements computed by GEPs. However, we can handle bitcast equivalent
+ // GEPs.
+ if (!TD) {
+ if (!GEPOp->hasAllZeroIndices())
+ return V;
+ V = GEPOp->getOperand(0);
+ continue;
+ }
+
+ // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
+ gep_type_iterator GTI = gep_type_begin(GEPOp);
+ for (User::const_op_iterator I = GEPOp->op_begin()+1,
+ E = GEPOp->op_end(); I != E; ++I) {
+ Value *Index = *I;
+ // Compute the (potentially symbolic) offset in bytes for this index.
+ if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
+ // For a struct, add the member offset.
+ unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
+ if (FieldNo == 0) continue;
+
+ BaseOffs += TD->getStructLayout(STy)->getElementOffset(FieldNo);
+ continue;
+ }
+
+ // For an array/pointer, add the element offset, explicitly scaled.
+ if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
+ if (CIdx->isZero()) continue;
+ BaseOffs += TD->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
+ continue;
+ }
+
+ uint64_t Scale = TD->getTypeAllocSize(*GTI);
+
+ // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
+ unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
+ APInt IndexScale(Width, 0), IndexOffset(Width, 0);
+ Index = GetLinearExpression(Index, IndexScale, IndexOffset, TD, 0);
+
+ // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
+ // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
+ BaseOffs += IndexOffset.getZExtValue()*Scale;
+ Scale *= IndexScale.getZExtValue();
+
+      // If we already had an occurrence of this index variable, merge this
+ // scale into it. For example, we want to handle:
+ // A[x][x] -> x*16 + x*4 -> x*20
+ // This also ensures that 'x' only appears in the index list once.
+ for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
+ if (VarIndices[i].first == Index) {
+ Scale += VarIndices[i].second;
+ VarIndices.erase(VarIndices.begin()+i);
+ break;
+ }
+ }
+
+ // Make sure that we have a scale that makes sense for this target's
+ // pointer size.
+ if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
+ Scale <<= ShiftBits;
+ Scale >>= ShiftBits;
+ }
+
+ if (Scale)
+ VarIndices.push_back(std::make_pair(Index, Scale));
+ }
+
+ // Analyze the base pointer next.
+ V = GEPOp->getOperand(0);
+ } while (--MaxLookup);
+
+ // If the chain of expressions is too deep, just return early.
+ return V;
+}
+
+
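
A hedged sketch of a client consuming the new decomposition; the GEP pointer and surrounding setup are assumptions, while the signature and the A[x][x] folding come from the code above:

    // Decompose "&A[x][x]" for a [4 x [4 x i32]] array: rows are 16 bytes
    // and elements 4 bytes, so the two x terms merge into a single scale 20.
    int64_t BaseOffs;
    SmallVector<std::pair<const Value*, int64_t>, 4> VarIndices;
    const Value *Base = DecomposeGEPExpression(GEPPtr, BaseOffs, VarIndices, TD);
    // Expected here: Base == A, BaseOffs == 0, VarIndices == { (x, 20) }.
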
// This is the recursive version of BuildSubAggregate. It takes a few different
// arguments. Idxs is the index within the nested struct From that we are
// looking at now (which is of type IndexedType). IdxSkip is the number of
@@ -959,7 +1146,6 @@ bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
static Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
SmallVector<unsigned, 10> &Idxs,
unsigned IdxSkip,
- LLVMContext &Context,
Instruction *InsertBefore) {
const llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType);
if (STy) {
@@ -971,7 +1157,7 @@ static Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
Idxs.push_back(i);
Value *PrevTo = To;
To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
- Context, InsertBefore);
+ InsertBefore);
Idxs.pop_back();
if (!To) {
// Couldn't find any inserted value for this index? Cleanup
@@ -994,7 +1180,7 @@ static Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
// we might be able to find the complete struct somewhere.
// Find the value that is at that particular spot
- Value *V = FindInsertedValue(From, Idxs.begin(), Idxs.end(), Context);
+ Value *V = FindInsertedValue(From, Idxs.begin(), Idxs.end());
if (!V)
return NULL;
@@ -1017,7 +1203,7 @@ static Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
//
// All inserted insertvalue instructions are inserted before InsertBefore
static Value *BuildSubAggregate(Value *From, const unsigned *idx_begin,
- const unsigned *idx_end, LLVMContext &Context,
+ const unsigned *idx_end,
Instruction *InsertBefore) {
assert(InsertBefore && "Must have someplace to insert!");
const Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
@@ -1027,8 +1213,7 @@ static Value *BuildSubAggregate(Value *From, const unsigned *idx_begin,
SmallVector<unsigned, 10> Idxs(idx_begin, idx_end);
unsigned IdxSkip = Idxs.size();
- return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip,
- Context, InsertBefore);
+ return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
}
/// FindInsertedValue - Given an aggregate and a sequence of indices, see if
@@ -1038,8 +1223,7 @@ static Value *BuildSubAggregate(Value *From, const unsigned *idx_begin,
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *llvm::FindInsertedValue(Value *V, const unsigned *idx_begin,
- const unsigned *idx_end, LLVMContext &Context,
- Instruction *InsertBefore) {
+ const unsigned *idx_end, Instruction *InsertBefore) {
// Nothing to index? Just return V then (this is useful at the end of our
// recursion)
if (idx_begin == idx_end)
@@ -1063,7 +1247,7 @@ Value *llvm::FindInsertedValue(Value *V, const unsigned *idx_begin,
if (isa<ConstantArray>(C) || isa<ConstantStruct>(C))
// Recursively process this constant
return FindInsertedValue(C->getOperand(*idx_begin), idx_begin + 1,
- idx_end, Context, InsertBefore);
+ idx_end, InsertBefore);
} else if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
// Loop the indices for the insertvalue instruction in parallel with the
// requested indices
@@ -1082,8 +1266,7 @@ Value *llvm::FindInsertedValue(Value *V, const unsigned *idx_begin,
// %C = insertvalue {i32, i32 } %A, i32 11, 1
// which allows the unused 0,0 element from the nested struct to be
// removed.
- return BuildSubAggregate(V, idx_begin, req_idx,
- Context, InsertBefore);
+ return BuildSubAggregate(V, idx_begin, req_idx, InsertBefore);
else
// We can't handle this without inserting insertvalues
return 0;
@@ -1094,13 +1277,13 @@ Value *llvm::FindInsertedValue(Value *V, const unsigned *idx_begin,
// looking for, then.
if (*req_idx != *i)
return FindInsertedValue(I->getAggregateOperand(), idx_begin, idx_end,
- Context, InsertBefore);
+ InsertBefore);
}
// If we end up here, the indices of the insertvalue match with those
// requested (though possibly only partially). Now we recursively look at
// the inserted value, passing any remaining indices.
return FindInsertedValue(I->getInsertedValueOperand(), req_idx, idx_end,
- Context, InsertBefore);
+ InsertBefore);
} else if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
// If we're extracting a value from an aggregate that was extracted from
// something else, we can extract from that something else directly instead.
@@ -1124,7 +1307,7 @@ Value *llvm::FindInsertedValue(Value *V, const unsigned *idx_begin,
&& "Number of indices added not correct?");
return FindInsertedValue(I->getAggregateOperand(), Idxs.begin(), Idxs.end(),
- Context, InsertBefore);
+ InsertBefore);
}
// Otherwise, we don't know (such as when extracting from a function return
// value or load instruction)
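
With LLVMContext dropped from the interface, a call now looks like the sketch below; the aggregate value Agg and the index path are illustrative:

    // Look through insertvalue/extractvalue chains for the value stored at
    // index path {1, 0} of Agg; a null InsertBefore forbids the helper from
    // materializing new insertvalue instructions.
    unsigned Idxs[] = { 1, 0 };
    Value *Elt = FindInsertedValue(Agg, Idxs, Idxs + 2, 0);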