Diffstat (limited to 'contrib/llvm/lib/Analysis')
-rw-r--r--  contrib/llvm/lib/Analysis/AliasAnalysis.cpp            |   10
-rw-r--r--  contrib/llvm/lib/Analysis/AliasSetTracker.cpp          |    6
-rw-r--r--  contrib/llvm/lib/Analysis/Analysis.cpp                 |    4
-rw-r--r--  contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp       |  170
-rw-r--r--  contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp    |  134
-rw-r--r--  contrib/llvm/lib/Analysis/CaptureTracking.cpp          |    4
-rw-r--r--  contrib/llvm/lib/Analysis/CodeMetrics.cpp              |   10
-rw-r--r--  contrib/llvm/lib/Analysis/ConstantFolding.cpp          |  265
-rw-r--r--  contrib/llvm/lib/Analysis/CostModel.cpp                |  193
-rw-r--r--  contrib/llvm/lib/Analysis/DependenceAnalysis.cpp       | 3786
-rw-r--r--  contrib/llvm/lib/Analysis/DominanceFrontier.cpp        |    2
-rw-r--r--  contrib/llvm/lib/Analysis/IPA/CallGraph.cpp            |   13
-rw-r--r--  contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp        |    6
-rw-r--r--  contrib/llvm/lib/Analysis/IVUsers.cpp                  |    6
-rw-r--r--  contrib/llvm/lib/Analysis/InlineCost.cpp               |   91
-rw-r--r--  contrib/llvm/lib/Analysis/InstructionSimplify.cpp      |   89
-rw-r--r--  contrib/llvm/lib/Analysis/LazyValueInfo.cpp            |   52
-rw-r--r--  contrib/llvm/lib/Analysis/Lint.cpp                     |   56
-rw-r--r--  contrib/llvm/lib/Analysis/Loads.cpp                    |    8
-rw-r--r--  contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp   |  362
-rw-r--r--  contrib/llvm/lib/Analysis/LoopInfo.cpp                 |   11
-rw-r--r--  contrib/llvm/lib/Analysis/MemoryBuiltins.cpp           |  184
-rw-r--r--  contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp |   30
-rw-r--r--  contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp          |    4
-rw-r--r--  contrib/llvm/lib/Analysis/PHITransAddr.cpp             |    2
-rw-r--r--  contrib/llvm/lib/Analysis/ProfileDataLoader.cpp        |  155
-rw-r--r--  contrib/llvm/lib/Analysis/ProfileDataLoaderPass.cpp    |  188
-rw-r--r--  contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp     |    2
-rw-r--r--  contrib/llvm/lib/Analysis/ProfileInfo.cpp              |   26
-rw-r--r--  contrib/llvm/lib/Analysis/RegionInfo.cpp               |   26
-rw-r--r--  contrib/llvm/lib/Analysis/RegionPass.cpp                |    5
-rw-r--r--  contrib/llvm/lib/Analysis/ScalarEvolution.cpp          |  116
-rw-r--r--  contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp  |   23
-rw-r--r--  contrib/llvm/lib/Analysis/Trace.cpp                    |    2
-rw-r--r--  contrib/llvm/lib/Analysis/ValueTracking.cpp            |   61
35 files changed, 5210 insertions(+), 892 deletions(-)
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysis.cpp b/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
index 3b6aab1..752edd5 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -35,7 +35,8 @@
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
+#include "llvm/Target/TargetLibraryInfo.h"
using namespace llvm;
// Register the AliasAnalysis interface, providing a nice name to refer to.
@@ -451,7 +452,8 @@ AliasAnalysis::~AliasAnalysis() {}
/// AliasAnalysis interface before any other methods are called.
///
void AliasAnalysis::InitializeAliasAnalysis(Pass *P) {
- TD = P->getAnalysisIfAvailable<TargetData>();
+ TD = P->getAnalysisIfAvailable<DataLayout>();
+ TLI = P->getAnalysisIfAvailable<TargetLibraryInfo>();
AA = &P->getAnalysis<AliasAnalysis>();
}
@@ -461,7 +463,7 @@ void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>(); // All AA's chain
}
-/// getTypeStoreSize - Return the TargetData store size for the given type,
+/// getTypeStoreSize - Return the DataLayout store size for the given type,
/// if known, or a conservative value otherwise.
///
uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
@@ -501,7 +503,7 @@ bool AliasAnalysis::canInstructionRangeModify(const Instruction &I1,
bool llvm::isNoAliasCall(const Value *V) {
if (isa<CallInst>(V) || isa<InvokeInst>(V))
return ImmutableCallSite(cast<Instruction>(V))
- .paramHasAttr(0, Attribute::NoAlias);
+ .paramHasAttr(0, Attributes::NoAlias);
return false;
}
diff --git a/contrib/llvm/lib/Analysis/AliasSetTracker.cpp b/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
index 92e8906..388c755 100644
--- a/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
+++ b/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
@@ -18,7 +18,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/Type.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -550,7 +550,7 @@ void AliasSetTracker::copyValue(Value *From, Value *To) {
//===----------------------------------------------------------------------===//
void AliasSet::print(raw_ostream &OS) const {
- OS << " AliasSet[" << (void*)this << ", " << RefCount << "] ";
+ OS << " AliasSet[" << (const void*)this << ", " << RefCount << "] ";
OS << (AliasTy == MustAlias ? "must" : "may") << " alias, ";
switch (AccessTy) {
case NoModRef: OS << "No access "; break;
@@ -590,8 +590,10 @@ void AliasSetTracker::print(raw_ostream &OS) const {
OS << "\n";
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void AliasSet::dump() const { print(dbgs()); }
void AliasSetTracker::dump() const { print(dbgs()); }
+#endif
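
The guard above compiles the dump() entry points only into builds that want them, while print() stays available everywhere. A minimal sketch of the idiom, assuming only the standard library (AliasSetLike is an illustrative stand-in, not LLVM's class):

    #include <iostream>

    struct AliasSetLike {
      void print(std::ostream &OS) const { OS << "AliasSet[...]\n"; }
    #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
      // Only present in asserts builds, or when dump methods are
      // explicitly requested; meant to be called from a debugger.
      void dump() const { print(std::cerr); }
    #endif
    };

    int main() {
      AliasSetLike AS;
      AS.print(std::cout);   // always available, in every build
      return 0;
    }
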
//===----------------------------------------------------------------------===//
// ASTCallbackVH Class Implementation
diff --git a/contrib/llvm/lib/Analysis/Analysis.cpp b/contrib/llvm/lib/Analysis/Analysis.cpp
index 0ba6af9..9dc81a6 100644
--- a/contrib/llvm/lib/Analysis/Analysis.cpp
+++ b/contrib/llvm/lib/Analysis/Analysis.cpp
@@ -26,11 +26,13 @@ void llvm::initializeAnalysis(PassRegistry &Registry) {
initializeBasicAliasAnalysisPass(Registry);
initializeBlockFrequencyInfoPass(Registry);
initializeBranchProbabilityInfoPass(Registry);
+ initializeCostModelAnalysisPass(Registry);
initializeCFGViewerPass(Registry);
initializeCFGPrinterPass(Registry);
initializeCFGOnlyViewerPass(Registry);
initializeCFGOnlyPrinterPass(Registry);
initializePrintDbgInfoPass(Registry);
+ initializeDependenceAnalysisPass(Registry);
initializeDominanceFrontierPass(Registry);
initializeDomViewerPass(Registry);
initializeDomPrinterPass(Registry);
@@ -46,7 +48,6 @@ void llvm::initializeAnalysis(PassRegistry &Registry) {
initializeLazyValueInfoPass(Registry);
initializeLibCallAliasAnalysisPass(Registry);
initializeLintPass(Registry);
- initializeLoopDependenceAnalysisPass(Registry);
initializeLoopInfoPass(Registry);
initializeMemDepPrinterPass(Registry);
initializeMemoryDependenceAnalysisPass(Registry);
@@ -61,6 +62,7 @@ void llvm::initializeAnalysis(PassRegistry &Registry) {
initializePathProfileLoaderPassPass(Registry);
initializeProfileVerifierPassPass(Registry);
initializePathProfileVerifierPass(Registry);
+ initializeProfileMetadataLoaderPassPass(Registry);
initializeRegionInfoPass(Registry);
initializeRegionViewerPass(Registry);
initializeRegionPrinterPass(Registry);
diff --git a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 1d028c2..4bb93ee 100644
--- a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -29,7 +29,7 @@
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -58,12 +58,12 @@ static bool isNonEscapingLocalObject(const Value *V) {
// then it has not escaped before entering the function. Check if it escapes
// inside the function.
if (const Argument *A = dyn_cast<Argument>(V))
- if (A->hasByValAttr() || A->hasNoAliasAttr()) {
- // Don't bother analyzing arguments already known not to escape.
- if (A->hasNoCaptureAttr())
- return true;
+ if (A->hasByValAttr() || A->hasNoAliasAttr())
+ // Note even if the argument is marked nocapture we still need to check
+ // for copies made inside the function. The nocapture attribute only
+ // specifies that there are no copies made that outlive the function.
return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
- }
+
return false;
}
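
The rewritten comment is the heart of this change: nocapture only promises that no copy of the pointer outlives the call, so the escape check must still run to catch transient copies made inside the function. A tiny illustration, with the attributes stated as assumptions in the comment:

    // Hypothetical function whose parameter carries noalias nocapture.
    void f(int *p) {
      int *q = p;   // transient copy: perfectly legal under nocapture
      *q = 1;       // q aliases p for the duration of the call
    }

    int main() { int x = 0; f(&x); return 0; }
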
@@ -84,10 +84,11 @@ static bool isEscapeSource(const Value *V) {
/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
-static uint64_t getObjectSize(const Value *V, const TargetData &TD,
+static uint64_t getObjectSize(const Value *V, const DataLayout &TD,
+ const TargetLibraryInfo &TLI,
bool RoundToAlign = false) {
uint64_t Size;
- if (getObjectSize(V, Size, &TD, RoundToAlign))
+ if (getObjectSize(V, Size, &TD, &TLI, RoundToAlign))
return Size;
return AliasAnalysis::UnknownSize;
}
@@ -95,10 +96,11 @@ static uint64_t getObjectSize(const Value *V, const TargetData &TD,
/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
- const TargetData &TD) {
+ const DataLayout &TD,
+ const TargetLibraryInfo &TLI) {
// This function needs to use the aligned object size because we allow
// reads a bit past the end given sufficient alignment.
- uint64_t ObjectSize = getObjectSize(V, TD, /*RoundToAlign*/true);
+ uint64_t ObjectSize = getObjectSize(V, TD, TLI, /*RoundToAlign*/true);
return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
}
@@ -106,8 +108,8 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
/// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size,
- const TargetData &TD) {
- uint64_t ObjectSize = getObjectSize(V, TD);
+ const DataLayout &TD, const TargetLibraryInfo &TLI) {
+ uint64_t ObjectSize = getObjectSize(V, TD, TLI);
return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
}
@@ -126,6 +128,15 @@ namespace {
const Value *V;
ExtensionKind Extension;
int64_t Scale;
+
+ bool operator==(const VariableGEPIndex &Other) const {
+ return V == Other.V && Extension == Other.Extension &&
+ Scale == Other.Scale;
+ }
+
+ bool operator!=(const VariableGEPIndex &Other) const {
+ return !operator==(Other);
+ }
};
}
@@ -140,7 +151,7 @@ namespace {
/// represented in the result.
static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
ExtensionKind &Extension,
- const TargetData &TD, unsigned Depth) {
+ const DataLayout &TD, unsigned Depth) {
assert(V->getType()->isIntegerTy() && "Not an integer value");
// Limit our recursion depth.
@@ -215,14 +226,14 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
/// specified amount, but which may have other unrepresented high bits. As such,
/// the gep cannot necessarily be reconstructed from its decomposed form.
///
-/// When TargetData is around, this function is capable of analyzing everything
+/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. When not, it just looks
/// through pointer casts.
///
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
SmallVectorImpl<VariableGEPIndex> &VarIndices,
- const TargetData *TD) {
+ const DataLayout *TD) {
// Limit recursion depth to limit compile time in crazy cases.
unsigned MaxLookup = 6;
@@ -266,7 +277,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
->getElementType()->isSized())
return V;
- // If we are lacking TargetData information, we can't compute the offets of
+ // If we are lacking DataLayout information, we can't compute the offets of
// elements computed by GEPs. However, we can handle bitcast equivalent
// GEPs.
if (TD == 0) {
@@ -417,13 +428,7 @@ namespace {
/// BasicAliasAnalysis - This is the primary alias analysis implementation.
struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis {
static char ID; // Class identification, replacement for typeinfo
- BasicAliasAnalysis() : ImmutablePass(ID),
- // AliasCache rarely has more than 1 or 2 elements,
- // so start it off fairly small so that clear()
- // doesn't have to tromp through 64 (the default)
- // elements on each alias query. This really wants
- // something like a SmallDenseMap.
- AliasCache(8) {
+ BasicAliasAnalysis() : ImmutablePass(ID) {
initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
@@ -443,7 +448,11 @@ namespace {
"BasicAliasAnalysis doesn't support interprocedural queries.");
AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.TBAATag,
LocB.Ptr, LocB.Size, LocB.TBAATag);
- AliasCache.clear();
+ // AliasCache rarely has more than 1 or 2 elements, always use
+ // shrink_and_clear so it quickly returns to the inline capacity of the
+ // SmallDenseMap if it ever grows larger.
+ // FIXME: This should really be shrink_to_inline_capacity_and_clear().
+ AliasCache.shrink_and_clear();
return Alias;
}
@@ -481,7 +490,7 @@ namespace {
private:
// AliasCache - Track alias queries to guard against recursion.
typedef std::pair<Location, Location> LocPair;
- typedef DenseMap<LocPair, AliasResult> AliasCacheTy;
+ typedef SmallDenseMap<LocPair, AliasResult, 8> AliasCacheTy;
AliasCacheTy AliasCache;
// Visited - Track instructions visited by pointsToConstantMemory.
@@ -490,6 +499,7 @@ namespace {
// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
// instruction against another.
AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
+ const MDNode *V1TBAAInfo,
const Value *V2, uint64_t V2Size,
const MDNode *V2TBAAInfo,
const Value *UnderlyingV1, const Value *UnderlyingV2);
@@ -807,6 +817,21 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
return ModRefResult(AliasAnalysis::getModRefInfo(CS, Loc) & Min);
}
+static bool areVarIndicesEqual(SmallVector<VariableGEPIndex, 4> &Indices1,
+ SmallVector<VariableGEPIndex, 4> &Indices2) {
+ unsigned Size1 = Indices1.size();
+ unsigned Size2 = Indices2.size();
+
+ if (Size1 != Size2)
+ return false;
+
+ for (unsigned I = 0; I != Size1; ++I)
+ if (Indices1[I] != Indices2[I])
+ return false;
+
+ return true;
+}
+
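
With operator== now defined on VariableGEPIndex, the helper above is the usual elementwise vector comparison. A minimal model showing the equivalence (VarIndex is an illustrative stand-in, not the LLVM struct):

    #include <cassert>
    #include <vector>

    struct VarIndex {
      const void *V; int Ext; long long Scale;
      bool operator==(const VarIndex &O) const {
        return V == O.V && Ext == O.Ext && Scale == O.Scale;
      }
    };

    int main() {
      std::vector<VarIndex> A{{nullptr, 0, 4}}, B{{nullptr, 0, 4}};
      assert(A == B);   // same answer as the hand-written loop
      return 0;
    }
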
/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, TD),
@@ -814,6 +839,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
///
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
+ const MDNode *V1TBAAInfo,
const Value *V2, uint64_t V2Size,
const MDNode *V2TBAAInfo,
const Value *UnderlyingV1,
@@ -821,9 +847,41 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
int64_t GEP1BaseOffset;
SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;
- // If we have two gep instructions with must-alias'ing base pointers, figure
- // out if the indexes to the GEP tell us anything about the derived pointer.
+ // If we have two gep instructions with must-alias or not-alias'ing base
+ // pointers, figure out if the indexes to the GEP tell us anything about the
+ // derived pointer.
if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
+ // Check for geps of non-aliasing underlying pointers where the offsets are
+ // identical.
+ if (V1Size == V2Size) {
+ // Do the base pointers alias assuming type and size.
+ AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size,
+ V1TBAAInfo, UnderlyingV2,
+ V2Size, V2TBAAInfo);
+ if (PreciseBaseAlias == NoAlias) {
+ // See if the computed offset from the common pointer tells us about the
+ // relation of the resulting pointer.
+ int64_t GEP2BaseOffset;
+ SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
+ const Value *GEP2BasePtr =
+ DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
+ const Value *GEP1BasePtr =
+ DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+ // DecomposeGEPExpression and GetUnderlyingObject should return the
+ // same result except when DecomposeGEPExpression has no DataLayout.
+ if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
+ assert(TD == 0 &&
+ "DecomposeGEPExpression and GetUnderlyingObject disagree!");
+ return MayAlias;
+ }
+ // Same offsets.
+ if (GEP1BaseOffset == GEP2BaseOffset &&
+ areVarIndicesEqual(GEP1VariableIndices, GEP2VariableIndices))
+ return NoAlias;
+ GEP1VariableIndices.clear();
+ }
+ }
+
// Do the base pointers alias?
AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, 0,
UnderlyingV2, UnknownSize, 0);
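
The new early-out formalizes a simple fact: adding identical offsets to base pointers that are known not to alias yields derived pointers that do not alias either. A hedged source-level sketch of the situation being caught:

    #include <cassert>

    int A[8], B[8];   // two objects whose base addresses cannot alias

    int main() {
      for (int i = 0; i != 8; ++i)
        assert(&A[i] != &B[i]);   // same offset, distinct bases
      return 0;
    }
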
@@ -843,9 +901,8 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
const Value *GEP2BasePtr =
DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
- // If DecomposeGEPExpression isn't able to look all the way through the
- // addressing operation, we must not have TD and this is too complex for us
- // to handle without it.
+ // DecomposeGEPExpression and GetUnderlyingObject should return the
+ // same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
assert(TD == 0 &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
@@ -879,9 +936,8 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
const Value *GEP1BasePtr =
DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
- // If DecomposeGEPExpression isn't able to look all the way through the
- // addressing operation, we must not have TD and this is too complex for us
- // to handle without it.
+ // DecomposeGEPExpression and GetUnderlyingObject should return the
+ // same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1) {
assert(TD == 0 &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
@@ -1004,12 +1060,42 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
// on corresponding edges.
if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
if (PN2->getParent() == PN->getParent()) {
+ LocPair Locs(Location(PN, PNSize, PNTBAAInfo),
+ Location(V2, V2Size, V2TBAAInfo));
+ if (PN > V2)
+ std::swap(Locs.first, Locs.second);
+
AliasResult Alias =
aliasCheck(PN->getIncomingValue(0), PNSize, PNTBAAInfo,
PN2->getIncomingValueForBlock(PN->getIncomingBlock(0)),
V2Size, V2TBAAInfo);
if (Alias == MayAlias)
return MayAlias;
+
+ // If the first source of the PHI nodes is NoAlias and the other inputs
+ // are the PHI node itself through some amount of recursion, this does
+ // not add any new information, so just return NoAlias.
+ // bb:
+ // ptr = ptr2 + 1
+ // loop:
+ // ptr_phi = phi [bb, ptr], [loop, ptr_plus_one]
+ // ptr2_phi = phi [bb, ptr2], [loop, ptr2_plus_one]
+ // ...
+ // ptr_plus_one = gep ptr_phi, 1
+ // ptr2_plus_one = gep ptr2_phi, 1
+ // We assume for the recursion that the phis (ptr_phi, ptr2_phi) do
+ // not alias each other.
+ bool ArePhisAssumedNoAlias = false;
+ AliasResult OrigAliasResult = NoAlias;
+ if (Alias == NoAlias) {
+ // Pretend the phis do not alias.
+ assert(AliasCache.count(Locs) &&
+ "There must exist an entry for the phi node");
+ OrigAliasResult = AliasCache[Locs];
+ AliasCache[Locs] = NoAlias;
+ ArePhisAssumedNoAlias = true;
+ }
+
for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
AliasResult ThisAlias =
aliasCheck(PN->getIncomingValue(i), PNSize, PNTBAAInfo,
@@ -1019,6 +1105,11 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
if (Alias == MayAlias)
break;
}
+
+ // Reset if speculation failed.
+ if (ArePhisAssumedNoAlias && Alias != NoAlias)
+ AliasCache[Locs] = OrigAliasResult;
+
return Alias;
}
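
The speculation in this hunk is worth spelling out: the query pair is temporarily cached as NoAlias so that self-referential queries through the loop-carried phis answer NoAlias, and the original cache entry is restored if any later input disproves the assumption. A standalone sketch of that pattern, with all names and types illustrative rather than LLVM's:

    #include <cassert>
    #include <map>
    #include <utility>

    enum Result { NoAlias, MayAlias };
    typedef std::pair<int, int> Key;
    static std::map<Key, Result> Cache;

    Result analyzeRemainingInputs(Key K) {
      // A self-referential query now sees the speculative entry.
      return Cache.count(K) ? Cache[K] : MayAlias;
    }

    Result query(Key K) {
      bool HadEntry = Cache.count(K) != 0;
      Result Orig = HadEntry ? Cache[K] : MayAlias;
      Cache[K] = NoAlias;                 // pretend the phis do not alias
      Result R = analyzeRemainingInputs(K);
      if (R != NoAlias && HadEntry)
        Cache[K] = Orig;                  // reset if speculation failed
      return R;
    }

    int main() { assert(query(Key(1, 2)) == NoAlias); return 0; }
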
@@ -1133,8 +1224,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
// If the size of one access is larger than the entire object on the other
// side, then we know such behavior is undefined and can assume no alias.
if (TD)
- if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD)) ||
- (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD)))
+ if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD, *TLI)) ||
+ (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD, *TLI)))
return NoAlias;
// Check the cache before climbing up use-def chains. This also terminates
@@ -1154,15 +1245,17 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
std::swap(V1, V2);
std::swap(V1Size, V2Size);
std::swap(O1, O2);
+ std::swap(V1TBAAInfo, V2TBAAInfo);
}
if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
- AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, V2TBAAInfo, O1, O2);
+ AliasResult Result = aliasGEP(GV1, V1Size, V1TBAAInfo, V2, V2Size, V2TBAAInfo, O1, O2);
if (Result != MayAlias) return AliasCache[Locs] = Result;
}
if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
std::swap(V1, V2);
std::swap(V1Size, V2Size);
+ std::swap(V1TBAAInfo, V2TBAAInfo);
}
if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
AliasResult Result = aliasPHI(PN, V1Size, V1TBAAInfo,
@@ -1173,6 +1266,7 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
std::swap(V1, V2);
std::swap(V1Size, V2Size);
+ std::swap(V1TBAAInfo, V2TBAAInfo);
}
if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
AliasResult Result = aliasSelect(S1, V1Size, V1TBAAInfo,
@@ -1184,8 +1278,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
// accesses is accessing the entire object, then the accesses must
// overlap in some way.
if (TD && O1 == O2)
- if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD)) ||
- (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD)))
+ if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD, *TLI)) ||
+ (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD, *TLI)))
return AliasCache[Locs] = PartialAlias;
AliasResult Result =
diff --git a/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp b/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
index b255ce6..04a6560 100644
--- a/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
@@ -115,14 +115,14 @@ bool BranchProbabilityInfo::calcUnreachableHeuristics(BasicBlock *BB) {
return false;
}
- SmallPtrSet<BasicBlock *, 4> UnreachableEdges;
- SmallPtrSet<BasicBlock *, 4> ReachableEdges;
+ SmallVector<unsigned, 4> UnreachableEdges;
+ SmallVector<unsigned, 4> ReachableEdges;
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
if (PostDominatedByUnreachable.count(*I))
- UnreachableEdges.insert(*I);
+ UnreachableEdges.push_back(I.getSuccessorIndex());
else
- ReachableEdges.insert(*I);
+ ReachableEdges.push_back(I.getSuccessorIndex());
}
// If all successors are in the set of blocks post-dominated by unreachable,
@@ -136,18 +136,19 @@ bool BranchProbabilityInfo::calcUnreachableHeuristics(BasicBlock *BB) {
return false;
uint32_t UnreachableWeight =
- std::max(UR_TAKEN_WEIGHT / UnreachableEdges.size(), MIN_WEIGHT);
- for (SmallPtrSet<BasicBlock *, 4>::iterator I = UnreachableEdges.begin(),
- E = UnreachableEdges.end();
+ std::max(UR_TAKEN_WEIGHT / (unsigned)UnreachableEdges.size(), MIN_WEIGHT);
+ for (SmallVector<unsigned, 4>::iterator I = UnreachableEdges.begin(),
+ E = UnreachableEdges.end();
I != E; ++I)
setEdgeWeight(BB, *I, UnreachableWeight);
if (ReachableEdges.empty())
return true;
uint32_t ReachableWeight =
- std::max(UR_NONTAKEN_WEIGHT / ReachableEdges.size(), NORMAL_WEIGHT);
- for (SmallPtrSet<BasicBlock *, 4>::iterator I = ReachableEdges.begin(),
- E = ReachableEdges.end();
+ std::max(UR_NONTAKEN_WEIGHT / (unsigned)ReachableEdges.size(),
+ NORMAL_WEIGHT);
+ for (SmallVector<unsigned, 4>::iterator I = ReachableEdges.begin(),
+ E = ReachableEdges.end();
I != E; ++I)
setEdgeWeight(BB, *I, ReachableWeight);
@@ -187,7 +188,7 @@ bool BranchProbabilityInfo::calcMetadataWeights(BasicBlock *BB) {
}
assert(Weights.size() == TI->getNumSuccessors() && "Checked above");
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
- setEdgeWeight(BB, TI->getSuccessor(i), Weights[i]);
+ setEdgeWeight(BB, i, Weights[i]);
return true;
}
@@ -211,19 +212,17 @@ bool BranchProbabilityInfo::calcPointerHeuristics(BasicBlock *BB) {
assert(CI->getOperand(1)->getType()->isPointerTy());
- BasicBlock *Taken = BI->getSuccessor(0);
- BasicBlock *NonTaken = BI->getSuccessor(1);
-
// p != 0 -> isProb = true
// p == 0 -> isProb = false
// p != q -> isProb = true
// p == q -> isProb = false;
+ unsigned TakenIdx = 0, NonTakenIdx = 1;
bool isProb = CI->getPredicate() == ICmpInst::ICMP_NE;
if (!isProb)
- std::swap(Taken, NonTaken);
+ std::swap(TakenIdx, NonTakenIdx);
- setEdgeWeight(BB, Taken, PH_TAKEN_WEIGHT);
- setEdgeWeight(BB, NonTaken, PH_NONTAKEN_WEIGHT);
+ setEdgeWeight(BB, TakenIdx, PH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, NonTakenIdx, PH_NONTAKEN_WEIGHT);
return true;
}
@@ -234,17 +233,17 @@ bool BranchProbabilityInfo::calcLoopBranchHeuristics(BasicBlock *BB) {
if (!L)
return false;
- SmallPtrSet<BasicBlock *, 8> BackEdges;
- SmallPtrSet<BasicBlock *, 8> ExitingEdges;
- SmallPtrSet<BasicBlock *, 8> InEdges; // Edges from header to the loop.
+ SmallVector<unsigned, 8> BackEdges;
+ SmallVector<unsigned, 8> ExitingEdges;
+ SmallVector<unsigned, 8> InEdges; // Edges from header to the loop.
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
if (!L->contains(*I))
- ExitingEdges.insert(*I);
+ ExitingEdges.push_back(I.getSuccessorIndex());
else if (L->getHeader() == *I)
- BackEdges.insert(*I);
+ BackEdges.push_back(I.getSuccessorIndex());
else
- InEdges.insert(*I);
+ InEdges.push_back(I.getSuccessorIndex());
}
if (uint32_t numBackEdges = BackEdges.size()) {
@@ -252,10 +251,9 @@ bool BranchProbabilityInfo::calcLoopBranchHeuristics(BasicBlock *BB) {
if (backWeight < NORMAL_WEIGHT)
backWeight = NORMAL_WEIGHT;
- for (SmallPtrSet<BasicBlock *, 8>::iterator EI = BackEdges.begin(),
+ for (SmallVector<unsigned, 8>::iterator EI = BackEdges.begin(),
EE = BackEdges.end(); EI != EE; ++EI) {
- BasicBlock *Back = *EI;
- setEdgeWeight(BB, Back, backWeight);
+ setEdgeWeight(BB, *EI, backWeight);
}
}
@@ -264,10 +262,9 @@ bool BranchProbabilityInfo::calcLoopBranchHeuristics(BasicBlock *BB) {
if (inWeight < NORMAL_WEIGHT)
inWeight = NORMAL_WEIGHT;
- for (SmallPtrSet<BasicBlock *, 8>::iterator EI = InEdges.begin(),
+ for (SmallVector<unsigned, 8>::iterator EI = InEdges.begin(),
EE = InEdges.end(); EI != EE; ++EI) {
- BasicBlock *Back = *EI;
- setEdgeWeight(BB, Back, inWeight);
+ setEdgeWeight(BB, *EI, inWeight);
}
}
@@ -276,10 +273,9 @@ bool BranchProbabilityInfo::calcLoopBranchHeuristics(BasicBlock *BB) {
if (exitWeight < MIN_WEIGHT)
exitWeight = MIN_WEIGHT;
- for (SmallPtrSet<BasicBlock *, 8>::iterator EI = ExitingEdges.begin(),
+ for (SmallVector<unsigned, 8>::iterator EI = ExitingEdges.begin(),
EE = ExitingEdges.end(); EI != EE; ++EI) {
- BasicBlock *Exiting = *EI;
- setEdgeWeight(BB, Exiting, exitWeight);
+ setEdgeWeight(BB, *EI, exitWeight);
}
}
@@ -335,14 +331,13 @@ bool BranchProbabilityInfo::calcZeroHeuristics(BasicBlock *BB) {
return false;
}
- BasicBlock *Taken = BI->getSuccessor(0);
- BasicBlock *NonTaken = BI->getSuccessor(1);
+ unsigned TakenIdx = 0, NonTakenIdx = 1;
if (!isProb)
- std::swap(Taken, NonTaken);
+ std::swap(TakenIdx, NonTakenIdx);
- setEdgeWeight(BB, Taken, ZH_TAKEN_WEIGHT);
- setEdgeWeight(BB, NonTaken, ZH_NONTAKEN_WEIGHT);
+ setEdgeWeight(BB, TakenIdx, ZH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, NonTakenIdx, ZH_NONTAKEN_WEIGHT);
return true;
}
@@ -372,14 +367,13 @@ bool BranchProbabilityInfo::calcFloatingPointHeuristics(BasicBlock *BB) {
return false;
}
- BasicBlock *Taken = BI->getSuccessor(0);
- BasicBlock *NonTaken = BI->getSuccessor(1);
+ unsigned TakenIdx = 0, NonTakenIdx = 1;
if (!isProb)
- std::swap(Taken, NonTaken);
+ std::swap(TakenIdx, NonTakenIdx);
- setEdgeWeight(BB, Taken, FPH_TAKEN_WEIGHT);
- setEdgeWeight(BB, NonTaken, FPH_NONTAKEN_WEIGHT);
+ setEdgeWeight(BB, TakenIdx, FPH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, NonTakenIdx, FPH_NONTAKEN_WEIGHT);
return true;
}
@@ -389,11 +383,8 @@ bool BranchProbabilityInfo::calcInvokeHeuristics(BasicBlock *BB) {
if (!II)
return false;
- BasicBlock *Normal = II->getNormalDest();
- BasicBlock *Unwind = II->getUnwindDest();
-
- setEdgeWeight(BB, Normal, IH_TAKEN_WEIGHT);
- setEdgeWeight(BB, Unwind, IH_NONTAKEN_WEIGHT);
+ setEdgeWeight(BB, 0/*Index for Normal*/, IH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, 1/*Index for Unwind*/, IH_NONTAKEN_WEIGHT);
return true;
}
@@ -450,8 +441,7 @@ uint32_t BranchProbabilityInfo::getSumForBlock(const BasicBlock *BB) const {
uint32_t Sum = 0;
for (succ_const_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
- const BasicBlock *Succ = *I;
- uint32_t Weight = getEdgeWeight(BB, Succ);
+ uint32_t Weight = getEdgeWeight(BB, I.getSuccessorIndex());
uint32_t PrevSum = Sum;
Sum += Weight;
@@ -494,11 +484,13 @@ BasicBlock *BranchProbabilityInfo::getHotSucc(BasicBlock *BB) const {
return 0;
}
-// Return edge's weight. If can't find it, return DEFAULT_WEIGHT value.
+/// Get the raw edge weight for the edge. If can't find it, return
+/// DEFAULT_WEIGHT value. Here an edge is specified using PredBlock and an index
+/// to the successors.
uint32_t BranchProbabilityInfo::
-getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const {
- Edge E(Src, Dst);
- DenseMap<Edge, uint32_t>::const_iterator I = Weights.find(E);
+getEdgeWeight(const BasicBlock *Src, unsigned IndexInSuccessors) const {
+ DenseMap<Edge, uint32_t>::const_iterator I =
+ Weights.find(std::make_pair(Src, IndexInSuccessors));
if (I != Weights.end())
return I->second;
@@ -506,15 +498,43 @@ getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const {
return DEFAULT_WEIGHT;
}
+/// Get the raw edge weight calculated for the block pair. This returns the sum
+/// of all raw edge weights from Src to Dst.
+uint32_t BranchProbabilityInfo::
+getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const {
+ uint32_t Weight = 0;
+ DenseMap<Edge, uint32_t>::const_iterator MapI;
+ for (succ_const_iterator I = succ_begin(Src), E = succ_end(Src); I != E; ++I)
+ if (*I == Dst) {
+ MapI = Weights.find(std::make_pair(Src, I.getSuccessorIndex()));
+ if (MapI != Weights.end())
+ Weight += MapI->second;
+ }
+ return (Weight == 0) ? DEFAULT_WEIGHT : Weight;
+}
+
+/// Set the edge weight for a given edge specified by PredBlock and an index
+/// to the successors.
void BranchProbabilityInfo::
-setEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst, uint32_t Weight) {
- Weights[std::make_pair(Src, Dst)] = Weight;
+setEdgeWeight(const BasicBlock *Src, unsigned IndexInSuccessors,
+ uint32_t Weight) {
+ Weights[std::make_pair(Src, IndexInSuccessors)] = Weight;
DEBUG(dbgs() << "set edge " << Src->getName() << " -> "
- << Dst->getName() << " weight to " << Weight
- << (isEdgeHot(Src, Dst) ? " [is HOT now]\n" : "\n"));
+ << IndexInSuccessors << " successor weight to "
+ << Weight << "\n");
}
+/// Get an edge's probability, relative to other out-edges from Src.
+BranchProbability BranchProbabilityInfo::
+getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const {
+ uint32_t N = getEdgeWeight(Src, IndexInSuccessors);
+ uint32_t D = getSumForBlock(Src);
+
+ return BranchProbability(N, D);
+}
+/// Get the probability of going from Src to Dst. It returns the sum of all
+/// probabilities for edges from Src to Dst.
BranchProbability BranchProbabilityInfo::
getEdgeProbability(const BasicBlock *Src, const BasicBlock *Dst) const {
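
The switch from (Src, Dst) keys to (Src, successor index) keys matters for terminators with parallel edges: a switch with two cases branching to the same block previously collapsed into one map entry, while index keys give each edge its own weight and let the block-pair query recover the old behavior by summing. A small model of that accounting, assuming nothing beyond the standard library:

    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <utility>
    #include <vector>

    typedef int Block;
    static std::map<std::pair<Block, unsigned>, uint32_t> Weights;
    static std::vector<Block> Succs;   // successors of one source block

    uint32_t edgeWeight(Block Src, Block Dst) {
      uint32_t Sum = 0;
      for (unsigned i = 0; i != Succs.size(); ++i)
        if (Succs[i] == Dst)
          Sum += Weights[std::make_pair(Src, i)];  // one entry per edge
      return Sum;
    }

    int main() {
      Succs.push_back(7); Succs.push_back(7); Succs.push_back(9);
      Weights[std::make_pair(0, 0)] = 10;  // two parallel edges to block 7
      Weights[std::make_pair(0, 1)] = 20;  // keep distinct weights...
      assert(edgeWeight(0, 7) == 30);      // ...and pair queries sum them
      return 0;
    }
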
diff --git a/contrib/llvm/lib/Analysis/CaptureTracking.cpp b/contrib/llvm/lib/Analysis/CaptureTracking.cpp
index 974b906..d9c0299 100644
--- a/contrib/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/contrib/llvm/lib/Analysis/CaptureTracking.cpp
@@ -23,6 +23,8 @@ using namespace llvm;
CaptureTracker::~CaptureTracker() {}
+bool CaptureTracker::shouldExplore(Use *U) { return true; }
+
namespace {
struct SimpleCaptureTracker : public CaptureTracker {
explicit SimpleCaptureTracker(bool ReturnCaptures)
@@ -30,8 +32,6 @@ namespace {
void tooManyUses() { Captured = true; }
- bool shouldExplore(Use *U) { return true; }
-
bool captured(Use *U) {
if (isa<ReturnInst>(U->getUser()) && !ReturnCaptures)
return false;
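
Moving shouldExplore into an out-of-line base-class definition lets trackers that want the old explore-every-use behavior, such as SimpleCaptureTracker here, drop their overrides. The shape of the pattern, with stand-in types rather than the LLVM classes:

    #include <cassert>

    struct TrackerBase {
      virtual ~TrackerBase() {}
      virtual bool shouldExplore(void *U);    // defaulted in the base
      virtual bool captured(void *U) = 0;
    };
    bool TrackerBase::shouldExplore(void *) { return true; }

    struct SimpleTracker : TrackerBase {
      bool captured(void *) { return true; }  // inherits shouldExplore
    };

    int main() {
      SimpleTracker T;
      assert(T.shouldExplore(0));
      return 0;
    }
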
diff --git a/contrib/llvm/lib/Analysis/CodeMetrics.cpp b/contrib/llvm/lib/Analysis/CodeMetrics.cpp
index acda34b..651a54b 100644
--- a/contrib/llvm/lib/Analysis/CodeMetrics.cpp
+++ b/contrib/llvm/lib/Analysis/CodeMetrics.cpp
@@ -15,7 +15,7 @@
#include "llvm/Function.h"
#include "llvm/Support/CallSite.h"
#include "llvm/IntrinsicInst.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
@@ -54,7 +54,7 @@ bool llvm::callIsSmall(ImmutableCallSite CS) {
return false;
}
-bool llvm::isInstructionFree(const Instruction *I, const TargetData *TD) {
+bool llvm::isInstructionFree(const Instruction *I, const DataLayout *TD) {
if (isa<PHINode>(I))
return true;
@@ -119,7 +119,7 @@ bool llvm::isInstructionFree(const Instruction *I, const TargetData *TD) {
/// analyzeBasicBlock - Fill in the current structure with information gleaned
/// from the specified block.
void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
- const TargetData *TD) {
+ const DataLayout *TD) {
++NumBlocks;
unsigned NumInstsBeforeThisBB = NumInsts;
for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
@@ -189,14 +189,14 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
}
-void CodeMetrics::analyzeFunction(Function *F, const TargetData *TD) {
+void CodeMetrics::analyzeFunction(Function *F, const DataLayout *TD) {
// If this function contains a call that "returns twice" (e.g., setjmp or
// _setjmp) and it isn't marked with "returns twice" itself, never inline it.
// This is a hack because we depend on the user marking their local variables
// as volatile if they are live across a setjmp call, and they probably
// won't do this in callers.
exposesReturnsTwice = F->callsFunctionThatReturnsTwice() &&
- !F->hasFnAttr(Attribute::ReturnsTwice);
+ !F->getFnAttributes().hasAttribute(Attributes::ReturnsTwice);
// Look at the size of the callee.
for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
diff --git a/contrib/llvm/lib/Analysis/ConstantFolding.cpp b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
index f5e619c..91a5b84 100644
--- a/contrib/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
@@ -11,7 +11,7 @@
//
// Also, to supplement the basic VMCore ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
-// TargetData information. These functions cannot go in VMCore due to library
+// DataLayout information. These functions cannot go in VMCore due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//
@@ -25,7 +25,7 @@
#include "llvm/Intrinsics.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
@@ -41,11 +41,11 @@ using namespace llvm;
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//
-/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
-/// TargetData. This always returns a non-null constant, but it may be a
+/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
+/// DataLayout. This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
static Constant *FoldBitCast(Constant *C, Type *DestTy,
- const TargetData &TD) {
+ const DataLayout &TD) {
// Catch the obvious splat cases.
if (C->isNullValue() && !DestTy->isX86_MMXTy())
return Constant::getNullValue(DestTy);
@@ -59,9 +59,9 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
return ConstantExpr::getBitCast(C, DestTy);
unsigned NumSrcElts = CDV->getType()->getNumElements();
-
+
Type *SrcEltTy = CDV->getType()->getElementType();
-
+
// If the vector is a vector of floating point, convert it to vector of int
// to simplify things.
if (SrcEltTy->isFloatingPointTy()) {
@@ -72,7 +72,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
C = ConstantExpr::getBitCast(C, SrcIVTy);
CDV = cast<ConstantDataVector>(C);
}
-
+
// Now that we know that the input value is a vector of integers, just shift
// and insert them into our result.
unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy);
@@ -84,43 +84,43 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
else
Result |= CDV->getElementAsInteger(i);
}
-
+
return ConstantInt::get(IT, Result);
}
-
+
// The code below only handles casts to vectors currently.
VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
if (DestVTy == 0)
return ConstantExpr::getBitCast(C, DestTy);
-
+
// If this is a scalar -> vector cast, convert the input into a <1 x scalar>
// vector so the code below can handle it uniformly.
if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
Constant *Ops = C; // don't take the address of C!
return FoldBitCast(ConstantVector::get(Ops), DestTy, TD);
}
-
+
// If this is a bitcast from constant vector -> vector, fold it.
if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
return ConstantExpr::getBitCast(C, DestTy);
-
+
// If the element types match, VMCore can fold it.
unsigned NumDstElt = DestVTy->getNumElements();
unsigned NumSrcElt = C->getType()->getVectorNumElements();
if (NumDstElt == NumSrcElt)
return ConstantExpr::getBitCast(C, DestTy);
-
+
Type *SrcEltTy = C->getType()->getVectorElementType();
Type *DstEltTy = DestVTy->getElementType();
-
- // Otherwise, we're changing the number of elements in a vector, which
+
+ // Otherwise, we're changing the number of elements in a vector, which
// requires endianness information to do the right thing. For example,
// bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
// folds to (little endian):
// <4 x i32> <i32 0, i32 0, i32 1, i32 0>
// and to (big endian):
// <4 x i32> <i32 0, i32 0, i32 0, i32 1>
-
+
// First thing is first. We only want to think about integer here, so if
// we have something in FP form, recast it as integer.
if (DstEltTy->isFloatingPointTy()) {
@@ -130,11 +130,11 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
// Recursively handle this integer conversion, if possible.
C = FoldBitCast(C, DestIVTy, TD);
-
+
// Finally, VMCore can handle this now that #elts line up.
return ConstantExpr::getBitCast(C, DestTy);
}
-
+
// Okay, we know the destination is integer, if the input is FP, convert
// it to integer first.
if (SrcEltTy->isFloatingPointTy()) {
@@ -148,13 +148,13 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
!isa<ConstantDataVector>(C))
return C;
}
-
+
// Now we know that the input and output vectors are both integer vectors
// of the same size, and that their #elements is not the same. Do the
// conversion here, which depends on whether the input or output has
// more elements.
bool isLittleEndian = TD.isLittleEndian();
-
+
SmallVector<Constant*, 32> Result;
if (NumDstElt < NumSrcElt) {
// Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
@@ -170,15 +170,15 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
Constant *Src =dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
-
+
// Zero extend the element to the right size.
Src = ConstantExpr::getZExt(Src, Elt->getType());
-
+
// Shift it to the right place, depending on endianness.
- Src = ConstantExpr::getShl(Src,
+ Src = ConstantExpr::getShl(Src,
ConstantInt::get(Src->getType(), ShiftAmt));
ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
-
+
// Mix it in.
Elt = ConstantExpr::getOr(Elt, Src);
}
@@ -186,30 +186,30 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
}
return ConstantVector::get(Result);
}
-
+
// Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
unsigned Ratio = NumDstElt/NumSrcElt;
unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
-
+
// Loop over each source value, expanding into multiple results.
for (unsigned i = 0; i != NumSrcElt; ++i) {
Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
-
+
unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
// Shift the piece of the value into the right place, depending on
// endianness.
- Constant *Elt = ConstantExpr::getLShr(Src,
+ Constant *Elt = ConstantExpr::getLShr(Src,
ConstantInt::get(Src->getType(), ShiftAmt));
ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
-
+
// Truncate and remember this piece.
Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
}
}
-
+
return ConstantVector::get(Result);
}
@@ -218,34 +218,34 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
/// from a global, return the global and the constant. Because of
/// constantexprs, this function is recursive.
static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
- int64_t &Offset, const TargetData &TD) {
+ int64_t &Offset, const DataLayout &TD) {
// Trivial case, constant is the global.
if ((GV = dyn_cast<GlobalValue>(C))) {
Offset = 0;
return true;
}
-
+
// Otherwise, if this isn't a constant expr, bail out.
ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
if (!CE) return false;
-
+
// Look through ptr->int and ptr->ptr casts.
if (CE->getOpcode() == Instruction::PtrToInt ||
CE->getOpcode() == Instruction::BitCast)
return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);
-
- // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
+
+ // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
if (CE->getOpcode() == Instruction::GetElementPtr) {
// Cannot compute this if the element type of the pointer is missing size
// info.
if (!cast<PointerType>(CE->getOperand(0)->getType())
->getElementType()->isSized())
return false;
-
+
// If the base isn't a global+constant, we aren't either.
if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD))
return false;
-
+
// Otherwise, add any offset that our operands provide.
gep_type_iterator GTI = gep_type_begin(CE);
for (User::const_op_iterator i = CE->op_begin() + 1, e = CE->op_end();
@@ -253,7 +253,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
ConstantInt *CI = dyn_cast<ConstantInt>(*i);
if (!CI) return false; // Index isn't a simple constant?
if (CI->isZero()) continue; // Not adding anything.
-
+
if (StructType *ST = dyn_cast<StructType>(*GTI)) {
// N = N + Offset
Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
@@ -264,7 +264,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
}
return true;
}
-
+
return false;
}
@@ -274,30 +274,33 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
/// the CurPtr buffer. TD is the target data.
static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
unsigned char *CurPtr, unsigned BytesLeft,
- const TargetData &TD) {
+ const DataLayout &TD) {
assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&
"Out of range access");
-
+
// If this element is zero or undefined, we can just return since *CurPtr is
// zero initialized.
if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
return true;
-
+
if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
if (CI->getBitWidth() > 64 ||
(CI->getBitWidth() & 7) != 0)
return false;
-
+
uint64_t Val = CI->getZExtValue();
unsigned IntBytes = unsigned(CI->getBitWidth()/8);
-
+
for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
- CurPtr[i] = (unsigned char)(Val >> (ByteOffset * 8));
+ int n = ByteOffset;
+ if (!TD.isLittleEndian())
+ n = IntBytes - n - 1;
+ CurPtr[i] = (unsigned char)(Val >> (n * 8));
++ByteOffset;
}
return true;
}
-
+
if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
if (CFP->getType()->isDoubleTy()) {
C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD);
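
The loop above now picks the byte at the position dictated by the target's byte order instead of always assuming little-endian. In isolation, the indexing looks like this (a sketch, with names chosen to mirror the code above):

    #include <cassert>
    #include <cstdint>

    unsigned char byteAt(uint64_t Val, unsigned ByteOffset,
                         unsigned IntBytes, bool LittleEndian) {
      unsigned n = ByteOffset;
      if (!LittleEndian)
        n = IntBytes - n - 1;          // flip for big-endian layout
      return (unsigned char)(Val >> (n * 8));
    }

    int main() {
      // 0x0A0B0C0D in memory: LE is 0D 0C 0B 0A, BE is 0A 0B 0C 0D.
      assert(byteAt(0x0A0B0C0DULL, 0, 4, true) == 0x0D);
      assert(byteAt(0x0A0B0C0DULL, 0, 4, false) == 0x0A);
      return 0;
    }
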
@@ -309,13 +312,13 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
}
return false;
}
-
+
if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
const StructLayout *SL = TD.getStructLayout(CS->getType());
unsigned Index = SL->getElementContainingOffset(ByteOffset);
uint64_t CurEltOffset = SL->getElementOffset(Index);
ByteOffset -= CurEltOffset;
-
+
while (1) {
// If the element access is to the element itself and not to tail padding,
// read the bytes from the element.
@@ -325,9 +328,9 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
!ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
BytesLeft, TD))
return false;
-
+
++Index;
-
+
// Check to see if we read from the last struct element, if so we're done.
if (Index == CS->getType()->getNumElements())
return true;
@@ -375,11 +378,11 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
}
return true;
}
-
+
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
if (CE->getOpcode() == Instruction::IntToPtr &&
- CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
- return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
+ CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
+ return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
BytesLeft, TD);
}
@@ -388,10 +391,10 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
}
static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
- const TargetData &TD) {
+ const DataLayout &TD) {
Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
-
+
// If this isn't an integer load we can't fold it directly.
if (!IntType) {
// If this is a float/double load, we can try folding it as an int32/64 load
@@ -415,15 +418,15 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
return FoldBitCast(Res, LoadTy, TD);
return 0;
}
-
+
unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
if (BytesLoaded > 32 || BytesLoaded == 0) return 0;
-
+
GlobalValue *GVal;
int64_t Offset;
if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
return 0;
-
+
GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
!GV->getInitializer()->getType()->isSized())
@@ -432,20 +435,29 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
// If we're loading off the beginning of the global, some bytes may be valid,
// but we don't try to handle this.
if (Offset < 0) return 0;
-
+
// If we're not accessing anything in this constant, the result is undefined.
if (uint64_t(Offset) >= TD.getTypeAllocSize(GV->getInitializer()->getType()))
return UndefValue::get(IntType);
-
+
unsigned char RawBytes[32] = {0};
if (!ReadDataFromGlobal(GV->getInitializer(), Offset, RawBytes,
BytesLoaded, TD))
return 0;
- APInt ResultVal = APInt(IntType->getBitWidth(), RawBytes[BytesLoaded-1]);
- for (unsigned i = 1; i != BytesLoaded; ++i) {
- ResultVal <<= 8;
- ResultVal |= RawBytes[BytesLoaded-1-i];
+ APInt ResultVal = APInt(IntType->getBitWidth(), 0);
+ if (TD.isLittleEndian()) {
+ ResultVal = RawBytes[BytesLoaded - 1];
+ for (unsigned i = 1; i != BytesLoaded; ++i) {
+ ResultVal <<= 8;
+ ResultVal |= RawBytes[BytesLoaded-1-i];
+ }
+ } else {
+ ResultVal = RawBytes[0];
+ for (unsigned i = 1; i != BytesLoaded; ++i) {
+ ResultVal <<= 8;
+ ResultVal |= RawBytes[i];
+ }
}
return ConstantInt::get(IntType->getContext(), ResultVal);
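
This is the load-side counterpart of the ReadDataFromGlobal change: the raw bytes are folded back into an integer starting from whichever end the target treats as most significant. The same logic over plain arrays (illustrative, not LLVM's APInt):

    #include <cassert>
    #include <cstdint>

    uint64_t reassemble(const unsigned char *Bytes, unsigned N,
                        bool LittleEndian) {
      uint64_t R = LittleEndian ? Bytes[N - 1] : Bytes[0];
      for (unsigned i = 1; i != N; ++i) {
        R <<= 8;                                  // shift in the next byte
        R |= LittleEndian ? Bytes[N - 1 - i] : Bytes[i];
      }
      return R;
    }

    int main() {
      unsigned char LE[] = {0x0D, 0x0C, 0x0B, 0x0A};  // 0x0A0B0C0D, LE
      unsigned char BE[] = {0x0A, 0x0B, 0x0C, 0x0D};  // same value, BE
      assert(reassemble(LE, 4, true)  == 0x0A0B0C0Dull);
      assert(reassemble(BE, 4, false) == 0x0A0B0C0Dull);
      return 0;
    }
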
@@ -455,7 +467,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
/// produce if it is constant and determinable. If this is not determinable,
/// return null.
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
- const TargetData *TD) {
+ const DataLayout *TD) {
// First, try the easy cases:
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
if (GV->isConstant() && GV->hasDefinitiveInitializer())
@@ -464,15 +476,15 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
// If the loaded value isn't a constant expr, we can't handle it.
ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
if (!CE) return 0;
-
+
if (CE->getOpcode() == Instruction::GetElementPtr) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
if (GV->isConstant() && GV->hasDefinitiveInitializer())
- if (Constant *V =
+ if (Constant *V =
ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
return V;
}
-
+
// Instead of loading constant c string, use corresponding integer value
// directly if string length is small enough.
StringRef Str;
@@ -500,14 +512,14 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
SingleChar = 0;
StrVal = (StrVal << 8) | SingleChar;
}
-
+
Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
if (Ty->isFloatingPointTy())
Res = ConstantExpr::getBitCast(Res, Ty);
return Res;
}
}
-
+
// If this load comes from anywhere in a constant global, and if the global
// is all undef or zero, we know what it loads.
if (GlobalVariable *GV =
@@ -520,18 +532,16 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
return UndefValue::get(ResTy);
}
}
-
- // Try hard to fold loads from bitcasted strange and non-type-safe things. We
- // currently don't do any of this for big endian systems. It can be
- // generalized in the future if someone is interested.
- if (TD && TD->isLittleEndian())
+
+ // Try hard to fold loads from bitcasted strange and non-type-safe things.
+ if (TD)
return FoldReinterpretLoadFromConstPtr(CE, *TD);
return 0;
}
-static Constant *ConstantFoldLoadInst(const LoadInst *LI, const TargetData *TD){
+static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
if (LI->isVolatile()) return 0;
-
+
if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
return ConstantFoldLoadFromConstPtr(C, TD);
@@ -540,23 +550,23 @@ static Constant *ConstantFoldLoadInst(const LoadInst *LI, const TargetData *TD){
/// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
-/// these together. If target data info is available, it is provided as TD,
+/// these together. If target data info is available, it is provided as TD,
/// otherwise TD is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
- Constant *Op1, const TargetData *TD){
+ Constant *Op1, const DataLayout *TD){
// SROA
-
+
// Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
// Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
// bits.
-
-
+
+
// If the constant expr is something like &A[123] - &A[4].f, fold this into a
// constant. This happens frequently when iterating over a global array.
if (Opc == Instruction::Sub && TD) {
GlobalValue *GV1, *GV2;
int64_t Offs1, Offs2;
-
+
if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *TD))
if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *TD) &&
GV1 == GV2) {
@@ -564,7 +574,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
return ConstantInt::get(Op0->getType(), Offs1-Offs2);
}
}
-
+
return 0;
}
@@ -572,7 +582,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
/// explicitly cast them so that they aren't implicitly casted by the
/// getelementptr.
static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
- Type *ResultTy, const TargetData *TD,
+ Type *ResultTy, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TD) return 0;
Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
@@ -622,20 +632,20 @@ static Constant* StripPtrCastKeepAS(Constant* Ptr) {
/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
- Type *ResultTy, const TargetData *TD,
+ Type *ResultTy, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
Constant *Ptr = Ops[0];
if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() ||
!Ptr->getType()->isPointerTy())
return 0;
-
+
Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());
// If this is a constant expr gep that is effectively computing an
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
if (!isa<ConstantInt>(Ops[i])) {
-
+
// If this is "gep i8* Ptr, (sub 0, V)", fold this as:
// "inttoptr (sub (ptrtoint Ptr), V)"
if (Ops.size() == 2 &&
@@ -659,7 +669,8 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy);
APInt Offset =
APInt(BitWidth, TD->getIndexedOffset(Ptr->getType(),
- makeArrayRef((Value **)Ops.data() + 1,
+ makeArrayRef((Value *const*)
+ Ops.data() + 1,
Ops.size() - 1)));
Ptr = StripPtrCastKeepAS(Ptr);
@@ -708,12 +719,12 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
// The only pointer indexing we'll do is on the first index of the GEP.
if (!NewIdxs.empty())
break;
-
+
// Only handle pointers to sized types, not pointers to functions.
if (!ATy->getElementType()->isSized())
return 0;
}
-
+
// Determine which element of the array the offset points into.
APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
@@ -785,7 +796,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
Constant *llvm::ConstantFoldInstruction(Instruction *I,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI) {
// Handle PHI nodes quickly here...
if (PHINode *PN = dyn_cast<PHINode>(I)) {
@@ -836,7 +847,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
TD, TLI);
-
+
if (const LoadInst *LI = dyn_cast<LoadInst>(I))
return ConstantFoldLoadInst(LI, TD);
@@ -855,10 +866,10 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
}
/// ConstantFoldConstantExpression - Attempt to fold the constant expression
-/// using the specified TargetData. If successful, the constant result is
+/// using the specified DataLayout. If successful, the constant result is
/// result is returned, if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI) {
SmallVector<Constant*, 8> Ops;
for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end();
@@ -886,19 +897,19 @@ Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
/// information, due to only being passed an opcode and operands. Constant
/// folding using this function strips this information.
///
-Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
+Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops,
- const TargetData *TD,
- const TargetLibraryInfo *TLI) {
+ const DataLayout *TD,
+ const TargetLibraryInfo *TLI) {
// Handle easy binops first.
if (Instruction::isBinaryOp(Opcode)) {
if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD))
return C;
-
+
return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
}
-
+
switch (Opcode) {
default: return 0;
case Instruction::ICmp:
@@ -916,7 +927,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
Constant *Input = CE->getOperand(0);
unsigned InWidth = Input->getType()->getScalarSizeInBits();
if (TD->getPointerSizeInBits() < InWidth) {
- Constant *Mask =
+ Constant *Mask =
ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth,
TD->getPointerSizeInBits()));
Input = ConstantExpr::getAnd(Input, Mask);
@@ -964,7 +975,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
return C;
if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI))
return C;
-
+
return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
}
}
@@ -974,8 +985,8 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
/// returns a constant expression of the specified operands.
///
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
- Constant *Ops0, Constant *Ops1,
- const TargetData *TD,
+ Constant *Ops0, Constant *Ops1,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI) {
// fold: icmp (inttoptr x), null -> icmp x, 0
// fold: icmp (ptrtoint x), 0 -> icmp x, null
@@ -995,17 +1006,17 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *Null = Constant::getNullValue(C->getType());
return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
}
-
+
// Only do this transformation if the int is intptrty in size, otherwise
// there is a truncation or extension that we aren't modeling.
- if (CE0->getOpcode() == Instruction::PtrToInt &&
+ if (CE0->getOpcode() == Instruction::PtrToInt &&
CE0->getType() == IntPtrTy) {
Constant *C = CE0->getOperand(0);
Constant *Null = Constant::getNullValue(C->getType());
return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
}
}
-
+
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
if (TD && CE0->getOpcode() == CE1->getOpcode()) {
Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
@@ -1029,24 +1040,24 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
CE1->getOperand(0), TD, TLI);
}
}
-
+
// icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
// icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
- Constant *LHS =
+ Constant *LHS =
ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,
TD, TLI);
- Constant *RHS =
+ Constant *RHS =
ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,
TD, TLI);
- unsigned OpC =
+ unsigned OpC =
Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
Constant *Ops[] = { LHS, RHS };
return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI);
}
}
-
+
return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}
@@ -1054,7 +1065,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by the
/// constant expression, or null if something is funny and we can't decide.
-Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
+Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
ConstantExpr *CE) {
if (!CE->getOperand(1)->isNullValue())
return 0; // Do not allow stepping over the value!
@@ -1124,14 +1135,14 @@ llvm::canConstantFoldCallTo(const Function *F) {
if (!F->hasName()) return false;
StringRef Name = F->getName();
-
+
 // In these cases, the check of the length is required. We don't want to
 // return true for a name like "cos\0blah", which strcmp would consider
 // equal to "cos" but which has length 8.
switch (Name[0]) {
default: return false;
case 'a':
- return Name == "acos" || Name == "asin" ||
+ return Name == "acos" || Name == "asin" ||
Name == "atan" || Name == "atan2";
case 'c':
return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
@@ -1151,7 +1162,7 @@ llvm::canConstantFoldCallTo(const Function *F) {
}
}
-static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
+static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
Type *Ty) {
sys::llvm_fenv_clearexcept();
V = NativeFP(V);
@@ -1159,7 +1170,7 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
sys::llvm_fenv_clearexcept();
return 0;
}
-
+
if (Ty->isFloatTy())
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
if (Ty->isDoubleTy())
@@ -1175,7 +1186,7 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
sys::llvm_fenv_clearexcept();
return 0;
}
-
+
if (Ty->isFloatTy())
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
if (Ty->isDoubleTy())
@@ -1269,7 +1280,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
case 'e':
if (Name == "exp" && TLI->has(LibFunc::exp))
return ConstantFoldFP(exp, V, Ty);
-
+
if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
// Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
// C99 library.
@@ -1345,7 +1356,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
}
// Support ConstantVector in case we have an Undef in the top.
- if (isa<ConstantVector>(Operands[0]) ||
+ if (isa<ConstantVector>(Operands[0]) ||
isa<ConstantDataVector>(Operands[0])) {
Constant *Op = cast<Constant>(Operands[0]);
switch (F->getIntrinsicID()) {
@@ -1364,11 +1375,11 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
case Intrinsic::x86_sse2_cvttsd2si64:
if (ConstantFP *FPOp =
dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
- return ConstantFoldConvertToInt(FPOp->getValueAPF(),
+ return ConstantFoldConvertToInt(FPOp->getValueAPF(),
/*roundTowardZero=*/true, Ty);
}
}
-
+
if (isa<UndefValue>(Operands[0])) {
if (F->getIntrinsicID() == Intrinsic::bswap)
return Operands[0];
@@ -1382,14 +1393,14 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
if (!Ty->isFloatTy() && !Ty->isDoubleTy())
return 0;
- double Op1V = Ty->isFloatTy() ?
+ double Op1V = Ty->isFloatTy() ?
(double)Op1->getValueAPF().convertToFloat() :
Op1->getValueAPF().convertToDouble();
if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
if (Op2->getType() != Op1->getType())
return 0;
- double Op2V = Ty->isFloatTy() ?
+ double Op2V = Ty->isFloatTy() ?
(double)Op2->getValueAPF().convertToFloat():
Op2->getValueAPF().convertToDouble();
@@ -1416,7 +1427,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
}
return 0;
}
-
+
if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
switch (F->getIntrinsicID()) {
@@ -1466,7 +1477,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
}
}
-
+
return 0;
}
return 0;
diff --git a/contrib/llvm/lib/Analysis/CostModel.cpp b/contrib/llvm/lib/Analysis/CostModel.cpp
new file mode 100644
index 0000000..5adbf45
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/CostModel.cpp
@@ -0,0 +1,193 @@
+//===- CostModel.cpp ------ Cost Model Analysis ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the cost model analysis. It provides a very basic cost
+// estimation for LLVM-IR. The cost result can be thought of as cycles, but it
+// is really unit-less. The estimated cost is meant to be used for comparing
+// alternatives.
+//
+//===----------------------------------------------------------------------===//
+
+#define CM_NAME "cost-model"
+#define DEBUG_TYPE CM_NAME
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/Pass.h"
+#include "llvm/TargetTransformInfo.h"
+#include "llvm/Value.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+namespace {
+ class CostModelAnalysis : public FunctionPass {
+
+ public:
+ static char ID; // Class identification, replacement for typeinfo
+ CostModelAnalysis() : FunctionPass(ID), F(0), VTTI(0) {
+ initializeCostModelAnalysisPass(
+ *PassRegistry::getPassRegistry());
+ }
+
+ /// Returns the expected cost of the instruction.
+ /// Returns -1 if the cost is unknown.
+ /// Note, this method does not cache the cost calculation and it
+ /// can be expensive in some cases.
+ unsigned getInstructionCost(Instruction *I) const;
+
+ private:
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ virtual bool runOnFunction(Function &F);
+ virtual void print(raw_ostream &OS, const Module*) const;
+
+ /// The function that we analyze.
+ Function *F;
+ /// Vector target information.
+ const VectorTargetTransformInfo *VTTI;
+ };
+} // End of anonymous namespace
+
+// Register this pass.
+char CostModelAnalysis::ID = 0;
+static const char cm_name[] = "Cost Model Analysis";
+INITIALIZE_PASS_BEGIN(CostModelAnalysis, CM_NAME, cm_name, false, true)
+INITIALIZE_PASS_END (CostModelAnalysis, CM_NAME, cm_name, false, true)
+
+FunctionPass *llvm::createCostModelAnalysisPass() {
+ return new CostModelAnalysis();
+}
+
+void
+CostModelAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+}
+
+bool
+CostModelAnalysis::runOnFunction(Function &F) {
+ this->F = &F;
+
+ // Target information.
+ TargetTransformInfo *TTI;
+ TTI = getAnalysisIfAvailable<TargetTransformInfo>();
+ if (TTI)
+ VTTI = TTI->getVectorTargetTransformInfo();
+
+ return false;
+}
+
+unsigned CostModelAnalysis::getInstructionCost(Instruction *I) const {
+ if (!VTTI)
+ return -1;
+
+ switch (I->getOpcode()) {
+ case Instruction::Ret:
+ case Instruction::PHI:
+ case Instruction::Br: {
+ return VTTI->getCFInstrCost(I->getOpcode());
+ }
+ case Instruction::Add:
+ case Instruction::FAdd:
+ case Instruction::Sub:
+ case Instruction::FSub:
+ case Instruction::Mul:
+ case Instruction::FMul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::FDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::FRem:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor: {
+ return VTTI->getArithmeticInstrCost(I->getOpcode(), I->getType());
+ }
+ case Instruction::Select: {
+ SelectInst *SI = cast<SelectInst>(I);
+ Type *CondTy = SI->getCondition()->getType();
+ return VTTI->getCmpSelInstrCost(I->getOpcode(), I->getType(), CondTy);
+ }
+ case Instruction::ICmp:
+ case Instruction::FCmp: {
+ Type *ValTy = I->getOperand(0)->getType();
+ return VTTI->getCmpSelInstrCost(I->getOpcode(), ValTy);
+ }
+ case Instruction::Store: {
+ StoreInst *SI = cast<StoreInst>(I);
+ Type *ValTy = SI->getValueOperand()->getType();
+ return VTTI->getMemoryOpCost(I->getOpcode(), ValTy,
+ SI->getAlignment(),
+ SI->getPointerAddressSpace());
+ }
+ case Instruction::Load: {
+ LoadInst *LI = cast<LoadInst>(I);
+ return VTTI->getMemoryOpCost(I->getOpcode(), I->getType(),
+ LI->getAlignment(),
+ LI->getPointerAddressSpace());
+ }
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::FPExt:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::SIToFP:
+ case Instruction::UIToFP:
+ case Instruction::Trunc:
+ case Instruction::FPTrunc:
+ case Instruction::BitCast: {
+ Type *SrcTy = I->getOperand(0)->getType();
+ return VTTI->getCastInstrCost(I->getOpcode(), I->getType(), SrcTy);
+ }
+ case Instruction::ExtractElement: {
+ ExtractElementInst * EEI = cast<ExtractElementInst>(I);
+ ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
+ unsigned Idx = -1;
+ if (CI)
+ Idx = CI->getZExtValue();
+ return VTTI->getVectorInstrCost(I->getOpcode(),
+ EEI->getOperand(0)->getType(), Idx);
+ }
+ case Instruction::InsertElement: {
+ InsertElementInst * IE = cast<InsertElementInst>(I);
+ ConstantInt *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
+ unsigned Idx = -1;
+ if (CI)
+ Idx = CI->getZExtValue();
+ return VTTI->getVectorInstrCost(I->getOpcode(),
+ IE->getType(), Idx);
+ }
+ default:
+ // We don't have any information on this instruction.
+ return -1;
+ }
+}
+
+void CostModelAnalysis::print(raw_ostream &OS, const Module*) const {
+ if (!F)
+ return;
+
+ for (Function::iterator B = F->begin(), BE = F->end(); B != BE; ++B) {
+ for (BasicBlock::iterator it = B->begin(), e = B->end(); it != e; ++it) {
+ Instruction *Inst = it;
+ unsigned Cost = getInstructionCost(Inst);
+ if (Cost != (unsigned)-1)
+ OS << "Cost Model: Found an estimated cost of " << Cost;
+ else
+ OS << "Cost Model: Unknown cost";
+
+ OS << " for instruction: "<< *Inst << "\n";
+ }
+ }
+}
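+
+// A minimal usage sketch (assuming the LLVM 3.2-era opt driver; "input.ll"
+// is a hypothetical module): since the pass registers itself under the name
+// "cost-model" and implements print(), its results can be inspected with
+//
+//   opt -cost-model -analyze input.ll
+//
+// which prints one "Found an estimated cost of N" line per instruction, or
+// "Unknown cost" when no estimate is available.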
diff --git a/contrib/llvm/lib/Analysis/DependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/DependenceAnalysis.cpp
new file mode 100644
index 0000000..95ac5ea
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/DependenceAnalysis.cpp
@@ -0,0 +1,3786 @@
+//===-- DependenceAnalysis.cpp - DA Implementation --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// DependenceAnalysis is an LLVM pass that analyses dependences between memory
+// accesses. Currently, it is an (incomplete) implementation of the approach
+// described in
+//
+// Practical Dependence Testing
+// Goff, Kennedy, Tseng
+// PLDI 1991
+//
+// There's a single entry point that analyzes the dependence between a pair
+// of memory references in a function, returning either NULL, for no dependence,
+// or a more-or-less detailed description of the dependence between them.
+//
+// Currently, the implementation cannot propagate constraints between
+// coupled RDIV subscripts and lacks a multi-subscript MIV test.
+// Both of these are conservative weaknesses;
+// that is, not a source of correctness problems.
+//
+// The implementation depends on the GEP instruction to
+// differentiate subscripts. Since Clang linearizes subscripts
+// for most arrays, we give up some precision (though the existing MIV tests
+// will help). We trust that the GEP instruction will eventually be extended.
+// In the meantime, we should explore Maslov's ideas about delinearization.
+//
+// We should pay some careful attention to the possibility of integer overflow
+// in the implementation of the various tests. This could happen with Add,
+// Subtract, or Multiply, with both APInt's and SCEV's.
+//
+// Some non-linear subscript pairs can be handled by the GCD test
+// (and perhaps other tests).
+// Should explore how often these things occur.
+//
+// Finally, it seems like certain test cases expose weaknesses in the SCEV
+// simplification, especially in the handling of sign and zero extensions.
+// It could be useful to spend time exploring these.
+//
+// Please note that this is work in progress and the interface is subject to
+// change.
+//
+//===----------------------------------------------------------------------===//
+// //
+// In memory of Ken Kennedy, 1945 - 2007 //
+// //
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "da"
+
+#include "llvm/Analysis/DependenceAnalysis.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Operator.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/InstIterator.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// statistics
+
+STATISTIC(TotalArrayPairs, "Array pairs tested");
+STATISTIC(SeparableSubscriptPairs, "Separable subscript pairs");
+STATISTIC(CoupledSubscriptPairs, "Coupled subscript pairs");
+STATISTIC(NonlinearSubscriptPairs, "Nonlinear subscript pairs");
+STATISTIC(ZIVapplications, "ZIV applications");
+STATISTIC(ZIVindependence, "ZIV independence");
+STATISTIC(StrongSIVapplications, "Strong SIV applications");
+STATISTIC(StrongSIVsuccesses, "Strong SIV successes");
+STATISTIC(StrongSIVindependence, "Strong SIV independence");
+STATISTIC(WeakCrossingSIVapplications, "Weak-Crossing SIV applications");
+STATISTIC(WeakCrossingSIVsuccesses, "Weak-Crossing SIV successes");
+STATISTIC(WeakCrossingSIVindependence, "Weak-Crossing SIV independence");
+STATISTIC(ExactSIVapplications, "Exact SIV applications");
+STATISTIC(ExactSIVsuccesses, "Exact SIV successes");
+STATISTIC(ExactSIVindependence, "Exact SIV independence");
+STATISTIC(WeakZeroSIVapplications, "Weak-Zero SIV applications");
+STATISTIC(WeakZeroSIVsuccesses, "Weak-Zero SIV successes");
+STATISTIC(WeakZeroSIVindependence, "Weak-Zero SIV independence");
+STATISTIC(ExactRDIVapplications, "Exact RDIV applications");
+STATISTIC(ExactRDIVindependence, "Exact RDIV independence");
+STATISTIC(SymbolicRDIVapplications, "Symbolic RDIV applications");
+STATISTIC(SymbolicRDIVindependence, "Symbolic RDIV independence");
+STATISTIC(DeltaApplications, "Delta applications");
+STATISTIC(DeltaSuccesses, "Delta successes");
+STATISTIC(DeltaIndependence, "Delta independence");
+STATISTIC(DeltaPropagations, "Delta propagations");
+STATISTIC(GCDapplications, "GCD applications");
+STATISTIC(GCDsuccesses, "GCD successes");
+STATISTIC(GCDindependence, "GCD independence");
+STATISTIC(BanerjeeApplications, "Banerjee applications");
+STATISTIC(BanerjeeIndependence, "Banerjee independence");
+STATISTIC(BanerjeeSuccesses, "Banerjee successes");
+
+//===----------------------------------------------------------------------===//
+// basics
+
+INITIALIZE_PASS_BEGIN(DependenceAnalysis, "da",
+ "Dependence Analysis", true, true)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(DependenceAnalysis, "da",
+ "Dependence Analysis", true, true)
+
+char DependenceAnalysis::ID = 0;
+
+
+FunctionPass *llvm::createDependenceAnalysisPass() {
+ return new DependenceAnalysis();
+}
+
+
+bool DependenceAnalysis::runOnFunction(Function &F) {
+ this->F = &F;
+ AA = &getAnalysis<AliasAnalysis>();
+ SE = &getAnalysis<ScalarEvolution>();
+ LI = &getAnalysis<LoopInfo>();
+ return false;
+}
+
+
+void DependenceAnalysis::releaseMemory() {
+}
+
+
+void DependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequiredTransitive<AliasAnalysis>();
+ AU.addRequiredTransitive<ScalarEvolution>();
+ AU.addRequiredTransitive<LoopInfo>();
+}
+
+
+// Used to test the dependence analyzer.
+// Looks through the function, noting the first store instruction
+// and the first load instruction
+// (which always follows the first store in our tests).
+// Calls depends() and prints out the result.
+// Ignores all other instructions.
+static
+void dumpExampleDependence(raw_ostream &OS, Function *F,
+ DependenceAnalysis *DA) {
+ for (inst_iterator SrcI = inst_begin(F), SrcE = inst_end(F);
+ SrcI != SrcE; ++SrcI) {
+ if (const StoreInst *Src = dyn_cast<StoreInst>(&*SrcI)) {
+ for (inst_iterator DstI = SrcI, DstE = inst_end(F);
+ DstI != DstE; ++DstI) {
+ if (const LoadInst *Dst = dyn_cast<LoadInst>(&*DstI)) {
+ OS << "da analyze - ";
+ if (Dependence *D = DA->depends(Src, Dst, true)) {
+ D->dump(OS);
+ for (unsigned Level = 1; Level <= D->getLevels(); Level++) {
+ if (D->isSplitable(Level)) {
+ OS << "da analyze - split level = " << Level;
+ OS << ", iteration = " << *DA->getSplitIteration(D, Level);
+ OS << "!\n";
+ }
+ }
+ delete D;
+ }
+ else
+ OS << "none!\n";
+ return;
+ }
+ }
+ }
+ }
+}
+
+
+void DependenceAnalysis::print(raw_ostream &OS, const Module*) const {
+ dumpExampleDependence(OS, F, const_cast<DependenceAnalysis *>(this));
+}
+
+//===----------------------------------------------------------------------===//
+// Dependence methods
+
+// Returns true if this is an input dependence.
+bool Dependence::isInput() const {
+ return Src->mayReadFromMemory() && Dst->mayReadFromMemory();
+}
+
+
+// Returns true if this is an output dependence.
+bool Dependence::isOutput() const {
+ return Src->mayWriteToMemory() && Dst->mayWriteToMemory();
+}
+
+
+// Returns true if this is a flow (aka true) dependence.
+bool Dependence::isFlow() const {
+ return Src->mayWriteToMemory() && Dst->mayReadFromMemory();
+}
+
+
+// Returns true if this is an anti dependence.
+bool Dependence::isAnti() const {
+ return Src->mayReadFromMemory() && Dst->mayWriteToMemory();
+}
+
+
+// Returns true if a particular level is scalar; that is,
+// if no subscript in the source or destination mentions the induction
+// variable associated with the loop at this level.
+// Leave this out of line, so it will serve as a virtual method anchor
+bool Dependence::isScalar(unsigned level) const {
+ return false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// FullDependence methods
+
+FullDependence::FullDependence(const Instruction *Source,
+ const Instruction *Destination,
+ bool PossiblyLoopIndependent,
+ unsigned CommonLevels) :
+ Dependence(Source, Destination),
+ Levels(CommonLevels),
+ LoopIndependent(PossiblyLoopIndependent) {
+ Consistent = true;
+ DV = CommonLevels ? new DVEntry[CommonLevels] : NULL;
+}
+
+// The rest are simple getters that hide the implementation.
+
+// getDirection - Returns the direction associated with a particular level.
+unsigned FullDependence::getDirection(unsigned Level) const {
+ assert(0 < Level && Level <= Levels && "Level out of range");
+ return DV[Level - 1].Direction;
+}
+
+
+// Returns the distance (or NULL) associated with a particular level.
+const SCEV *FullDependence::getDistance(unsigned Level) const {
+ assert(0 < Level && Level <= Levels && "Level out of range");
+ return DV[Level - 1].Distance;
+}
+
+
+// Returns true if a particular level is scalar; that is,
+// if no subscript in the source or destination mentions the induction
+// variable associated with the loop at this level.
+bool FullDependence::isScalar(unsigned Level) const {
+ assert(0 < Level && Level <= Levels && "Level out of range");
+ return DV[Level - 1].Scalar;
+}
+
+
+// Returns true if peeling the first iteration from this loop
+// will break this dependence.
+bool FullDependence::isPeelFirst(unsigned Level) const {
+ assert(0 < Level && Level <= Levels && "Level out of range");
+ return DV[Level - 1].PeelFirst;
+}
+
+
+// Returns true if peeling the last iteration from this loop
+// will break this dependence.
+bool FullDependence::isPeelLast(unsigned Level) const {
+ assert(0 < Level && Level <= Levels && "Level out of range");
+ return DV[Level - 1].PeelLast;
+}
+
+
+// Returns true if splitting this loop will break the dependence.
+bool FullDependence::isSplitable(unsigned Level) const {
+ assert(0 < Level && Level <= Levels && "Level out of range");
+ return DV[Level - 1].Splitable;
+}
+
+
+//===----------------------------------------------------------------------===//
+// DependenceAnalysis::Constraint methods
+
+// If constraint is a point <X, Y>, returns X.
+// Otherwise assert.
+const SCEV *DependenceAnalysis::Constraint::getX() const {
+ assert(Kind == Point && "Kind should be Point");
+ return A;
+}
+
+
+// If constraint is a point <X, Y>, returns Y.
+// Otherwise assert.
+const SCEV *DependenceAnalysis::Constraint::getY() const {
+ assert(Kind == Point && "Kind should be Point");
+ return B;
+}
+
+
+// If constraint is a line AX + BY = C, returns A.
+// Otherwise assert.
+const SCEV *DependenceAnalysis::Constraint::getA() const {
+ assert((Kind == Line || Kind == Distance) &&
+ "Kind should be Line (or Distance)");
+ return A;
+}
+
+
+// If constraint is a line AX + BY = C, returns B.
+// Otherwise assert.
+const SCEV *DependenceAnalysis::Constraint::getB() const {
+ assert((Kind == Line || Kind == Distance) &&
+ "Kind should be Line (or Distance)");
+ return B;
+}
+
+
+// If constraint is a line AX + BY = C, returns C.
+// Otherwise assert.
+const SCEV *DependenceAnalysis::Constraint::getC() const {
+ assert((Kind == Line || Kind == Distance) &&
+ "Kind should be Line (or Distance)");
+ return C;
+}
+
+
+// If constraint is a distance, returns D.
+// Otherwise assert.
+const SCEV *DependenceAnalysis::Constraint::getD() const {
+ assert(Kind == Distance && "Kind should be Distance");
+ return SE->getNegativeSCEV(C);
+}
+
+
+// Returns the loop associated with this constraint.
+const Loop *DependenceAnalysis::Constraint::getAssociatedLoop() const {
+ assert((Kind == Distance || Kind == Line || Kind == Point) &&
+ "Kind should be Distance, Line, or Point");
+ return AssociatedLoop;
+}
+
+
+void DependenceAnalysis::Constraint::setPoint(const SCEV *X,
+ const SCEV *Y,
+ const Loop *CurLoop) {
+ Kind = Point;
+ A = X;
+ B = Y;
+ AssociatedLoop = CurLoop;
+}
+
+
+void DependenceAnalysis::Constraint::setLine(const SCEV *AA,
+ const SCEV *BB,
+ const SCEV *CC,
+ const Loop *CurLoop) {
+ Kind = Line;
+ A = AA;
+ B = BB;
+ C = CC;
+ AssociatedLoop = CurLoop;
+}
+
+
+void DependenceAnalysis::Constraint::setDistance(const SCEV *D,
+ const Loop *CurLoop) {
+ Kind = Distance;
+ A = SE->getConstant(D->getType(), 1);
+ B = SE->getNegativeSCEV(A);
+ C = SE->getNegativeSCEV(D);
+ AssociatedLoop = CurLoop;
+}
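+
+// For example, calling setDistance with D = 2 stores the line
+// 1*X + (-1)*Y = -2, i.e. Y - X = 2, which is why getD() recovers the
+// distance as the negation of C.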
+
+
+void DependenceAnalysis::Constraint::setEmpty() {
+ Kind = Empty;
+}
+
+
+void DependenceAnalysis::Constraint::setAny(ScalarEvolution *NewSE) {
+ SE = NewSE;
+ Kind = Any;
+}
+
+
+// For debugging purposes. Dumps the constraint out to OS.
+void DependenceAnalysis::Constraint::dump(raw_ostream &OS) const {
+ if (isEmpty())
+ OS << " Empty\n";
+ else if (isAny())
+ OS << " Any\n";
+ else if (isPoint())
+ OS << " Point is <" << *getX() << ", " << *getY() << ">\n";
+ else if (isDistance())
+ OS << " Distance is " << *getD() <<
+ " (" << *getA() << "*X + " << *getB() << "*Y = " << *getC() << ")\n";
+ else if (isLine())
+ OS << " Line is " << *getA() << "*X + " <<
+ *getB() << "*Y = " << *getC() << "\n";
+ else
+ llvm_unreachable("unknown constraint type in Constraint::dump");
+}
+
+
+// Updates X with the intersection
+// of the Constraints X and Y. Returns true if X has changed.
+// Corresponds to Figure 4 from the paper
+//
+// Practical Dependence Testing
+// Goff, Kennedy, Tseng
+// PLDI 1991
+bool DependenceAnalysis::intersectConstraints(Constraint *X,
+ const Constraint *Y) {
+ ++DeltaApplications;
+ DEBUG(dbgs() << "\tintersect constraints\n");
+ DEBUG(dbgs() << "\t X ="; X->dump(dbgs()));
+ DEBUG(dbgs() << "\t Y ="; Y->dump(dbgs()));
+ assert(!Y->isPoint() && "Y must not be a Point");
+ if (X->isAny()) {
+ if (Y->isAny())
+ return false;
+ *X = *Y;
+ return true;
+ }
+ if (X->isEmpty())
+ return false;
+ if (Y->isEmpty()) {
+ X->setEmpty();
+ return true;
+ }
+
+ if (X->isDistance() && Y->isDistance()) {
+ DEBUG(dbgs() << "\t intersect 2 distances\n");
+ if (isKnownPredicate(CmpInst::ICMP_EQ, X->getD(), Y->getD()))
+ return false;
+ if (isKnownPredicate(CmpInst::ICMP_NE, X->getD(), Y->getD())) {
+ X->setEmpty();
+ ++DeltaSuccesses;
+ return true;
+ }
+ // Hmmm, interesting situation.
+ // I guess if either is constant, keep it and ignore the other.
+ if (isa<SCEVConstant>(Y->getD())) {
+ *X = *Y;
+ return true;
+ }
+ return false;
+ }
+
+ // At this point, the pseudo-code in Figure 4 of the paper
+ // checks if (X->isPoint() && Y->isPoint()).
+ // This case can't occur in our implementation,
+ // since a Point can only arise as the result of intersecting
+ // two Line constraints, and the right-hand value, Y, is never
+ // the result of an intersection.
+ assert(!(X->isPoint() && Y->isPoint()) &&
+ "We shouldn't ever see X->isPoint() && Y->isPoint()");
+
+ if (X->isLine() && Y->isLine()) {
+ DEBUG(dbgs() << "\t intersect 2 lines\n");
+ const SCEV *Prod1 = SE->getMulExpr(X->getA(), Y->getB());
+ const SCEV *Prod2 = SE->getMulExpr(X->getB(), Y->getA());
+ if (isKnownPredicate(CmpInst::ICMP_EQ, Prod1, Prod2)) {
+ // slopes are equal, so lines are parallel
+ DEBUG(dbgs() << "\t\tsame slope\n");
+ Prod1 = SE->getMulExpr(X->getC(), Y->getB());
+ Prod2 = SE->getMulExpr(X->getB(), Y->getC());
+ if (isKnownPredicate(CmpInst::ICMP_EQ, Prod1, Prod2))
+ return false;
+ if (isKnownPredicate(CmpInst::ICMP_NE, Prod1, Prod2)) {
+ X->setEmpty();
+ ++DeltaSuccesses;
+ return true;
+ }
+ return false;
+ }
+ if (isKnownPredicate(CmpInst::ICMP_NE, Prod1, Prod2)) {
+ // slopes differ, so lines intersect
+ DEBUG(dbgs() << "\t\tdifferent slopes\n");
+ const SCEV *C1B2 = SE->getMulExpr(X->getC(), Y->getB());
+ const SCEV *C1A2 = SE->getMulExpr(X->getC(), Y->getA());
+ const SCEV *C2B1 = SE->getMulExpr(Y->getC(), X->getB());
+ const SCEV *C2A1 = SE->getMulExpr(Y->getC(), X->getA());
+ const SCEV *A1B2 = SE->getMulExpr(X->getA(), Y->getB());
+ const SCEV *A2B1 = SE->getMulExpr(Y->getA(), X->getB());
+ const SCEVConstant *C1A2_C2A1 =
+ dyn_cast<SCEVConstant>(SE->getMinusSCEV(C1A2, C2A1));
+ const SCEVConstant *C1B2_C2B1 =
+ dyn_cast<SCEVConstant>(SE->getMinusSCEV(C1B2, C2B1));
+ const SCEVConstant *A1B2_A2B1 =
+ dyn_cast<SCEVConstant>(SE->getMinusSCEV(A1B2, A2B1));
+ const SCEVConstant *A2B1_A1B2 =
+ dyn_cast<SCEVConstant>(SE->getMinusSCEV(A2B1, A1B2));
+ if (!C1B2_C2B1 || !C1A2_C2A1 ||
+ !A1B2_A2B1 || !A2B1_A1B2)
+ return false;
+ APInt Xtop = C1B2_C2B1->getValue()->getValue();
+ APInt Xbot = A1B2_A2B1->getValue()->getValue();
+ APInt Ytop = C1A2_C2A1->getValue()->getValue();
+ APInt Ybot = A2B1_A1B2->getValue()->getValue();
+ DEBUG(dbgs() << "\t\tXtop = " << Xtop << "\n");
+ DEBUG(dbgs() << "\t\tXbot = " << Xbot << "\n");
+ DEBUG(dbgs() << "\t\tYtop = " << Ytop << "\n");
+ DEBUG(dbgs() << "\t\tYbot = " << Ybot << "\n");
+ APInt Xq = Xtop; // these need to be initialized, even
+ APInt Xr = Xtop; // though they're just going to be overwritten
+ APInt::sdivrem(Xtop, Xbot, Xq, Xr);
+ APInt Yq = Ytop;
+ APInt Yr = Ytop;
+ APInt::sdivrem(Ytop, Ybot, Yq, Yr);
+ if (Xr != 0 || Yr != 0) {
+ X->setEmpty();
+ ++DeltaSuccesses;
+ return true;
+ }
+ DEBUG(dbgs() << "\t\tX = " << Xq << ", Y = " << Yq << "\n");
+ if (Xq.slt(0) || Yq.slt(0)) {
+ X->setEmpty();
+ ++DeltaSuccesses;
+ return true;
+ }
+ if (const SCEVConstant *CUB =
+ collectConstantUpperBound(X->getAssociatedLoop(), Prod1->getType())) {
+ APInt UpperBound = CUB->getValue()->getValue();
+ DEBUG(dbgs() << "\t\tupper bound = " << UpperBound << "\n");
+ if (Xq.sgt(UpperBound) || Yq.sgt(UpperBound)) {
+ X->setEmpty();
+ ++DeltaSuccesses;
+ return true;
+ }
+ }
+ X->setPoint(SE->getConstant(Xq),
+ SE->getConstant(Yq),
+ X->getAssociatedLoop());
+ ++DeltaSuccesses;
+ return true;
+ }
+ return false;
+ }
+
+ // if (X->isLine() && Y->isPoint()) This case can't occur.
+ assert(!(X->isLine() && Y->isPoint()) && "This case should never occur");
+
+ if (X->isPoint() && Y->isLine()) {
+ DEBUG(dbgs() << "\t intersect Point and Line\n");
+ const SCEV *A1X1 = SE->getMulExpr(Y->getA(), X->getX());
+ const SCEV *B1Y1 = SE->getMulExpr(Y->getB(), X->getY());
+ const SCEV *Sum = SE->getAddExpr(A1X1, B1Y1);
+ if (isKnownPredicate(CmpInst::ICMP_EQ, Sum, Y->getC()))
+ return false;
+ if (isKnownPredicate(CmpInst::ICMP_NE, Sum, Y->getC())) {
+ X->setEmpty();
+ ++DeltaSuccesses;
+ return true;
+ }
+ return false;
+ }
+
+ llvm_unreachable("shouldn't reach the end of Constraint intersection");
+ return false;
+}
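+
+// For example (hypothetical constraints): intersecting a Distance constraint
+// with D = 2 against one with D = 3 proves ICMP_NE on the two distances, so
+// X is set to Empty, later establishing independence.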
+
+
+//===----------------------------------------------------------------------===//
+// DependenceAnalysis methods
+
+// For debugging purposes. Dumps a dependence to OS.
+void Dependence::dump(raw_ostream &OS) const {
+ bool Splitable = false;
+ if (isConfused())
+ OS << "confused";
+ else {
+ if (isConsistent())
+ OS << "consistent ";
+ if (isFlow())
+ OS << "flow";
+ else if (isOutput())
+ OS << "output";
+ else if (isAnti())
+ OS << "anti";
+ else if (isInput())
+ OS << "input";
+ unsigned Levels = getLevels();
+ if (Levels) {
+ OS << " [";
+ for (unsigned II = 1; II <= Levels; ++II) {
+ if (isSplitable(II))
+ Splitable = true;
+ if (isPeelFirst(II))
+ OS << 'p';
+ const SCEV *Distance = getDistance(II);
+ if (Distance)
+ OS << *Distance;
+ else if (isScalar(II))
+ OS << "S";
+ else {
+ unsigned Direction = getDirection(II);
+ if (Direction == DVEntry::ALL)
+ OS << "*";
+ else {
+ if (Direction & DVEntry::LT)
+ OS << "<";
+ if (Direction & DVEntry::EQ)
+ OS << "=";
+ if (Direction & DVEntry::GT)
+ OS << ">";
+ }
+ }
+ if (isPeelLast(II))
+ OS << 'p';
+ if (II < Levels)
+ OS << " ";
+ }
+ if (isLoopIndependent())
+ OS << "|<";
+ OS << "]";
+ if (Splitable)
+ OS << " splitable";
+ }
+ }
+ OS << "!\n";
+}
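+
+// For example, a consistent flow dependence with distance 1 in one common
+// loop prints (roughly) as: consistent flow [1]!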
+
+
+
+static
+AliasAnalysis::AliasResult underlyingObjectsAlias(AliasAnalysis *AA,
+ const Value *A,
+ const Value *B) {
+ const Value *AObj = GetUnderlyingObject(A);
+ const Value *BObj = GetUnderlyingObject(B);
+ return AA->alias(AObj, AA->getTypeStoreSize(AObj->getType()),
+ BObj, AA->getTypeStoreSize(BObj->getType()));
+}
+
+
+// Returns true if the load or store can be analyzed. Atomic and volatile
+// operations have properties which this analysis does not understand.
+static
+bool isLoadOrStore(const Instruction *I) {
+ if (const LoadInst *LI = dyn_cast<LoadInst>(I))
+ return LI->isUnordered();
+ else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
+ return SI->isUnordered();
+ return false;
+}
+
+
+static
+const Value *getPointerOperand(const Instruction *I) {
+ if (const LoadInst *LI = dyn_cast<LoadInst>(I))
+ return LI->getPointerOperand();
+ if (const StoreInst *SI = dyn_cast<StoreInst>(I))
+ return SI->getPointerOperand();
+ llvm_unreachable("Value is not load or store instruction");
+ return 0;
+}
+
+
+// Examines the loop nesting of the Src and Dst
+// instructions and establishes their shared loops. Sets the variables
+// CommonLevels, SrcLevels, and MaxLevels.
+// The source and destination instructions needn't be contained in the same
+// loop. The routine establishNestingLevels finds the level of the most deeply
+// nested loop that contains them both, CommonLevels. An instruction that's
+// not contained in a loop is at level = 0. MaxLevels is equal to the level
+// of the source plus the level of the destination, minus CommonLevels.
+// This lets us allocate vectors MaxLevels in length, with room for every
+// distinct loop referenced in both the source and destination subscripts.
+// The variable SrcLevels is the nesting depth of the source instruction.
+// It's used to help calculate distinct loops referenced by the destination.
+// Here's the map from loops to levels:
+// 0 - unused
+// 1 - outermost common loop
+// ... - other common loops
+// CommonLevels - innermost common loop
+// ... - loops containing Src but not Dst
+// SrcLevels - innermost loop containing Src but not Dst
+// ... - loops containing Dst but not Src
+// MaxLevels - innermost loop containing Dst but not Src
+// Consider the following code fragment:
+// for (a = ...) {
+// for (b = ...) {
+// for (c = ...) {
+// for (d = ...) {
+// A[] = ...;
+// }
+// }
+// for (e = ...) {
+// for (f = ...) {
+// for (g = ...) {
+// ... = A[];
+// }
+// }
+// }
+// }
+// }
+// If we're looking at the possibility of a dependence between the store
+// to A (the Src) and the load from A (the Dst), we'll note that they
+// have 2 loops in common, so CommonLevels will equal 2 and the direction
+// vector for Result will have 2 entries. SrcLevels = 4 and MaxLevels = 7.
+// A map from loop names to loop numbers would look like
+// a - 1
+// b - 2 = CommonLevels
+// c - 3
+// d - 4 = SrcLevels
+// e - 5
+// f - 6
+// g - 7 = MaxLevels
+void DependenceAnalysis::establishNestingLevels(const Instruction *Src,
+ const Instruction *Dst) {
+ const BasicBlock *SrcBlock = Src->getParent();
+ const BasicBlock *DstBlock = Dst->getParent();
+ unsigned SrcLevel = LI->getLoopDepth(SrcBlock);
+ unsigned DstLevel = LI->getLoopDepth(DstBlock);
+ const Loop *SrcLoop = LI->getLoopFor(SrcBlock);
+ const Loop *DstLoop = LI->getLoopFor(DstBlock);
+ SrcLevels = SrcLevel;
+ MaxLevels = SrcLevel + DstLevel;
+ while (SrcLevel > DstLevel) {
+ SrcLoop = SrcLoop->getParentLoop();
+ SrcLevel--;
+ }
+ while (DstLevel > SrcLevel) {
+ DstLoop = DstLoop->getParentLoop();
+ DstLevel--;
+ }
+ while (SrcLoop != DstLoop) {
+ SrcLoop = SrcLoop->getParentLoop();
+ DstLoop = DstLoop->getParentLoop();
+ SrcLevel--;
+ }
+ CommonLevels = SrcLevel;
+ MaxLevels -= CommonLevels;
+}
+
+
+// Given one of the loops containing the source, return
+// its level index in our numbering scheme.
+unsigned DependenceAnalysis::mapSrcLoop(const Loop *SrcLoop) const {
+ return SrcLoop->getLoopDepth();
+}
+
+
+// Given one of the loops containing the destination,
+// return its level index in our numbering scheme.
+unsigned DependenceAnalysis::mapDstLoop(const Loop *DstLoop) const {
+ unsigned D = DstLoop->getLoopDepth();
+ if (D > CommonLevels)
+ return D - CommonLevels + SrcLevels;
+ else
+ return D;
+}
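+
+// Using the a..g example above (CommonLevels = 2, SrcLevels = 4), loop e
+// has depth 3, so mapDstLoop returns 3 - 2 + 4 = 5, matching the numbering
+// shown before establishNestingLevels.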
+
+
+// Returns true if Expression is loop invariant in LoopNest.
+bool DependenceAnalysis::isLoopInvariant(const SCEV *Expression,
+ const Loop *LoopNest) const {
+ if (!LoopNest)
+ return true;
+ return SE->isLoopInvariant(Expression, LoopNest) &&
+ isLoopInvariant(Expression, LoopNest->getParentLoop());
+}
+
+
+
+// Finds the set of loops from the LoopNest that
+// have a level <= CommonLevels and are referred to by the SCEV Expression.
+void DependenceAnalysis::collectCommonLoops(const SCEV *Expression,
+ const Loop *LoopNest,
+ SmallBitVector &Loops) const {
+ while (LoopNest) {
+ unsigned Level = LoopNest->getLoopDepth();
+ if (Level <= CommonLevels && !SE->isLoopInvariant(Expression, LoopNest))
+ Loops.set(Level);
+ LoopNest = LoopNest->getParentLoop();
+ }
+}
+
+
+// removeMatchingExtensions - Examines a subscript pair.
+// If the source and destination are identically sign (or zero)
+// extended, it strips off the extension in an effort to simplify
+// the actual analysis.
+void DependenceAnalysis::removeMatchingExtensions(Subscript *Pair) {
+ const SCEV *Src = Pair->Src;
+ const SCEV *Dst = Pair->Dst;
+ if ((isa<SCEVZeroExtendExpr>(Src) && isa<SCEVZeroExtendExpr>(Dst)) ||
+ (isa<SCEVSignExtendExpr>(Src) && isa<SCEVSignExtendExpr>(Dst))) {
+ const SCEVCastExpr *SrcCast = cast<SCEVCastExpr>(Src);
+ const SCEVCastExpr *DstCast = cast<SCEVCastExpr>(Dst);
+ if (SrcCast->getType() == DstCast->getType()) {
+ Pair->Src = SrcCast->getOperand();
+ Pair->Dst = DstCast->getOperand();
+ }
+ }
+}
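+
+// For example, the pair (sext i32 {0,+,1}<%L> to i64) and
+// (sext i32 {1,+,1}<%L> to i64) is rewritten as {0,+,1}<%L> and
+// {1,+,1}<%L>, letting later tests work on the narrower expressions.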
+
+
+// Examine the scev and return true iff it's linear.
+// Collect any loops mentioned in the set of "Loops".
+bool DependenceAnalysis::checkSrcSubscript(const SCEV *Src,
+ const Loop *LoopNest,
+ SmallBitVector &Loops) {
+ const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Src);
+ if (!AddRec)
+ return isLoopInvariant(Src, LoopNest);
+ const SCEV *Start = AddRec->getStart();
+ const SCEV *Step = AddRec->getStepRecurrence(*SE);
+ if (!isLoopInvariant(Step, LoopNest))
+ return false;
+ Loops.set(mapSrcLoop(AddRec->getLoop()));
+ return checkSrcSubscript(Start, LoopNest, Loops);
+}
+
+
+
+// Examine the scev and return true iff it's linear.
+// Collect any loops mentioned in the set of "Loops".
+bool DependenceAnalysis::checkDstSubscript(const SCEV *Dst,
+ const Loop *LoopNest,
+ SmallBitVector &Loops) {
+ const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Dst);
+ if (!AddRec)
+ return isLoopInvariant(Dst, LoopNest);
+ const SCEV *Start = AddRec->getStart();
+ const SCEV *Step = AddRec->getStepRecurrence(*SE);
+ if (!isLoopInvariant(Step, LoopNest))
+ return false;
+ Loops.set(mapDstLoop(AddRec->getLoop()));
+ return checkDstSubscript(Start, LoopNest, Loops);
+}
+
+
+// Examines the subscript pair (the Src and Dst SCEVs)
+// and classifies it as either ZIV, SIV, RDIV, MIV, or Nonlinear.
+// Collects the associated loops in a set.
+DependenceAnalysis::Subscript::ClassificationKind
+DependenceAnalysis::classifyPair(const SCEV *Src, const Loop *SrcLoopNest,
+ const SCEV *Dst, const Loop *DstLoopNest,
+ SmallBitVector &Loops) {
+ SmallBitVector SrcLoops(MaxLevels + 1);
+ SmallBitVector DstLoops(MaxLevels + 1);
+ if (!checkSrcSubscript(Src, SrcLoopNest, SrcLoops))
+ return Subscript::NonLinear;
+ if (!checkDstSubscript(Dst, DstLoopNest, DstLoops))
+ return Subscript::NonLinear;
+ Loops = SrcLoops;
+ Loops |= DstLoops;
+ unsigned N = Loops.count();
+ if (N == 0)
+ return Subscript::ZIV;
+ if (N == 1)
+ return Subscript::SIV;
+ if (N == 2 && (SrcLoops.count() == 0 ||
+ DstLoops.count() == 0 ||
+ (SrcLoops.count() == 1 && DstLoops.count() == 1)))
+ return Subscript::RDIV;
+ return Subscript::MIV;
+}
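+
+// Illustrative subscript pairs: A[5] vs. A[6] is ZIV (no common loops);
+// A[i] vs. A[i + 1] is SIV (one loop); A[i] vs. A[j], with i and j in
+// disjoint loops, is RDIV; A[i + j] vs. A[i] is MIV.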
+
+
+// A wrapper around SCEV::isKnownPredicate.
+// Looks for cases where we're interested in comparing for equality.
+// If both X and Y have been identically sign or zero extended,
+// it strips off the (confusing) extensions before invoking
+// SCEV::isKnownPredicate. Perhaps, someday, the ScalarEvolution package
+// will be similarly updated.
+//
+// If SCEV::isKnownPredicate can't prove the predicate,
+// we try simple subtraction, which seems to help in some cases
+// involving symbolics.
+bool DependenceAnalysis::isKnownPredicate(ICmpInst::Predicate Pred,
+ const SCEV *X,
+ const SCEV *Y) const {
+ if (Pred == CmpInst::ICMP_EQ ||
+ Pred == CmpInst::ICMP_NE) {
+ if ((isa<SCEVSignExtendExpr>(X) &&
+ isa<SCEVSignExtendExpr>(Y)) ||
+ (isa<SCEVZeroExtendExpr>(X) &&
+ isa<SCEVZeroExtendExpr>(Y))) {
+ const SCEVCastExpr *CX = cast<SCEVCastExpr>(X);
+ const SCEVCastExpr *CY = cast<SCEVCastExpr>(Y);
+ const SCEV *Xop = CX->getOperand();
+ const SCEV *Yop = CY->getOperand();
+ if (Xop->getType() == Yop->getType()) {
+ X = Xop;
+ Y = Yop;
+ }
+ }
+ }
+ if (SE->isKnownPredicate(Pred, X, Y))
+ return true;
+ // If SE->isKnownPredicate can't prove the condition,
+ // we try the brute-force approach of subtracting
+ // and testing the difference.
+ // By testing with SE->isKnownPredicate first, we avoid
+ // the possibility of overflow when the arguments are constants.
+ const SCEV *Delta = SE->getMinusSCEV(X, Y);
+ switch (Pred) {
+ case CmpInst::ICMP_EQ:
+ return Delta->isZero();
+ case CmpInst::ICMP_NE:
+ return SE->isKnownNonZero(Delta);
+ case CmpInst::ICMP_SGE:
+ return SE->isKnownNonNegative(Delta);
+ case CmpInst::ICMP_SLE:
+ return SE->isKnownNonPositive(Delta);
+ case CmpInst::ICMP_SGT:
+ return SE->isKnownPositive(Delta);
+ case CmpInst::ICMP_SLT:
+ return SE->isKnownNegative(Delta);
+ default:
+ llvm_unreachable("unexpected predicate in isKnownPredicate");
+ }
+}
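+
+// For example, with X = n + 1 and Y = n for a symbolic n,
+// SE->isKnownPredicate may fail to prove ICMP_SGT, but the difference
+// Delta folds to the constant 1, so isKnownPositive(Delta) succeeds.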
+
+
+// All subscripts are the same type.
+// Loop bound may be smaller (e.g., a char).
+// Should zero extend loop bound, since it's always >= 0.
+// This routine collects upper bound and extends if needed.
+// Return null if no bound available.
+const SCEV *DependenceAnalysis::collectUpperBound(const Loop *L,
+ Type *T) const {
+ if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
+ const SCEV *UB = SE->getBackedgeTakenCount(L);
+ return SE->getNoopOrZeroExtend(UB, T);
+ }
+ return NULL;
+}
+
+
+// Calls collectUpperBound(), then attempts to cast it to SCEVConstant.
+// If the cast fails, returns NULL.
+const SCEVConstant *DependenceAnalysis::collectConstantUpperBound(const Loop *L,
+ Type *T) const {
+ if (const SCEV *UB = collectUpperBound(L, T))
+ return dyn_cast<SCEVConstant>(UB);
+ return NULL;
+}
+
+
+// testZIV -
+// When we have a pair of subscripts of the form [c1] and [c2],
+// where c1 and c2 are both loop invariant, we attack it using
+// the ZIV test. Basically, we test by comparing the two values,
+// but there are actually three possible results:
+// 1) the values are equal, so there's a dependence
+// 2) the values are different, so there's no dependence
+// 3) the values might be equal, so we have to assume a dependence.
+//
+// Return true if dependence disproved.
+bool DependenceAnalysis::testZIV(const SCEV *Src,
+ const SCEV *Dst,
+ FullDependence &Result) const {
+ DEBUG(dbgs() << " src = " << *Src << "\n");
+ DEBUG(dbgs() << " dst = " << *Dst << "\n");
+ ++ZIVapplications;
+ if (isKnownPredicate(CmpInst::ICMP_EQ, Src, Dst)) {
+ DEBUG(dbgs() << " provably dependent\n");
+ return false; // provably dependent
+ }
+ if (isKnownPredicate(CmpInst::ICMP_NE, Src, Dst)) {
+ DEBUG(dbgs() << " provably independent\n");
+ ++ZIVindependence;
+ return true; // provably independent
+ }
+ DEBUG(dbgs() << " possibly dependent\n");
+ Result.Consistent = false;
+ return false; // possibly dependent
+}
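+
+// For example, A[5] vs. A[5] is provably equal (case 1, dependent);
+// A[5] vs. A[6] is provably unequal (case 2, independent); and
+// A[n] vs. A[m] for unrelated symbolic n and m is case 3, so the test
+// conservatively assumes a dependence and marks the result inconsistent.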
+
+
+// strongSIVtest -
+// From the paper, Practical Dependence Testing, Section 4.2.1
+//
+// When we have a pair of subscripts of the form [c1 + a*i] and [c2 + a*i],
+// where i is an induction variable, c1 and c2 are loop invariant,
+// and a is a constant, we can solve it exactly using the Strong SIV test.
+//
+// Can prove independence. Failing that, can compute distance (and direction).
+// In the presence of symbolic terms, we can sometimes make progress.
+//
+// If there's a dependence,
+//
+// c1 + a*i = c2 + a*i'
+//
+// The dependence distance is
+//
+// d = i' - i = (c1 - c2)/a
+//
+// A dependence only exists if d is an integer and abs(d) <= U, where U is the
+// loop's upper bound. If a dependence exists, the dependence direction is
+// defined as
+//
+// { < if d > 0
+// direction = { = if d = 0
+// { > if d < 0
+//
+// Return true if dependence disproved.
+bool DependenceAnalysis::strongSIVtest(const SCEV *Coeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint) const {
+ DEBUG(dbgs() << "\tStrong SIV test\n");
+ DEBUG(dbgs() << "\t Coeff = " << *Coeff);
+ DEBUG(dbgs() << ", " << *Coeff->getType() << "\n");
+ DEBUG(dbgs() << "\t SrcConst = " << *SrcConst);
+ DEBUG(dbgs() << ", " << *SrcConst->getType() << "\n");
+ DEBUG(dbgs() << "\t DstConst = " << *DstConst);
+ DEBUG(dbgs() << ", " << *DstConst->getType() << "\n");
+ ++StrongSIVapplications;
+ assert(0 < Level && Level <= CommonLevels && "level out of range");
+ Level--;
+
+ const SCEV *Delta = SE->getMinusSCEV(SrcConst, DstConst);
+ DEBUG(dbgs() << "\t Delta = " << *Delta);
+ DEBUG(dbgs() << ", " << *Delta->getType() << "\n");
+
+ // check that |Delta| < iteration count
+ if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) {
+ DEBUG(dbgs() << "\t UpperBound = " << *UpperBound);
+ DEBUG(dbgs() << ", " << *UpperBound->getType() << "\n");
+ const SCEV *AbsDelta =
+ SE->isKnownNonNegative(Delta) ? Delta : SE->getNegativeSCEV(Delta);
+ const SCEV *AbsCoeff =
+ SE->isKnownNonNegative(Coeff) ? Coeff : SE->getNegativeSCEV(Coeff);
+ const SCEV *Product = SE->getMulExpr(UpperBound, AbsCoeff);
+ if (isKnownPredicate(CmpInst::ICMP_SGT, AbsDelta, Product)) {
+ // Distance greater than trip count - no dependence
+ ++StrongSIVindependence;
+ ++StrongSIVsuccesses;
+ return true;
+ }
+ }
+
+ // Can we compute distance?
+ if (isa<SCEVConstant>(Delta) && isa<SCEVConstant>(Coeff)) {
+ APInt ConstDelta = cast<SCEVConstant>(Delta)->getValue()->getValue();
+ APInt ConstCoeff = cast<SCEVConstant>(Coeff)->getValue()->getValue();
+ APInt Distance = ConstDelta; // these need to be initialized
+ APInt Remainder = ConstDelta;
+ APInt::sdivrem(ConstDelta, ConstCoeff, Distance, Remainder);
+ DEBUG(dbgs() << "\t Distance = " << Distance << "\n");
+ DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n");
+ // Make sure Coeff divides Delta exactly
+ if (Remainder != 0) {
+ // Coeff doesn't divide Distance, no dependence
+ ++StrongSIVindependence;
+ ++StrongSIVsuccesses;
+ return true;
+ }
+ Result.DV[Level].Distance = SE->getConstant(Distance);
+ NewConstraint.setDistance(SE->getConstant(Distance), CurLoop);
+ if (Distance.sgt(0))
+ Result.DV[Level].Direction &= Dependence::DVEntry::LT;
+ else if (Distance.slt(0))
+ Result.DV[Level].Direction &= Dependence::DVEntry::GT;
+ else
+ Result.DV[Level].Direction &= Dependence::DVEntry::EQ;
+ ++StrongSIVsuccesses;
+ }
+ else if (Delta->isZero()) {
+ // since 0/X == 0
+ Result.DV[Level].Distance = Delta;
+ NewConstraint.setDistance(Delta, CurLoop);
+ Result.DV[Level].Direction &= Dependence::DVEntry::EQ;
+ ++StrongSIVsuccesses;
+ }
+ else {
+ if (Coeff->isOne()) {
+ DEBUG(dbgs() << "\t Distance = " << *Delta << "\n");
+ Result.DV[Level].Distance = Delta; // since X/1 == X
+ NewConstraint.setDistance(Delta, CurLoop);
+ }
+ else {
+ Result.Consistent = false;
+ NewConstraint.setLine(Coeff,
+ SE->getNegativeSCEV(Coeff),
+ SE->getNegativeSCEV(Delta), CurLoop);
+ }
+
+ // maybe we can get a useful direction
+ bool DeltaMaybeZero = !SE->isKnownNonZero(Delta);
+ bool DeltaMaybePositive = !SE->isKnownNonPositive(Delta);
+ bool DeltaMaybeNegative = !SE->isKnownNonNegative(Delta);
+ bool CoeffMaybePositive = !SE->isKnownNonPositive(Coeff);
+ bool CoeffMaybeNegative = !SE->isKnownNonNegative(Coeff);
+ // The double negatives above are confusing.
+ // It helps to read !SE->isKnownNonZero(Delta)
+ // as "Delta might be Zero"
+ unsigned NewDirection = Dependence::DVEntry::NONE;
+ if ((DeltaMaybePositive && CoeffMaybePositive) ||
+ (DeltaMaybeNegative && CoeffMaybeNegative))
+ NewDirection = Dependence::DVEntry::LT;
+ if (DeltaMaybeZero)
+ NewDirection |= Dependence::DVEntry::EQ;
+ if ((DeltaMaybeNegative && CoeffMaybePositive) ||
+ (DeltaMaybePositive && CoeffMaybeNegative))
+ NewDirection |= Dependence::DVEntry::GT;
+ if (NewDirection < Result.DV[Level].Direction)
+ ++StrongSIVsuccesses;
+ Result.DV[Level].Direction &= NewDirection;
+ }
+ return false;
+}
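+
+// Worked example (hypothetical subscripts): for Src = A[2*i + 2] and
+// Dst = A[2*i], Coeff = 2 and Delta = SrcConst - DstConst = 2, so
+// sdivrem yields Distance = 1 with Remainder = 0; since the distance is
+// positive, the direction at this level is refined to LT.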
+
+
+// weakCrossingSIVtest -
+// From the paper, Practical Dependence Testing, Section 4.2.2
+//
+// When we have a pair of subscripts of the form [c1 + a*i] and [c2 - a*i],
+// where i is an induction variable, c1 and c2 are loop invariant,
+// and a is a constant, we can solve it exactly using the
+// Weak-Crossing SIV test.
+//
+// Given c1 + a*i = c2 - a*i', we can look for the intersection of
+// the two lines, where i = i', yielding
+//
+// c1 + a*i = c2 - a*i
+// 2a*i = c2 - c1
+// i = (c2 - c1)/2a
+//
+// If i < 0, there is no dependence.
+// If i > upperbound, there is no dependence.
+// If i = 0 (i.e., if c1 = c2), there's a dependence with distance = 0.
+// If i = upperbound, there's a dependence with distance = 0.
+// If i is integral, there's a dependence (all directions).
+// If the non-integer part = 1/2, there's a dependence (<> directions).
+// Otherwise, there's no dependence.
+//
+// Can prove independence. Failing that,
+// can sometimes refine the directions.
+// Can determine iteration for splitting.
+//
+// Return true if dependence disproved.
+bool DependenceAnalysis::weakCrossingSIVtest(const SCEV *Coeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint,
+ const SCEV *&SplitIter) const {
+ DEBUG(dbgs() << "\tWeak-Crossing SIV test\n");
+ DEBUG(dbgs() << "\t Coeff = " << *Coeff << "\n");
+ DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
+ DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
+ ++WeakCrossingSIVapplications;
+ assert(0 < Level && Level <= CommonLevels && "Level out of range");
+ Level--;
+ Result.Consistent = false;
+ const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
+ DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ NewConstraint.setLine(Coeff, Coeff, Delta, CurLoop);
+ if (Delta->isZero()) {
+ Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::LT);
+ Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::GT);
+ ++WeakCrossingSIVsuccesses;
+ if (!Result.DV[Level].Direction) {
+ ++WeakCrossingSIVindependence;
+ return true;
+ }
+ Result.DV[Level].Distance = Delta; // = 0
+ return false;
+ }
+ const SCEVConstant *ConstCoeff = dyn_cast<SCEVConstant>(Coeff);
+ if (!ConstCoeff)
+ return false;
+
+ Result.DV[Level].Splitable = true;
+ if (SE->isKnownNegative(ConstCoeff)) {
+ ConstCoeff = dyn_cast<SCEVConstant>(SE->getNegativeSCEV(ConstCoeff));
+ assert(ConstCoeff &&
+ "dynamic cast of negative of ConstCoeff should yield constant");
+ Delta = SE->getNegativeSCEV(Delta);
+ }
+ assert(SE->isKnownPositive(ConstCoeff) && "ConstCoeff should be positive");
+
+ // compute SplitIter for use by DependenceAnalysis::getSplitIteration()
+ SplitIter =
+ SE->getUDivExpr(SE->getSMaxExpr(SE->getConstant(Delta->getType(), 0),
+ Delta),
+ SE->getMulExpr(SE->getConstant(Delta->getType(), 2),
+ ConstCoeff));
+ DEBUG(dbgs() << "\t Split iter = " << *SplitIter << "\n");
+
+ const SCEVConstant *ConstDelta = dyn_cast<SCEVConstant>(Delta);
+ if (!ConstDelta)
+ return false;
+
+ // We're certain that ConstCoeff > 0; therefore,
+ // if Delta < 0, then no dependence.
+ DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ DEBUG(dbgs() << "\t ConstCoeff = " << *ConstCoeff << "\n");
+ if (SE->isKnownNegative(Delta)) {
+ // No dependence, Delta < 0
+ ++WeakCrossingSIVindependence;
+ ++WeakCrossingSIVsuccesses;
+ return true;
+ }
+
+ // We're certain that Delta > 0 and ConstCoeff > 0.
+ // Check Delta/(2*ConstCoeff) against upper loop bound
+ if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) {
+ DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n");
+ const SCEV *ConstantTwo = SE->getConstant(UpperBound->getType(), 2);
+ const SCEV *ML = SE->getMulExpr(SE->getMulExpr(ConstCoeff, UpperBound),
+ ConstantTwo);
+ DEBUG(dbgs() << "\t ML = " << *ML << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_SGT, Delta, ML)) {
+ // Delta too big, no dependence
+ ++WeakCrossingSIVindependence;
+ ++WeakCrossingSIVsuccesses;
+ return true;
+ }
+ if (isKnownPredicate(CmpInst::ICMP_EQ, Delta, ML)) {
+ // i = i' = UB
+ Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::LT);
+ Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::GT);
+ ++WeakCrossingSIVsuccesses;
+ if (!Result.DV[Level].Direction) {
+ ++WeakCrossingSIVindependence;
+ return true;
+ }
+ Result.DV[Level].Splitable = false;
+ Result.DV[Level].Distance = SE->getConstant(Delta->getType(), 0);
+ return false;
+ }
+ }
+
+ // check that Coeff divides Delta
+ APInt APDelta = ConstDelta->getValue()->getValue();
+ APInt APCoeff = ConstCoeff->getValue()->getValue();
+ APInt Distance = APDelta; // these need to be initialized
+ APInt Remainder = APDelta;
+ APInt::sdivrem(APDelta, APCoeff, Distance, Remainder);
+ DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n");
+ if (Remainder != 0) {
+ // Coeff doesn't divide Delta, no dependence
+ ++WeakCrossingSIVindependence;
+ ++WeakCrossingSIVsuccesses;
+ return true;
+ }
+ DEBUG(dbgs() << "\t Distance = " << Distance << "\n");
+
+ // if 2*Coeff doesn't divide Delta, then the equal direction isn't possible
+ APInt Two = APInt(Distance.getBitWidth(), 2, true);
+ Remainder = Distance.srem(Two);
+ DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n");
+ if (Remainder != 0) {
+ // Equal direction isn't possible
+ Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::EQ);
+ ++WeakCrossingSIVsuccesses;
+ }
+ return false;
+}
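+
+// Worked example (hypothetical subscripts): for Src = A[2*i] and
+// Dst = A[6 - 2*i], Delta = 6 and ConstCoeff = 2, giving the crossing
+// point i = 6/(2*2) = 1.5 and SplitIter = 1. Coeff divides Delta
+// (6/2 = 3), but 3 is odd, so the EQ direction is removed; only the
+// <> directions survive, the "non-integer part = 1/2" case above.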
+
+
+// Kirch's algorithm, from
+//
+// Optimizing Supercompilers for Supercomputers
+// Michael Wolfe
+// MIT Press, 1989
+//
+// Program 2.1, page 29.
+// Computes the GCD of AM and BM.
+// Also finds a solution to the equation ax - by = gcd(a, b).
+// Returns true iff the gcd divides Delta.
+static
+bool findGCD(unsigned Bits, APInt AM, APInt BM, APInt Delta,
+ APInt &G, APInt &X, APInt &Y) {
+ APInt A0(Bits, 1, true), A1(Bits, 0, true);
+ APInt B0(Bits, 0, true), B1(Bits, 1, true);
+ APInt G0 = AM.abs();
+ APInt G1 = BM.abs();
+ APInt Q = G0; // these need to be initialized
+ APInt R = G0;
+ APInt::sdivrem(G0, G1, Q, R);
+ while (R != 0) {
+ APInt A2 = A0 - Q*A1; A0 = A1; A1 = A2;
+ APInt B2 = B0 - Q*B1; B0 = B1; B1 = B2;
+ G0 = G1; G1 = R;
+ APInt::sdivrem(G0, G1, Q, R);
+ }
+ G = G1;
+ DEBUG(dbgs() << "\t GCD = " << G << "\n");
+ X = AM.slt(0) ? -A1 : A1;
+ Y = BM.slt(0) ? B1 : -B1;
+
+ // make sure gcd divides Delta
+ R = Delta.srem(G);
+ if (R != 0)
+ return true; // gcd doesn't divide Delta, no dependence
+ Q = Delta.sdiv(G);
+ X *= Q;
+ Y *= Q;
+ return false;
+}
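+
+// Worked example: findGCD with AM = 6, BM = 4, Delta = 2 computes G = 2,
+// X = 1, Y = 1 (indeed 6*1 - 4*1 = 2), and returns false since the gcd
+// divides Delta; with Delta = 3 the remainder test fails and it returns
+// true, proving independence.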
+
+
+static
+APInt floorOfQuotient(APInt A, APInt B) {
+ APInt Q = A; // these need to be initialized
+ APInt R = A;
+ APInt::sdivrem(A, B, Q, R);
+ if (R == 0)
+ return Q;
+ if ((A.sgt(0) && B.sgt(0)) ||
+ (A.slt(0) && B.slt(0)))
+ return Q;
+ else
+ return Q - 1;
+}
+
+
+static
+APInt ceilingOfQuotient(APInt A, APInt B) {
+ APInt Q = A; // these need to be initialized
+ APInt R = A;
+ APInt::sdivrem(A, B, Q, R);
+ if (R == 0)
+ return Q;
+ if ((A.sgt(0) && B.sgt(0)) ||
+ (A.slt(0) && B.slt(0)))
+ return Q + 1;
+ else
+ return Q;
+}
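+
+// For example, floorOfQuotient(-7, 2) returns -4 and
+// ceilingOfQuotient(-7, 2) returns -3; APInt::sdivrem truncates toward
+// zero (Q = -3), so the sign checks above supply the correction for
+// mixed-sign operands.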
+
+
+static
+APInt maxAPInt(APInt A, APInt B) {
+ return A.sgt(B) ? A : B;
+}
+
+
+static
+APInt minAPInt(APInt A, APInt B) {
+ return A.slt(B) ? A : B;
+}
+
+
+// exactSIVtest -
+// When we have a pair of subscripts of the form [c1 + a1*i] and [c2 + a2*i],
+// where i is an induction variable, c1 and c2 are loop invariant, and a1
+// and a2 are constant, we can solve it exactly using an algorithm developed
+// by Banerjee and Wolfe. See Section 2.5.3 in
+//
+// Optimizing Supercompilers for Supercomputers
+// Michael Wolfe
+// MIT Press, 1989
+//
+// It's slower than the specialized tests (strong SIV, weak-zero SIV, etc),
+// so use them if possible. They're also a bit better with symbolics and,
+// in the case of the strong SIV test, can compute Distances.
+//
+// Return true if dependence disproved.
+bool DependenceAnalysis::exactSIVtest(const SCEV *SrcCoeff,
+ const SCEV *DstCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint) const {
+ DEBUG(dbgs() << "\tExact SIV test\n");
+ DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << " = AM\n");
+ DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << " = BM\n");
+ DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
+ DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
+ ++ExactSIVapplications;
+ assert(0 < Level && Level <= CommonLevels && "Level out of range");
+ Level--;
+ Result.Consistent = false;
+ const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
+ DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ NewConstraint.setLine(SrcCoeff, SE->getNegativeSCEV(DstCoeff),
+ Delta, CurLoop);
+ const SCEVConstant *ConstDelta = dyn_cast<SCEVConstant>(Delta);
+ const SCEVConstant *ConstSrcCoeff = dyn_cast<SCEVConstant>(SrcCoeff);
+ const SCEVConstant *ConstDstCoeff = dyn_cast<SCEVConstant>(DstCoeff);
+ if (!ConstDelta || !ConstSrcCoeff || !ConstDstCoeff)
+ return false;
+
+ // find gcd
+ APInt G, X, Y;
+ APInt AM = ConstSrcCoeff->getValue()->getValue();
+ APInt BM = ConstDstCoeff->getValue()->getValue();
+ unsigned Bits = AM.getBitWidth();
+ if (findGCD(Bits, AM, BM, ConstDelta->getValue()->getValue(), G, X, Y)) {
+ // gcd doesn't divide Delta, no dependence
+ ++ExactSIVindependence;
+ ++ExactSIVsuccesses;
+ return true;
+ }
+
+ DEBUG(dbgs() << "\t X = " << X << ", Y = " << Y << "\n");
+
+ // since SCEV construction normalizes, LM = 0
+ APInt UM(Bits, 1, true);
+ bool UMvalid = false;
+ // UM is perhaps unavailable, let's check
+ if (const SCEVConstant *CUB =
+ collectConstantUpperBound(CurLoop, Delta->getType())) {
+ UM = CUB->getValue()->getValue();
+ DEBUG(dbgs() << "\t UM = " << UM << "\n");
+ UMvalid = true;
+ }
+
+ APInt TU(APInt::getSignedMaxValue(Bits));
+ APInt TL(APInt::getSignedMinValue(Bits));
+
+ // test(BM/G, LM-X) and test(-BM/G, X-UM)
+ APInt TMUL = BM.sdiv(G);
+ if (TMUL.sgt(0)) {
+ TL = maxAPInt(TL, ceilingOfQuotient(-X, TMUL));
+ DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ if (UMvalid) {
+ TU = minAPInt(TU, floorOfQuotient(UM - X, TMUL));
+ DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ }
+ }
+ else {
+ TU = minAPInt(TU, floorOfQuotient(-X, TMUL));
+ DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ if (UMvalid) {
+ TL = maxAPInt(TL, ceilingOfQuotient(UM - X, TMUL));
+ DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ }
+ }
+
+ // test(AM/G, LM-Y) and test(-AM/G, Y-UM)
+ TMUL = AM.sdiv(G);
+ if (TMUL.sgt(0)) {
+ TL = maxAPInt(TL, ceilingOfQuotient(-Y, TMUL));
+ DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ if (UMvalid) {
+ TU = minAPInt(TU, floorOfQuotient(UM - Y, TMUL));
+ DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ }
+ }
+ else {
+ TU = minAPInt(TU, floorOfQuotient(-Y, TMUL));
+ DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ if (UMvalid) {
+ TL = maxAPInt(TL, ceilingOfQuotient(UM - Y, TMUL));
+ DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ }
+ }
+ if (TL.sgt(TU)) {
+ ++ExactSIVindependence;
+ ++ExactSIVsuccesses;
+ return true;
+ }
+
+ // explore directions
+ unsigned NewDirection = Dependence::DVEntry::NONE;
+
+ // less than
+ APInt SaveTU(TU); // save these
+ APInt SaveTL(TL);
+ DEBUG(dbgs() << "\t exploring LT direction\n");
+ TMUL = AM - BM;
+ if (TMUL.sgt(0)) {
+ TL = maxAPInt(TL, ceilingOfQuotient(X - Y + 1, TMUL));
+ DEBUG(dbgs() << "\t\t TL = " << TL << "\n");
+ }
+ else {
+ TU = minAPInt(TU, floorOfQuotient(X - Y + 1, TMUL));
+ DEBUG(dbgs() << "\t\t TU = " << TU << "\n");
+ }
+ if (TL.sle(TU)) {
+ NewDirection |= Dependence::DVEntry::LT;
+ ++ExactSIVsuccesses;
+ }
+
+ // equal
+ TU = SaveTU; // restore
+ TL = SaveTL;
+ DEBUG(dbgs() << "\t exploring EQ direction\n");
+ if (TMUL.sgt(0)) {
+ TL = maxAPInt(TL, ceilingOfQuotient(X - Y, TMUL));
+ DEBUG(dbgs() << "\t\t TL = " << TL << "\n");
+ }
+ else {
+ TU = minAPInt(TU, floorOfQuotient(X - Y, TMUL));
+ DEBUG(dbgs() << "\t\t TU = " << TU << "\n");
+ }
+ TMUL = BM - AM;
+ if (TMUL.sgt(0)) {
+ TL = maxAPInt(TL, ceilingOfQuotient(Y - X, TMUL));
+ DEBUG(dbgs() << "\t\t TL = " << TL << "\n");
+ }
+ else {
+ TU = minAPInt(TU, floorOfQuotient(Y - X, TMUL));
+ DEBUG(dbgs() << "\t\t TU = " << TU << "\n");
+ }
+ if (TL.sle(TU)) {
+ NewDirection |= Dependence::DVEntry::EQ;
+ ++ExactSIVsuccesses;
+ }
+
+ // greater than
+ TU = SaveTU; // restore
+ TL = SaveTL;
+ DEBUG(dbgs() << "\t exploring GT direction\n");
+ if (TMUL.sgt(0)) {
+ TL = maxAPInt(TL, ceilingOfQuotient(Y - X + 1, TMUL));
+ DEBUG(dbgs() << "\t\t TL = " << TL << "\n");
+ }
+ else {
+ TU = minAPInt(TU, floorOfQuotient(Y - X + 1, TMUL));
+ DEBUG(dbgs() << "\t\t TU = " << TU << "\n");
+ }
+ if (TL.sle(TU)) {
+ NewDirection |= Dependence::DVEntry::GT;
+ ++ExactSIVsuccesses;
+ }
+
+ // finished
+ Result.DV[Level].Direction &= NewDirection;
+ if (Result.DV[Level].Direction == Dependence::DVEntry::NONE)
+ ++ExactSIVindependence;
+ return Result.DV[Level].Direction == Dependence::DVEntry::NONE;
+}
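+
+// Illustrative run (hypothetical subscripts): for A[2*i] vs. A[2*i + 6]
+// with 0 <= i <= UM = 2, we have AM = BM = 2 and Delta = 6. The gcd G = 2
+// divides 6, and the scaled solution is X = 0, Y = -3. The bound checks
+// then give TL = max(ceilingOfQuotient(0, 1), ceilingOfQuotient(3, 1)) = 3
+// and TU = min(floorOfQuotient(2, 1), floorOfQuotient(5, 1)) = 2; since
+// TL > TU, the dependence is disproved.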
+
+
+
+// Return true if the divisor evenly divides the dividend.
+static
+bool isRemainderZero(const SCEVConstant *Dividend,
+ const SCEVConstant *Divisor) {
+ APInt ConstDividend = Dividend->getValue()->getValue();
+ APInt ConstDivisor = Divisor->getValue()->getValue();
+ return ConstDividend.srem(ConstDivisor) == 0;
+}
+
+
+// weakZeroSrcSIVtest -
+// From the paper, Practical Dependence Testing, Section 4.2.2
+//
+// When we have a pair of subscripts of the form [c1] and [c2 + a*i],
+// where i is an induction variable, c1 and c2 are loop invariant,
+// and a is a constant, we can solve it exactly using the
+// Weak-Zero SIV test.
+//
+// Given
+//
+// c1 = c2 + a*i
+//
+// we get
+//
+// (c1 - c2)/a = i
+//
+// If i is not an integer, there's no dependence.
+// If i < 0 or > UB, there's no dependence.
+// If i = 0, the direction is <= and peeling the
+// 1st iteration will break the dependence.
+// If i = UB, the direction is >= and peeling the
+// last iteration will break the dependence.
+// Otherwise, the direction is *.
+//
+// Can prove independence. Failing that, we can sometimes refine
+// the directions. Can sometimes show that first or last
+// iteration carries all the dependences (so worth peeling).
+//
+// (see also weakZeroDstSIVtest)
+//
+// Return true if dependence disproved.
+bool DependenceAnalysis::weakZeroSrcSIVtest(const SCEV *DstCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint) const {
+ // For the WeakSIV test, it's possible the loop isn't common to
+ // the Src and Dst loops. If it isn't, then there's no need to
+ // record a direction.
+ DEBUG(dbgs() << "\tWeak-Zero (src) SIV test\n");
+ DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << "\n");
+ DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
+ DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
+ ++WeakZeroSIVapplications;
+ assert(0 < Level && Level <= MaxLevels && "Level out of range");
+ Level--;
+ Result.Consistent = false;
+ const SCEV *Delta = SE->getMinusSCEV(SrcConst, DstConst);
+ NewConstraint.setLine(SE->getConstant(Delta->getType(), 0),
+ DstCoeff, Delta, CurLoop);
+ DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_EQ, SrcConst, DstConst)) {
+ if (Level < CommonLevels) {
+ Result.DV[Level].Direction &= Dependence::DVEntry::LE;
+ Result.DV[Level].PeelFirst = true;
+ ++WeakZeroSIVsuccesses;
+ }
+ return false; // dependences caused by first iteration
+ }
+ const SCEVConstant *ConstCoeff = dyn_cast<SCEVConstant>(DstCoeff);
+ if (!ConstCoeff)
+ return false;
+ const SCEV *AbsCoeff =
+ SE->isKnownNegative(ConstCoeff) ?
+ SE->getNegativeSCEV(ConstCoeff) : ConstCoeff;
+ const SCEV *NewDelta =
+ SE->isKnownNegative(ConstCoeff) ? SE->getNegativeSCEV(Delta) : Delta;
+
+  // check that Delta/DstCoeff < iteration count
+ // really check NewDelta < count*AbsCoeff
+ if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) {
+ DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n");
+ const SCEV *Product = SE->getMulExpr(AbsCoeff, UpperBound);
+ if (isKnownPredicate(CmpInst::ICMP_SGT, NewDelta, Product)) {
+ ++WeakZeroSIVindependence;
+ ++WeakZeroSIVsuccesses;
+ return true;
+ }
+ if (isKnownPredicate(CmpInst::ICMP_EQ, NewDelta, Product)) {
+ // dependences caused by last iteration
+ if (Level < CommonLevels) {
+ Result.DV[Level].Direction &= Dependence::DVEntry::GE;
+ Result.DV[Level].PeelLast = true;
+ ++WeakZeroSIVsuccesses;
+ }
+ return false;
+ }
+ }
+
+  // check that Delta/DstCoeff >= 0
+  // really check that NewDelta >= 0
+  if (SE->isKnownNegative(NewDelta)) {
+    // No dependence, NewDelta < 0
+ ++WeakZeroSIVindependence;
+ ++WeakZeroSIVsuccesses;
+ return true;
+ }
+
+  // if DstCoeff doesn't divide Delta, then no dependence
+ if (isa<SCEVConstant>(Delta) &&
+ !isRemainderZero(cast<SCEVConstant>(Delta), ConstCoeff)) {
+ ++WeakZeroSIVindependence;
+ ++WeakZeroSIVsuccesses;
+ return true;
+ }
+ return false;
+}
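+
+// Illustrative run (hypothetical subscripts): for src A[10] and
+// dst A[2*i + 1] with 0 <= i <= UB = 3, Delta = 10 - 1 = 9 and
+// ConstCoeff = 2. A dependence would need i = 9/2, which exceeds the
+// iteration count (9 > 2*3) and isn't an integer anyway, so the test
+// reports independence.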
+
+
+// weakZeroDstSIVtest -
+// From the paper, Practical Dependence Testing, Section 4.2.2
+//
+// When we have a pair of subscripts of the form [c1 + a*i] and [c2],
+// where i is an induction variable, c1 and c2 are loop invariant,
+// and a is a constant, we can solve it exactly using the
+// Weak-Zero SIV test.
+//
+// Given
+//
+// c1 + a*i = c2
+//
+// we get
+//
+// i = (c2 - c1)/a
+//
+// If i is not an integer, there's no dependence.
+// If i < 0 or > UB, there's no dependence.
+// If i = 0, the direction is <= and peeling the
+// 1st iteration will break the dependence.
+// If i = UB, the direction is >= and peeling the
+// last iteration will break the dependence.
+// Otherwise, the direction is *.
+//
+// Can prove independence. Failing that, we can sometimes refine
+// the directions. Can sometimes show that first or last
+// iteration carries all the dependences (so worth peeling).
+//
+// (see also weakZeroSrcSIVtest)
+//
+// Return true if dependence disproved.
+bool DependenceAnalysis::weakZeroDstSIVtest(const SCEV *SrcCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint) const {
+ // For the WeakSIV test, it's possible the loop isn't common to the
+ // Src and Dst loops. If it isn't, then there's no need to record a direction.
+ DEBUG(dbgs() << "\tWeak-Zero (dst) SIV test\n");
+ DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << "\n");
+ DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
+ DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
+ ++WeakZeroSIVapplications;
+ assert(0 < Level && Level <= SrcLevels && "Level out of range");
+ Level--;
+ Result.Consistent = false;
+ const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
+ NewConstraint.setLine(SrcCoeff, SE->getConstant(Delta->getType(), 0),
+ Delta, CurLoop);
+ DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_EQ, DstConst, SrcConst)) {
+ if (Level < CommonLevels) {
+ Result.DV[Level].Direction &= Dependence::DVEntry::LE;
+ Result.DV[Level].PeelFirst = true;
+ ++WeakZeroSIVsuccesses;
+ }
+ return false; // dependences caused by first iteration
+ }
+ const SCEVConstant *ConstCoeff = dyn_cast<SCEVConstant>(SrcCoeff);
+ if (!ConstCoeff)
+ return false;
+ const SCEV *AbsCoeff =
+ SE->isKnownNegative(ConstCoeff) ?
+ SE->getNegativeSCEV(ConstCoeff) : ConstCoeff;
+ const SCEV *NewDelta =
+ SE->isKnownNegative(ConstCoeff) ? SE->getNegativeSCEV(Delta) : Delta;
+
+ // check that Delta/SrcCoeff < iteration count
+ // really check NewDelta < count*AbsCoeff
+ if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) {
+ DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n");
+ const SCEV *Product = SE->getMulExpr(AbsCoeff, UpperBound);
+ if (isKnownPredicate(CmpInst::ICMP_SGT, NewDelta, Product)) {
+ ++WeakZeroSIVindependence;
+ ++WeakZeroSIVsuccesses;
+ return true;
+ }
+ if (isKnownPredicate(CmpInst::ICMP_EQ, NewDelta, Product)) {
+ // dependences caused by last iteration
+ if (Level < CommonLevels) {
+ Result.DV[Level].Direction &= Dependence::DVEntry::GE;
+ Result.DV[Level].PeelLast = true;
+ ++WeakZeroSIVsuccesses;
+ }
+ return false;
+ }
+ }
+
+ // check that Delta/SrcCoeff >= 0
+ // really check that NewDelta >= 0
+ if (SE->isKnownNegative(NewDelta)) {
+    // No dependence, NewDelta < 0
+ ++WeakZeroSIVindependence;
+ ++WeakZeroSIVsuccesses;
+ return true;
+ }
+
+ // if SrcCoeff doesn't divide Delta, then no dependence
+ if (isa<SCEVConstant>(Delta) &&
+ !isRemainderZero(cast<SCEVConstant>(Delta), ConstCoeff)) {
+ ++WeakZeroSIVindependence;
+ ++WeakZeroSIVsuccesses;
+ return true;
+ }
+ return false;
+}
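+
+// Illustrative run (hypothetical subscripts): for src A[c + 2*i] and
+// dst A[c], SrcConst equals DstConst, so the only dependence is at i = 0;
+// the direction is refined to <= and PeelFirst is set, since peeling the
+// first iteration removes the dependence.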
+
+
+// exactRDIVtest - Tests the RDIV subscript pair for dependence.
+// Things of the form [c1 + a*i] and [c2 + b*j],
+// where i and j are induction variables, c1 and c2 are loop invariant,
+// and a and b are constants.
+// Returns true if any possible dependence is disproved.
+// Marks the result as inconsistent.
+// Works in some cases that symbolicRDIVtest doesn't, and vice versa.
+bool DependenceAnalysis::exactRDIVtest(const SCEV *SrcCoeff,
+ const SCEV *DstCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *SrcLoop,
+ const Loop *DstLoop,
+ FullDependence &Result) const {
+ DEBUG(dbgs() << "\tExact RDIV test\n");
+ DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << " = AM\n");
+ DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << " = BM\n");
+ DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
+ DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
+ ++ExactRDIVapplications;
+ Result.Consistent = false;
+ const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
+ DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ const SCEVConstant *ConstDelta = dyn_cast<SCEVConstant>(Delta);
+ const SCEVConstant *ConstSrcCoeff = dyn_cast<SCEVConstant>(SrcCoeff);
+ const SCEVConstant *ConstDstCoeff = dyn_cast<SCEVConstant>(DstCoeff);
+ if (!ConstDelta || !ConstSrcCoeff || !ConstDstCoeff)
+ return false;
+
+ // find gcd
+ APInt G, X, Y;
+ APInt AM = ConstSrcCoeff->getValue()->getValue();
+ APInt BM = ConstDstCoeff->getValue()->getValue();
+ unsigned Bits = AM.getBitWidth();
+ if (findGCD(Bits, AM, BM, ConstDelta->getValue()->getValue(), G, X, Y)) {
+ // gcd doesn't divide Delta, no dependence
+ ++ExactRDIVindependence;
+ return true;
+ }
+
+ DEBUG(dbgs() << "\t X = " << X << ", Y = " << Y << "\n");
+
+ // since SCEV construction seems to normalize, LM = 0
+ APInt SrcUM(Bits, 1, true);
+ bool SrcUMvalid = false;
+ // SrcUM is perhaps unavailable, let's check
+ if (const SCEVConstant *UpperBound =
+ collectConstantUpperBound(SrcLoop, Delta->getType())) {
+ SrcUM = UpperBound->getValue()->getValue();
+ DEBUG(dbgs() << "\t SrcUM = " << SrcUM << "\n");
+ SrcUMvalid = true;
+ }
+
+ APInt DstUM(Bits, 1, true);
+ bool DstUMvalid = false;
+  // DstUM is perhaps unavailable, let's check
+ if (const SCEVConstant *UpperBound =
+ collectConstantUpperBound(DstLoop, Delta->getType())) {
+ DstUM = UpperBound->getValue()->getValue();
+ DEBUG(dbgs() << "\t DstUM = " << DstUM << "\n");
+ DstUMvalid = true;
+ }
+
+ APInt TU(APInt::getSignedMaxValue(Bits));
+ APInt TL(APInt::getSignedMinValue(Bits));
+
+ // test(BM/G, LM-X) and test(-BM/G, X-UM)
+ APInt TMUL = BM.sdiv(G);
+ if (TMUL.sgt(0)) {
+ TL = maxAPInt(TL, ceilingOfQuotient(-X, TMUL));
+ DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ if (SrcUMvalid) {
+ TU = minAPInt(TU, floorOfQuotient(SrcUM - X, TMUL));
+ DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ }
+ }
+ else {
+ TU = minAPInt(TU, floorOfQuotient(-X, TMUL));
+ DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ if (SrcUMvalid) {
+ TL = maxAPInt(TL, ceilingOfQuotient(SrcUM - X, TMUL));
+ DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ }
+ }
+
+ // test(AM/G, LM-Y) and test(-AM/G, Y-UM)
+ TMUL = AM.sdiv(G);
+ if (TMUL.sgt(0)) {
+ TL = maxAPInt(TL, ceilingOfQuotient(-Y, TMUL));
+ DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ if (DstUMvalid) {
+ TU = minAPInt(TU, floorOfQuotient(DstUM - Y, TMUL));
+ DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ }
+ }
+ else {
+ TU = minAPInt(TU, floorOfQuotient(-Y, TMUL));
+ DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ if (DstUMvalid) {
+ TL = maxAPInt(TL, ceilingOfQuotient(DstUM - Y, TMUL));
+ DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ }
+ }
+ if (TL.sgt(TU))
+ ++ExactRDIVindependence;
+ return TL.sgt(TU);
+}
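+
+// Illustrative run (hypothetical subscripts): for A[2*i] vs. A[2*j + 9],
+// AM = BM = 2 and Delta = 9. Since gcd(2, 2) = 2 does not divide 9, findGCD
+// reports independence immediately, before any bound checking is needed.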
+
+
+// symbolicRDIVtest -
+// In Section 4.5 of the Practical Dependence Testing paper, the authors
+// introduce a special case of Banerjee's Inequalities (also called the
+// Extreme-Value Test) that can handle some of the SIV and RDIV cases,
+// particularly cases with symbolics. Since it's only able to disprove
+// dependence (not compute distances or directions), we'll use it as a
+// fallback for the other tests.
+//
+// When we have a pair of subscripts of the form [c1 + a1*i] and [c2 + a2*j]
+// where i and j are induction variables and c1 and c2 are loop invariants,
+// we can use the symbolic tests to disprove some dependences, serving as a
+// backup for the RDIV test. Note that i and j can be the same variable,
+// letting this test serve as a backup for the various SIV tests.
+//
+// For a dependence to exist, c1 + a1*i must equal c2 + a2*j for some
+// 0 <= i <= N1 and some 0 <= j <= N2, where N1 and N2 are the (normalized)
+// loop bounds for the i and j loops, respectively. So, ...
+//
+// c1 + a1*i = c2 + a2*j
+// a1*i - a2*j = c2 - c1
+//
+// To test for a dependence, we compute c2 - c1 and make sure it's in the
+// range of the maximum and minimum possible values of a1*i - a2*j.
+// Considering the signs of a1 and a2, we have 4 possible cases:
+//
+// 1) If a1 >= 0 and a2 >= 0, then
+// a1*0 - a2*N2 <= c2 - c1 <= a1*N1 - a2*0
+// -a2*N2 <= c2 - c1 <= a1*N1
+//
+// 2) If a1 >= 0 and a2 <= 0, then
+// a1*0 - a2*0 <= c2 - c1 <= a1*N1 - a2*N2
+// 0 <= c2 - c1 <= a1*N1 - a2*N2
+//
+// 3) If a1 <= 0 and a2 >= 0, then
+// a1*N1 - a2*N2 <= c2 - c1 <= a1*0 - a2*0
+// a1*N1 - a2*N2 <= c2 - c1 <= 0
+//
+// 4) If a1 <= 0 and a2 <= 0, then
+// a1*N1 - a2*0 <= c2 - c1 <= a1*0 - a2*N2
+// a1*N1 <= c2 - c1 <= -a2*N2
+//
+// return true if dependence disproved
+bool DependenceAnalysis::symbolicRDIVtest(const SCEV *A1,
+ const SCEV *A2,
+ const SCEV *C1,
+ const SCEV *C2,
+ const Loop *Loop1,
+ const Loop *Loop2) const {
+ ++SymbolicRDIVapplications;
+ DEBUG(dbgs() << "\ttry symbolic RDIV test\n");
+ DEBUG(dbgs() << "\t A1 = " << *A1);
+ DEBUG(dbgs() << ", type = " << *A1->getType() << "\n");
+ DEBUG(dbgs() << "\t A2 = " << *A2 << "\n");
+ DEBUG(dbgs() << "\t C1 = " << *C1 << "\n");
+ DEBUG(dbgs() << "\t C2 = " << *C2 << "\n");
+ const SCEV *N1 = collectUpperBound(Loop1, A1->getType());
+ const SCEV *N2 = collectUpperBound(Loop2, A1->getType());
+ DEBUG(if (N1) dbgs() << "\t N1 = " << *N1 << "\n");
+ DEBUG(if (N2) dbgs() << "\t N2 = " << *N2 << "\n");
+ const SCEV *C2_C1 = SE->getMinusSCEV(C2, C1);
+ const SCEV *C1_C2 = SE->getMinusSCEV(C1, C2);
+ DEBUG(dbgs() << "\t C2 - C1 = " << *C2_C1 << "\n");
+ DEBUG(dbgs() << "\t C1 - C2 = " << *C1_C2 << "\n");
+ if (SE->isKnownNonNegative(A1)) {
+ if (SE->isKnownNonNegative(A2)) {
+ // A1 >= 0 && A2 >= 0
+ if (N1) {
+ // make sure that c2 - c1 <= a1*N1
+ const SCEV *A1N1 = SE->getMulExpr(A1, N1);
+ DEBUG(dbgs() << "\t A1*N1 = " << *A1N1 << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_SGT, C2_C1, A1N1)) {
+ ++SymbolicRDIVindependence;
+ return true;
+ }
+ }
+ if (N2) {
+ // make sure that -a2*N2 <= c2 - c1, or a2*N2 >= c1 - c2
+ const SCEV *A2N2 = SE->getMulExpr(A2, N2);
+ DEBUG(dbgs() << "\t A2*N2 = " << *A2N2 << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_SLT, A2N2, C1_C2)) {
+ ++SymbolicRDIVindependence;
+ return true;
+ }
+ }
+ }
+ else if (SE->isKnownNonPositive(A2)) {
+ // a1 >= 0 && a2 <= 0
+ if (N1 && N2) {
+ // make sure that c2 - c1 <= a1*N1 - a2*N2
+ const SCEV *A1N1 = SE->getMulExpr(A1, N1);
+ const SCEV *A2N2 = SE->getMulExpr(A2, N2);
+ const SCEV *A1N1_A2N2 = SE->getMinusSCEV(A1N1, A2N2);
+ DEBUG(dbgs() << "\t A1*N1 - A2*N2 = " << *A1N1_A2N2 << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_SGT, C2_C1, A1N1_A2N2)) {
+ ++SymbolicRDIVindependence;
+ return true;
+ }
+ }
+ // make sure that 0 <= c2 - c1
+ if (SE->isKnownNegative(C2_C1)) {
+ ++SymbolicRDIVindependence;
+ return true;
+ }
+ }
+ }
+ else if (SE->isKnownNonPositive(A1)) {
+ if (SE->isKnownNonNegative(A2)) {
+ // a1 <= 0 && a2 >= 0
+ if (N1 && N2) {
+ // make sure that a1*N1 - a2*N2 <= c2 - c1
+ const SCEV *A1N1 = SE->getMulExpr(A1, N1);
+ const SCEV *A2N2 = SE->getMulExpr(A2, N2);
+ const SCEV *A1N1_A2N2 = SE->getMinusSCEV(A1N1, A2N2);
+ DEBUG(dbgs() << "\t A1*N1 - A2*N2 = " << *A1N1_A2N2 << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_SGT, A1N1_A2N2, C2_C1)) {
+ ++SymbolicRDIVindependence;
+ return true;
+ }
+ }
+ // make sure that c2 - c1 <= 0
+ if (SE->isKnownPositive(C2_C1)) {
+ ++SymbolicRDIVindependence;
+ return true;
+ }
+ }
+ else if (SE->isKnownNonPositive(A2)) {
+ // a1 <= 0 && a2 <= 0
+ if (N1) {
+ // make sure that a1*N1 <= c2 - c1
+ const SCEV *A1N1 = SE->getMulExpr(A1, N1);
+ DEBUG(dbgs() << "\t A1*N1 = " << *A1N1 << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_SGT, A1N1, C2_C1)) {
+ ++SymbolicRDIVindependence;
+ return true;
+ }
+ }
+ if (N2) {
+ // make sure that c2 - c1 <= -a2*N2, or c1 - c2 >= a2*N2
+ const SCEV *A2N2 = SE->getMulExpr(A2, N2);
+ DEBUG(dbgs() << "\t A2*N2 = " << *A2N2 << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_SLT, C1_C2, A2N2)) {
+ ++SymbolicRDIVindependence;
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
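+
+// Illustrative run (hypothetical subscripts): comparing A[i + N + 1] against
+// A[j], with both loops bounded by N, gives A1 = A2 = 1 and
+// C2 - C1 = -(N + 1). Case 1 requires -A2*N2 <= C2 - C1, i.e. N >= N + 1,
+// which is provably false, so independence follows even though N is symbolic.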
+
+
+// testSIV -
+// When we have a pair of subscripts of the form [c1 + a1*i] and [c2 - a2*i]
+// where i is an induction variable, c1 and c2 are loop invariant, and a1 and
+// a2 are constant, we attack it with an SIV test. While they can all be
+// solved with the Exact SIV test, it's worthwhile to use simpler tests when
+// they apply; they're cheaper and sometimes more precise.
+//
+// Return true if dependence disproved.
+bool DependenceAnalysis::testSIV(const SCEV *Src,
+ const SCEV *Dst,
+ unsigned &Level,
+ FullDependence &Result,
+ Constraint &NewConstraint,
+ const SCEV *&SplitIter) const {
+ DEBUG(dbgs() << " src = " << *Src << "\n");
+ DEBUG(dbgs() << " dst = " << *Dst << "\n");
+ const SCEVAddRecExpr *SrcAddRec = dyn_cast<SCEVAddRecExpr>(Src);
+ const SCEVAddRecExpr *DstAddRec = dyn_cast<SCEVAddRecExpr>(Dst);
+ if (SrcAddRec && DstAddRec) {
+ const SCEV *SrcConst = SrcAddRec->getStart();
+ const SCEV *DstConst = DstAddRec->getStart();
+ const SCEV *SrcCoeff = SrcAddRec->getStepRecurrence(*SE);
+ const SCEV *DstCoeff = DstAddRec->getStepRecurrence(*SE);
+ const Loop *CurLoop = SrcAddRec->getLoop();
+ assert(CurLoop == DstAddRec->getLoop() &&
+ "both loops in SIV should be same");
+ Level = mapSrcLoop(CurLoop);
+ bool disproven;
+ if (SrcCoeff == DstCoeff)
+ disproven = strongSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop,
+ Level, Result, NewConstraint);
+ else if (SrcCoeff == SE->getNegativeSCEV(DstCoeff))
+ disproven = weakCrossingSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop,
+ Level, Result, NewConstraint, SplitIter);
+ else
+ disproven = exactSIVtest(SrcCoeff, DstCoeff, SrcConst, DstConst, CurLoop,
+ Level, Result, NewConstraint);
+ return disproven ||
+ gcdMIVtest(Src, Dst, Result) ||
+ symbolicRDIVtest(SrcCoeff, DstCoeff, SrcConst, DstConst, CurLoop, CurLoop);
+ }
+ if (SrcAddRec) {
+ const SCEV *SrcConst = SrcAddRec->getStart();
+ const SCEV *SrcCoeff = SrcAddRec->getStepRecurrence(*SE);
+ const SCEV *DstConst = Dst;
+ const Loop *CurLoop = SrcAddRec->getLoop();
+ Level = mapSrcLoop(CurLoop);
+ return weakZeroDstSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop,
+ Level, Result, NewConstraint) ||
+ gcdMIVtest(Src, Dst, Result);
+ }
+ if (DstAddRec) {
+ const SCEV *DstConst = DstAddRec->getStart();
+ const SCEV *DstCoeff = DstAddRec->getStepRecurrence(*SE);
+ const SCEV *SrcConst = Src;
+ const Loop *CurLoop = DstAddRec->getLoop();
+ Level = mapDstLoop(CurLoop);
+ return weakZeroSrcSIVtest(DstCoeff, SrcConst, DstConst,
+ CurLoop, Level, Result, NewConstraint) ||
+ gcdMIVtest(Src, Dst, Result);
+ }
+ llvm_unreachable("SIV test expected at least one AddRec");
+ return false;
+}
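+
+// Dispatch examples (hypothetical subscripts): A[2*i] vs. A[2*i + 10] has
+// equal coefficients and goes to strongSIVtest; A[2*i] vs. A[-2*i + 10]
+// goes to weakCrossingSIVtest; A[2*i] vs. A[3*i + 10] falls through to
+// exactSIVtest; and A[2*i] vs. A[10] is handled by weakZeroDstSIVtest.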
+
+
+// testRDIV -
+// When we have a pair of subscripts of the form [c1 + a1*i] and [c2 + a2*j]
+// where i and j are induction variables, c1 and c2 are loop invariant,
+// and a1 and a2 are constant, we can solve it exactly with an easy adaptation
+// of the Exact SIV test, the Restricted Double Index Variable (RDIV) test.
+// It doesn't make sense to talk about distance or direction in this case,
+// so there's no point in making special versions of the Strong SIV test or
+// the Weak-crossing SIV test.
+//
+// With minor algebra, this test can also be used for things like
+// [c1 + a1*i + a2*j][c2].
+//
+// Return true if dependence disproved.
+bool DependenceAnalysis::testRDIV(const SCEV *Src,
+ const SCEV *Dst,
+ FullDependence &Result) const {
+ // we have 3 possible situations here:
+ // 1) [a*i + b] and [c*j + d]
+ // 2) [a*i + c*j + b] and [d]
+ // 3) [b] and [a*i + c*j + d]
+ // We need to find what we've got and get organized
+
+ const SCEV *SrcConst, *DstConst;
+ const SCEV *SrcCoeff, *DstCoeff;
+ const Loop *SrcLoop, *DstLoop;
+
+ DEBUG(dbgs() << " src = " << *Src << "\n");
+ DEBUG(dbgs() << " dst = " << *Dst << "\n");
+ const SCEVAddRecExpr *SrcAddRec = dyn_cast<SCEVAddRecExpr>(Src);
+ const SCEVAddRecExpr *DstAddRec = dyn_cast<SCEVAddRecExpr>(Dst);
+ if (SrcAddRec && DstAddRec) {
+ SrcConst = SrcAddRec->getStart();
+ SrcCoeff = SrcAddRec->getStepRecurrence(*SE);
+ SrcLoop = SrcAddRec->getLoop();
+ DstConst = DstAddRec->getStart();
+ DstCoeff = DstAddRec->getStepRecurrence(*SE);
+ DstLoop = DstAddRec->getLoop();
+ }
+ else if (SrcAddRec) {
+ if (const SCEVAddRecExpr *tmpAddRec =
+ dyn_cast<SCEVAddRecExpr>(SrcAddRec->getStart())) {
+ SrcConst = tmpAddRec->getStart();
+ SrcCoeff = tmpAddRec->getStepRecurrence(*SE);
+ SrcLoop = tmpAddRec->getLoop();
+ DstConst = Dst;
+ DstCoeff = SE->getNegativeSCEV(SrcAddRec->getStepRecurrence(*SE));
+ DstLoop = SrcAddRec->getLoop();
+ }
+ else
+ llvm_unreachable("RDIV reached by surprising SCEVs");
+ }
+ else if (DstAddRec) {
+ if (const SCEVAddRecExpr *tmpAddRec =
+ dyn_cast<SCEVAddRecExpr>(DstAddRec->getStart())) {
+ DstConst = tmpAddRec->getStart();
+ DstCoeff = tmpAddRec->getStepRecurrence(*SE);
+ DstLoop = tmpAddRec->getLoop();
+ SrcConst = Src;
+ SrcCoeff = SE->getNegativeSCEV(DstAddRec->getStepRecurrence(*SE));
+ SrcLoop = DstAddRec->getLoop();
+ }
+ else
+ llvm_unreachable("RDIV reached by surprising SCEVs");
+ }
+ else
+ llvm_unreachable("RDIV expected at least one AddRec");
+ return exactRDIVtest(SrcCoeff, DstCoeff,
+ SrcConst, DstConst,
+ SrcLoop, DstLoop,
+ Result) ||
+ gcdMIVtest(Src, Dst, Result) ||
+ symbolicRDIVtest(SrcCoeff, DstCoeff,
+ SrcConst, DstConst,
+ SrcLoop, DstLoop);
+}
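+
+// For case 2 above (hypothetical subscripts), e.g. src A[a*i + c*j + b] and
+// dst A[d], the outer AddRec's start is itself an AddRec, so the pair is
+// rewritten as a*i - (-c)*j = d - b and fed to the same RDIV machinery with
+// DstCoeff = -c.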
+
+
+// Tests the single-subscript MIV pair (Src and Dst) for dependence.
+// Return true if dependence disproved.
+// Can sometimes refine direction vectors.
+bool DependenceAnalysis::testMIV(const SCEV *Src,
+ const SCEV *Dst,
+ const SmallBitVector &Loops,
+ FullDependence &Result) const {
+ DEBUG(dbgs() << " src = " << *Src << "\n");
+ DEBUG(dbgs() << " dst = " << *Dst << "\n");
+ Result.Consistent = false;
+ return gcdMIVtest(Src, Dst, Result) ||
+ banerjeeMIVtest(Src, Dst, Loops, Result);
+}
+
+
+// Given a product, e.g., 10*X*Y, returns the first constant operand,
+// in this case 10. If there is no constant part, returns NULL.
+static
+const SCEVConstant *getConstantPart(const SCEVMulExpr *Product) {
+ for (unsigned Op = 0, Ops = Product->getNumOperands(); Op < Ops; Op++) {
+ if (const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Product->getOperand(Op)))
+ return Constant;
+ }
+ return NULL;
+}
+
+
+//===----------------------------------------------------------------------===//
+// gcdMIVtest -
+// Tests an MIV subscript pair for dependence.
+// Returns true if any possible dependence is disproved.
+// Marks the result as inconsistent.
+// Can sometimes disprove the equal direction for 1 or more loops,
+// as discussed in Michael Wolfe's book,
+// High Performance Compilers for Parallel Computing, page 235.
+//
+// We spend some effort (code!) to handle cases like
+// [10*i + 5*N*j + 15*M + 6], where i and j are induction variables,
+// but M and N are just loop-invariant variables.
+// This should help us handle linearized subscripts;
+// also makes this test a useful backup to the various SIV tests.
+//
+// It occurs to me that the presence of loop-invariant variables
+// changes the nature of the test from "greatest common divisor"
+// to "a common divisor!"
+bool DependenceAnalysis::gcdMIVtest(const SCEV *Src,
+ const SCEV *Dst,
+ FullDependence &Result) const {
+ DEBUG(dbgs() << "starting gcd\n");
+ ++GCDapplications;
+ unsigned BitWidth = Src->getType()->getIntegerBitWidth();
+ APInt RunningGCD = APInt::getNullValue(BitWidth);
+
+ // Examine Src coefficients.
+ // Compute running GCD and record source constant.
+ // Because we're looking for the constant at the end of the chain,
+ // we can't quit the loop just because the GCD == 1.
+ const SCEV *Coefficients = Src;
+ while (const SCEVAddRecExpr *AddRec =
+ dyn_cast<SCEVAddRecExpr>(Coefficients)) {
+ const SCEV *Coeff = AddRec->getStepRecurrence(*SE);
+ const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Coeff);
+ if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Coeff))
+ // If the coefficient is the product of a constant and other stuff,
+ // we can use the constant in the GCD computation.
+ Constant = getConstantPart(Product);
+ if (!Constant)
+ return false;
+ APInt ConstCoeff = Constant->getValue()->getValue();
+ RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs());
+ Coefficients = AddRec->getStart();
+ }
+ const SCEV *SrcConst = Coefficients;
+
+ // Examine Dst coefficients.
+ // Compute running GCD and record destination constant.
+ // Because we're looking for the constant at the end of the chain,
+ // we can't quit the loop just because the GCD == 1.
+ Coefficients = Dst;
+ while (const SCEVAddRecExpr *AddRec =
+ dyn_cast<SCEVAddRecExpr>(Coefficients)) {
+ const SCEV *Coeff = AddRec->getStepRecurrence(*SE);
+ const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Coeff);
+ if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Coeff))
+ // If the coefficient is the product of a constant and other stuff,
+ // we can use the constant in the GCD computation.
+ Constant = getConstantPart(Product);
+ if (!Constant)
+ return false;
+ APInt ConstCoeff = Constant->getValue()->getValue();
+ RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs());
+ Coefficients = AddRec->getStart();
+ }
+ const SCEV *DstConst = Coefficients;
+
+ APInt ExtraGCD = APInt::getNullValue(BitWidth);
+ const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
+ DEBUG(dbgs() << " Delta = " << *Delta << "\n");
+ const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Delta);
+ if (const SCEVAddExpr *Sum = dyn_cast<SCEVAddExpr>(Delta)) {
+ // If Delta is a sum of products, we may be able to make further progress.
+ for (unsigned Op = 0, Ops = Sum->getNumOperands(); Op < Ops; Op++) {
+ const SCEV *Operand = Sum->getOperand(Op);
+ if (isa<SCEVConstant>(Operand)) {
+ assert(!Constant && "Surprised to find multiple constants");
+ Constant = cast<SCEVConstant>(Operand);
+ }
+ else if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Operand)) {
+        // Search for a constant operand to participate in the GCD;
+        // if none is found, return false.
+ const SCEVConstant *ConstOp = getConstantPart(Product);
+ if (!ConstOp)
+ return false;
+ APInt ConstOpValue = ConstOp->getValue()->getValue();
+ ExtraGCD = APIntOps::GreatestCommonDivisor(ExtraGCD,
+ ConstOpValue.abs());
+ }
+ else
+ return false;
+ }
+ }
+ if (!Constant)
+ return false;
+ APInt ConstDelta = cast<SCEVConstant>(Constant)->getValue()->getValue();
+ DEBUG(dbgs() << " ConstDelta = " << ConstDelta << "\n");
+ if (ConstDelta == 0)
+ return false;
+ RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ExtraGCD);
+ DEBUG(dbgs() << " RunningGCD = " << RunningGCD << "\n");
+ APInt Remainder = ConstDelta.srem(RunningGCD);
+ if (Remainder != 0) {
+ ++GCDindependence;
+ return true;
+ }
+
+ // Try to disprove equal directions.
+ // For example, given a subscript pair [3*i + 2*j] and [i' + 2*j' - 1],
+ // the code above can't disprove the dependence because the GCD = 1.
+// So we consider what happens if i = i' and what happens if j = j'.
+ // If i = i', we can simplify the subscript to [2*i + 2*j] and [2*j' - 1],
+ // which is infeasible, so we can disallow the = direction for the i level.
+ // Setting j = j' doesn't help matters, so we end up with a direction vector
+ // of [<>, *]
+ //
+ // Given A[5*i + 10*j*M + 9*M*N] and A[15*i + 20*j*M - 21*N*M + 5],
+ // we need to remember that the constant part is 5 and the RunningGCD should
+ // be initialized to ExtraGCD = 30.
+ DEBUG(dbgs() << " ExtraGCD = " << ExtraGCD << '\n');
+
+ bool Improved = false;
+ Coefficients = Src;
+ while (const SCEVAddRecExpr *AddRec =
+ dyn_cast<SCEVAddRecExpr>(Coefficients)) {
+ Coefficients = AddRec->getStart();
+ const Loop *CurLoop = AddRec->getLoop();
+ RunningGCD = ExtraGCD;
+ const SCEV *SrcCoeff = AddRec->getStepRecurrence(*SE);
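+    // DstCoeff starts as a zero of SrcCoeff's type; the Dst walk below
+    // overwrites it when it finds a coefficient for CurLoop.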
+ const SCEV *DstCoeff = SE->getMinusSCEV(SrcCoeff, SrcCoeff);
+ const SCEV *Inner = Src;
+ while (RunningGCD != 1 && isa<SCEVAddRecExpr>(Inner)) {
+ AddRec = cast<SCEVAddRecExpr>(Inner);
+ const SCEV *Coeff = AddRec->getStepRecurrence(*SE);
+ if (CurLoop == AddRec->getLoop())
+ ; // SrcCoeff == Coeff
+ else {
+ if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Coeff))
+ // If the coefficient is the product of a constant and other stuff,
+ // we can use the constant in the GCD computation.
+ Constant = getConstantPart(Product);
+ else
+ Constant = cast<SCEVConstant>(Coeff);
+ APInt ConstCoeff = Constant->getValue()->getValue();
+ RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs());
+ }
+ Inner = AddRec->getStart();
+ }
+ Inner = Dst;
+ while (RunningGCD != 1 && isa<SCEVAddRecExpr>(Inner)) {
+ AddRec = cast<SCEVAddRecExpr>(Inner);
+ const SCEV *Coeff = AddRec->getStepRecurrence(*SE);
+ if (CurLoop == AddRec->getLoop())
+ DstCoeff = Coeff;
+ else {
+ if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Coeff))
+ // If the coefficient is the product of a constant and other stuff,
+ // we can use the constant in the GCD computation.
+ Constant = getConstantPart(Product);
+ else
+ Constant = cast<SCEVConstant>(Coeff);
+ APInt ConstCoeff = Constant->getValue()->getValue();
+ RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs());
+ }
+ Inner = AddRec->getStart();
+ }
+ Delta = SE->getMinusSCEV(SrcCoeff, DstCoeff);
+ if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Delta))
+ // If the coefficient is the product of a constant and other stuff,
+ // we can use the constant in the GCD computation.
+ Constant = getConstantPart(Product);
+ else if (isa<SCEVConstant>(Delta))
+ Constant = cast<SCEVConstant>(Delta);
+ else {
+ // The difference of the two coefficients might not be a product
+ // or constant, in which case we give up on this direction.
+ continue;
+ }
+ APInt ConstCoeff = Constant->getValue()->getValue();
+ RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs());
+ DEBUG(dbgs() << "\tRunningGCD = " << RunningGCD << "\n");
+ if (RunningGCD != 0) {
+ Remainder = ConstDelta.srem(RunningGCD);
+ DEBUG(dbgs() << "\tRemainder = " << Remainder << "\n");
+ if (Remainder != 0) {
+ unsigned Level = mapSrcLoop(CurLoop);
+ Result.DV[Level - 1].Direction &= unsigned(~Dependence::DVEntry::EQ);
+ Improved = true;
+ }
+ }
+ }
+ if (Improved)
+ ++GCDsuccesses;
+ DEBUG(dbgs() << "all done\n");
+ return false;
+}
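+
+// Illustrative run (hypothetical subscripts): for A[2*i + 4*j] vs.
+// A[6*i + 8*j + 1], RunningGCD = gcd(2, 4, 6, 8) = 2 and ConstDelta = 1;
+// since 1 srem 2 != 0, the dependence is disproved. The equal-direction
+// refinement covers pairs like the [3*i + 2*j] vs. [i' + 2*j' - 1] example
+// above, where the overall GCD is 1 but the = direction for i is infeasible.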
+
+
+//===----------------------------------------------------------------------===//
+// banerjeeMIVtest -
+// Use Banerjee's Inequalities to test an MIV subscript pair.
+// (Wolfe, in the race-car book, calls this the Extreme Value Test.)
+// Generally follows the discussion in Section 2.5.2 of
+//
+// Optimizing Supercompilers for Supercomputers
+// Michael Wolfe
+//
+// The inequalities given on page 25 are simplified in that loops are
+// normalized so that the lower bound is always 0 and the stride is always 1.
+// For example, Wolfe gives
+//
+// LB^<_k = (A^-_k - B_k)^- (U_k - L_k - N_k) + (A_k - B_k)L_k - B_k N_k
+//
+// where A_k is the coefficient of the kth index in the source subscript,
+// B_k is the coefficient of the kth index in the destination subscript,
+// U_k is the upper bound of the kth index, L_k is the lower bound of the Kth
+// index, and N_k is the stride of the kth index. Since all loops are normalized
+// by the SCEV package, N_k = 1 and L_k = 0, allowing us to simplify the
+// equation to
+//
+// LB^<_k = (A^-_k - B_k)^- (U_k - 0 - 1) + (A_k - B_k)0 - B_k 1
+// = (A^-_k - B_k)^- (U_k - 1) - B_k
+//
+// Similar simplifications are possible for the other equations.
+//
+// When we can't determine the number of iterations for a loop,
+// we use NULL as an indicator for the worst case, infinity.
+// When computing the upper bound, NULL denotes +inf;
+// for the lower bound, NULL denotes -inf.
+//
+// Return true if dependence disproved.
+bool DependenceAnalysis::banerjeeMIVtest(const SCEV *Src,
+ const SCEV *Dst,
+ const SmallBitVector &Loops,
+ FullDependence &Result) const {
+ DEBUG(dbgs() << "starting Banerjee\n");
+ ++BanerjeeApplications;
+ DEBUG(dbgs() << " Src = " << *Src << '\n');
+ const SCEV *A0;
+ CoefficientInfo *A = collectCoeffInfo(Src, true, A0);
+ DEBUG(dbgs() << " Dst = " << *Dst << '\n');
+ const SCEV *B0;
+ CoefficientInfo *B = collectCoeffInfo(Dst, false, B0);
+ BoundInfo *Bound = new BoundInfo[MaxLevels + 1];
+ const SCEV *Delta = SE->getMinusSCEV(B0, A0);
+ DEBUG(dbgs() << "\tDelta = " << *Delta << '\n');
+
+ // Compute bounds for all the * directions.
+ DEBUG(dbgs() << "\tBounds[*]\n");
+ for (unsigned K = 1; K <= MaxLevels; ++K) {
+ Bound[K].Iterations = A[K].Iterations ? A[K].Iterations : B[K].Iterations;
+ Bound[K].Direction = Dependence::DVEntry::ALL;
+ Bound[K].DirSet = Dependence::DVEntry::NONE;
+ findBoundsALL(A, B, Bound, K);
+#ifndef NDEBUG
+ DEBUG(dbgs() << "\t " << K << '\t');
+ if (Bound[K].Lower[Dependence::DVEntry::ALL])
+ DEBUG(dbgs() << *Bound[K].Lower[Dependence::DVEntry::ALL] << '\t');
+ else
+ DEBUG(dbgs() << "-inf\t");
+ if (Bound[K].Upper[Dependence::DVEntry::ALL])
+ DEBUG(dbgs() << *Bound[K].Upper[Dependence::DVEntry::ALL] << '\n');
+ else
+ DEBUG(dbgs() << "+inf\n");
+#endif
+ }
+
+ // Test the *, *, *, ... case.
+ bool Disproved = false;
+ if (testBounds(Dependence::DVEntry::ALL, 0, Bound, Delta)) {
+ // Explore the direction vector hierarchy.
+ unsigned DepthExpanded = 0;
+ unsigned NewDeps = exploreDirections(1, A, B, Bound,
+ Loops, DepthExpanded, Delta);
+ if (NewDeps > 0) {
+ bool Improved = false;
+ for (unsigned K = 1; K <= CommonLevels; ++K) {
+ if (Loops[K]) {
+ unsigned Old = Result.DV[K - 1].Direction;
+ Result.DV[K - 1].Direction = Old & Bound[K].DirSet;
+ Improved |= Old != Result.DV[K - 1].Direction;
+ if (!Result.DV[K - 1].Direction) {
+ Improved = false;
+ Disproved = true;
+ break;
+ }
+ }
+ }
+ if (Improved)
+ ++BanerjeeSuccesses;
+ }
+ else {
+ ++BanerjeeIndependence;
+ Disproved = true;
+ }
+ }
+ else {
+ ++BanerjeeIndependence;
+ Disproved = true;
+ }
+ delete [] Bound;
+ delete [] A;
+ delete [] B;
+ return Disproved;
+}
+
+
+// Hierarchically expands the direction vector
+// search space, combining the directions of discovered dependences
+// in the DirSet field of Bound. Returns the number of distinct
+// dependences discovered. If the dependence is disproved,
+// it will return 0.
+unsigned DependenceAnalysis::exploreDirections(unsigned Level,
+ CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ const SmallBitVector &Loops,
+ unsigned &DepthExpanded,
+ const SCEV *Delta) const {
+ if (Level > CommonLevels) {
+ // record result
+ DEBUG(dbgs() << "\t[");
+ for (unsigned K = 1; K <= CommonLevels; ++K) {
+ if (Loops[K]) {
+ Bound[K].DirSet |= Bound[K].Direction;
+#ifndef NDEBUG
+ switch (Bound[K].Direction) {
+ case Dependence::DVEntry::LT:
+ DEBUG(dbgs() << " <");
+ break;
+ case Dependence::DVEntry::EQ:
+ DEBUG(dbgs() << " =");
+ break;
+ case Dependence::DVEntry::GT:
+ DEBUG(dbgs() << " >");
+ break;
+ case Dependence::DVEntry::ALL:
+ DEBUG(dbgs() << " *");
+ break;
+ default:
+ llvm_unreachable("unexpected Bound[K].Direction");
+ }
+#endif
+ }
+ }
+ DEBUG(dbgs() << " ]\n");
+ return 1;
+ }
+ if (Loops[Level]) {
+ if (Level > DepthExpanded) {
+ DepthExpanded = Level;
+ // compute bounds for <, =, > at current level
+ findBoundsLT(A, B, Bound, Level);
+ findBoundsGT(A, B, Bound, Level);
+ findBoundsEQ(A, B, Bound, Level);
+#ifndef NDEBUG
+ DEBUG(dbgs() << "\tBound for level = " << Level << '\n');
+ DEBUG(dbgs() << "\t <\t");
+ if (Bound[Level].Lower[Dependence::DVEntry::LT])
+ DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::LT] << '\t');
+ else
+ DEBUG(dbgs() << "-inf\t");
+ if (Bound[Level].Upper[Dependence::DVEntry::LT])
+ DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::LT] << '\n');
+ else
+ DEBUG(dbgs() << "+inf\n");
+ DEBUG(dbgs() << "\t =\t");
+ if (Bound[Level].Lower[Dependence::DVEntry::EQ])
+ DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::EQ] << '\t');
+ else
+ DEBUG(dbgs() << "-inf\t");
+ if (Bound[Level].Upper[Dependence::DVEntry::EQ])
+ DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::EQ] << '\n');
+ else
+ DEBUG(dbgs() << "+inf\n");
+ DEBUG(dbgs() << "\t >\t");
+ if (Bound[Level].Lower[Dependence::DVEntry::GT])
+ DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::GT] << '\t');
+ else
+ DEBUG(dbgs() << "-inf\t");
+ if (Bound[Level].Upper[Dependence::DVEntry::GT])
+ DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::GT] << '\n');
+ else
+ DEBUG(dbgs() << "+inf\n");
+#endif
+ }
+
+ unsigned NewDeps = 0;
+
+ // test bounds for <, *, *, ...
+ if (testBounds(Dependence::DVEntry::LT, Level, Bound, Delta))
+ NewDeps += exploreDirections(Level + 1, A, B, Bound,
+ Loops, DepthExpanded, Delta);
+
+ // Test bounds for =, *, *, ...
+ if (testBounds(Dependence::DVEntry::EQ, Level, Bound, Delta))
+ NewDeps += exploreDirections(Level + 1, A, B, Bound,
+ Loops, DepthExpanded, Delta);
+
+ // test bounds for >, *, *, ...
+ if (testBounds(Dependence::DVEntry::GT, Level, Bound, Delta))
+ NewDeps += exploreDirections(Level + 1, A, B, Bound,
+ Loops, DepthExpanded, Delta);
+
+ Bound[Level].Direction = Dependence::DVEntry::ALL;
+ return NewDeps;
+ }
+ else
+ return exploreDirections(Level + 1, A, B, Bound, Loops, DepthExpanded, Delta);
+}
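+
+// For example, with two common loops the search starts from (*, *); if that
+// is plausible, it expands into (<, *), (=, *), and (>, *), each of which in
+// turn refines its second position, so at most 3^2 = 9 leaf direction
+// vectors are ever tested.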
+
+
+// Returns true iff the current bounds are plausible.
+bool DependenceAnalysis::testBounds(unsigned char DirKind,
+ unsigned Level,
+ BoundInfo *Bound,
+ const SCEV *Delta) const {
+ Bound[Level].Direction = DirKind;
+ if (const SCEV *LowerBound = getLowerBound(Bound))
+ if (isKnownPredicate(CmpInst::ICMP_SGT, LowerBound, Delta))
+ return false;
+ if (const SCEV *UpperBound = getUpperBound(Bound))
+ if (isKnownPredicate(CmpInst::ICMP_SGT, Delta, UpperBound))
+ return false;
+ return true;
+}
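+
+// For example (hypothetical values), if Delta = 5 while the bounds summed
+// over all levels for the chosen directions are [-2, 4], the second check
+// fires (5 > 4) and this direction vector is ruled out.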
+
+
+// Computes the upper and lower bounds for level K
+// using the * direction. Records them in Bound.
+// Wolfe gives the equations
+//
+// LB^*_k = (A^-_k - B^+_k)(U_k - L_k) + (A_k - B_k)L_k
+// UB^*_k = (A^+_k - B^-_k)(U_k - L_k) + (A_k - B_k)L_k
+//
+// Since we normalize loops, we can simplify these equations to
+//
+// LB^*_k = (A^-_k - B^+_k)U_k
+// UB^*_k = (A^+_k - B^-_k)U_k
+//
+// We must be careful to handle the case where the upper bound is unknown.
+// Note that the lower bound is always <= 0
+// and the upper bound is always >= 0.
+void DependenceAnalysis::findBoundsALL(CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ unsigned K) const {
+ Bound[K].Lower[Dependence::DVEntry::ALL] = NULL; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::ALL] = NULL; // Default value = +infinity.
+ if (Bound[K].Iterations) {
+ Bound[K].Lower[Dependence::DVEntry::ALL] =
+ SE->getMulExpr(SE->getMinusSCEV(A[K].NegPart, B[K].PosPart),
+ Bound[K].Iterations);
+ Bound[K].Upper[Dependence::DVEntry::ALL] =
+ SE->getMulExpr(SE->getMinusSCEV(A[K].PosPart, B[K].NegPart),
+ Bound[K].Iterations);
+ }
+ else {
+ // If the difference is 0, we won't need to know the number of iterations.
+ if (isKnownPredicate(CmpInst::ICMP_EQ, A[K].NegPart, B[K].PosPart))
+ Bound[K].Lower[Dependence::DVEntry::ALL] =
+ SE->getConstant(A[K].Coeff->getType(), 0);
+ if (isKnownPredicate(CmpInst::ICMP_EQ, A[K].PosPart, B[K].NegPart))
+ Bound[K].Upper[Dependence::DVEntry::ALL] =
+ SE->getConstant(A[K].Coeff->getType(), 0);
+ }
+}
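+
+// Worked instance (hypothetical values): with A_k = 2, B_k = -3, U_k = 10,
+// we get A^-_k = 0, A^+_k = 2, B^+_k = 0, B^-_k = -3, so
+// LB^*_k = (0 - 0)*10 = 0 and UB^*_k = (2 - (-3))*10 = 50, matching the
+// range of 2*i - (-3)*j = 2*i + 3*j over 0 <= i, j <= 10.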
+
+
+// Computes the upper and lower bounds for level K
+// using the = direction. Records them in Bound.
+// Wolfe gives the equations
+//
+// LB^=_k = (A_k - B_k)^- (U_k - L_k) + (A_k - B_k)L_k
+// UB^=_k = (A_k - B_k)^+ (U_k - L_k) + (A_k - B_k)L_k
+//
+// Since we normalize loops, we can simplify these equations to
+//
+// LB^=_k = (A_k - B_k)^- U_k
+// UB^=_k = (A_k - B_k)^+ U_k
+//
+// We must be careful to handle the case where the upper bound is unknown.
+// Note that the lower bound is always <= 0
+// and the upper bound is always >= 0.
+void DependenceAnalysis::findBoundsEQ(CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ unsigned K) const {
+ Bound[K].Lower[Dependence::DVEntry::EQ] = NULL; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::EQ] = NULL; // Default value = +infinity.
+ if (Bound[K].Iterations) {
+ const SCEV *Delta = SE->getMinusSCEV(A[K].Coeff, B[K].Coeff);
+ const SCEV *NegativePart = getNegativePart(Delta);
+ Bound[K].Lower[Dependence::DVEntry::EQ] =
+ SE->getMulExpr(NegativePart, Bound[K].Iterations);
+ const SCEV *PositivePart = getPositivePart(Delta);
+ Bound[K].Upper[Dependence::DVEntry::EQ] =
+ SE->getMulExpr(PositivePart, Bound[K].Iterations);
+ }
+ else {
+ // If the positive/negative part of the difference is 0,
+ // we won't need to know the number of iterations.
+ const SCEV *Delta = SE->getMinusSCEV(A[K].Coeff, B[K].Coeff);
+ const SCEV *NegativePart = getNegativePart(Delta);
+ if (NegativePart->isZero())
+ Bound[K].Lower[Dependence::DVEntry::EQ] = NegativePart; // Zero
+ const SCEV *PositivePart = getPositivePart(Delta);
+ if (PositivePart->isZero())
+ Bound[K].Upper[Dependence::DVEntry::EQ] = PositivePart; // Zero
+ }
+}
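+
+// Worked instance (hypothetical values): with A_k = 2, B_k = 3, U_k = 10,
+// A_k - B_k = -1, so LB^=_k = (-1)*10 = -10 and UB^=_k = 0*10 = 0, matching
+// the range of (A_k - B_k)*i over 0 <= i <= 10 when the source and
+// destination iterations coincide.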
+
+
+// Computes the upper and lower bounds for level K
+// using the < direction. Records them in Bound.
+// Wolfe gives the equations
+//
+// LB^<_k = (A^-_k - B_k)^- (U_k - L_k - N_k) + (A_k - B_k)L_k - B_k N_k
+// UB^<_k = (A^+_k - B_k)^+ (U_k - L_k - N_k) + (A_k - B_k)L_k - B_k N_k
+//
+// Since we normalize loops, we can simplify these equations to
+//
+// LB^<_k = (A^-_k - B_k)^- (U_k - 1) - B_k
+// UB^<_k = (A^+_k - B_k)^+ (U_k - 1) - B_k
+//
+// We must be careful to handle the case where the upper bound is unknown.
+void DependenceAnalysis::findBoundsLT(CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ unsigned K) const {
+ Bound[K].Lower[Dependence::DVEntry::LT] = NULL; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::LT] = NULL; // Default value = +infinity.
+ if (Bound[K].Iterations) {
+ const SCEV *Iter_1 =
+ SE->getMinusSCEV(Bound[K].Iterations,
+ SE->getConstant(Bound[K].Iterations->getType(), 1));
+ const SCEV *NegPart =
+ getNegativePart(SE->getMinusSCEV(A[K].NegPart, B[K].Coeff));
+ Bound[K].Lower[Dependence::DVEntry::LT] =
+ SE->getMinusSCEV(SE->getMulExpr(NegPart, Iter_1), B[K].Coeff);
+ const SCEV *PosPart =
+ getPositivePart(SE->getMinusSCEV(A[K].PosPart, B[K].Coeff));
+ Bound[K].Upper[Dependence::DVEntry::LT] =
+ SE->getMinusSCEV(SE->getMulExpr(PosPart, Iter_1), B[K].Coeff);
+ }
+ else {
+ // If the positive/negative part of the difference is 0,
+ // we won't need to know the number of iterations.
+ const SCEV *NegPart =
+ getNegativePart(SE->getMinusSCEV(A[K].NegPart, B[K].Coeff));
+ if (NegPart->isZero())
+ Bound[K].Lower[Dependence::DVEntry::LT] = SE->getNegativeSCEV(B[K].Coeff);
+ const SCEV *PosPart =
+ getPositivePart(SE->getMinusSCEV(A[K].PosPart, B[K].Coeff));
+ if (PosPart->isZero())
+ Bound[K].Upper[Dependence::DVEntry::LT] = SE->getNegativeSCEV(B[K].Coeff);
+ }
+}
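+
+// Worked instance (hypothetical values): with A_k = 1, B_k = 2, U_k = 10,
+// LB^<_k = (0 - 2)^- * 9 - 2 = -20 and UB^<_k = (1 - 2)^+ * 9 - 2 = -2,
+// matching the range of i - 2*j over 0 <= i < j <= 10.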
+
+
+// Computes the upper and lower bounds for level K
+// using the > direction. Records them in Bound.
+// Wolfe gives the equations
+//
+// LB^>_k = (A_k - B^+_k)^- (U_k - L_k - N_k) + (A_k - B_k)L_k + A_k N_k
+// UB^>_k = (A_k - B^-_k)^+ (U_k - L_k - N_k) + (A_k - B_k)L_k + A_k N_k
+//
+// Since we normalize loops, we can simplify these equations to
+//
+// LB^>_k = (A_k - B^+_k)^- (U_k - 1) + A_k
+// UB^>_k = (A_k - B^-_k)^+ (U_k - 1) + A_k
+//
+// We must be careful to handle the case where the upper bound is unknown.
+void DependenceAnalysis::findBoundsGT(CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ unsigned K) const {
+ Bound[K].Lower[Dependence::DVEntry::GT] = NULL; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::GT] = NULL; // Default value = +infinity.
+ if (Bound[K].Iterations) {
+ const SCEV *Iter_1 =
+ SE->getMinusSCEV(Bound[K].Iterations,
+ SE->getConstant(Bound[K].Iterations->getType(), 1));
+ const SCEV *NegPart =
+ getNegativePart(SE->getMinusSCEV(A[K].Coeff, B[K].PosPart));
+ Bound[K].Lower[Dependence::DVEntry::GT] =
+ SE->getAddExpr(SE->getMulExpr(NegPart, Iter_1), A[K].Coeff);
+ const SCEV *PosPart =
+ getPositivePart(SE->getMinusSCEV(A[K].Coeff, B[K].NegPart));
+ Bound[K].Upper[Dependence::DVEntry::GT] =
+ SE->getAddExpr(SE->getMulExpr(PosPart, Iter_1), A[K].Coeff);
+ }
+ else {
+ // If the positive/negative part of the difference is 0,
+ // we won't need to know the number of iterations.
+ const SCEV *NegPart = getNegativePart(SE->getMinusSCEV(A[K].Coeff, B[K].PosPart));
+ if (NegPart->isZero())
+ Bound[K].Lower[Dependence::DVEntry::GT] = A[K].Coeff;
+ const SCEV *PosPart = getPositivePart(SE->getMinusSCEV(A[K].Coeff, B[K].NegPart));
+ if (PosPart->isZero())
+ Bound[K].Upper[Dependence::DVEntry::GT] = A[K].Coeff;
+ }
+}
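+
+// Worked instance (hypothetical values): with A_k = 2, B_k = 1, U_k = 10,
+// LB^>_k = (2 - 1)^- * 9 + 2 = 2 and UB^>_k = (2 - 0)^+ * 9 + 2 = 20,
+// matching the range of 2*i - j over 0 <= j < i <= 10.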
+
+
+// X^+ = max(X, 0)
+const SCEV *DependenceAnalysis::getPositivePart(const SCEV *X) const {
+ return SE->getSMaxExpr(X, SE->getConstant(X->getType(), 0));
+}
+
+
+// X^- = min(X, 0)
+const SCEV *DependenceAnalysis::getNegativePart(const SCEV *X) const {
+ return SE->getSMinExpr(X, SE->getConstant(X->getType(), 0));
+}
+
+
+// Walks through the subscript,
+// collecting each coefficient, the associated loop bounds,
+// and recording its positive and negative parts for later use.
+DependenceAnalysis::CoefficientInfo *
+DependenceAnalysis::collectCoeffInfo(const SCEV *Subscript,
+ bool SrcFlag,
+ const SCEV *&Constant) const {
+ const SCEV *Zero = SE->getConstant(Subscript->getType(), 0);
+ CoefficientInfo *CI = new CoefficientInfo[MaxLevels + 1];
+ for (unsigned K = 1; K <= MaxLevels; ++K) {
+ CI[K].Coeff = Zero;
+ CI[K].PosPart = Zero;
+ CI[K].NegPart = Zero;
+ CI[K].Iterations = NULL;
+ }
+ while (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Subscript)) {
+ const Loop *L = AddRec->getLoop();
+ unsigned K = SrcFlag ? mapSrcLoop(L) : mapDstLoop(L);
+ CI[K].Coeff = AddRec->getStepRecurrence(*SE);
+ CI[K].PosPart = getPositivePart(CI[K].Coeff);
+ CI[K].NegPart = getNegativePart(CI[K].Coeff);
+ CI[K].Iterations = collectUpperBound(L, Subscript->getType());
+ Subscript = AddRec->getStart();
+ }
+ Constant = Subscript;
+#ifndef NDEBUG
+ DEBUG(dbgs() << "\tCoefficient Info\n");
+ for (unsigned K = 1; K <= MaxLevels; ++K) {
+ DEBUG(dbgs() << "\t " << K << "\t" << *CI[K].Coeff);
+ DEBUG(dbgs() << "\tPos Part = ");
+ DEBUG(dbgs() << *CI[K].PosPart);
+ DEBUG(dbgs() << "\tNeg Part = ");
+ DEBUG(dbgs() << *CI[K].NegPart);
+ DEBUG(dbgs() << "\tUpper Bound = ");
+ if (CI[K].Iterations)
+ DEBUG(dbgs() << *CI[K].Iterations);
+ else
+ DEBUG(dbgs() << "+inf");
+ DEBUG(dbgs() << '\n');
+ }
+ DEBUG(dbgs() << "\t Constant = " << *Subscript << '\n');
+#endif
+ return CI;
+}
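+
+// For a subscript like {{5,+,2}<i-loop>,+,3}<j-loop>, i.e. 5 + 2*i + 3*j,
+// this records Coeff = 2 (PosPart 2, NegPart 0) at the i level and
+// Coeff = 3 at the j level, and returns Constant = 5.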
+
+
+// Looks through all the bounds info and
+// computes the lower bound given the current direction settings
+// at each level. If the lower bound for any level is -inf,
+// the result is -inf.
+const SCEV *DependenceAnalysis::getLowerBound(BoundInfo *Bound) const {
+ const SCEV *Sum = Bound[1].Lower[Bound[1].Direction];
+ for (unsigned K = 2; Sum && K <= MaxLevels; ++K) {
+ if (Bound[K].Lower[Bound[K].Direction])
+ Sum = SE->getAddExpr(Sum, Bound[K].Lower[Bound[K].Direction]);
+ else
+ Sum = NULL;
+ }
+ return Sum;
+}
+
+
+// Looks through all the bounds info and
+// computes the upper bound given the current direction settings
+// at each level. If the upper bound at any level is +inf,
+// the result is +inf.
+const SCEV *DependenceAnalysis::getUpperBound(BoundInfo *Bound) const {
+ const SCEV *Sum = Bound[1].Upper[Bound[1].Direction];
+ for (unsigned K = 2; Sum && K <= MaxLevels; ++K) {
+ if (Bound[K].Upper[Bound[K].Direction])
+ Sum = SE->getAddExpr(Sum, Bound[K].Upper[Bound[K].Direction]);
+ else
+ Sum = NULL;
+ }
+ return Sum;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Constraint manipulation for Delta test.
+
+// Given a linear SCEV,
+// return the coefficient (the step)
+// corresponding to the specified loop.
+// If there isn't one, return 0.
+// For example, given a*i + b*j + c*k, finding the coefficient
+// corresponding to the j loop would yield b.
+const SCEV *DependenceAnalysis::findCoefficient(const SCEV *Expr,
+ const Loop *TargetLoop) const {
+ const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Expr);
+ if (!AddRec)
+ return SE->getConstant(Expr->getType(), 0);
+ if (AddRec->getLoop() == TargetLoop)
+ return AddRec->getStepRecurrence(*SE);
+ return findCoefficient(AddRec->getStart(), TargetLoop);
+}
+
+
+// Given a linear SCEV,
+// return the SCEV given by zeroing out the coefficient
+// corresponding to the specified loop.
+// For example, given a*i + b*j + c*k, zeroing the coefficient
+// corresponding to the j loop would yield a*i + c*k.
+const SCEV *DependenceAnalysis::zeroCoefficient(const SCEV *Expr,
+ const Loop *TargetLoop) const {
+ const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Expr);
+ if (!AddRec)
+ return Expr; // ignore
+ if (AddRec->getLoop() == TargetLoop)
+ return AddRec->getStart();
+ return SE->getAddRecExpr(zeroCoefficient(AddRec->getStart(), TargetLoop),
+ AddRec->getStepRecurrence(*SE),
+ AddRec->getLoop(),
+ AddRec->getNoWrapFlags());
+}
+
+
+// Given a linear SCEV Expr,
+// return the SCEV given by adding some Value to the
+// coefficient corresponding to the specified TargetLoop.
+// For example, given a*i + b*j + c*k, adding 1 to the coefficient
+// corresponding to the j loop would yield a*i + (b+1)*j + c*k.
+const SCEV *DependenceAnalysis::addToCoefficient(const SCEV *Expr,
+ const Loop *TargetLoop,
+ const SCEV *Value) const {
+ const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Expr);
+ if (!AddRec) // create a new addRec
+ return SE->getAddRecExpr(Expr,
+ Value,
+ TargetLoop,
+ SCEV::FlagAnyWrap); // Worst case, with no info.
+ if (AddRec->getLoop() == TargetLoop) {
+ const SCEV *Sum = SE->getAddExpr(AddRec->getStepRecurrence(*SE), Value);
+ if (Sum->isZero())
+ return AddRec->getStart();
+ return SE->getAddRecExpr(AddRec->getStart(),
+ Sum,
+ AddRec->getLoop(),
+ AddRec->getNoWrapFlags());
+ }
+ return SE->getAddRecExpr(addToCoefficient(AddRec->getStart(),
+ TargetLoop, Value),
+ AddRec->getStepRecurrence(*SE),
+ AddRec->getLoop(),
+ AddRec->getNoWrapFlags());
+}
+
+
+// Review the constraints, looking for opportunities
+// to simplify a subscript pair (Src and Dst).
+// Return true if some simplification occurs.
+// If the simplification isn't exact (that is, if it is conservative
+// in terms of dependence), set consistent to false.
+// Corresponds to Figure 5 from the paper
+//
+// Practical Dependence Testing
+// Goff, Kennedy, Tseng
+// PLDI 1991
+bool DependenceAnalysis::propagate(const SCEV *&Src,
+ const SCEV *&Dst,
+ SmallBitVector &Loops,
+ SmallVector<Constraint, 4> &Constraints,
+ bool &Consistent) {
+ bool Result = false;
+ for (int LI = Loops.find_first(); LI >= 0; LI = Loops.find_next(LI)) {
+ DEBUG(dbgs() << "\t Constraint[" << LI << "] is");
+ DEBUG(Constraints[LI].dump(dbgs()));
+ if (Constraints[LI].isDistance())
+ Result |= propagateDistance(Src, Dst, Constraints[LI], Consistent);
+ else if (Constraints[LI].isLine())
+ Result |= propagateLine(Src, Dst, Constraints[LI], Consistent);
+ else if (Constraints[LI].isPoint())
+ Result |= propagatePoint(Src, Dst, Constraints[LI]);
+ }
+ return Result;
+}
+
+
+// Attempt to propagate a distance
+// constraint into a subscript pair (Src and Dst).
+// Return true if some simplification occurs.
+// If the simplification isn't exact (that is, if it is conservative
+// in terms of dependence), set consistent to false.
+bool DependenceAnalysis::propagateDistance(const SCEV *&Src,
+ const SCEV *&Dst,
+ Constraint &CurConstraint,
+ bool &Consistent) {
+ const Loop *CurLoop = CurConstraint.getAssociatedLoop();
+ DEBUG(dbgs() << "\t\tSrc is " << *Src << "\n");
+ const SCEV *A_K = findCoefficient(Src, CurLoop);
+ if (A_K->isZero())
+ return false;
+ const SCEV *DA_K = SE->getMulExpr(A_K, CurConstraint.getD());
+ Src = SE->getMinusSCEV(Src, DA_K);
+ Src = zeroCoefficient(Src, CurLoop);
+ DEBUG(dbgs() << "\t\tnew Src is " << *Src << "\n");
+ DEBUG(dbgs() << "\t\tDst is " << *Dst << "\n");
+ Dst = addToCoefficient(Dst, CurLoop, SE->getNegativeSCEV(A_K));
+ DEBUG(dbgs() << "\t\tnew Dst is " << *Dst << "\n");
+ if (!findCoefficient(Dst, CurLoop)->isZero())
+ Consistent = false;
+ return true;
+}
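// Editorial worked example: take Src = 2*i + 3*j, Dst = 2*i + 5*j and a
// distance constraint d = 1 on the i loop, so A_K = 2:
//
//   Src := zero_i(Src - d*A_K)  =  3*j - 2
//   Dst := Dst + (-A_K)*i       =  (2 - 2)*i + 5*j  =  5*j
//
// Both i coefficients vanish, the pair stays consistent, and what remains
// is a one-dimensional (SIV) problem in j alone.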
+
+
+// Attempt to propagate a line
+// constraint into a subscript pair (Src and Dst).
+// Return true if some simplification occurs.
+// If the simplification isn't exact (that is, if it is conservative
+// in terms of dependence), set consistent to false.
+bool DependenceAnalysis::propagateLine(const SCEV *&Src,
+ const SCEV *&Dst,
+ Constraint &CurConstraint,
+ bool &Consistent) {
+ const Loop *CurLoop = CurConstraint.getAssociatedLoop();
+ const SCEV *A = CurConstraint.getA();
+ const SCEV *B = CurConstraint.getB();
+ const SCEV *C = CurConstraint.getC();
+ DEBUG(dbgs() << "\t\tA = " << *A << ", B = " << *B << ", C = " << *C << "\n");
+ DEBUG(dbgs() << "\t\tSrc = " << *Src << "\n");
+ DEBUG(dbgs() << "\t\tDst = " << *Dst << "\n");
+ if (A->isZero()) {
+ const SCEVConstant *Bconst = dyn_cast<SCEVConstant>(B);
+ const SCEVConstant *Cconst = dyn_cast<SCEVConstant>(C);
+ if (!Bconst || !Cconst) return false;
+ APInt Beta = Bconst->getValue()->getValue();
+ APInt Charlie = Cconst->getValue()->getValue();
+ APInt CdivB = Charlie.sdiv(Beta);
+ assert(Charlie.srem(Beta) == 0 && "C should be evenly divisible by B");
+ const SCEV *AP_K = findCoefficient(Dst, CurLoop);
+ // Src = SE->getAddExpr(Src, SE->getMulExpr(AP_K, SE->getConstant(CdivB)));
+ Src = SE->getMinusSCEV(Src, SE->getMulExpr(AP_K, SE->getConstant(CdivB)));
+ Dst = zeroCoefficient(Dst, CurLoop);
+ if (!findCoefficient(Src, CurLoop)->isZero())
+ Consistent = false;
+ }
+ else if (B->isZero()) {
+ const SCEVConstant *Aconst = dyn_cast<SCEVConstant>(A);
+ const SCEVConstant *Cconst = dyn_cast<SCEVConstant>(C);
+ if (!Aconst || !Cconst) return false;
+ APInt Alpha = Aconst->getValue()->getValue();
+ APInt Charlie = Cconst->getValue()->getValue();
+ APInt CdivA = Charlie.sdiv(Alpha);
+ assert(Charlie.srem(Alpha) == 0 && "C should be evenly divisible by A");
+ const SCEV *A_K = findCoefficient(Src, CurLoop);
+ Src = SE->getAddExpr(Src, SE->getMulExpr(A_K, SE->getConstant(CdivA)));
+ Src = zeroCoefficient(Src, CurLoop);
+ if (!findCoefficient(Dst, CurLoop)->isZero())
+ Consistent = false;
+ }
+ else if (isKnownPredicate(CmpInst::ICMP_EQ, A, B)) {
+ const SCEVConstant *Aconst = dyn_cast<SCEVConstant>(A);
+ const SCEVConstant *Cconst = dyn_cast<SCEVConstant>(C);
+ if (!Aconst || !Cconst) return false;
+ APInt Alpha = Aconst->getValue()->getValue();
+ APInt Charlie = Cconst->getValue()->getValue();
+ APInt CdivA = Charlie.sdiv(Alpha);
+ assert(Charlie.srem(Alpha) == 0 && "C should be evenly divisible by A");
+ const SCEV *A_K = findCoefficient(Src, CurLoop);
+ Src = SE->getAddExpr(Src, SE->getMulExpr(A_K, SE->getConstant(CdivA)));
+ Src = zeroCoefficient(Src, CurLoop);
+ Dst = addToCoefficient(Dst, CurLoop, A_K);
+ if (!findCoefficient(Dst, CurLoop)->isZero())
+ Consistent = false;
+ }
+ else {
+ // paper is incorrect here, or perhaps just misleading
+ const SCEV *A_K = findCoefficient(Src, CurLoop);
+ Src = SE->getMulExpr(Src, A);
+ Dst = SE->getMulExpr(Dst, A);
+ Src = SE->getAddExpr(Src, SE->getMulExpr(A_K, C));
+ Src = zeroCoefficient(Src, CurLoop);
+ Dst = addToCoefficient(Dst, CurLoop, SE->getMulExpr(A_K, B));
+ if (!findCoefficient(Dst, CurLoop)->isZero())
+ Consistent = false;
+ }
+ DEBUG(dbgs() << "\t\tnew Src = " << *Src << "\n");
+ DEBUG(dbgs() << "\t\tnew Dst = " << *Dst << "\n");
+ return true;
+}
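// Editorial worked example (the B == 0 case above): a line constraint
// a*X + b*Y = c with b = 0 pins the source index to X = c/a. With
// Src = 4*i + j, a = 2, c = 6 on the i loop (so A_K = 4 and X = 3):
//
//   Src := zero_i(Src + A_K*(c/a))  =  j + 12
//
// The i coefficient of Src is gone; if Dst still carries a nonzero i
// coefficient, Consistent is cleared because the rewrite was one-sided.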
+
+
+// Attempt to propagate a point
+// constraint into a subscript pair (Src and Dst).
+// Return true if some simplification occurs.
+bool DependenceAnalysis::propagatePoint(const SCEV *&Src,
+ const SCEV *&Dst,
+ Constraint &CurConstraint) {
+ const Loop *CurLoop = CurConstraint.getAssociatedLoop();
+ const SCEV *A_K = findCoefficient(Src, CurLoop);
+ const SCEV *AP_K = findCoefficient(Dst, CurLoop);
+ const SCEV *XA_K = SE->getMulExpr(A_K, CurConstraint.getX());
+ const SCEV *YAP_K = SE->getMulExpr(AP_K, CurConstraint.getY());
+ DEBUG(dbgs() << "\t\tSrc is " << *Src << "\n");
+ Src = SE->getAddExpr(Src, SE->getMinusSCEV(XA_K, YAP_K));
+ Src = zeroCoefficient(Src, CurLoop);
+ DEBUG(dbgs() << "\t\tnew Src is " << *Src << "\n");
+ DEBUG(dbgs() << "\t\tDst is " << *Dst << "\n");
+ Dst = zeroCoefficient(Dst, CurLoop);
+ DEBUG(dbgs() << "\t\tnew Dst is " << *Dst << "\n");
+ return true;
+}
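// Editorial worked example: a point constraint pins both indices, say
// X = 2 and Y = 5. With Src = 3*i + k and Dst = 7*i + k, so A_K = 3 and
// AP_K = 7:
//
//   Src := zero_i(Src + A_K*X - AP_K*Y)  =  k + 6 - 35  =  k - 29
//   Dst := zero_i(Dst)                   =  k
//
// The i level is fully resolved on both sides, so no consistency flag
// needs to change.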
+
+
+// Update direction vector entry based on the current constraint.
+void DependenceAnalysis::updateDirection(Dependence::DVEntry &Level,
+ const Constraint &CurConstraint
+ ) const {
+ DEBUG(dbgs() << "\tUpdate direction, constraint =");
+ DEBUG(CurConstraint.dump(dbgs()));
+ if (CurConstraint.isAny())
+ ; // use defaults
+ else if (CurConstraint.isDistance()) {
+ // this one is consistent, the others aren't
+ Level.Scalar = false;
+ Level.Distance = CurConstraint.getD();
+ unsigned NewDirection = Dependence::DVEntry::NONE;
+ if (!SE->isKnownNonZero(Level.Distance)) // if may be zero
+ NewDirection = Dependence::DVEntry::EQ;
+ if (!SE->isKnownNonPositive(Level.Distance)) // if may be positive
+ NewDirection |= Dependence::DVEntry::LT;
+ if (!SE->isKnownNonNegative(Level.Distance)) // if may be negative
+ NewDirection |= Dependence::DVEntry::GT;
+ Level.Direction &= NewDirection;
+ }
+ else if (CurConstraint.isLine()) {
+ Level.Scalar = false;
+ Level.Distance = NULL;
+ // direction should be accurate
+ }
+ else if (CurConstraint.isPoint()) {
+ Level.Scalar = false;
+ Level.Distance = NULL;
+ unsigned NewDirection = Dependence::DVEntry::NONE;
+ if (!isKnownPredicate(CmpInst::ICMP_NE,
+ CurConstraint.getY(),
+ CurConstraint.getX()))
+ // if X may be = Y
+ NewDirection |= Dependence::DVEntry::EQ;
+ if (!isKnownPredicate(CmpInst::ICMP_SLE,
+ CurConstraint.getY(),
+ CurConstraint.getX()))
+ // if Y may be > X
+ NewDirection |= Dependence::DVEntry::LT;
+ if (!isKnownPredicate(CmpInst::ICMP_SGE,
+ CurConstraint.getY(),
+ CurConstraint.getX()))
+ // if Y may be < X
+ NewDirection |= Dependence::DVEntry::GT;
+ Level.Direction &= NewDirection;
+ }
+ else
+ llvm_unreachable("constraint has unexpected kind");
+}
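// Editorial example: for a distance constraint whose D is known positive,
// isKnownNonZero(D) holds (no EQ bit), !isKnownNonPositive(D) holds (the LT
// bit is set), and isKnownNonNegative(D) holds (no GT bit), so NewDirection
// is exactly DVEntry::LT and Level.Direction is ANDed down to at most '<'.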
+
+
+//===----------------------------------------------------------------------===//
+
+#ifndef NDEBUG
+// For debugging purposes, dump a small bit vector to dbgs().
+static void dumpSmallBitVector(SmallBitVector &BV) {
+ dbgs() << "{";
+ for (int VI = BV.find_first(); VI >= 0; VI = BV.find_next(VI)) {
+ dbgs() << VI;
+ if (BV.find_next(VI) >= 0)
+ dbgs() << ' ';
+ }
+ dbgs() << "}\n";
+}
+#endif
+
+
+// depends -
+// Returns NULL if there is no dependence.
+// Otherwise, return a Dependence with as many details as possible.
+// Corresponds to Section 3.1 in the paper
+//
+// Practical Dependence Testing
+// Goff, Kennedy, Tseng
+// PLDI 1991
+//
+// Care is required to keep getSplitIteration (below) up to date w.r.t.
+// this routine.
+Dependence *DependenceAnalysis::depends(const Instruction *Src,
+ const Instruction *Dst,
+ bool PossiblyLoopIndependent) {
+ if ((!Src->mayReadFromMemory() && !Src->mayWriteToMemory()) ||
+ (!Dst->mayReadFromMemory() && !Dst->mayWriteToMemory()))
+ // if either instruction doesn't reference memory, there's no dependence
+ return NULL;
+
+ if (!isLoadOrStore(Src) || !isLoadOrStore(Dst))
+ // can only analyze simple loads and stores, i.e., no calls, invokes, etc.
+ return new Dependence(Src, Dst);
+
+ const Value *SrcPtr = getPointerOperand(Src);
+ const Value *DstPtr = getPointerOperand(Dst);
+
+ switch (underlyingObjectsAlias(AA, DstPtr, SrcPtr)) {
+ case AliasAnalysis::MayAlias:
+ case AliasAnalysis::PartialAlias:
+ // cannot analyze objects if we don't understand their aliasing.
+ return new Dependence(Src, Dst);
+ case AliasAnalysis::NoAlias:
+ // If the objects noalias, they are distinct, accesses are independent.
+ return NULL;
+ case AliasAnalysis::MustAlias:
+ break; // The underlying objects alias; test accesses for dependence.
+ }
+
+ const GEPOperator *SrcGEP = dyn_cast<GEPOperator>(SrcPtr);
+ const GEPOperator *DstGEP = dyn_cast<GEPOperator>(DstPtr);
+ if (!SrcGEP || !DstGEP)
+ return new Dependence(Src, Dst); // missing GEP, assume dependence
+
+ if (SrcGEP->getPointerOperandType() != DstGEP->getPointerOperandType())
+ return new Dependence(Src, Dst); // different types, assume dependence
+
+ // establish loop nesting levels
+ establishNestingLevels(Src, Dst);
+ DEBUG(dbgs() << " common nesting levels = " << CommonLevels << "\n");
+ DEBUG(dbgs() << " maximum nesting levels = " << MaxLevels << "\n");
+
+ FullDependence Result(Src, Dst, PossiblyLoopIndependent, CommonLevels);
+ ++TotalArrayPairs;
+
+ // classify subscript pairs
+ unsigned Pairs = SrcGEP->idx_end() - SrcGEP->idx_begin();
+ SmallVector<Subscript, 4> Pair(Pairs);
+ for (unsigned SI = 0; SI < Pairs; ++SI) {
+ Pair[SI].Loops.resize(MaxLevels + 1);
+ Pair[SI].GroupLoops.resize(MaxLevels + 1);
+ Pair[SI].Group.resize(Pairs);
+ }
+ Pairs = 0;
+ for (GEPOperator::const_op_iterator SrcIdx = SrcGEP->idx_begin(),
+ SrcEnd = SrcGEP->idx_end(),
+ DstIdx = DstGEP->idx_begin(),
+ DstEnd = DstGEP->idx_end();
+ SrcIdx != SrcEnd && DstIdx != DstEnd;
+ ++SrcIdx, ++DstIdx, ++Pairs) {
+ Pair[Pairs].Src = SE->getSCEV(*SrcIdx);
+ Pair[Pairs].Dst = SE->getSCEV(*DstIdx);
+ removeMatchingExtensions(&Pair[Pairs]);
+ Pair[Pairs].Classification =
+ classifyPair(Pair[Pairs].Src, LI->getLoopFor(Src->getParent()),
+ Pair[Pairs].Dst, LI->getLoopFor(Dst->getParent()),
+ Pair[Pairs].Loops);
+ Pair[Pairs].GroupLoops = Pair[Pairs].Loops;
+ Pair[Pairs].Group.set(Pairs);
+ DEBUG(dbgs() << " subscript " << Pairs << "\n");
+ DEBUG(dbgs() << "\tsrc = " << *Pair[Pairs].Src << "\n");
+ DEBUG(dbgs() << "\tdst = " << *Pair[Pairs].Dst << "\n");
+ DEBUG(dbgs() << "\tclass = " << Pair[Pairs].Classification << "\n");
+ DEBUG(dbgs() << "\tloops = ");
+ DEBUG(dumpSmallBitVector(Pair[Pairs].Loops));
+ }
+
+ SmallBitVector Separable(Pairs);
+ SmallBitVector Coupled(Pairs);
+
+ // Partition subscripts into separable and minimally-coupled groups
+ // The algorithm in the paper has better asymptotic complexity,
+ // but this may be faster in practice. Check someday.
+ //
+ // Here's an example of how it works. Consider this code:
+ //
+ // for (i = ...) {
+ // for (j = ...) {
+ // for (k = ...) {
+ // for (l = ...) {
+ // for (m = ...) {
+ // A[i][j][k][m] = ...;
+ // ... = A[0][j][l][i + j];
+ // }
+ // }
+ // }
+ // }
+ // }
+ //
+ // There are 4 subscripts here:
+ // 0 [i] and [0]
+ // 1 [j] and [j]
+ // 2 [k] and [l]
+ // 3 [m] and [i + j]
+ //
+ // We've already classified each subscript pair as ZIV, SIV, etc.,
+ // and collected all the loops mentioned by pair P in Pair[P].Loops.
+ // In addition, we've initialized Pair[P].GroupLoops to Pair[P].Loops
+ // and set Pair[P].Group = {P}.
+ //
+ // Src Dst Classification Loops GroupLoops Group
+ // 0 [i] [0] SIV {1} {1} {0}
+ // 1 [j] [j] SIV {2} {2} {1}
+ // 2 [k] [l] RDIV {3,4} {3,4} {2}
+ // 3 [m] [i + j] MIV {1,2,5} {1,2,5} {3}
+ //
+ // For each subscript SI 0 .. 3, we consider each remaining subscript, SJ.
+ // So, 0 is compared against 1, 2, and 3; 1 is compared against 2 and 3, etc.
+ //
+ // We begin by comparing 0 and 1. The intersection of the GroupLoops is empty.
+ // Next, 0 and 2. Again, the intersection of their GroupLoops is empty.
+ // Next, 0 and 3. The intersection of their GroupLoops = {1}, not empty,
+ // so Pair[3].Group = {0,3} and Done = false (that is, 0 will not be added
+ // to either Separable or Coupled).
+ //
+ // Next, we consider 1 and 2. The intersection of the GroupLoops is empty.
+ // Next, 1 and 3. The intersection of their GroupLoops = {2}, not empty,
+ // so Pair[3].Group = {0, 1, 3} and Done = false.
+ //
+ // Next, we compare 2 against 3. The intersection of the GroupLoops is empty.
+ // Since Done remains true, we add 2 to the set of Separable pairs.
+ //
+ // Finally, we consider 3. There's nothing to compare it with,
+ // so Done remains true and we add it to the Coupled set.
+ // Pair[3].Group = {0, 1, 3} and GroupLoops = {1, 2, 5}.
+ //
+ // In the end, we've got 1 separable subscript and 1 coupled group.
+ for (unsigned SI = 0; SI < Pairs; ++SI) {
+ if (Pair[SI].Classification == Subscript::NonLinear) {
+ // ignore these, but collect loops for later
+ ++NonlinearSubscriptPairs;
+ collectCommonLoops(Pair[SI].Src,
+ LI->getLoopFor(Src->getParent()),
+ Pair[SI].Loops);
+ collectCommonLoops(Pair[SI].Dst,
+ LI->getLoopFor(Dst->getParent()),
+ Pair[SI].Loops);
+ Result.Consistent = false;
+ }
+ else if (Pair[SI].Classification == Subscript::ZIV) {
+ // always separable
+ Separable.set(SI);
+ }
+ else {
+ // SIV, RDIV, or MIV, so check for coupled group
+ bool Done = true;
+ for (unsigned SJ = SI + 1; SJ < Pairs; ++SJ) {
+ SmallBitVector Intersection = Pair[SI].GroupLoops;
+ Intersection &= Pair[SJ].GroupLoops;
+ if (Intersection.any()) {
+ // accumulate set of all the loops in group
+ Pair[SJ].GroupLoops |= Pair[SI].GroupLoops;
+ // accumulate set of all subscripts in group
+ Pair[SJ].Group |= Pair[SI].Group;
+ Done = false;
+ }
+ }
+ if (Done) {
+ if (Pair[SI].Group.count() == 1) {
+ Separable.set(SI);
+ ++SeparableSubscriptPairs;
+ }
+ else {
+ Coupled.set(SI);
+ ++CoupledSubscriptPairs;
+ }
+ }
+ }
+ }
+
+ DEBUG(dbgs() << " Separable = ");
+ DEBUG(dumpSmallBitVector(Separable));
+ DEBUG(dbgs() << " Coupled = ");
+ DEBUG(dumpSmallBitVector(Coupled));
+
+ Constraint NewConstraint;
+ NewConstraint.setAny(SE);
+
+ // test separable subscripts
+ for (int SI = Separable.find_first(); SI >= 0; SI = Separable.find_next(SI)) {
+ DEBUG(dbgs() << "testing subscript " << SI);
+ switch (Pair[SI].Classification) {
+ case Subscript::ZIV:
+ DEBUG(dbgs() << ", ZIV\n");
+ if (testZIV(Pair[SI].Src, Pair[SI].Dst, Result))
+ return NULL;
+ break;
+ case Subscript::SIV: {
+ DEBUG(dbgs() << ", SIV\n");
+ unsigned Level;
+ const SCEV *SplitIter = NULL;
+ if (testSIV(Pair[SI].Src, Pair[SI].Dst, Level,
+ Result, NewConstraint, SplitIter))
+ return NULL;
+ break;
+ }
+ case Subscript::RDIV:
+ DEBUG(dbgs() << ", RDIV\n");
+ if (testRDIV(Pair[SI].Src, Pair[SI].Dst, Result))
+ return NULL;
+ break;
+ case Subscript::MIV:
+ DEBUG(dbgs() << ", MIV\n");
+ if (testMIV(Pair[SI].Src, Pair[SI].Dst, Pair[SI].Loops, Result))
+ return NULL;
+ break;
+ default:
+ llvm_unreachable("subscript has unexpected classification");
+ }
+ }
+
+ if (Coupled.count()) {
+ // test coupled subscript groups
+ DEBUG(dbgs() << "starting on coupled subscripts\n");
+ DEBUG(dbgs() << "MaxLevels + 1 = " << MaxLevels + 1 << "\n");
+ SmallVector<Constraint, 4> Constraints(MaxLevels + 1);
+ for (unsigned II = 0; II <= MaxLevels; ++II)
+ Constraints[II].setAny(SE);
+ for (int SI = Coupled.find_first(); SI >= 0; SI = Coupled.find_next(SI)) {
+ DEBUG(dbgs() << "testing subscript group " << SI << " { ");
+ SmallBitVector Group(Pair[SI].Group);
+ SmallBitVector Sivs(Pairs);
+ SmallBitVector Mivs(Pairs);
+ SmallBitVector ConstrainedLevels(MaxLevels + 1);
+ for (int SJ = Group.find_first(); SJ >= 0; SJ = Group.find_next(SJ)) {
+ DEBUG(dbgs() << SJ << " ");
+ if (Pair[SJ].Classification == Subscript::SIV)
+ Sivs.set(SJ);
+ else
+ Mivs.set(SJ);
+ }
+ DEBUG(dbgs() << "}\n");
+ while (Sivs.any()) {
+ bool Changed = false;
+ for (int SJ = Sivs.find_first(); SJ >= 0; SJ = Sivs.find_next(SJ)) {
+ DEBUG(dbgs() << "testing subscript " << SJ << ", SIV\n");
+ // SJ is an SIV subscript that's part of the current coupled group
+ unsigned Level;
+ const SCEV *SplitIter = NULL;
+ DEBUG(dbgs() << "SIV\n");
+ if (testSIV(Pair[SJ].Src, Pair[SJ].Dst, Level,
+ Result, NewConstraint, SplitIter))
+ return NULL;
+ ConstrainedLevels.set(Level);
+ if (intersectConstraints(&Constraints[Level], &NewConstraint)) {
+ if (Constraints[Level].isEmpty()) {
+ ++DeltaIndependence;
+ return NULL;
+ }
+ Changed = true;
+ }
+ Sivs.reset(SJ);
+ }
+ if (Changed) {
+ // propagate, possibly creating new SIVs and ZIVs
+ DEBUG(dbgs() << " propagating\n");
+ DEBUG(dbgs() << "\tMivs = ");
+ DEBUG(dumpSmallBitVector(Mivs));
+ for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) {
+ // SJ is an MIV subscript that's part of the current coupled group
+ DEBUG(dbgs() << "\tSJ = " << SJ << "\n");
+ if (propagate(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops,
+ Constraints, Result.Consistent)) {
+ DEBUG(dbgs() << "\t Changed\n");
+ ++DeltaPropagations;
+ Pair[SJ].Classification =
+ classifyPair(Pair[SJ].Src, LI->getLoopFor(Src->getParent()),
+ Pair[SJ].Dst, LI->getLoopFor(Dst->getParent()),
+ Pair[SJ].Loops);
+ switch (Pair[SJ].Classification) {
+ case Subscript::ZIV:
+ DEBUG(dbgs() << "ZIV\n");
+ if (testZIV(Pair[SJ].Src, Pair[SJ].Dst, Result))
+ return NULL;
+ Mivs.reset(SJ);
+ break;
+ case Subscript::SIV:
+ Sivs.set(SJ);
+ Mivs.reset(SJ);
+ break;
+ case Subscript::RDIV:
+ case Subscript::MIV:
+ break;
+ default:
+ llvm_unreachable("bad subscript classification");
+ }
+ }
+ }
+ }
+ }
+
+ // test & propagate remaining RDIVs
+ for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) {
+ if (Pair[SJ].Classification == Subscript::RDIV) {
+ DEBUG(dbgs() << "RDIV test\n");
+ if (testRDIV(Pair[SJ].Src, Pair[SJ].Dst, Result))
+ return NULL;
+ // I don't yet understand how to propagate RDIV results
+ Mivs.reset(SJ);
+ }
+ }
+
+ // test remaining MIVs
+ // This code is temporary.
+ // It would be better to test all remaining subscripts simultaneously.
+ for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) {
+ if (Pair[SJ].Classification == Subscript::MIV) {
+ DEBUG(dbgs() << "MIV test\n");
+ if (testMIV(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops, Result))
+ return NULL;
+ }
+ else
+ llvm_unreachable("expected only MIV subscripts at this point");
+ }
+
+ // update Result.DV from constraint vector
+ DEBUG(dbgs() << " updating\n");
+ for (int SJ = ConstrainedLevels.find_first();
+ SJ >= 0; SJ = ConstrainedLevels.find_next(SJ)) {
+ updateDirection(Result.DV[SJ - 1], Constraints[SJ]);
+ if (Result.DV[SJ - 1].Direction == Dependence::DVEntry::NONE)
+ return NULL;
+ }
+ }
+ }
+
+ // make sure Scalar flags are set correctly
+ SmallBitVector CompleteLoops(MaxLevels + 1);
+ for (unsigned SI = 0; SI < Pairs; ++SI)
+ CompleteLoops |= Pair[SI].Loops;
+ for (unsigned II = 1; II <= CommonLevels; ++II)
+ if (CompleteLoops[II])
+ Result.DV[II - 1].Scalar = false;
+
+ // make sure the LoopIndependent flag is set correctly
+ if (PossiblyLoopIndependent) {
+ for (unsigned II = 1; II <= CommonLevels; ++II) {
+ if (!(Result.getDirection(II) & Dependence::DVEntry::EQ)) {
+ Result.LoopIndependent = false;
+ break;
+ }
+ }
+ }
+
+ FullDependence *Final = new FullDependence(Result);
+ Result.DV = NULL;
+ return Final;
+}
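// Editorial usage sketch (assumed client code, not part of this patch):
// a transform pass would query pairs of memory references and inspect the
// returned descriptor, remembering that depends() hands back an owned
// pointer:
//
//   DependenceAnalysis *DA = &getAnalysis<DependenceAnalysis>();
//   if (Dependence *D = DA->depends(Store, Load,
//                                   /*PossiblyLoopIndependent=*/true)) {
//     for (unsigned Level = 1; Level <= D->getLevels(); ++Level)
//       if (D->getDirection(Level) & Dependence::DVEntry::LT)
//         ; // loop-carried at this level; give up or record an edge
//     delete D;
//   }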
+
+
+
+//===----------------------------------------------------------------------===//
+// getSplitIteration -
+// Rather than spend rarely-used space recording the splitting iteration
+// during the Weak-Crossing SIV test, we re-compute it on demand.
+// The re-computation is basically a repeat of the entire dependence test,
+// though simplified since we know that the dependence exists.
+// It's tedious, since we must go through all propagations, etc.
+//
+// Care is required to keep this code up to date w.r.t. the code above.
+//
+// Generally, the dependence analyzer will be used to build
+// a dependence graph for a function (basically a map from instructions
+// to dependences). Looking for cycles in the graph shows us loops
+// that cannot be trivially vectorized/parallelized.
+//
+// We can try to improve the situation by examining all the dependences
+// that make up the cycle, looking for ones we can break.
+// Sometimes, peeling the first or last iteration of a loop will break
+// dependences, and we've got flags for those possibilities.
+// Sometimes, splitting a loop at some other iteration will do the trick,
+// and we've got a flag for that case. Rather than waste the space to
+// record the exact iteration (since we rarely know), we provide
+// a method that calculates the iteration. It's a drag that it must work
+// from scratch, but wonderful in that it's possible.
+//
+// Here's an example:
+//
+// for (i = 0; i < 10; i++)
+// A[i] = ...
+// ... = A[11 - i]
+//
+// There's a loop-carried flow dependence from the store to the load,
+// found by the weak-crossing SIV test. The dependence will have a flag,
+// indicating that the dependence can be broken by splitting the loop.
+// Calling getSplitIteration will return 5.
+// Splitting the loop as follows:
+//
+// for (i = 0; i <= 5; i++)
+// A[i] = ...
+// ... = A[11 - i]
+// for (i = 6; i < 10; i++)
+// A[i] = ...
+// ... = A[11 - i]
+//
+// breaks the dependence and allows us to vectorize/parallelize
+// both loops.
+const SCEV *DependenceAnalysis::getSplitIteration(const Dependence *Dep,
+ unsigned SplitLevel) {
+ assert(Dep && "expected a pointer to a Dependence");
+ assert(Dep->isSplitable(SplitLevel) &&
+ "Dep should be splitable at SplitLevel");
+ const Instruction *Src = Dep->getSrc();
+ const Instruction *Dst = Dep->getDst();
+ assert(Src->mayReadFromMemory() || Src->mayWriteToMemory());
+ assert(Dst->mayReadFromMemory() || Dst->mayWriteToMemory());
+ assert(isLoadOrStore(Src));
+ assert(isLoadOrStore(Dst));
+ const Value *SrcPtr = getPointerOperand(Src);
+ const Value *DstPtr = getPointerOperand(Dst);
+ assert(underlyingObjectsAlias(AA, DstPtr, SrcPtr) ==
+ AliasAnalysis::MustAlias);
+ const GEPOperator *SrcGEP = dyn_cast<GEPOperator>(SrcPtr);
+ const GEPOperator *DstGEP = dyn_cast<GEPOperator>(DstPtr);
+ assert(SrcGEP);
+ assert(DstGEP);
+ assert(SrcGEP->getPointerOperandType() == DstGEP->getPointerOperandType());
+
+ // establish loop nesting levels
+ establishNestingLevels(Src, Dst);
+
+ FullDependence Result(Src, Dst, false, CommonLevels);
+
+ // classify subscript pairs
+ unsigned Pairs = SrcGEP->idx_end() - SrcGEP->idx_begin();
+ SmallVector<Subscript, 4> Pair(Pairs);
+ for (unsigned SI = 0; SI < Pairs; ++SI) {
+ Pair[SI].Loops.resize(MaxLevels + 1);
+ Pair[SI].GroupLoops.resize(MaxLevels + 1);
+ Pair[SI].Group.resize(Pairs);
+ }
+ Pairs = 0;
+ for (GEPOperator::const_op_iterator SrcIdx = SrcGEP->idx_begin(),
+ SrcEnd = SrcGEP->idx_end(),
+ DstIdx = DstGEP->idx_begin(),
+ DstEnd = DstGEP->idx_end();
+ SrcIdx != SrcEnd && DstIdx != DstEnd;
+ ++SrcIdx, ++DstIdx, ++Pairs) {
+ Pair[Pairs].Src = SE->getSCEV(*SrcIdx);
+ Pair[Pairs].Dst = SE->getSCEV(*DstIdx);
+ Pair[Pairs].Classification =
+ classifyPair(Pair[Pairs].Src, LI->getLoopFor(Src->getParent()),
+ Pair[Pairs].Dst, LI->getLoopFor(Dst->getParent()),
+ Pair[Pairs].Loops);
+ Pair[Pairs].GroupLoops = Pair[Pairs].Loops;
+ Pair[Pairs].Group.set(Pairs);
+ }
+
+ SmallBitVector Separable(Pairs);
+ SmallBitVector Coupled(Pairs);
+
+ // partition subscripts into separable and minimally-coupled groups
+ for (unsigned SI = 0; SI < Pairs; ++SI) {
+ if (Pair[SI].Classification == Subscript::NonLinear) {
+ // ignore these, but collect loops for later
+ collectCommonLoops(Pair[SI].Src,
+ LI->getLoopFor(Src->getParent()),
+ Pair[SI].Loops);
+ collectCommonLoops(Pair[SI].Dst,
+ LI->getLoopFor(Dst->getParent()),
+ Pair[SI].Loops);
+ Result.Consistent = false;
+ }
+ else if (Pair[SI].Classification == Subscript::ZIV)
+ Separable.set(SI);
+ else {
+ // SIV, RDIV, or MIV, so check for coupled group
+ bool Done = true;
+ for (unsigned SJ = SI + 1; SJ < Pairs; ++SJ) {
+ SmallBitVector Intersection = Pair[SI].GroupLoops;
+ Intersection &= Pair[SJ].GroupLoops;
+ if (Intersection.any()) {
+ // accumulate set of all the loops in group
+ Pair[SJ].GroupLoops |= Pair[SI].GroupLoops;
+ // accumulate set of all subscripts in group
+ Pair[SJ].Group |= Pair[SI].Group;
+ Done = false;
+ }
+ }
+ if (Done) {
+ if (Pair[SI].Group.count() == 1)
+ Separable.set(SI);
+ else
+ Coupled.set(SI);
+ }
+ }
+ }
+
+ Constraint NewConstraint;
+ NewConstraint.setAny(SE);
+
+ // test separable subscripts
+ for (int SI = Separable.find_first(); SI >= 0; SI = Separable.find_next(SI)) {
+ switch (Pair[SI].Classification) {
+ case Subscript::SIV: {
+ unsigned Level;
+ const SCEV *SplitIter = NULL;
+ (void) testSIV(Pair[SI].Src, Pair[SI].Dst, Level,
+ Result, NewConstraint, SplitIter);
+ if (Level == SplitLevel) {
+ assert(SplitIter != NULL);
+ return SplitIter;
+ }
+ break;
+ }
+ case Subscript::ZIV:
+ case Subscript::RDIV:
+ case Subscript::MIV:
+ break;
+ default:
+ llvm_unreachable("subscript has unexpected classification");
+ }
+ }
+
+ if (Coupled.count()) {
+ // test coupled subscript groups
+ SmallVector<Constraint, 4> Constraints(MaxLevels + 1);
+ for (unsigned II = 0; II <= MaxLevels; ++II)
+ Constraints[II].setAny(SE);
+ for (int SI = Coupled.find_first(); SI >= 0; SI = Coupled.find_next(SI)) {
+ SmallBitVector Group(Pair[SI].Group);
+ SmallBitVector Sivs(Pairs);
+ SmallBitVector Mivs(Pairs);
+ SmallBitVector ConstrainedLevels(MaxLevels + 1);
+ for (int SJ = Group.find_first(); SJ >= 0; SJ = Group.find_next(SJ)) {
+ if (Pair[SJ].Classification == Subscript::SIV)
+ Sivs.set(SJ);
+ else
+ Mivs.set(SJ);
+ }
+ while (Sivs.any()) {
+ bool Changed = false;
+ for (int SJ = Sivs.find_first(); SJ >= 0; SJ = Sivs.find_next(SJ)) {
+ // SJ is an SIV subscript that's part of the current coupled group
+ unsigned Level;
+ const SCEV *SplitIter = NULL;
+ (void) testSIV(Pair[SJ].Src, Pair[SJ].Dst, Level,
+ Result, NewConstraint, SplitIter);
+ if (Level == SplitLevel && SplitIter)
+ return SplitIter;
+ ConstrainedLevels.set(Level);
+ if (intersectConstraints(&Constraints[Level], &NewConstraint))
+ Changed = true;
+ Sivs.reset(SJ);
+ }
+ if (Changed) {
+ // propagate, possibly creating new SIVs and ZIVs
+ for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) {
+ // SJ is an MIV subscript that's part of the current coupled group
+ if (propagate(Pair[SJ].Src, Pair[SJ].Dst,
+ Pair[SJ].Loops, Constraints, Result.Consistent)) {
+ Pair[SJ].Classification =
+ classifyPair(Pair[SJ].Src, LI->getLoopFor(Src->getParent()),
+ Pair[SJ].Dst, LI->getLoopFor(Dst->getParent()),
+ Pair[SJ].Loops);
+ switch (Pair[SJ].Classification) {
+ case Subscript::ZIV:
+ Mivs.reset(SJ);
+ break;
+ case Subscript::SIV:
+ Sivs.set(SJ);
+ Mivs.reset(SJ);
+ break;
+ case Subscript::RDIV:
+ case Subscript::MIV:
+ break;
+ default:
+ llvm_unreachable("bad subscript classification");
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ llvm_unreachable("somehow reached end of routine");
+ return NULL;
+}
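// Editorial worked example: for the loop in the comment before
// getSplitIteration, the weak-crossing SIV test solves i + i' = 11, so the
// source and sink iterations cross at i = 5.5; getSplitIteration returns
// floor(5.5) = 5, and splitting after iteration 5 (as shown) leaves no
// dependence within either resulting loop.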
diff --git a/contrib/llvm/lib/Analysis/DominanceFrontier.cpp b/contrib/llvm/lib/Analysis/DominanceFrontier.cpp
index 1604576..3e537e9 100644
--- a/contrib/llvm/lib/Analysis/DominanceFrontier.cpp
+++ b/contrib/llvm/lib/Analysis/DominanceFrontier.cpp
@@ -133,7 +133,9 @@ void DominanceFrontierBase::print(raw_ostream &OS, const Module* ) const {
}
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void DominanceFrontierBase::dump() const {
print(dbgs());
}
+#endif
diff --git a/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp b/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
index 0df3e8a..dec0ece 100644
--- a/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
@@ -141,12 +141,13 @@ private:
for (BasicBlock::iterator II = BB->begin(), IE = BB->end();
II != IE; ++II) {
CallSite CS(cast<Value>(II));
- if (CS && !isa<IntrinsicInst>(II)) {
+ if (CS) {
const Function *Callee = CS.getCalledFunction();
- if (Callee)
- Node->addCalledFunction(CS, getOrInsertFunction(Callee));
- else
+ if (!Callee)
+ // Indirect calls of intrinsics are not allowed, so there is no need
+ // to check.
Node->addCalledFunction(CS, CallsExternalNode);
+ else if (!Callee->isIntrinsic())
+ Node->addCalledFunction(CS, getOrInsertFunction(Callee));
}
}
}
@@ -198,9 +199,11 @@ void CallGraph::print(raw_ostream &OS, Module*) const {
for (CallGraph::const_iterator I = begin(), E = end(); I != E; ++I)
I->second->print(OS);
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void CallGraph::dump() const {
print(dbgs(), 0);
}
+#endif
//===----------------------------------------------------------------------===//
// Implementations of public modification methods
@@ -267,7 +270,9 @@ void CallGraphNode::print(raw_ostream &OS) const {
OS << '\n';
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void CallGraphNode::dump() const { print(dbgs()); }
+#endif
/// removeCallEdgeFor - This method removes the edge in the node for the
/// specified call site. Note that this method takes linear time, so it
diff --git a/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp b/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
index 22f6e96..990caa8 100644
--- a/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
@@ -263,7 +263,7 @@ bool GlobalsModRef::AnalyzeUsesOfPointer(Value *V,
} else if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
if (AnalyzeUsesOfPointer(BCI, Readers, Writers, OkayStoreDest))
return true;
- } else if (isFreeCall(U)) {
+ } else if (isFreeCall(U, TLI)) {
Writers.push_back(cast<Instruction>(U)->getParent()->getParent());
} else if (CallInst *CI = dyn_cast<CallInst>(U)) {
// Make sure that this is just the function being called, not that it is
@@ -329,7 +329,7 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) {
// Check the value being stored.
Value *Ptr = GetUnderlyingObject(SI->getOperand(0));
- if (!isAllocLikeFn(Ptr))
+ if (!isAllocLikeFn(Ptr, TLI))
return false; // Too hard to analyze.
// Analyze all uses of the allocation. If any of them are used in a
@@ -458,7 +458,7 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {
if (SI->isVolatile())
// Treat volatile stores as reading memory somewhere.
FunctionEffect |= Ref;
- } else if (isAllocationFn(&*II) || isFreeCall(&*II)) {
+ } else if (isAllocationFn(&*II, TLI) || isFreeCall(&*II, TLI)) {
FunctionEffect |= ModRef;
} else if (IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(&*II)) {
// The callgraph doesn't include intrinsic calls.
diff --git a/contrib/llvm/lib/Analysis/IVUsers.cpp b/contrib/llvm/lib/Analysis/IVUsers.cpp
index 0a6682a..d4221b8 100644
--- a/contrib/llvm/lib/Analysis/IVUsers.cpp
+++ b/contrib/llvm/lib/Analysis/IVUsers.cpp
@@ -22,7 +22,7 @@
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
@@ -235,7 +235,7 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
LI = &getAnalysis<LoopInfo>();
DT = &getAnalysis<DominatorTree>();
SE = &getAnalysis<ScalarEvolution>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
// Find all uses of induction variables in this loop, and categorize
// them by stride. Start by finding all of the PHI nodes in the header for
@@ -273,9 +273,11 @@ void IVUsers::print(raw_ostream &OS, const Module *M) const {
}
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void IVUsers::dump() const {
print(dbgs());
}
+#endif
void IVUsers::releaseMemory() {
Processed.clear();
diff --git a/contrib/llvm/lib/Analysis/InlineCost.cpp b/contrib/llvm/lib/Analysis/InlineCost.cpp
index e9f39ab..5f51f77 100644
--- a/contrib/llvm/lib/Analysis/InlineCost.cpp
+++ b/contrib/llvm/lib/Analysis/InlineCost.cpp
@@ -24,7 +24,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/GlobalAlias.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
@@ -41,8 +41,8 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
typedef InstVisitor<CallAnalyzer, bool> Base;
friend class InstVisitor<CallAnalyzer, bool>;
- // TargetData if available, or null.
- const TargetData *const TD;
+ // DataLayout if available, or null.
+ const DataLayout *const TD;
// The called function.
Function &F;
@@ -51,9 +51,12 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
int Cost;
const bool AlwaysInline;
- bool IsRecursive;
+ bool IsCallerRecursive;
+ bool IsRecursiveCall;
bool ExposesReturnsTwice;
bool HasDynamicAlloca;
+ /// Number of bytes allocated statically by the callee.
+ uint64_t AllocatedSize;
unsigned NumInstructions, NumVectorInstructions;
int FiftyPercentVectorBonus, TenPercentVectorBonus;
int VectorBonus;
@@ -123,10 +126,11 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
bool visitCallSite(CallSite CS);
public:
- CallAnalyzer(const TargetData *TD, Function &Callee, int Threshold)
+ CallAnalyzer(const DataLayout *TD, Function &Callee, int Threshold)
: TD(TD), F(Callee), Threshold(Threshold), Cost(0),
- AlwaysInline(F.hasFnAttr(Attribute::AlwaysInline)),
- IsRecursive(false), ExposesReturnsTwice(false), HasDynamicAlloca(false),
+ AlwaysInline(F.getFnAttributes().hasAttribute(Attributes::AlwaysInline)),
+ IsCallerRecursive(false), IsRecursiveCall(false),
+ ExposesReturnsTwice(false), HasDynamicAlloca(false), AllocatedSize(0),
NumInstructions(0), NumVectorInstructions(0),
FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0),
NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
@@ -270,6 +274,13 @@ bool CallAnalyzer::visitAlloca(AllocaInst &I) {
// FIXME: Check whether inlining will turn a dynamic alloca into a static
// alloca, and handle that case.
+ // Accumulate the allocated size.
+ if (I.isStaticAlloca()) {
+ Type *Ty = I.getAllocatedType();
+ AllocatedSize += (TD ? TD->getTypeAllocSize(Ty) :
+ Ty->getPrimitiveSizeInBits());
+ }
+
// We will happily inline static alloca instructions or dynamic alloca
// instructions in always-inline situations.
if (AlwaysInline || I.isStaticAlloca())
@@ -603,7 +614,7 @@ bool CallAnalyzer::visitStore(StoreInst &I) {
bool CallAnalyzer::visitCallSite(CallSite CS) {
if (CS.isCall() && cast<CallInst>(CS.getInstruction())->canReturnTwice() &&
- !F.hasFnAttr(Attribute::ReturnsTwice)) {
+ !F.getFnAttributes().hasAttribute(Attributes::ReturnsTwice)) {
// This aborts the entire analysis.
ExposesReturnsTwice = true;
return false;
@@ -626,7 +637,7 @@ bool CallAnalyzer::visitCallSite(CallSite CS) {
if (F == CS.getInstruction()->getParent()->getParent()) {
// This flag will fully abort the analysis, so don't bother with anything
// else.
- IsRecursive = true;
+ IsRecursiveCall = true;
return false;
}
@@ -713,7 +724,14 @@ bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
Cost += InlineConstants::InstrCost;
// If the visit this instruction detected an uninlinable pattern, abort.
- if (IsRecursive || ExposesReturnsTwice || HasDynamicAlloca)
+ if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
+ return false;
+
+ // If the caller is a recursive function then we don't want to inline
+ // functions which allocate a lot of stack space because it would increase
+ // the caller stack usage dramatically.
+ if (IsCallerRecursive &&
+ AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
return false;
if (NumVectorInstructions > NumInstructions/2)
@@ -815,7 +833,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
// one load and one store per word copied.
// FIXME: The maxStoresPerMemcpy setting from the target should be used
// here instead of a magic number of 8, but it's not available via
- // TargetData.
+ // DataLayout.
NumStores = std::min(NumStores, 8U);
Cost -= 2 * NumStores * InlineConstants::InstrCost;
@@ -832,12 +850,14 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
Cost += InlineConstants::LastCallToStaticBonus;
// If the instruction after the call, or if the normal destination of the
- // invoke is an unreachable instruction, the function is noreturn. As such,
- // there is little point in inlining this unless there is literally zero cost.
- if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
+ // invoke is an unreachable instruction, the function is noreturn. As such,
+ // there is little point in inlining this unless there is literally zero
+ // cost.
+ Instruction *Instr = CS.getInstruction();
+ if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
if (isa<UnreachableInst>(II->getNormalDest()->begin()))
Threshold = 1;
- } else if (isa<UnreachableInst>(++BasicBlock::iterator(CS.getInstruction())))
+ } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr)))
Threshold = 1;
// If this function uses the coldcc calling convention, prefer not to inline
@@ -853,6 +873,20 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
if (F.empty())
return true;
+ Function *Caller = CS.getInstruction()->getParent()->getParent();
+ // Check if the caller function is recursive itself.
+ for (Value::use_iterator U = Caller->use_begin(), E = Caller->use_end();
+ U != E; ++U) {
+ CallSite Site(cast<Value>(*U));
+ if (!Site)
+ continue;
+ Instruction *I = Site.getInstruction();
+ if (I->getParent()->getParent() == Caller) {
+ IsCallerRecursive = true;
+ break;
+ }
+ }
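// Editorial note (not part of this patch): the scan above flags direct
// self-recursion in the caller; for example
//
//   static int fact(int n) { return n ? n * fact(n - 1) : 1; }
//
// contains a use of fact inside fact itself, so IsCallerRecursive is set
// and, together with AllocatedSize, bounds how much callee stack may be
// inlined into the recursive frame.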
+
// Track whether we've seen a return instruction. The first return
// instruction is free, as at least one will usually disappear in inlining.
bool HasReturn = false;
@@ -909,9 +943,9 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
// We never want to inline functions that contain an indirectbr. This is
// incorrect because all the blockaddress's (in static global initializers
- // for example) would be referring to the original function, and this indirect
- // jump would jump from the inlined copy of the function into the original
- // function which is extremely undefined behavior.
+ // for example) would be referring to the original function, and this
+ // indirect jump would jump from the inlined copy of the function into the
+ // original function which is extremely undefined behavior.
// FIXME: This logic isn't really right; we can safely inline functions
// with indirectbr's as long as no other function or global references the
// blockaddress of a block within the current function. And as a QOI issue,
@@ -929,8 +963,16 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
// Analyze the cost of this block. If we blow through the threshold, this
// returns false, and we can bail out.
if (!analyzeBlock(BB)) {
- if (IsRecursive || ExposesReturnsTwice || HasDynamicAlloca)
+ if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
return false;
+
+ // If the caller is a recursive function then we don't want to inline
+ // functions which allocate a lot of stack space because it would increase
+ // the caller stack usage dramatically.
+ if (IsCallerRecursive &&
+ AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
+ return false;
+
break;
}
@@ -956,7 +998,8 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
// If we're unable to select a particular successor, just count all of
// them.
- for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize; ++TIdx)
+ for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
+ ++TIdx)
BBWorklist.insert(TI->getSuccessor(TIdx));
// If we had any successors at this point, then post-inlining is likely to
@@ -975,6 +1018,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
return AlwaysInline || Cost < Threshold;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) llvm::dbgs() << " " #x ": " << x << "\n"
@@ -988,6 +1032,7 @@ void CallAnalyzer::dump() {
DEBUG_PRINT_STAT(SROACostSavingsLost);
#undef DEBUG_PRINT_STAT
}
+#endif
InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, int Threshold) {
return getInlineCost(CS, CS.getCalledFunction(), Threshold);
@@ -999,10 +1044,12 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee,
// something else. Don't inline functions marked noinline or call sites
// marked noinline.
if (!Callee || Callee->mayBeOverridden() ||
- Callee->hasFnAttr(Attribute::NoInline) || CS.isNoInline())
+ Callee->getFnAttributes().hasAttribute(Attributes::NoInline) ||
+ CS.isNoInline())
return llvm::InlineCost::getNever();
- DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName() << "...\n");
+ DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
+ << "...\n");
CallAnalyzer CA(TD, *Callee, Threshold);
bool ShouldInline = CA.analyzeCall(CS);
diff --git a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
index 379a35a..a76e5ad 100644
--- a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -31,7 +31,7 @@
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/ValueHandle.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
using namespace llvm::PatternMatch;
@@ -42,11 +42,11 @@ STATISTIC(NumFactor , "Number of factorizations");
STATISTIC(NumReassoc, "Number of reassociations");
struct Query {
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLibraryInfo *TLI;
const DominatorTree *DT;
- Query(const TargetData *td, const TargetLibraryInfo *tli,
+ Query(const DataLayout *td, const TargetLibraryInfo *tli,
const DominatorTree *dt) : TD(td), TLI(tli), DT(dt) {}
};
@@ -651,7 +651,7 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
RecursionLimit);
@@ -664,7 +664,7 @@ Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
/// if the GEP has all-constant indices. Returns false if any non-constant
/// index is encountered leaving the 'Offset' in an undefined state. The
/// 'Offset' APInt must be the bitwidth of the target's pointer size.
-static bool accumulateGEPOffset(const TargetData &TD, GEPOperator *GEP,
+static bool accumulateGEPOffset(const DataLayout &TD, GEPOperator *GEP,
APInt &Offset) {
unsigned IntPtrWidth = TD.getPointerSizeInBits();
assert(IntPtrWidth == Offset.getBitWidth());
@@ -696,7 +696,7 @@ static bool accumulateGEPOffset(const TargetData &TD, GEPOperator *GEP,
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
-static Constant *stripAndComputeConstantOffsets(const TargetData &TD,
+static Constant *stripAndComputeConstantOffsets(const DataLayout &TD,
Value *&V) {
if (!V->getType()->isPointerTy())
return 0;
@@ -731,7 +731,7 @@ static Constant *stripAndComputeConstantOffsets(const TargetData &TD,
/// \brief Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns zero.
-static Constant *computePointerDifference(const TargetData &TD,
+static Constant *computePointerDifference(const DataLayout &TD,
Value *LHS, Value *RHS) {
Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS);
if (!LHSOffset)
@@ -880,7 +880,7 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
RecursionLimit);
@@ -951,7 +951,7 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyMulInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1039,7 +1039,7 @@ static Value *SimplifySDivInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifySDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1055,7 +1055,7 @@ static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyUDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1074,7 +1074,7 @@ static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyFDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1144,7 +1144,7 @@ static Value *SimplifySRemInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifySRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1160,7 +1160,7 @@ static Value *SimplifyURemInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyURemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1179,7 +1179,7 @@ static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const Query &,
return 0;
}
-Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyFRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1248,7 +1248,7 @@ static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
RecursionLimit);
@@ -1275,7 +1275,7 @@ static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
}
Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyLShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
@@ -1307,7 +1307,7 @@ static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
}
Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyAShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
@@ -1407,7 +1407,7 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyAndInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1501,7 +1501,7 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyOrInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1561,7 +1561,7 @@ static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyXorInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1591,7 +1591,7 @@ static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
return 0;
}
-static Constant *computePointerICmp(const TargetData &TD,
+static Constant *computePointerICmp(const DataLayout &TD,
CmpInst::Predicate Pred,
Value *LHS, Value *RHS) {
// We can only fold certain predicates on pointer comparisons.
@@ -2065,8 +2065,25 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (A && C && (A == C || A == D || B == C || B == D) &&
NoLHSWrapProblem && NoRHSWrapProblem) {
// Determine Y and Z in the form icmp (X+Y), (X+Z).
- Value *Y = (A == C || A == D) ? B : A;
- Value *Z = (C == A || C == B) ? D : C;
+ Value *Y, *Z;
+ if (A == C) {
+ // C + B == C + D -> B == D
+ Y = B;
+ Z = D;
+ } else if (A == D) {
+ // D + B == C + D -> B == C
+ Y = B;
+ Z = C;
+ } else if (B == C) {
+ // A + C == C + D -> A == D
+ Y = A;
+ Z = D;
+ } else {
+ assert(B == D);
+ // A + D == C + D -> A == C
+ Y = A;
+ Z = C;
+ }
if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse-1))
return V;
}
@@ -2399,7 +2416,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
@@ -2496,7 +2513,7 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
@@ -2531,7 +2548,7 @@ static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
}
Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Query (TD, TLI, DT),
@@ -2579,7 +2596,7 @@ static Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const Query &Q, unsigned) {
return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]), Ops.slice(1));
}
-Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const TargetData *TD,
+Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyGEPInst(Ops, Query (TD, TLI, DT), RecursionLimit);
@@ -2616,7 +2633,7 @@ static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query (TD, TLI, DT),
@@ -2664,7 +2681,7 @@ static Value *SimplifyTruncInst(Value *Op, Type *Ty, const Query &Q, unsigned) {
return 0;
}
-Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const TargetData *TD,
+Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyTruncInst(Op, Ty, Query (TD, TLI, DT), RecursionLimit);
@@ -2730,7 +2747,7 @@ static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyBinOp(Opcode, LHS, RHS, Query (TD, TLI, DT), RecursionLimit);
}
@@ -2745,7 +2762,7 @@ static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
RecursionLimit);
@@ -2761,7 +2778,7 @@ static Value *SimplifyCallInst(CallInst *CI, const Query &) {
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
-Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD,
+Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
Value *Result;
@@ -2881,7 +2898,7 @@ Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD,
/// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this.
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
bool Simplified = false;
@@ -2936,14 +2953,14 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
}
bool llvm::recursivelySimplifyInstruction(Instruction *I,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return replaceAndRecursivelySimplifyImpl(I, 0, TD, TLI, DT);
}
bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
diff --git a/contrib/llvm/lib/Analysis/LazyValueInfo.cpp b/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
index 9140786..2b87d80 100644
--- a/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -13,13 +13,14 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "lazy-value-info"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/ConstantRange.h"
@@ -212,7 +213,7 @@ public:
// Unless we can prove that the two Constants are different, we must
// move to overdefined.
- // FIXME: use TargetData/TargetLibraryInfo for smarter constant folding.
+ // FIXME: use DataLayout/TargetLibraryInfo for smarter constant folding.
if (ConstantInt *Res = dyn_cast<ConstantInt>(
ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
getConstant(),
@@ -238,7 +239,7 @@ public:
// Unless we can prove that the two Constants are different, we must
// move to overdefined.
- // FIXME: use TargetData/TargetLibraryInfo for smarter constant folding.
+ // FIXME: use DataLayout/TargetLibraryInfo for smarter constant folding.
if (ConstantInt *Res = dyn_cast<ConstantInt>(
ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
getNotConstant(),
@@ -294,7 +295,7 @@ raw_ostream &operator<<(raw_ostream &OS, const LVILatticeVal &Val) {
//===----------------------------------------------------------------------===//
namespace {
- /// LVIValueHandle - A callback value handle update the cache when
+ /// LVIValueHandle - A callback value handle updates the cache when
/// values are erased.
class LazyValueInfoCache;
struct LVIValueHandle : public CallbackVH {
@@ -470,8 +471,10 @@ bool LazyValueInfoCache::hasBlockValue(Value *Val, BasicBlock *BB) {
return true;
LVIValueHandle ValHandle(Val, this);
- if (!ValueCache.count(ValHandle)) return false;
- return ValueCache[ValHandle].count(BB);
+ std::map<LVIValueHandle, ValueCacheEntryTy>::iterator I =
+ ValueCache.find(ValHandle);
+ if (I == ValueCache.end()) return false;
+ return I->second.count(BB);
}
LVILatticeVal LazyValueInfoCache::getBlockValue(Value *Val, BasicBlock *BB) {
@@ -555,13 +558,11 @@ bool LazyValueInfoCache::solveBlockValue(Value *Val, BasicBlock *BB) {
static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
if (LoadInst *L = dyn_cast<LoadInst>(I)) {
return L->getPointerAddressSpace() == 0 &&
- GetUnderlyingObject(L->getPointerOperand()) ==
- GetUnderlyingObject(Ptr);
+ GetUnderlyingObject(L->getPointerOperand()) == Ptr;
}
if (StoreInst *S = dyn_cast<StoreInst>(I)) {
return S->getPointerAddressSpace() == 0 &&
- GetUnderlyingObject(S->getPointerOperand()) ==
- GetUnderlyingObject(Ptr);
+ GetUnderlyingObject(S->getPointerOperand()) == Ptr;
}
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
if (MI->isVolatile()) return false;
@@ -571,11 +572,11 @@ static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
if (!Len || Len->isZero()) return false;
if (MI->getDestAddressSpace() == 0)
- if (MI->getRawDest() == Ptr || MI->getDest() == Ptr)
+ if (GetUnderlyingObject(MI->getRawDest()) == Ptr)
return true;
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
if (MTI->getSourceAddressSpace() == 0)
- if (MTI->getRawSource() == Ptr || MTI->getSource() == Ptr)
+ if (GetUnderlyingObject(MTI->getRawSource()) == Ptr)
return true;
}
return false;
@@ -589,13 +590,19 @@ bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
// then we know that the pointer can't be NULL.
bool NotNull = false;
if (Val->getType()->isPointerTy()) {
- if (isa<AllocaInst>(Val)) {
+ if (isKnownNonNull(Val)) {
NotNull = true;
} else {
- for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();BI != BE;++BI){
- if (InstructionDereferencesPointer(BI, Val)) {
- NotNull = true;
- break;
+ Value *UnderlyingVal = GetUnderlyingObject(Val);
+ // If 'GetUnderlyingObject' didn't converge, skip it. It won't converge
+ // inside InstructionDereferencesPointer either.
+ if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, NULL, 1)) {
+ for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
+ BI != BE; ++BI) {
+ if (InstructionDereferencesPointer(BI, UnderlyingVal)) {
+ NotNull = true;
+ break;
+ }
}
}
}
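
GetUnderlyingObject strips GEPs and casts only up to a fixed lookup budget (6 by default), so it may stop before reaching a true underlying object. The hunk detects that by asking for one more stripping step; a hedged sketch of the same test against the 3.2-era ValueTracking API:

```cpp
#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

// Sketch only: if one extra stripping step changes nothing, the earlier
// walk reached a fixpoint and the result is a genuine underlying object.
static bool underlyingObjectConverged(Value *Val) {
  Value *Underlying = GetUnderlyingObject(Val);
  return Underlying == GetUnderlyingObject(Underlying, 0, 1);
}
```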
@@ -845,9 +852,12 @@ static bool getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
i != e; ++i) {
ConstantRange EdgeVal(i.getCaseValue()->getValue());
- if (DefaultCase)
- EdgesVals = EdgesVals.difference(EdgeVal);
- else if (i.getCaseSuccessor() == BBTo)
+ if (DefaultCase) {
+ // Some cases may share the default destination; their values remain
+ // reachable along the default edge, so don't subtract them.
+ if (i.getCaseSuccessor() != BBTo)
+ EdgesVals = EdgesVals.difference(EdgeVal);
+ } else if (i.getCaseSuccessor() == BBTo)
EdgesVals = EdgesVals.unionWith(EdgeVal);
}
Result = LVILatticeVal::getRange(EdgesVals);
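
The value reaching the default edge is everything not consumed by a case, except that cases jumping to the default block keep their values reachable, which is what the added guard preserves. A standalone sketch with plain integer sets standing in for ConstantRange:

```cpp
#include <set>
#include <vector>

struct Case { int Value; int Successor; };

// Subtract only the case values whose case jumps somewhere other than the
// default block; values of cases sharing the default destination can still
// flow down the default edge.
std::set<int> defaultEdgeValues(std::set<int> AllValues,
                                const std::vector<Case> &Cases,
                                int DefaultSuccessor) {
  for (std::vector<Case>::const_iterator I = Cases.begin(), E = Cases.end();
       I != E; ++I)
    if (I->Successor != DefaultSuccessor)
      AllValues.erase(I->Value);
  return AllValues;
}
```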
@@ -1004,7 +1014,7 @@ bool LazyValueInfo::runOnFunction(Function &F) {
if (PImpl)
getCache(PImpl).clear();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
// Fully lazy.
diff --git a/contrib/llvm/lib/Analysis/Lint.cpp b/contrib/llvm/lib/Analysis/Lint.cpp
index 83bdf52..6d6d580 100644
--- a/contrib/llvm/lib/Analysis/Lint.cpp
+++ b/contrib/llvm/lib/Analysis/Lint.cpp
@@ -43,7 +43,7 @@
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
@@ -103,7 +103,7 @@ namespace {
Module *Mod;
AliasAnalysis *AA;
DominatorTree *DT;
- TargetData *TD;
+ DataLayout *TD;
TargetLibraryInfo *TLI;
std::string Messages;
@@ -177,7 +177,7 @@ bool Lint::runOnFunction(Function &F) {
Mod = F.getParent();
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTree>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
visit(F);
dbgs() << MessagesStr.str();
@@ -411,14 +411,50 @@ void Lint::visitMemoryReference(Instruction &I,
"Undefined behavior: Branch to non-blockaddress", &I);
}
+ // Check for buffer overflows and misalignment.
if (TD) {
- if (Align == 0 && Ty) Align = TD->getABITypeAlignment(Ty);
+ // Only handles memory references that read/write something simple like an
+ // alloca instruction or a global variable.
+ int64_t Offset = 0;
+ if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, *TD)) {
+ // OK, so the access is to a constant offset from Ptr. Check that Ptr is
+ // something we can handle and if so extract the size of this base object
+ // along with its alignment.
+ uint64_t BaseSize = AliasAnalysis::UnknownSize;
+ unsigned BaseAlign = 0;
+
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
+ Type *ATy = AI->getAllocatedType();
+ if (!AI->isArrayAllocation() && ATy->isSized())
+ BaseSize = TD->getTypeAllocSize(ATy);
+ BaseAlign = AI->getAlignment();
+ if (BaseAlign == 0 && ATy->isSized())
+ BaseAlign = TD->getABITypeAlignment(ATy);
+ } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
+ // If the global may be defined differently in another compilation unit
+ // then don't warn about funky memory accesses.
+ if (GV->hasDefinitiveInitializer()) {
+ Type *GTy = GV->getType()->getElementType();
+ if (GTy->isSized())
+ BaseSize = TD->getTypeAllocSize(GTy);
+ BaseAlign = GV->getAlignment();
+ if (BaseAlign == 0 && GTy->isSized())
+ BaseAlign = TD->getABITypeAlignment(GTy);
+ }
+ }
- if (Align != 0) {
- unsigned BitWidth = TD->getTypeSizeInBits(Ptr->getType());
- APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- ComputeMaskedBits(Ptr, KnownZero, KnownOne, TD);
- Assert1(!(KnownOne & APInt::getLowBitsSet(BitWidth, Log2_32(Align))),
+ // Accesses from before the start or after the end of the object are not
+ // defined.
+ Assert1(Size == AliasAnalysis::UnknownSize ||
+ BaseSize == AliasAnalysis::UnknownSize ||
+ (Offset >= 0 && Offset + Size <= BaseSize),
+ "Undefined behavior: Buffer overflow", &I);
+
+ // Accesses that say that the memory is more aligned than it is are not
+ // defined.
+ if (Align == 0 && Ty && Ty->isSized())
+ Align = TD->getABITypeAlignment(Ty);
+ Assert1(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
"Undefined behavior: Memory reference address is misaligned", &I);
}
}
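
The new Lint logic reduces to two pure checks: the access window [Offset, Offset+Size) must lie inside the base object, and the claimed alignment may not exceed what the base alignment guarantees at that offset. A standalone sketch, with UnknownSize and minAlign standing in for AliasAnalysis::UnknownSize and llvm::MinAlign:

```cpp
#include <stdint.h>

static const uint64_t UnknownSize = ~0ULL;

// Largest power of two dividing both A and B (the lowest set bit of A|B),
// the same quantity llvm::MinAlign computes.
static uint64_t minAlign(uint64_t A, uint64_t B) {
  uint64_t Both = A | B;
  return Both & (~Both + 1);
}

// Buffer-overflow check: stay silent when either size is unknown.
static bool accessInBounds(uint64_t Size, uint64_t BaseSize, int64_t Offset) {
  if (Size == UnknownSize || BaseSize == UnknownSize)
    return true;
  return Offset >= 0 && (uint64_t)Offset + Size <= BaseSize;
}

// Misalignment check: an access may not claim more alignment than the base
// object plus the offset actually provide.
static bool alignmentOK(unsigned Align, unsigned BaseAlign, int64_t Offset) {
  return !BaseAlign || Align <= minAlign(BaseAlign, (uint64_t)Offset);
}
```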
@@ -470,7 +506,7 @@ void Lint::visitShl(BinaryOperator &I) {
"Undefined result: Shift count out of range", &I);
}
-static bool isZero(Value *V, TargetData *TD) {
+static bool isZero(Value *V, DataLayout *TD) {
// Assume undef could be zero.
if (isa<UndefValue>(V)) return true;
diff --git a/contrib/llvm/lib/Analysis/Loads.cpp b/contrib/llvm/lib/Analysis/Loads.cpp
index 873a275..73aa8b4 100644
--- a/contrib/llvm/lib/Analysis/Loads.cpp
+++ b/contrib/llvm/lib/Analysis/Loads.cpp
@@ -13,7 +13,7 @@
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
@@ -52,8 +52,8 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
/// bitcasts to get back to the underlying object being addressed, keeping
/// track of the offset in bytes from the GEPs relative to the result.
/// This is closely related to GetUnderlyingObject but is located
-/// here to avoid making VMCore depend on TargetData.
-static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD,
+/// here to avoid making VMCore depend on DataLayout.
+static Value *getUnderlyingObjectWithOffset(Value *V, const DataLayout *TD,
uint64_t &ByteOffset,
unsigned MaxLookup = 6) {
if (!V->getType()->isPointerTy())
@@ -85,7 +85,7 @@ static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD,
/// specified pointer, we do a quick local scan of the basic block containing
/// ScanFrom, to determine if the address is already accessed.
bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
- unsigned Align, const TargetData *TD) {
+ unsigned Align, const DataLayout *TD) {
uint64_t ByteOffset = 0;
Value *Base = V;
if (TD)
diff --git a/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp
deleted file mode 100644
index 463269d..0000000
--- a/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp
+++ /dev/null
@@ -1,362 +0,0 @@
-//===- LoopDependenceAnalysis.cpp - LDA Implementation ----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This is the (beginning) of an implementation of a loop dependence analysis
-// framework, which is used to detect dependences in memory accesses in loops.
-//
-// Please note that this is work in progress and the interface is subject to
-// change.
-//
-// TODO: adapt as implementation progresses.
-//
-// TODO: document lingo (pair, subscript, index)
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "lda"
-#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/LoopDependenceAnalysis.h"
-#include "llvm/Analysis/LoopPass.h"
-#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpressions.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/Instructions.h"
-#include "llvm/Operator.h"
-#include "llvm/Support/Allocator.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
-using namespace llvm;
-
-STATISTIC(NumAnswered, "Number of dependence queries answered");
-STATISTIC(NumAnalysed, "Number of distinct dependence pairs analysed");
-STATISTIC(NumDependent, "Number of pairs with dependent accesses");
-STATISTIC(NumIndependent, "Number of pairs with independent accesses");
-STATISTIC(NumUnknown, "Number of pairs with unknown accesses");
-
-LoopPass *llvm::createLoopDependenceAnalysisPass() {
- return new LoopDependenceAnalysis();
-}
-
-INITIALIZE_PASS_BEGIN(LoopDependenceAnalysis, "lda",
- "Loop Dependence Analysis", false, true)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
-INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
-INITIALIZE_PASS_END(LoopDependenceAnalysis, "lda",
- "Loop Dependence Analysis", false, true)
-char LoopDependenceAnalysis::ID = 0;
-
-//===----------------------------------------------------------------------===//
-// Utility Functions
-//===----------------------------------------------------------------------===//
-
-static inline bool IsMemRefInstr(const Value *V) {
- const Instruction *I = dyn_cast<const Instruction>(V);
- return I && (I->mayReadFromMemory() || I->mayWriteToMemory());
-}
-
-static void GetMemRefInstrs(const Loop *L,
- SmallVectorImpl<Instruction*> &Memrefs) {
- for (Loop::block_iterator b = L->block_begin(), be = L->block_end();
- b != be; ++b)
- for (BasicBlock::iterator i = (*b)->begin(), ie = (*b)->end();
- i != ie; ++i)
- if (IsMemRefInstr(i))
- Memrefs.push_back(i);
-}
-
-static bool IsLoadOrStoreInst(Value *I) {
- // Returns true if the load or store can be analyzed. Atomic and volatile
- // operations have properties which this analysis does not understand.
- if (LoadInst *LI = dyn_cast<LoadInst>(I))
- return LI->isUnordered();
- else if (StoreInst *SI = dyn_cast<StoreInst>(I))
- return SI->isUnordered();
- return false;
-}
-
-static Value *GetPointerOperand(Value *I) {
- if (LoadInst *i = dyn_cast<LoadInst>(I))
- return i->getPointerOperand();
- if (StoreInst *i = dyn_cast<StoreInst>(I))
- return i->getPointerOperand();
- llvm_unreachable("Value is no load or store instruction!");
-}
-
-static AliasAnalysis::AliasResult UnderlyingObjectsAlias(AliasAnalysis *AA,
- const Value *A,
- const Value *B) {
- const Value *aObj = GetUnderlyingObject(A);
- const Value *bObj = GetUnderlyingObject(B);
- return AA->alias(aObj, AA->getTypeStoreSize(aObj->getType()),
- bObj, AA->getTypeStoreSize(bObj->getType()));
-}
-
-static inline const SCEV *GetZeroSCEV(ScalarEvolution *SE) {
- return SE->getConstant(Type::getInt32Ty(SE->getContext()), 0L);
-}
-
-//===----------------------------------------------------------------------===//
-// Dependence Testing
-//===----------------------------------------------------------------------===//
-
-bool LoopDependenceAnalysis::isDependencePair(const Value *A,
- const Value *B) const {
- return IsMemRefInstr(A) &&
- IsMemRefInstr(B) &&
- (cast<const Instruction>(A)->mayWriteToMemory() ||
- cast<const Instruction>(B)->mayWriteToMemory());
-}
-
-bool LoopDependenceAnalysis::findOrInsertDependencePair(Value *A,
- Value *B,
- DependencePair *&P) {
- void *insertPos = 0;
- FoldingSetNodeID id;
- id.AddPointer(A);
- id.AddPointer(B);
-
- P = Pairs.FindNodeOrInsertPos(id, insertPos);
- if (P) return true;
-
- P = new (PairAllocator) DependencePair(id, A, B);
- Pairs.InsertNode(P, insertPos);
- return false;
-}
-
-void LoopDependenceAnalysis::getLoops(const SCEV *S,
- DenseSet<const Loop*>* Loops) const {
- // Refactor this into an SCEVVisitor, if efficiency becomes a concern.
- for (const Loop *L = this->L; L != 0; L = L->getParentLoop())
- if (!SE->isLoopInvariant(S, L))
- Loops->insert(L);
-}
-
-bool LoopDependenceAnalysis::isLoopInvariant(const SCEV *S) const {
- DenseSet<const Loop*> loops;
- getLoops(S, &loops);
- return loops.empty();
-}
-
-bool LoopDependenceAnalysis::isAffine(const SCEV *S) const {
- const SCEVAddRecExpr *rec = dyn_cast<SCEVAddRecExpr>(S);
- return isLoopInvariant(S) || (rec && rec->isAffine());
-}
-
-bool LoopDependenceAnalysis::isZIVPair(const SCEV *A, const SCEV *B) const {
- return isLoopInvariant(A) && isLoopInvariant(B);
-}
-
-bool LoopDependenceAnalysis::isSIVPair(const SCEV *A, const SCEV *B) const {
- DenseSet<const Loop*> loops;
- getLoops(A, &loops);
- getLoops(B, &loops);
- return loops.size() == 1;
-}
-
-LoopDependenceAnalysis::DependenceResult
-LoopDependenceAnalysis::analyseZIV(const SCEV *A,
- const SCEV *B,
- Subscript *S) const {
- assert(isZIVPair(A, B) && "Attempted to ZIV-test non-ZIV SCEVs!");
- return A == B ? Dependent : Independent;
-}
-
-LoopDependenceAnalysis::DependenceResult
-LoopDependenceAnalysis::analyseSIV(const SCEV *A,
- const SCEV *B,
- Subscript *S) const {
- return Unknown; // TODO: Implement.
-}
-
-LoopDependenceAnalysis::DependenceResult
-LoopDependenceAnalysis::analyseMIV(const SCEV *A,
- const SCEV *B,
- Subscript *S) const {
- return Unknown; // TODO: Implement.
-}
-
-LoopDependenceAnalysis::DependenceResult
-LoopDependenceAnalysis::analyseSubscript(const SCEV *A,
- const SCEV *B,
- Subscript *S) const {
- DEBUG(dbgs() << " Testing subscript: " << *A << ", " << *B << "\n");
-
- if (A == B) {
- DEBUG(dbgs() << " -> [D] same SCEV\n");
- return Dependent;
- }
-
- if (!isAffine(A) || !isAffine(B)) {
- DEBUG(dbgs() << " -> [?] not affine\n");
- return Unknown;
- }
-
- if (isZIVPair(A, B))
- return analyseZIV(A, B, S);
-
- if (isSIVPair(A, B))
- return analyseSIV(A, B, S);
-
- return analyseMIV(A, B, S);
-}
-
-LoopDependenceAnalysis::DependenceResult
-LoopDependenceAnalysis::analysePair(DependencePair *P) const {
- DEBUG(dbgs() << "Analysing:\n" << *P->A << "\n" << *P->B << "\n");
-
- // We only analyse loads and stores but no possible memory accesses by e.g.
- // free, call, or invoke instructions.
- if (!IsLoadOrStoreInst(P->A) || !IsLoadOrStoreInst(P->B)) {
- DEBUG(dbgs() << "--> [?] no load/store\n");
- return Unknown;
- }
-
- Value *aPtr = GetPointerOperand(P->A);
- Value *bPtr = GetPointerOperand(P->B);
-
- switch (UnderlyingObjectsAlias(AA, aPtr, bPtr)) {
- case AliasAnalysis::MayAlias:
- case AliasAnalysis::PartialAlias:
- // We can not analyse objects if we do not know about their aliasing.
- DEBUG(dbgs() << "---> [?] may alias\n");
- return Unknown;
-
- case AliasAnalysis::NoAlias:
- // If the objects noalias, they are distinct, accesses are independent.
- DEBUG(dbgs() << "---> [I] no alias\n");
- return Independent;
-
- case AliasAnalysis::MustAlias:
- break; // The underlying objects alias, test accesses for dependence.
- }
-
- const GEPOperator *aGEP = dyn_cast<GEPOperator>(aPtr);
- const GEPOperator *bGEP = dyn_cast<GEPOperator>(bPtr);
-
- if (!aGEP || !bGEP)
- return Unknown;
-
- // FIXME: Is filtering coupled subscripts necessary?
-
- // Collect GEP operand pairs (FIXME: use GetGEPOperands from BasicAA), adding
- // trailing zeroes to the smaller GEP, if needed.
- typedef SmallVector<std::pair<const SCEV*, const SCEV*>, 4> GEPOpdPairsTy;
- GEPOpdPairsTy opds;
- for(GEPOperator::const_op_iterator aIdx = aGEP->idx_begin(),
- aEnd = aGEP->idx_end(),
- bIdx = bGEP->idx_begin(),
- bEnd = bGEP->idx_end();
- aIdx != aEnd && bIdx != bEnd;
- aIdx += (aIdx != aEnd), bIdx += (bIdx != bEnd)) {
- const SCEV* aSCEV = (aIdx != aEnd) ? SE->getSCEV(*aIdx) : GetZeroSCEV(SE);
- const SCEV* bSCEV = (bIdx != bEnd) ? SE->getSCEV(*bIdx) : GetZeroSCEV(SE);
- opds.push_back(std::make_pair(aSCEV, bSCEV));
- }
-
- if (!opds.empty() && opds[0].first != opds[0].second) {
- // We cannot (yet) handle arbitrary GEP pointer offsets.
- //
- // TODO: this could be relaxed by adding the size of the underlying object
- // to the first subscript. If we have e.g. (GEP x,0,i; GEP x,2,-i) and we
- // know that x is a [100 x i8]*, we could modify the first subscript to be
- // (i, 200-i) instead of (i, -i).
- return Unknown;
- }
-
- // Now analyse the collected operand pairs (skipping the GEP ptr offsets).
- for (GEPOpdPairsTy::const_iterator i = opds.begin() + 1, end = opds.end();
- i != end; ++i) {
- Subscript subscript;
- DependenceResult result = analyseSubscript(i->first, i->second, &subscript);
- if (result != Dependent) {
- // We either proved independence or failed to analyse this subscript.
- // Further subscripts will not improve the situation, so abort early.
- return result;
- }
- P->Subscripts.push_back(subscript);
- }
- // We successfully analysed all subscripts but failed to prove independence.
- return Dependent;
-}
-
-bool LoopDependenceAnalysis::depends(Value *A, Value *B) {
- assert(isDependencePair(A, B) && "Values form no dependence pair!");
- ++NumAnswered;
-
- DependencePair *p;
- if (!findOrInsertDependencePair(A, B, p)) {
- // The pair is not cached, so analyse it.
- ++NumAnalysed;
- switch (p->Result = analysePair(p)) {
- case Dependent: ++NumDependent; break;
- case Independent: ++NumIndependent; break;
- case Unknown: ++NumUnknown; break;
- }
- }
- return p->Result != Independent;
-}
-
-//===----------------------------------------------------------------------===//
-// LoopDependenceAnalysis Implementation
-//===----------------------------------------------------------------------===//
-
-bool LoopDependenceAnalysis::runOnLoop(Loop *L, LPPassManager &) {
- this->L = L;
- AA = &getAnalysis<AliasAnalysis>();
- SE = &getAnalysis<ScalarEvolution>();
- return false;
-}
-
-void LoopDependenceAnalysis::releaseMemory() {
- Pairs.clear();
- PairAllocator.Reset();
-}
-
-void LoopDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- AU.addRequiredTransitive<AliasAnalysis>();
- AU.addRequiredTransitive<ScalarEvolution>();
-}
-
-static void PrintLoopInfo(raw_ostream &OS,
- LoopDependenceAnalysis *LDA, const Loop *L) {
- if (!L->empty()) return; // ignore non-innermost loops
-
- SmallVector<Instruction*, 8> memrefs;
- GetMemRefInstrs(L, memrefs);
-
- OS << "Loop at depth " << L->getLoopDepth() << ", header block: ";
- WriteAsOperand(OS, L->getHeader(), false);
- OS << "\n";
-
- OS << " Load/store instructions: " << memrefs.size() << "\n";
- for (SmallVector<Instruction*, 8>::const_iterator x = memrefs.begin(),
- end = memrefs.end(); x != end; ++x)
- OS << "\t" << (x - memrefs.begin()) << ": " << **x << "\n";
-
- OS << " Pairwise dependence results:\n";
- for (SmallVector<Instruction*, 8>::const_iterator x = memrefs.begin(),
- end = memrefs.end(); x != end; ++x)
- for (SmallVector<Instruction*, 8>::const_iterator y = x + 1;
- y != end; ++y)
- if (LDA->isDependencePair(*x, *y))
- OS << "\t" << (x - memrefs.begin()) << "," << (y - memrefs.begin())
- << ": " << (LDA->depends(*x, *y) ? "dependent" : "independent")
- << "\n";
-}
-
-void LoopDependenceAnalysis::print(raw_ostream &OS, const Module*) const {
- // TODO: doc why const_cast is safe
- PrintLoopInfo(OS, const_cast<LoopDependenceAnalysis*>(this), this->L);
-}
diff --git a/contrib/llvm/lib/Analysis/LoopInfo.cpp b/contrib/llvm/lib/Analysis/LoopInfo.cpp
index 20c33a3..8341f9d 100644
--- a/contrib/llvm/lib/Analysis/LoopInfo.cpp
+++ b/contrib/llvm/lib/Analysis/LoopInfo.cpp
@@ -306,9 +306,11 @@ BasicBlock *Loop::getUniqueExitBlock() const {
return 0;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Loop::dump() const {
print(dbgs());
}
+#endif
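
The same guard is applied to PHITransAddr::dump() further down; compiling dump() only into +Asserts builds (or when LLVM_ENABLE_DUMP is defined) keeps the helpers out of release binaries. A self-contained sketch of the pattern:

```cpp
#include <cstdio>

struct Node {
  int Id;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  // Present only in asserts builds or when dumps are explicitly enabled.
  void dump() const { std::fprintf(stderr, "Node %d\n", Id); }
#endif
};
```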
//===----------------------------------------------------------------------===//
// UnloopUpdater implementation
@@ -429,8 +431,8 @@ void UnloopUpdater::updateSubloopParents() {
Unloop->removeChildLoop(llvm::prior(Unloop->end()));
assert(SubloopParents.count(Subloop) && "DFS failed to visit subloop");
- if (SubloopParents[Subloop])
- SubloopParents[Subloop]->addChildLoop(Subloop);
+ if (Loop *Parent = SubloopParents[Subloop])
+ Parent->addChildLoop(Subloop);
else
LI->addTopLevelLoop(Subloop);
}
@@ -456,9 +458,8 @@ Loop *UnloopUpdater::getNearestLoop(BasicBlock *BB, Loop *BBLoop) {
assert(Subloop && "subloop is not an ancestor of the original loop");
}
// Get the current nearest parent of the Subloop exits, initially Unloop.
- if (!SubloopParents.count(Subloop))
- SubloopParents[Subloop] = Unloop;
- NearLoop = SubloopParents[Subloop];
+ NearLoop =
+ SubloopParents.insert(std::make_pair(Subloop, Unloop)).first->second;
}
succ_iterator I = succ_begin(BB), E = succ_end(BB);
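
Both hunks above trade repeated map lookups for one: the first caches the operator[] result in a local, the second relies on insert() returning the existing entry when the key is already present. A standalone sketch of the insert()-based idiom:

```cpp
#include <map>
#include <string>

// insert() is a no-op when the key exists, so one call replaces a count(),
// a conditional store, and a final load. The 0 is the default value the
// original code would have written on first sight of the key.
int &getOrDefault(std::map<std::string, int> &M, const std::string &K) {
  return M.insert(std::make_pair(K, 0)).first->second;
}
```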
diff --git a/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp b/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
index e77d2ff..0a539fe 100644
--- a/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -25,7 +25,8 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
@@ -39,7 +40,7 @@ enum AllocType {
};
struct AllocFnsTy {
- const char *Name;
+ LibFunc::Func Func;
AllocType AllocTy;
unsigned char NumParams;
// First and Second size parameters (or -1 if unused)
@@ -49,22 +50,22 @@ struct AllocFnsTy {
// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
// know which functions are nounwind, noalias, nocapture parameters, etc.
static const AllocFnsTy AllocationFnData[] = {
- {"malloc", MallocLike, 1, 0, -1},
- {"valloc", MallocLike, 1, 0, -1},
- {"_Znwj", MallocLike, 1, 0, -1}, // new(unsigned int)
- {"_ZnwjRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new(unsigned int, nothrow)
- {"_Znwm", MallocLike, 1, 0, -1}, // new(unsigned long)
- {"_ZnwmRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new(unsigned long, nothrow)
- {"_Znaj", MallocLike, 1, 0, -1}, // new[](unsigned int)
- {"_ZnajRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new[](unsigned int, nothrow)
- {"_Znam", MallocLike, 1, 0, -1}, // new[](unsigned long)
- {"_ZnamRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new[](unsigned long, nothrow)
- {"posix_memalign", MallocLike, 3, 2, -1},
- {"calloc", CallocLike, 2, 0, 1},
- {"realloc", ReallocLike, 2, 1, -1},
- {"reallocf", ReallocLike, 2, 1, -1},
- {"strdup", StrDupLike, 1, -1, -1},
- {"strndup", StrDupLike, 2, 1, -1}
+ {LibFunc::malloc, MallocLike, 1, 0, -1},
+ {LibFunc::valloc, MallocLike, 1, 0, -1},
+ {LibFunc::Znwj, MallocLike, 1, 0, -1}, // new(unsigned int)
+ {LibFunc::ZnwjRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new(unsigned int, nothrow)
+ {LibFunc::Znwm, MallocLike, 1, 0, -1}, // new(unsigned long)
+ {LibFunc::ZnwmRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new(unsigned long, nothrow)
+ {LibFunc::Znaj, MallocLike, 1, 0, -1}, // new[](unsigned int)
+ {LibFunc::ZnajRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new[](unsigned int, nothrow)
+ {LibFunc::Znam, MallocLike, 1, 0, -1}, // new[](unsigned long)
+ {LibFunc::ZnamRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new[](unsigned long, nothrow)
+ {LibFunc::posix_memalign, MallocLike, 3, 2, -1},
+ {LibFunc::calloc, CallocLike, 2, 0, 1},
+ {LibFunc::realloc, ReallocLike, 2, 1, -1},
+ {LibFunc::reallocf, ReallocLike, 2, 1, -1},
+ {LibFunc::strdup, StrDupLike, 1, -1, -1},
+ {LibFunc::strndup, StrDupLike, 2, 1, -1}
};
@@ -85,15 +86,22 @@ static Function *getCalledFunction(const Value *V, bool LookThroughBitCast) {
/// \brief Returns the allocation data for the given value if it is a call to a
/// known allocation function, and NULL otherwise.
static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
+ const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false) {
Function *Callee = getCalledFunction(V, LookThroughBitCast);
if (!Callee)
return 0;
+ // Make sure that the function is available.
+ StringRef FnName = Callee->getName();
+ LibFunc::Func TLIFn;
+ if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
+ return 0;
+
unsigned i = 0;
bool found = false;
for ( ; i < array_lengthof(AllocationFnData); ++i) {
- if (Callee->getName() == AllocationFnData[i].Name) {
+ if (AllocationFnData[i].Func == TLIFn) {
found = true;
break;
}
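
With the table keyed by LibFunc enums, getAllocationData() first resolves the callee name through TargetLibraryInfo and refuses functions the target does not actually provide, then searches the table by enum. A standalone sketch of that two-step lookup, with toy stand-ins for the TLI query:

```cpp
#include <cstring>

enum LibFn { LF_malloc, LF_calloc, LF_unknown };

struct AllocEntry { LibFn Fn; unsigned NumParams; };
static const AllocEntry Table[] = { { LF_malloc, 1 }, { LF_calloc, 2 } };

// Stand-in for TLI->getLibFunc() + TLI->has(): map the name to an enum and
// gate on per-target availability.
static LibFn classify(const char *Name, bool TargetHasIt) {
  if (!TargetHasIt) return LF_unknown;
  if (!std::strcmp(Name, "malloc")) return LF_malloc;
  if (!std::strcmp(Name, "calloc")) return LF_calloc;
  return LF_unknown;
}

static const AllocEntry *lookup(LibFn Fn) {
  for (unsigned i = 0; i != sizeof(Table) / sizeof(Table[0]); ++i)
    if (Table[i].Fn == Fn)
      return &Table[i];
  return 0; // not a known allocation function
}
```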
@@ -106,7 +114,6 @@ static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
return 0;
// Check function prototype.
- // FIXME: Check the nobuiltin metadata?? (PR5130)
int FstParam = FnData->FstParam;
int SndParam = FnData->SndParam;
FunctionType *FTy = Callee->getFunctionType();
@@ -125,64 +132,72 @@ static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
static bool hasNoAliasAttr(const Value *V, bool LookThroughBitCast) {
ImmutableCallSite CS(LookThroughBitCast ? V->stripPointerCasts() : V);
- return CS && CS.hasFnAttr(Attribute::NoAlias);
+ return CS && CS.hasFnAttr(Attributes::NoAlias);
}
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
-bool llvm::isAllocationFn(const Value *V, bool LookThroughBitCast) {
- return getAllocationData(V, AnyAlloc, LookThroughBitCast);
+bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
+ return getAllocationData(V, AnyAlloc, TLI, LookThroughBitCast);
}
/// \brief Tests if a value is a call or invoke to a function that returns a
/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
-bool llvm::isNoAliasFn(const Value *V, bool LookThroughBitCast) {
+bool llvm::isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
// it's safe to consider realloc as noalias since accessing the original
// pointer is undefined behavior
- return isAllocationFn(V, LookThroughBitCast) ||
+ return isAllocationFn(V, TLI, LookThroughBitCast) ||
hasNoAliasAttr(V, LookThroughBitCast);
}
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
-bool llvm::isMallocLikeFn(const Value *V, bool LookThroughBitCast) {
- return getAllocationData(V, MallocLike, LookThroughBitCast);
+bool llvm::isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
+ return getAllocationData(V, MallocLike, TLI, LookThroughBitCast);
}
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
-bool llvm::isCallocLikeFn(const Value *V, bool LookThroughBitCast) {
- return getAllocationData(V, CallocLike, LookThroughBitCast);
+bool llvm::isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
+ return getAllocationData(V, CallocLike, TLI, LookThroughBitCast);
}
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
-bool llvm::isAllocLikeFn(const Value *V, bool LookThroughBitCast) {
- return getAllocationData(V, AllocLike, LookThroughBitCast);
+bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
+ return getAllocationData(V, AllocLike, TLI, LookThroughBitCast);
}
/// \brief Tests if a value is a call or invoke to a library function that
/// reallocates memory (such as realloc).
-bool llvm::isReallocLikeFn(const Value *V, bool LookThroughBitCast) {
- return getAllocationData(V, ReallocLike, LookThroughBitCast);
+bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
+ return getAllocationData(V, ReallocLike, TLI, LookThroughBitCast);
}
/// extractMallocCall - Returns the corresponding CallInst if the instruction
/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
/// ignore InvokeInst here.
-const CallInst *llvm::extractMallocCall(const Value *I) {
- return isMallocLikeFn(I) ? dyn_cast<CallInst>(I) : 0;
+const CallInst *llvm::extractMallocCall(const Value *I,
+ const TargetLibraryInfo *TLI) {
+ return isMallocLikeFn(I, TLI) ? dyn_cast<CallInst>(I) : 0;
}
-static Value *computeArraySize(const CallInst *CI, const TargetData *TD,
+static Value *computeArraySize(const CallInst *CI, const DataLayout *TD,
+ const TargetLibraryInfo *TLI,
bool LookThroughSExt = false) {
if (!CI)
return NULL;
// The size of the malloc's result type must be known to determine array size.
- Type *T = getMallocAllocatedType(CI);
+ Type *T = getMallocAllocatedType(CI, TLI);
if (!T || !T->isSized() || !TD)
return NULL;
@@ -204,9 +219,11 @@ static Value *computeArraySize(const CallInst *CI, const TargetData *TD,
/// isArrayMalloc - Returns the corresponding CallInst if the instruction
/// is a call to malloc whose array size can be determined and the array size
/// is not constant 1. Otherwise, return NULL.
-const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) {
- const CallInst *CI = extractMallocCall(I);
- Value *ArraySize = computeArraySize(CI, TD);
+const CallInst *llvm::isArrayMalloc(const Value *I,
+ const DataLayout *TD,
+ const TargetLibraryInfo *TLI) {
+ const CallInst *CI = extractMallocCall(I, TLI);
+ Value *ArraySize = computeArraySize(CI, TD, TLI);
if (ArraySize &&
ArraySize != ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
@@ -221,8 +238,9 @@ const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) {
/// 0: PointerType is the calls' return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
-PointerType *llvm::getMallocType(const CallInst *CI) {
- assert(isMallocLikeFn(CI) && "getMallocType and not malloc call");
+PointerType *llvm::getMallocType(const CallInst *CI,
+ const TargetLibraryInfo *TLI) {
+ assert(isMallocLikeFn(CI, TLI) && "getMallocType and not malloc call");
PointerType *MallocType = NULL;
unsigned NumOfBitCastUses = 0;
@@ -252,8 +270,9 @@ PointerType *llvm::getMallocType(const CallInst *CI) {
/// 0: PointerType is the malloc calls' return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
-Type *llvm::getMallocAllocatedType(const CallInst *CI) {
- PointerType *PT = getMallocType(CI);
+Type *llvm::getMallocAllocatedType(const CallInst *CI,
+ const TargetLibraryInfo *TLI) {
+ PointerType *PT = getMallocType(CI, TLI);
return PT ? PT->getElementType() : NULL;
}
@@ -262,22 +281,24 @@ Type *llvm::getMallocAllocatedType(const CallInst *CI) {
/// then return that multiple. For non-array mallocs, the multiple is
/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
/// determined.
-Value *llvm::getMallocArraySize(CallInst *CI, const TargetData *TD,
+Value *llvm::getMallocArraySize(CallInst *CI, const DataLayout *TD,
+ const TargetLibraryInfo *TLI,
bool LookThroughSExt) {
- assert(isMallocLikeFn(CI) && "getMallocArraySize and not malloc call");
- return computeArraySize(CI, TD, LookThroughSExt);
+ assert(isMallocLikeFn(CI, TLI) && "getMallocArraySize and not malloc call");
+ return computeArraySize(CI, TD, TLI, LookThroughSExt);
}
/// extractCallocCall - Returns the corresponding CallInst if the instruction
/// is a calloc call.
-const CallInst *llvm::extractCallocCall(const Value *I) {
- return isCallocLikeFn(I) ? cast<CallInst>(I) : 0;
+const CallInst *llvm::extractCallocCall(const Value *I,
+ const TargetLibraryInfo *TLI) {
+ return isCallocLikeFn(I, TLI) ? cast<CallInst>(I) : 0;
}
/// isFreeCall - Returns non-null if the value is a call to the builtin free()
-const CallInst *llvm::isFreeCall(const Value *I) {
+const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
const CallInst *CI = dyn_cast<CallInst>(I);
if (!CI)
return 0;
@@ -285,9 +306,14 @@ const CallInst *llvm::isFreeCall(const Value *I) {
if (Callee == 0 || !Callee->isDeclaration())
return 0;
- if (Callee->getName() != "free" &&
- Callee->getName() != "_ZdlPv" && // operator delete(void*)
- Callee->getName() != "_ZdaPv") // operator delete[](void*)
+ StringRef FnName = Callee->getName();
+ LibFunc::Func TLIFn;
+ if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
+ return 0;
+
+ if (TLIFn != LibFunc::free &&
+ TLIFn != LibFunc::ZdlPv && // operator delete(void*)
+ TLIFn != LibFunc::ZdaPv) // operator delete[](void*)
return 0;
// Check free prototype.
@@ -315,12 +341,12 @@ const CallInst *llvm::isFreeCall(const Value *I) {
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of allocas,
/// byval arguments, and global variables.
-bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD,
- bool RoundToAlign) {
+bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout *TD,
+ const TargetLibraryInfo *TLI, bool RoundToAlign) {
if (!TD)
return false;
- ObjectSizeOffsetVisitor Visitor(TD, Ptr->getContext(), RoundToAlign);
+ ObjectSizeOffsetVisitor Visitor(TD, TLI, Ptr->getContext(), RoundToAlign);
SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr));
if (!Visitor.bothKnown(Data))
return false;
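
RoundToAlign, mentioned in the comment above, pads the computed size up to the object's alignment so that, for example, a 5-byte alloca aligned to 8 counts as 8 addressable bytes. A sketch of the rounding, assuming Align is a power of two:

```cpp
#include <stdint.h>

// Round Size up to the next multiple of Align (a power of two); with
// Align == 0 the size is returned unchanged, as when alignment is unknown.
static uint64_t roundToAlign(uint64_t Size, uint64_t Align) {
  if (Align == 0)
    return Size;
  return (Size + Align - 1) & ~(Align - 1);
}
```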
@@ -347,10 +373,11 @@ APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
return Size;
}
-ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const TargetData *TD,
+ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout *TD,
+ const TargetLibraryInfo *TLI,
LLVMContext &Context,
bool RoundToAlign)
-: TD(TD), RoundToAlign(RoundToAlign) {
+: TD(TD), TLI(TLI), RoundToAlign(RoundToAlign) {
IntegerType *IntTy = TD->getIntPtrType(Context);
IntTyBits = IntTy->getBitWidth();
Zero = APInt::getNullValue(IntTyBits);
@@ -358,11 +385,16 @@ ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const TargetData *TD,
SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
V = V->stripPointerCasts();
+ if (Instruction *I = dyn_cast<Instruction>(V)) {
+ // If we have already seen this instruction, bail out. Cycles can happen in
+ // unreachable code after constant propagation.
+ if (!SeenInsts.insert(I))
+ return unknown();
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
- return visitGEPOperator(*GEP);
- if (Instruction *I = dyn_cast<Instruction>(V))
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
+ return visitGEPOperator(*GEP);
return visit(*I);
+ }
if (Argument *A = dyn_cast<Argument>(V))
return visitArgument(*A);
if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
@@ -371,9 +403,12 @@ SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
return visitGlobalVariable(*GV);
if (UndefValue *UV = dyn_cast<UndefValue>(V))
return visitUndefValue(*UV);
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
if (CE->getOpcode() == Instruction::IntToPtr)
return unknown(); // clueless
+ if (CE->getOpcode() == Instruction::GetElementPtr)
+ return visitGEPOperator(cast<GEPOperator>(*CE));
+ }
DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: " << *V
<< '\n');
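
The SeenInsts guard added to compute() turns a potential infinite recursion (self-referencing GEPs or selects in unreachable code) into an "unknown" answer, which also makes the per-visitor self-reference checks removed below redundant. A standalone sketch of the guard:

```cpp
#include <set>

struct Node { Node *Next; int Size; };

// Remember every node entered; revisiting one means the chain loops, so
// give up instead of recursing forever, as compute() now does.
static int chainSize(Node *N, std::set<Node *> &Seen) {
  if (!Seen.insert(N).second)
    return -1; // cycle: the analogue of unknown()
  return N->Next ? chainSize(N->Next, Seen) : N->Size;
}
```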
@@ -408,7 +443,8 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
}
SizeOffsetType ObjectSizeOffsetVisitor::visitCallSite(CallSite CS) {
- const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc);
+ const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc,
+ TLI);
if (!FnData)
return unknown();
@@ -473,10 +509,6 @@ ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {
}
SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) {
- // Ignore self-referencing GEPs, they can occur in unreachable code.
- if (&GEP == GEP.getPointerOperand())
- return unknown();
-
SizeOffsetType PtrData = compute(GEP.getPointerOperand());
if (!bothKnown(PtrData) || !GEP.hasAllConstantIndices())
return unknown();
@@ -510,10 +542,6 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode&) {
}
SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
- // ignore malformed self-looping selects
- if (I.getTrueValue() == &I || I.getFalseValue() == &I)
- return unknown();
-
SizeOffsetType TrueSide = compute(I.getTrueValue());
SizeOffsetType FalseSide = compute(I.getFalseValue());
if (bothKnown(TrueSide) && bothKnown(FalseSide) && TrueSide == FalseSide)
@@ -531,10 +559,10 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
}
-ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const TargetData *TD,
+ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const DataLayout *TD,
+ const TargetLibraryInfo *TLI,
LLVMContext &Context)
-: TD(TD), Context(Context), Builder(Context, TargetFolder(TD)),
-Visitor(TD, Context) {
+: TD(TD), TLI(TLI), Context(Context), Builder(Context, TargetFolder(TD)) {
IntTy = TD->getIntPtrType(Context);
Zero = ConstantInt::get(IntTy, 0);
}
@@ -559,6 +587,7 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
+ ObjectSizeOffsetVisitor Visitor(TD, TLI, Context);
SizeOffsetType Const = Visitor.compute(V);
if (Visitor.bothKnown(Const))
return std::make_pair(ConstantInt::get(Context, Const.first),
@@ -621,7 +650,8 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallSite(CallSite CS) {
- const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc);
+ const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc,
+ TLI);
if (!FnData)
return unknown();
@@ -719,10 +749,6 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) {
- // ignore malformed self-looping selects
- if (I.getTrueValue() == &I || I.getFalseValue() == &I)
- return unknown();
-
SizeOffsetEvalType TrueSide = compute_(I.getTrueValue());
SizeOffsetEvalType FalseSide = compute_(I.getFalseValue());
diff --git a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 059e574..9872890 100644
--- a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -30,7 +30,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/PredIteratorCache.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
@@ -89,7 +89,7 @@ void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
bool MemoryDependenceAnalysis::runOnFunction(Function &) {
AA = &getAnalysis<AliasAnalysis>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
DT = getAnalysisIfAvailable<DominatorTree>();
if (PredCache == 0)
PredCache.reset(new PredIteratorCache());
@@ -148,7 +148,7 @@ AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
return AliasAnalysis::ModRef;
}
- if (const CallInst *CI = isFreeCall(Inst)) {
+ if (const CallInst *CI = isFreeCall(Inst, AA->getTargetLibraryInfo())) {
// calls to free() deallocate the entire structure
Loc = AliasAnalysis::Location(CI->getArgOperand(0));
return AliasAnalysis::Mod;
@@ -256,7 +256,7 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
const Value *&MemLocBase,
int64_t &MemLocOffs,
const LoadInst *LI,
- const TargetData *TD) {
+ const DataLayout *TD) {
// If we have no target data, we can't do this.
if (TD == 0) return false;
@@ -280,7 +280,7 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
unsigned MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
unsigned MemLocSize, const LoadInst *LI,
- const TargetData &TD) {
+ const DataLayout &TD) {
// We can only extend simple integer loads.
if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;
@@ -327,12 +327,12 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
return 0;
if (LIOffs+NewLoadByteSize > MemLocEnd &&
- LI->getParent()->getParent()->hasFnAttr(Attribute::AddressSafety)) {
+ LI->getParent()->getParent()->getFnAttributes().
+ hasAttribute(Attributes::AddressSafety))
// We will be reading past the location accessed by the original program.
// While this is safe in a regular build, Address Safety analysis tools
// may start reporting false warnings. So, don't do widening.
return 0;
- }
// If a load of this width would include all of MemLoc, then we succeed.
if (LIOffs+NewLoadByteSize >= MemLocEnd)
@@ -479,12 +479,20 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// a subsequent bitcast of the malloc call result. There can be stores to
// the malloced memory between the malloc call and its bitcast uses, and we
// need to continue scanning until the malloc call.
- if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst)) {
+ const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo();
+ if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) {
const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);
if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
return MemDepResult::getDef(Inst);
- continue;
+ // Be conservative if the accessed pointer may alias the allocation.
+ if (AA->alias(Inst, AccessPtr) != AliasAnalysis::NoAlias)
+ return MemDepResult::getClobber(Inst);
+ // If the allocation is not aliased and does not read memory (like
+ // strdup), it is safe to ignore.
+ if (isa<AllocaInst>(Inst) ||
+ isMallocLikeFn(Inst, TLI) || isCallocLikeFn(Inst, TLI))
+ continue;
}
// See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
@@ -975,7 +983,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
I != E; ++I) {
Visited.insert(std::make_pair(I->getBB(), Addr));
- if (!I->getResult().isNonLocal())
+ if (!I->getResult().isNonLocal() && DT->isReachableFromEntry(I->getBB()))
Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr));
}
++NumCacheCompleteNonLocalPtr;
@@ -1021,7 +1029,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
NumSortedEntries);
// If we got a Def or Clobber, add this to the list of results.
- if (!Dep.isNonLocal()) {
+ if (!Dep.isNonLocal() && DT->isReachableFromEntry(BB)) {
Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
continue;
}
diff --git a/contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp
index 101c2d5..2eb4137 100644
--- a/contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp
@@ -15,7 +15,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
namespace {
@@ -36,7 +36,7 @@ namespace {
virtual void initializePass() {
// Note: NoAA does not call InitializeAliasAnalysis because it's
// special and does not support chaining.
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
}
virtual AliasResult alias(const Location &LocA, const Location &LocB) {
diff --git a/contrib/llvm/lib/Analysis/PHITransAddr.cpp b/contrib/llvm/lib/Analysis/PHITransAddr.cpp
index 38cb1c9..c35737e 100644
--- a/contrib/llvm/lib/Analysis/PHITransAddr.cpp
+++ b/contrib/llvm/lib/Analysis/PHITransAddr.cpp
@@ -41,6 +41,7 @@ static bool CanPHITrans(Instruction *Inst) {
return false;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void PHITransAddr::dump() const {
if (Addr == 0) {
dbgs() << "PHITransAddr: null\n";
@@ -50,6 +51,7 @@ void PHITransAddr::dump() const {
for (unsigned i = 0, e = InstInputs.size(); i != e; ++i)
dbgs() << " Input #" << i << " is " << *InstInputs[i] << "\n";
}
+#endif
static bool VerifySubExpr(Value *Expr,
diff --git a/contrib/llvm/lib/Analysis/ProfileDataLoader.cpp b/contrib/llvm/lib/Analysis/ProfileDataLoader.cpp
new file mode 100644
index 0000000..a4f634a
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/ProfileDataLoader.cpp
@@ -0,0 +1,155 @@
+//===- ProfileDataLoader.cpp - Load profile information from disk ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The ProfileDataLoader class is used to load raw profiling data from the dump
+// file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Module.h"
+#include "llvm/InstrTypes.h"
+#include "llvm/Analysis/ProfileDataLoader.h"
+#include "llvm/Analysis/ProfileDataTypes.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
+#include <cstdio>
+#include <cstdlib>
+using namespace llvm;
+
+raw_ostream &llvm::operator<<(raw_ostream &O, std::pair<const BasicBlock *,
+ const BasicBlock *> E) {
+ O << "(";
+
+ if (E.first)
+ O << E.first->getName();
+ else
+ O << "0";
+
+ O << ",";
+
+ if (E.second)
+ O << E.second->getName();
+ else
+ O << "0";
+
+ return O << ")";
+}
+
+/// AddCounts - Add 'A' and 'B', accounting for the fact that the value of one
+/// (or both) may not be defined.
+static unsigned AddCounts(unsigned A, unsigned B) {
+ // If either value is undefined, use the other.
+ // Undefined + undefined = undefined.
+ if (A == ProfileDataLoader::Uncounted) return B;
+ if (B == ProfileDataLoader::Uncounted) return A;
+
+ return A + B;
+}
+
+/// ReadProfilingData - Load 'NumEntries' items of type 'T' from file 'F'
+template <typename T>
+static void ReadProfilingData(const char *ToolName, FILE *F,
+ T *Data, size_t NumEntries) {
+ // Read in the block of data...
+ if (fread(Data, sizeof(T), NumEntries, F) != NumEntries)
+ report_fatal_error(Twine(ToolName) + ": Profiling data truncated");
+}
+
+/// ReadProfilingNumEntries - Read how many entries are in this profiling data
+/// packet.
+static unsigned ReadProfilingNumEntries(const char *ToolName, FILE *F,
+ bool ShouldByteSwap) {
+ unsigned Entry;
+ ReadProfilingData<unsigned>(ToolName, F, &Entry, 1);
+ return ShouldByteSwap ? ByteSwap_32(Entry) : Entry;
+}
+
+/// ReadProfilingBlock - Read the number of entries in the next profiling data
+/// packet and then accumulate the entries into 'Data'.
+static void ReadProfilingBlock(const char *ToolName, FILE *F,
+ bool ShouldByteSwap,
+ SmallVector<unsigned, 32> &Data) {
+ // Read the number of entries...
+ unsigned NumEntries = ReadProfilingNumEntries(ToolName, F, ShouldByteSwap);
+
+ // Read in the data.
+ SmallVector<unsigned, 8> TempSpace(NumEntries);
+ ReadProfilingData<unsigned>(ToolName, F, TempSpace.data(), NumEntries);
+
+ // Make sure we have enough space ...
+ if (Data.size() < NumEntries)
+ Data.resize(NumEntries, ProfileDataLoader::Uncounted);
+
+ // Accumulate the data we just read into the existing data.
+ for (unsigned i = 0; i < NumEntries; ++i) {
+ unsigned Entry = ShouldByteSwap ? ByteSwap_32(TempSpace[i]) : TempSpace[i];
+ Data[i] = AddCounts(Entry, Data[i]);
+ }
+}
+
+/// ReadProfilingArgBlock - Read the command line arguments that the program was
+/// run with when the current profiling data packet(s) were generated.
+static void ReadProfilingArgBlock(const char *ToolName, FILE *F,
+ bool ShouldByteSwap,
+ SmallVector<std::string, 1> &CommandLines) {
+ // Read the number of bytes ...
+ unsigned ArgLength = ReadProfilingNumEntries(ToolName, F, ShouldByteSwap);
+
+ // Read in the arguments (if there are any to read). Round up the length to
+ // the nearest 4-byte multiple.
+ SmallVector<char, 8> Args(ArgLength+4);
+ if (ArgLength)
+ ReadProfilingData<char>(ToolName, F, Args.data(), (ArgLength+3) & ~3);
+
+ // Store the arguments.
+ CommandLines.push_back(std::string(&Args[0], &Args[ArgLength]));
+}
+
+const unsigned ProfileDataLoader::Uncounted = ~0U;
+
+/// ProfileDataLoader ctor - Read the specified profiling data file, reporting
+/// a fatal error if the file is invalid or broken.
+ProfileDataLoader::ProfileDataLoader(const char *ToolName,
+ const std::string &Filename)
+ : Filename(Filename) {
+ FILE *F = fopen(Filename.c_str(), "rb");
+ if (F == 0)
+ report_fatal_error(Twine(ToolName) + ": Error opening '" +
+ Filename + "': ");
+
+ // Keep reading packets until we run out of them.
+ unsigned PacketType;
+ while (fread(&PacketType, sizeof(unsigned), 1, F) == 1) {
+ // If the low eight bits of the packet are zero, we must be dealing with an
+ // endianness mismatch. Byteswap all words read from the profiling
+ // information. This can happen when the compiler host and target have
+ // different endianness.
+ bool ShouldByteSwap = (char)PacketType == 0;
+ PacketType = ShouldByteSwap ? ByteSwap_32(PacketType) : PacketType;
+
+ switch (PacketType) {
+ case ArgumentInfo:
+ ReadProfilingArgBlock(ToolName, F, ShouldByteSwap, CommandLines);
+ break;
+
+ case EdgeInfo:
+ ReadProfilingBlock(ToolName, F, ShouldByteSwap, EdgeCounts);
+ break;
+
+ default:
+ report_fatal_error(std::string(ToolName)
+ + ": Unknown profiling packet type");
+ break;
+ }
+ }
+
+ fclose(F);
+}
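
The byte-swap test in the constructor works because packet type codes are small nonzero integers: a zero low byte can only mean the file was produced on a host of the opposite endianness. A standalone sketch of the detection and the swap ByteSwap_32 performs:

```cpp
#include <stdint.h>

static uint32_t byteSwap32(uint32_t V) {
  return (V >> 24) | ((V >> 8) & 0xFF00U) |
         ((V << 8) & 0xFF0000U) | (V << 24);
}

// Packet types occupy the low byte when endianness matches; a zero low
// byte means every word read from the file must be swapped.
static uint32_t readPacketType(uint32_t RawWord, bool &ShouldByteSwap) {
  ShouldByteSwap = (RawWord & 0xFFU) == 0;
  return ShouldByteSwap ? byteSwap32(RawWord) : RawWord;
}
```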
diff --git a/contrib/llvm/lib/Analysis/ProfileDataLoaderPass.cpp b/contrib/llvm/lib/Analysis/ProfileDataLoaderPass.cpp
new file mode 100644
index 0000000..c43cff0
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/ProfileDataLoaderPass.cpp
@@ -0,0 +1,188 @@
+//===- ProfileDataLoaderPass.cpp - Set branch weight metadata from prof ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass loads profiling data from a dump file and sets branch weight
+// metadata.
+//
+// TODO: Replace all "profile-metadata-loader" strings with "profile-loader"
+// once ProfileInfo etc. has been removed.
+//
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "profile-metadata-loader"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/BasicBlock.h"
+#include "llvm/InstrTypes.h"
+#include "llvm/Module.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/MDBuilder.h"
+#include "llvm/Metadata.h"
+#include "llvm/Pass.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/ProfileDataLoader.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Format.h"
+#include "llvm/ADT/Statistic.h"
+using namespace llvm;
+
+STATISTIC(NumEdgesRead, "The # of edges read.");
+STATISTIC(NumTermsAnnotated, "The # of terminator instructions annotated.");
+
+static cl::opt<std::string>
+ProfileMetadataFilename("profile-file", cl::init("llvmprof.out"),
+ cl::value_desc("filename"),
+ cl::desc("Profile file loaded by -profile-metadata-loader"));
+
+namespace {
+ /// This pass loads profiling data from a dump file and sets branch weight
+ /// metadata.
+ class ProfileMetadataLoaderPass : public ModulePass {
+ std::string Filename;
+ public:
+ static char ID; // Class identification, replacement for typeinfo
+ explicit ProfileMetadataLoaderPass(const std::string &filename = "")
+ : ModulePass(ID), Filename(filename) {
+ initializeProfileMetadataLoaderPassPass(*PassRegistry::getPassRegistry());
+ if (filename.empty()) Filename = ProfileMetadataFilename;
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ }
+
+ virtual const char *getPassName() const {
+ return "Profile loader";
+ }
+
+ virtual void readEdge(unsigned, ProfileData&, ProfileData::Edge,
+ ArrayRef<unsigned>);
+ virtual unsigned matchEdges(Module&, ProfileData&, ArrayRef<unsigned>);
+ virtual void setBranchWeightMetadata(Module&, ProfileData&);
+
+ virtual bool runOnModule(Module &M);
+ };
+} // End of anonymous namespace
+
+char ProfileMetadataLoaderPass::ID = 0;
+INITIALIZE_PASS_BEGIN(ProfileMetadataLoaderPass, "profile-metadata-loader",
+ "Load profile information from llvmprof.out", false, true)
+INITIALIZE_PASS_END(ProfileMetadataLoaderPass, "profile-metadata-loader",
+ "Load profile information from llvmprof.out", false, true)
+
+char &llvm::ProfileMetadataLoaderPassID = ProfileMetadataLoaderPass::ID;
+
+/// createProfileMetadataLoaderPass - This function returns a Pass that loads
+/// the profiling information for the module from the specified filename,
+/// making it available to the optimizers.
+ModulePass *llvm::createProfileMetadataLoaderPass() {
+ return new ProfileMetadataLoaderPass();
+}
+ModulePass *llvm::createProfileMetadataLoaderPass(const std::string &Filename) {
+ return new ProfileMetadataLoaderPass(Filename);
+}
+
+/// readEdge - Take the value from a profile counter and assign it to an edge.
+void ProfileMetadataLoaderPass::readEdge(unsigned ReadCount,
+ ProfileData &PB, ProfileData::Edge e,
+ ArrayRef<unsigned> Counters) {
+ if (ReadCount >= Counters.size()) return;
+
+ unsigned weight = Counters[ReadCount];
+ assert(weight != ProfileDataLoader::Uncounted);
+ PB.addEdgeWeight(e, weight);
+
+ DEBUG(dbgs() << "-- Read Edge Counter for " << e
+ << " (# "<< (ReadCount) << "): "
+ << PB.getEdgeWeight(e) << "\n");
+}
+
+/// matchEdges - Link every profile counter with an edge.
+unsigned ProfileMetadataLoaderPass::matchEdges(Module &M, ProfileData &PB,
+ ArrayRef<unsigned> Counters) {
+ if (Counters.size() == 0) return 0;
+
+ unsigned ReadCount = 0;
+
+ for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
+ if (F->isDeclaration()) continue;
+ DEBUG(dbgs() << "Loading edges in '" << F->getName() << "'\n");
+ readEdge(ReadCount++, PB, PB.getEdge(0, &F->getEntryBlock()), Counters);
+ for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
+ TerminatorInst *TI = BB->getTerminator();
+ for (unsigned s = 0, e = TI->getNumSuccessors(); s != e; ++s) {
+ readEdge(ReadCount++, PB, PB.getEdge(BB,TI->getSuccessor(s)),
+ Counters);
+ }
+ }
+ }
+
+ return ReadCount;
+}
+
+/// setBranchWeightMetadata - Translate the counter values associated with each
+/// edge into branch weights for each conditional branch (a branch with 2 or
+/// more destinations).
+void ProfileMetadataLoaderPass::setBranchWeightMetadata(Module &M,
+ ProfileData &PB) {
+ for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
+ if (F->isDeclaration()) continue;
+ DEBUG(dbgs() << "Setting branch metadata in '" << F->getName() << "'\n");
+
+ for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
+ TerminatorInst *TI = BB->getTerminator();
+ unsigned NumSuccessors = TI->getNumSuccessors();
+
+ // If there is only one successor then we cannot set a branch
+ // probability as the target is certain.
+ if (NumSuccessors < 2) continue;
+
+ // Load the weights of all edges leading from this terminator.
+ DEBUG(dbgs() << "-- Terminator with " << NumSuccessors
+ << " successors:\n");
+ SmallVector<uint32_t, 4> Weights(NumSuccessors);
+ for (unsigned s = 0 ; s < NumSuccessors ; ++s) {
+ ProfileData::Edge edge = PB.getEdge(BB, TI->getSuccessor(s));
+ Weights[s] = (uint32_t)PB.getEdgeWeight(edge);
+ DEBUG(dbgs() << "---- Edge '" << edge << "' has weight "
+ << Weights[s] << "\n");
+ }
+
+ // Set branch weight metadata. This will set branch probabilities of
+ // 100%/0% if that is true of the dynamic execution.
+ // BranchProbabilityInfo can account for this when it loads this metadata
+ // (it gives the unexecuted branch a weight of 1 for the purposes of
+ // probability calculations).
+ MDBuilder MDB(TI->getContext());
+ MDNode *Node = MDB.createBranchWeights(Weights);
+ TI->setMetadata(LLVMContext::MD_prof, Node);
+ NumTermsAnnotated++;
+ }
+ }
+}
+
+bool ProfileMetadataLoaderPass::runOnModule(Module &M) {
+ ProfileDataLoader PDL("profile-data-loader", Filename);
+ ProfileData PB;
+
+ ArrayRef<unsigned> Counters = PDL.getRawEdgeCounts();
+
+ unsigned ReadCount = matchEdges(M, PB, Counters);
+
+ if (ReadCount != Counters.size()) {
+ errs() << "WARNING: profile information is inconsistent with "
+ << "the current program!\n";
+ }
+ NumEdgesRead = ReadCount;
+
+ setBranchWeightMetadata(M, PB);
+
+ return ReadCount > 0;
+}
diff --git a/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp b/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp
index 63468f8..12b59e0 100644
--- a/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp
@@ -286,7 +286,7 @@ void ProfileEstimatorPass::recurseBasicBlock(BasicBlock *BB) {
}
}
- double fraction = floor(BBWeight/Edges.size());
+ double fraction = Edges.size() ? floor(BBWeight/Edges.size()) : 0.0;
// Finally we know what flow is still not leaving the block, distribute this
// flow onto the empty edges.
for (SmallVector<Edge, 8>::iterator ei = Edges.begin(), ee = Edges.end();
diff --git a/contrib/llvm/lib/Analysis/ProfileInfo.cpp b/contrib/llvm/lib/Analysis/ProfileInfo.cpp
index 173de2c..b5b7ac1 100644
--- a/contrib/llvm/lib/Analysis/ProfileInfo.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileInfo.cpp
@@ -1016,40 +1016,14 @@ void ProfileInfoT<Function,BasicBlock>::repair(const Function *F) {
}
}
-raw_ostream& operator<<(raw_ostream &O, const Function *F) {
- return O << F->getName();
-}
-
raw_ostream& operator<<(raw_ostream &O, const MachineFunction *MF) {
return O << MF->getFunction()->getName() << "(MF)";
}
-raw_ostream& operator<<(raw_ostream &O, const BasicBlock *BB) {
- return O << BB->getName();
-}
-
raw_ostream& operator<<(raw_ostream &O, const MachineBasicBlock *MBB) {
return O << MBB->getBasicBlock()->getName() << "(MB)";
}
-raw_ostream& operator<<(raw_ostream &O, std::pair<const BasicBlock *, const BasicBlock *> E) {
- O << "(";
-
- if (E.first)
- O << E.first;
- else
- O << "0";
-
- O << ",";
-
- if (E.second)
- O << E.second;
- else
- O << "0";
-
- return O << ")";
-}
-
raw_ostream& operator<<(raw_ostream &O, std::pair<const MachineBasicBlock *, const MachineBasicBlock *> E) {
O << "(";
diff --git a/contrib/llvm/lib/Analysis/RegionInfo.cpp b/contrib/llvm/lib/Analysis/RegionInfo.cpp
index 868f483..30f0d2f 100644
--- a/contrib/llvm/lib/Analysis/RegionInfo.cpp
+++ b/contrib/llvm/lib/Analysis/RegionInfo.cpp
@@ -47,7 +47,7 @@ static cl::opt<enum Region::PrintStyle> printStyle("print-region-style",
cl::values(
clEnumValN(Region::PrintNone, "none", "print no details"),
clEnumValN(Region::PrintBB, "bb",
- "print regions in detail with block_node_iterator"),
+ "print regions in detail with block_iterator"),
clEnumValN(Region::PrintRN, "rn",
"print regions in detail with element_iterator"),
clEnumValEnd));
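(Editorial note.) Assuming the usual cl::opt plumbing, the renamed value description still belongs to the same print-region-style option; a hypothetical invocation exercising the PrintBB style would look something like:

    opt -regions -analyze -print-region-style=bb input.ll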
@@ -246,22 +246,6 @@ void Region::verifyRegionNest() const {
verifyRegion();
}
-Region::block_node_iterator Region::block_node_begin() {
- return GraphTraits<FlatIt<Region*> >::nodes_begin(this);
-}
-
-Region::block_node_iterator Region::block_node_end() {
- return GraphTraits<FlatIt<Region*> >::nodes_end(this);
-}
-
-Region::const_block_node_iterator Region::block_node_begin() const {
- return GraphTraits<FlatIt<const Region*> >::nodes_begin(this);
-}
-
-Region::const_block_node_iterator Region::block_node_end() const {
- return GraphTraits<FlatIt<const Region*> >::nodes_end(this);
-}
-
Region::element_iterator Region::element_begin() {
return GraphTraits<Region*>::nodes_begin(this);
}
@@ -425,10 +409,8 @@ void Region::print(raw_ostream &OS, bool print_tree, unsigned level,
OS.indent(level*2 + 2);
if (Style == PrintBB) {
- for (const_block_node_iterator I = block_node_begin(),
- E = block_node_end();
- I != E; ++I)
- OS << **I << ", "; // TODO: remove the last ","
+ for (const_block_iterator I = block_begin(), E = block_end(); I != E; ++I)
+ OS << (*I)->getName() << ", "; // TODO: remove the last ","
} else if (Style == PrintRN) {
for (const_element_iterator I = element_begin(), E = element_end(); I!=E; ++I)
OS << **I << ", "; // TODO: remove the last ","
@@ -445,9 +427,11 @@ void Region::print(raw_ostream &OS, bool print_tree, unsigned level,
OS.indent(level*2) << "} \n";
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Region::dump() const {
print(dbgs(), true, getDepth(), printStyle.getValue());
}
+#endif
void Region::clearNodeCache() {
// Free the cached nodes.
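(Editorial sketch.) The #if guard added around Region::dump() (the same guard appears below for SCEV::dump() and Trace::dump()) implements one idiom: dump() is compiled only into +Asserts builds, or when LLVM_ENABLE_DUMP is defined explicitly. A minimal sketch of the pattern, with a hypothetical class name:

    #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void MyAnalysis::dump() const {
      print(dbgs());   // reuse the ordinary printer on the debug stream
    }
    #endif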
diff --git a/contrib/llvm/lib/Analysis/RegionPass.cpp b/contrib/llvm/lib/Analysis/RegionPass.cpp
index c97b5eb..9208fa2 100644
--- a/contrib/llvm/lib/Analysis/RegionPass.cpp
+++ b/contrib/llvm/lib/Analysis/RegionPass.cpp
@@ -195,10 +195,9 @@ public:
virtual bool runOnRegion(Region *R, RGPassManager &RGM) {
Out << Banner;
- for (Region::block_node_iterator I = R->block_node_begin(),
- E = R->block_node_end();
+ for (Region::block_iterator I = R->block_begin(), E = R->block_end();
I != E; ++I)
- (*I)->getEntry()->print(Out);
+ (*I)->print(Out);
return false;
}
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
index a654648..e3189ec 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -73,7 +73,7 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
@@ -105,6 +105,11 @@ MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
"derived loop"),
cl::init(100));
+// FIXME: Enable this with XDEBUG when the test suite is clean.
+static cl::opt<bool>
+VerifySCEV("verify-scev",
+ cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
+
INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
"Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
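(Editorial note.) The new flag gates the verifyAnalysis() override added at the bottom of this file. In builds where the pass manager invokes verifyAnalysis() on preserved analyses, it could presumably be enabled roughly like this; the pipeline shown is illustrative only:

    opt -verify-scev -indvars input.ll -o output.bc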
@@ -122,10 +127,12 @@ char ScalarEvolution::ID = 0;
// Implementation of the SCEV class.
//
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void SCEV::dump() const {
print(dbgs());
dbgs() << '\n';
}
+#endif
void SCEV::print(raw_ostream &OS) const {
switch (getSCEVType()) {
@@ -2580,7 +2587,7 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
}
const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
- // If we have TargetData, we can bypass creating a target-independent
+ // If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
if (TD)
@@ -2606,7 +2613,7 @@ const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) {
const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
unsigned FieldNo) {
- // If we have TargetData, we can bypass creating a target-independent
+ // If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
if (TD)
@@ -2671,7 +2678,7 @@ bool ScalarEvolution::isSCEVable(Type *Ty) const {
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");
- // If we have a TargetData, use it!
+ // If we have a DataLayout, use it!
if (TD)
return TD->getTypeSizeInBits(Ty);
@@ -2679,7 +2686,7 @@ uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
if (Ty->isIntegerTy())
return Ty->getPrimitiveSizeInBits();
- // The only other support type is pointer. Without TargetData, conservatively
+  // The only other supported type is pointer. Without DataLayout, conservatively
// assume pointers are 64-bit.
assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
return 64;
@@ -2699,7 +2706,7 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
if (TD) return TD->getIntPtrType(getContext());
- // Without TargetData, conservatively assume pointers are 64-bit.
+ // Without DataLayout, conservatively assume pointers are 64-bit.
return Type::getInt64Ty(getContext());
}
@@ -3978,8 +3985,11 @@ getSmallConstantTripMultiple(Loop *L, BasicBlock *ExitingBlock) {
ConstantInt *Result = MulC->getValue();
- // Guard against huge trip counts.
- if (!Result || Result->getValue().getActiveBits() > 32)
+ // Guard against huge trip counts (this requires checking
+ // for zero to handle the case where the trip count == -1 and the
+ // addition wraps).
+ if (!Result || Result->getValue().getActiveBits() > 32 ||
+ Result->getValue().getActiveBits() == 0)
return 1;
return (unsigned)Result->getZExtValue();
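(Editorial sketch.) The added getActiveBits() == 0 test covers APInt wraparound: an all-ones count becomes zero once one is added, and a zero APInt reports no active bits, a case the old > 32 test alone would have let through. A self-contained illustration; the 32-bit width is arbitrary:

    APInt BTC = APInt::getAllOnesValue(32);  // i.e. -1
    APInt Trip = BTC + 1;                    // addition wraps to 0
    assert(Trip.getActiveBits() == 0);       // now rejected, multiple = 1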
@@ -4749,7 +4759,7 @@ static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
DenseMap<Instruction *, Constant *> &Vals,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI) {
// Convenient constant check, but redundant for recursive calls.
if (Constant *C = dyn_cast<Constant>(V)) return C;
@@ -6141,7 +6151,7 @@ bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
return CmpInst::isTrueWhenEqual(Pred);
if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
if (FoundLHS == FoundRHS)
- return CmpInst::isFalseWhenEqual(Pred);
+ return CmpInst::isFalseWhenEqual(FoundPred);
// Check to see if we can make the LHS or RHS match.
if (LHS == FoundRHS || RHS == FoundLHS) {
@@ -6588,7 +6598,7 @@ ScalarEvolution::ScalarEvolution()
bool ScalarEvolution::runOnFunction(Function &F) {
this->F = &F;
LI = &getAnalysis<LoopInfo>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
DT = &getAnalysis<DominatorTree>();
return false;
@@ -6930,3 +6940,87 @@ void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
UnsignedRanges.erase(S);
SignedRanges.erase(S);
}
+
+typedef DenseMap<const Loop *, std::string> VerifyMap;
+
+/// replaceSubString - Replaces all occurrences of From in Str with To.
+static void replaceSubString(std::string &Str, StringRef From, StringRef To) {
+ size_t Pos = 0;
+ while ((Pos = Str.find(From, Pos)) != std::string::npos) {
+ Str.replace(Pos, From.size(), To.data(), To.size());
+ Pos += To.size();
+ }
+}
+
+/// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis.
+static void
+getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) {
+ for (Loop::reverse_iterator I = L->rbegin(), E = L->rend(); I != E; ++I) {
+ getLoopBackedgeTakenCounts(*I, Map, SE); // recurse.
+
+ std::string &S = Map[L];
+ if (S.empty()) {
+ raw_string_ostream OS(S);
+ SE.getBackedgeTakenCount(L)->print(OS);
+
+ // false and 0 are semantically equivalent. This can happen in dead loops.
+ replaceSubString(OS.str(), "false", "0");
+ // Remove wrap flags, their use in SCEV is highly fragile.
+ // FIXME: Remove this when SCEV gets smarter about them.
+ replaceSubString(OS.str(), "<nw>", "");
+ replaceSubString(OS.str(), "<nsw>", "");
+ replaceSubString(OS.str(), "<nuw>", "");
+ }
+ }
+}
+
+void ScalarEvolution::verifyAnalysis() const {
+ if (!VerifySCEV)
+ return;
+
+ ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
+
+ // Gather stringified backedge taken counts for all loops using SCEV's caches.
+ // FIXME: It would be much better to store actual values instead of strings,
+ // but SCEV pointers will change if we drop the caches.
+ VerifyMap BackedgeDumpsOld, BackedgeDumpsNew;
+ for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
+ getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE);
+
+ // Gather stringified backedge taken counts for all loops without using
+ // SCEV's caches.
+ SE.releaseMemory();
+ for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
+ getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE);
+
+ // Now compare whether they're the same with and without caches. This allows
+ // verifying that no pass changed the cache.
+ assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() &&
+ "New loops suddenly appeared!");
+
+ for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(),
+ OldE = BackedgeDumpsOld.end(),
+ NewI = BackedgeDumpsNew.begin();
+ OldI != OldE; ++OldI, ++NewI) {
+ assert(OldI->first == NewI->first && "Loop order changed!");
+
+    // Compare the stringified SCEVs. We don't care if an undef backedge-taken
+    // count changes.
+    // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. Such a
+    // change usually means a pass is buggy or SCEV needs to learn a new
+    // pattern, but it is usually not harmful.
+ if (OldI->second != NewI->second &&
+ OldI->second.find("undef") == std::string::npos &&
+ NewI->second.find("undef") == std::string::npos &&
+ OldI->second != "***COULDNOTCOMPUTE***" &&
+ NewI->second != "***COULDNOTCOMPUTE***") {
+ dbgs() << "SCEVValidator: SCEV for loop '"
+ << OldI->first->getHeader()->getName()
+ << "' changed from '" << OldI->second
+ << "' to '" << NewI->second << "'!\n";
+ std::abort();
+ }
+ }
+
+ // TODO: Verify more things.
+}
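(Editorial sketch.) To make the comparison above concrete, this is what the string normalization in getLoopBackedgeTakenCounts buys; the SCEV dumps are made up, but two dumps differing only in wrap flags compare equal afterwards:

    std::string Old = "(-1 + %n)<nsw>";   // cached dump (hypothetical)
    std::string New = "(-1 + %n)";        // recomputed dump (hypothetical)
    replaceSubString(Old, "<nsw>", "");
    assert(Old == New);                   // no spurious verifier abort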
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index 62710c5..111bfb4 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -18,7 +18,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
@@ -212,7 +212,7 @@ static bool FactorOutConstant(const SCEV *&S,
const SCEV *&Remainder,
const SCEV *Factor,
ScalarEvolution &SE,
- const TargetData *TD) {
+ const DataLayout *TD) {
// Everything is divisible by one.
if (Factor->isOne())
return true;
@@ -253,7 +253,7 @@ static bool FactorOutConstant(const SCEV *&S,
// of the given factor.
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
if (TD) {
- // With TargetData, the size is known. Check if there is a constant
+ // With DataLayout, the size is known. Check if there is a constant
// operand which is a multiple of the given factor. If so, we can
// factor it.
const SCEVConstant *FC = cast<SCEVConstant>(Factor);
@@ -267,7 +267,7 @@ static bool FactorOutConstant(const SCEV *&S,
return true;
}
} else {
- // Without TargetData, check if Factor can be factored out of any of the
+ // Without DataLayout, check if Factor can be factored out of any of the
// Mul's operands. If so, we can just remove it.
for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
const SCEV *SOp = M->getOperand(i);
@@ -458,7 +458,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
// An empty struct has no fields.
if (STy->getNumElements() == 0) break;
if (SE.TD) {
- // With TargetData, field offsets are known. See if a constant offset
+ // With DataLayout, field offsets are known. See if a constant offset
// falls within any of the struct fields.
if (Ops.empty()) break;
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
@@ -477,7 +477,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
}
}
} else {
- // Without TargetData, just check for an offsetof expression of the
+ // Without DataLayout, just check for an offsetof expression of the
// appropriate struct type.
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
@@ -1618,6 +1618,17 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
PEnd = Phis.end(); PIter != PEnd; ++PIter) {
PHINode *Phi = *PIter;
+ // Fold constant phis. They may be congruent to other constant phis and
+ // would confuse the logic below that expects proper IVs.
+ if (Value *V = Phi->hasConstantValue()) {
+ Phi->replaceAllUsesWith(V);
+ DeadInsts.push_back(Phi);
+ ++NumElim;
+ DEBUG_WITH_TYPE(DebugType, dbgs()
+ << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
+ continue;
+ }
+
if (!SE.isSCEVable(Phi->getType()))
continue;
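(Editorial sketch.) The new early fold targets phis whose incoming values all agree; left alone, they would reach the congruence logic below without being proper IVs. Illustrative IR for the case now handled:

    // %iv = phi i32 [ 0, %preheader ], [ 0, %latch ]
    //   hasConstantValue() returns i32 0, every use of %iv is rewritten
    //   to that constant, and the dead phi is queued on DeadInsts.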
diff --git a/contrib/llvm/lib/Analysis/Trace.cpp b/contrib/llvm/lib/Analysis/Trace.cpp
index ff5010b..22da857 100644
--- a/contrib/llvm/lib/Analysis/Trace.cpp
+++ b/contrib/llvm/lib/Analysis/Trace.cpp
@@ -43,9 +43,11 @@ void Trace::print(raw_ostream &O) const {
O << "; Trace parent function: \n" << *F;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dump - Debugger convenience method; writes trace to standard error
/// output stream.
///
void Trace::dump() const {
print(dbgs());
}
+#endif
diff --git a/contrib/llvm/lib/Analysis/ValueTracking.cpp b/contrib/llvm/lib/Analysis/ValueTracking.cpp
index cea34e1..3beb373 100644
--- a/contrib/llvm/lib/Analysis/ValueTracking.cpp
+++ b/contrib/llvm/lib/Analysis/ValueTracking.cpp
@@ -22,7 +22,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/Operator.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
@@ -36,7 +36,7 @@ const unsigned MaxDepth = 6;
/// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if
/// unknown returns 0). For vector types, returns the element type's bitwidth.
-static unsigned getBitWidth(Type *Ty, const TargetData *TD) {
+static unsigned getBitWidth(Type *Ty, const DataLayout *TD) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth;
assert(isa<PointerType>(Ty) && "Expected a pointer type!");
@@ -46,7 +46,7 @@ static unsigned getBitWidth(Type *Ty, const TargetData *TD) {
static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
APInt &KnownZero, APInt &KnownOne,
APInt &KnownZero2, APInt &KnownOne2,
- const TargetData *TD, unsigned Depth) {
+ const DataLayout *TD, unsigned Depth) {
if (!Add) {
if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
// We know that the top bits of C-X are clear if X contains less bits
@@ -132,7 +132,7 @@ static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW,
APInt &KnownZero, APInt &KnownOne,
APInt &KnownZero2, APInt &KnownOne2,
- const TargetData *TD, unsigned Depth) {
+ const DataLayout *TD, unsigned Depth) {
unsigned BitWidth = KnownZero.getBitWidth();
ComputeMaskedBits(Op1, KnownZero, KnownOne, TD, Depth+1);
ComputeMaskedBits(Op0, KnownZero2, KnownOne2, TD, Depth+1);
@@ -226,7 +226,7 @@ void llvm::computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero) {
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
- const TargetData *TD, unsigned Depth) {
+ const DataLayout *TD, unsigned Depth) {
assert(V && "No Value?");
assert(Depth <= MaxDepth && "Limit Search Depth");
unsigned BitWidth = KnownZero.getBitWidth();
@@ -308,11 +308,20 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
}
if (Argument *A = dyn_cast<Argument>(V)) {
- // Get alignment information off byval arguments if specified in the IR.
- if (A->hasByValAttr())
- if (unsigned Align = A->getParamAlignment())
- KnownZero = APInt::getLowBitsSet(BitWidth,
- CountTrailingZeros_32(Align));
+ unsigned Align = 0;
+
+ if (A->hasByValAttr()) {
+ // Get alignment information off byval arguments if specified in the IR.
+ Align = A->getParamAlignment();
+ } else if (TD && A->hasStructRetAttr()) {
+ // An sret parameter has at least the ABI alignment of the return type.
+ Type *EltTy = cast<PointerType>(A->getType())->getElementType();
+ if (EltTy->isSized())
+ Align = TD->getABITypeAlignment(EltTy);
+ }
+
+ if (Align)
+ KnownZero = APInt::getLowBitsSet(BitWidth, CountTrailingZeros_32(Align));
return;
}
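(Editorial sketch.) Both the byval branch and the new sret branch feed the same deduction: an alignment of 2^k proves that the low k bits of the pointer are zero. A sketch with made-up numbers:

    unsigned Align = 16;                                     // 16-byte-aligned
    unsigned TZ = CountTrailingZeros_32(Align);              // == 4
    APInt KnownZero = APInt::getLowBitsSet(/*BitWidth=*/64, TZ);
    // the pointer's low 4 bits are now known to be zero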
@@ -420,15 +429,13 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
case Instruction::ZExt:
case Instruction::Trunc: {
Type *SrcTy = I->getOperand(0)->getType();
-
+
unsigned SrcBitWidth;
// Note that we handle pointer operands here because of inttoptr/ptrtoint
// which fall through here.
- if (SrcTy->isPointerTy())
- SrcBitWidth = TD->getTypeSizeInBits(SrcTy);
- else
- SrcBitWidth = SrcTy->getScalarSizeInBits();
-
+ SrcBitWidth = TD->getTypeSizeInBits(SrcTy->getScalarType());
+
+ assert(SrcBitWidth && "SrcBitWidth can't be zero");
KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
@@ -778,7 +785,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
/// ComputeSignBit - Determine whether the sign bit is known to be zero or
/// one. Convenience wrapper around ComputeMaskedBits.
void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
- const TargetData *TD, unsigned Depth) {
+ const DataLayout *TD, unsigned Depth) {
unsigned BitWidth = getBitWidth(V->getType(), TD);
if (!BitWidth) {
KnownZero = false;
@@ -796,7 +803,7 @@ void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
-bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, bool OrZero,
+bool llvm::isPowerOfTwo(Value *V, const DataLayout *TD, bool OrZero,
unsigned Depth) {
if (Constant *C = dyn_cast<Constant>(V)) {
if (C->isNullValue())
@@ -859,7 +866,7 @@ bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, bool OrZero,
/// when defined. For vectors return true if every element is known to be
/// non-zero when defined. Supports values with integer or pointer type and
/// vectors of integers.
-bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
+bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth) {
if (Constant *C = dyn_cast<Constant>(V)) {
if (C->isNullValue())
return false;
@@ -986,7 +993,7 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
- const TargetData *TD, unsigned Depth) {
+ const DataLayout *TD, unsigned Depth) {
APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
@@ -1003,10 +1010,10 @@ bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
///
/// 'Op' must have a scalar integer type.
///
-unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
+unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD,
unsigned Depth) {
assert((TD || V->getType()->isIntOrIntVectorTy()) &&
- "ComputeNumSignBits requires a TargetData object to operate "
+ "ComputeNumSignBits requires a DataLayout object to operate "
"on non-integer values!");
Type *Ty = V->getType();
unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) :
@@ -1582,7 +1589,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
/// it can be expressed as a base pointer plus a constant offset. Return the
/// base and offset to the caller.
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
- const TargetData &TD) {
+ const DataLayout &TD) {
Operator *PtrOp = dyn_cast<Operator>(Ptr);
if (PtrOp == 0 || Ptr->getType()->isVectorTy())
return Ptr;
@@ -1614,7 +1621,7 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
// right.
unsigned PtrSize = TD.getPointerSizeInBits();
if (PtrSize < 64)
- Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);
+ Offset = SignExtend64(Offset, PtrSize);
return GetPointerBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}
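(Editorial sketch.) SignExtend64 computes the same value as the old shift pair while avoiding the implementation-defined right shift of a negative quantity. With hypothetical 32-bit pointers:

    uint64_t Offset = 0xFFFFFFFFULL;         // low 32 bits all set
    assert(SignExtend64(Offset, 32) == -1);  // reinterpreted as signed -1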
@@ -1768,7 +1775,7 @@ uint64_t llvm::GetStringLength(Value *V) {
}
Value *
-llvm::GetUnderlyingObject(Value *V, const TargetData *TD, unsigned MaxLookup) {
+llvm::GetUnderlyingObject(Value *V, const DataLayout *TD, unsigned MaxLookup) {
if (!V->getType()->isPointerTy())
return V;
for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
@@ -1799,7 +1806,7 @@ llvm::GetUnderlyingObject(Value *V, const TargetData *TD, unsigned MaxLookup) {
void
llvm::GetUnderlyingObjects(Value *V,
SmallVectorImpl<Value *> &Objects,
- const TargetData *TD,
+ const DataLayout *TD,
unsigned MaxLookup) {
SmallPtrSet<Value *, 4> Visited;
SmallVector<Value *, 4> Worklist;
@@ -1844,7 +1851,7 @@ bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
}
bool llvm::isSafeToSpeculativelyExecute(const Value *V,
- const TargetData *TD) {
+ const DataLayout *TD) {
const Operator *Inst = dyn_cast<Operator>(V);
if (!Inst)
return false;