path: root/lib/Analysis
author    rdivacky <rdivacky@FreeBSD.org>    2010-02-16 09:30:23 +0000
committer rdivacky <rdivacky@FreeBSD.org>    2010-02-16 09:30:23 +0000
commit    f25ddd991a5601d0101602c4c263a58c7af4b8a2 (patch)
tree      4cfca640904d1896e25032757a61f8959c066919 /lib/Analysis
parent    3fd58f91dd318518f7daa4ba64c0aaf31799d89b (diff)
Update LLVM to r96341.
Diffstat (limited to 'lib/Analysis')
-rw-r--r--  lib/Analysis/ConstantFolding.cpp          |  46
-rw-r--r--  lib/Analysis/DebugInfo.cpp                |  59
-rw-r--r--  lib/Analysis/IPA/GlobalsModRef.cpp        |   2
-rw-r--r--  lib/Analysis/IPA/Makefile                 |   1
-rw-r--r--  lib/Analysis/IVUsers.cpp                  | 176
-rw-r--r--  lib/Analysis/InlineCost.cpp               |  80
-rw-r--r--  lib/Analysis/LiveValues.cpp               |   2
-rw-r--r--  lib/Analysis/Makefile                     |   1
-rw-r--r--  lib/Analysis/MemoryBuiltins.cpp           |   4
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp          | 480
-rw-r--r--  lib/Analysis/ScalarEvolutionExpander.cpp  | 152
-rw-r--r--  lib/Analysis/ValueTracking.cpp            |  41
12 files changed, 618 insertions, 426 deletions
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 4ae8859..808e6fa 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -80,7 +80,7 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
// First thing is first. We only want to think about integer here, so if
// we have something in FP form, recast it as integer.
- if (DstEltTy->isFloatingPoint()) {
+ if (DstEltTy->isFloatingPointTy()) {
// Fold to a vector of integers with the same size as our FP type.
unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
const Type *DestIVTy =
@@ -95,7 +95,7 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
// Okay, we know the destination is integer, if the input is FP, convert
// it to integer first.
- if (SrcEltTy->isFloatingPoint()) {
+ if (SrcEltTy->isFloatingPointTy()) {
unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
const Type *SrcIVTy =
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
@@ -517,6 +517,42 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
return 0;
}
+/// CastGEPIndices - If array indices are not pointer-sized integers,
+/// explicitly cast them so that they aren't implicitly casted by the
+/// getelementptr.
+static Constant *CastGEPIndices(Constant *const *Ops, unsigned NumOps,
+ const Type *ResultTy,
+ const TargetData *TD) {
+ if (!TD) return 0;
+ const Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
+
+ bool Any = false;
+ SmallVector<Constant*, 32> NewIdxs;
+ for (unsigned i = 1; i != NumOps; ++i) {
+ if ((i == 1 ||
+ !isa<StructType>(GetElementPtrInst::getIndexedType(Ops[0]->getType(),
+ reinterpret_cast<Value *const *>(Ops+1),
+ i-1))) &&
+ Ops[i]->getType() != IntPtrTy) {
+ Any = true;
+ NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
+ true,
+ IntPtrTy,
+ true),
+ Ops[i], IntPtrTy));
+ } else
+ NewIdxs.push_back(Ops[i]);
+ }
+ if (!Any) return 0;
+
+ Constant *C =
+ ConstantExpr::getGetElementPtr(Ops[0], &NewIdxs[0], NewIdxs.size());
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ C = Folded;
+ return C;
+}
+
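For illustration, a minimal sketch (not part of the patch) of how a constant GEP with non-pointer-sized indices reaches the new fold through the public ConstantFoldConstantExpression entry point; the module M, context Ctx, and the 64-bit data-layout string are assumptions of the example:

    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/GlobalVariable.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Module.h"
    #include "llvm/Target/TargetData.h"
    using namespace llvm;

    // On a target with 64-bit pointers, CastGEPIndices rewrites the i16
    // indices below as i64 before any further folding, so the
    // getelementptr performs no implicit casts.
    static Constant *FoldExampleGEP(Module &M, LLVMContext &Ctx) {
      const Type *ATy = ArrayType::get(Type::getInt32Ty(Ctx), 4);
      GlobalVariable *G =
          new GlobalVariable(M, ATy, /*isConstant=*/true,
                             GlobalValue::ExternalLinkage,
                             Constant::getNullValue(ATy), "G");
      Constant *Idxs[] = { ConstantInt::get(Type::getInt16Ty(Ctx), 0),
                           ConstantInt::get(Type::getInt16Ty(Ctx), 1) };
      Constant *GEP = ConstantExpr::getGetElementPtr(G, Idxs, 2);
      TargetData TD("e-p:64:64:64");  // hypothetical 64-bit layout string
      return ConstantFoldConstantExpression(cast<ConstantExpr>(GEP), &TD);
    }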
/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
@@ -676,10 +712,10 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, const TargetData *TD) {
/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified TargetData. If successful, the constant result is
/// returned; if not, null is returned.
-Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE,
+Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
const TargetData *TD) {
SmallVector<Constant*, 8> Ops;
- for (User::op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i) {
+ for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i) {
Constant *NewC = cast<Constant>(*i);
// Recursively fold the ConstantExpr's operands.
if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC))
@@ -810,6 +846,8 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
case Instruction::ShuffleVector:
return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
case Instruction::GetElementPtr:
+ if (Constant *C = CastGEPIndices(Ops, NumOps, DestTy, TD))
+ return C;
if (Constant *C = SymbolicallyEvaluateGEP(Ops, NumOps, DestTy, TD))
return C;
diff --git a/lib/Analysis/DebugInfo.cpp b/lib/Analysis/DebugInfo.cpp
index 4ba837a..258f1db 100644
--- a/lib/Analysis/DebugInfo.cpp
+++ b/lib/Analysis/DebugInfo.cpp
@@ -725,6 +725,29 @@ DIBasicType DIFactory::CreateBasicTypeEx(DIDescriptor Context,
return DIBasicType(MDNode::get(VMContext, &Elts[0], 10));
}
+/// CreateArtificialType - Create a new DIType with "artificial" flag set.
+DIType DIFactory::CreateArtificialType(DIType Ty) {
+ if (Ty.isArtificial())
+ return Ty;
+
+ SmallVector<Value *, 9> Elts;
+ MDNode *N = Ty.getNode();
+ assert (N && "Unexpected input DIType!");
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
+ if (Value *V = N->getOperand(i))
+ Elts.push_back(V);
+ else
+ Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext)));
+ }
+
+ unsigned CurFlags = Ty.getFlags();
+ CurFlags = CurFlags | DIType::FlagArtificial;
+
+ // Flags are stored at this slot.
+ Elts[8] = ConstantInt::get(Type::getInt32Ty(VMContext), CurFlags);
+
+ return DIType(MDNode::get(VMContext, Elts.data(), Elts.size()));
+}
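A usage sketch under assumed names (DIF is a DIFactory for the module being built, ThisTy a front-end-created DIType for an implicit "this" parameter):

    #include "llvm/Analysis/DebugInfo.h"
    using namespace llvm;

    DIType MarkThisArtificial(DIFactory &DIF, DIType ThisTy) {
      // Returns ThisTy unchanged if the flag is already set; otherwise a
      // copy of the type node with FlagArtificial OR'd into the flags slot.
      return DIF.CreateArtificialType(ThisTy);
    }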
/// CreateDerivedType - Create a derived type like const qualified type,
/// pointer, typedef, etc.
@@ -794,7 +817,8 @@ DICompositeType DIFactory::CreateCompositeType(unsigned Tag,
unsigned Flags,
DIType DerivedFrom,
DIArray Elements,
- unsigned RuntimeLang) {
+ unsigned RuntimeLang,
+ MDNode *ContainingType) {
Value *Elts[] = {
GetTagConstant(Tag),
@@ -808,9 +832,10 @@ DICompositeType DIFactory::CreateCompositeType(unsigned Tag,
ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
DerivedFrom.getNode(),
Elements.getNode(),
- ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang)
+ ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang),
+ ContainingType
};
- return DICompositeType(MDNode::get(VMContext, &Elts[0], 12));
+ return DICompositeType(MDNode::get(VMContext, &Elts[0], 13));
}
@@ -858,7 +883,8 @@ DISubprogram DIFactory::CreateSubprogram(DIDescriptor Context,
bool isLocalToUnit,
bool isDefinition,
unsigned VK, unsigned VIndex,
- DIType ContainingType) {
+ DIType ContainingType,
+ bool isArtificial) {
Value *Elts[] = {
GetTagConstant(dwarf::DW_TAG_subprogram),
@@ -874,9 +900,10 @@ DISubprogram DIFactory::CreateSubprogram(DIDescriptor Context,
ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition),
ConstantInt::get(Type::getInt32Ty(VMContext), (unsigned)VK),
ConstantInt::get(Type::getInt32Ty(VMContext), VIndex),
- ContainingType.getNode()
+ ContainingType.getNode(),
+ ConstantInt::get(Type::getInt1Ty(VMContext), isArtificial)
};
- return DISubprogram(MDNode::get(VMContext, &Elts[0], 14));
+ return DISubprogram(MDNode::get(VMContext, &Elts[0], 15));
}
/// CreateSubprogramDefinition - Create new subprogram descriptor for the
@@ -900,9 +927,10 @@ DISubprogram DIFactory::CreateSubprogramDefinition(DISubprogram &SPDeclaration)
ConstantInt::get(Type::getInt1Ty(VMContext), true),
DeclNode->getOperand(11), // Virtuality
DeclNode->getOperand(12), // VIndex
- DeclNode->getOperand(13) // Containting Type
+ DeclNode->getOperand(13), // Containing Type
+ DeclNode->getOperand(14) // isArtificial
};
- return DISubprogram(MDNode::get(VMContext, &Elts[0], 14));
+ return DISubprogram(MDNode::get(VMContext, &Elts[0], 15));
}
/// CreateGlobalVariable - Create a new descriptor for the specified global.
@@ -1033,6 +1061,8 @@ DILocation DIFactory::CreateLocation(unsigned LineNo, unsigned ColumnNo,
/// InsertDeclare - Insert a new llvm.dbg.declare intrinsic call.
Instruction *DIFactory::InsertDeclare(Value *Storage, DIVariable D,
Instruction *InsertBefore) {
+ assert(Storage && "no storage passed to dbg.declare");
+ assert(D.getNode() && "empty DIVariable passed to dbg.declare");
if (!DeclareFn)
DeclareFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare);
@@ -1044,19 +1074,27 @@ Instruction *DIFactory::InsertDeclare(Value *Storage, DIVariable D,
/// InsertDeclare - Insert a new llvm.dbg.declare intrinsic call.
Instruction *DIFactory::InsertDeclare(Value *Storage, DIVariable D,
BasicBlock *InsertAtEnd) {
+ assert(Storage && "no storage passed to dbg.declare");
+ assert(D.getNode() && "empty DIVariable passed to dbg.declare");
if (!DeclareFn)
DeclareFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare);
Value *Args[] = { MDNode::get(Storage->getContext(), &Storage, 1),
D.getNode() };
- return CallInst::Create(DeclareFn, Args, Args+2, "", InsertAtEnd);
-}
+
+ // If this block already has a terminator then insert this intrinsic
+ // before the terminator.
+ if (TerminatorInst *T = InsertAtEnd->getTerminator())
+ return CallInst::Create(DeclareFn, Args, Args+2, "", T);
+ else
+ return CallInst::Create(DeclareFn, Args, Args+2, "", InsertAtEnd);
+}
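A sketch of the behavioral change, with DIF, AI, and DV assumed to exist: inserting at the end of a block that already ends in a branch now places the dbg.declare before the terminator, where appending it would have produced a malformed block.

    #include "llvm/Analysis/DebugInfo.h"
    #include "llvm/Instructions.h"
    using namespace llvm;

    Instruction *DeclareLocal(DIFactory &DIF, AllocaInst *AI, DIVariable DV,
                              BasicBlock *BB) {
      // Lands before BB's terminator when one exists, at the end otherwise.
      return DIF.InsertDeclare(AI, DV, BB);
    }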
/// InsertDbgValueIntrinsic - Insert a new llvm.dbg.value intrinsic call.
Instruction *DIFactory::InsertDbgValueIntrinsic(Value *V, uint64_t Offset,
DIVariable D,
Instruction *InsertBefore) {
assert(V && "no value passed to dbg.value");
+ assert(D.getNode() && "empty DIVariable passed to dbg.value");
if (!ValueFn)
ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value);
@@ -1071,6 +1109,7 @@ Instruction *DIFactory::InsertDbgValueIntrinsic(Value *V, uint64_t Offset,
DIVariable D,
BasicBlock *InsertAtEnd) {
assert(V && "no value passed to dbg.value");
+ assert(D.getNode() && "empty DIVariable passed to dbg.value");
if (!ValueFn)
ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value);
diff --git a/lib/Analysis/IPA/GlobalsModRef.cpp b/lib/Analysis/IPA/GlobalsModRef.cpp
index e803a48..ec94bc8 100644
--- a/lib/Analysis/IPA/GlobalsModRef.cpp
+++ b/lib/Analysis/IPA/GlobalsModRef.cpp
@@ -486,7 +486,7 @@ GlobalsModRef::alias(const Value *V1, unsigned V1Size,
if (GV1 && !NonAddressTakenGlobals.count(GV1)) GV1 = 0;
if (GV2 && !NonAddressTakenGlobals.count(GV2)) GV2 = 0;
- // If the the two pointers are derived from two different non-addr-taken
+ // If the two pointers are derived from two different non-addr-taken
// globals, or if one is and the other isn't, we know these can't alias.
if ((GV1 || GV2) && GV1 != GV2)
return NoAlias;
diff --git a/lib/Analysis/IPA/Makefile b/lib/Analysis/IPA/Makefile
index da719ba..b850c9f 100644
--- a/lib/Analysis/IPA/Makefile
+++ b/lib/Analysis/IPA/Makefile
@@ -10,7 +10,6 @@
LEVEL = ../../..
LIBRARYNAME = LLVMipa
BUILD_ARCHIVE = 1
-CXXFLAGS = -fno-rtti
include $(LEVEL)/Makefile.common
diff --git a/lib/Analysis/IVUsers.cpp b/lib/Analysis/IVUsers.cpp
index 38611cc..4ce6868 100644
--- a/lib/Analysis/IVUsers.cpp
+++ b/lib/Analysis/IVUsers.cpp
@@ -21,6 +21,7 @@
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Assembly/AsmAnnotationWriter.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -35,42 +36,30 @@ Pass *llvm::createIVUsersPass() {
return new IVUsers();
}
-/// containsAddRecFromDifferentLoop - Determine whether expression S involves a
-/// subexpression that is an AddRec from a loop other than L. An outer loop
-/// of L is OK, but not an inner loop nor a disjoint loop.
-static bool containsAddRecFromDifferentLoop(const SCEV *S, Loop *L) {
- // This is very common, put it first.
- if (isa<SCEVConstant>(S))
- return false;
- if (const SCEVCommutativeExpr *AE = dyn_cast<SCEVCommutativeExpr>(S)) {
- for (unsigned int i=0; i< AE->getNumOperands(); i++)
- if (containsAddRecFromDifferentLoop(AE->getOperand(i), L))
- return true;
- return false;
- }
- if (const SCEVAddRecExpr *AE = dyn_cast<SCEVAddRecExpr>(S)) {
- if (const Loop *newLoop = AE->getLoop()) {
- if (newLoop == L)
- return false;
- // if newLoop is an outer loop of L, this is OK.
- if (newLoop->contains(L))
- return false;
+/// CollectSubexprs - Split S into subexpressions which can be pulled out into
+/// separate registers.
+static void CollectSubexprs(const SCEV *S,
+ SmallVectorImpl<const SCEV *> &Ops,
+ ScalarEvolution &SE) {
+ if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
+ // Break out add operands.
+ for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
+ I != E; ++I)
+ CollectSubexprs(*I, Ops, SE);
+ return;
+ } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+ // Split a non-zero base out of an addrec.
+ if (!AR->getStart()->isZero()) {
+ CollectSubexprs(AR->getStart(), Ops, SE);
+ CollectSubexprs(SE.getAddRecExpr(SE.getIntegerSCEV(0, AR->getType()),
+ AR->getStepRecurrence(SE),
+ AR->getLoop()), Ops, SE);
+ return;
}
- return true;
}
- if (const SCEVUDivExpr *DE = dyn_cast<SCEVUDivExpr>(S))
- return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
- containsAddRecFromDifferentLoop(DE->getRHS(), L);
-#if 0
- // SCEVSDivExpr has been backed out temporarily, but will be back; we'll
- // need this when it is.
- if (const SCEVSDivExpr *DE = dyn_cast<SCEVSDivExpr>(S))
- return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
- containsAddRecFromDifferentLoop(DE->getRHS(), L);
-#endif
- if (const SCEVCastExpr *CE = dyn_cast<SCEVCastExpr>(S))
- return containsAddRecFromDifferentLoop(CE->getOperand(), L);
- return false;
+
+ // Otherwise use the value itself.
+ Ops.push_back(S);
}
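A worked example of the split, assuming a ScalarEvolution &SE and a SCEV S of the form ((4 + %a) + {8,+,%s}<L>):

    SmallVector<const SCEV *, 4> Ops;
    CollectSubexprs(S, Ops, SE);
    // Ops now holds (in some order): 4, %a, 8, and {0,+,%s}<L> -- the add
    // is flattened and the addrec's non-zero start is peeled off, so each
    // piece is a candidate for its own register.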
/// getSCEVStartAndStride - Compute the start and stride of this expression,
@@ -89,35 +78,42 @@ static bool getSCEVStartAndStride(const SCEV *&SH, Loop *L, Loop *UseLoop,
if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(SH)) {
for (unsigned i = 0, e = AE->getNumOperands(); i != e; ++i)
if (const SCEVAddRecExpr *AddRec =
- dyn_cast<SCEVAddRecExpr>(AE->getOperand(i))) {
- if (AddRec->getLoop() == L)
- TheAddRec = SE->getAddExpr(AddRec, TheAddRec);
- else
- return false; // Nested IV of some sort?
- } else {
+ dyn_cast<SCEVAddRecExpr>(AE->getOperand(i)))
+ TheAddRec = SE->getAddExpr(AddRec, TheAddRec);
+ else
Start = SE->getAddExpr(Start, AE->getOperand(i));
- }
} else if (isa<SCEVAddRecExpr>(SH)) {
TheAddRec = SH;
} else {
return false; // not analyzable.
}
- const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(TheAddRec);
- if (!AddRec || AddRec->getLoop() != L) return false;
+ // Break down TheAddRec into its component parts.
+ SmallVector<const SCEV *, 4> Subexprs;
+ CollectSubexprs(TheAddRec, Subexprs, *SE);
+
+ // Look for an addrec on the current loop among the parts.
+ const SCEV *AddRecStride = 0;
+ for (SmallVectorImpl<const SCEV *>::iterator I = Subexprs.begin(),
+ E = Subexprs.end(); I != E; ++I) {
+ const SCEV *S = *I;
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
+ if (AR->getLoop() == L) {
+ *I = AR->getStart();
+ AddRecStride = AR->getStepRecurrence(*SE);
+ break;
+ }
+ }
+ if (!AddRecStride)
+ return false;
+
+ // Add up everything else into a start value (which may not be
+ // loop-invariant).
+ const SCEV *AddRecStart = SE->getAddExpr(Subexprs);
// Use getSCEVAtScope to attempt to simplify other loops out of
// the picture.
- const SCEV *AddRecStart = AddRec->getStart();
AddRecStart = SE->getSCEVAtScope(AddRecStart, UseLoop);
- const SCEV *AddRecStride = AddRec->getStepRecurrence(*SE);
-
- // FIXME: If Start contains an SCEVAddRecExpr from a different loop, other
- // than an outer loop of the current loop, reject it. LSR has no concept of
- // operating on more than one loop at a time so don't confuse it with such
- // expressions.
- if (containsAddRecFromDifferentLoop(AddRecStart, L))
- return false;
Start = SE->getAddExpr(Start, AddRecStart);
@@ -130,7 +126,7 @@ static bool getSCEVStartAndStride(const SCEV *&SH, Loop *L, Loop *UseLoop,
DEBUG(dbgs() << "[";
WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
- dbgs() << "] Variable stride: " << *AddRec << "\n");
+ dbgs() << "] Variable stride: " << *AddRecStride << "\n");
}
Stride = AddRecStride;
@@ -246,14 +242,6 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
}
if (AddUserToIVUsers) {
- IVUsersOfOneStride *StrideUses = IVUsesByStride[Stride];
- if (!StrideUses) { // First occurrence of this stride?
- StrideOrder.push_back(Stride);
- StrideUses = new IVUsersOfOneStride(Stride);
- IVUses.push_back(StrideUses);
- IVUsesByStride[Stride] = StrideUses;
- }
-
// Okay, we found a user that we cannot reduce. Analyze the instruction
// and decide what to do with it. If we are a use inside of the loop, use
// the value before incrementation, otherwise use it after incrementation.
@@ -261,27 +249,21 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
// The value used will be incremented by the stride more than we are
// expecting, so subtract this off.
const SCEV *NewStart = SE->getMinusSCEV(Start, Stride);
- StrideUses->addUser(NewStart, User, I);
- StrideUses->Users.back().setIsUseOfPostIncrementedValue(true);
+ IVUses.push_back(new IVStrideUse(this, Stride, NewStart, User, I));
+ IVUses.back().setIsUseOfPostIncrementedValue(true);
DEBUG(dbgs() << " USING POSTINC SCEV, START=" << *NewStart<< "\n");
} else {
- StrideUses->addUser(Start, User, I);
+ IVUses.push_back(new IVStrideUse(this, Stride, Start, User, I));
}
}
}
return true;
}
-void IVUsers::AddUser(const SCEV *Stride, const SCEV *Offset,
- Instruction *User, Value *Operand) {
- IVUsersOfOneStride *StrideUses = IVUsesByStride[Stride];
- if (!StrideUses) { // First occurrence of this stride?
- StrideOrder.push_back(Stride);
- StrideUses = new IVUsersOfOneStride(Stride);
- IVUses.push_back(StrideUses);
- IVUsesByStride[Stride] = StrideUses;
- }
- IVUsesByStride[Stride]->addUser(Offset, User, Operand);
+IVStrideUse &IVUsers::AddUser(const SCEV *Stride, const SCEV *Offset,
+ Instruction *User, Value *Operand) {
+ IVUses.push_back(new IVStrideUse(this, Stride, Offset, User, Operand));
+ return IVUses.back();
}
IVUsers::IVUsers()
@@ -315,15 +297,15 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
/// value of the OperandValToReplace of the given IVStrideUse.
const SCEV *IVUsers::getReplacementExpr(const IVStrideUse &U) const {
// Start with zero.
- const SCEV *RetVal = SE->getIntegerSCEV(0, U.getParent()->Stride->getType());
+ const SCEV *RetVal = SE->getIntegerSCEV(0, U.getStride()->getType());
// Create the basic add recurrence.
- RetVal = SE->getAddRecExpr(RetVal, U.getParent()->Stride, L);
+ RetVal = SE->getAddRecExpr(RetVal, U.getStride(), L);
// Add the offset in a separate step, because it may be loop-variant.
RetVal = SE->getAddExpr(RetVal, U.getOffset());
// For uses of post-incremented values, add an extra stride to compute
// the actual replacement value.
if (U.isUseOfPostIncrementedValue())
- RetVal = SE->getAddExpr(RetVal, U.getParent()->Stride);
+ RetVal = SE->getAddExpr(RetVal, U.getStride());
return RetVal;
}
@@ -332,9 +314,9 @@ const SCEV *IVUsers::getReplacementExpr(const IVStrideUse &U) const {
/// isUseOfPostIncrementedValue flag.
const SCEV *IVUsers::getCanonicalExpr(const IVStrideUse &U) const {
// Start with zero.
- const SCEV *RetVal = SE->getIntegerSCEV(0, U.getParent()->Stride->getType());
+ const SCEV *RetVal = SE->getIntegerSCEV(0, U.getStride()->getType());
// Create the basic add recurrence.
- RetVal = SE->getAddRecExpr(RetVal, U.getParent()->Stride, L);
+ RetVal = SE->getAddRecExpr(RetVal, U.getStride(), L);
// Add the offset in a separate step, because it may be loop-variant.
RetVal = SE->getAddExpr(RetVal, U.getOffset());
return RetVal;
@@ -349,24 +331,20 @@ void IVUsers::print(raw_ostream &OS, const Module *M) const {
}
OS << ":\n";
- for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e; ++Stride) {
- std::map<const SCEV *, IVUsersOfOneStride*>::const_iterator SI =
- IVUsesByStride.find(StrideOrder[Stride]);
- assert(SI != IVUsesByStride.end() && "Stride doesn't exist!");
- OS << " Stride " << *SI->first->getType() << " " << *SI->first << ":\n";
-
- for (ilist<IVStrideUse>::const_iterator UI = SI->second->Users.begin(),
- E = SI->second->Users.end(); UI != E; ++UI) {
- OS << " ";
- WriteAsOperand(OS, UI->getOperandValToReplace(), false);
- OS << " = ";
- OS << *getReplacementExpr(*UI);
- if (UI->isUseOfPostIncrementedValue())
- OS << " (post-inc)";
- OS << " in ";
- UI->getUser()->print(OS);
- OS << '\n';
- }
+ // Use a default AssemblyAnnotationWriter to suppress the default info
+ // comments, which aren't relevant here.
+ AssemblyAnnotationWriter Annotator;
+ for (ilist<IVStrideUse>::const_iterator UI = IVUses.begin(),
+ E = IVUses.end(); UI != E; ++UI) {
+ OS << " ";
+ WriteAsOperand(OS, UI->getOperandValToReplace(), false);
+ OS << " = "
+ << *getReplacementExpr(*UI);
+ if (UI->isUseOfPostIncrementedValue())
+ OS << " (post-inc)";
+ OS << " in ";
+ UI->getUser()->print(OS, &Annotator);
+ OS << '\n';
}
}
@@ -375,14 +353,12 @@ void IVUsers::dump() const {
}
void IVUsers::releaseMemory() {
- IVUsesByStride.clear();
- StrideOrder.clear();
Processed.clear();
IVUses.clear();
}
void IVStrideUse::deleted() {
// Remove this user from the list.
- Parent->Users.erase(this);
+ Parent->IVUses.erase(this);
// this now dangles!
}
diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp
index 651c918..972d034 100644
--- a/lib/Analysis/InlineCost.cpp
+++ b/lib/Analysis/InlineCost.cpp
@@ -25,26 +25,28 @@ unsigned InlineCostAnalyzer::FunctionInfo::
CountCodeReductionForConstant(Value *V) {
unsigned Reduction = 0;
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
- if (isa<BranchInst>(*UI))
- Reduction += 40; // Eliminating a conditional branch is a big win
- else if (SwitchInst *SI = dyn_cast<SwitchInst>(*UI))
- // Eliminating a switch is a big win, proportional to the number of edges
- // deleted.
- Reduction += (SI->getNumSuccessors()-1) * 40;
- else if (isa<IndirectBrInst>(*UI))
- // Eliminating an indirect branch is a big win.
- Reduction += 200;
- else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
+ if (isa<BranchInst>(*UI) || isa<SwitchInst>(*UI)) {
+ // We will be able to eliminate all but one of the successors.
+ const TerminatorInst &TI = cast<TerminatorInst>(**UI);
+ const unsigned NumSucc = TI.getNumSuccessors();
+ unsigned Instrs = 0;
+ for (unsigned I = 0; I != NumSucc; ++I)
+ Instrs += TI.getSuccessor(I)->size();
+ // We don't know which blocks will be eliminated, so use the average size.
+ Reduction += InlineConstants::InstrCost*Instrs*(NumSucc-1)/NumSucc;
+ } else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
// Turning an indirect call into a direct call is a BIG win
- Reduction += CI->getCalledValue() == V ? 500 : 0;
+ if (CI->getCalledValue() == V)
+ Reduction += InlineConstants::IndirectCallBonus;
} else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
// Turning an indirect call into a direct call is a BIG win
- Reduction += II->getCalledValue() == V ? 500 : 0;
+ if (II->getCalledValue() == V)
+ Reduction += InlineConstants::IndirectCallBonus;
} else {
// Figure out if this instruction will be removed due to simple constant
// propagation.
Instruction &Inst = cast<Instruction>(**UI);
-
+
// We can't constant propagate instructions which have effects or
// read memory.
//
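A worked instance of the averaging above: a switch with NumSucc = 4 successors of ten instructions each gives Instrs = 40, and since all but one block is expected to die, the estimated saving is three quarters of them.

    unsigned NumSucc = 4, Instrs = 40;  // e.g. four 10-instruction blocks
    unsigned Reduction =
        InlineConstants::InstrCost * Instrs * (NumSucc - 1) / NumSucc;
    // == InstrCost * 30: the expected number of instructions removed.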
@@ -53,7 +55,7 @@ unsigned InlineCostAnalyzer::FunctionInfo::
// Unfortunately, we don't know the pointer that may get propagated here,
// so we can't make this decision.
if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
- isa<AllocaInst>(Inst))
+ isa<AllocaInst>(Inst))
continue;
bool AllOperandsConstant = true;
@@ -65,7 +67,7 @@ unsigned InlineCostAnalyzer::FunctionInfo::
if (AllOperandsConstant) {
// We will get to remove this instruction...
- Reduction += 7;
+ Reduction += InlineConstants::InstrCost;
// And any other instructions that use it which become constants
// themselves.
@@ -87,11 +89,14 @@ unsigned InlineCostAnalyzer::FunctionInfo::
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
Instruction *I = cast<Instruction>(*UI);
if (isa<LoadInst>(I) || isa<StoreInst>(I))
- Reduction += 10;
+ Reduction += InlineConstants::InstrCost;
else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
// If the GEP has variable indices, we won't be able to do much with it.
- if (!GEP->hasAllConstantIndices())
- Reduction += CountCodeReductionForAlloca(GEP)+15;
+ if (GEP->hasAllConstantIndices())
+ Reduction += CountCodeReductionForAlloca(GEP);
+ } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
+ // Track pointer through bitcasts.
+ Reduction += CountCodeReductionForAlloca(BCI);
} else {
// If there is some other strange instruction, we're not going to be able
// to do much if we inline this.
@@ -158,10 +163,11 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
(F->getName() == "setjmp" || F->getName() == "_setjmp"))
NeverInline = true;
- // Calls often compile into many machine instructions. Bump up their
- // cost to reflect this.
- if (!isa<IntrinsicInst>(II) && !callIsSmall(CS.getCalledFunction()))
- NumInsts += InlineConstants::CallPenalty;
+ if (!isa<IntrinsicInst>(II) && !callIsSmall(CS.getCalledFunction())) {
+ // Each argument to a call takes on average one instruction to set up.
+ NumInsts += CS.arg_size();
+ ++NumCalls;
+ }
}
if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
@@ -223,8 +229,14 @@ void InlineCostAnalyzer::FunctionInfo::analyzeFunction(Function *F) {
if (Metrics.NumRets==1)
--Metrics.NumInsts;
+ // Don't bother calculating argument weights if we are never going to inline
+ // the function anyway.
+ if (Metrics.NeverInline)
+ return;
+
// Check out all of the arguments to the function, figuring out how much
// code can be eliminated if one of the arguments is a constant.
+ ArgumentWeights.reserve(F->arg_size());
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
ArgumentWeights.push_back(ArgInfo(CountCodeReductionForConstant(I),
CountCodeReductionForAlloca(I)));
@@ -313,23 +325,18 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
I != E; ++I, ++ArgNo) {
// Each argument passed in has a cost at both the caller and the callee
- // sides. This favors functions that take many arguments over functions
- // that take few arguments.
- InlineCost -= 20;
-
- // If this is a function being passed in, it is very likely that we will be
- // able to turn an indirect function call into a direct function call.
- if (isa<Function>(I))
- InlineCost -= 100;
-
+ // sides. Measurements show that each argument costs about the same as an
+ // instruction.
+ InlineCost -= InlineConstants::InstrCost;
+
// If an alloca is passed in, inlining this function is likely to allow
// significant future optimization possibilities (like scalar promotion, and
// scalarization), so encourage the inlining of the function.
//
- else if (isa<AllocaInst>(I)) {
+ if (isa<AllocaInst>(I)) {
if (ArgNo < CalleeFI.ArgumentWeights.size())
InlineCost -= CalleeFI.ArgumentWeights[ArgNo].AllocaWeight;
-
+
// If this is a constant being passed into the function, use the argument
// weights calculated for the callee to determine how much will be folded
// away with this information.
@@ -341,14 +348,17 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
// Now that we have considered all of the factors that make the call site more
// likely to be inlined, look at factors that make us not want to inline it.
-
+
+ // Calls usually take a long time, so they make the inlining gain smaller.
+ InlineCost += CalleeFI.Metrics.NumCalls * InlineConstants::CallPenalty;
+
// Don't inline into something too big, which would make it bigger.
// "size" here is the number of basic blocks, not instructions.
//
InlineCost += Caller->size()/15;
// Look at the size of the callee. Each instruction counts as 5.
- InlineCost += CalleeFI.Metrics.NumInsts*5;
+ InlineCost += CalleeFI.Metrics.NumInsts*InlineConstants::InstrCost;
return llvm::InlineCost::get(InlineCost);
}
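A simplified sketch of the resulting cost accounting, using the InlineConstants names the patch substitutes for the old magic numbers; the alloca and constant-argument weights applied above are deliberately omitted:

    #include "llvm/Analysis/InlineCost.h"
    using namespace llvm;

    // Simplified; the real code also applies per-argument weights.
    int EstimateInlineCost(unsigned CalleeInsts, unsigned CalleeCalls,
                           unsigned NumArgs, unsigned CallerBlocks) {
      int Cost = 0;
      Cost -= NumArgs * InlineConstants::InstrCost;       // argument setup saved
      Cost += CalleeCalls * InlineConstants::CallPenalty; // calls stay expensive
      Cost += CallerBlocks / 15;                          // caller-bloat guard
      Cost += CalleeInsts * InlineConstants::InstrCost;   // callee body size
      return Cost;
    }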
diff --git a/lib/Analysis/LiveValues.cpp b/lib/Analysis/LiveValues.cpp
index 02ec7d3..1b91d93 100644
--- a/lib/Analysis/LiveValues.cpp
+++ b/lib/Analysis/LiveValues.cpp
@@ -184,7 +184,7 @@ LiveValues::Memo &LiveValues::compute(const Value *V) {
}
}
- // If the value was never used outside the the block in which it was
+ // If the value was never used outside the block in which it was
// defined, it's killed in that block.
if (!LiveOutOfDefBB)
M.Killed.insert(DefBB);
diff --git a/lib/Analysis/Makefile b/lib/Analysis/Makefile
index f61b8aa..4af6d35 100644
--- a/lib/Analysis/Makefile
+++ b/lib/Analysis/Makefile
@@ -11,7 +11,6 @@ LEVEL = ../..
LIBRARYNAME = LLVMAnalysis
DIRS = IPA
BUILD_ARCHIVE = 1
-CXXFLAGS = -fno-rtti
include $(LEVEL)/Makefile.common
diff --git a/lib/Analysis/MemoryBuiltins.cpp b/lib/Analysis/MemoryBuiltins.cpp
index b448628..297b588 100644
--- a/lib/Analysis/MemoryBuiltins.cpp
+++ b/lib/Analysis/MemoryBuiltins.cpp
@@ -24,7 +24,7 @@ using namespace llvm;
// malloc Call Utility Functions.
//
-/// isMalloc - Returns true if the the value is either a malloc call or a
+/// isMalloc - Returns true if the value is either a malloc call or a
/// bitcast of the result of a malloc call.
bool llvm::isMalloc(const Value *I) {
return extractMallocCall(I) || extractMallocCallFromBitCast(I);
@@ -183,7 +183,7 @@ Value *llvm::getMallocArraySize(CallInst *CI, const TargetData *TD,
// free Call Utility Functions.
//
-/// isFreeCall - Returns true if the the value is a call to the builtin free()
+/// isFreeCall - Returns true if the value is a call to the builtin free()
bool llvm::isFreeCall(const Value *I) {
const CallInst *CI = dyn_cast<CallInst>(I);
if (!CI)
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 2f44913..9ee7d3a 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -214,8 +214,8 @@ bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeID &ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scTruncate, op, ty) {
- assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((Op->getType()->isIntegerTy() || isa<PointerType>(Op->getType())) &&
+ (Ty->isIntegerTy() || isa<PointerType>(Ty)) &&
"Cannot truncate non-integer value!");
}
@@ -226,8 +226,8 @@ void SCEVTruncateExpr::print(raw_ostream &OS) const {
SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeID &ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scZeroExtend, op, ty) {
- assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((Op->getType()->isIntegerTy() || isa<PointerType>(Op->getType())) &&
+ (Ty->isIntegerTy() || isa<PointerType>(Ty)) &&
"Cannot zero extend non-integer value!");
}
@@ -238,8 +238,8 @@ void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeID &ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scSignExtend, op, ty) {
- assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((Op->getType()->isIntegerTy() || isa<PointerType>(Op->getType())) &&
+ (Ty->isIntegerTy() || isa<PointerType>(Ty)) &&
"Cannot sign extend non-integer value!");
}
@@ -312,6 +312,21 @@ bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
return true;
}
+bool
+SCEVAddRecExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
+ return DT->dominates(L->getHeader(), BB) &&
+ SCEVNAryExpr::dominates(BB, DT);
+}
+
+bool
+SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
+ // This uses a "dominates" query instead of a "properly dominates" query because
+ // the instruction which produces the addrec's value is a PHI, and a PHI
+ // effectively properly dominates its entire containing block.
+ return DT->dominates(L->getHeader(), BB) &&
+ SCEVNAryExpr::properlyDominates(BB, DT);
+}
+
void SCEVAddRecExpr::print(raw_ostream &OS) const {
OS << "{" << *Operands[0];
for (unsigned i = 1, e = Operands.size(); i != e; ++i)
@@ -321,15 +336,6 @@ void SCEVAddRecExpr::print(raw_ostream &OS) const {
OS << ">";
}
-void SCEVFieldOffsetExpr::print(raw_ostream &OS) const {
- // LLVM struct fields don't have names, so just print the field number.
- OS << "offsetof(" << *STy << ", " << FieldNo << ")";
-}
-
-void SCEVAllocSizeExpr::print(raw_ostream &OS) const {
- OS << "sizeof(" << *AllocTy << ")";
-}
-
bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
// All non-instruction values are loop invariant. All instructions are loop
// invariant if they are not contained in the specified loop.
@@ -356,7 +362,91 @@ const Type *SCEVUnknown::getType() const {
return V->getType();
}
+bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
+ if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+ if (VCE->getOpcode() == Instruction::PtrToInt)
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
+ if (CE->getOpcode() == Instruction::GetElementPtr &&
+ CE->getOperand(0)->isNullValue() &&
+ CE->getNumOperands() == 2)
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
+ if (CI->isOne()) {
+ AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
+ ->getElementType();
+ return true;
+ }
+
+ return false;
+}
+
+bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
+ if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+ if (VCE->getOpcode() == Instruction::PtrToInt)
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
+ if (CE->getOpcode() == Instruction::GetElementPtr &&
+ CE->getOperand(0)->isNullValue()) {
+ const Type *Ty =
+ cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
+ if (const StructType *STy = dyn_cast<StructType>(Ty))
+ if (!STy->isPacked() &&
+ CE->getNumOperands() == 3 &&
+ CE->getOperand(1)->isNullValue()) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
+ if (CI->isOne() &&
+ STy->getNumElements() == 2 &&
+ STy->getElementType(0)->isIntegerTy(1)) {
+ AllocTy = STy->getElementType(1);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
+ if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+ if (VCE->getOpcode() == Instruction::PtrToInt)
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
+ if (CE->getOpcode() == Instruction::GetElementPtr &&
+ CE->getNumOperands() == 3 &&
+ CE->getOperand(0)->isNullValue() &&
+ CE->getOperand(1)->isNullValue()) {
+ const Type *Ty =
+ cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
+ // Ignore vector types here so that ScalarEvolutionExpander doesn't
+ // emit getelementptrs that index into vectors.
+ if (isa<StructType>(Ty) || isa<ArrayType>(Ty)) {
+ CTy = Ty;
+ FieldNo = CE->getOperand(2);
+ return true;
+ }
+ }
+
+ return false;
+}
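These recognizers match the target-independent null-pointer idioms that ConstantExpr produces, and that the new getSizeOfExpr/getAlignOfExpr/getOffsetOfExpr below emit; a sketch of the shapes involved:

    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    // The exact forms isSizeOf/isAlignOf/isOffsetOf pattern-match:
    Constant *SizeOfIdiom(const Type *Ty) {
      return ConstantExpr::getSizeOf(Ty);   // ptrtoint(gep(Ty* null, 1))
    }
    Constant *AlignOfIdiom(const Type *Ty) {
      return ConstantExpr::getAlignOf(Ty);  // offset of Ty within {i1, Ty}
    }
    Constant *OffsetOfIdiom(const StructType *STy, unsigned FieldNo) {
      return ConstantExpr::getOffsetOf(STy, FieldNo);
    }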
+
void SCEVUnknown::print(raw_ostream &OS) const {
+ const Type *AllocTy;
+ if (isSizeOf(AllocTy)) {
+ OS << "sizeof(" << *AllocTy << ")";
+ return;
+ }
+ if (isAlignOf(AllocTy)) {
+ OS << "alignof(" << *AllocTy << ")";
+ return;
+ }
+
+ const Type *CTy;
+ Constant *FieldNo;
+ if (isOffsetOf(CTy, FieldNo)) {
+ OS << "offsetof(" << *CTy << ", ";
+ WriteAsOperand(OS, FieldNo, false);
+ OS << ")";
+ return;
+ }
+
+ // Otherwise just print it normally.
WriteAsOperand(OS, V, false);
}
@@ -515,21 +605,6 @@ namespace {
return operator()(LC->getOperand(), RC->getOperand());
}
- // Compare offsetof expressions.
- if (const SCEVFieldOffsetExpr *LA = dyn_cast<SCEVFieldOffsetExpr>(LHS)) {
- const SCEVFieldOffsetExpr *RA = cast<SCEVFieldOffsetExpr>(RHS);
- if (CompareTypes(LA->getStructType(), RA->getStructType()) ||
- CompareTypes(RA->getStructType(), LA->getStructType()))
- return CompareTypes(LA->getStructType(), RA->getStructType());
- return LA->getFieldNo() < RA->getFieldNo();
- }
-
- // Compare sizeof expressions by the allocation type.
- if (const SCEVAllocSizeExpr *LA = dyn_cast<SCEVAllocSizeExpr>(LHS)) {
- const SCEVAllocSizeExpr *RA = cast<SCEVAllocSizeExpr>(RHS);
- return CompareTypes(LA->getAllocType(), RA->getAllocType());
- }
-
llvm_unreachable("Unknown SCEV kind!");
return false;
}
@@ -2172,74 +2247,38 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}
-const SCEV *ScalarEvolution::getFieldOffsetExpr(const StructType *STy,
- unsigned FieldNo) {
- // If we have TargetData we can determine the constant offset.
- if (TD) {
- const Type *IntPtrTy = TD->getIntPtrType(getContext());
- const StructLayout &SL = *TD->getStructLayout(STy);
- uint64_t Offset = SL.getElementOffset(FieldNo);
- return getIntegerSCEV(Offset, IntPtrTy);
- }
+const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
+ Constant *C = ConstantExpr::getSizeOf(AllocTy);
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+ C = ConstantFoldConstantExpression(CE, TD);
+ const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
+ return getTruncateOrZeroExtend(getSCEV(C), Ty);
+}
- // Field 0 is always at offset 0.
- if (FieldNo == 0) {
- const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
- return getIntegerSCEV(0, Ty);
- }
+const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
+ Constant *C = ConstantExpr::getAlignOf(AllocTy);
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+ C = ConstantFoldConstantExpression(CE, TD);
+ const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
+ return getTruncateOrZeroExtend(getSCEV(C), Ty);
+}
- // Okay, it looks like we really DO need an offsetof expr. Check to see if we
- // already have one, otherwise create a new one.
- FoldingSetNodeID ID;
- ID.AddInteger(scFieldOffset);
- ID.AddPointer(STy);
- ID.AddInteger(FieldNo);
- void *IP = 0;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVFieldOffsetExpr>();
+const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
+ unsigned FieldNo) {
+ Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+ C = ConstantFoldConstantExpression(CE, TD);
const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
- new (S) SCEVFieldOffsetExpr(ID, Ty, STy, FieldNo);
- UniqueSCEVs.InsertNode(S, IP);
- return S;
+ return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
-const SCEV *ScalarEvolution::getAllocSizeExpr(const Type *AllocTy) {
- // If we have TargetData we can determine the constant size.
- if (TD && AllocTy->isSized()) {
- const Type *IntPtrTy = TD->getIntPtrType(getContext());
- return getIntegerSCEV(TD->getTypeAllocSize(AllocTy), IntPtrTy);
- }
-
- // Expand an array size into the element size times the number
- // of elements.
- if (const ArrayType *ATy = dyn_cast<ArrayType>(AllocTy)) {
- const SCEV *E = getAllocSizeExpr(ATy->getElementType());
- return getMulExpr(
- E, getConstant(ConstantInt::get(cast<IntegerType>(E->getType()),
- ATy->getNumElements())));
- }
-
- // Expand a vector size into the element size times the number
- // of elements.
- if (const VectorType *VTy = dyn_cast<VectorType>(AllocTy)) {
- const SCEV *E = getAllocSizeExpr(VTy->getElementType());
- return getMulExpr(
- E, getConstant(ConstantInt::get(cast<IntegerType>(E->getType()),
- VTy->getNumElements())));
- }
-
- // Okay, it looks like we really DO need a sizeof expr. Check to see if we
- // already have one, otherwise create a new one.
- FoldingSetNodeID ID;
- ID.AddInteger(scAllocSize);
- ID.AddPointer(AllocTy);
- void *IP = 0;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVAllocSizeExpr>();
- const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
- new (S) SCEVAllocSizeExpr(ID, Ty, AllocTy);
- UniqueSCEVs.InsertNode(S, IP);
- return S;
+const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
+ Constant *FieldNo) {
+ Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+ C = ConstantFoldConstantExpression(CE, TD);
+ const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
+ return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
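Usage sketch, assuming a ScalarEvolution &SE and a sized const Type *Ty: with TargetData available the expressions fold to plain integer constants; without it they remain symbolic and print via the recognizers above.

    const SCEV *Size  = SE.getSizeOfExpr(Ty);
    const SCEV *Align = SE.getAlignOfExpr(Ty);
    // Both are truncated or zero-extended to the effective pointer type.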
const SCEV *ScalarEvolution::getUnknown(Value *V) {
@@ -2269,7 +2308,7 @@ const SCEV *ScalarEvolution::getUnknown(Value *V) {
/// has access to target-specific information.
bool ScalarEvolution::isSCEVable(const Type *Ty) const {
// Integers and pointers are always SCEVable.
- return Ty->isInteger() || isa<PointerType>(Ty);
+ return Ty->isIntegerTy() || isa<PointerType>(Ty);
}
/// getTypeSizeInBits - Return the size in bits of the specified type,
@@ -2282,7 +2321,7 @@ uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
return TD->getTypeSizeInBits(Ty);
// Integer types have fixed sizes.
- if (Ty->isInteger())
+ if (Ty->isIntegerTy())
return Ty->getPrimitiveSizeInBits();
// The only other support type is pointer. Without TargetData, conservatively
@@ -2298,7 +2337,7 @@ uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");
- if (Ty->isInteger())
+ if (Ty->isIntegerTy())
return Ty;
// The only other support type is pointer.
@@ -2327,7 +2366,7 @@ const SCEV *ScalarEvolution::getSCEV(Value *V) {
/// getIntegerSCEV - Given a SCEVable type, create a constant for the
/// specified signed integer value and return a SCEV for the constant.
-const SCEV *ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
+const SCEV *ScalarEvolution::getIntegerSCEV(int64_t Val, const Type *Ty) {
const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
return getConstant(ConstantInt::get(ITy, Val));
}
@@ -2373,8 +2412,8 @@ const SCEV *
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((SrcTy->isIntegerTy() || isa<PointerType>(SrcTy)) &&
+ (Ty->isIntegerTy() || isa<PointerType>(Ty)) &&
"Cannot truncate or zero extend with non-integer arguments!");
if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
return V; // No conversion
@@ -2390,8 +2429,8 @@ const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((SrcTy->isIntegerTy() || isa<PointerType>(SrcTy)) &&
+ (Ty->isIntegerTy() || isa<PointerType>(Ty)) &&
"Cannot truncate or zero extend with non-integer arguments!");
if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
return V; // No conversion
@@ -2406,8 +2445,8 @@ ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((SrcTy->isIntegerTy() || isa<PointerType>(SrcTy)) &&
+ (Ty->isIntegerTy() || isa<PointerType>(Ty)) &&
"Cannot noop or zero extend with non-integer arguments!");
assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
"getNoopOrZeroExtend cannot truncate!");
@@ -2422,8 +2461,8 @@ ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((SrcTy->isIntegerTy() || isa<PointerType>(SrcTy)) &&
+ (Ty->isIntegerTy() || isa<PointerType>(Ty)) &&
"Cannot noop or sign extend with non-integer arguments!");
assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
"getNoopOrSignExtend cannot truncate!");
@@ -2439,8 +2478,8 @@ ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((SrcTy->isIntegerTy() || isa<PointerType>(SrcTy)) &&
+ (Ty->isIntegerTy() || isa<PointerType>(Ty)) &&
"Cannot noop or any extend with non-integer arguments!");
assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
"getNoopOrAnyExtend cannot truncate!");
@@ -2454,8 +2493,8 @@ ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((SrcTy->isIntegerTy() || isa<PointerType>(SrcTy)) &&
+ (Ty->isIntegerTy() || isa<PointerType>(Ty)) &&
"Cannot truncate or noop with non-integer arguments!");
assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
"getTruncateOrNoop cannot extend!");
@@ -2527,7 +2566,7 @@ ScalarEvolution::ForgetSymbolicName(Instruction *I, const SCEV *SymName) {
if (It != Scalars.end()) {
// Short-circuit the def-use traversal if the symbolic name
// ceases to appear in expressions.
- if (!It->second->hasOperand(SymName))
+ if (It->second != SymName && !It->second->hasOperand(SymName))
continue;
// SCEVUnknown for a PHI either means that it has an unrecognized
@@ -2689,16 +2728,15 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
// For a struct, add the member offset.
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
TotalOffset = getAddExpr(TotalOffset,
- getFieldOffsetExpr(STy, FieldNo),
+ getOffsetOfExpr(STy, FieldNo),
/*HasNUW=*/false, /*HasNSW=*/InBounds);
} else {
// For an array, add the element offset, explicitly scaled.
const SCEV *LocalOffset = getSCEV(Index);
- if (!isa<PointerType>(LocalOffset->getType()))
- // Getelementptr indicies are signed.
- LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
+ // Getelementptr indices are signed.
+ LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
// Lower "inbounds" GEPs to NSW arithmetic.
- LocalOffset = getMulExpr(LocalOffset, getAllocSizeExpr(*GTI),
+ LocalOffset = getMulExpr(LocalOffset, getSizeOfExpr(*GTI),
/*HasNUW=*/false, /*HasNSW=*/InBounds);
TotalOffset = getAddExpr(TotalOffset, LocalOffset,
/*HasNUW=*/false, /*HasNSW=*/InBounds);
@@ -2797,62 +2835,67 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
return ConstantRange(C->getValue()->getValue());
+ unsigned BitWidth = getTypeSizeInBits(S->getType());
+ ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
+
+ // If the value has known zeros, the maximum unsigned value will have those
+ // known zeros as well.
+ uint32_t TZ = GetMinTrailingZeros(S);
+ if (TZ != 0)
+ ConservativeResult =
+ ConstantRange(APInt::getMinValue(BitWidth),
+ APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
+
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
ConstantRange X = getUnsignedRange(Add->getOperand(0));
for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
X = X.add(getUnsignedRange(Add->getOperand(i)));
- return X;
+ return ConservativeResult.intersectWith(X);
}
if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
ConstantRange X = getUnsignedRange(Mul->getOperand(0));
for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
- return X;
+ return ConservativeResult.intersectWith(X);
}
if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
ConstantRange X = getUnsignedRange(SMax->getOperand(0));
for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
X = X.smax(getUnsignedRange(SMax->getOperand(i)));
- return X;
+ return ConservativeResult.intersectWith(X);
}
if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
ConstantRange X = getUnsignedRange(UMax->getOperand(0));
for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
X = X.umax(getUnsignedRange(UMax->getOperand(i)));
- return X;
+ return ConservativeResult.intersectWith(X);
}
if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
ConstantRange X = getUnsignedRange(UDiv->getLHS());
ConstantRange Y = getUnsignedRange(UDiv->getRHS());
- return X.udiv(Y);
+ return ConservativeResult.intersectWith(X.udiv(Y));
}
if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
ConstantRange X = getUnsignedRange(ZExt->getOperand());
- return X.zeroExtend(cast<IntegerType>(ZExt->getType())->getBitWidth());
+ return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
}
if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
ConstantRange X = getUnsignedRange(SExt->getOperand());
- return X.signExtend(cast<IntegerType>(SExt->getType())->getBitWidth());
+ return ConservativeResult.intersectWith(X.signExtend(BitWidth));
}
if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
ConstantRange X = getUnsignedRange(Trunc->getOperand());
- return X.truncate(cast<IntegerType>(Trunc->getType())->getBitWidth());
+ return ConservativeResult.intersectWith(X.truncate(BitWidth));
}
- ConstantRange FullSet(getTypeSizeInBits(S->getType()), true);
-
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
- const SCEV *T = getBackedgeTakenCount(AddRec->getLoop());
- const SCEVConstant *Trip = dyn_cast<SCEVConstant>(T);
- ConstantRange ConservativeResult = FullSet;
-
// If there's no unsigned wrap, the value will never be less than its
// initial value.
if (AddRec->hasNoUnsignedWrap())
@@ -2862,10 +2905,11 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
APInt(getTypeSizeInBits(C->getType()), 0));
// TODO: non-affine addrec
- if (Trip && AddRec->isAffine()) {
+ if (AddRec->isAffine()) {
const Type *Ty = AddRec->getType();
const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
- if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) {
+ if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
+ getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
const SCEV *Start = AddRec->getStart();
@@ -2883,7 +2927,7 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
EndRange.getUnsignedMax());
if (Min.isMinValue() && Max.isMaxValue())
return ConservativeResult;
- return ConstantRange(Min, Max+1);
+ return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
}
}
@@ -2897,11 +2941,11 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
if (Ones == ~Zeros + 1)
- return FullSet;
- return ConstantRange(Ones, ~Zeros + 1);
+ return ConservativeResult;
+ return ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1));
}
- return FullSet;
+ return ConservativeResult;
}
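A worked instance of the new trailing-zeros clamp, for BitWidth = 8 and TZ = 2: the largest value whose two low bits are known zero is 0b11111100 = 252, so the conservative unsigned range becomes [0, 253).

    #include "llvm/ADT/APInt.h"
    #include "llvm/Support/ConstantRange.h"
    using namespace llvm;

    APInt UMax = APInt::getMaxValue(8).lshr(2).shl(2) + 1;    // 252 + 1
    ConstantRange Conservative(APInt::getMinValue(8), UMax);  // [0, 253)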
/// getSignedRange - Determine the signed range for a particular SCEV.
@@ -2912,62 +2956,67 @@ ScalarEvolution::getSignedRange(const SCEV *S) {
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
return ConstantRange(C->getValue()->getValue());
+ unsigned BitWidth = getTypeSizeInBits(S->getType());
+ ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
+
+ // If the value has known zeros, the maximum signed value will have those
+ // known zeros as well.
+ uint32_t TZ = GetMinTrailingZeros(S);
+ if (TZ != 0)
+ ConservativeResult =
+ ConstantRange(APInt::getSignedMinValue(BitWidth),
+ APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
+
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
ConstantRange X = getSignedRange(Add->getOperand(0));
for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
X = X.add(getSignedRange(Add->getOperand(i)));
- return X;
+ return ConservativeResult.intersectWith(X);
}
if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
ConstantRange X = getSignedRange(Mul->getOperand(0));
for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
X = X.multiply(getSignedRange(Mul->getOperand(i)));
- return X;
+ return ConservativeResult.intersectWith(X);
}
if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
ConstantRange X = getSignedRange(SMax->getOperand(0));
for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
X = X.smax(getSignedRange(SMax->getOperand(i)));
- return X;
+ return ConservativeResult.intersectWith(X);
}
if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
ConstantRange X = getSignedRange(UMax->getOperand(0));
for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
X = X.umax(getSignedRange(UMax->getOperand(i)));
- return X;
+ return ConservativeResult.intersectWith(X);
}
if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
ConstantRange X = getSignedRange(UDiv->getLHS());
ConstantRange Y = getSignedRange(UDiv->getRHS());
- return X.udiv(Y);
+ return ConservativeResult.intersectWith(X.udiv(Y));
}
if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
ConstantRange X = getSignedRange(ZExt->getOperand());
- return X.zeroExtend(cast<IntegerType>(ZExt->getType())->getBitWidth());
+ return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
}
if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
ConstantRange X = getSignedRange(SExt->getOperand());
- return X.signExtend(cast<IntegerType>(SExt->getType())->getBitWidth());
+ return ConservativeResult.intersectWith(X.signExtend(BitWidth));
}
if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
ConstantRange X = getSignedRange(Trunc->getOperand());
- return X.truncate(cast<IntegerType>(Trunc->getType())->getBitWidth());
+ return ConservativeResult.intersectWith(X.truncate(BitWidth));
}
- ConstantRange FullSet(getTypeSizeInBits(S->getType()), true);
-
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
- const SCEV *T = getBackedgeTakenCount(AddRec->getLoop());
- const SCEVConstant *Trip = dyn_cast<SCEVConstant>(T);
- ConstantRange ConservativeResult = FullSet;
-
// If there's no signed wrap, and all the operands have the same sign or
// zero, the value won't ever change sign.
if (AddRec->hasNoSignedWrap()) {
@@ -2977,20 +3026,22 @@ ScalarEvolution::getSignedRange(const SCEV *S) {
if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
}
- unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
if (AllNonNeg)
- ConservativeResult = ConstantRange(APInt(BitWidth, 0),
- APInt::getSignedMinValue(BitWidth));
+ ConservativeResult = ConservativeResult.intersectWith(
+ ConstantRange(APInt(BitWidth, 0),
+ APInt::getSignedMinValue(BitWidth)));
else if (AllNonPos)
- ConservativeResult = ConstantRange(APInt::getSignedMinValue(BitWidth),
- APInt(BitWidth, 1));
+ ConservativeResult = ConservativeResult.intersectWith(
+ ConstantRange(APInt::getSignedMinValue(BitWidth),
+ APInt(BitWidth, 1)));
}
// TODO: non-affine addrec
- if (Trip && AddRec->isAffine()) {
+ if (AddRec->isAffine()) {
const Type *Ty = AddRec->getType();
const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
- if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) {
+ if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
+ getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
const SCEV *Start = AddRec->getStart();
@@ -3008,7 +3059,7 @@ ScalarEvolution::getSignedRange(const SCEV *S) {
EndRange.getSignedMax());
if (Min.isMinSignedValue() && Max.isMaxSignedValue())
return ConservativeResult;
- return ConstantRange(Min, Max+1);
+ return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
}
}
@@ -3017,18 +3068,17 @@ ScalarEvolution::getSignedRange(const SCEV *S) {
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
// For a SCEVUnknown, ask ValueTracking.
- unsigned BitWidth = getTypeSizeInBits(U->getType());
- if (!U->getValue()->getType()->isInteger() && !TD)
- return FullSet;
+ if (!U->getValue()->getType()->isIntegerTy() && !TD)
+ return ConservativeResult;
unsigned NS = ComputeNumSignBits(U->getValue(), TD);
if (NS == 1)
- return FullSet;
- return
+ return ConservativeResult;
+ return ConservativeResult.intersectWith(
ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
- APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1);
+ APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1));
}
- return FullSet;
+ return ConservativeResult;
}
/// createSCEV - We know that there is no SCEV for the specified value.
@@ -3179,7 +3229,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
case Instruction::Shl:
// Turn shift left of a constant amount into a multiply.
if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
- uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
+ uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
Constant *X = ConstantInt::get(getContext(),
APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
@@ -3189,7 +3239,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
- uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
+ uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
Constant *X = ConstantInt::get(getContext(),
APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
@@ -3230,10 +3280,10 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
return getSCEV(U->getOperand(0));
break;
- // It's tempting to handle inttoptr and ptrtoint, however this can
- // lead to pointer expressions which cannot be expanded to GEPs
- // (because they may overflow). For now, the only pointer-typed
- // expressions we handle are GEPs and address literals.
+ // It's tempting to handle inttoptr and ptrtoint as no-ops; however, this can
+ // lead to pointer expressions which cannot safely be expanded to GEPs,
+ // because ScalarEvolution doesn't respect the GEP aliasing rules when
+ // simplifying integer expressions.
case Instruction::GetElementPtr:
return createNodeForGEP(cast<GEPOperator>(U));
@@ -3350,19 +3400,19 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
if (Pair.second) {
- BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L);
- if (ItCount.Exact != getCouldNotCompute()) {
- assert(ItCount.Exact->isLoopInvariant(L) &&
- ItCount.Max->isLoopInvariant(L) &&
- "Computed trip count isn't loop invariant for loop!");
+ BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L);
+ if (BECount.Exact != getCouldNotCompute()) {
+ assert(BECount.Exact->isLoopInvariant(L) &&
+ BECount.Max->isLoopInvariant(L) &&
+ "Computed backedge-taken count isn't loop invariant for loop!");
++NumTripCountsComputed;
// Update the value in the map.
- Pair.first->second = ItCount;
+ Pair.first->second = BECount;
} else {
- if (ItCount.Max != getCouldNotCompute())
+ if (BECount.Max != getCouldNotCompute())
// Update the value in the map.
- Pair.first->second = ItCount;
+ Pair.first->second = BECount;
if (isa<PHINode>(L->getHeader()->begin()))
// Only count loops that have phi nodes as not being computable.
++NumTripCountsNotComputed;
@@ -3373,7 +3423,7 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
// conservative estimates made without the benefit of trip count
// information. This is similar to the code in forgetLoop, except that
// it handles SCEVUnknown PHI nodes specially.
- if (ItCount.hasAnyInfo()) {
+ if (BECount.hasAnyInfo()) {
SmallVector<Instruction *, 16> Worklist;
PushLoopPHIs(L, Worklist);
@@ -4230,9 +4280,6 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
return getTruncateExpr(Op, Cast->getType());
}
- if (isa<SCEVTargetDataConstant>(V))
- return V;
-
llvm_unreachable("Unknown SCEV type!");
return 0;
}
@@ -4403,7 +4450,7 @@ const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
-StartC->getValue()->getValue(),
*this);
}
- } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) {
+ } else if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
// If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
// the quadratic equation to solve it.
std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
@@ -4947,6 +4994,9 @@ const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
const SCEV *End,
const SCEV *Step,
bool NoWrap) {
+ assert(!isKnownNegative(Step) &&
+ "This code doesn't handle negative strides yet!");
+
const Type *Ty = Start->getType();
const SCEV *NegOne = getIntegerSCEV(-1, Ty);
const SCEV *Diff = getMinusSCEV(End, Start);
@@ -4989,39 +5039,35 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
AddRec->hasNoUnsignedWrap();
if (AddRec->isAffine()) {
- // FORNOW: We only support unit strides.
unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
const SCEV *Step = AddRec->getStepRecurrence(*this);
- // TODO: handle non-constant strides.
- const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
- if (!CStep || CStep->isZero())
+ if (Step->isZero())
return getCouldNotCompute();
- if (CStep->isOne()) {
+ if (Step->isOne()) {
// With unit stride, the iteration never steps past the limit value.
- } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
- if (NoWrap) {
- // We know the iteration won't step past the maximum value for its type.
- ;
- } else if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
- // Test whether a positive iteration iteration can step past the limit
- // value and past the maximum value for its type in a single step.
- if (isSigned) {
- APInt Max = APInt::getSignedMaxValue(BitWidth);
- if ((Max - CStep->getValue()->getValue())
- .slt(CLimit->getValue()->getValue()))
- return getCouldNotCompute();
- } else {
- APInt Max = APInt::getMaxValue(BitWidth);
- if ((Max - CStep->getValue()->getValue())
- .ult(CLimit->getValue()->getValue()))
- return getCouldNotCompute();
- }
- } else
- // TODO: handle non-constant limit values below.
- return getCouldNotCompute();
+ } else if (isKnownPositive(Step)) {
+ // Test whether a positive iteration can step past the limit
+ // value and past the maximum value for its type in a single step.
+ // Note that it's not sufficient to check NoWrap here, because even
+ // though the value after a wrap is undefined, it's not undefined
+ // behavior, so if wrap does occur, the loop could either terminate or
+ // loop infinitely, but in either case, the loop is guaranteed to
+ // iterate at least until the iteration where the wrapping occurs.
+ const SCEV *One = getIntegerSCEV(1, Step->getType());
+ if (isSigned) {
+ APInt Max = APInt::getSignedMaxValue(BitWidth);
+ if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
+ .slt(getSignedRange(RHS).getSignedMax()))
+ return getCouldNotCompute();
+ } else {
+ APInt Max = APInt::getMaxValue(BitWidth);
+ if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax())
+ .ult(getUnsignedRange(RHS).getUnsignedMax()))
+ return getCouldNotCompute();
+ }
} else
- // TODO: handle negative strides below.
+ // TODO: Handle negative strides here and below.
return getCouldNotCompute();
// We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
@@ -5054,6 +5100,20 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
getSignedRange(End).getSignedMax() :
getUnsignedRange(End).getUnsignedMax());
+ // If MaxEnd is within a step of the maximum integer value in its type,
+ // adjust it down to the minimum value which would produce the same effect.
+ // This allows the subsequent ceiling division of (N+(step-1))/step to
+ // compute the correct value.
+ const SCEV *StepMinusOne = getMinusSCEV(Step,
+ getIntegerSCEV(1, Step->getType()));
+ MaxEnd = isSigned ?
+ getSMinExpr(MaxEnd,
+ getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
+ StepMinusOne)) :
+ getUMinExpr(MaxEnd,
+ getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)),
+ StepMinusOne));
+
// Finally, we subtract these two values and divide, rounding up, to get
// the number of times the backedge is executed.
const SCEV *BECount = getBECount(Start, End, Step, NoWrap);
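
The clamp on MaxEnd introduced above exists so the ceiling division (End - Start + (Step - 1)) / Step cannot overflow when End sits within a step of the type's maximum. A standalone sketch of that computation, assuming a positive constant stride and 32-bit signed values (the function name is hypothetical):

#include <algorithm>
#include <cstdint>
#include <iostream>

// Following getBECount above: count the induction-variable values
// Start, Start+Step, ... that are signed-less-than MaxEnd, i.e.
// ceil((MaxEnd - Start) / Step). MaxEnd is clamped to SMAX - (Step - 1)
// first so that the Step - 1 rounding term cannot wrap; the clamped
// value yields the same count, as the comment in the hunk explains.
uint32_t maxBackedgeTakenCount(int32_t Start, int32_t Step, int32_t MaxEnd) {
  MaxEnd = std::min(MaxEnd, INT32_MAX - (Step - 1)); // the new clamp
  int64_t Diff = (int64_t)MaxEnd - (int64_t)Start;
  if (Diff <= 0)
    return 0; // The bound is already met before the first step.
  return (uint32_t)((Diff + (Step - 1)) / Step);
}

int main() {
  // Values 0, 4, 8, 12, 16 are below 17, so this prints 5.
  std::cout << maxBackedgeTakenCount(0, 4, 17) << "\n";
}
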
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index a72f58f..c2e1f89 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -365,31 +365,33 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
// the indices index into the element or field type selected by the
// preceding index.
for (;;) {
- const SCEV *ElSize = SE.getAllocSizeExpr(ElTy);
// If the scale size is not 0, attempt to factor out a scale for
// array indexing.
SmallVector<const SCEV *, 8> ScaledOps;
- if (ElTy->isSized() && !ElSize->isZero()) {
- SmallVector<const SCEV *, 8> NewOps;
- for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
- const SCEV *Op = Ops[i];
- const SCEV *Remainder = SE.getIntegerSCEV(0, Ty);
- if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
- // Op now has ElSize factored out.
- ScaledOps.push_back(Op);
- if (!Remainder->isZero())
- NewOps.push_back(Remainder);
- AnyNonZeroIndices = true;
- } else {
- // The operand was not divisible, so add it to the list of operands
- // we'll scan next iteration.
- NewOps.push_back(Ops[i]);
+ if (ElTy->isSized()) {
+ const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
+ if (!ElSize->isZero()) {
+ SmallVector<const SCEV *, 8> NewOps;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ const SCEV *Op = Ops[i];
+ const SCEV *Remainder = SE.getIntegerSCEV(0, Ty);
+ if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
+ // Op now has ElSize factored out.
+ ScaledOps.push_back(Op);
+ if (!Remainder->isZero())
+ NewOps.push_back(Remainder);
+ AnyNonZeroIndices = true;
+ } else {
+ // The operand was not divisible, so add it to the list of operands
+ // we'll scan next iteration.
+ NewOps.push_back(Ops[i]);
+ }
+ }
+ // If we made any changes, update Ops.
+ if (!ScaledOps.empty()) {
+ Ops = NewOps;
+ SimplifyAddOperands(Ops, Ty, SE);
}
- }
- // If we made any changes, update Ops.
- if (!ScaledOps.empty()) {
- Ops = NewOps;
- SimplifyAddOperands(Ops, Ty, SE);
}
}
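
A toy illustration of the factoring the restructured loop performs: each addend is split into ElSize * Index plus a Remainder that is scanned again at the next element type. The values below are made up and no SCEV machinery is involved.

#include <cstdint>
#include <iostream>

int main() {
  int64_t Offset = 37; // a byte offset being folded into a GEP
  int64_t ElSize = 8;  // e.g. the allocation size of an i64 element
  int64_t Index = Offset / ElSize;     // becomes a scaled GEP index
  int64_t Remainder = Offset % ElSize; // carried to the next iteration
  std::cout << Offset << " = " << ElSize << "*" << Index
            << " + " << Remainder << "\n"; // 37 = 8*4 + 5
}
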
@@ -427,22 +429,22 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
}
}
} else {
- // Without TargetData, just check for a SCEVFieldOffsetExpr of the
+ // Without TargetData, just check for an offsetof expression of the
// appropriate struct type.
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (const SCEVFieldOffsetExpr *FO =
- dyn_cast<SCEVFieldOffsetExpr>(Ops[i]))
- if (FO->getStructType() == STy) {
- unsigned FieldNo = FO->getFieldNo();
- GepIndices.push_back(
- ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
- FieldNo));
- ElTy = STy->getTypeAtIndex(FieldNo);
+ if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
+ const Type *CTy;
+ Constant *FieldNo;
+ if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
+ GepIndices.push_back(FieldNo);
+ ElTy =
+ STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
Ops[i] = SE.getConstant(Ty, 0);
AnyNonZeroIndices = true;
FoundFieldNo = true;
break;
}
+ }
}
// If no struct field offsets were found, tentatively assume that
// field zero was selected (since the zero offset would obviously
@@ -639,8 +641,52 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
// Reuse a previously-inserted PHI, if present.
for (BasicBlock::iterator I = L->getHeader()->begin();
PHINode *PN = dyn_cast<PHINode>(I); ++I)
- if (isInsertedInstruction(PN) && SE.getSCEV(PN) == Normalized)
- return PN;
+ if (SE.isSCEVable(PN->getType()) &&
+ (SE.getEffectiveSCEVType(PN->getType()) ==
+ SE.getEffectiveSCEVType(Normalized->getType())) &&
+ SE.getSCEV(PN) == Normalized)
+ if (BasicBlock *LatchBlock = L->getLoopLatch()) {
+ Instruction *IncV =
+ cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));
+
+ // Determine if this is a well-behaved chain of instructions leading
+ // back to the PHI. It probably will be, if we're scanning an inner
+ // loop already visited by LSR for example, but it wouldn't have
+ // to be.
+ do {
+ if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV)) {
+ IncV = 0;
+ break;
+ }
+ IncV = dyn_cast<Instruction>(IncV->getOperand(0));
+ if (!IncV)
+ break;
+ if (IncV->mayHaveSideEffects()) {
+ IncV = 0;
+ break;
+ }
+ } while (IncV != PN);
+
+ if (IncV) {
+ // Ok, the add recurrence looks usable.
+ // Remember this PHI, even in post-inc mode.
+ InsertedValues.insert(PN);
+ // Remember the increment.
+ IncV = cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));
+ rememberInstruction(IncV);
+ if (L == IVIncInsertLoop)
+ do {
+ if (SE.DT->dominates(IncV, IVIncInsertPos))
+ break;
+ // Make sure the increment is where we want it. But don't move it
+ // down past a potential existing post-inc user.
+ IncV->moveBefore(IVIncInsertPos);
+ IVIncInsertPos = IncV;
+ IncV = cast<Instruction>(IncV->getOperand(0));
+ } while (IncV != PN);
+ return PN;
+ }
+ }
// Save the original insertion point so we can restore it when we're done.
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
@@ -711,7 +757,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
// Restore the original insert point.
if (SaveInsertBB)
- Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
+ restoreInsertPoint(SaveInsertBB, SaveInsertPt);
// Remember this PHI, even in post-inc mode.
InsertedValues.insert(PN);
@@ -774,6 +820,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// Re-apply any non-loop-dominating scale.
if (PostLoopScale) {
+ Result = InsertNoopCastOfTo(Result, IntTy);
Result = Builder.CreateMul(Result,
expandCodeFor(PostLoopScale, IntTy));
rememberInstruction(Result);
@@ -785,6 +832,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
const SCEV *const OffsetArray[1] = { PostLoopOffset };
Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
} else {
+ Result = InsertNoopCastOfTo(Result, IntTy);
Result = Builder.CreateAdd(Result,
expandCodeFor(PostLoopOffset, IntTy));
rememberInstruction(Result);
@@ -825,7 +873,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
while (isa<PHINode>(NewInsertPt)) ++NewInsertPt;
V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
NewInsertPt);
- Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
+ restoreInsertPoint(SaveInsertBB, SaveInsertPt);
return V;
}
@@ -1001,14 +1049,6 @@ Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
return LHS;
}
-Value *SCEVExpander::visitFieldOffsetExpr(const SCEVFieldOffsetExpr *S) {
- return ConstantExpr::getOffsetOf(S->getStructType(), S->getFieldNo());
-}
-
-Value *SCEVExpander::visitAllocSizeExpr(const SCEVAllocSizeExpr *S) {
- return ConstantExpr::getSizeOf(S->getAllocType());
-}
-
Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty) {
// Expand the code for this SCEV.
Value *V = expand(SH);
@@ -1059,10 +1099,32 @@ Value *SCEVExpander::expand(const SCEV *S) {
if (!PostIncLoop)
InsertedExpressions[std::make_pair(S, InsertPt)] = V;
- Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
+ restoreInsertPoint(SaveInsertBB, SaveInsertPt);
return V;
}
+void SCEVExpander::rememberInstruction(Value *I) {
+ if (!PostIncLoop)
+ InsertedValues.insert(I);
+
+ // If we just claimed an existing instruction and that instruction had
+ // been the insert point, adjust the insert point forward so that
+ // subsequently inserted code will be dominated.
+ if (Builder.GetInsertPoint() == I) {
+ BasicBlock::iterator It = cast<Instruction>(I);
+ do { ++It; } while (isInsertedInstruction(It));
+ Builder.SetInsertPoint(Builder.GetInsertBlock(), It);
+ }
+}
+
+void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
+ // If we acquired more instructions since the old insert point was saved,
+ // advance past them.
+ while (isInsertedInstruction(I)) ++I;
+
+ Builder.SetInsertPoint(BB, I);
+}
+
/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
@@ -1070,13 +1132,13 @@ Value *SCEVExpander::expand(const SCEV *S) {
Value *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
const Type *Ty) {
- assert(Ty->isInteger() && "Can only insert integer induction variables!");
+ assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");
const SCEV *H = SE.getAddRecExpr(SE.getIntegerSCEV(0, Ty),
SE.getIntegerSCEV(1, Ty), L);
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
Value *V = expandCodeFor(H, 0, L->getHeader()->begin());
if (SaveInsertBB)
- Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
+ restoreInsertPoint(SaveInsertBB, SaveInsertPt);
return V;
}
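
The new restoreInsertPoint replaces the plain SetInsertPoint calls so that a restored position skips over instructions the expander emitted in the meantime. A minimal stand-in using std::list in place of a basic block; the names are illustrative, not SCEVExpander's API.

#include <iostream>
#include <iterator>
#include <list>
#include <set>
#include <string>

// A list of instruction names stands in for a basic block; Inserted plays
// the role of isInsertedInstruction. Restoring a saved position advances
// past anything this pass inserted there, so later expansions land after,
// and can therefore use, the code already emitted.
using Block = std::list<std::string>;

Block::iterator restoreInsertPoint(Block::iterator It, Block &BB,
                                   const std::set<std::string> &Inserted) {
  while (It != BB.end() && Inserted.count(*It))
    ++It;
  return It;
}

int main() {
  Block BB = {"phi", "expanded.iv", "expanded.cmp", "user"};
  std::set<std::string> Inserted = {"expanded.iv", "expanded.cmp"};
  // The saved iterator now lands on instructions emitted after it was saved.
  Block::iterator Saved = std::next(BB.begin());
  std::cout << "insert before: " << *restoreInsertPoint(Saved, BB, Inserted)
            << "\n"; // prints "insert before: user"
}
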
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 91e5bc3..7cc9c0d 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -49,11 +49,11 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
assert(V && "No Value?");
assert(Depth <= MaxDepth && "Limit Search Depth");
unsigned BitWidth = Mask.getBitWidth();
- assert((V->getType()->isIntOrIntVector() || isa<PointerType>(V->getType())) &&
- "Not integer or pointer type!");
+ assert((V->getType()->isIntOrIntVectorTy() || isa<PointerType>(V->getType()))
+ && "Not integer or pointer type!");
assert((!TD ||
TD->getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
- (!V->getType()->isIntOrIntVector() ||
+ (!V->getType()->isIntOrIntVectorTy() ||
V->getType()->getScalarSizeInBits() == BitWidth) &&
KnownZero.getBitWidth() == BitWidth &&
KnownOne.getBitWidth() == BitWidth &&
@@ -269,7 +269,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
}
case Instruction::BitCast: {
const Type *SrcTy = I->getOperand(0)->getType();
- if ((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
+ if ((SrcTy->isIntegerTy() || isa<PointerType>(SrcTy)) &&
// TODO: For now, not handling conversions like:
// (bitcast i64 %x to <2 x i32>)
!isa<VectorType>(I->getType())) {
@@ -421,20 +421,29 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
}
case Instruction::SRem:
if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
- APInt RA = Rem->getValue();
- if (RA.isPowerOf2() || (-RA).isPowerOf2()) {
- APInt LowBits = RA.isStrictlyPositive() ? (RA - 1) : ~RA;
+ APInt RA = Rem->getValue().abs();
+ if (RA.isPowerOf2()) {
+ APInt LowBits = RA - 1;
APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
Depth+1);
- // If the sign bit of the first operand is zero, the sign bit of
- // the result is zero. If the first operand has no one bits below
- // the second operand's single 1 bit, its sign will be zero.
+ // The low bits of the first operand are unchanged by the srem.
+ KnownZero = KnownZero2 & LowBits;
+ KnownOne = KnownOne2 & LowBits;
+
+ // If the first operand is non-negative or has all low bits zero, then
+ // the upper bits are all zero.
if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
- KnownZero2 |= ~LowBits;
+ KnownZero |= ~LowBits;
- KnownZero |= KnownZero2 & Mask;
+ // If the first operand is negative and not all low bits are zero, then
+ // the upper bits are all one.
+ if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
+ KnownOne |= ~LowBits;
+
+ KnownZero &= Mask;
+ KnownOne &= Mask;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
}
@@ -640,7 +649,7 @@ bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
///
unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
unsigned Depth) {
- assert((TD || V->getType()->isIntOrIntVector()) &&
+ assert((TD || V->getType()->isIntOrIntVectorTy()) &&
"ComputeNumSignBits requires a TargetData object to operate "
"on non-integer values!");
const Type *Ty = V->getType();
@@ -814,7 +823,7 @@ bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
assert(V && "No Value?");
assert(Depth <= MaxDepth && "Limit Search Depth");
- assert(V->getType()->isInteger() && "Not integer or pointer type!");
+ assert(V->getType()->isIntegerTy() && "Not an integer type!");
const Type *T = V->getType();
@@ -1363,7 +1372,7 @@ bool llvm::GetConstantStringInfo(Value *V, std::string &Str, uint64_t Offset,
// Make sure the index-ee is a pointer to array of i8.
const PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
const ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
- if (AT == 0 || !AT->getElementType()->isInteger(8))
+ if (AT == 0 || !AT->getElementType()->isIntegerTy(8))
return false;
// Check to make sure that the first operand of the GEP is an integer and
@@ -1402,7 +1411,7 @@ bool llvm::GetConstantStringInfo(Value *V, std::string &Str, uint64_t Offset,
// Must be a Constant Array
ConstantArray *Array = dyn_cast<ConstantArray>(GlobalInit);
- if (Array == 0 || !Array->getType()->getElementType()->isInteger(8))
+ if (Array == 0 || !Array->getType()->getElementType()->isIntegerTy(8))
return false;
// Get the number of elements in the array
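
The rewritten SRem case relies on two facts: an srem by a power of two leaves the low bits of the first operand unchanged, and the remaining high bits are then all zero or all one according to the operand's sign. A quick standalone brute-force check of the first fact; C++'s % operator matches LLVM's srem on int32_t.

#include <cstdint>
#include <iostream>

int main() {
  const int32_t LowBits = 8 - 1; // srem by +/-8, a power-of-two modulus
  bool AllMatch = true;
  for (int32_t X = -64; X <= 64; ++X) {
    int32_t R = X % 8; // truncating division, like srem
    if ((R & LowBits) != (X & LowBits))
      AllMatch = false; // never taken: R differs from X by a multiple of 8
  }
  std::cout << (AllMatch ? "low bits preserved" : "mismatch") << "\n";
}
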