path: root/lib/Analysis
author     rdivacky <rdivacky@FreeBSD.org>    2010-03-21 10:49:05 +0000
committer  rdivacky <rdivacky@FreeBSD.org>    2010-03-21 10:49:05 +0000
commit     2f2afc1aae898651e26987a5c71f3febb19bca98 (patch)
tree       2caca31db4facdc95c23930c0c745c8ef0dee97d /lib/Analysis
parent     0f448b841684305c051796982f300c9bff959307 (diff)
download   FreeBSD-src-2f2afc1aae898651e26987a5c71f3febb19bca98.zip
           FreeBSD-src-2f2afc1aae898651e26987a5c71f3febb19bca98.tar.gz
Update LLVM to r99115.
Diffstat (limited to 'lib/Analysis')
-rw-r--r--  lib/Analysis/ConstantFolding.cpp          49
-rw-r--r--  lib/Analysis/LoopDependenceAnalysis.cpp    3
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp          83
-rw-r--r--  lib/Analysis/ScalarEvolutionExpander.cpp  46
4 files changed, 102 insertions(+), 79 deletions(-)
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 96bb027..dda1fba 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -564,21 +564,6 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
unsigned BitWidth =
TD->getTypeSizeInBits(TD->getIntPtrType(Ptr->getContext()));
- APInt BasePtr(BitWidth, 0);
- bool BaseIsInt = true;
- if (!Ptr->isNullValue()) {
- // If this is a inttoptr from a constant int, we can fold this as the base,
- // otherwise we can't.
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
- if (CE->getOpcode() == Instruction::IntToPtr)
- if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0))) {
- BasePtr = Base->getValue();
- BasePtr.zextOrTrunc(BitWidth);
- }
-
- if (BasePtr == 0)
- BaseIsInt = false;
- }
// If this is a constant expr gep that is effectively computing an
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
@@ -615,7 +600,14 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
// If the base value for this address is a literal integer value, fold the
// getelementptr to the resulting integer value casted to the pointer type.
- if (BaseIsInt) {
+ APInt BasePtr(BitWidth, 0);
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
+ if (CE->getOpcode() == Instruction::IntToPtr)
+ if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0))) {
+ BasePtr = Base->getValue();
+ BasePtr.zextOrTrunc(BitWidth);
+ }
+ if (Ptr->isNullValue() || BasePtr != 0) {
Constant *C = ConstantInt::get(Ptr->getContext(), Offset+BasePtr);
return ConstantExpr::getIntToPtr(C, ResultTy);
}
@@ -1002,6 +994,8 @@ llvm::canConstantFoldCallTo(const Function *F) {
case Intrinsic::usub_with_overflow:
case Intrinsic::sadd_with_overflow:
case Intrinsic::ssub_with_overflow:
+ case Intrinsic::convert_from_fp16:
+ case Intrinsic::convert_to_fp16:
return true;
default:
return false;
@@ -1082,6 +1076,15 @@ llvm::ConstantFoldCall(Function *F,
const Type *Ty = F->getReturnType();
if (NumOperands == 1) {
if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
+ if (Name == "llvm.convert.to.fp16") {
+ APFloat Val(Op->getValueAPF());
+
+ bool lost = false;
+ Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost);
+
+ return ConstantInt::get(F->getContext(), Val.bitcastToAPInt());
+ }
+
if (!Ty->isFloatTy() && !Ty->isDoubleTy())
return 0;
/// Currently APFloat versions of these functions do not exist, so we use
@@ -1166,6 +1169,20 @@ llvm::ConstantFoldCall(Function *F,
return ConstantInt::get(Ty, Op->getValue().countTrailingZeros());
else if (Name.startswith("llvm.ctlz"))
return ConstantInt::get(Ty, Op->getValue().countLeadingZeros());
+ else if (Name == "llvm.convert.from.fp16") {
+ APFloat Val(Op->getValue());
+
+ bool lost = false;
+ APFloat::opStatus status =
+ Val.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);
+
+ // Conversion is always precise.
+ status = status;
+ assert(status == APFloat::opOK && !lost &&
+ "Precision lost during fp16 constfolding");
+
+ return ConstantFP::get(F->getContext(), Val);
+ }
return 0;
}
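
The hunks above teach the constant folder about the fp16 conversion intrinsics added in this LLVM revision. As a reading aid, here is a minimal standalone sketch of the same APFloat round trip; the function names are illustrative, and it assumes (as the patch's own calls imply) that this era's APFloat selects half-precision semantics from a 16-bit APInt and exposes IEEEhalf as a static fltSemantics:

    #include "llvm/ADT/APFloat.h"
    #include "llvm/ADT/APInt.h"
    #include <cassert>

    using namespace llvm;

    // Narrow a value to IEEE half and return the raw 16-bit pattern,
    // mirroring the llvm.convert.to.fp16 folding above. Narrowing may
    // round, so the loss flag is deliberately ignored, as in the patch.
    static APInt foldToFP16(const APFloat &In) {
      APFloat Val(In);
      bool lost = false;
      Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost);
      return Val.bitcastToAPInt(); // a 16-bit APInt
    }

    // Widen a 16-bit pattern to single precision, as for
    // llvm.convert.from.fp16. Widening half to float is always exact.
    static APFloat foldFromFP16(const APInt &Bits) {
      APFloat Val(Bits); // 16-bit width selects half-precision semantics
      bool lost = false;
      APFloat::opStatus status =
          Val.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);
      assert(status == APFloat::opOK && !lost && "fp16 widening must be exact");
      (void)status; // keep -Asserts builds warning-free
      return Val;
    }

For example, foldToFP16(APFloat(1.0f)) should yield the bit pattern 0x3C00, the half-precision encoding of 1.0; the exactness of the widening direction is why the patch can assert opOK there while silently accepting rounding when narrowing.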
diff --git a/lib/Analysis/LoopDependenceAnalysis.cpp b/lib/Analysis/LoopDependenceAnalysis.cpp
index bb4f46d..e101947 100644
--- a/lib/Analysis/LoopDependenceAnalysis.cpp
+++ b/lib/Analysis/LoopDependenceAnalysis.cpp
@@ -119,8 +119,7 @@ bool LoopDependenceAnalysis::findOrInsertDependencePair(Value *A,
P = Pairs.FindNodeOrInsertPos(id, insertPos);
if (P) return true;
- P = PairAllocator.Allocate<DependencePair>();
- new (P) DependencePair(id, A, B);
+ P = new (PairAllocator) DependencePair(id, A, B);
Pairs.InsertNode(P, insertPos);
return false;
}
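
This one-hunk change, like the allocation changes in ScalarEvolution.cpp below, collapses the two-step "Allocate raw memory, then placement-new into it" sequence into a single new-expression. That compiles because LLVM provides an operator new overload taking its BumpPtrAllocator. A reduced sketch of the idiom, with a hypothetical Pool type standing in for BumpPtrAllocator:

    #include <cstddef>
    #include <cstdio>
    #include <new>
    #include <vector>

    // Hypothetical stand-in for llvm::BumpPtrAllocator: hands out memory
    // from 4 KiB slabs and frees everything at once in its destructor.
    class Pool {
      std::vector<char *> Slabs;
      std::size_t Used = 0, Cap = 0;
    public:
      void *allocate(std::size_t Size) {
        const std::size_t A = alignof(std::max_align_t);
        Used = (Used + A - 1) & ~(A - 1);          // keep the bump pointer aligned
        if (Slabs.empty() || Used + Size > Cap) {  // start a fresh slab if needed
          Cap = Size > 4096 ? Size : 4096;
          Slabs.push_back(new char[Cap]);
          Used = 0;
        }
        void *P = Slabs.back() + Used;
        Used += Size;
        return P;
      }
      ~Pool() { for (char *S : Slabs) delete[] S; }
    };

    // This overload is what makes `new (SomePool) T(...)` legal; LLVM ships
    // the equivalent for BumpPtrAllocator, which the patch relies on.
    void *operator new(std::size_t Size, Pool &P) { return P.allocate(Size); }
    // Matching placement delete, invoked only if T's constructor throws.
    void operator delete(void *, Pool &) {}

    struct PairLike {
      int A, B;
      PairLike(int a, int b) : A(a), B(b) {}
    };

    int main() {
      Pool PairAllocator;
      // Before the patch: P = Alloc.Allocate<PairLike>(); new (P) PairLike(1, 2);
      // After the patch, one expression performs both steps:
      PairLike *P = new (PairAllocator) PairLike(1, 2);
      std::printf("%d %d\n", P->A, P->B);
      return 0; // no delete: the Pool destructor releases every slab
    }

The payoff is the same in both files: objects come out of slab memory, no per-object delete ever runs, and the whole pool is released in one shot.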
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 15f072d..1af271a 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -141,7 +141,7 @@ bool SCEV::isAllOnesValue() const {
}
SCEVCouldNotCompute::SCEVCouldNotCompute() :
- SCEV(FoldingSetNodeID(), scCouldNotCompute) {}
+ SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}
bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
@@ -177,8 +177,7 @@ const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
ID.AddPointer(V);
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVConstant>();
- new (S) SCEVConstant(ID, V);
+ SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -199,7 +198,7 @@ void SCEVConstant::print(raw_ostream &OS) const {
WriteAsOperand(OS, V, false);
}
-SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeID &ID,
+SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
unsigned SCEVTy, const SCEV *op, const Type *ty)
: SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
@@ -211,7 +210,7 @@ bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
return Op->properlyDominates(BB, DT);
}
-SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeID &ID,
+SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scTruncate, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
@@ -223,7 +222,7 @@ void SCEVTruncateExpr::print(raw_ostream &OS) const {
OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}
-SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeID &ID,
+SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scZeroExtend, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
@@ -235,7 +234,7 @@ void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}
-SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeID &ID,
+SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scSignExtend, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
@@ -248,10 +247,10 @@ void SCEVSignExtendExpr::print(raw_ostream &OS) const {
}
void SCEVCommutativeExpr::print(raw_ostream &OS) const {
- assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
+ assert(NumOperands > 1 && "This plus expr shouldn't exist!");
const char *OpStr = getOperationStr();
OS << "(" << *Operands[0];
- for (unsigned i = 1, e = Operands.size(); i != e; ++i)
+ for (unsigned i = 1, e = NumOperands; i != e; ++i)
OS << OpStr << *Operands[i];
OS << ")";
}
@@ -329,7 +328,7 @@ SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
void SCEVAddRecExpr::print(raw_ostream &OS) const {
OS << "{" << *Operands[0];
- for (unsigned i = 1, e = Operands.size(); i != e; ++i)
+ for (unsigned i = 1, e = NumOperands; i != e; ++i)
OS << ",+," << *Operands[i];
OS << "}<";
WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
@@ -846,8 +845,8 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
// The cast wasn't folded; create an explicit cast node.
// Recompute the insert position, as it may have been invalidated.
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVTruncateExpr>();
- new (S) SCEVTruncateExpr(ID, Op, Ty);
+ SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
+ Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -981,8 +980,8 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
// The cast wasn't folded; create an explicit cast node.
// Recompute the insert position, as it may have been invalidated.
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVZeroExtendExpr>();
- new (S) SCEVZeroExtendExpr(ID, Op, Ty);
+ SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
+ Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -1116,8 +1115,8 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
// The cast wasn't folded; create an explicit cast node.
// Recompute the insert position, as it may have been invalidated.
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVSignExtendExpr>();
- new (S) SCEVSignExtendExpr(ID, Op, Ty);
+ SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
+ Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -1202,23 +1201,23 @@ static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
SmallVector<const SCEV *, 8> &NewOps,
APInt &AccumulatedConstant,
- const SmallVectorImpl<const SCEV *> &Ops,
+ const SCEV *const *Ops, size_t NumOperands,
const APInt &Scale,
ScalarEvolution &SE) {
bool Interesting = false;
// Iterate over the add operands.
- for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ for (unsigned i = 0, e = NumOperands; i != e; ++i) {
const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
APInt NewScale =
Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
// A multiplication of a constant with another add; recurse.
+ const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
Interesting |=
CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
- cast<SCEVAddExpr>(Mul->getOperand(1))
- ->getOperands(),
+ Add->op_begin(), Add->getNumOperands(),
NewScale, SE);
} else {
// A multiplication of a constant with some other value. Update
@@ -1427,7 +1426,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
SmallVector<const SCEV *, 8> NewOps;
APInt AccumulatedConstant(BitWidth, 0);
if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
- Ops, APInt(BitWidth, 1), *this)) {
+ Ops.data(), Ops.size(),
+ APInt(BitWidth, 1), *this)) {
// Some interesting folding opportunity is present, so its worthwhile to
// re-generate the operands list. Group the operands by constant scale,
// to avoid multiplying by the same constant scale multiple times.
@@ -1611,8 +1611,10 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
SCEVAddExpr *S =
static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
- S = SCEVAllocator.Allocate<SCEVAddExpr>();
- new (S) SCEVAddExpr(ID, Ops);
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+ S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
+ O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
}
if (HasNUW) S->setHasNoUnsignedWrap(true);
@@ -1819,8 +1821,10 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
SCEVMulExpr *S =
static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
- S = SCEVAllocator.Allocate<SCEVMulExpr>();
- new (S) SCEVMulExpr(ID, Ops);
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+ S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
+ O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
}
if (HasNUW) S->setHasNoUnsignedWrap(true);
@@ -1880,9 +1884,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
const SCEV *Op = M->getOperand(i);
const SCEV *Div = getUDivExpr(Op, RHSC);
if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
- const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
- Operands = SmallVector<const SCEV *, 4>(MOperands.begin(),
- MOperands.end());
+ Operands = SmallVector<const SCEV *, 4>(M->op_begin(), M->op_end());
Operands[i] = Div;
return getMulExpr(Operands);
}
@@ -1921,8 +1923,8 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
ID.AddPointer(RHS);
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVUDivExpr>();
- new (S) SCEVUDivExpr(ID, LHS, RHS);
+ SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
+ LHS, RHS);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -2030,8 +2032,10 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
SCEVAddRecExpr *S =
static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
- S = SCEVAllocator.Allocate<SCEVAddRecExpr>();
- new (S) SCEVAddRecExpr(ID, Operands, L);
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
+ std::uninitialized_copy(Operands.begin(), Operands.end(), O);
+ S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
+ O, Operands.size(), L);
UniqueSCEVs.InsertNode(S, IP);
}
if (HasNUW) S->setHasNoUnsignedWrap(true);
@@ -2130,8 +2134,10 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
ID.AddPointer(Ops[i]);
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>();
- new (S) SCEVSMaxExpr(ID, Ops);
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+ SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
+ O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -2227,8 +2233,10 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
ID.AddPointer(Ops[i]);
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>();
- new (S) SCEVUMaxExpr(ID, Ops);
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+ SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
+ O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -2290,8 +2298,7 @@ const SCEV *ScalarEvolution::getUnknown(Value *V) {
ID.AddPointer(V);
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVUnknown>();
- new (S) SCEVUnknown(ID, V);
+ SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
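
A second pattern runs through this file: the n-ary SCEV nodes stop owning their operands in a per-node SmallVector. The operand pointers are copied once into allocator memory with std::uninitialized_copy, and each node keeps only a pointer and a count (NumOperands); the FoldingSetNodeIDRef change interns the hash data the same way. A reduced model of the resulting layout, using std::pmr::monotonic_buffer_resource as a stand-in for SCEVAllocator; all names here are illustrative:

    #include <cstddef>
    #include <memory>           // std::uninitialized_copy
    #include <memory_resource>  // std::pmr::monotonic_buffer_resource
    #include <new>              // placement new
    #include <vector>

    struct SCEVLike { int Kind; };

    // An n-ary expression that points into arena memory instead of
    // owning a SmallVector, mirroring SCEVAddExpr after this patch.
    struct NAryExprLike {
      const SCEVLike *const *Operands; // arena-owned external array
      std::size_t NumOperands;
      NAryExprLike(const SCEVLike *const *O, std::size_t N)
          : Operands(O), NumOperands(N) {}
      const SCEVLike *getOperand(std::size_t i) const { return Operands[i]; }
    };

    NAryExprLike *makeNAry(std::pmr::monotonic_buffer_resource &Arena,
                           const std::vector<const SCEVLike *> &Ops) {
      // Copy the operand pointers into the arena, as the patch does with
      // SCEVAllocator.Allocate<const SCEV *>(Ops.size()) followed by
      // std::uninitialized_copy.
      void *Raw = Arena.allocate(Ops.size() * sizeof(const SCEVLike *),
                                 alignof(const SCEVLike *));
      const SCEVLike **O = static_cast<const SCEVLike **>(Raw);
      std::uninitialized_copy(Ops.begin(), Ops.end(), O);
      // Place the node itself in the same arena.
      void *Node = Arena.allocate(sizeof(NAryExprLike), alignof(NAryExprLike));
      return new (Node) NAryExprLike(O, Ops.size());
    }

Node and operand array now share the allocator's lifetime, so tearing down ScalarEvolution releases everything at once and no per-node destructor ever needs to run.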
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index 3c2cbfb..138cdc6 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -232,9 +232,7 @@ static bool FactorOutConstant(const SCEV *&S,
const SCEVConstant *FC = cast<SCEVConstant>(Factor);
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
- const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
- SmallVector<const SCEV *, 4> NewMulOps(MOperands.begin(),
- MOperands.end());
+ SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
NewMulOps[0] =
SE.getConstant(C->getValue()->getValue().sdiv(
FC->getValue()->getValue()));
@@ -249,9 +247,7 @@ static bool FactorOutConstant(const SCEV *&S,
const SCEV *Remainder = SE.getIntegerSCEV(0, SOp->getType());
if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
Remainder->isZero()) {
- const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
- SmallVector<const SCEV *, 4> NewMulOps(MOperands.begin(),
- MOperands.end());
+ SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
NewMulOps[i] = SOp;
S = SE.getMulExpr(NewMulOps);
return true;
@@ -297,13 +293,11 @@ static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
SE.getAddExpr(NoAddRecs);
// If it returned an add, use the operands. Otherwise it simplified
// the sum into a single value, so just use that.
+ Ops.clear();
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
- Ops = Add->getOperands();
- else {
- Ops.clear();
- if (!Sum->isZero())
- Ops.push_back(Sum);
- }
+ Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
+ else if (!Sum->isZero())
+ Ops.push_back(Sum);
// Then append the addrecs.
Ops.insert(Ops.end(), AddRecs.begin(), AddRecs.end());
}
@@ -1060,10 +1054,9 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
if (CanonicalIV &&
SE.getTypeSizeInBits(CanonicalIV->getType()) >
SE.getTypeSizeInBits(Ty)) {
- const SmallVectorImpl<const SCEV *> &Ops = S->getOperands();
- SmallVector<const SCEV *, 4> NewOps(Ops.size());
- for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- NewOps[i] = SE.getAnyExtendExpr(Ops[i], CanonicalIV->getType());
+ SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
+ for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
+ NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop()));
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
@@ -1078,8 +1071,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
// {X,+,F} --> X + {0,+,F}
if (!S->getStart()->isZero()) {
- const SmallVectorImpl<const SCEV *> &SOperands = S->getOperands();
- SmallVector<const SCEV *, 4> NewOps(SOperands.begin(), SOperands.end());
+ SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
NewOps[0] = SE.getIntegerSCEV(0, Ty);
const SCEV *Rest = SE.getAddRecExpr(NewOps, L);
@@ -1248,6 +1240,15 @@ Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
return LHS;
}
+Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty,
+ Instruction *I) {
+ BasicBlock::iterator IP = I;
+ while (isInsertedInstruction(IP) || isa<DbgInfoIntrinsic>(IP))
+ ++IP;
+ Builder.SetInsertPoint(IP->getParent(), IP);
+ return expandCodeFor(SH, Ty);
+}
+
Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty) {
// Expand the code for this SCEV.
Value *V = expand(SH);
@@ -1286,9 +1287,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
// there) so that it is guaranteed to dominate any user inside the loop.
if (L && S->hasComputableLoopEvolution(L) && L != PostIncLoop)
InsertPt = L->getHeader()->getFirstNonPHI();
- while (isa<DbgInfoIntrinsic>(InsertPt))
- InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
- while (isInsertedInstruction(InsertPt))
+ while (isInsertedInstruction(InsertPt) || isa<DbgInfoIntrinsic>(InsertPt))
InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
break;
}
@@ -1324,7 +1323,8 @@ void SCEVExpander::rememberInstruction(Value *I) {
// subsequently inserted code will be dominated.
if (Builder.GetInsertPoint() == I) {
BasicBlock::iterator It = cast<Instruction>(I);
- do { ++It; } while (isInsertedInstruction(It));
+ do { ++It; } while (isInsertedInstruction(It) ||
+ isa<DbgInfoIntrinsic>(It));
Builder.SetInsertPoint(Builder.GetInsertBlock(), It);
}
}
@@ -1332,7 +1332,7 @@ void SCEVExpander::rememberInstruction(Value *I) {
void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
// If we acquired more instructions since the old insert point was saved,
// advance past them.
- while (isInsertedInstruction(I)) ++I;
+ while (isInsertedInstruction(I) || isa<DbgInfoIntrinsic>(I)) ++I;
Builder.SetInsertPoint(BB, I);
}