path: root/lib/Analysis/ScalarEvolution.cpp
author    rdivacky <rdivacky@FreeBSD.org>  2010-05-04 16:11:02 +0000
committer rdivacky <rdivacky@FreeBSD.org>  2010-05-04 16:11:02 +0000
commit    750ce4d809c7e2a298a389a512a17652ff5be3f2 (patch)
tree      70fbd90da02177c8e6ef82adba9fa8ace285a5e3 /lib/Analysis/ScalarEvolution.cpp
parent    5f970ec96e421f64db6b1c6509a902ea73d98cc7 (diff)
download  FreeBSD-src-750ce4d809c7e2a298a389a512a17652ff5be3f2.zip
          FreeBSD-src-750ce4d809c7e2a298a389a512a17652ff5be3f2.tar.gz
Update LLVM to r103004.
Diffstat (limited to 'lib/Analysis/ScalarEvolution.cpp')
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp | 932
1 file changed, 619 insertions(+), 313 deletions(-)
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 1af271a..6870268 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -188,8 +188,8 @@ const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
- return getConstant(
- ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
+ const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
+ return getConstant(ConstantInt::get(ITy, V, isSigned));
}
const Type *SCEVConstant::getType() const { return V->getType(); }
@@ -247,11 +247,13 @@ void SCEVSignExtendExpr::print(raw_ostream &OS) const {
}
void SCEVCommutativeExpr::print(raw_ostream &OS) const {
- assert(NumOperands > 1 && "This plus expr shouldn't exist!");
const char *OpStr = getOperationStr();
- OS << "(" << *Operands[0];
- for (unsigned i = 1, e = NumOperands; i != e; ++i)
- OS << OpStr << *Operands[i];
+ OS << "(";
+ for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
+ OS << **I;
+ if (next(I) != E)
+ OS << OpStr;
+ }
OS << ")";
}
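
The rewritten print loop replaces the old assert-guarded form with a separator-join idiom that also copes with single-operand expressions. The same idiom in a standalone miniature (my sketch, not part of the patch):

    // Separator-join in miniature: prints "(a+b+c)", and "(a)" when there is
    // only one operand, which the old assert-based loop could not handle.
    #include <cstdio>

    int main() {
      const char *Ops[] = {"a", "b", "c"};
      const unsigned N = sizeof(Ops) / sizeof(Ops[0]);
      std::printf("(");
      for (unsigned I = 0; I != N; ++I) {
        std::printf("%s", Ops[I]);
        if (I + 1 != N)
          std::printf("+");   // mirrors the next(I) != E check above
      }
      std::printf(")\n");
      return 0;
    }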
@@ -759,7 +761,7 @@ static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
CalculationBits);
const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
for (unsigned i = 1; i != K; ++i) {
- const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
+ const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
Dividend = SE.getMulExpr(Dividend,
SE.getTruncateOrZeroExtend(S, CalculationTy));
}
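
For context, BinomialCoefficient evaluates It-choose-K, and the loop above accumulates the falling factorial It*(It-1)*...*(It-K+1) into Dividend before the division by K!. A quick numeric check of that identity (a sketch with made-up values, not code from the patch):

    // Falling-factorial numerator of the binomial coefficient, checked for
    // It = 10, K = 3: 10*9*8 = 720, and 720 / 3! = C(10,3) = 120.
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t It = 10, K = 3;
      uint64_t Dividend = It;
      for (uint64_t i = 1; i != K; ++i)
        Dividend *= It - i;           // mirrors getMinusSCEV(It, i) above
      assert(Dividend == 10 * 9 * 8);
      assert(Dividend / 6 == 120);    // divide by K! = 6
      return 0;
    }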
@@ -955,7 +957,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
getUnsignedRange(Step).getUnsignedMax());
if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
- (isLoopGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
+ (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
AR->getPostIncExpr(*this), N)))
// Return the expression with the addrec on the outside.
@@ -965,8 +967,8 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
} else if (isKnownNegative(Step)) {
const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
getSignedRange(Step).getSignedMin());
- if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) &&
- (isLoopGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) ||
+ if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
+ (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
AR->getPostIncExpr(*this), N)))
// Return the expression with the addrec on the outside.
@@ -1090,7 +1092,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
getSignedRange(Step).getSignedMax());
if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
- (isLoopGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
+ (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
AR->getPostIncExpr(*this), N)))
// Return the expression with the addrec on the outside.
@@ -1101,7 +1103,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
getSignedRange(Step).getSignedMin());
if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
- (isLoopGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
+ (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
AR->getPostIncExpr(*this), N)))
// Return the expression with the addrec on the outside.
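
All four hunks above rest on the same fact: once the guards prove the recurrence cannot wrap, the extension distributes over the addrec, i.e. zext({Start,+,Step}) equals {zext(Start),+,zext(Step)}. A standalone sanity check of that fact at concrete widths (my sketch, assuming an i8 induction variable that provably stays below 255):

    // If an 8-bit induction variable never wraps unsigned, extending each
    // iterate equals iterating the extended recurrence.
    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t IV = 10;             // Start
      const uint8_t Step = 3;      // Step
      uint16_t Wide = IV;          // zext(Start)
      for (int i = 0; i < 20; ++i) {
        assert((uint16_t)IV == Wide);   // zext({S,+,T}) == {zext S,+,zext T}
        IV = (uint8_t)(IV + Step);      // 10 + 20*3 = 70, never wraps
        Wide = (uint16_t)(Wide + Step);
      }
      return 0;
    }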
@@ -1237,7 +1239,7 @@ CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
}
} else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
// Pull a buried constant out to the outside.
- if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
+ if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
Interesting = true;
AccumulatedConstant += Scale * C->getValue()->getValue();
} else {
@@ -1308,13 +1310,13 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
}
// If we are left with a constant zero being added, strip it off.
- if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
+ if (LHSC->getValue()->isZero()) {
Ops.erase(Ops.begin());
--Idx;
}
- }
- if (Ops.size() == 1) return Ops[0];
+ if (Ops.size() == 1) return Ops[0];
+ }
// Okay, check to see if the same value occurs in the operand list twice. If
// so, merge them together into a multiply expression. Since we sorted the
@@ -1324,7 +1326,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
// Found a match, merge the two values into a multiply, and add any
// remaining values to the result.
- const SCEV *Two = getIntegerSCEV(2, Ty);
+ const SCEV *Two = getConstant(Ty, 2);
const SCEV *Mul = getMulExpr(Ops[i], Two);
if (Ops.size() == 2)
return Mul;
@@ -1353,9 +1355,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
}
LargeOps.push_back(T->getOperand());
} else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
- // This could be either sign or zero extension, but sign extension
- // is much more likely to be foldable here.
- LargeOps.push_back(getSignExtendExpr(C, SrcType));
+ LargeOps.push_back(getAnyExtendExpr(C, SrcType));
} else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
SmallVector<const SCEV *, 8> LargeMulOps;
for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
@@ -1368,9 +1368,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
LargeMulOps.push_back(T->getOperand());
} else if (const SCEVConstant *C =
dyn_cast<SCEVConstant>(M->getOperand(j))) {
- // This could be either sign or zero extension, but sign extension
- // is much more likely to be foldable here.
- LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
+ LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
} else {
Ok = false;
break;
@@ -1445,7 +1443,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
Ops.push_back(getMulExpr(getConstant(I->first),
getAddExpr(I->second)));
if (Ops.empty())
- return getIntegerSCEV(0, Ty);
+ return getConstant(Ty, 0);
if (Ops.size() == 1)
return Ops[0];
return getAddExpr(Ops);
@@ -1470,7 +1468,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
MulOps.erase(MulOps.begin()+MulOp);
InnerMul = getMulExpr(MulOps);
}
- const SCEV *One = getIntegerSCEV(1, Ty);
+ const SCEV *One = getConstant(Ty, 1);
const SCEV *AddOne = getAddExpr(InnerMul, One);
const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
if (Ops.size() == 2) return OuterMul;
@@ -1534,8 +1532,9 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// they are loop invariant w.r.t. the recurrence.
SmallVector<const SCEV *, 8> LIOps;
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
+ const Loop *AddRecLoop = AddRec->getLoop();
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
+ if (Ops[i]->isLoopInvariant(AddRecLoop)) {
LIOps.push_back(Ops[i]);
Ops.erase(Ops.begin()+i);
--i; --e;
@@ -1552,7 +1551,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// It's tempting to propagate NUW/NSW flags here, but nuw/nsw addition
// is not associative so this isn't necessarily safe.
- const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
+ const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop);
// If all of the other operands were loop invariant, we are done.
if (Ops.size() == 1) return NewRec;
@@ -1573,7 +1572,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
if (OtherIdx != Idx) {
const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
- if (AddRec->getLoop() == OtherAddRec->getLoop()) {
+ if (AddRecLoop == OtherAddRec->getLoop()) {
// Other + {A,+,B} + {C,+,D} --> Other + {A+C,+,B+D}
SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
AddRec->op_end());
@@ -1585,7 +1584,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
}
NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
}
- const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());
+ const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRecLoop);
if (Ops.size() == 2) return NewAddRec;
@@ -1697,15 +1696,15 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
return getAddExpr(NewOps);
}
}
+
+ if (Ops.size() == 1)
+ return Ops[0];
}
// Skip over the add expression until we get to a multiply.
while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
++Idx;
- if (Ops.size() == 1)
- return Ops[0];
-
// If there are mul operands inline them all into this expression.
if (Idx < Ops.size()) {
bool DeletedMul = false;
@@ -1843,77 +1842,81 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
if (RHSC->getValue()->equalsInt(1))
return LHS; // X udiv 1 --> x
- if (RHSC->isZero())
- return getIntegerSCEV(0, LHS->getType()); // value is undefined
-
- // Determine if the division can be folded into the operands of
- // its operands.
- // TODO: Generalize this to non-constants by using known-bits information.
- const Type *Ty = LHS->getType();
- unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
- unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
- // For non-power-of-two values, effectively round the value up to the
- // nearest power of two.
- if (!RHSC->getValue()->getValue().isPowerOf2())
- ++MaxShiftAmt;
- const IntegerType *ExtTy =
- IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
- // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
- if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
- if (const SCEVConstant *Step =
- dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
- if (!Step->getValue()->getValue()
- .urem(RHSC->getValue()->getValue()) &&
- getZeroExtendExpr(AR, ExtTy) ==
- getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
- getZeroExtendExpr(Step, ExtTy),
- AR->getLoop())) {
- SmallVector<const SCEV *, 4> Operands;
- for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
- Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
- return getAddRecExpr(Operands, AR->getLoop());
- }
- // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
- if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
- SmallVector<const SCEV *, 4> Operands;
- for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
- Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
- if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
- // Find an operand that's safely divisible.
- for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
- const SCEV *Op = M->getOperand(i);
- const SCEV *Div = getUDivExpr(Op, RHSC);
- if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
- Operands = SmallVector<const SCEV *, 4>(M->op_begin(), M->op_end());
- Operands[i] = Div;
- return getMulExpr(Operands);
+ // If the denominator is zero, the result of the udiv is undefined. Don't
+ // try to analyze it, because the resolution chosen here may differ from
+ // the resolution chosen in other parts of the compiler.
+ if (!RHSC->getValue()->isZero()) {
+ // Determine if the division can be folded into the operands of
+ // its operands.
+ // TODO: Generalize this to non-constants by using known-bits information.
+ const Type *Ty = LHS->getType();
+ unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
+ unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
+ // For non-power-of-two values, effectively round the value up to the
+ // nearest power of two.
+ if (!RHSC->getValue()->getValue().isPowerOf2())
+ ++MaxShiftAmt;
+ const IntegerType *ExtTy =
+ IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
+ // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
+ if (const SCEVConstant *Step =
+ dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
+ if (!Step->getValue()->getValue()
+ .urem(RHSC->getValue()->getValue()) &&
+ getZeroExtendExpr(AR, ExtTy) ==
+ getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
+ getZeroExtendExpr(Step, ExtTy),
+ AR->getLoop())) {
+ SmallVector<const SCEV *, 4> Operands;
+ for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
+ Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
+ return getAddRecExpr(Operands, AR->getLoop());
}
+ // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
+ if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
+ SmallVector<const SCEV *, 4> Operands;
+ for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
+ Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
+ if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
+ // Find an operand that's safely divisible.
+ for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
+ const SCEV *Op = M->getOperand(i);
+ const SCEV *Div = getUDivExpr(Op, RHSC);
+ if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
+ Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
+ M->op_end());
+ Operands[i] = Div;
+ return getMulExpr(Operands);
+ }
+ }
+ }
+ // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
+ if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
+ SmallVector<const SCEV *, 4> Operands;
+ for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
+ Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
+ if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
+ Operands.clear();
+ for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
+ const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
+ if (isa<SCEVUDivExpr>(Op) ||
+ getMulExpr(Op, RHS) != A->getOperand(i))
+ break;
+ Operands.push_back(Op);
+ }
+ if (Operands.size() == A->getNumOperands())
+ return getAddExpr(Operands);
}
- }
- // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
- if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
- SmallVector<const SCEV *, 4> Operands;
- for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
- Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
- if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
- Operands.clear();
- for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
- const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
- if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
- break;
- Operands.push_back(Op);
- }
- if (Operands.size() == A->getNumOperands())
- return getAddExpr(Operands);
}
- }
- // Fold if both operands are constant.
- if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
- Constant *LHSCV = LHSC->getValue();
- Constant *RHSCV = RHSC->getValue();
- return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
- RHSCV)));
+ // Fold if both operands are constant.
+ if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
+ Constant *LHSCV = LHSC->getValue();
+ Constant *RHSCV = RHSC->getValue();
+ return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
+ RHSCV)));
+ }
}
}
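
Beyond the udiv-by-zero fix, the restructured block keeps the {X,+,N}/C --> {X/C,+,N/C} fold, which is only valid when the step is a multiple of C and the computation cannot wrap. A numeric spot-check of that identity (a sketch with arbitrary small values, not from the patch):

    // With the step N a multiple of C and no unsigned wrap, dividing the
    // recurrence equals the recurrence of the quotients, even when the
    // start value is not itself a multiple of C.
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t X = 10, N = 4, C = 4;      // {10,+,4} /u 4
      for (uint32_t i = 0; i < 100; ++i) {
        uint32_t IV = X + i * N;                // {X,+,N} at iteration i
        assert(IV / C == X / C + i * (N / C));  // {X/C,+,N/C} at iteration i
      }
      return 0;
    }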
@@ -2090,9 +2093,9 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
// maximum-int.
return Ops[0];
}
- }
- if (Ops.size() == 1) return Ops[0];
+ if (Ops.size() == 1) return Ops[0];
+ }
// Find the first SMax
while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
@@ -2116,7 +2119,13 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
// so, delete one. Since we sorted the list, these values are required to
// be adjacent.
for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
- if (Ops[i] == Ops[i+1]) { // X smax Y smax Y --> X smax Y
+ // X smax Y smax Y --> X smax Y
+ // X smax Y --> X, if X is always greater than Y
+ if (Ops[i] == Ops[i+1] ||
+ isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
+ Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
+ --i; --e;
+ } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
--i; --e;
}
@@ -2189,9 +2198,9 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
// maximum-int.
return Ops[0];
}
- }
- if (Ops.size() == 1) return Ops[0];
+ if (Ops.size() == 1) return Ops[0];
+ }
// Find the first UMax
while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
@@ -2215,7 +2224,13 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
// so, delete one. Since we sorted the list, these values are required to
// be adjacent.
for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
- if (Ops[i] == Ops[i+1]) { // X umax Y umax Y --> X umax Y
+ // X umax Y umax Y --> X umax Y
+ // X umax Y --> X, if X is always greater than Y
+ if (Ops[i] == Ops[i+1] ||
+ isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
+ Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
+ --i; --e;
+ } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
--i; --e;
}
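
The strengthened loops above no longer require textual equality: any operand known to be dominated by its neighbor is dropped. The erase-and-back-up idiom they use, reduced to plain integers with a total order standing in for isKnownPredicate (my sketch):

    // Prune a sorted operand list of a max: drop Ops[i+1] whenever
    // Ops[i] >= Ops[i+1] is known, mirroring the unsigned-wrapping
    // --i; --e; bookkeeping in the patch.
    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> Ops = {7, 7, 3};      // duplicates are adjacent
      for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i)
        if (Ops[i] >= Ops[i + 1]) {          // stands in for ICMP_UGE
          Ops.erase(Ops.begin() + i + 1);
          --i; --e;                          // re-examine the same slot
        }
      assert(Ops.size() == 1 && Ops[0] == 7);   // max(7,7,3) --> 7
      return 0;
    }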
@@ -2254,6 +2269,13 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
}
const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
+ // If we have TargetData, we can bypass creating a target-independent
+ // constant expression and then folding it back into a ConstantInt.
+ // This is just a compile-time optimization.
+ if (TD)
+ return getConstant(TD->getIntPtrType(getContext()),
+ TD->getTypeAllocSize(AllocTy));
+
Constant *C = ConstantExpr::getSizeOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
C = ConstantFoldConstantExpression(CE, TD);
@@ -2271,6 +2293,13 @@ const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
unsigned FieldNo) {
+ // If we have TargetData, we can bypass creating a target-independent
+ // constant expression and then folding it back into a ConstantInt.
+ // This is just a compile-time optimization.
+ if (TD)
+ return getConstant(TD->getIntPtrType(getContext()),
+ TD->getStructLayout(STy)->getElementOffset(FieldNo));
+
Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
C = ConstantFoldConstantExpression(CE, TD);
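
Both fast paths compute directly what ConstantExpr::getSizeOf/getOffsetOf would eventually fold to: target-layout allocation sizes and field offsets. A host-side analogy in plain C++ (a sketch, assuming a typical LP64 layout where a 64-bit integer has 8-byte alignment):

    // sizeof/offsetof mirror getTypeAllocSize and
    // getStructLayout(STy)->getElementOffset for a struct { i32, i64 }:
    // the i64 field lands at offset 8 after padding, total size 16.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct S { int32_t A; int64_t B; };

    int main() {
      assert(offsetof(S, B) == 8);   // analogous to getElementOffset(1)
      assert(sizeof(S) == 16);       // analogous to getTypeAllocSize(S)
      return 0;
    }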
@@ -2597,14 +2626,29 @@ ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
/// a loop header, making it a potential recurrence, or it doesn't.
///
const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
- if (PN->getNumIncomingValues() == 2) // The loops have been canonicalized.
- if (const Loop *L = LI->getLoopFor(PN->getParent()))
- if (L->getHeader() == PN->getParent()) {
- // If it lives in the loop header, it has two incoming values, one
- // from outside the loop, and one from inside.
- unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
- unsigned BackEdge = IncomingEdge^1;
-
+ if (const Loop *L = LI->getLoopFor(PN->getParent()))
+ if (L->getHeader() == PN->getParent()) {
+ // The loop may have multiple entrances or multiple exits; we can analyze
+ // this phi as an addrec if it has a unique entry value and a unique
+ // backedge value.
+ Value *BEValueV = 0, *StartValueV = 0;
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ Value *V = PN->getIncomingValue(i);
+ if (L->contains(PN->getIncomingBlock(i))) {
+ if (!BEValueV) {
+ BEValueV = V;
+ } else if (BEValueV != V) {
+ BEValueV = 0;
+ break;
+ }
+ } else if (!StartValueV) {
+ StartValueV = V;
+ } else if (StartValueV != V) {
+ StartValueV = 0;
+ break;
+ }
+ }
+ if (BEValueV && StartValueV) {
// While we are analyzing this PHI node, handle its value symbolically.
const SCEV *SymbolicName = getUnknown(PN);
assert(Scalars.find(PN) == Scalars.end() &&
@@ -2613,7 +2657,6 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// Using this symbolic name for the PHI, analyze the value coming around
// the back-edge.
- Value *BEValueV = PN->getIncomingValue(BackEdge);
const SCEV *BEValue = getSCEV(BEValueV);
// NOTE: If BEValue is loop invariant, we know that the PHI node just
@@ -2657,8 +2700,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
HasNSW = true;
}
- const SCEV *StartVal =
- getSCEV(PN->getIncomingValue(IncomingEdge));
+ const SCEV *StartVal = getSCEV(StartValueV);
const SCEV *PHISCEV =
getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW);
@@ -2684,12 +2726,12 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// Because the other in-value of i (0) fits the evolution of BEValue
// i really is an addrec evolution.
if (AddRec->getLoop() == L && AddRec->isAffine()) {
- const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
+ const SCEV *StartVal = getSCEV(StartValueV);
// If StartVal = j.start - j.stride, we can use StartVal as the
// initial step of the addrec evolution.
if (StartVal == getMinusSCEV(AddRec->getOperand(0),
- AddRec->getOperand(1))) {
+ AddRec->getOperand(1))) {
const SCEV *PHISCEV =
getAddRecExpr(StartVal, AddRec->getOperand(1), L);
@@ -2702,9 +2744,8 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
}
}
}
-
- return SymbolicName;
}
+ }
// If the PHI has a single incoming value, follow that value, unless the
// PHI's incoming blocks are in a different loop, in which case doing so
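
The new entry logic in createNodeForPHI above drops the old two-incoming-value restriction: any number of edges is fine as long as all in-loop edges agree on one value and all entry edges agree on another. The same scan, reduced to (isLoopEdge, value) pairs (my sketch):

    // Find a unique backedge value and a unique start value among the
    // incoming edges of a loop-header phi, or give up.
    #include <cassert>
    #include <utility>
    #include <vector>

    int main() {
      // (edge comes from inside the loop, incoming value)
      std::vector<std::pair<bool, int>> Incoming = {
          {false, 7}, {true, 42}, {true, 42}};   // two agreeing backedges
      int BEValue = 0, StartValue = 0;
      bool HaveBE = false, HaveStart = false, Analyzable = true;
      for (const auto &In : Incoming) {
        const int V = In.second;
        if (In.first) {                           // in-loop edge
          if (!HaveBE) { BEValue = V; HaveBE = true; }
          else if (BEValue != V) { Analyzable = false; break; }
        } else if (!HaveStart) { StartValue = V; HaveStart = true; }
        else if (StartValue != V) { Analyzable = false; break; }
      }
      assert(Analyzable && BEValue == 42 && StartValue == 7);
      return 0;
    }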
@@ -2737,7 +2778,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
// Don't attempt to analyze GEPs over unsized objects.
if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
return getUnknown(GEP);
- const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy);
+ const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
gep_type_iterator GTI = gep_type_begin(GEP);
for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
E = GEP->op_end();
@@ -2920,9 +2961,9 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
// initial value.
if (AddRec->hasNoUnsignedWrap())
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
- ConservativeResult =
- ConstantRange(C->getValue()->getValue(),
- APInt(getTypeSizeInBits(C->getType()), 0));
+ if (!C->getValue()->isZero())
+ ConservativeResult =
+ ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0));
// TODO: non-affine addrec
if (AddRec->isAffine()) {
@@ -2933,14 +2974,26 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
const SCEV *Start = AddRec->getStart();
- const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
+ const SCEV *Step = AddRec->getStepRecurrence(*this);
- // Check for overflow.
- if (!AddRec->hasNoUnsignedWrap())
+ ConstantRange StartRange = getUnsignedRange(Start);
+ ConstantRange StepRange = getSignedRange(Step);
+ ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
+ ConstantRange EndRange =
+ StartRange.add(MaxBECountRange.multiply(StepRange));
+
+ // Check for overflow. This must be done with ConstantRange arithmetic
+ // because we could be called from within the ScalarEvolution overflow
+ // checking code.
+ ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtMaxBECountRange =
+ MaxBECountRange.zextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1);
+ if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
+ ExtEndRange)
return ConservativeResult;
- ConstantRange StartRange = getUnsignedRange(Start);
- ConstantRange EndRange = getUnsignedRange(End);
APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
EndRange.getUnsignedMin());
APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
@@ -3064,14 +3117,26 @@ ScalarEvolution::getSignedRange(const SCEV *S) {
MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
const SCEV *Start = AddRec->getStart();
- const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
+ const SCEV *Step = AddRec->getStepRecurrence(*this);
- // Check for overflow.
- if (!AddRec->hasNoSignedWrap())
+ ConstantRange StartRange = getSignedRange(Start);
+ ConstantRange StepRange = getSignedRange(Step);
+ ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
+ ConstantRange EndRange =
+ StartRange.add(MaxBECountRange.multiply(StepRange));
+
+ // Check for overflow. This must be done with ConstantRange arithmetic
+ // because we could be called from within the ScalarEvolution overflow
+ // checking code.
+ ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtMaxBECountRange =
+ MaxBECountRange.zextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
+ if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
+ ExtEndRange)
return ConservativeResult;
- ConstantRange StartRange = getSignedRange(Start);
- ConstantRange EndRange = getSignedRange(End);
APInt Min = APIntOps::smin(StartRange.getSignedMin(),
EndRange.getSignedMin());
APInt Max = APIntOps::smax(StartRange.getSignedMax(),
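
Both range hunks replace the old evaluateAtIteration call with explicit ConstantRange arithmetic, then redo the End computation at width 2*BitWidth+1; any mismatch means the narrow computation may have wrapped, so the conservative result is returned. The same check with scalar stand-ins for ranges (my sketch):

    // Compute End = Start + MaxBECount*Step narrowly, then again in a wider
    // type; a mismatch signals possible wrap, so give up conservatively.
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint8_t Start = 200, MaxBECount = 3;

      const uint8_t Step = 10;
      uint8_t  End    = (uint8_t)(Start + MaxBECount * Step);            // 230
      uint16_t ExtEnd = (uint16_t)Start + (uint16_t)MaxBECount * Step;   // 230
      assert(End == ExtEnd);          // widths agree: no wrap occurred

      const uint8_t BigStep = 30;
      uint8_t  End2    = (uint8_t)(Start + MaxBECount * BigStep);        // 290 % 256 = 34
      uint16_t ExtEnd2 = (uint16_t)Start + (uint16_t)MaxBECount * BigStep; // 290
      assert(End2 != ExtEnd2);        // mismatch: possible wrap, bail out
      return 0;
    }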
@@ -3122,9 +3187,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
return getConstant(CI);
else if (isa<ConstantPointerNull>(V))
- return getIntegerSCEV(0, V->getType());
- else if (isa<UndefValue>(V))
- return getIntegerSCEV(0, V->getType());
+ return getConstant(V->getType(), 0);
else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
else
@@ -3256,8 +3319,16 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// Turn shift left of a constant amount into a multiply.
if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
+
+ // If the shift count is not less than the bitwidth, the result of
+ // the shift is undefined. Don't try to analyze it, because the
+ // resolution chosen here may differ from the resolution chosen in
+ // other parts of the compiler.
+ if (SA->getValue().uge(BitWidth))
+ break;
+
Constant *X = ConstantInt::get(getContext(),
- APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
+ APInt(BitWidth, 1).shl(SA->getZExtValue()));
return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
}
break;
@@ -3266,8 +3337,16 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// Turn logical shift right of a constant into an unsigned divide.
if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
+
+ // If the shift count is not less than the bitwidth, the result of
+ // the shift is undefined. Don't try to analyze it, because the
+ // resolution chosen here may differ from the resolution chosen in
+ // other parts of the compiler.
+ if (SA->getValue().uge(BitWidth))
+ break;
+
Constant *X = ConstantInt::get(getContext(),
- APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
+ APInt(BitWidth, 1).shl(SA->getZExtValue()));
return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
}
break;
@@ -3275,19 +3354,26 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
case Instruction::AShr:
// For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
- if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
+ if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
if (L->getOpcode() == Instruction::Shl &&
L->getOperand(1) == U->getOperand(1)) {
- unsigned BitWidth = getTypeSizeInBits(U->getType());
+ uint64_t BitWidth = getTypeSizeInBits(U->getType());
+
+ // If the shift count is not less than the bitwidth, the result of
+ // the shift is undefined. Don't try to analyze it, because the
+ // resolution chosen here may differ from the resolution chosen in
+ // other parts of the compiler.
+ if (CI->getValue().uge(BitWidth))
+ break;
+
uint64_t Amt = BitWidth - CI->getZExtValue();
if (Amt == BitWidth)
return getSCEV(L->getOperand(0)); // shift by zero --> noop
- if (Amt > BitWidth)
- return getIntegerSCEV(0, U->getType()); // value is undefined
return
getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
- IntegerType::get(getContext(), Amt)),
- U->getType());
+ IntegerType::get(getContext(),
+ Amt)),
+ U->getType());
}
break;
@@ -3330,10 +3416,22 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// fall through
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
- return getSMaxExpr(getSCEV(LHS), getSCEV(RHS));
- else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
- return getSMinExpr(getSCEV(LHS), getSCEV(RHS));
+ // a >s b ? a+x : b+x -> smax(a, b)+x
+ // a >s b ? b+x : a+x -> smin(a, b)+x
+ if (LHS->getType() == U->getType()) {
+ const SCEV *LS = getSCEV(LHS);
+ const SCEV *RS = getSCEV(RHS);
+ const SCEV *LA = getSCEV(U->getOperand(1));
+ const SCEV *RA = getSCEV(U->getOperand(2));
+ const SCEV *LDiff = getMinusSCEV(LA, LS);
+ const SCEV *RDiff = getMinusSCEV(RA, RS);
+ if (LDiff == RDiff)
+ return getAddExpr(getSMaxExpr(LS, RS), LDiff);
+ LDiff = getMinusSCEV(LA, RS);
+ RDiff = getMinusSCEV(RA, LS);
+ if (LDiff == RDiff)
+ return getAddExpr(getSMinExpr(LS, RS), LDiff);
+ }
break;
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_ULE:
@@ -3341,28 +3439,52 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// fall through
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE:
- if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
- return getUMaxExpr(getSCEV(LHS), getSCEV(RHS));
- else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
- return getUMinExpr(getSCEV(LHS), getSCEV(RHS));
+ // a >u b ? a+x : b+x -> umax(a, b)+x
+ // a >u b ? b+x : a+x -> umin(a, b)+x
+ if (LHS->getType() == U->getType()) {
+ const SCEV *LS = getSCEV(LHS);
+ const SCEV *RS = getSCEV(RHS);
+ const SCEV *LA = getSCEV(U->getOperand(1));
+ const SCEV *RA = getSCEV(U->getOperand(2));
+ const SCEV *LDiff = getMinusSCEV(LA, LS);
+ const SCEV *RDiff = getMinusSCEV(RA, RS);
+ if (LDiff == RDiff)
+ return getAddExpr(getUMaxExpr(LS, RS), LDiff);
+ LDiff = getMinusSCEV(LA, RS);
+ RDiff = getMinusSCEV(RA, LS);
+ if (LDiff == RDiff)
+ return getAddExpr(getUMinExpr(LS, RS), LDiff);
+ }
break;
case ICmpInst::ICMP_NE:
- // n != 0 ? n : 1 -> umax(n, 1)
- if (LHS == U->getOperand(1) &&
- isa<ConstantInt>(U->getOperand(2)) &&
- cast<ConstantInt>(U->getOperand(2))->isOne() &&
+ // n != 0 ? n+x : 1+x -> umax(n, 1)+x
+ if (LHS->getType() == U->getType() &&
isa<ConstantInt>(RHS) &&
- cast<ConstantInt>(RHS)->isZero())
- return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2)));
+ cast<ConstantInt>(RHS)->isZero()) {
+ const SCEV *One = getConstant(LHS->getType(), 1);
+ const SCEV *LS = getSCEV(LHS);
+ const SCEV *LA = getSCEV(U->getOperand(1));
+ const SCEV *RA = getSCEV(U->getOperand(2));
+ const SCEV *LDiff = getMinusSCEV(LA, LS);
+ const SCEV *RDiff = getMinusSCEV(RA, One);
+ if (LDiff == RDiff)
+ return getAddExpr(getUMaxExpr(LS, One), LDiff);
+ }
break;
case ICmpInst::ICMP_EQ:
- // n == 0 ? 1 : n -> umax(n, 1)
- if (LHS == U->getOperand(2) &&
- isa<ConstantInt>(U->getOperand(1)) &&
- cast<ConstantInt>(U->getOperand(1))->isOne() &&
+ // n == 0 ? 1+x : n+x -> umax(n, 1)+x
+ if (LHS->getType() == U->getType() &&
isa<ConstantInt>(RHS) &&
- cast<ConstantInt>(RHS)->isZero())
- return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1)));
+ cast<ConstantInt>(RHS)->isZero()) {
+ const SCEV *One = getConstant(LHS->getType(), 1);
+ const SCEV *LS = getSCEV(LHS);
+ const SCEV *LA = getSCEV(U->getOperand(1));
+ const SCEV *RA = getSCEV(U->getOperand(2));
+ const SCEV *LDiff = getMinusSCEV(LA, One);
+ const SCEV *RDiff = getMinusSCEV(RA, LS);
+ if (LDiff == RDiff)
+ return getAddExpr(getUMaxExpr(LS, One), LDiff);
+ }
break;
default:
break;
@@ -3739,7 +3861,7 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
return getCouldNotCompute();
else
// The backedge is never taken.
- return getIntegerSCEV(0, CI->getType());
+ return getConstant(CI->getType(), 0);
}
// If it's not an integer or pointer comparison then compute it the hard way.
@@ -3786,6 +3908,9 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
Cond = ICmpInst::getSwappedPredicate(Cond);
}
+ // Simplify the operands before analyzing them.
+ (void)SimplifyICmpOperands(Cond, LHS, RHS);
+
// If we have a comparison of a chrec against a constant, try to use value
// ranges to answer this query.
if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
@@ -4067,7 +4192,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
if (I != ConstantEvolutionLoopExitValue.end())
return I->second;
- if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations)))
+ if (BEs.ugt(MaxBruteForceIterations))
return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it.
Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
@@ -4562,7 +4687,7 @@ ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
// already. If so, the backedge will execute zero times.
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
if (!C->getValue()->isNullValue())
- return getIntegerSCEV(0, C->getType());
+ return getConstant(C->getType(), 0);
return getCouldNotCompute(); // Otherwise it will loop infinitely.
}
@@ -4573,6 +4698,8 @@ ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
/// getLoopPredecessor - If the given loop's header has exactly one unique
/// predecessor outside the loop, return it. Otherwise return null.
+/// This is less strict than the loop "preheader" concept, which requires
+/// the predecessor to have a single successor.
///
BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
BasicBlock *Header = L->getHeader();
@@ -4591,21 +4718,21 @@ BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
/// successor from which BB is reachable, or null if no such block is
/// found.
///
-BasicBlock *
+std::pair<BasicBlock *, BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
// If the block has a unique predecessor, then there is no path from the
// predecessor to the block that does not go through the direct edge
// from the predecessor to the block.
if (BasicBlock *Pred = BB->getSinglePredecessor())
- return Pred;
+ return std::make_pair(Pred, BB);
// A loop's header is defined to be a block that dominates the loop.
// If the header has a unique predecessor outside the loop, it must be
// a block that has exactly one successor that can reach the loop.
if (Loop *L = LI->getLoopFor(BB))
- return getLoopPredecessor(L);
+ return std::make_pair(getLoopPredecessor(L), L->getHeader());
- return 0;
+ return std::pair<BasicBlock *, BasicBlock *>();
}
/// HasSameValue - SCEV structural equivalence is usually sufficient for
@@ -4631,6 +4758,266 @@ static bool HasSameValue(const SCEV *A, const SCEV *B) {
return false;
}
+/// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
+/// predicate Pred. Return true iff any changes were made.
+///
+bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
+ const SCEV *&LHS, const SCEV *&RHS) {
+ bool Changed = false;
+
+ // Canonicalize a constant to the right side.
+ if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
+ // Check for both operands constant.
+ if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
+ if (ConstantExpr::getICmp(Pred,
+ LHSC->getValue(),
+ RHSC->getValue())->isNullValue())
+ goto trivially_false;
+ else
+ goto trivially_true;
+ }
+ // Otherwise swap the operands to put the constant on the right.
+ std::swap(LHS, RHS);
+ Pred = ICmpInst::getSwappedPredicate(Pred);
+ Changed = true;
+ }
+
+ // If we're comparing an addrec with a value which is loop-invariant in the
+ // addrec's loop, put the addrec on the left. Also make a dominance check,
+ // as both operands could be addrecs loop-invariant in each other's loop.
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
+ const Loop *L = AR->getLoop();
+ if (LHS->isLoopInvariant(L) && LHS->properlyDominates(L->getHeader(), DT)) {
+ std::swap(LHS, RHS);
+ Pred = ICmpInst::getSwappedPredicate(Pred);
+ Changed = true;
+ }
+ }
+
+ // If there's a constant operand, canonicalize comparisons with boundary
+ // cases, and canonicalize *-or-equal comparisons to regular comparisons.
+ if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
+ const APInt &RA = RC->getValue()->getValue();
+ switch (Pred) {
+ default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
+ case ICmpInst::ICMP_EQ:
+ case ICmpInst::ICMP_NE:
+ break;
+ case ICmpInst::ICMP_UGE:
+ if ((RA - 1).isMinValue()) {
+ Pred = ICmpInst::ICMP_NE;
+ RHS = getConstant(RA - 1);
+ Changed = true;
+ break;
+ }
+ if (RA.isMaxValue()) {
+ Pred = ICmpInst::ICMP_EQ;
+ Changed = true;
+ break;
+ }
+ if (RA.isMinValue()) goto trivially_true;
+
+ Pred = ICmpInst::ICMP_UGT;
+ RHS = getConstant(RA - 1);
+ Changed = true;
+ break;
+ case ICmpInst::ICMP_ULE:
+ if ((RA + 1).isMaxValue()) {
+ Pred = ICmpInst::ICMP_NE;
+ RHS = getConstant(RA + 1);
+ Changed = true;
+ break;
+ }
+ if (RA.isMinValue()) {
+ Pred = ICmpInst::ICMP_EQ;
+ Changed = true;
+ break;
+ }
+ if (RA.isMaxValue()) goto trivially_true;
+
+ Pred = ICmpInst::ICMP_ULT;
+ RHS = getConstant(RA + 1);
+ Changed = true;
+ break;
+ case ICmpInst::ICMP_SGE:
+ if ((RA - 1).isMinSignedValue()) {
+ Pred = ICmpInst::ICMP_NE;
+ RHS = getConstant(RA - 1);
+ Changed = true;
+ break;
+ }
+ if (RA.isMaxSignedValue()) {
+ Pred = ICmpInst::ICMP_EQ;
+ Changed = true;
+ break;
+ }
+ if (RA.isMinSignedValue()) goto trivially_true;
+
+ Pred = ICmpInst::ICMP_SGT;
+ RHS = getConstant(RA - 1);
+ Changed = true;
+ break;
+ case ICmpInst::ICMP_SLE:
+ if ((RA + 1).isMaxSignedValue()) {
+ Pred = ICmpInst::ICMP_NE;
+ RHS = getConstant(RA + 1);
+ Changed = true;
+ break;
+ }
+ if (RA.isMinSignedValue()) {
+ Pred = ICmpInst::ICMP_EQ;
+ Changed = true;
+ break;
+ }
+ if (RA.isMaxSignedValue()) goto trivially_true;
+
+ Pred = ICmpInst::ICMP_SLT;
+ RHS = getConstant(RA + 1);
+ Changed = true;
+ break;
+ case ICmpInst::ICMP_UGT:
+ if (RA.isMinValue()) {
+ Pred = ICmpInst::ICMP_NE;
+ Changed = true;
+ break;
+ }
+ if ((RA + 1).isMaxValue()) {
+ Pred = ICmpInst::ICMP_EQ;
+ RHS = getConstant(RA + 1);
+ Changed = true;
+ break;
+ }
+ if (RA.isMaxValue()) goto trivially_false;
+ break;
+ case ICmpInst::ICMP_ULT:
+ if (RA.isMaxValue()) {
+ Pred = ICmpInst::ICMP_NE;
+ Changed = true;
+ break;
+ }
+ if ((RA - 1).isMinValue()) {
+ Pred = ICmpInst::ICMP_EQ;
+ RHS = getConstant(RA - 1);
+ Changed = true;
+ break;
+ }
+ if (RA.isMinValue()) goto trivially_false;
+ break;
+ case ICmpInst::ICMP_SGT:
+ if (RA.isMinSignedValue()) {
+ Pred = ICmpInst::ICMP_NE;
+ Changed = true;
+ break;
+ }
+ if ((RA + 1).isMaxSignedValue()) {
+ Pred = ICmpInst::ICMP_EQ;
+ RHS = getConstant(RA + 1);
+ Changed = true;
+ break;
+ }
+ if (RA.isMaxSignedValue()) goto trivially_false;
+ break;
+ case ICmpInst::ICMP_SLT:
+ if (RA.isMaxSignedValue()) {
+ Pred = ICmpInst::ICMP_NE;
+ Changed = true;
+ break;
+ }
+ if ((RA - 1).isMinSignedValue()) {
+ Pred = ICmpInst::ICMP_EQ;
+ RHS = getConstant(RA - 1);
+ Changed = true;
+ break;
+ }
+ if (RA.isMinSignedValue()) goto trivially_false;
+ break;
+ }
+ }
+
+ // Check for obvious equality.
+ if (HasSameValue(LHS, RHS)) {
+ if (ICmpInst::isTrueWhenEqual(Pred))
+ goto trivially_true;
+ if (ICmpInst::isFalseWhenEqual(Pred))
+ goto trivially_false;
+ }
+
+ // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
+ // adding or subtracting 1 from one of the operands.
+ switch (Pred) {
+ case ICmpInst::ICMP_SLE:
+ if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
+ RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
+ /*HasNUW=*/false, /*HasNSW=*/true);
+ Pred = ICmpInst::ICMP_SLT;
+ Changed = true;
+ } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
+ LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
+ /*HasNUW=*/false, /*HasNSW=*/true);
+ Pred = ICmpInst::ICMP_SLT;
+ Changed = true;
+ }
+ break;
+ case ICmpInst::ICMP_SGE:
+ if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
+ RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
+ /*HasNUW=*/false, /*HasNSW=*/true);
+ Pred = ICmpInst::ICMP_SGT;
+ Changed = true;
+ } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
+ LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
+ /*HasNUW=*/false, /*HasNSW=*/true);
+ Pred = ICmpInst::ICMP_SGT;
+ Changed = true;
+ }
+ break;
+ case ICmpInst::ICMP_ULE:
+ if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
+ RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
+ /*HasNUW=*/true, /*HasNSW=*/false);
+ Pred = ICmpInst::ICMP_ULT;
+ Changed = true;
+ } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
+ LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
+ /*HasNUW=*/true, /*HasNSW=*/false);
+ Pred = ICmpInst::ICMP_ULT;
+ Changed = true;
+ }
+ break;
+ case ICmpInst::ICMP_UGE:
+ if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
+ RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
+ /*HasNUW=*/true, /*HasNSW=*/false);
+ Pred = ICmpInst::ICMP_UGT;
+ Changed = true;
+ } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
+ LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
+ /*HasNUW=*/true, /*HasNSW=*/false);
+ Pred = ICmpInst::ICMP_UGT;
+ Changed = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ // TODO: More simplifications are possible here.
+
+ return Changed;
+
+trivially_true:
+ // Return 0 == 0.
+ LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0);
+ Pred = ICmpInst::ICMP_EQ;
+ return true;
+
+trivially_false:
+ // Return 0 != 0.
+ LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0);
+ Pred = ICmpInst::ICMP_NE;
+ return true;
+}
+
bool ScalarEvolution::isKnownNegative(const SCEV *S) {
return getSignedRange(S).getSignedMax().isNegative();
}
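
The boundary-case rewrites in SimplifyICmpOperands above are all instances of two facts about integer orderings: x uge 1 is x ne 0, and an -or-equal comparison against a non-extremal constant can be shifted to a strict one. A quick numeric check of two representative cases (my sketch):

    // Spot-check two of the canonicalizations: "x uge 1 --> x ne 0" and
    // "x ule C --> x ult C+1" for a constant C below the type maximum.
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t C = 41;                 // any value below UINT32_MAX
      for (uint32_t x = 0; x < 1000; ++x) {
        assert((x >= 1) == (x != 0));        // ICMP_UGE 1  -->  ICMP_NE 0
        assert((x <= C) == (x < C + 1));     // ICMP_ULE C  -->  ICMP_ULT C+1
      }
      return 0;
    }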
@@ -4653,10 +5040,36 @@ bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS) {
+ // Canonicalize the inputs first.
+ (void)SimplifyICmpOperands(Pred, LHS, RHS);
+
+ // If LHS or RHS is an addrec, check to see if the condition is true in
+ // every iteration of the loop.
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
+ if (isLoopEntryGuardedByCond(
+ AR->getLoop(), Pred, AR->getStart(), RHS) &&
+ isLoopBackedgeGuardedByCond(
+ AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS))
+ return true;
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS))
+ if (isLoopEntryGuardedByCond(
+ AR->getLoop(), Pred, LHS, AR->getStart()) &&
+ isLoopBackedgeGuardedByCond(
+ AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this)))
+ return true;
+
+ // Otherwise see what can be done with known constant ranges.
+ return isKnownPredicateWithRanges(Pred, LHS, RHS);
+}
+bool
+ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS) {
if (HasSameValue(LHS, RHS))
return ICmpInst::isTrueWhenEqual(Pred);
+ // This code is split out from isKnownPredicate because it is called from
+ // within isLoopEntryGuardedByCond.
switch (Pred) {
default:
llvm_unreachable("Unexpected ICmpInst::Predicate value!");
@@ -4753,35 +5166,33 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
LoopContinuePredicate->getSuccessor(0) != L->getHeader());
}
-/// isLoopGuardedByCond - Test whether entry to the loop is protected
+/// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
/// by a conditional between LHS and RHS. This is used to help avoid max
/// expressions in loop trip counts, and to eliminate casts.
bool
-ScalarEvolution::isLoopGuardedByCond(const Loop *L,
- ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS) {
+ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
+ ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS) {
// Interpret a null as meaning no loop, where there is obviously no guard
// (interprocedural conditions notwithstanding).
if (!L) return false;
- BasicBlock *Predecessor = getLoopPredecessor(L);
- BasicBlock *PredecessorDest = L->getHeader();
-
// Starting at the loop predecessor, climb up the predecessor chain, as long
// as there are predecessors that can be found that have unique successors
// leading to the original header.
- for (; Predecessor;
- PredecessorDest = Predecessor,
- Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {
+ for (std::pair<BasicBlock *, BasicBlock *>
+ Pair(getLoopPredecessor(L), L->getHeader());
+ Pair.first;
+ Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
BranchInst *LoopEntryPredicate =
- dyn_cast<BranchInst>(Predecessor->getTerminator());
+ dyn_cast<BranchInst>(Pair.first->getTerminator());
if (!LoopEntryPredicate ||
LoopEntryPredicate->isUnconditional())
continue;
if (isImpliedCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
- LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
+ LoopEntryPredicate->getSuccessor(0) != Pair.second))
return true;
}
@@ -4845,117 +5256,12 @@ bool ScalarEvolution::isImpliedCond(Value *CondValue,
// Canonicalize the query to match the way instcombine will have
// canonicalized the comparison.
- // First, put a constant operand on the right.
- if (isa<SCEVConstant>(LHS)) {
- std::swap(LHS, RHS);
- Pred = ICmpInst::getSwappedPredicate(Pred);
- }
- // Then, canonicalize comparisons with boundary cases.
- if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
- const APInt &RA = RC->getValue()->getValue();
- switch (Pred) {
- default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
- case ICmpInst::ICMP_EQ:
- case ICmpInst::ICMP_NE:
- break;
- case ICmpInst::ICMP_UGE:
- if ((RA - 1).isMinValue()) {
- Pred = ICmpInst::ICMP_NE;
- RHS = getConstant(RA - 1);
- break;
- }
- if (RA.isMaxValue()) {
- Pred = ICmpInst::ICMP_EQ;
- break;
- }
- if (RA.isMinValue()) return true;
- break;
- case ICmpInst::ICMP_ULE:
- if ((RA + 1).isMaxValue()) {
- Pred = ICmpInst::ICMP_NE;
- RHS = getConstant(RA + 1);
- break;
- }
- if (RA.isMinValue()) {
- Pred = ICmpInst::ICMP_EQ;
- break;
- }
- if (RA.isMaxValue()) return true;
- break;
- case ICmpInst::ICMP_SGE:
- if ((RA - 1).isMinSignedValue()) {
- Pred = ICmpInst::ICMP_NE;
- RHS = getConstant(RA - 1);
- break;
- }
- if (RA.isMaxSignedValue()) {
- Pred = ICmpInst::ICMP_EQ;
- break;
- }
- if (RA.isMinSignedValue()) return true;
- break;
- case ICmpInst::ICMP_SLE:
- if ((RA + 1).isMaxSignedValue()) {
- Pred = ICmpInst::ICMP_NE;
- RHS = getConstant(RA + 1);
- break;
- }
- if (RA.isMinSignedValue()) {
- Pred = ICmpInst::ICMP_EQ;
- break;
- }
- if (RA.isMaxSignedValue()) return true;
- break;
- case ICmpInst::ICMP_UGT:
- if (RA.isMinValue()) {
- Pred = ICmpInst::ICMP_NE;
- break;
- }
- if ((RA + 1).isMaxValue()) {
- Pred = ICmpInst::ICMP_EQ;
- RHS = getConstant(RA + 1);
- break;
- }
- if (RA.isMaxValue()) return false;
- break;
- case ICmpInst::ICMP_ULT:
- if (RA.isMaxValue()) {
- Pred = ICmpInst::ICMP_NE;
- break;
- }
- if ((RA - 1).isMinValue()) {
- Pred = ICmpInst::ICMP_EQ;
- RHS = getConstant(RA - 1);
- break;
- }
- if (RA.isMinValue()) return false;
- break;
- case ICmpInst::ICMP_SGT:
- if (RA.isMinSignedValue()) {
- Pred = ICmpInst::ICMP_NE;
- break;
- }
- if ((RA + 1).isMaxSignedValue()) {
- Pred = ICmpInst::ICMP_EQ;
- RHS = getConstant(RA + 1);
- break;
- }
- if (RA.isMaxSignedValue()) return false;
- break;
- case ICmpInst::ICMP_SLT:
- if (RA.isMaxSignedValue()) {
- Pred = ICmpInst::ICMP_NE;
- break;
- }
- if ((RA - 1).isMinSignedValue()) {
- Pred = ICmpInst::ICMP_EQ;
- RHS = getConstant(RA - 1);
- break;
- }
- if (RA.isMinSignedValue()) return false;
- break;
- }
- }
+ if (SimplifyICmpOperands(Pred, LHS, RHS))
+ if (LHS == RHS)
+ return CmpInst::isTrueWhenEqual(Pred);
+ if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
+ if (FoundLHS == FoundRHS)
+ return CmpInst::isFalseWhenEqual(Pred);
// Check to see if we can make the LHS or RHS match.
if (LHS == FoundRHS || RHS == FoundLHS) {
@@ -5028,26 +5334,26 @@ ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
break;
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
- if (isKnownPredicate(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
- isKnownPredicate(ICmpInst::ICMP_SGE, RHS, FoundRHS))
+ if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
+ isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
return true;
break;
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- if (isKnownPredicate(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
- isKnownPredicate(ICmpInst::ICMP_SLE, RHS, FoundRHS))
+ if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
+ isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
return true;
break;
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_ULE:
- if (isKnownPredicate(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
- isKnownPredicate(ICmpInst::ICMP_UGE, RHS, FoundRHS))
+ if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
+ isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
return true;
break;
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE:
- if (isKnownPredicate(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
- isKnownPredicate(ICmpInst::ICMP_ULE, RHS, FoundRHS))
+ if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
+ isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
return true;
break;
}
@@ -5066,7 +5372,7 @@ const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
"This code doesn't handle negative strides yet!");
const Type *Ty = Start->getType();
- const SCEV *NegOne = getIntegerSCEV(-1, Ty);
+ const SCEV *NegOne = getConstant(Ty, (uint64_t)-1);
const SCEV *Diff = getMinusSCEV(End, Start);
const SCEV *RoundUp = getAddExpr(Step, NegOne);
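
getBECount computes ceil((End - Start) / Step) as (Diff + RoundUp) /u Step, where RoundUp = Step + (-1). A worked instance (my sketch, with an assumed Start/End/Step):

    // Round-up trip count: for Start = 0, End = 10, Step = 3 the guard
    // i < 10 holds at i = 0, 3, 6, 9 and fails at 12, so the formula
    // yields ceil(10/3) = 4.
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t Start = 0, End = 10, Step = 3;
      const uint64_t NegOne = (uint64_t)-1;    // the all-ones constant
      const uint64_t RoundUp = Step + NegOne;  // Step - 1 via wraparound
      const uint64_t Diff = End - Start;
      assert((Diff + RoundUp) / Step == 4);
      return 0;
    }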
@@ -5122,7 +5428,7 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
// behavior, so if wrap does occur, the loop could either terminate or
// loop infinitely, but in either case, the loop is guaranteed to
// iterate at least until the iteration where the wrapping occurs.
- const SCEV *One = getIntegerSCEV(1, Step->getType());
+ const SCEV *One = getConstant(Step->getType(), 1);
if (isSigned) {
APInt Max = APInt::getSignedMaxValue(BitWidth);
if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
@@ -5156,10 +5462,10 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
// only know that it will execute (max(m,n)-n)/s times. In both cases,
// the division must round up.
const SCEV *End = RHS;
- if (!isLoopGuardedByCond(L,
- isSigned ? ICmpInst::ICMP_SLT :
- ICmpInst::ICMP_ULT,
- getMinusSCEV(Start, Step), RHS))
+ if (!isLoopEntryGuardedByCond(L,
+ isSigned ? ICmpInst::ICMP_SLT :
+ ICmpInst::ICMP_ULT,
+ getMinusSCEV(Start, Step), RHS))
End = isSigned ? getSMaxExpr(RHS, Start)
: getUMaxExpr(RHS, Start);
@@ -5173,7 +5479,7 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
// This allows the subsequent ceiling division of (N+(step-1))/step to
// compute the correct value.
const SCEV *StepMinusOne = getMinusSCEV(Step,
- getIntegerSCEV(1, Step->getType()));
+ getConstant(Step->getType(), 1));
MaxEnd = isSigned ?
getSMinExpr(MaxEnd,
getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
@@ -5210,7 +5516,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
if (!SC->getValue()->isZero()) {
SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
- Operands[0] = SE.getIntegerSCEV(0, SC->getType());
+ Operands[0] = SE.getConstant(SC->getType(), 0);
const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
if (const SCEVAddRecExpr *ShiftedAddRec =
dyn_cast<SCEVAddRecExpr>(Shifted))
@@ -5234,7 +5540,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
// iteration exits.
unsigned BitWidth = SE.getTypeSizeInBits(getType());
if (!Range.contains(APInt(BitWidth, 0)))
- return SE.getIntegerSCEV(0, getType());
+ return SE.getConstant(getType(), 0);
if (isAffine()) {
// If this is an affine expression then we have this situation:
@@ -5459,7 +5765,7 @@ void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
WriteAsOperand(OS, F, /*PrintType=*/false);
OS << "\n";
for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
- if (isSCEVable(I->getType())) {
+ if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
OS << *I << '\n';
OS << " --> ";
const SCEV *SV = SE.getSCEV(&*I);