Diffstat (limited to 'contrib/llvm/lib/Analysis/ValueTracking.cpp')
-rw-r--r--  contrib/llvm/lib/Analysis/ValueTracking.cpp | 1164
1 file changed, 968 insertions(+), 196 deletions(-)
diff --git a/contrib/llvm/lib/Analysis/ValueTracking.cpp b/contrib/llvm/lib/Analysis/ValueTracking.cpp
index fa0d779..314ec9c 100644
--- a/contrib/llvm/lib/Analysis/ValueTracking.cpp
+++ b/contrib/llvm/lib/Analysis/ValueTracking.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
@@ -43,7 +44,7 @@ const unsigned MaxDepth = 6;
/// Enable an experimental feature to leverage information about dominating
/// conditions to compute known bits. The individual options below control how
-/// hard we search. The defaults are choosen to be fairly aggressive. If you
+/// hard we search. The defaults are chosen to be fairly aggressive. If you
/// run into compile time problems when testing, scale them back and report
/// your findings.
static cl::opt<bool> EnableDomConditions("value-tracking-dom-conditions",
@@ -58,12 +59,12 @@ static cl::opt<unsigned> DomConditionsMaxDepth("dom-conditions-max-depth",
/// conditions?
static cl::opt<unsigned> DomConditionsMaxDomBlocks("dom-conditions-dom-blocks",
cl::Hidden,
- cl::init(20000));
+ cl::init(20));
// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
- cl::Hidden, cl::init(2000));
+ cl::Hidden, cl::init(20));
// If true, don't consider only compares whose only use is a branch.
static cl::opt<bool> DomConditionsSingleCmpUse("dom-conditions-single-cmp-use",
@@ -185,6 +186,25 @@ bool llvm::isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
return ::isKnownNonZero(V, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT));
}
+bool llvm::isKnownNonNegative(Value *V, const DataLayout &DL, unsigned Depth,
+ AssumptionCache *AC, const Instruction *CxtI,
+ const DominatorTree *DT) {
+ bool NonNegative, Negative;
+ ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
+ return NonNegative;
+}
+
+static bool isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
+ const Query &Q);
+
+bool llvm::isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
+ AssumptionCache *AC, const Instruction *CxtI,
+ const DominatorTree *DT) {
+ return ::isKnownNonEqual(V1, V2, DL, Query(AC,
+ safeCxtI(V1, safeCxtI(V2, CxtI)),
+ DT));
+}
+
static bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
unsigned Depth, const Query &Q);
@@ -320,7 +340,7 @@ static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
}
// If low bits are zero in either operand, output low known-0 bits.
- // Also compute a conserative estimate for high known-0 bits.
+ // Also compute a conservative estimate for high known-0 bits.
// More trickiness is possible, but this is sufficient for the
// interesting case of alignment computation.
KnownOne.clearAllBits();
@@ -347,26 +367,30 @@ static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
}
void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
- APInt &KnownZero) {
+ APInt &KnownZero,
+ APInt &KnownOne) {
unsigned BitWidth = KnownZero.getBitWidth();
unsigned NumRanges = Ranges.getNumOperands() / 2;
assert(NumRanges >= 1);
- // Use the high end of the ranges to find leading zeros.
- unsigned MinLeadingZeros = BitWidth;
+ KnownZero.setAllBits();
+ KnownOne.setAllBits();
+
for (unsigned i = 0; i < NumRanges; ++i) {
ConstantInt *Lower =
mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
ConstantInt *Upper =
mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
ConstantRange Range(Lower->getValue(), Upper->getValue());
- if (Range.isWrappedSet())
- MinLeadingZeros = 0; // -1 has no zeros
- unsigned LeadingZeros = (Upper->getValue() - 1).countLeadingZeros();
- MinLeadingZeros = std::min(LeadingZeros, MinLeadingZeros);
- }
- KnownZero = APInt::getHighBitsSet(BitWidth, MinLeadingZeros);
+ // The first CommonPrefixBits of all values in Range are equal.
+ unsigned CommonPrefixBits =
+ (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
+
+ APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
+ KnownOne &= Range.getUnsignedMax() & Mask;
+ KnownZero &= ~Range.getUnsignedMax() & Mask;
+ }
}
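
A minimal standalone sketch of the common-prefix computation above, using plain 8-bit arithmetic instead of APInt and a made-up !range of [32, 48):

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical !range metadata describing the half-open range [32, 48).
  const uint8_t Min = 32, Max = 47;               // getUnsignedMin() / getUnsignedMax()
  const uint8_t Diff = Min ^ Max;                 // bits that can differ inside the range
  unsigned CommonPrefixBits = 0;                  // countLeadingZeros(Min ^ Max)
  for (uint8_t Bit = 0x80; Bit && !(Diff & Bit); Bit >>= 1)
    ++CommonPrefixBits;
  const uint8_t Mask = uint8_t(0xFF << (8 - CommonPrefixBits));
  const uint8_t KnownOne = Max & Mask;            // 0x20: prefix bits that are one
  const uint8_t KnownZero = uint8_t(~Max) & Mask; // 0xD0: prefix bits that are zero
  assert(CommonPrefixBits == 4 && KnownOne == 0x20 && KnownZero == 0xD0);
  for (unsigned V = Min; V <= Max; ++V)           // every value shares the 4-bit prefix
    assert((V & Mask) == KnownOne);
  return 0;
}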
static bool isEphemeralValueOf(Instruction *I, const Value *E) {
@@ -374,20 +398,20 @@ static bool isEphemeralValueOf(Instruction *I, const Value *E) {
SmallPtrSet<const Value *, 32> Visited;
SmallPtrSet<const Value *, 16> EphValues;
+ // The instruction defining an assumption's condition itself is always
+ // considered ephemeral to that assumption (even if it has other
+ // non-ephemeral users). See r246696's test case for an example.
+ if (std::find(I->op_begin(), I->op_end(), E) != I->op_end())
+ return true;
+
while (!WorkSet.empty()) {
const Value *V = WorkSet.pop_back_val();
if (!Visited.insert(V).second)
continue;
// If all uses of this value are ephemeral, then so is this value.
- bool FoundNEUse = false;
- for (const User *I : V->users())
- if (!EphValues.count(I)) {
- FoundNEUse = true;
- break;
- }
-
- if (!FoundNEUse) {
+ if (std::all_of(V->user_begin(), V->user_end(),
+ [&](const User *U) { return EphValues.count(U); })) {
if (V == E)
return true;
@@ -447,7 +471,7 @@ static bool isValidAssumeForContext(Value *V, const Query &Q) {
for (BasicBlock::const_iterator I =
std::next(BasicBlock::const_iterator(Q.CxtI)),
IE(Inv); I != IE; ++I)
- if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I))
+ if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
return false;
return !isEphemeralValueOf(Inv, Q.CxtI);
@@ -464,14 +488,14 @@ static bool isValidAssumeForContext(Value *V, const Query &Q) {
// of the block); the common case is that the assume will come first.
for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)),
IE = Inv->getParent()->end(); I != IE; ++I)
- if (I == Q.CxtI)
+ if (&*I == Q.CxtI)
return true;
// The context must come first...
for (BasicBlock::const_iterator I =
std::next(BasicBlock::const_iterator(Q.CxtI)),
IE(Inv); I != IE; ++I)
- if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I))
+ if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
return false;
return !isEphemeralValueOf(Inv, Q.CxtI);
@@ -601,6 +625,11 @@ static void computeKnownBitsFromDominatingCondition(Value *V, APInt &KnownZero,
if (!Q.DT || !Q.CxtI)
return;
Instruction *Cxt = const_cast<Instruction *>(Q.CxtI);
+ // The context instruction might be in a statically unreachable block. If
+ // so, asking dominator queries may yield surprising results. (e.g. the block
+ // may not have a dom tree node)
+ if (!Q.DT->isReachableFromEntry(Cxt->getParent()))
+ return;
// Avoid useless work
if (auto VI = dyn_cast<Instruction>(V))
@@ -647,7 +676,9 @@ static void computeKnownBitsFromDominatingCondition(Value *V, APInt &KnownZero,
// instruction. Finding a condition where one path dominates the context
// isn't enough because both the true and false cases could merge before
// the context instruction we're actually interested in. Instead, we need
- // to ensure that the taken *edge* dominates the context instruction.
+ // to ensure that the taken *edge* dominates the context instruction. We
+ // know that the edge must be reachable since we started from a reachable
+ // block.
BasicBlock *BB0 = BI->getSuccessor(0);
BasicBlockEdge Edge(BI->getParent(), BB0);
if (!Edge.isSingleEdge() || !Q.DT->dominates(Edge, Q.CxtI->getParent()))
@@ -941,6 +972,90 @@ static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
}
}
+// Compute known bits from a shift operator, including those with a
+// non-constant shift amount. KnownZero and KnownOne are the outputs of this
+// function. KnownZero2 and KnownOne2 are pre-allocated temporaries with the
+// same bit width as KnownZero and KnownOne. KZF and KOF are operator-specific
+// functors that, given the known-zero or known-one bits respectively, and a
+// shift amount, compute the implied known-zero or known-one bits of the shift
+// operator's result respectively for that shift amount. The results from calling
+// KZF and KOF are conservatively combined for all permitted shift amounts.
+template <typename KZFunctor, typename KOFunctor>
+static void computeKnownBitsFromShiftOperator(Operator *I,
+ APInt &KnownZero, APInt &KnownOne,
+ APInt &KnownZero2, APInt &KnownOne2,
+ const DataLayout &DL, unsigned Depth, const Query &Q,
+ KZFunctor KZF, KOFunctor KOF) {
+ unsigned BitWidth = KnownZero.getBitWidth();
+
+ if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
+ unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
+
+ computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
+ KnownZero = KZF(KnownZero, ShiftAmt);
+ KnownOne = KOF(KnownOne, ShiftAmt);
+ return;
+ }
+
+ computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q);
+
+ // Note: We cannot use KnownZero.getLimitedValue() here, because if
+ // BitWidth > 64 and any upper bits are known, we'll end up returning the
+ // limit value (which implies all bits are known).
+ uint64_t ShiftAmtKZ = KnownZero.zextOrTrunc(64).getZExtValue();
+ uint64_t ShiftAmtKO = KnownOne.zextOrTrunc(64).getZExtValue();
+
+ // It would be more-clearly correct to use the two temporaries for this
+ // calculation. Reusing the APInts here to prevent unnecessary allocations.
+ KnownZero.clearAllBits(), KnownOne.clearAllBits();
+
+ // If we know the shifter operand is nonzero, we can sometimes infer more
+ // known bits. However this is expensive to compute, so be lazy about it and
+ // only compute it when absolutely necessary.
+ Optional<bool> ShifterOperandIsNonZero;
+
+ // Early exit if we can't constrain any well-defined shift amount.
+ if (!(ShiftAmtKZ & (BitWidth - 1)) && !(ShiftAmtKO & (BitWidth - 1))) {
+ ShifterOperandIsNonZero =
+ isKnownNonZero(I->getOperand(1), DL, Depth + 1, Q);
+ if (!*ShifterOperandIsNonZero)
+ return;
+ }
+
+ computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);
+
+ KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
+ for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
+ // Combine the shifted known input bits only for those shift amounts
+ // compatible with its known constraints.
+ if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
+ continue;
+ if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
+ continue;
+ // If we know the shifter is nonzero, we may be able to infer more known
+ // bits. This check is sunk down as far as possible to avoid the expensive
+ // call to isKnownNonZero if the cheaper checks above fail.
+ if (ShiftAmt == 0) {
+ if (!ShifterOperandIsNonZero.hasValue())
+ ShifterOperandIsNonZero =
+ isKnownNonZero(I->getOperand(1), DL, Depth + 1, Q);
+ if (*ShifterOperandIsNonZero)
+ continue;
+ }
+
+ KnownZero &= KZF(KnownZero2, ShiftAmt);
+ KnownOne &= KOF(KnownOne2, ShiftAmt);
+ }
+
+ // If there are no compatible shift amounts, then we've proven that the shift
+ // amount must be >= the BitWidth, and the result is undefined. We could
+ // return anything we'd like, but we need to make sure the sets of known bits
+ // stay disjoint (it should be better for some other code to actually
+ // propagate the undef than to pick a value here using known bits).
+ if ((KnownZero & KnownOne) != 0)
+ KnownZero.clearAllBits(), KnownOne.clearAllBits();
+}
+
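
A rough standalone sketch of how the known-zero functor and the compatible-shift-amount loop combine for a shl, with invented 8-bit known-bit values standing in for the APInts:

#include <cassert>
#include <cstdint>

int main() {
  const unsigned BitWidth = 8;
  const uint8_t Op0KnownZero = 0x0F;      // shifted value: low 4 bits known zero
  const uint8_t AmtKnownZero = 0xFC;      // shift amount: bits 2..7 known zero
  const uint8_t AmtKnownOne  = 0x01;      // shift amount: bit 0 known one => amount is 1 or 3
  auto KZF = [](uint8_t KZ, unsigned Amt) {               // the shl known-zero functor
    return uint8_t((KZ << Amt) | ((1u << Amt) - 1u));
  };
  uint8_t KnownZero = 0xFF;                               // start fully known, then intersect
  for (unsigned Amt = 0; Amt < BitWidth; ++Amt) {
    if ((Amt & ~unsigned(AmtKnownZero)) != Amt) continue; // clashes with a known-zero bit
    if ((Amt | AmtKnownOne) != Amt) continue;             // clashes with a known-one bit
    KnownZero &= KZF(Op0KnownZero, Amt);                  // conservative intersection
  }
  assert(KnownZero == 0x1F);   // low 5 bits of the shl result are provably zero
  return 0;
}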
static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
APInt &KnownOne, const DataLayout &DL,
unsigned Depth, const Query &Q) {
@@ -951,7 +1066,7 @@ static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
default: break;
case Instruction::Load:
if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
- computeKnownBitsFromRangeMetadata(*MD, KnownZero);
+ computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
break;
case Instruction::And: {
// If either the LHS or the RHS are Zero, the result is zero.
@@ -962,6 +1077,22 @@ static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
KnownOne &= KnownOne2;
// Output known-0 are known to be clear if zero in either the LHS | RHS.
KnownZero |= KnownZero2;
+
+ // and(x, add (x, -1)) is a common idiom that always clears the low bit;
+ // here we handle the more general case of adding any odd number by
+ // matching the form add(x, add(x, y)) where y is odd.
+ // TODO: This could be generalized to clearing any bit set in y where the
+ // following bit is known to be unset in y.
+ Value *Y = nullptr;
+ if (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
+ m_Value(Y))) ||
+ match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
+ m_Value(Y)))) {
+ APInt KnownZero3(BitWidth, 0), KnownOne3(BitWidth, 0);
+ computeKnownBits(Y, KnownZero3, KnownOne3, DL, Depth + 1, Q);
+ if (KnownOne3.countTrailingOnes() > 0)
+ KnownZero |= APInt::getLowBitsSet(BitWidth, 1);
+ }
break;
}
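
A quick exhaustive 8-bit check of the claim that and'ing x with x plus any odd number clears the low bit (standalone, no LLVM types):

#include <cassert>
#include <cstdint>

int main() {
  // x and x + <odd> always differ in bit 0, so and'ing them clears bit 0.
  for (unsigned X = 0; X < 256; ++X)
    for (unsigned Y = 1; Y < 256; Y += 2)          // every odd 8-bit Y
      assert(((uint8_t(X) & uint8_t(X + Y)) & 1u) == 0u);
  return 0;
}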
case Instruction::Or: {
@@ -1050,7 +1181,8 @@ static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
}
case Instruction::BitCast: {
Type *SrcTy = I->getOperand(0)->getType();
- if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
+ if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy() ||
+ SrcTy->isFloatingPointTy()) &&
// TODO: For now, not handling conversions like:
// (bitcast i64 %x to <2 x i32>)
!I->getType()->isVectorTy()) {
@@ -1077,48 +1209,54 @@ static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
break;
}
- case Instruction::Shl:
+ case Instruction::Shl: {
// (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
- if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
- uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
- computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
- KnownZero <<= ShiftAmt;
- KnownOne <<= ShiftAmt;
- KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
- }
+ auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
+ return (KnownZero << ShiftAmt) |
+ APInt::getLowBitsSet(BitWidth, ShiftAmt); // Low bits known 0.
+ };
+
+ auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
+ return KnownOne << ShiftAmt;
+ };
+
+ computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
+ KnownZero2, KnownOne2, DL, Depth, Q,
+ KZF, KOF);
break;
- case Instruction::LShr:
+ }
+ case Instruction::LShr: {
// (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
- if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
- // Compute the new bits that are at the top now.
- uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
-
- // Unsigned shift right.
- computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
- KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
- KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
- // high bits known zero.
- KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
- }
+ auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
+ return APIntOps::lshr(KnownZero, ShiftAmt) |
+ // High bits known zero.
+ APInt::getHighBitsSet(BitWidth, ShiftAmt);
+ };
+
+ auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
+ return APIntOps::lshr(KnownOne, ShiftAmt);
+ };
+
+ computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
+ KnownZero2, KnownOne2, DL, Depth, Q,
+ KZF, KOF);
break;
- case Instruction::AShr:
+ }
+ case Instruction::AShr: {
// (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
- if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
- // Compute the new bits that are at the top now.
- uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
+ auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
+ return APIntOps::ashr(KnownZero, ShiftAmt);
+ };
- // Signed shift right.
- computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
- KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
- KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
+ auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
+ return APIntOps::ashr(KnownOne, ShiftAmt);
+ };
- APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
- if (KnownZero[BitWidth-ShiftAmt-1]) // New bits are known zero.
- KnownZero |= HighBits;
- else if (KnownOne[BitWidth-ShiftAmt-1]) // New bits are known one.
- KnownOne |= HighBits;
- }
+ computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
+ KnownZero2, KnownOne2, DL, Depth, Q,
+ KZF, KOF);
break;
+ }
case Instruction::Sub: {
bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
@@ -1336,13 +1474,19 @@ static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
case Instruction::Call:
case Instruction::Invoke:
if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
- computeKnownBitsFromRangeMetadata(*MD, KnownZero);
+ computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
// If a range metadata is attached to this IntrinsicInst, intersect the
// explicit range specified by the metadata and the implicit range of
// the intrinsic.
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
switch (II->getIntrinsicID()) {
default: break;
+ case Intrinsic::bswap:
+ computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL,
+ Depth + 1, Q);
+ KnownZero |= KnownZero2.byteSwap();
+ KnownOne |= KnownOne2.byteSwap();
+ break;
case Intrinsic::ctlz:
case Intrinsic::cttz: {
unsigned LowBits = Log2_32(BitWidth)+1;
@@ -1353,8 +1497,24 @@ static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
break;
}
case Intrinsic::ctpop: {
- unsigned LowBits = Log2_32(BitWidth)+1;
- KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
+ computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL,
+ Depth + 1, Q);
+ // We can bound the space the count needs. Also, bits known to be zero
+ // can't contribute to the population.
+ unsigned BitsPossiblySet = BitWidth - KnownZero2.countPopulation();
+ unsigned LeadingZeros =
+ APInt(BitWidth, BitsPossiblySet).countLeadingZeros();
+ assert(LeadingZeros <= BitWidth);
+ KnownZero |= APInt::getHighBitsSet(BitWidth, LeadingZeros);
+ KnownOne &= ~KnownZero;
+ // TODO: we could bound KnownOne using the lower bound on the number
+ // of bits which might be set provided by popcnt KnownOne2.
+ break;
+ }
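
A small sketch of the ctpop bound, assuming a 32-bit input with 24 bits already known to be zero:

#include <cassert>
#include <cstdint>

int main() {
  const unsigned BitWidth = 32;
  const unsigned KnownZeroBits = 24;                        // assumed known-zero input bits
  const uint32_t BitsPossiblySet = BitWidth - KnownZeroBits; // population count <= 8
  unsigned LeadingZeros = 0;                                // countLeadingZeros(BitsPossiblySet)
  for (uint32_t Bit = 0x80000000u; Bit && !(BitsPossiblySet & Bit); Bit >>= 1)
    ++LeadingZeros;
  assert(LeadingZeros == 28);
  // So the high 28 bits of ctpop's result are known zero; the count fits in 4 bits.
  return 0;
}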
+ case Intrinsic::fabs: {
+ Type *Ty = II->getType();
+ APInt SignBit = APInt::getSignBit(Ty->getScalarSizeInBits());
+ KnownZero |= APInt::getSplat(Ty->getPrimitiveSizeInBits(), SignBit);
break;
}
case Intrinsic::x86_sse42_crc32_64_64:
@@ -1394,6 +1554,46 @@ static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
}
}
+static unsigned getAlignment(const Value *V, const DataLayout &DL) {
+ unsigned Align = 0;
+ if (auto *GO = dyn_cast<GlobalObject>(V)) {
+ Align = GO->getAlignment();
+ if (Align == 0) {
+ if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
+ Type *ObjectType = GVar->getType()->getElementType();
+ if (ObjectType->isSized()) {
+ // If the object is defined in the current Module, we'll be giving
+ // it the preferred alignment. Otherwise, we have to assume that it
+ // may only have the minimum ABI alignment.
+ if (GVar->isStrongDefinitionForLinker())
+ Align = DL.getPreferredAlignment(GVar);
+ else
+ Align = DL.getABITypeAlignment(ObjectType);
+ }
+ }
+ }
+ } else if (const Argument *A = dyn_cast<Argument>(V)) {
+ Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0;
+
+ if (!Align && A->hasStructRetAttr()) {
+ // An sret parameter has at least the ABI alignment of the return type.
+ Type *EltTy = cast<PointerType>(A->getType())->getElementType();
+ if (EltTy->isSized())
+ Align = DL.getABITypeAlignment(EltTy);
+ }
+ } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
+ Align = AI->getAlignment();
+ else if (auto CS = ImmutableCallSite(V))
+ Align = CS.getAttributes().getParamAlignment(AttributeSet::ReturnIndex);
+ else if (const LoadInst *LI = dyn_cast<LoadInst>(V))
+ if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
+ ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+ Align = CI->getLimitedValue();
+ }
+
+ return Align;
+}
+
/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
@@ -1416,8 +1616,9 @@ void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
unsigned BitWidth = KnownZero.getBitWidth();
assert((V->getType()->isIntOrIntVectorTy() ||
+ V->getType()->isFPOrFPVectorTy() ||
V->getType()->getScalarType()->isPointerTy()) &&
- "Not integer or pointer type!");
+ "Not integer, floating point, or pointer type!");
assert((DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
(!V->getType()->isIntOrIntVectorTy() ||
V->getType()->getScalarSizeInBits() == BitWidth) &&
@@ -1454,59 +1655,6 @@ void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
return;
}
- // The address of an aligned GlobalValue has trailing zeros.
- if (auto *GO = dyn_cast<GlobalObject>(V)) {
- unsigned Align = GO->getAlignment();
- if (Align == 0) {
- if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
- Type *ObjectType = GVar->getType()->getElementType();
- if (ObjectType->isSized()) {
- // If the object is defined in the current Module, we'll be giving
- // it the preferred alignment. Otherwise, we have to assume that it
- // may only have the minimum ABI alignment.
- if (GVar->isStrongDefinitionForLinker())
- Align = DL.getPreferredAlignment(GVar);
- else
- Align = DL.getABITypeAlignment(ObjectType);
- }
- }
- }
- if (Align > 0)
- KnownZero = APInt::getLowBitsSet(BitWidth,
- countTrailingZeros(Align));
- else
- KnownZero.clearAllBits();
- KnownOne.clearAllBits();
- return;
- }
-
- if (Argument *A = dyn_cast<Argument>(V)) {
- unsigned Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0;
-
- if (!Align && A->hasStructRetAttr()) {
- // An sret parameter has at least the ABI alignment of the return type.
- Type *EltTy = cast<PointerType>(A->getType())->getElementType();
- if (EltTy->isSized())
- Align = DL.getABITypeAlignment(EltTy);
- }
-
- if (Align)
- KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
- else
- KnownZero.clearAllBits();
- KnownOne.clearAllBits();
-
- // Don't give up yet... there might be an assumption that provides more
- // information...
- computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q);
-
- // Or a dominating condition for that matter
- if (EnableDomConditions && Depth <= DomConditionsMaxDepth)
- computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL,
- Depth, Q);
- return;
- }
-
// Start out not knowing anything.
KnownZero.clearAllBits(); KnownOne.clearAllBits();
@@ -1525,6 +1673,14 @@ void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
if (Operator *I = dyn_cast<Operator>(V))
computeKnownBitsFromOperator(I, KnownZero, KnownOne, DL, Depth, Q);
+
+ // Aligned pointers have trailing zeros - refine KnownZero set
+ if (V->getType()->isPointerTy()) {
+ unsigned Align = getAlignment(V, DL);
+ if (Align)
+ KnownZero |= APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
+ }
+
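
A standalone sketch of the trailing-zeros refinement, assuming a pointer known to be 16-byte aligned:

#include <cassert>
#include <cstdint>

int main() {
  const unsigned Align = 16;               // assumed pointer alignment
  unsigned TrailingZeros = 0;              // countTrailingZeros(Align)
  for (unsigned A = Align; (A & 1u) == 0; A >>= 1)
    ++TrailingZeros;
  const uint64_t LowBitsKnownZero = (uint64_t(1) << TrailingZeros) - 1;
  assert(TrailingZeros == 4 && LowBitsKnownZero == 0xF);
  for (uint64_t Addr = 0; Addr < 4096; Addr += Align)  // every 16-byte aligned address
    assert((Addr & LowBitsKnownZero) == 0);
  return 0;
}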
// computeKnownBitsFromAssume and computeKnownBitsFromDominatingCondition
// strictly refines KnownZero and KnownOne. Therefore, we run them after
// computeKnownBitsFromOperator.
@@ -1812,6 +1968,23 @@ bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
ComputeSignBit(X, XKnownNonNegative, XKnownNegative, DL, Depth, Q);
if (XKnownNegative)
return true;
+
+ // If the shifter operand is a constant, and all of the bits shifted
+ // out are known to be zero, and X is known non-zero then at least one
+ // non-zero bit must remain.
+ if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
+ APInt KnownZero(BitWidth, 0);
+ APInt KnownOne(BitWidth, 0);
+ computeKnownBits(X, KnownZero, KnownOne, DL, Depth, Q);
+
+ auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
+ // Is there a known one in the portion not shifted out?
+ if (KnownOne.countLeadingZeros() < BitWidth - ShiftVal)
+ return true;
+ // Are all the bits to be shifted out known zero?
+ if (KnownZero.countTrailingOnes() >= ShiftVal)
+ return isKnownNonZero(X, DL, Depth, Q);
+ }
}
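
A standalone 8-bit check of the shift reasoning above; the constant shift amount of 3 and the known-zero low bits are assumptions of the example:

#include <cassert>
#include <cstdint>

int main() {
  // If the low 3 bits of X are known zero, an lshr by 3 discards only
  // known-zero bits, so any nonzero X stays nonzero after the shift.
  for (unsigned X = 8; X < 256; X += 8)    // every nonzero 8-bit X with low 3 bits zero
    assert((uint8_t(X) >> 3) != 0);
  return 0;
}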
// div exact can only produce a zero if the dividend is zero.
else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
@@ -1871,6 +2044,26 @@ bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
isKnownNonZero(SI->getFalseValue(), DL, Depth, Q))
return true;
}
+ // PHI
+ else if (PHINode *PN = dyn_cast<PHINode>(V)) {
+ // Try and detect a recurrence that monotonically increases from a
+ // starting value, as these are common as induction variables.
+ if (PN->getNumIncomingValues() == 2) {
+ Value *Start = PN->getIncomingValue(0);
+ Value *Induction = PN->getIncomingValue(1);
+ if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
+ std::swap(Start, Induction);
+ if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
+ if (!C->isZero() && !C->isNegative()) {
+ ConstantInt *X;
+ if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
+ match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
+ !X->isNegative())
+ return true;
+ }
+ }
+ }
+ }
if (!BitWidth) return false;
APInt KnownZero(BitWidth, 0);
@@ -1879,6 +2072,51 @@ bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
return KnownOne != 0;
}
+/// Return true if V2 == V1 + X, where X is known non-zero.
+static bool isAddOfNonZero(Value *V1, Value *V2, const DataLayout &DL,
+ const Query &Q) {
+ BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
+ if (!BO || BO->getOpcode() != Instruction::Add)
+ return false;
+ Value *Op = nullptr;
+ if (V2 == BO->getOperand(0))
+ Op = BO->getOperand(1);
+ else if (V2 == BO->getOperand(1))
+ Op = BO->getOperand(0);
+ else
+ return false;
+ return isKnownNonZero(Op, DL, 0, Q);
+}
+
+/// Return true if it is known that V1 != V2.
+static bool isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
+ const Query &Q) {
+ if (V1->getType()->isVectorTy() || V1 == V2)
+ return false;
+ if (V1->getType() != V2->getType())
+ // We can't look through casts yet.
+ return false;
+ if (isAddOfNonZero(V1, V2, DL, Q) || isAddOfNonZero(V2, V1, DL, Q))
+ return true;
+
+ if (IntegerType *Ty = dyn_cast<IntegerType>(V1->getType())) {
+ // Are any known bits in V1 contradictory to known bits in V2? If V1
+ // has a known zero where V2 has a known one, they must not be equal.
+ auto BitWidth = Ty->getBitWidth();
+ APInt KnownZero1(BitWidth, 0);
+ APInt KnownOne1(BitWidth, 0);
+ computeKnownBits(V1, KnownZero1, KnownOne1, DL, 0, Q);
+ APInt KnownZero2(BitWidth, 0);
+ APInt KnownOne2(BitWidth, 0);
+ computeKnownBits(V2, KnownZero2, KnownOne2, DL, 0, Q);
+
+ auto OppositeBits = (KnownZero1 & KnownOne2) | (KnownZero2 & KnownOne1);
+ if (OppositeBits.getBoolValue())
+ return true;
+ }
+ return false;
+}
+
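
A standalone sketch of both inequality arguments, using plain 8-bit values rather than APInt:

#include <cassert>
#include <cstdint>

int main() {
  // Adding a value known to be nonzero can never reproduce the original value,
  // even with wrap-around, which is the fact isAddOfNonZero relies on.
  for (unsigned V = 0; V < 256; ++V)
    for (unsigned C = 1; C < 256; ++C)               // C plays the known-nonzero addend
      assert(uint8_t(V + C) != uint8_t(V));
  // Contradictory known bits also prove inequality: a bit known one in one value
  // but known zero in the other means the two values can never compare equal.
  const uint8_t KnownOne1 = 0x10, KnownZero2 = 0x10; // assumed known bits
  assert((KnownOne1 & KnownZero2) != 0);             // the OppositeBits test above
  return 0;
}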
/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
@@ -2545,7 +2783,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
}
// This insert value inserts something else than what we are looking for.
- // See if the (aggregrate) value inserted into has the value we are
+ // See if the (aggregate) value inserted into has the value we are
// looking for, then.
if (*req_idx != *i)
return FindInsertedValue(I->getAggregateOperand(), idx_range,
@@ -2560,7 +2798,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
}
if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
- // If we're extracting a value from an aggregrate that was extracted from
+ // If we're extracting a value from an aggregate that was extracted from
// something else, we can extract from that something else directly instead.
// However, we will need to chain I's indices with the requested indices.
@@ -2935,20 +3173,42 @@ static bool isDereferenceableFromAttribute(const Value *V, const DataLayout &DL,
return isDereferenceableFromAttribute(V, Offset, Ty, DL, CtxI, DT, TLI);
}
-/// Return true if Value is always a dereferenceable pointer.
-///
+static bool isAligned(const Value *Base, APInt Offset, unsigned Align,
+ const DataLayout &DL) {
+ APInt BaseAlign(Offset.getBitWidth(), getAlignment(Base, DL));
+
+ if (!BaseAlign) {
+ Type *Ty = Base->getType()->getPointerElementType();
+ if (!Ty->isSized())
+ return false;
+ BaseAlign = DL.getABITypeAlignment(Ty);
+ }
+
+ APInt Alignment(Offset.getBitWidth(), Align);
+
+ assert(Alignment.isPowerOf2() && "must be a power of 2!");
+ return BaseAlign.uge(Alignment) && !(Offset & (Alignment-1));
+}
+
+static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) {
+ Type *Ty = Base->getType();
+ assert(Ty->isSized() && "must be sized");
+ APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
+ return isAligned(Base, Offset, Align, DL);
+}
+
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
-static bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
- const Instruction *CtxI,
- const DominatorTree *DT,
- const TargetLibraryInfo *TLI,
- SmallPtrSetImpl<const Value *> &Visited) {
+static bool isDereferenceableAndAlignedPointer(
+ const Value *V, unsigned Align, const DataLayout &DL,
+ const Instruction *CtxI, const DominatorTree *DT,
+ const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited) {
// Note that it is not safe to speculate into a malloc'd region because
// malloc may return null.
- // These are obviously ok.
- if (isa<AllocaInst>(V)) return true;
+ // These are obviously ok if aligned.
+ if (isa<AllocaInst>(V))
+ return isAligned(V, Align, DL);
// It's not always safe to follow a bitcast, for example:
// bitcast i8* (alloca i8) to i32*
@@ -2963,21 +3223,22 @@ static bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
if (STy->isSized() && DTy->isSized() &&
(DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) &&
(DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy)))
- return isDereferenceablePointer(BC->getOperand(0), DL, CtxI,
- DT, TLI, Visited);
+ return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, DL,
+ CtxI, DT, TLI, Visited);
}
// Global variables which can't collapse to null are ok.
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
- return !GV->hasExternalWeakLinkage();
+ if (!GV->hasExternalWeakLinkage())
+ return isAligned(V, Align, DL);
// byval arguments are okay.
if (const Argument *A = dyn_cast<Argument>(V))
if (A->hasByValAttr())
- return true;
-
+ return isAligned(V, Align, DL);
+
if (isDereferenceableFromAttribute(V, DL, CtxI, DT, TLI))
- return true;
+ return isAligned(V, Align, DL);
// For GEPs, determine if the indexing lands within the allocated object.
if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
@@ -2985,61 +3246,79 @@ static bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
Type *Ty = VTy->getPointerElementType();
const Value *Base = GEP->getPointerOperand();
- // Conservatively require that the base pointer be fully dereferenceable.
+ // Conservatively require that the base pointer be fully dereferenceable
+ // and aligned.
if (!Visited.insert(Base).second)
return false;
- if (!isDereferenceablePointer(Base, DL, CtxI,
- DT, TLI, Visited))
+ if (!isDereferenceableAndAlignedPointer(Base, Align, DL, CtxI, DT, TLI,
+ Visited))
return false;
-
+
APInt Offset(DL.getPointerTypeSizeInBits(VTy), 0);
if (!GEP->accumulateConstantOffset(DL, Offset))
return false;
-
- // Check if the load is within the bounds of the underlying object.
+
+ // Check if the load is within the bounds of the underlying object
+ // and offset is aligned.
uint64_t LoadSize = DL.getTypeStoreSize(Ty);
Type *BaseType = Base->getType()->getPointerElementType();
- return (Offset + LoadSize).ule(DL.getTypeAllocSize(BaseType));
+ assert(isPowerOf2_32(Align) && "must be a power of 2!");
+ return (Offset + LoadSize).ule(DL.getTypeAllocSize(BaseType)) &&
+ !(Offset & APInt(Offset.getBitWidth(), Align-1));
}
// For gc.relocate, look through relocations
if (const IntrinsicInst *I = dyn_cast<IntrinsicInst>(V))
if (I->getIntrinsicID() == Intrinsic::experimental_gc_relocate) {
GCRelocateOperands RelocateInst(I);
- return isDereferenceablePointer(RelocateInst.getDerivedPtr(), DL, CtxI,
- DT, TLI, Visited);
+ return isDereferenceableAndAlignedPointer(
+ RelocateInst.getDerivedPtr(), Align, DL, CtxI, DT, TLI, Visited);
}
if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
- return isDereferenceablePointer(ASC->getOperand(0), DL, CtxI,
- DT, TLI, Visited);
+ return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, DL,
+ CtxI, DT, TLI, Visited);
// If we don't know, assume the worst.
return false;
}
-bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
- const Instruction *CtxI,
- const DominatorTree *DT,
- const TargetLibraryInfo *TLI) {
+bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
+ const DataLayout &DL,
+ const Instruction *CtxI,
+ const DominatorTree *DT,
+ const TargetLibraryInfo *TLI) {
// When dereferenceability information is provided by a dereferenceable
// attribute, we know exactly how many bytes are dereferenceable. If we can
// determine the exact offset to the attributed variable, we can use that
// information here.
Type *VTy = V->getType();
Type *Ty = VTy->getPointerElementType();
+
+ // Require ABI alignment for loads without alignment specification
+ if (Align == 0)
+ Align = DL.getABITypeAlignment(Ty);
+
if (Ty->isSized()) {
APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
const Value *BV = V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
-
+
if (Offset.isNonNegative())
- if (isDereferenceableFromAttribute(BV, Offset, Ty, DL,
- CtxI, DT, TLI))
+ if (isDereferenceableFromAttribute(BV, Offset, Ty, DL, CtxI, DT, TLI) &&
+ isAligned(BV, Offset, Align, DL))
return true;
}
SmallPtrSet<const Value *, 32> Visited;
- return ::isDereferenceablePointer(V, DL, CtxI, DT, TLI, Visited);
+ return ::isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI,
+ Visited);
+}
+
+bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
+ const Instruction *CtxI,
+ const DominatorTree *DT,
+ const TargetLibraryInfo *TLI) {
+ return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT, TLI);
}
bool llvm::isSafeToSpeculativelyExecute(const Value *V,
@@ -3089,10 +3368,15 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
const LoadInst *LI = cast<LoadInst>(Inst);
if (!LI->isUnordered() ||
// Speculative load may create a race that did not exist in the source.
- LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
+ LI->getParent()->getParent()->hasFnAttribute(
+ Attribute::SanitizeThread) ||
+ // Speculative load may load data from dirty regions.
+ LI->getParent()->getParent()->hasFnAttribute(
+ Attribute::SanitizeAddress))
return false;
const DataLayout &DL = LI->getModule()->getDataLayout();
- return isDereferenceablePointer(LI->getPointerOperand(), DL, CtxI, DT, TLI);
+ return isDereferenceableAndAlignedPointer(
+ LI->getPointerOperand(), LI->getAlignment(), DL, CtxI, DT, TLI);
}
case Instruction::Call: {
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
@@ -3147,16 +3431,27 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
case Instruction::Switch:
case Instruction::Unreachable:
case Instruction::Fence:
- case Instruction::LandingPad:
case Instruction::AtomicRMW:
case Instruction::AtomicCmpXchg:
+ case Instruction::LandingPad:
case Instruction::Resume:
+ case Instruction::CatchSwitch:
+ case Instruction::CatchPad:
+ case Instruction::CatchRet:
+ case Instruction::CleanupPad:
+ case Instruction::CleanupRet:
return false; // Misc instructions which have effects
}
}
+bool llvm::mayBeMemoryDependent(const Instruction &I) {
+ return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
+}
+
/// Return true if we know that the specified value is never null.
bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
+ assert(V->getType()->isPointerTy() && "V must be pointer type");
+
// Alloca never returns null, malloc might.
if (isa<AllocaInst>(V)) return true;
@@ -3164,9 +3459,12 @@ bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
if (const Argument *A = dyn_cast<Argument>(V))
return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
- // Global values are not null unless extern weak.
+ // A global variable in address space 0 is non null unless extern weak.
+ // Other address spaces may have null as a valid address for a global,
+ // so we can't assume anything.
if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
- return !GV->hasExternalWeakLinkage();
+ return !GV->hasExternalWeakLinkage() &&
+ GV->getType()->getAddressSpace() == 0;
// A Load tagged w/nonnull metadata is never null.
if (const LoadInst *LI = dyn_cast<LoadInst>(V))
@@ -3186,6 +3484,8 @@ bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
static bool isKnownNonNullFromDominatingCondition(const Value *V,
const Instruction *CtxI,
const DominatorTree *DT) {
+ assert(V->getType()->isPointerTy() && "V must be pointer type");
+
unsigned NumUsesExplored = 0;
for (auto U : V->users()) {
// Avoid massive lists
@@ -3316,40 +3616,339 @@ OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
return OverflowResult::MayOverflow;
}
-static SelectPatternFlavor matchSelectPattern(ICmpInst::Predicate Pred,
+static OverflowResult computeOverflowForSignedAdd(
+ Value *LHS, Value *RHS, AddOperator *Add, const DataLayout &DL,
+ AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) {
+ if (Add && Add->hasNoSignedWrap()) {
+ return OverflowResult::NeverOverflows;
+ }
+
+ bool LHSKnownNonNegative, LHSKnownNegative;
+ bool RHSKnownNonNegative, RHSKnownNegative;
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
+ AC, CxtI, DT);
+ ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
+ AC, CxtI, DT);
+
+ if ((LHSKnownNonNegative && RHSKnownNegative) ||
+ (LHSKnownNegative && RHSKnownNonNegative)) {
+ // The sign bits are opposite: this CANNOT overflow.
+ return OverflowResult::NeverOverflows;
+ }
+
+ // The remaining code needs Add to be available. Early returns if not so.
+ if (!Add)
+ return OverflowResult::MayOverflow;
+
+ // If the sign of Add is the same as at least one of the operands, this add
+ // CANNOT overflow. This is particularly useful when the sum is
+ // @llvm.assume'ed non-negative rather than proved so from analyzing its
+ // operands.
+ bool LHSOrRHSKnownNonNegative =
+ (LHSKnownNonNegative || RHSKnownNonNegative);
+ bool LHSOrRHSKnownNegative = (LHSKnownNegative || RHSKnownNegative);
+ if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
+ bool AddKnownNonNegative, AddKnownNegative;
+ ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
+ /*Depth=*/0, AC, CxtI, DT);
+ if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
+ (AddKnownNegative && LHSOrRHSKnownNegative)) {
+ return OverflowResult::NeverOverflows;
+ }
+ }
+
+ return OverflowResult::MayOverflow;
+}
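
An exhaustive 8-bit sanity check of the opposite-sign-bits rule, independent of the LLVM types used above:

#include <cassert>

int main() {
  // When the operands' sign bits are opposite, the exact sum always fits in
  // the signed range, so the addition can never overflow.
  for (int L = -128; L <= 127; ++L)
    for (int R = -128; R <= 127; ++R)
      if ((L < 0) != (R < 0))
        assert(L + R >= -128 && L + R <= 127);
  return 0;
}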
+
+OverflowResult llvm::computeOverflowForSignedAdd(AddOperator *Add,
+ const DataLayout &DL,
+ AssumptionCache *AC,
+ const Instruction *CxtI,
+ const DominatorTree *DT) {
+ return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
+ Add, DL, AC, CxtI, DT);
+}
+
+OverflowResult llvm::computeOverflowForSignedAdd(Value *LHS, Value *RHS,
+ const DataLayout &DL,
+ AssumptionCache *AC,
+ const Instruction *CxtI,
+ const DominatorTree *DT) {
+ return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
+}
+
+bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
+ // FIXME: This conservative implementation can be relaxed. E.g. most
+ // atomic operations are guaranteed to terminate on most platforms
+ // and most functions terminate.
+
+ return !I->isAtomic() && // atomics may never succeed on some platforms
+ !isa<CallInst>(I) && // could throw and might not terminate
+ !isa<InvokeInst>(I) && // might not terminate and could throw to
+ // non-successor (see bug 24185 for details).
+ !isa<ResumeInst>(I) && // has no successors
+ !isa<ReturnInst>(I); // has no successors
+}
+
+bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
+ const Loop *L) {
+ // The loop header is guaranteed to be executed for every iteration.
+ //
+ // FIXME: Relax this constraint to cover all basic blocks that are
+ // guaranteed to be executed at every iteration.
+ if (I->getParent() != L->getHeader()) return false;
+
+ for (const Instruction &LI : *L->getHeader()) {
+ if (&LI == I) return true;
+ if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
+ }
+ llvm_unreachable("Instruction not contained in its own parent basic block.");
+}
+
+bool llvm::propagatesFullPoison(const Instruction *I) {
+ switch (I->getOpcode()) {
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Xor:
+ case Instruction::Trunc:
+ case Instruction::BitCast:
+ case Instruction::AddrSpaceCast:
+ // These operations all propagate poison unconditionally. Note that poison
+ // is not any particular value, so xor or subtraction of poison with
+ // itself still yields poison, not zero.
+ return true;
+
+ case Instruction::AShr:
+ case Instruction::SExt:
+ // For these operations, one bit of the input is replicated across
+ // multiple output bits. A replicated poison bit is still poison.
+ return true;
+
+ case Instruction::Shl: {
+ // Left shift *by* a poison value is poison. The number of
+ // positions to shift is unsigned, so no negative values are
+ // possible there. Left shift by zero places preserves poison. So
+ // it only remains to consider left shift of poison by a positive
+ // number of places.
+ //
+ // A left shift by a positive number of places leaves the lowest order bit
+ // non-poisoned. However, if such a shift has a no-wrap flag, then we can
+ // make the poison operand violate that flag, yielding a fresh full-poison
+ // value.
+ auto *OBO = cast<OverflowingBinaryOperator>(I);
+ return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
+ }
+
+ case Instruction::Mul: {
+ // A multiplication by zero yields a non-poison zero result, so we need to
+ // rule out zero as an operand. Conservatively, multiplication by a
+ // non-zero constant is not multiplication by zero.
+ //
+ // Multiplication by a non-zero constant can leave some bits
+ // non-poisoned. For example, a multiplication by 2 leaves the lowest
+ // order bit unpoisoned. So we need to consider that.
+ //
+ // Multiplication by 1 preserves poison. If the multiplication has a
+ // no-wrap flag, then we can make the poison operand violate that flag
+ // when multiplied by any integer other than 0 and 1.
+ auto *OBO = cast<OverflowingBinaryOperator>(I);
+ if (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) {
+ for (Value *V : OBO->operands()) {
+ if (auto *CI = dyn_cast<ConstantInt>(V)) {
+ // A ConstantInt cannot yield poison, so we can assume that it is
+ // the other operand that is poison.
+ return !CI->isZero();
+ }
+ }
+ }
+ return false;
+ }
+
+ case Instruction::GetElementPtr:
+ // A GEP implicitly represents a sequence of additions, subtractions,
+ // truncations, sign extensions and multiplications. The multiplications
+ // are by the non-zero sizes of some set of types, so we do not have to be
+ // concerned with multiplication by zero. If the GEP is in-bounds, then
+ // these operations are implicitly no-signed-wrap so poison is propagated
+ // by the arguments above for Add, Sub, Trunc, SExt and Mul.
+ return cast<GEPOperator>(I)->isInBounds();
+
+ default:
+ return false;
+ }
+}
+
+const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
+ switch (I->getOpcode()) {
+ case Instruction::Store:
+ return cast<StoreInst>(I)->getPointerOperand();
+
+ case Instruction::Load:
+ return cast<LoadInst>(I)->getPointerOperand();
+
+ case Instruction::AtomicCmpXchg:
+ return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
+
+ case Instruction::AtomicRMW:
+ return cast<AtomicRMWInst>(I)->getPointerOperand();
+
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ return I->getOperand(1);
+
+ default:
+ return nullptr;
+ }
+}
+
+bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) {
+ // We currently only look for uses of poison values within the same basic
+ // block, as that makes it easier to guarantee that the uses will be
+ // executed given that PoisonI is executed.
+ //
+ // FIXME: Expand this to consider uses beyond the same basic block. To do
+ // this, look out for the distinction between post-dominance and strong
+ // post-dominance.
+ const BasicBlock *BB = PoisonI->getParent();
+
+ // Set of instructions that we have proved will yield poison if PoisonI
+ // does.
+ SmallSet<const Value *, 16> YieldsPoison;
+ YieldsPoison.insert(PoisonI);
+
+ for (BasicBlock::const_iterator I = PoisonI->getIterator(), E = BB->end();
+ I != E; ++I) {
+ if (&*I != PoisonI) {
+ const Value *NotPoison = getGuaranteedNonFullPoisonOp(&*I);
+ if (NotPoison != nullptr && YieldsPoison.count(NotPoison)) return true;
+ if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
+ return false;
+ }
+
+ // Mark poison that propagates from I through uses of I.
+ if (YieldsPoison.count(&*I)) {
+ for (const User *User : I->users()) {
+ const Instruction *UserI = cast<Instruction>(User);
+ if (UserI->getParent() == BB && propagatesFullPoison(UserI))
+ YieldsPoison.insert(User);
+ }
+ }
+ }
+ return false;
+}
+
+static bool isKnownNonNaN(Value *V, FastMathFlags FMF) {
+ if (FMF.noNaNs())
+ return true;
+
+ if (auto *C = dyn_cast<ConstantFP>(V))
+ return !C->isNaN();
+ return false;
+}
+
+static bool isKnownNonZero(Value *V) {
+ if (auto *C = dyn_cast<ConstantFP>(V))
+ return !C->isZero();
+ return false;
+}
+
+static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
+ FastMathFlags FMF,
Value *CmpLHS, Value *CmpRHS,
Value *TrueVal, Value *FalseVal,
Value *&LHS, Value *&RHS) {
LHS = CmpLHS;
RHS = CmpRHS;
- // (icmp X, Y) ? X : Y
- if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
- switch (Pred) {
- default: return SPF_UNKNOWN; // Equality.
- case ICmpInst::ICMP_UGT:
- case ICmpInst::ICMP_UGE: return SPF_UMAX;
- case ICmpInst::ICMP_SGT:
- case ICmpInst::ICMP_SGE: return SPF_SMAX;
- case ICmpInst::ICMP_ULT:
- case ICmpInst::ICMP_ULE: return SPF_UMIN;
- case ICmpInst::ICMP_SLT:
- case ICmpInst::ICMP_SLE: return SPF_SMIN;
+ // If the predicate is an "or-equal" (FP) predicate, then signed zeroes may
+ // return inconsistent results between implementations.
+ // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
+ // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
+ // Therefore we behave conservatively and only proceed if at least one of the
+ // operands is known to not be zero, or if we don't care about signed zeroes.
+ switch (Pred) {
+ default: break;
+ case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
+ case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
+ if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
+ !isKnownNonZero(CmpRHS))
+ return {SPF_UNKNOWN, SPNB_NA, false};
+ }
+
+ SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
+ bool Ordered = false;
+
+ // When given one NaN and one non-NaN input:
+ // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
+ // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
+ // ordered comparison fails), which could be NaN or non-NaN.
+ // so here we discover exactly what NaN behavior is required/accepted.
+ if (CmpInst::isFPPredicate(Pred)) {
+ bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
+ bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
+
+ if (LHSSafe && RHSSafe) {
+ // Both operands are known non-NaN.
+ NaNBehavior = SPNB_RETURNS_ANY;
+ } else if (CmpInst::isOrdered(Pred)) {
+ // An ordered comparison will return false when given a NaN, so it
+ // returns the RHS.
+ Ordered = true;
+ if (LHSSafe)
+ // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
+ NaNBehavior = SPNB_RETURNS_NAN;
+ else if (RHSSafe)
+ NaNBehavior = SPNB_RETURNS_OTHER;
+ else
+ // Completely unsafe.
+ return {SPF_UNKNOWN, SPNB_NA, false};
+ } else {
+ Ordered = false;
+ // An unordered comparison will return true when given a NaN, so it
+ // returns the LHS.
+ if (LHSSafe)
+ // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
+ NaNBehavior = SPNB_RETURNS_OTHER;
+ else if (RHSSafe)
+ NaNBehavior = SPNB_RETURNS_NAN;
+ else
+ // Completely unsafe.
+ return {SPF_UNKNOWN, SPNB_NA, false};
}
}
- // (icmp X, Y) ? Y : X
if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
+ std::swap(CmpLHS, CmpRHS);
+ Pred = CmpInst::getSwappedPredicate(Pred);
+ if (NaNBehavior == SPNB_RETURNS_NAN)
+ NaNBehavior = SPNB_RETURNS_OTHER;
+ else if (NaNBehavior == SPNB_RETURNS_OTHER)
+ NaNBehavior = SPNB_RETURNS_NAN;
+ Ordered = !Ordered;
+ }
+
+ // ([if]cmp X, Y) ? X : Y
+ if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
switch (Pred) {
- default: return SPF_UNKNOWN; // Equality.
+ default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
case ICmpInst::ICMP_UGT:
- case ICmpInst::ICMP_UGE: return SPF_UMIN;
+ case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
case ICmpInst::ICMP_SGT:
- case ICmpInst::ICMP_SGE: return SPF_SMIN;
+ case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
case ICmpInst::ICMP_ULT:
- case ICmpInst::ICMP_ULE: return SPF_UMAX;
+ case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
case ICmpInst::ICMP_SLT:
- case ICmpInst::ICMP_SLE: return SPF_SMAX;
+ case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
+ case FCmpInst::FCMP_UGT:
+ case FCmpInst::FCMP_UGE:
+ case FCmpInst::FCMP_OGT:
+ case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
+ case FCmpInst::FCMP_ULT:
+ case FCmpInst::FCMP_ULE:
+ case FCmpInst::FCMP_OLT:
+ case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
}
}
@@ -3360,13 +3959,13 @@ static SelectPatternFlavor matchSelectPattern(ICmpInst::Predicate Pred,
// ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
// NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
if (Pred == ICmpInst::ICMP_SGT && (C1->isZero() || C1->isMinusOne())) {
- return (CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS;
+ return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
}
// ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
// NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
if (Pred == ICmpInst::ICMP_SLT && (C1->isZero() || C1->isOne())) {
- return (CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS;
+ return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
}
}
@@ -3377,24 +3976,36 @@ static SelectPatternFlavor matchSelectPattern(ICmpInst::Predicate Pred,
match(CmpLHS, m_Not(m_Specific(TrueVal))))) {
LHS = TrueVal;
RHS = FalseVal;
- return SPF_SMIN;
+ return {SPF_SMIN, SPNB_NA, false};
}
}
}
// TODO: (X > 4) ? X : 5 --> (X >= 5) ? X : 5 --> MAX(X, 5)
- return SPF_UNKNOWN;
+ return {SPF_UNKNOWN, SPNB_NA, false};
}
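
A small standalone illustration of the signed-zero caveat described in the comments above, using plain doubles:

#include <cassert>
#include <cmath>

int main() {
  // The select form of "min" applied to (0.0, -0.0) yields +0.0, while fmin
  // may return either zero (IEEE 754-2008 5.3.1), so the two are not
  // interchangeable unless signed zeros are known not to matter.
  const double A = 0.0, B = -0.0;
  const double SelectMin = (A <= B) ? A : B;   // 0.0 <= -0.0 is true -> +0.0
  assert(!std::signbit(SelectMin));
  const double LibMin = std::fmin(A, B);       // may be -0.0 or +0.0
  assert(LibMin == 0.0);                       // numerically zero either way
  return 0;
}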
-static Constant *lookThroughCast(ICmpInst *CmpI, Value *V1, Value *V2,
- Instruction::CastOps *CastOp) {
+static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
+ Instruction::CastOps *CastOp) {
CastInst *CI = dyn_cast<CastInst>(V1);
Constant *C = dyn_cast<Constant>(V2);
- if (!CI || !C)
+ CastInst *CI2 = dyn_cast<CastInst>(V2);
+ if (!CI)
return nullptr;
*CastOp = CI->getOpcode();
+ if (CI2) {
+ // If V1 and V2 are both the same cast from the same type, we can look
+ // through V1.
+ if (CI2->getOpcode() == CI->getOpcode() &&
+ CI2->getSrcTy() == CI->getSrcTy())
+ return CI2->getOperand(0);
+ return nullptr;
+ } else if (!C) {
+ return nullptr;
+ }
+
if (isa<SExtInst>(CI) && CmpI->isSigned()) {
Constant *T = ConstantExpr::getTrunc(C, CI->getSrcTy());
// This is only valid if the truncated value can be sign-extended
@@ -3409,39 +4020,200 @@ static Constant *lookThroughCast(ICmpInst *CmpI, Value *V1, Value *V2,
if (isa<TruncInst>(CI))
return ConstantExpr::getIntegerCast(C, CI->getSrcTy(), CmpI->isSigned());
+ if (isa<FPToUIInst>(CI))
+ return ConstantExpr::getUIToFP(C, CI->getSrcTy(), true);
+
+ if (isa<FPToSIInst>(CI))
+ return ConstantExpr::getSIToFP(C, CI->getSrcTy(), true);
+
+ if (isa<UIToFPInst>(CI))
+ return ConstantExpr::getFPToUI(C, CI->getSrcTy(), true);
+
+ if (isa<SIToFPInst>(CI))
+ return ConstantExpr::getFPToSI(C, CI->getSrcTy(), true);
+
+ if (isa<FPTruncInst>(CI))
+ return ConstantExpr::getFPExtend(C, CI->getSrcTy(), true);
+
+ if (isa<FPExtInst>(CI))
+ return ConstantExpr::getFPTrunc(C, CI->getSrcTy(), true);
+
return nullptr;
}
-SelectPatternFlavor llvm::matchSelectPattern(Value *V,
+SelectPatternResult llvm::matchSelectPattern(Value *V,
Value *&LHS, Value *&RHS,
Instruction::CastOps *CastOp) {
SelectInst *SI = dyn_cast<SelectInst>(V);
- if (!SI) return SPF_UNKNOWN;
+ if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
- ICmpInst *CmpI = dyn_cast<ICmpInst>(SI->getCondition());
- if (!CmpI) return SPF_UNKNOWN;
+ CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
+ if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
- ICmpInst::Predicate Pred = CmpI->getPredicate();
+ CmpInst::Predicate Pred = CmpI->getPredicate();
Value *CmpLHS = CmpI->getOperand(0);
Value *CmpRHS = CmpI->getOperand(1);
Value *TrueVal = SI->getTrueValue();
Value *FalseVal = SI->getFalseValue();
+ FastMathFlags FMF;
+ if (isa<FPMathOperator>(CmpI))
+ FMF = CmpI->getFastMathFlags();
// Bail out early.
if (CmpI->isEquality())
- return SPF_UNKNOWN;
+ return {SPF_UNKNOWN, SPNB_NA, false};
// Deal with type mismatches.
if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
- if (Constant *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
- return ::matchSelectPattern(Pred, CmpLHS, CmpRHS,
+ if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
+ return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
cast<CastInst>(TrueVal)->getOperand(0), C,
LHS, RHS);
- if (Constant *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
- return ::matchSelectPattern(Pred, CmpLHS, CmpRHS,
+ if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
+ return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
C, cast<CastInst>(FalseVal)->getOperand(0),
LHS, RHS);
}
- return ::matchSelectPattern(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal,
+ return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
LHS, RHS);
}
+
+ConstantRange llvm::getConstantRangeFromMetadata(MDNode &Ranges) {
+ const unsigned NumRanges = Ranges.getNumOperands() / 2;
+ assert(NumRanges >= 1 && "Must have at least one range!");
+ assert(Ranges.getNumOperands() % 2 == 0 && "Must be a sequence of pairs");
+
+ auto *FirstLow = mdconst::extract<ConstantInt>(Ranges.getOperand(0));
+ auto *FirstHigh = mdconst::extract<ConstantInt>(Ranges.getOperand(1));
+
+ ConstantRange CR(FirstLow->getValue(), FirstHigh->getValue());
+
+ for (unsigned i = 1; i < NumRanges; ++i) {
+ auto *Low = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
+ auto *High = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
+
+ // Note: unionWith will potentially create a range that contains values not
+ // contained in any of the original N ranges.
+ CR = CR.unionWith(ConstantRange(Low->getValue(), High->getValue()));
+ }
+
+ return CR;
+}
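
A standalone sketch of the unionWith caveat, using plain half-open intervals in place of ConstantRange:

#include <cassert>

int main() {
  // Merging [0, 2) and [10, 12) into one conservative interval gives [0, 12),
  // which now also admits values (such as 5) that were in neither input range.
  const unsigned Lo1 = 0, Hi1 = 2, Lo2 = 10, Hi2 = 12;
  const unsigned Lo = Lo1 < Lo2 ? Lo1 : Lo2;
  const unsigned Hi = Hi1 > Hi2 ? Hi1 : Hi2;
  assert(Lo == 0 && Hi == 12);
  assert(5u >= Lo && 5u < Hi);                 // over-approximation is expected
  return 0;
}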
+
+/// Return true if "icmp Pred LHS RHS" is always true.
+static bool isTruePredicate(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
+ const DataLayout &DL, unsigned Depth,
+ AssumptionCache *AC, const Instruction *CxtI,
+ const DominatorTree *DT) {
+ assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
+ if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
+ return true;
+
+ switch (Pred) {
+ default:
+ return false;
+
+ case CmpInst::ICMP_SLE: {
+ const APInt *C;
+
+ // LHS s<= LHS +_{nsw} C if C >= 0
+ if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
+ return !C->isNegative();
+ return false;
+ }
+
+ case CmpInst::ICMP_ULE: {
+ const APInt *C;
+
+ // LHS u<= LHS +_{nuw} C for any C
+ if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
+ return true;
+
+ // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
+ auto MatchNUWAddsToSameValue = [&](Value *A, Value *B, Value *&X,
+ const APInt *&CA, const APInt *&CB) {
+ if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
+ match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
+ return true;
+
+ // If X & C == 0 then (X | C) == X +_{nuw} C
+ if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
+ match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
+ unsigned BitWidth = CA->getBitWidth();
+ APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+ computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, AC, CxtI, DT);
+
+ if ((KnownZero & *CA) == *CA && (KnownZero & *CB) == *CB)
+ return true;
+ }
+
+ return false;
+ };
+
+ Value *X;
+ const APInt *CLHS, *CRHS;
+ if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
+ return CLHS->ule(*CRHS);
+
+ return false;
+ }
+ }
+}
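
An exhaustive 8-bit check of the or-as-nuw-add identity relied on above (standalone, no APInt):

#include <cassert>

int main() {
  // Whenever X & C == 0, X | C equals X + C and the addition cannot wrap (nuw).
  for (unsigned X = 0; X < 256; ++X)
    for (unsigned C = 0; C < 256; ++C)
      if ((X & C) == 0) {
        assert((X | C) == X + C);
        assert(X + C <= 255u);
      }
  return 0;
}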
+
+/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
+/// ALHS ARHS" is true.
+static bool isImpliedCondOperands(CmpInst::Predicate Pred, Value *ALHS,
+ Value *ARHS, Value *BLHS, Value *BRHS,
+ const DataLayout &DL, unsigned Depth,
+ AssumptionCache *AC, const Instruction *CxtI,
+ const DominatorTree *DT) {
+ switch (Pred) {
+ default:
+ return false;
+
+ case CmpInst::ICMP_SLT:
+ case CmpInst::ICMP_SLE:
+ return isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI,
+ DT) &&
+ isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI,
+ DT);
+
+ case CmpInst::ICMP_ULT:
+ case CmpInst::ICMP_ULE:
+ return isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI,
+ DT) &&
+ isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI,
+ DT);
+ }
+}
+
+bool llvm::isImpliedCondition(Value *LHS, Value *RHS, const DataLayout &DL,
+ unsigned Depth, AssumptionCache *AC,
+ const Instruction *CxtI,
+ const DominatorTree *DT) {
+ assert(LHS->getType() == RHS->getType() && "mismatched type");
+ Type *OpTy = LHS->getType();
+ assert(OpTy->getScalarType()->isIntegerTy(1));
+
+ // LHS ==> RHS by definition
+ if (LHS == RHS) return true;
+
+ if (OpTy->isVectorTy())
+ // TODO: extending the code below to handle vectors
+ return false;
+ assert(OpTy->isIntegerTy(1) && "implied by above");
+
+ ICmpInst::Predicate APred, BPred;
+ Value *ALHS, *ARHS;
+ Value *BLHS, *BRHS;
+
+ if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS))) ||
+ !match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS))))
+ return false;
+
+ if (APred == BPred)
+ return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC,
+ CxtI, DT);
+
+ return false;
+}
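
A standalone 8-bit check of one implication this code can establish; the specific operands are made up for the example:

#include <cassert>

int main() {
  // "(X +nuw 1) u< Y" implies "X u< Y", because X u<= X +nuw 1 and Y u<= Y.
  for (unsigned X = 0; X < 255; ++X)           // X + 1 does not wrap (nuw)
    for (unsigned Y = 0; Y < 256; ++Y)
      if (X + 1 < Y)
        assert(X < Y);
  return 0;
}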