Diffstat (limited to 'contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp')
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp  288
1 file changed, 151 insertions(+), 137 deletions(-)
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 377ccb9..27fc34d 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -177,11 +177,10 @@ static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1) {
return false;
// TODO: Enhance logic for other BinOps and remove this check.
- auto AssocOpcode = BinOp1->getOpcode();
- if (AssocOpcode != Instruction::Xor && AssocOpcode != Instruction::And &&
- AssocOpcode != Instruction::Or)
+ if (!BinOp1->isBitwiseLogicOp())
return false;
+ auto AssocOpcode = BinOp1->getOpcode();
auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
return false;
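
The new isBitwiseLogicOp() predicate replaces the open-coded three-opcode
check. A minimal standalone sketch of the equivalence (the enum is
illustrative, not LLVM's actual Instruction opcode list):

    #include <cassert>

    enum Opcode { And, Or, Xor, Add };  // illustrative subset only

    // What the removed check tested, and what the helper is assumed to
    // cover: the three bitwise logic opcodes.
    bool isBitwiseLogic(Opcode Opc) {
      return Opc == And || Opc == Or || Opc == Xor;
    }

    int main() {
      assert(isBitwiseLogic(Xor) && !isBitwiseLogic(Add));
      return 0;
    }
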
@@ -684,14 +683,14 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
if (SI0->getCondition() == SI1->getCondition()) {
Value *SI = nullptr;
if (Value *V = SimplifyBinOp(TopLevelOpcode, SI0->getFalseValue(),
- SI1->getFalseValue(), DL, TLI, DT, AC))
+ SI1->getFalseValue(), DL, &TLI, &DT, &AC))
SI = Builder->CreateSelect(SI0->getCondition(),
Builder->CreateBinOp(TopLevelOpcode,
SI0->getTrueValue(),
SI1->getTrueValue()),
V);
if (Value *V = SimplifyBinOp(TopLevelOpcode, SI0->getTrueValue(),
- SI1->getTrueValue(), DL, TLI, DT, AC))
+ SI1->getTrueValue(), DL, &TLI, &DT, &AC))
SI = Builder->CreateSelect(
SI0->getCondition(), V,
Builder->CreateBinOp(TopLevelOpcode, SI0->getFalseValue(),
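
The fold above distributes a binary operator over two selects that share a
condition: (C ? A : B) op (C ? X : Y) becomes C ? (A op X) : (B op Y). A
minimal scalar sketch of that identity, using add as the stand-in operator:

    #include <cassert>

    int original(bool C, int A, int B, int X, int Y) {
      return (C ? A : B) + (C ? X : Y);
    }
    int folded(bool C, int A, int B, int X, int Y) {
      return C ? (A + X) : (B + Y);
    }

    int main() {
      assert(original(true, 1, 2, 3, 4) == folded(true, 1, 2, 3, 4));
      assert(original(false, 1, 2, 3, 4) == folded(false, 1, 2, 3, 4));
      return 0;
    }
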
@@ -741,17 +740,18 @@ Value *InstCombiner::dyn_castFNegVal(Value *V, bool IgnoreZeroSign) const {
return nullptr;
}
-static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
+static Value *foldOperationIntoSelectOperand(Instruction &I, Value *SO,
InstCombiner *IC) {
- if (CastInst *CI = dyn_cast<CastInst>(&I)) {
- return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
- }
+ if (auto *Cast = dyn_cast<CastInst>(&I))
+ return IC->Builder->CreateCast(Cast->getOpcode(), SO, I.getType());
+
+ assert(I.isBinaryOp() && "Unexpected opcode for select folding");
// Figure out if the constant is the left or the right argument.
bool ConstIsRHS = isa<Constant>(I.getOperand(1));
Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));
- if (Constant *SOC = dyn_cast<Constant>(SO)) {
+ if (auto *SOC = dyn_cast<Constant>(SO)) {
if (ConstIsRHS)
return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
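
Note how both ConstantExpr::get calls preserve operand order based on
ConstIsRHS; for non-commutative opcodes the order matters. A tiny sketch of
that concern, with plain ints standing in for IR values:

    #include <cassert>

    // Fold a constant K against operand SO without swapping sides,
    // shown for sub, where order changes the result.
    int applyKeepingOrder(bool ConstIsRHS, int SO, int K) {
      return ConstIsRHS ? SO - K : K - SO;
    }

    int main() {
      assert(applyKeepingOrder(true, 10, 3) == 7);   // SO - K
      assert(applyKeepingOrder(false, 10, 3) == -7); // K - SO
      return 0;
    }
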
@@ -761,78 +761,65 @@ static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
if (!ConstIsRHS)
std::swap(Op0, Op1);
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I)) {
- Value *RI = IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
- SO->getName()+".op");
- Instruction *FPInst = dyn_cast<Instruction>(RI);
- if (FPInst && isa<FPMathOperator>(FPInst))
- FPInst->copyFastMathFlags(BO);
- return RI;
- }
- if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
- return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
- SO->getName()+".cmp");
- if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
- return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
- SO->getName()+".cmp");
- llvm_unreachable("Unknown binary instruction type!");
+ auto *BO = cast<BinaryOperator>(&I);
+ Value *RI = IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
+ SO->getName() + ".op");
+ auto *FPInst = dyn_cast<Instruction>(RI);
+ if (FPInst && isa<FPMathOperator>(FPInst))
+ FPInst->copyFastMathFlags(BO);
+ return RI;
}
-/// Given an instruction with a select as one operand and a constant as the
-/// other operand, try to fold the binary operator into the select arguments.
-/// This also works for Cast instructions, which obviously do not have a second
-/// operand.
Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
- // Don't modify shared select instructions
- if (!SI->hasOneUse()) return nullptr;
- Value *TV = SI->getOperand(1);
- Value *FV = SI->getOperand(2);
-
- if (isa<Constant>(TV) || isa<Constant>(FV)) {
- // Bool selects with constant operands can be folded to logical ops.
- if (SI->getType()->isIntegerTy(1)) return nullptr;
-
- // If it's a bitcast involving vectors, make sure it has the same number of
- // elements on both sides.
- if (BitCastInst *BC = dyn_cast<BitCastInst>(&Op)) {
- VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
- VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
-
- // Verify that either both or neither are vectors.
- if ((SrcTy == nullptr) != (DestTy == nullptr)) return nullptr;
- // If vectors, verify that they have the same number of elements.
- if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
- return nullptr;
- }
+ // Don't modify shared select instructions.
+ if (!SI->hasOneUse())
+ return nullptr;
- // Test if a CmpInst instruction is used exclusively by a select as
- // part of a minimum or maximum operation. If so, refrain from doing
- // any other folding. This helps out other analyses which understand
- // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
- // and CodeGen. And in this case, at least one of the comparison
- // operands has at least one user besides the compare (the select),
- // which would often largely negate the benefit of folding anyway.
- if (auto *CI = dyn_cast<CmpInst>(SI->getCondition())) {
- if (CI->hasOneUse()) {
- Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
- if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
- (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
- return nullptr;
- }
- }
+ Value *TV = SI->getTrueValue();
+ Value *FV = SI->getFalseValue();
+ if (!(isa<Constant>(TV) || isa<Constant>(FV)))
+ return nullptr;
- Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
- Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);
+ // Bool selects with constant operands can be folded to logical ops.
+ if (SI->getType()->getScalarType()->isIntegerTy(1))
+ return nullptr;
- return SelectInst::Create(SI->getCondition(),
- SelectTrueVal, SelectFalseVal);
+ // If it's a bitcast involving vectors, make sure it has the same number of
+ // elements on both sides.
+ if (auto *BC = dyn_cast<BitCastInst>(&Op)) {
+ VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
+ VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
+
+ // Verify that either both or neither are vectors.
+ if ((SrcTy == nullptr) != (DestTy == nullptr))
+ return nullptr;
+
+ // If vectors, verify that they have the same number of elements.
+ if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
+ return nullptr;
}
- return nullptr;
+
+ // Test if a CmpInst instruction is used exclusively by a select as
+ // part of a minimum or maximum operation. If so, refrain from doing
+ // any other folding. This helps out other analyses which understand
+ // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
+ // and CodeGen. And in this case, at least one of the comparison
+ // operands has at least one user besides the compare (the select),
+ // which would often largely negate the benefit of folding anyway.
+ if (auto *CI = dyn_cast<CmpInst>(SI->getCondition())) {
+ if (CI->hasOneUse()) {
+ Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
+ if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
+ (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
+ return nullptr;
+ }
+ }
+
+ Value *NewTV = foldOperationIntoSelectOperand(Op, TV, this);
+ Value *NewFV = foldOperationIntoSelectOperand(Op, FV, this);
+ return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
}
-/// Given a binary operator, cast instruction, or select which has a PHI node as
-/// operand #0, see if we can fold the instruction into the PHI (which is only
-/// possible if all operands to the PHI are constants).
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
PHINode *PN = cast<PHINode>(I.getOperand(0));
unsigned NumPHIValues = PN->getNumIncomingValues();
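
FoldOpIntoSelect, restructured above into early exits, implements the
arm-wise fold: op(select(C, TV, FV), K) becomes select(C, op(TV, K),
op(FV, K)). A minimal sketch of the identity with concrete constants:

    #include <cassert>

    int original(bool C) { return (C ? 3 : 7) + 1; }
    int folded(bool C) { return C ? (3 + 1) : (7 + 1); }

    int main() {
      assert(original(true) == folded(true) && folded(true) == 4);
      assert(original(false) == folded(false) && folded(false) == 8);
      return 0;
    }
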
@@ -877,7 +864,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
// If the incoming non-constant value is in I's block, we will remove one
// instruction, but insert another equivalent one, leading to infinite
// instcombine.
- if (isPotentiallyReachable(I.getParent(), NonConstBB, DT, LI))
+ if (isPotentiallyReachable(I.getParent(), NonConstBB, &DT, LI))
return nullptr;
}
@@ -970,6 +957,19 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
return replaceInstUsesWith(I, NewPN);
}
+Instruction *InstCombiner::foldOpWithConstantIntoOperand(Instruction &I) {
+ assert(isa<Constant>(I.getOperand(1)) && "Unexpected operand type");
+
+ if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
+ if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
+ return NewSel;
+ } else if (isa<PHINode>(I.getOperand(0))) {
+ if (Instruction *NewPhi = FoldOpIntoPhi(I))
+ return NewPhi;
+ }
+ return nullptr;
+}
+
/// Given a pointer type and a constant offset, determine whether or not there
/// is a sequence of GEP indices into the pointed type that will land us at the
/// specified offset. If so, fill them into NewIndices and return the resultant
@@ -1379,7 +1379,8 @@ Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
- if (Value *V = SimplifyGEPInst(GEP.getSourceElementType(), Ops, DL, TLI, DT, AC))
+ if (Value *V =
+ SimplifyGEPInst(GEP.getSourceElementType(), Ops, DL, &TLI, &DT, &AC))
return replaceInstUsesWith(GEP, V);
Value *PtrOp = GEP.getOperand(0);
@@ -1394,7 +1395,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
++I, ++GTI) {
// Skip indices into struct types.
- if (isa<StructType>(*GTI))
+ if (GTI.isStruct())
continue;
// Index type should have the same width as IntPtr
@@ -1551,7 +1552,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
bool EndsWithSequential = false;
for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
I != E; ++I)
- EndsWithSequential = !(*I)->isStructTy();
+ EndsWithSequential = I.isSequential();
// Can we combine the two pointer arithmetics offsets?
if (EndsWithSequential) {
@@ -1860,7 +1861,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (!Offset) {
// If the bitcast is of an allocation, and the allocation will be
// converted to match the type of the cast, don't touch this.
- if (isa<AllocaInst>(Operand) || isAllocationFn(Operand, TLI)) {
+ if (isa<AllocaInst>(Operand) || isAllocationFn(Operand, &TLI)) {
// See if the bitcast simplifies, if so, don't nuke this GEP yet.
if (Instruction *I = visitBitCast(*BCI)) {
if (I != BCI) {
@@ -1898,6 +1899,25 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
}
}
+ if (!GEP.isInBounds()) {
+ unsigned PtrWidth =
+ DL.getPointerSizeInBits(PtrOp->getType()->getPointerAddressSpace());
+ APInt BasePtrOffset(PtrWidth, 0);
+ Value *UnderlyingPtrOp =
+ PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL,
+ BasePtrOffset);
+ if (auto *AI = dyn_cast<AllocaInst>(UnderlyingPtrOp)) {
+ if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
+ BasePtrOffset.isNonNegative()) {
+ APInt AllocSize(PtrWidth, DL.getTypeAllocSize(AI->getAllocatedType()));
+ if (BasePtrOffset.ule(AllocSize)) {
+ return GetElementPtrInst::CreateInBounds(
+ PtrOp, makeArrayRef(Ops).slice(1), GEP.getName());
+ }
+ }
+ }
+ }
+
return nullptr;
}
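
The new block above upgrades a plain GEP of an alloca to inbounds when the
accumulated constant offset provably stays within the allocation. The
predicate it computes, sketched with plain integers (names are illustrative):

    #include <cstdint>

    // Nonnegative and at most the allocation size; the .ule() above means
    // one-past-the-end offsets qualify as well.
    bool withinAllocation(int64_t ByteOffset, uint64_t AllocSize) {
      return ByteOffset >= 0 && static_cast<uint64_t>(ByteOffset) <= AllocSize;
    }

    int main() {
      return (withinAllocation(16, 16) && !withinAllocation(-1, 16)) ? 0 : 1;
    }
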
@@ -1963,8 +1983,8 @@ isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users,
MemIntrinsic *MI = cast<MemIntrinsic>(II);
if (MI->isVolatile() || MI->getRawDest() != PI)
return false;
+ LLVM_FALLTHROUGH;
}
- // fall through
case Intrinsic::dbg_declare:
case Intrinsic::dbg_value:
case Intrinsic::invariant_start:
@@ -2002,7 +2022,7 @@ Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
// to null and free calls, delete the calls and replace the comparisons with
// true or false as appropriate.
SmallVector<WeakVH, 64> Users;
- if (isAllocSiteRemovable(&MI, Users, TLI)) {
+ if (isAllocSiteRemovable(&MI, Users, &TLI)) {
for (unsigned i = 0, e = Users.size(); i != e; ++i) {
// Lowering all @llvm.objectsize calls first because they may
// use a bitcast/GEP of the alloca we are removing.
@@ -2013,12 +2033,9 @@ Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
if (II->getIntrinsicID() == Intrinsic::objectsize) {
- uint64_t Size;
- if (!getObjectSize(II->getArgOperand(0), Size, DL, TLI)) {
- ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
- Size = CI->isZero() ? -1ULL : 0;
- }
- replaceInstUsesWith(*I, ConstantInt::get(I->getType(), Size));
+ ConstantInt *Result = lowerObjectSizeCall(II, DL, &TLI,
+ /*MustSucceed=*/true);
+ replaceInstUsesWith(*I, Result);
eraseInstFromFunction(*I);
Users[i] = nullptr; // Skip examining in the next loop.
}
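
The deleted fallback encodes the behavior that lowerObjectSizeCall's
MustSucceed mode now provides: when the size cannot be computed,
@llvm.objectsize lowers to -1 or 0 depending on its 'min' argument. A sketch
of that fallback (function name is illustrative):

    #include <cstdint>

    // min == false asks for an upper bound, so unknown lowers to -1;
    // min == true asks for a lower bound, so unknown lowers to 0.
    uint64_t unknownObjectSize(bool Min) { return Min ? 0 : ~0ULL; }

    int main() { return unknownObjectSize(false) == ~0ULL ? 0 : 1; }
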
@@ -2218,6 +2235,20 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
Value *Cond = SI.getCondition();
+ Value *Op0;
+ ConstantInt *AddRHS;
+ if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
+ // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
+ for (SwitchInst::CaseIt CaseIter : SI.cases()) {
+ Constant *NewCase = ConstantExpr::getSub(CaseIter.getCaseValue(), AddRHS);
+ assert(isa<ConstantInt>(NewCase) &&
+ "Result of expression should be constant");
+ CaseIter.setValue(cast<ConstantInt>(NewCase));
+ }
+ SI.setCondition(Op0);
+ return &SI;
+ }
+
unsigned BitWidth = cast<IntegerType>(Cond->getType())->getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
computeKnownBits(Cond, KnownZero, KnownOne, 0, &SI);
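
The hoisted block implements exactly the comment's example: subtracting the
add's constant from every case value lets the switch test X directly. A
standalone sketch showing the rewrite is value-preserving under wrapping
arithmetic:

    #include <cassert>
    #include <cstdint>

    // 'switch (X+4) case 1:' behaves like 'switch (X) case 1-4:' mod 2^32.
    int original(uint32_t X) {
      switch (X + 4) { case 1: return 0; default: return 1; }
    }
    int rewritten(uint32_t X) {
      switch (X) { case 1u - 4u: return 0; default: return 1; }
    }

    int main() {
      for (uint32_t X : {0u, 42u, 0xFFFFFFFDu})
        assert(original(X) == rewritten(X));
      return 0;
    }
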
@@ -2238,43 +2269,20 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
// Shrink the condition operand if the new type is smaller than the old type.
// This may produce a non-standard type for the switch, but that's ok because
// the backend should extend back to a legal type for the target.
- bool TruncCond = false;
if (NewWidth > 0 && NewWidth < BitWidth) {
- TruncCond = true;
IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
Builder->SetInsertPoint(&SI);
Value *NewCond = Builder->CreateTrunc(Cond, Ty, "trunc");
SI.setCondition(NewCond);
- for (auto &C : SI.cases())
- static_cast<SwitchInst::CaseIt *>(&C)->setValue(ConstantInt::get(
- SI.getContext(), C.getCaseValue()->getValue().trunc(NewWidth)));
- }
-
- ConstantInt *AddRHS = nullptr;
- if (match(Cond, m_Add(m_Value(), m_ConstantInt(AddRHS)))) {
- Instruction *I = cast<Instruction>(Cond);
- // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
- for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end(); i != e;
- ++i) {
- ConstantInt *CaseVal = i.getCaseValue();
- Constant *LHS = CaseVal;
- if (TruncCond) {
- LHS = LeadingKnownZeros
- ? ConstantExpr::getZExt(CaseVal, Cond->getType())
- : ConstantExpr::getSExt(CaseVal, Cond->getType());
- }
- Constant *NewCaseVal = ConstantExpr::getSub(LHS, AddRHS);
- assert(isa<ConstantInt>(NewCaseVal) &&
- "Result of expression should be constant");
- i.setValue(cast<ConstantInt>(NewCaseVal));
+ for (SwitchInst::CaseIt CaseIter : SI.cases()) {
+ APInt TruncatedCase = CaseIter.getCaseValue()->getValue().trunc(NewWidth);
+ CaseIter.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
}
- SI.setCondition(I->getOperand(0));
- Worklist.Add(I);
return &SI;
}
- return TruncCond ? &SI : nullptr;
+ return nullptr;
}
Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
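
The retained truncation path shrinks the condition and every case constant
together, which is sound because the dropped high bits are known. A sketch of
the equivalence when the high bits are known zero:

    #include <cassert>
    #include <cstdint>

    int wide(uint32_t X) {
      switch (X) { case 3: return 0; case 200: return 1; default: return 2; }
    }
    int narrow(uint32_t X) {
      // Same dispatch on the low 8 bits only.
      switch (static_cast<uint8_t>(X)) {
        case 3: return 0; case 200: return 1; default: return 2;
      }
    }

    int main() {
      for (uint32_t X = 0; X < 256; ++X)  // all values with high bits zero
        assert(wide(X) == narrow(X));
      return 0;
    }
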
@@ -2284,7 +2292,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
return replaceInstUsesWith(EV, Agg);
if (Value *V =
- SimplifyExtractValueInst(Agg, EV.getIndices(), DL, TLI, DT, AC))
+ SimplifyExtractValueInst(Agg, EV.getIndices(), DL, &TLI, &DT, &AC))
return replaceInstUsesWith(EV, V);
if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
@@ -2560,7 +2568,7 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
// remove it from the filter. An unexpected type handler may be
// set up for a call site which throws an exception of the same
// type caught. In order for the exception thrown by the unexpected
- // handler to propogate correctly, the filter must be correctly
+ // handler to propagate correctly, the filter must be correctly
// described for the call site.
//
// Example:
@@ -2813,7 +2821,7 @@ bool InstCombiner::run() {
if (I == nullptr) continue; // skip null values.
// Check to see if we can DCE the instruction.
- if (isInstructionTriviallyDead(I, TLI)) {
+ if (isInstructionTriviallyDead(I, &TLI)) {
DEBUG(dbgs() << "IC: DCE: " << *I << '\n');
eraseInstFromFunction(*I);
++NumDeadInst;
@@ -2824,13 +2832,13 @@ bool InstCombiner::run() {
// Instruction isn't dead, see if we can constant propagate it.
if (!I->use_empty() &&
(I->getNumOperands() == 0 || isa<Constant>(I->getOperand(0)))) {
- if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
+ if (Constant *C = ConstantFoldInstruction(I, DL, &TLI)) {
DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
// Add operands to the worklist.
replaceInstUsesWith(*I, C);
++NumConstProp;
- if (isInstructionTriviallyDead(I, TLI))
+ if (isInstructionTriviallyDead(I, &TLI))
eraseInstFromFunction(*I);
MadeIRChange = true;
continue;
@@ -2839,20 +2847,21 @@ bool InstCombiner::run() {
// In general, it is possible for computeKnownBits to determine all bits in
// a value even when the operands are not all constants.
- if (ExpensiveCombines && !I->use_empty() && I->getType()->isIntegerTy()) {
- unsigned BitWidth = I->getType()->getScalarSizeInBits();
+ Type *Ty = I->getType();
+ if (ExpensiveCombines && !I->use_empty() && Ty->isIntOrIntVectorTy()) {
+ unsigned BitWidth = Ty->getScalarSizeInBits();
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
computeKnownBits(I, KnownZero, KnownOne, /*Depth*/0, I);
if ((KnownZero | KnownOne).isAllOnesValue()) {
- Constant *C = ConstantInt::get(I->getContext(), KnownOne);
+ Constant *C = ConstantInt::get(Ty, KnownOne);
DEBUG(dbgs() << "IC: ConstFold (all bits known) to: " << *C <<
" from: " << *I << '\n');
// Add operands to the worklist.
replaceInstUsesWith(*I, C);
++NumConstProp;
- if (isInstructionTriviallyDead(I, TLI))
+ if (isInstructionTriviallyDead(I, &TLI))
eraseInstFromFunction(*I);
MadeIRChange = true;
continue;
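
The expensive-combines path replaces a value with a constant once every bit
is known one way or the other. The core test, sketched at a fixed 8-bit
width:

    #include <cstdint>
    #include <optional>

    // If each bit is known zero or known one, the value must equal KnownOne.
    std::optional<uint8_t> foldAllBitsKnown(uint8_t KnownZero,
                                            uint8_t KnownOne) {
      if (static_cast<uint8_t>(KnownZero | KnownOne) == 0xFF)
        return KnownOne;
      return std::nullopt;
    }

    int main() {
      return foldAllBitsKnown(0xF0, 0x0F).value() == 0x0F ? 0 : 1;
    }
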
@@ -2883,7 +2892,7 @@ bool InstCombiner::run() {
// If the user is one of our immediate successors, and if that successor
// only has us as a predecessors (we'd have to split the critical edge
// otherwise), we can keep going.
- if (UserIsSuccessor && UserParent->getSinglePredecessor()) {
+ if (UserIsSuccessor && UserParent->getUniquePredecessor()) {
// Okay, the CFG is simple enough, try to sink this instruction.
if (TryToSinkInstruction(I, UserParent)) {
DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
@@ -2941,14 +2950,12 @@ bool InstCombiner::run() {
eraseInstFromFunction(*I);
} else {
-#ifndef NDEBUG
DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
<< " New = " << *I << '\n');
-#endif
// If the instruction was modified, it's possible that it is now dead.
// if so, remove it.
- if (isInstructionTriviallyDead(I, TLI)) {
+ if (isInstructionTriviallyDead(I, &TLI)) {
eraseInstFromFunction(*I);
} else {
Worklist.Add(I);
@@ -2981,7 +2988,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,
Worklist.push_back(BB);
SmallVector<Instruction*, 128> InstrsForInstCombineWorklist;
- DenseMap<ConstantExpr*, Constant*> FoldedConstants;
+ DenseMap<Constant *, Constant *> FoldedConstants;
do {
BB = Worklist.pop_back_val();
@@ -3017,17 +3024,17 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,
// See if we can constant fold its operands.
for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end(); i != e;
++i) {
- ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
- if (CE == nullptr)
+ if (!isa<ConstantVector>(i) && !isa<ConstantExpr>(i))
continue;
- Constant *&FoldRes = FoldedConstants[CE];
+ auto *C = cast<Constant>(i);
+ Constant *&FoldRes = FoldedConstants[C];
if (!FoldRes)
- FoldRes = ConstantFoldConstantExpression(CE, DL, TLI);
+ FoldRes = ConstantFoldConstant(C, DL, TLI);
if (!FoldRes)
- FoldRes = CE;
+ FoldRes = C;
- if (FoldRes != CE) {
+ if (FoldRes != C) {
*i = FoldRes;
MadeIRChange = true;
}
@@ -3120,8 +3127,15 @@ combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
/// Builder - This is an IRBuilder that automatically inserts new
/// instructions into the worklist when they are created.
- IRBuilder<TargetFolder, InstCombineIRInserter> Builder(
- F.getContext(), TargetFolder(DL), InstCombineIRInserter(Worklist, &AC));
+ IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
+ F.getContext(), TargetFolder(DL),
+ IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
+ Worklist.Add(I);
+
+ using namespace llvm::PatternMatch;
+ if (match(I, m_Intrinsic<Intrinsic::assume>()))
+ AC.registerAssumption(cast<CallInst>(I));
+ }));
// Lower dbg.declare intrinsics otherwise their value may be clobbered
// by instcombiner.
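
IRBuilderCallbackInserter generalizes the previous dedicated inserter: an
arbitrary callback runs on every instruction the builder creates (here,
queueing it and registering assumptions). The shape of the pattern, sketched
without LLVM types (ints stand in for Instruction*):

    #include <functional>
    #include <vector>

    struct CallbackInserter {
      std::function<void(int)> OnInsert;
      void insert(int I) const { OnInsert(I); }  // hook runs on every insert
    };

    int main() {
      std::vector<int> Worklist;
      CallbackInserter Ins{[&Worklist](int I) { Worklist.push_back(I); }};
      Ins.insert(42);  // queued by the callback, like Worklist.Add(I)
      return Worklist.size() == 1 ? 0 : 1;
    }
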
@@ -3137,7 +3151,7 @@ combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
bool Changed = prepareICWorklistFromFunction(F, DL, &TLI, Worklist);
InstCombiner IC(Worklist, &Builder, F.optForMinSize(), ExpensiveCombines,
- AA, &AC, &TLI, &DT, DL, LI);
+ AA, AC, TLI, DT, DL, LI);
Changed |= IC.run();
if (!Changed)
@@ -3148,7 +3162,7 @@ combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
}
PreservedAnalyses InstCombinePass::run(Function &F,
- AnalysisManager<Function> &AM) {
+ FunctionAnalysisManager &AM) {
auto &AC = AM.getResult<AssumptionAnalysis>(F);
auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);