path: root/lib/Transforms
author     rdivacky <rdivacky@FreeBSD.org>    2010-02-16 09:30:23 +0000
committer  rdivacky <rdivacky@FreeBSD.org>    2010-02-16 09:30:23 +0000
commit     f25ddd991a5601d0101602c4c263a58c7af4b8a2 (patch)
tree       4cfca640904d1896e25032757a61f8959c066919 /lib/Transforms
parent     3fd58f91dd318518f7daa4ba64c0aaf31799d89b (diff)
Update LLVM to r96341.
Diffstat (limited to 'lib/Transforms')
-rw-r--r--  lib/Transforms/Hello/Makefile | 1
-rw-r--r--  lib/Transforms/IPO/ArgumentPromotion.cpp | 2
-rw-r--r--  lib/Transforms/IPO/ConstantMerge.cpp | 41
-rw-r--r--  lib/Transforms/IPO/DeadTypeElimination.cpp | 4
-rw-r--r--  lib/Transforms/IPO/GlobalOpt.cpp | 8
-rw-r--r--  lib/Transforms/IPO/Inliner.cpp | 34
-rw-r--r--  lib/Transforms/IPO/Makefile | 1
-rw-r--r--  lib/Transforms/IPO/MergeFunctions.cpp | 1
-rw-r--r--  lib/Transforms/IPO/PartialInlining.cpp | 2
-rw-r--r--  lib/Transforms/IPO/StripSymbols.cpp | 9
-rw-r--r--  lib/Transforms/InstCombine/InstCombine.h | 11
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAddSub.cpp | 73
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 158
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCalls.cpp | 86
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCasts.cpp | 110
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCompares.cpp | 10
-rw-r--r--  lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp | 28
-rw-r--r--  lib/Transforms/InstCombine/InstCombineMulDivRem.cpp | 12
-rw-r--r--  lib/Transforms/InstCombine/InstCombineSelect.cpp | 70
-rw-r--r--  lib/Transforms/InstCombine/InstCombineShifts.cpp | 32
-rw-r--r--  lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp | 177
-rw-r--r--  lib/Transforms/InstCombine/InstCombineVectorOps.cpp | 3
-rw-r--r--  lib/Transforms/InstCombine/InstructionCombining.cpp | 8
-rw-r--r--  lib/Transforms/InstCombine/Makefile | 1
-rw-r--r--  lib/Transforms/Instrumentation/Makefile | 1
-rw-r--r--  lib/Transforms/Instrumentation/ProfilingUtils.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/CodeGenPrepare.cpp | 167
-rw-r--r--  lib/Transforms/Scalar/DeadStoreElimination.cpp | 8
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp | 197
-rw-r--r--  lib/Transforms/Scalar/IndVarSimplify.cpp | 136
-rw-r--r--  lib/Transforms/Scalar/JumpThreading.cpp | 48
-rw-r--r--  lib/Transforms/Scalar/LoopStrengthReduce.cpp | 5046
-rw-r--r--  lib/Transforms/Scalar/LoopUnrollPass.cpp | 10
-rw-r--r--  lib/Transforms/Scalar/LoopUnswitch.cpp | 16
-rw-r--r--  lib/Transforms/Scalar/Makefile | 1
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/Reassociate.cpp | 17
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp | 66
-rw-r--r--  lib/Transforms/Scalar/SimplifyCFGPass.cpp | 16
-rw-r--r--  lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/SimplifyLibCalls.cpp | 69
-rw-r--r--  lib/Transforms/Scalar/TailRecursionElimination.cpp | 3
-rw-r--r--  lib/Transforms/Utils/BreakCriticalEdges.cpp | 86
-rw-r--r--  lib/Transforms/Utils/CloneFunction.cpp | 2
-rw-r--r--  lib/Transforms/Utils/Local.cpp | 89
-rw-r--r--  lib/Transforms/Utils/LoopSimplify.cpp | 5
-rw-r--r--  lib/Transforms/Utils/LoopUnroll.cpp | 6
-rw-r--r--  lib/Transforms/Utils/Makefile | 1
-rw-r--r--  lib/Transforms/Utils/PromoteMemoryToRegister.cpp | 87
-rw-r--r--  lib/Transforms/Utils/SSAUpdater.cpp | 116
-rw-r--r--  lib/Transforms/Utils/SimplifyCFG.cpp | 146
-rw-r--r--  lib/Transforms/Utils/ValueMapper.cpp | 2
52 files changed, 4100 insertions, 3129 deletions
diff --git a/lib/Transforms/Hello/Makefile b/lib/Transforms/Hello/Makefile
index 46f8098..c5e75d4 100644
--- a/lib/Transforms/Hello/Makefile
+++ b/lib/Transforms/Hello/Makefile
@@ -11,7 +11,6 @@ LEVEL = ../../..
LIBRARYNAME = LLVMHello
LOADABLE_MODULE = 1
USEDLIBS =
-CXXFLAGS = -fno-rtti
include $(LEVEL)/Makefile.common
diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp
index d8190a4..325d353 100644
--- a/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -247,7 +247,7 @@ static bool PrefixIn(const ArgPromotion::IndicesVector &Indices,
return Low != Set.end() && IsPrefix(*Low, Indices);
}
-/// Mark the given indices (ToMark) as safe in the the given set of indices
+/// Mark the given indices (ToMark) as safe in the given set of indices
/// (Safe). Marking safe usually means adding ToMark to Safe. However, if there
/// is already a prefix of Indices in Safe, Indices are implicitly marked safe
/// already. Furthermore, any indices that Indices is itself a prefix of, are
diff --git a/lib/Transforms/IPO/ConstantMerge.cpp b/lib/Transforms/IPO/ConstantMerge.cpp
index 4972687..3c05f88 100644
--- a/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/lib/Transforms/IPO/ConstantMerge.cpp
@@ -19,10 +19,11 @@
#define DEBUG_TYPE "constmerge"
#include "llvm/Transforms/IPO.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
-#include <map>
using namespace llvm;
STATISTIC(NumMerged, "Number of global constants merged");
@@ -48,10 +49,10 @@ ModulePass *llvm::createConstantMergePass() { return new ConstantMerge(); }
bool ConstantMerge::runOnModule(Module &M) {
// Map unique constant/section pairs to globals. We don't want to merge
// globals in different sections.
- std::map<std::pair<Constant*, std::string>, GlobalVariable*> CMap;
+ DenseMap<Constant*, GlobalVariable*> CMap;
// Replacements - This vector contains a list of replacements to perform.
- std::vector<std::pair<GlobalVariable*, GlobalVariable*> > Replacements;
+ SmallVector<std::pair<GlobalVariable*, GlobalVariable*>, 32> Replacements;
bool MadeChange = false;
@@ -76,19 +77,21 @@ bool ConstantMerge::runOnModule(Module &M) {
continue;
}
- // Only process constants with initializers.
- if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
- Constant *Init = GV->getInitializer();
-
- // Check to see if the initializer is already known.
- GlobalVariable *&Slot = CMap[std::make_pair(Init, GV->getSection())];
-
- if (Slot == 0) { // Nope, add it to the map.
- Slot = GV;
- } else if (GV->hasLocalLinkage()) { // Yup, this is a duplicate!
- // Make all uses of the duplicate constant use the canonical version.
- Replacements.push_back(std::make_pair(GV, Slot));
- }
+ // Only process constants with initializers in the default address space.
+ if (!GV->isConstant() ||!GV->hasDefinitiveInitializer() ||
+ GV->getType()->getAddressSpace() != 0 || !GV->getSection().empty())
+ continue;
+
+ Constant *Init = GV->getInitializer();
+
+ // Check to see if the initializer is already known.
+ GlobalVariable *&Slot = CMap[Init];
+
+ if (Slot == 0) { // Nope, add it to the map.
+ Slot = GV;
+ } else if (GV->hasLocalLinkage()) { // Yup, this is a duplicate!
+ // Make all uses of the duplicate constant use the canonical version.
+ Replacements.push_back(std::make_pair(GV, Slot));
}
}
@@ -100,11 +103,11 @@ bool ConstantMerge::runOnModule(Module &M) {
// now. This avoid invalidating the pointers in CMap, which are unneeded
// now.
for (unsigned i = 0, e = Replacements.size(); i != e; ++i) {
- // Eliminate any uses of the dead global...
+ // Eliminate any uses of the dead global.
Replacements[i].first->replaceAllUsesWith(Replacements[i].second);
- // Delete the global value from the module...
- M.getGlobalList().erase(Replacements[i].first);
+ // Delete the global value from the module.
+ Replacements[i].first->eraseFromParent();
}
NumMerged += Replacements.size();
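
The rewritten ConstantMerge loop above keys the map on the initializer alone, skips globals in non-default address spaces or named sections, and queues duplicates for a later replace-all-uses pass so the map is never invalidated mid-scan. A minimal standalone sketch of that canonicalize-then-replace pattern, with plain STL containers standing in for DenseMap/SmallVector and strings standing in for Constant*/GlobalVariable*:

    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    int main() {
      // Each "global" is a name plus its constant initializer.
      std::vector<std::pair<std::string, std::string>> globals = {
          {"g1", "hello"}, {"g2", "world"}, {"g3", "hello"}};

      // Map each unique initializer to the first (canonical) global seen.
      std::unordered_map<std::string, std::string> canonical;
      // Record replacements and apply them after the scan, so the map's
      // entries are never touched while it is still being consulted.
      std::vector<std::pair<std::string, std::string>> replacements;

      for (const auto &g : globals) {
        auto it = canonical.find(g.second);
        if (it == canonical.end())
          canonical.emplace(g.second, g.first);             // first occurrence wins
        else
          replacements.emplace_back(g.first, it->second);   // duplicate found
      }

      for (const auto &r : replacements)
        std::cout << r.first << " -> " << r.second << "\n"; // prints: g3 -> g1
    }

The real pass additionally requires a duplicate to have local linkage before it is replaced.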
diff --git a/lib/Transforms/IPO/DeadTypeElimination.cpp b/lib/Transforms/IPO/DeadTypeElimination.cpp
index 025d77e..662fbb5 100644
--- a/lib/Transforms/IPO/DeadTypeElimination.cpp
+++ b/lib/Transforms/IPO/DeadTypeElimination.cpp
@@ -57,13 +57,13 @@ ModulePass *llvm::createDeadTypeEliminationPass() {
//
static inline bool ShouldNukeSymtabEntry(const Type *Ty){
// Nuke all names for primitive types!
- if (Ty->isPrimitiveType() || Ty->isInteger())
+ if (Ty->isPrimitiveType() || Ty->isIntegerTy())
return true;
// Nuke all pointers to primitive types as well...
if (const PointerType *PT = dyn_cast<PointerType>(Ty))
if (PT->getElementType()->isPrimitiveType() ||
- PT->getElementType()->isInteger())
+ PT->getElementType()->isIntegerTy())
return true;
return false;
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index ee260e9..df060eb 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -638,8 +638,8 @@ static bool AllUsesOfValueWillTrapIfNull(Value *V,
} else if (PHINode *PN = dyn_cast<PHINode>(*UI)) {
// If we've already seen this phi node, ignore it, it has already been
// checked.
- if (PHIs.insert(PN))
- return AllUsesOfValueWillTrapIfNull(PN, PHIs);
+ if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
+ return false;
} else if (isa<ICmpInst>(*UI) &&
isa<ConstantPointerNull>(UI->getOperand(1))) {
// Ignore setcc X, null
@@ -1590,7 +1590,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
// simplification. In these cases, we typically end up with "cond ? v1 : v2"
// where v1 and v2 both require constant pool loads, a big loss.
if (GVElType == Type::getInt1Ty(GV->getContext()) ||
- GVElType->isFloatingPoint() ||
+ GVElType->isFloatingPointTy() ||
isa<PointerType>(GVElType) || isa<VectorType>(GVElType))
return false;
@@ -1925,7 +1925,7 @@ GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) {
if (!ATy) return 0;
const StructType *STy = dyn_cast<StructType>(ATy->getElementType());
if (!STy || STy->getNumElements() != 2 ||
- !STy->getElementType(0)->isInteger(32)) return 0;
+ !STy->getElementType(0)->isIntegerTy(32)) return 0;
const PointerType *PFTy = dyn_cast<PointerType>(STy->getElementType(1));
if (!PFTy) return 0;
const FunctionType *FTy = dyn_cast<FunctionType>(PFTy->getElementType());
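
Most of the mechanical churn in this update is the rename of the Type predicates (isInteger to isIntegerTy, isFloatingPoint to isFloatingPointTy, isIntOrIntVector to isIntOrIntVectorTy, and so on), visible in the DeadTypeElimination and GlobalOpt hunks above. A small compile-only sketch of the new spellings, assuming the pre-3.3 header layout where Type.h sits directly under include/llvm/:

    #include "llvm/Type.h"

    // Classify a type using the renamed predicates that this diff adopts.
    static const char *classify(const llvm::Type *Ty) {
      if (Ty->isIntegerTy(1))        return "i1 (boolean)";
      if (Ty->isIntegerTy())         return "integer";
      if (Ty->isFloatingPointTy())   return "floating point";
      if (Ty->isIntOrIntVectorTy())  return "vector of integers";  // scalars handled above
      if (Ty->isFPOrFPVectorTy())    return "vector of floats";    // scalars handled above
      return "something else";
    }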
diff --git a/lib/Transforms/IPO/Inliner.cpp b/lib/Transforms/IPO/Inliner.cpp
index 0990278..752a97c 100644
--- a/lib/Transforms/IPO/Inliner.cpp
+++ b/lib/Transforms/IPO/Inliner.cpp
@@ -38,8 +38,15 @@ STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");
static cl::opt<int>
-InlineLimit("inline-threshold", cl::Hidden, cl::init(200), cl::ZeroOrMore,
- cl::desc("Control the amount of inlining to perform (default = 200)"));
+InlineLimit("inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
+ cl::desc("Control the amount of inlining to perform (default = 225)"));
+
+static cl::opt<int>
+HintThreshold("inlinehint-threshold", cl::Hidden, cl::init(325),
+ cl::desc("Threshold for inlining functions with inline hint"));
+
+// Threshold to use when optsize is specified (and there is no -inline-limit).
+const int OptSizeThreshold = 75;
Inliner::Inliner(void *ID)
: CallGraphSCCPass(ID), InlineThreshold(InlineLimit) {}
@@ -172,13 +179,23 @@ static bool InlineCallIfPossible(CallSite CS, CallGraph &CG,
return true;
}
-unsigned Inliner::getInlineThreshold(Function* Caller) const {
+unsigned Inliner::getInlineThreshold(CallSite CS) const {
+ int thres = InlineThreshold;
+
+ // Listen to optsize when -inline-limit is not given.
+ Function *Caller = CS.getCaller();
if (Caller && !Caller->isDeclaration() &&
Caller->hasFnAttr(Attribute::OptimizeForSize) &&
InlineLimit.getNumOccurrences() == 0)
- return 50;
- else
- return InlineThreshold;
+ thres = OptSizeThreshold;
+
+ // Listen to inlinehint when it would increase the threshold.
+ Function *Callee = CS.getCalledFunction();
+ if (HintThreshold > thres && Callee && !Callee->isDeclaration() &&
+ Callee->hasFnAttr(Attribute::InlineHint))
+ thres = HintThreshold;
+
+ return thres;
}
/// shouldInline - Return true if the inliner should attempt to inline
@@ -200,7 +217,7 @@ bool Inliner::shouldInline(CallSite CS) {
int Cost = IC.getValue();
Function *Caller = CS.getCaller();
- int CurrentThreshold = getInlineThreshold(Caller);
+ int CurrentThreshold = getInlineThreshold(CS);
float FudgeFactor = getInlineFudgeFactor(CS);
if (Cost >= (int)(CurrentThreshold * FudgeFactor)) {
DEBUG(dbgs() << " NOT Inlining: cost=" << Cost
@@ -236,8 +253,7 @@ bool Inliner::shouldInline(CallSite CS) {
outerCallsFound = true;
int Cost2 = IC2.getValue();
- Function *Caller2 = CS2.getCaller();
- int CurrentThreshold2 = getInlineThreshold(Caller2);
+ int CurrentThreshold2 = getInlineThreshold(CS2);
float FudgeFactor2 = getInlineFudgeFactor(CS2);
if (Cost2 >= (int)(CurrentThreshold2 * FudgeFactor2))
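
The Inliner hunks above switch getInlineThreshold from a caller-only query to a call-site query: the default rises from 200 to 225, callers marked optsize drop to 75 unless -inline-threshold was given explicitly, and callees carrying InlineHint can raise the limit to 325. A standalone sketch of that selection logic, with booleans standing in for the attribute and command-line checks (the real code also requires the functions to have bodies):

    #include <iostream>

    int getInlineThreshold(bool CallerOptSize, bool CalleeInlineHint,
                           bool ThresholdGivenOnCmdLine, int CmdLineThreshold = 225) {
      int Thres = CmdLineThreshold;
      if (CallerOptSize && !ThresholdGivenOnCmdLine)
        Thres = 75;                        // OptSizeThreshold
      if (CalleeInlineHint && 325 > Thres) // HintThreshold only ever raises the limit
        Thres = 325;
      return Thres;
    }

    int main() {
      std::cout << getInlineThreshold(false, false, false) << "\n"; // 225
      std::cout << getInlineThreshold(true,  false, false) << "\n"; // 75
      std::cout << getInlineThreshold(true,  true,  false) << "\n"; // 325
    }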
diff --git a/lib/Transforms/IPO/Makefile b/lib/Transforms/IPO/Makefile
index fd018c4..5c42374 100644
--- a/lib/Transforms/IPO/Makefile
+++ b/lib/Transforms/IPO/Makefile
@@ -10,7 +10,6 @@
LEVEL = ../../..
LIBRARYNAME = LLVMipo
BUILD_ARCHIVE = 1
-CXXFLAGS = -fno-rtti
include $(LEVEL)/Makefile.common
diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp
index fa8845b..b07e22c 100644
--- a/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/lib/Transforms/IPO/MergeFunctions.cpp
@@ -467,7 +467,6 @@ static LinkageCategory categorize(const Function *F) {
case GlobalValue::AppendingLinkage:
case GlobalValue::DLLImportLinkage:
case GlobalValue::DLLExportLinkage:
- case GlobalValue::GhostLinkage:
case GlobalValue::CommonLinkage:
return ExternalStrong;
}
diff --git a/lib/Transforms/IPO/PartialInlining.cpp b/lib/Transforms/IPO/PartialInlining.cpp
index f40902f..f8ec722 100644
--- a/lib/Transforms/IPO/PartialInlining.cpp
+++ b/lib/Transforms/IPO/PartialInlining.cpp
@@ -117,7 +117,7 @@ Function* PartialInliner::unswitchFunction(Function* F) {
DominatorTree DT;
DT.runOnFunction(*duplicateFunction);
- // Extract the body of the the if.
+ // Extract the body of the if.
Function* extractedFunction = ExtractCodeRegion(DT, toExtract);
// Inline the top-level if test into all callers.
diff --git a/lib/Transforms/IPO/StripSymbols.cpp b/lib/Transforms/IPO/StripSymbols.cpp
index 0e0d83a..310e4a2 100644
--- a/lib/Transforms/IPO/StripSymbols.cpp
+++ b/lib/Transforms/IPO/StripSymbols.cpp
@@ -214,6 +214,15 @@ static bool StripDebugInfo(Module &M) {
Changed = true;
}
+ if (Function *DbgVal = M.getFunction("llvm.dbg.value")) {
+ while (!DbgVal->use_empty()) {
+ CallInst *CI = cast<CallInst>(DbgVal->use_back());
+ CI->eraseFromParent();
+ }
+ DbgVal->eraseFromParent();
+ Changed = true;
+ }
+
NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.gv");
if (NMD) {
Changed = true;
diff --git a/lib/Transforms/InstCombine/InstCombine.h b/lib/Transforms/InstCombine/InstCombine.h
index 5367900..09accb6 100644
--- a/lib/Transforms/InstCombine/InstCombine.h
+++ b/lib/Transforms/InstCombine/InstCombine.h
@@ -199,11 +199,12 @@ private:
SmallVectorImpl<Value*> &NewIndices);
Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);
- /// ValueRequiresCast - Return true if the cast from "V to Ty" actually
- /// results in any code being generated. It does not require codegen if V is
- /// simple enough or if the cast can be folded into other casts.
- bool ValueRequiresCast(Instruction::CastOps opcode,const Value *V,
- const Type *Ty);
+ /// ShouldOptimizeCast - Return true if the cast from "V to Ty" actually
+ /// results in any code being generated and is interesting to optimize out. If
+ /// the cast can be eliminated by some other simple transformation, we prefer
+ /// to do the simplification first.
+ bool ShouldOptimizeCast(Instruction::CastOps opcode,const Value *V,
+ const Type *Ty);
Instruction *visitCallSite(CallSite CS);
bool transformConstExprCastCall(CallSite CS);
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 4891ff0..2da17f1 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -35,7 +35,7 @@ static Constant *SubOne(ConstantInt *C) {
// Otherwise, return null.
//
static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST) {
- if (!V->hasOneUse() || !V->getType()->isInteger())
+ if (!V->hasOneUse() || !V->getType()->isIntegerTy())
return 0;
Instruction *I = dyn_cast<Instruction>(V);
@@ -121,50 +121,34 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
uint32_t TySizeBits = I.getType()->getScalarSizeInBits();
const APInt& RHSVal = cast<ConstantInt>(RHSC)->getValue();
-
- uint32_t Size = TySizeBits / 2;
- APInt C0080Val(APInt(TySizeBits, 1ULL).shl(Size - 1));
- APInt CFF80Val(-C0080Val);
- do {
- if (TySizeBits > Size) {
- // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
- // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
- if ((RHSVal == CFF80Val && XorRHS->getValue() == C0080Val) ||
- (RHSVal == C0080Val && XorRHS->getValue() == CFF80Val)) {
- // This is a sign extend if the top bits are known zero.
- if (!MaskedValueIsZero(XorLHS,
- APInt::getHighBitsSet(TySizeBits, TySizeBits - Size)))
- Size = 0; // Not a sign ext, but can't be any others either.
- break;
- }
- }
- Size >>= 1;
- C0080Val = APIntOps::lshr(C0080Val, Size);
- CFF80Val = APIntOps::ashr(CFF80Val, Size);
- } while (Size >= 1);
-
- // FIXME: This shouldn't be necessary. When the backends can handle types
- // with funny bit widths then this switch statement should be removed. It
- // is just here to get the size of the "middle" type back up to something
- // that the back ends can handle.
- const Type *MiddleType = 0;
- switch (Size) {
- default: break;
- case 32:
- case 16:
- case 8: MiddleType = IntegerType::get(I.getContext(), Size); break;
+ unsigned ExtendAmt = 0;
+ // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
+ // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
+ if (XorRHS->getValue() == -RHSVal) {
+ if (RHSVal.isPowerOf2())
+ ExtendAmt = TySizeBits - RHSVal.logBase2() - 1;
+ else if (XorRHS->getValue().isPowerOf2())
+ ExtendAmt = TySizeBits - XorRHS->getValue().logBase2() - 1;
+ }
+
+ if (ExtendAmt) {
+ APInt Mask = APInt::getHighBitsSet(TySizeBits, ExtendAmt);
+ if (!MaskedValueIsZero(XorLHS, Mask))
+ ExtendAmt = 0;
}
- if (MiddleType) {
- Value *NewTrunc = Builder->CreateTrunc(XorLHS, MiddleType, "sext");
- return new SExtInst(NewTrunc, I.getType(), I.getName());
+
+ if (ExtendAmt) {
+ Constant *ShAmt = ConstantInt::get(I.getType(), ExtendAmt);
+ Value *NewShl = Builder->CreateShl(XorLHS, ShAmt, "sext");
+ return BinaryOperator::CreateAShr(NewShl, ShAmt);
}
}
}
- if (I.getType()->isInteger(1))
+ if (I.getType()->isIntegerTy(1))
return BinaryOperator::CreateXor(LHS, RHS);
- if (I.getType()->isInteger()) {
+ if (I.getType()->isIntegerTy()) {
// X + X --> X << 1
if (LHS == RHS)
return BinaryOperator::CreateShl(LHS, ConstantInt::get(I.getType(), 1));
@@ -184,7 +168,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// -A + B --> B - A
// -A + -B --> -(A + B)
if (Value *LHSV = dyn_castNegVal(LHS)) {
- if (LHS->getType()->isIntOrIntVector()) {
+ if (LHS->getType()->isIntOrIntVectorTy()) {
if (Value *RHSV = dyn_castNegVal(RHS)) {
Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
return BinaryOperator::CreateNeg(NewAdd);
@@ -238,7 +222,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
}
// W*X + Y*Z --> W * (X+Z) iff W == Y
- if (I.getType()->isIntOrIntVector()) {
+ if (I.getType()->isIntOrIntVectorTy()) {
Value *W, *X, *Y, *Z;
if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) {
@@ -576,7 +560,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
return ReplaceInstUsesWith(I, Op0); // undef - X -> undef
if (isa<UndefValue>(Op1))
return ReplaceInstUsesWith(I, Op1); // X - undef -> undef
- if (I.getType()->isInteger(1))
+ if (I.getType()->isIntegerTy(1))
return BinaryOperator::CreateXor(Op0, Op1);
if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) {
@@ -676,6 +660,13 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
return BinaryOperator::CreateSDiv(Op1I->getOperand(0),
ConstantExpr::getNeg(DivRHS));
+ // 0 - (C << X) -> (-C << X)
+ if (Op1I->getOpcode() == Instruction::Shl)
+ if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
+ if (CSI->isZero())
+ if (Value *ShlLHSNeg = dyn_castNegVal(Op1I->getOperand(0)))
+ return BinaryOperator::CreateShl(ShlLHSNeg, Op1I->getOperand(1));
+
// X - X*C --> X * (1-C)
ConstantInt *C2 = 0;
if (dyn_castFoldableMul(Op1I, C2) == Op0) {
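
The visitAdd hunk above replaces the loop over candidate widths with a direct computation: when the add/xor pair matches the sign-extension idiom, ExtendAmt = TySizeBits - log2(C) - 1 and the result is emitted as shl followed by ashr instead of trunc plus sext. A standalone check of that identity for the 8-bit-in-i32 case from the comments (0x80 / 0xFFFFFF80, so ExtendAmt = 24); two's-complement wrapping on the narrowing casts is assumed, as it is in LLVM IR:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t v = 0; v < 256; ++v) {
        uint32_t x = v;                               // X & 0xFF already applied
        uint32_t idiom  = (x ^ 0x80u) + 0xFFFFFF80u;  // ADD(XOR(x, 0x80), 0xFFFFFF80)
        int32_t  shifts = (int32_t)(x << 24) >> 24;   // shl 24 then ashr 24
        int32_t  expect = (int32_t)(int8_t)v;         // genuine sign extension
        assert((int32_t)idiom == expect);
        assert(shifts == expect);
      }
      return 0;
    }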
diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index fa7bb12..5e47953 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -546,7 +546,7 @@ Instruction *InstCombiner::FoldAndOfICmps(Instruction &I,
std::swap(LHSCC, RHSCC);
}
- // At this point, we know we have have two icmp instructions
+ // At this point, we know we have two icmp instructions
// comparing a value against two constants and and'ing the result
// together. Because of the above check, we know that we only have
// icmp eq, icmp ne, icmp [su]lt, and icmp [SU]gt here. We also know
@@ -932,24 +932,49 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0))
if (Instruction *Res = FoldAndOfICmps(I, LHS, RHS))
return Res;
-
+
+ // If and'ing two fcmp, try combine them into one.
+ if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
+ if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
+ if (Instruction *Res = FoldAndOfFCmps(I, LHS, RHS))
+ return Res;
+
+
// fold (and (cast A), (cast B)) -> (cast (and A, B))
if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
- if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
- if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind ?
- const Type *SrcTy = Op0C->getOperand(0)->getType();
- if (SrcTy == Op1C->getOperand(0)->getType() &&
- SrcTy->isIntOrIntVector() &&
- // Only do this if the casts both really cause code to be generated.
- ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
- I.getType()) &&
- ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
- I.getType())) {
- Value *NewOp = Builder->CreateAnd(Op0C->getOperand(0),
- Op1C->getOperand(0), I.getName());
+ if (CastInst *Op1C = dyn_cast<CastInst>(Op1)) {
+ const Type *SrcTy = Op0C->getOperand(0)->getType();
+ if (Op0C->getOpcode() == Op1C->getOpcode() && // same cast kind ?
+ SrcTy == Op1C->getOperand(0)->getType() &&
+ SrcTy->isIntOrIntVectorTy()) {
+ Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
+
+ // Only do this if the casts both really cause code to be generated.
+ if (ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
+ ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
+ Value *NewOp = Builder->CreateAnd(Op0COp, Op1COp, I.getName());
return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
}
+
+ // If this is and(cast(icmp), cast(icmp)), try to fold this even if the
+ // cast is otherwise not optimizable. This happens for vector sexts.
+ if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
+ if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
+ if (Instruction *Res = FoldAndOfICmps(I, LHS, RHS)) {
+ InsertNewInstBefore(Res, I);
+ return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
+ }
+
+ // If this is and(cast(fcmp), cast(fcmp)), try to fold this even if the
+ // cast is otherwise not optimizable. This happens for vector sexts.
+ if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
+ if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
+ if (Instruction *Res = FoldAndOfFCmps(I, LHS, RHS)) {
+ InsertNewInstBefore(Res, I);
+ return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
+ }
}
+ }
// (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
@@ -965,13 +990,6 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
}
}
- // If and'ing two fcmp, try combine them into one.
- if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
- if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
- if (Instruction *Res = FoldAndOfFCmps(I, LHS, RHS))
- return Res;
- }
-
return Changed ? &I : 0;
}
@@ -1142,18 +1160,20 @@ static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
Value *C, Value *D) {
// If A is not a select of -1/0, this cannot match.
Value *Cond = 0;
- if (!match(A, m_SelectCst<-1, 0>(m_Value(Cond))))
+ if (!match(A, m_SExt(m_Value(Cond))) ||
+ !Cond->getType()->isIntegerTy(1))
return 0;
// ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
- if (match(D, m_SelectCst<0, -1>(m_Specific(Cond))))
+ if (match(D, m_Not(m_SExt(m_Specific(Cond)))))
return SelectInst::Create(Cond, C, B);
- if (match(D, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond)))))
+ if (match(D, m_SExt(m_Not(m_Specific(Cond)))))
return SelectInst::Create(Cond, C, B);
+
// ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D.
- if (match(B, m_SelectCst<0, -1>(m_Specific(Cond))))
+ if (match(B, m_Not(m_SExt(m_Specific(Cond)))))
return SelectInst::Create(Cond, C, D);
- if (match(B, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond)))))
+ if (match(B, m_SExt(m_Not(m_Specific(Cond)))))
return SelectInst::Create(Cond, C, D);
return 0;
}
@@ -1224,7 +1244,7 @@ Instruction *InstCombiner::FoldOrOfICmps(Instruction &I,
std::swap(LHSCC, RHSCC);
}
- // At this point, we know we have have two icmp instructions
+ // At this point, we know we have two icmp instructions
// comparing a value against two constants and or'ing the result
// together. Because of the above check, we know that we only have
// ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
@@ -1595,15 +1615,19 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
}
}
- // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants
- if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D))
- return Match;
- if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C))
- return Match;
- if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D))
- return Match;
- if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C))
- return Match;
+ // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants.
+ // Don't do this for vector select idioms, the code generator doesn't handle
+ // them well yet.
+ if (!isa<VectorType>(I.getType())) {
+ if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D))
+ return Match;
+ if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C))
+ return Match;
+ if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D))
+ return Match;
+ if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C))
+ return Match;
+ }
// ((A&~B)|(~A&B)) -> A^B
if ((match(C, m_Not(m_Specific(D))) &&
@@ -1663,37 +1687,51 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if (Instruction *Res = FoldOrOfICmps(I, LHS, RHS))
return Res;
+ // (fcmp uno x, c) | (fcmp uno y, c) -> (fcmp uno x, y)
+ if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
+ if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
+ if (Instruction *Res = FoldOrOfFCmps(I, LHS, RHS))
+ return Res;
+
// fold (or (cast A), (cast B)) -> (cast (or A, B))
if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
if (Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
- if (!isa<ICmpInst>(Op0C->getOperand(0)) ||
- !isa<ICmpInst>(Op1C->getOperand(0))) {
- const Type *SrcTy = Op0C->getOperand(0)->getType();
- if (SrcTy == Op1C->getOperand(0)->getType() &&
- SrcTy->isIntOrIntVector() &&
+ const Type *SrcTy = Op0C->getOperand(0)->getType();
+ if (SrcTy == Op1C->getOperand(0)->getType() &&
+ SrcTy->isIntOrIntVectorTy()) {
+ Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
+
+ if ((!isa<ICmpInst>(Op0COp) || !isa<ICmpInst>(Op1COp)) &&
// Only do this if the casts both really cause code to be
// generated.
- ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
- I.getType()) &&
- ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
- I.getType())) {
- Value *NewOp = Builder->CreateOr(Op0C->getOperand(0),
- Op1C->getOperand(0), I.getName());
+ ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
+ ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
+ Value *NewOp = Builder->CreateOr(Op0COp, Op1COp, I.getName());
return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
}
+
+ // If this is or(cast(icmp), cast(icmp)), try to fold this even if the
+ // cast is otherwise not optimizable. This happens for vector sexts.
+ if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
+ if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
+ if (Instruction *Res = FoldOrOfICmps(I, LHS, RHS)) {
+ InsertNewInstBefore(Res, I);
+ return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
+ }
+
+ // If this is or(cast(fcmp), cast(fcmp)), try to fold this even if the
+ // cast is otherwise not optimizable. This happens for vector sexts.
+ if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
+ if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
+ if (Instruction *Res = FoldOrOfFCmps(I, LHS, RHS)) {
+ InsertNewInstBefore(Res, I);
+ return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
+ }
}
}
}
-
- // (fcmp uno x, c) | (fcmp uno y, c) -> (fcmp uno x, y)
- if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
- if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
- if (Instruction *Res = FoldOrOfFCmps(I, LHS, RHS))
- return Res;
- }
-
return Changed ? &I : 0;
}
@@ -1978,12 +2016,12 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
const Type *SrcTy = Op0C->getOperand(0)->getType();
- if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
+ if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isIntegerTy() &&
// Only do this if the casts both really cause code to be generated.
- ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
- I.getType()) &&
- ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
- I.getType())) {
+ ShouldOptimizeCast(Op0C->getOpcode(), Op0C->getOperand(0),
+ I.getType()) &&
+ ShouldOptimizeCast(Op1C->getOpcode(), Op1C->getOperand(0),
+ I.getType())) {
Value *NewOp = Builder->CreateXor(Op0C->getOperand(0),
Op1C->getOperand(0), I.getName());
return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
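
MatchSelectFromAndOr above now recognizes the mask as a sext of an i1 condition (or its complement) instead of the old m_SelectCst patterns: with m = sext(cond), which is all-ones or all-zeros, (C & m) | (B & ~m) is exactly select(cond, C, B). A standalone check of that identity (the two 32-bit values are arbitrary and not taken from the diff):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t c = 0x12345678, b = 0x0F0F0F0F;   // arbitrary test values
      for (int cond = 0; cond <= 1; ++cond) {
        int32_t mask     = -cond;                     // sext i1 cond: 0 or 0xFFFFFFFF
        int32_t folded   = (c & mask) | (b & ~mask);  // (C & m) | (B & ~m)
        int32_t selected = cond ? c : b;              // select cond, C, B
        assert(folded == selected);
      }
    }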
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 47c37c4..d7efdcf 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -199,7 +199,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
// Extract the length and alignment and fill if they are constant.
ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
- if (!LenC || !FillC || !FillC->getType()->isInteger(8))
+ if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
return 0;
uint64_t Len = LenC->getZExtValue();
Alignment = MI->getAlignment();
@@ -230,7 +230,6 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
return 0;
}
-
/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
/// the heavy lifting.
@@ -304,6 +303,60 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
switch (II->getIntrinsicID()) {
default: break;
+ case Intrinsic::objectsize: {
+ const Type *ReturnTy = CI.getType();
+ Value *Op1 = II->getOperand(1);
+ bool Min = (cast<ConstantInt>(II->getOperand(2))->getZExtValue() == 1);
+
+ // We need target data for just about everything so depend on it.
+ if (!TD) break;
+
+ // Get to the real allocated thing and offset as fast as possible.
+ Op1 = Op1->stripPointerCasts();
+
+ // If we've stripped down to a single global variable that we
+ // can know the size of then just return that.
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
+ if (GV->hasDefinitiveInitializer()) {
+ Constant *C = GV->getInitializer();
+ size_t globalSize = TD->getTypeAllocSize(C->getType());
+ return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, globalSize));
+ } else {
+ Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
+ return ReplaceInstUsesWith(CI, RetVal);
+ }
+ } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
+
+ // Only handle constant GEPs here.
+ if (CE->getOpcode() != Instruction::GetElementPtr) break;
+ GEPOperator *GEP = cast<GEPOperator>(CE);
+
+ // Make sure we're not a constant offset from an external
+ // global.
+ Value *Operand = GEP->getPointerOperand();
+ Operand = Operand->stripPointerCasts();
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
+ if (!GV->hasDefinitiveInitializer()) break;
+
+ // Get what we're pointing to and its size.
+ const PointerType *BaseType =
+ cast<PointerType>(Operand->getType());
+ size_t Size = TD->getTypeAllocSize(BaseType->getElementType());
+
+ // Get the current byte offset into the thing. Use the original
+ // operand in case we're looking through a bitcast.
+ SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
+ const PointerType *OffsetType =
+ cast<PointerType>(GEP->getPointerOperand()->getType());
+ size_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());
+
+ assert(Size >= Offset);
+
+ Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
+ return ReplaceInstUsesWith(CI, RetVal);
+
+ }
+ }
case Intrinsic::bswap:
// bswap(bswap(x)) -> x
if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
@@ -632,18 +685,6 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
return EraseInstFromFunction(CI);
break;
}
- case Intrinsic::objectsize: {
- ConstantInt *Const = cast<ConstantInt>(II->getOperand(2));
- const Type *Ty = CI.getType();
-
- // 0 is maximum number of bytes left, 1 is minimum number of bytes left.
- // TODO: actually add these values, the current return values are "don't
- // know".
- if (Const->getZExtValue() == 0)
- return ReplaceInstUsesWith(CI, Constant::getAllOnesValue(Ty));
- else
- return ReplaceInstUsesWith(CI, ConstantInt::get(Ty, 0));
- }
}
return visitCallSite(II);
@@ -692,10 +733,14 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
Value *Callee = CS.getCalledValue();
if (Function *CalleeF = dyn_cast<Function>(Callee))
- if (CalleeF->getCallingConv() != CS.getCallingConv()) {
+ // If the call and callee calling conventions don't match, this call must
+ // be unreachable, as the call is undefined.
+ if (CalleeF->getCallingConv() != CS.getCallingConv() &&
+ // Only do this for calls to a function with a body. A prototype may
+ // not actually end up matching the implementation's calling conv for a
+ // variety of reasons (e.g. it may be written in assembly).
+ !CalleeF->isDeclaration()) {
Instruction *OldCall = CS.getInstruction();
- // If the call and callee calling conventions don't match, this call must
- // be unreachable, as the call is undefined.
new StoreInst(ConstantInt::getTrue(Callee->getContext()),
UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
OldCall);
@@ -703,8 +748,13 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
// This allows ValueHandlers and custom metadata to adjust itself.
if (!OldCall->getType()->isVoidTy())
OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
- if (isa<CallInst>(OldCall)) // Not worth removing an invoke here.
+ if (isa<CallInst>(OldCall))
return EraseInstFromFunction(*OldCall);
+
+ // We cannot remove an invoke, because it would change the CFG, just
+ // change the callee to a null pointer.
+ cast<InvokeInst>(OldCall)->setOperand(0,
+ Constant::getNullValue(CalleeF->getType()));
return 0;
}
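
The new llvm.objectsize handling above folds the intrinsic to the allocated size of a known global, or to Size - Offset for a constant GEP into one, instead of always answering "don't know". A source-level illustration using __builtin_object_size, which clang implements with llvm.objectsize when it cannot fold the size in the front end; with a plain internal global as in the new code path this typically prints 64 and 48, though exact folding can depend on optimization level:

    #include <cstdio>

    static char buf[64];   // an internal global with a definitive initializer

    int main() {
      // Whole object: TD->getTypeAllocSize([64 x i8]) == 64.
      std::printf("%zu\n", __builtin_object_size(buf, 0));
      // Constant GEP into the object: Size - Offset == 64 - 16 == 48.
      std::printf("%zu\n", __builtin_object_size(buf + 16, 0));
      return 0;
    }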
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index f25dd35..bb4a0e9 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -23,7 +23,7 @@ using namespace PatternMatch;
///
static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
int &Offset) {
- assert(Val->getType()->isInteger(32) && "Unexpected allocation size type!");
+ assert(Val->getType()->isIntegerTy(32) && "Unexpected allocation size type!");
if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
Offset = CI->getZExtValue();
Scale = 0;
@@ -255,17 +255,26 @@ isEliminableCastPair(
return Instruction::CastOps(Res);
}
-/// ValueRequiresCast - Return true if the cast from "V to Ty" actually results
-/// in any code being generated. It does not require codegen if V is simple
-/// enough or if the cast can be folded into other casts.
-bool InstCombiner::ValueRequiresCast(Instruction::CastOps opcode,const Value *V,
- const Type *Ty) {
+/// ShouldOptimizeCast - Return true if the cast from "V to Ty" actually
+/// results in any code being generated and is interesting to optimize out. If
+/// the cast can be eliminated by some other simple transformation, we prefer
+/// to do the simplification first.
+bool InstCombiner::ShouldOptimizeCast(Instruction::CastOps opc, const Value *V,
+ const Type *Ty) {
+ // Noop casts and casts of constants should be eliminated trivially.
if (V->getType() == Ty || isa<Constant>(V)) return false;
- // If this is another cast that can be eliminated, it isn't codegen either.
+ // If this is another cast that can be eliminated, we prefer to have it
+ // eliminated.
if (const CastInst *CI = dyn_cast<CastInst>(V))
- if (isEliminableCastPair(CI, opcode, Ty, TD))
+ if (isEliminableCastPair(CI, opc, Ty, TD))
return false;
+
+ // If this is a vector sext from a compare, then we don't want to break the
+ // idiom where each element of the extended vector is either zero or all ones.
+ if (opc == Instruction::SExt && isa<CmpInst>(V) && isa<VectorType>(Ty))
+ return false;
+
return true;
}
@@ -828,7 +837,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
// zext (xor i1 X, true) to i32 --> xor (zext i1 X to i32), 1
Value *X;
- if (SrcI && SrcI->hasOneUse() && SrcI->getType()->isInteger(1) &&
+ if (SrcI && SrcI->hasOneUse() && SrcI->getType()->isIntegerTy(1) &&
match(SrcI, m_Not(m_Value(X))) &&
(!X->hasOneUse() || !isa<CmpInst>(X))) {
Value *New = Builder->CreateZExt(X, CI.getType());
@@ -923,12 +932,6 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
Value *Src = CI.getOperand(0);
const Type *SrcTy = Src->getType(), *DestTy = CI.getType();
- // Canonicalize sign-extend from i1 to a select.
- if (Src->getType()->isInteger(1))
- return SelectInst::Create(Src,
- Constant::getAllOnesValue(CI.getType()),
- Constant::getNullValue(CI.getType()));
-
// Attempt to extend the entire input expression tree to the destination
// type. Only do this if the dest type is a simple type, don't convert the
// expression tree to something weird like i93 unless the source is also
@@ -968,6 +971,30 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
return BinaryOperator::CreateAShr(Res, ShAmt);
}
+
+ // (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if signed
+ // (x >s -1) ? -1 : 0 -> ashr x, 31 -> all ones if not signed
+ {
+ ICmpInst::Predicate Pred; Value *CmpLHS; ConstantInt *CmpRHS;
+ if (match(Src, m_ICmp(Pred, m_Value(CmpLHS), m_ConstantInt(CmpRHS)))) {
+ // sext (x <s 0) to i32 --> x>>s31 true if signbit set.
+ // sext (x >s -1) to i32 --> (x>>s31)^-1 true if signbit clear.
+ if ((Pred == ICmpInst::ICMP_SLT && CmpRHS->isZero()) ||
+ (Pred == ICmpInst::ICMP_SGT && CmpRHS->isAllOnesValue())) {
+ Value *Sh = ConstantInt::get(CmpLHS->getType(),
+ CmpLHS->getType()->getScalarSizeInBits()-1);
+ Value *In = Builder->CreateAShr(CmpLHS, Sh, CmpLHS->getName()+".lobit");
+ if (In->getType() != CI.getType())
+ In = Builder->CreateIntCast(In, CI.getType(), true/*SExt*/, "tmp");
+
+ if (Pred == ICmpInst::ICMP_SGT)
+ In = Builder->CreateNot(In, In->getName()+".not");
+ return ReplaceInstUsesWith(CI, In);
+ }
+ }
+ }
+
+
// If the input is a shl/ashr pair of a same constant, then this is a sign
// extension from a smaller value. If we could trust arbitrary bitwidth
// integers, we could turn this into a truncate to the smaller bit and then
@@ -1127,16 +1154,22 @@ Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
}
Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
- // If the source integer type is larger than the intptr_t type for
- // this target, do a trunc to the intptr_t type, then inttoptr of it. This
- // allows the trunc to be exposed to other transforms. Don't do this for
- // extending inttoptr's, because we don't know if the target sign or zero
- // extends to pointers.
- if (TD && CI.getOperand(0)->getType()->getScalarSizeInBits() >
- TD->getPointerSizeInBits()) {
- Value *P = Builder->CreateTrunc(CI.getOperand(0),
- TD->getIntPtrType(CI.getContext()), "tmp");
- return new IntToPtrInst(P, CI.getType());
+ // If the source integer type is not the intptr_t type for this target, do a
+ // trunc or zext to the intptr_t type, then inttoptr of it. This allows the
+ // cast to be exposed to other transforms.
+ if (TD) {
+ if (CI.getOperand(0)->getType()->getScalarSizeInBits() >
+ TD->getPointerSizeInBits()) {
+ Value *P = Builder->CreateTrunc(CI.getOperand(0),
+ TD->getIntPtrType(CI.getContext()), "tmp");
+ return new IntToPtrInst(P, CI.getType());
+ }
+ if (CI.getOperand(0)->getType()->getScalarSizeInBits() <
+ TD->getPointerSizeInBits()) {
+ Value *P = Builder->CreateZExt(CI.getOperand(0),
+ TD->getIntPtrType(CI.getContext()), "tmp");
+ return new IntToPtrInst(P, CI.getType());
+ }
}
if (Instruction *I = commonCastTransforms(CI))
@@ -1198,17 +1231,22 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
}
Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
- // If the destination integer type is smaller than the intptr_t type for
- // this target, do a ptrtoint to intptr_t then do a trunc. This allows the
- // trunc to be exposed to other transforms. Don't do this for extending
- // ptrtoint's, because we don't know if the target sign or zero extends its
- // pointers.
- if (TD &&
- CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits()) {
- Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
- TD->getIntPtrType(CI.getContext()),
- "tmp");
- return new TruncInst(P, CI.getType());
+ // If the destination integer type is not the intptr_t type for this target,
+ // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
+ // to be exposed to other transforms.
+ if (TD) {
+ if (CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits()) {
+ Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
+ TD->getIntPtrType(CI.getContext()),
+ "tmp");
+ return new TruncInst(P, CI.getType());
+ }
+ if (CI.getType()->getScalarSizeInBits() > TD->getPointerSizeInBits()) {
+ Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
+ TD->getIntPtrType(CI.getContext()),
+ "tmp");
+ return new ZExtInst(P, CI.getType());
+ }
}
return commonPointerCastTransforms(CI);
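
Two things change in InstCombineCasts above: inttoptr/ptrtoint are now canonicalized through the intptr_t width in both directions (zext or trunc as needed), and the (x <s 0) ? -1 : 0 idiom is folded in visitSExt rather than in the select code, turning sext(icmp) into an arithmetic shift by bit-width minus one. A standalone check of the two shift identities used there (arithmetic right shift of a negative signed value is assumed, as on all common targets):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t tests[] = {0, 1, -1, 42, -42, INT32_MIN, INT32_MAX};
      for (int32_t x : tests) {
        int32_t sextLT = -(int32_t)(x < 0);    // sext (x <s 0) to i32
        int32_t sextGT = -(int32_t)(x > -1);   // sext (x >s -1) to i32
        assert(sextLT ==  (x >> 31));          // ashr x, 31
        assert(sextGT == ~(x >> 31));          // (ashr x, 31) ^ -1
      }
    }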
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index e59406c6..72af80f 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1589,24 +1589,24 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
bool Changed = false;
+ Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
/// Orders the operands of the compare so that they are listed from most
/// complex to least complex. This puts constants before unary operators,
/// before binary operators.
- if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
+ if (getComplexity(Op0) < getComplexity(Op1)) {
I.swapOperands();
+ std::swap(Op0, Op1);
Changed = true;
}
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, TD))
return ReplaceInstUsesWith(I, V);
const Type *Ty = Op0->getType();
// icmp's with boolean values can always be turned into bitwise operations
- if (Ty == Type::getInt1Ty(I.getContext())) {
+ if (Ty->isIntegerTy(1)) {
switch (I.getPredicate()) {
default: llvm_unreachable("Invalid icmp instruction!");
case ICmpInst::ICMP_EQ: { // icmp eq i1 A, B -> ~(A^B)
@@ -1650,7 +1650,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
unsigned BitWidth = 0;
if (TD)
BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
- else if (Ty->isIntOrIntVector())
+ else if (Ty->isIntOrIntVectorTy())
BitWidth = Ty->getScalarSizeInBits();
bool isSignBit = false;
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index ae728dd..e6c59c7 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -87,7 +87,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
const Type *SrcPTy = SrcTy->getElementType();
- if (DestPTy->isInteger() || isa<PointerType>(DestPTy) ||
+ if (DestPTy->isIntegerTy() || isa<PointerType>(DestPTy) ||
isa<VectorType>(DestPTy)) {
// If the source is an array, the code below will not succeed. Check to
// see if a trivial 'gep P, 0, 0' will help matters. Only do this for
@@ -104,7 +104,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
}
if (IC.getTargetData() &&
- (SrcPTy->isInteger() || isa<PointerType>(SrcPTy) ||
+ (SrcPTy->isIntegerTy() || isa<PointerType>(SrcPTy) ||
isa<VectorType>(SrcPTy)) &&
// Do not allow turning this into a load of an integer, which is then
// casted to a pointer, this pessimizes pointer analysis a lot.
@@ -115,8 +115,9 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
// Okay, we are casting from one integer or pointer type to another of
// the same size. Instead of casting the pointer before the load, cast
// the result of the loaded value.
- Value *NewLoad =
+ LoadInst *NewLoad =
IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
+ NewLoad->setAlignment(LI.getAlignment());
// Now cast the result of the load.
return new BitCastInst(NewLoad, LI.getType());
}
@@ -199,12 +200,15 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
//
if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
// load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
- if (isSafeToLoadUnconditionally(SI->getOperand(1), SI) &&
- isSafeToLoadUnconditionally(SI->getOperand(2), SI)) {
- Value *V1 = Builder->CreateLoad(SI->getOperand(1),
- SI->getOperand(1)->getName()+".val");
- Value *V2 = Builder->CreateLoad(SI->getOperand(2),
- SI->getOperand(2)->getName()+".val");
+ unsigned Align = LI.getAlignment();
+ if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, TD) &&
+ isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, TD)) {
+ LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
+ SI->getOperand(1)->getName()+".val");
+ LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
+ SI->getOperand(2)->getName()+".val");
+ V1->setAlignment(Align);
+ V2->setAlignment(Align);
return SelectInst::Create(SI->getCondition(), V1, V2);
}
@@ -239,7 +243,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
const Type *SrcPTy = SrcTy->getElementType();
- if (!DestPTy->isInteger() && !isa<PointerType>(DestPTy))
+ if (!DestPTy->isIntegerTy() && !isa<PointerType>(DestPTy))
return 0;
/// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
@@ -273,7 +277,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
}
- if (!SrcPTy->isInteger() && !isa<PointerType>(SrcPTy))
+ if (!SrcPTy->isIntegerTy() && !isa<PointerType>(SrcPTy))
return 0;
// If the pointers point into different address spaces or if they point to
@@ -294,7 +298,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
const Type* CastSrcTy = SIOp0->getType();
const Type* CastDstTy = SrcPTy;
if (isa<PointerType>(CastDstTy)) {
- if (CastSrcTy->isInteger())
+ if (CastSrcTy->isIntegerTy())
opcode = Instruction::IntToPtr;
} else if (isa<IntegerType>(CastDstTy)) {
if (isa<PointerType>(SIOp0->getType()))
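
The load/store hunks above mostly track the isInteger-to-isIntegerTy rename, but the load-of-select transform also changes behaviour: the two speculated loads now inherit the original load's alignment, and isSafeToLoadUnconditionally is consulted with that alignment and the target data. A source-level illustration of the shape of the transform itself (the alignment plumbing has no visible effect at this level):

    #include <cassert>

    int main() {
      int a = 1, b = 2;
      for (int c = 0; c <= 1; ++c) {
        int viaSelectOfPointers = *(c ? &a : &b);  // load (select cond, &a, &b)
        int viaSelectOfLoads    =  (c ?  a :  b);  // select cond, (load &a), (load &b)
        assert(viaSelectOfPointers == viaSelectOfLoads);
      }
    }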
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 2e26a75..668c34f 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -157,7 +157,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
}
/// i1 mul -> i1 and.
- if (I.getType()->isInteger(1))
+ if (I.getType()->isIntegerTy(1))
return BinaryOperator::CreateAnd(Op0, Op1);
// X*(1 << Y) --> X << Y
@@ -314,7 +314,7 @@ Instruction *InstCombiner::commonDivTransforms(BinaryOperator &I) {
// undef / X -> 0 for integer.
// undef / X -> undef for FP (the undef could be a snan).
if (isa<UndefValue>(Op0)) {
- if (Op0->getType()->isFPOrFPVector())
+ if (Op0->getType()->isFPOrFPVectorTy())
return ReplaceInstUsesWith(I, Op0);
return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
}
@@ -386,7 +386,7 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
// It can't be division by zero, hence it must be division by one.
- if (I.getType()->isInteger(1))
+ if (I.getType()->isIntegerTy(1))
return ReplaceInstUsesWith(I, Op0);
if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
@@ -493,7 +493,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
// If the sign bits of both operands are zero (i.e. we can prove they are
// unsigned inputs), turn this into a udiv.
- if (I.getType()->isInteger()) {
+ if (I.getType()->isIntegerTy()) {
APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
if (MaskedValueIsZero(Op0, Mask)) {
if (MaskedValueIsZero(Op1, Mask)) {
@@ -527,7 +527,7 @@ Instruction *InstCombiner::commonRemTransforms(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
if (isa<UndefValue>(Op0)) { // undef % X -> 0
- if (I.getType()->isFPOrFPVector())
+ if (I.getType()->isFPOrFPVectorTy())
return ReplaceInstUsesWith(I, Op0); // X % undef -> undef (could be SNaN)
return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
}
@@ -648,7 +648,7 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
// If the sign bits of both operands are zero (i.e. we can prove they are
// unsigned inputs), turn this into a urem.
- if (I.getType()->isInteger()) {
+ if (I.getType()->isIntegerTy()) {
APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) {
// X srem Y -> X urem Y, iff X and Y don't have sign bit set
diff --git a/lib/Transforms/InstCombine/InstCombineSelect.cpp b/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 18b2dff..7807d9a 100644
--- a/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -326,44 +326,6 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
break;
}
}
-
- // (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if signed
- // (x >s -1) ? -1 : 0 -> ashr x, 31 -> all ones if not signed
- CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
- if (match(TrueVal, m_ConstantInt<-1>()) &&
- match(FalseVal, m_ConstantInt<0>()))
- Pred = ICI->getPredicate();
- else if (match(TrueVal, m_ConstantInt<0>()) &&
- match(FalseVal, m_ConstantInt<-1>()))
- Pred = CmpInst::getInversePredicate(ICI->getPredicate());
-
- if (Pred != CmpInst::BAD_ICMP_PREDICATE) {
- // If we are just checking for a icmp eq of a single bit and zext'ing it
- // to an integer, then shift the bit to the appropriate place and then
- // cast to integer to avoid the comparison.
- const APInt &Op1CV = CI->getValue();
-
- // sext (x <s 0) to i32 --> x>>s31 true if signbit set.
- // sext (x >s -1) to i32 --> (x>>s31)^-1 true if signbit clear.
- if ((Pred == ICmpInst::ICMP_SLT && Op1CV == 0) ||
- (Pred == ICmpInst::ICMP_SGT && Op1CV.isAllOnesValue())) {
- Value *In = ICI->getOperand(0);
- Value *Sh = ConstantInt::get(In->getType(),
- In->getType()->getScalarSizeInBits()-1);
- In = InsertNewInstBefore(BinaryOperator::CreateAShr(In, Sh,
- In->getName()+".lobit"),
- *ICI);
- if (In->getType() != SI.getType())
- In = CastInst::CreateIntegerCast(In, SI.getType(),
- true/*SExt*/, "tmp", ICI);
-
- if (Pred == ICmpInst::ICMP_SGT)
- In = InsertNewInstBefore(BinaryOperator::CreateNot(In,
- In->getName()+".not"), *ICI);
-
- return ReplaceInstUsesWith(SI, In);
- }
- }
}
if (CmpLHS == TrueVal && CmpRHS == FalseVal) {
@@ -479,7 +441,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
return ReplaceInstUsesWith(SI, FalseVal);
}
- if (SI.getType()->isInteger(1)) {
+ if (SI.getType()->isIntegerTy(1)) {
if (ConstantInt *C = dyn_cast<ConstantInt>(TrueVal)) {
if (C->getZExtValue()) {
// Change: A = select B, true, C --> A = or B, C
@@ -516,16 +478,25 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (ConstantInt *TrueValC = dyn_cast<ConstantInt>(TrueVal))
if (ConstantInt *FalseValC = dyn_cast<ConstantInt>(FalseVal)) {
// select C, 1, 0 -> zext C to int
- if (FalseValC->isZero() && TrueValC->getValue() == 1) {
- return CastInst::Create(Instruction::ZExt, CondVal, SI.getType());
- } else if (TrueValC->isZero() && FalseValC->getValue() == 1) {
- // select C, 0, 1 -> zext !C to int
- Value *NotCond =
- InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
- "not."+CondVal->getName()), SI);
- return CastInst::Create(Instruction::ZExt, NotCond, SI.getType());
+ if (FalseValC->isZero() && TrueValC->getValue() == 1)
+ return new ZExtInst(CondVal, SI.getType());
+
+ // select C, -1, 0 -> sext C to int
+ if (FalseValC->isZero() && TrueValC->isAllOnesValue())
+ return new SExtInst(CondVal, SI.getType());
+
+ // select C, 0, 1 -> zext !C to int
+ if (TrueValC->isZero() && FalseValC->getValue() == 1) {
+ Value *NotCond = Builder->CreateNot(CondVal, "not."+CondVal->getName());
+ return new ZExtInst(NotCond, SI.getType());
}
+ // select C, 0, -1 -> sext !C to int
+ if (TrueValC->isZero() && FalseValC->isAllOnesValue()) {
+ Value *NotCond = Builder->CreateNot(CondVal, "not."+CondVal->getName());
+ return new SExtInst(NotCond, SI.getType());
+ }
+
if (ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition())) {
// If one of the constants is zero (we know they can't both be) and we
// have an icmp instruction with zero, and we have an 'and' with the
@@ -547,8 +518,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
ShouldNotVal ^= IC->getPredicate() == ICmpInst::ICMP_NE;
Value *V = ICA;
if (ShouldNotVal)
- V = InsertNewInstBefore(BinaryOperator::Create(
- Instruction::Xor, V, ICA->getOperand(1)), SI);
+ V = Builder->CreateXor(V, ICA->getOperand(1));
return ReplaceInstUsesWith(SI, V);
}
}
@@ -659,7 +629,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
}
// See if we can fold the select into one of our operands.
- if (SI.getType()->isInteger()) {
+ if (SI.getType()->isIntegerTy()) {
if (Instruction *FoldI = FoldSelectIntoOp(SI, TrueVal, FalseVal))
return FoldI;
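
visitSelectInst above gains direct folds for the remaining boolean-to-integer selects: select C, -1, 0 becomes sext C and select C, 0, -1 becomes sext !C, alongside the existing zext forms. A standalone check of the four equivalences for a one-bit condition:

    #include <cassert>

    int main() {
      for (int c = 0; c <= 1; ++c) {
        assert((c ? -1 : 0) == -c);        // select C, -1, 0  ->  sext C
        assert((c ?  0 : -1) == -(1 - c)); // select C, 0, -1  ->  sext !C
        assert((c ?  1 : 0) == c);         // select C, 1, 0   ->  zext C
        assert((c ?  0 : 1) == 1 - c);     // select C, 0, 1   ->  zext !C
      }
    }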
diff --git a/lib/Transforms/InstCombine/InstCombineShifts.cpp b/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 321c91d..836bda3 100644
--- a/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "InstCombine.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/Support/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;
@@ -69,10 +70,9 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
if (Op1->uge(TypeBits)) {
if (I.getOpcode() != Instruction::AShr)
return ReplaceInstUsesWith(I, Constant::getNullValue(Op0->getType()));
- else {
- I.setOperand(1, ConstantInt::get(I.getType(), TypeBits-1));
- return &I;
- }
+ // ashr i32 X, 32 --> ashr i32 X, 31
+ I.setOperand(1, ConstantInt::get(I.getType(), TypeBits-1));
+ return &I;
}
// ((X*C1) << C2) == (X * (C1 << C2))
@@ -387,7 +387,29 @@ Instruction *InstCombiner::visitShl(BinaryOperator &I) {
}
Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
- return commonShiftTransforms(I);
+ if (Instruction *R = commonShiftTransforms(I))
+ return R;
+
+ Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
+
+ if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1))
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op0)) {
+ unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
+ // ctlz.i32(x)>>5 --> zext(x == 0)
+ // cttz.i32(x)>>5 --> zext(x == 0)
+ // ctpop.i32(x)>>5 --> zext(x == -1)
+ if ((II->getIntrinsicID() == Intrinsic::ctlz ||
+ II->getIntrinsicID() == Intrinsic::cttz ||
+ II->getIntrinsicID() == Intrinsic::ctpop) &&
+ isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == Op1C->getZExtValue()){
+ bool isCtPop = II->getIntrinsicID() == Intrinsic::ctpop;
+ Constant *RHS = ConstantInt::getSigned(Op0->getType(), isCtPop ? -1:0);
+ Value *Cmp = Builder->CreateICmpEQ(II->getOperand(1), RHS);
+ return new ZExtInst(Cmp, II->getType());
+ }
+ }
+
+ return 0;
}
Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
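
The new visitLShr case rests on a counting argument: for an i32 input, llvm.ctlz and llvm.cttz can only produce 32 when the operand is zero, and llvm.ctpop can only produce 32 when the operand is all-ones, so shifting the result right by log2(32) = 5 leaves exactly the i1 test, zero-extended. A standalone check of that arithmetic fact (plain C++20 <bit> functions standing in for the intrinsics; not LLVM code):

  #include <bit>
  #include <cassert>
  #include <cstdint>
  #include <initializer_list>

  int main() {
    for (uint32_t x : {0u, 1u, 0x80000000u, 0xFFFFFFFFu, 12345u}) {
      // ctlz.i32(x) >> 5 and cttz.i32(x) >> 5 are 1 exactly when x == 0 ...
      assert((std::countl_zero(x) >> 5) == (x == 0 ? 1 : 0));
      assert((std::countr_zero(x) >> 5) == (x == 0 ? 1 : 0));
      // ... and ctpop.i32(x) >> 5 is 1 exactly when x == 0xFFFFFFFF.
      assert((std::popcount(x) >> 5) == (x == 0xFFFFFFFFu ? 1 : 0));
    }
    return 0;
  }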
diff --git a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 74a1b68..5e9a52f 100644
--- a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -107,7 +107,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
assert((TD || !isa<PointerType>(VTy)) &&
"SimplifyDemandedBits needs to know bit widths!");
assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
- (!VTy->isIntOrIntVector() ||
+ (!VTy->isIntOrIntVectorTy() ||
VTy->getScalarSizeInBits() == BitWidth) &&
KnownZero.getBitWidth() == BitWidth &&
KnownOne.getBitWidth() == BitWidth &&
@@ -138,11 +138,11 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return 0;
APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
- APInt &RHSKnownZero = KnownZero, &RHSKnownOne = KnownOne;
+ APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
Instruction *I = dyn_cast<Instruction>(V);
if (!I) {
- ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
+ ComputeMaskedBits(V, DemandedMask, KnownZero, KnownOne, Depth);
return 0; // Only analyze instructions.
}
@@ -219,7 +219,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
switch (I->getOpcode()) {
default:
- ComputeMaskedBits(I, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
+ ComputeMaskedBits(I, DemandedMask, KnownZero, KnownOne, Depth);
break;
case Instruction::And:
// If either the LHS or the RHS are Zero, the result is zero.
@@ -249,9 +249,9 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return I;
// Output known-1 bits are only known if set in both the LHS & RHS.
- RHSKnownOne &= LHSKnownOne;
+ KnownOne = RHSKnownOne & LHSKnownOne;
// Output known-0 are known to be clear if zero in either the LHS | RHS.
- RHSKnownZero |= LHSKnownZero;
+ KnownZero = RHSKnownZero | LHSKnownZero;
break;
case Instruction::Or:
// If either the LHS or the RHS are One, the result is One.
@@ -286,9 +286,9 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return I;
// Output known-0 bits are only known if clear in both the LHS & RHS.
- RHSKnownZero &= LHSKnownZero;
+ KnownZero = RHSKnownZero & LHSKnownZero;
// Output known-1 are known to be set if set in either the LHS | RHS.
- RHSKnownOne |= LHSKnownOne;
+ KnownOne = RHSKnownOne | LHSKnownOne;
break;
case Instruction::Xor: {
if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
@@ -306,13 +306,6 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
if ((DemandedMask & LHSKnownZero) == DemandedMask)
return I->getOperand(1);
- // Output known-0 bits are known if clear or set in both the LHS & RHS.
- APInt KnownZeroOut = (RHSKnownZero & LHSKnownZero) |
- (RHSKnownOne & LHSKnownOne);
- // Output known-1 are known to be set if set in only one of the LHS, RHS.
- APInt KnownOneOut = (RHSKnownZero & LHSKnownOne) |
- (RHSKnownOne & LHSKnownZero);
-
// If all of the demanded bits are known to be zero on one side or the
// other, turn this into an *inclusive* or.
// e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
@@ -368,10 +361,11 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
BinaryOperator::CreateXor(NewAnd, XorC, "tmp");
return InsertNewInstBefore(NewXor, *I);
}
-
-
- RHSKnownZero = KnownZeroOut;
- RHSKnownOne = KnownOneOut;
+
+ // Output known-0 bits are known if clear or set in both the LHS & RHS.
+ KnownZero= (RHSKnownZero & LHSKnownZero) | (RHSKnownOne & LHSKnownOne);
+ // Output known-1 are known to be set if set in only one of the LHS, RHS.
+ KnownOne = (RHSKnownZero & LHSKnownOne) | (RHSKnownOne & LHSKnownZero);
break;
}
case Instruction::Select:
@@ -389,61 +383,61 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return I;
// Only known if known in both the LHS and RHS.
- RHSKnownOne &= LHSKnownOne;
- RHSKnownZero &= LHSKnownZero;
+ KnownOne = RHSKnownOne & LHSKnownOne;
+ KnownZero = RHSKnownZero & LHSKnownZero;
break;
case Instruction::Trunc: {
unsigned truncBf = I->getOperand(0)->getType()->getScalarSizeInBits();
DemandedMask.zext(truncBf);
- RHSKnownZero.zext(truncBf);
- RHSKnownOne.zext(truncBf);
+ KnownZero.zext(truncBf);
+ KnownOne.zext(truncBf);
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
- RHSKnownZero, RHSKnownOne, Depth+1))
+ KnownZero, KnownOne, Depth+1))
return I;
DemandedMask.trunc(BitWidth);
- RHSKnownZero.trunc(BitWidth);
- RHSKnownOne.trunc(BitWidth);
- assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
+ KnownZero.trunc(BitWidth);
+ KnownOne.trunc(BitWidth);
+ assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
break;
}
case Instruction::BitCast:
- if (!I->getOperand(0)->getType()->isIntOrIntVector())
- return false; // vector->int or fp->int?
+ if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
+ return 0; // vector->int or fp->int?
if (const VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
if (const VectorType *SrcVTy =
dyn_cast<VectorType>(I->getOperand(0)->getType())) {
if (DstVTy->getNumElements() != SrcVTy->getNumElements())
// Don't touch a bitcast between vectors of different element counts.
- return false;
+ return 0;
} else
// Don't touch a scalar-to-vector bitcast.
- return false;
+ return 0;
} else if (isa<VectorType>(I->getOperand(0)->getType()))
// Don't touch a vector-to-scalar bitcast.
- return false;
+ return 0;
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
- RHSKnownZero, RHSKnownOne, Depth+1))
+ KnownZero, KnownOne, Depth+1))
return I;
- assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
+ assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
break;
case Instruction::ZExt: {
// Compute the bits in the result that are not present in the input.
unsigned SrcBitWidth =I->getOperand(0)->getType()->getScalarSizeInBits();
DemandedMask.trunc(SrcBitWidth);
- RHSKnownZero.trunc(SrcBitWidth);
- RHSKnownOne.trunc(SrcBitWidth);
+ KnownZero.trunc(SrcBitWidth);
+ KnownOne.trunc(SrcBitWidth);
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
- RHSKnownZero, RHSKnownOne, Depth+1))
+ KnownZero, KnownOne, Depth+1))
return I;
DemandedMask.zext(BitWidth);
- RHSKnownZero.zext(BitWidth);
- RHSKnownOne.zext(BitWidth);
- assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
+ KnownZero.zext(BitWidth);
+ KnownOne.zext(BitWidth);
+ assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
// The top bits are known to be zero.
- RHSKnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
+ KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
break;
}
case Instruction::SExt: {
@@ -460,27 +454,27 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
InputDemandedBits.set(SrcBitWidth-1);
InputDemandedBits.trunc(SrcBitWidth);
- RHSKnownZero.trunc(SrcBitWidth);
- RHSKnownOne.trunc(SrcBitWidth);
+ KnownZero.trunc(SrcBitWidth);
+ KnownOne.trunc(SrcBitWidth);
if (SimplifyDemandedBits(I->getOperandUse(0), InputDemandedBits,
- RHSKnownZero, RHSKnownOne, Depth+1))
+ KnownZero, KnownOne, Depth+1))
return I;
InputDemandedBits.zext(BitWidth);
- RHSKnownZero.zext(BitWidth);
- RHSKnownOne.zext(BitWidth);
- assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
+ KnownZero.zext(BitWidth);
+ KnownOne.zext(BitWidth);
+ assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
// If the sign bit of the input is known set or clear, then we know the
// top bits of the result.
// If the input sign bit is known zero, or if the NewBits are not demanded
// convert this into a zero extension.
- if (RHSKnownZero[SrcBitWidth-1] || (NewBits & ~DemandedMask) == NewBits) {
+ if (KnownZero[SrcBitWidth-1] || (NewBits & ~DemandedMask) == NewBits) {
// Convert to ZExt cast
CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
return InsertNewInstBefore(NewCast, *I);
- } else if (RHSKnownOne[SrcBitWidth-1]) { // Input sign bit known set
- RHSKnownOne |= NewBits;
+ } else if (KnownOne[SrcBitWidth-1]) { // Input sign bit known set
+ KnownOne |= NewBits;
}
break;
}
@@ -540,12 +534,12 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// Bits are known one if they are known zero in one operand and one in the
// other, and there is no input carry.
- RHSKnownOne = ((LHSKnownZero & RHSVal) |
- (LHSKnownOne & ~RHSVal)) & ~CarryBits;
+ KnownOne = ((LHSKnownZero & RHSVal) |
+ (LHSKnownOne & ~RHSVal)) & ~CarryBits;
// Bits are known zero if they are known zero in both operands and there
// is no input carry.
- RHSKnownZero = LHSKnownZero & ~RHSVal & ~CarryBits;
+ KnownZero = LHSKnownZero & ~RHSVal & ~CarryBits;
} else {
// If the high-bits of this ADD are not demanded, then it does not demand
// the high bits of its LHS or RHS.
@@ -578,21 +572,21 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
}
// Otherwise just hand the sub off to ComputeMaskedBits to fill in
// the known zeros and ones.
- ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
+ ComputeMaskedBits(V, DemandedMask, KnownZero, KnownOne, Depth);
break;
case Instruction::Shl:
if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
- RHSKnownZero, RHSKnownOne, Depth+1))
+ KnownZero, KnownOne, Depth+1))
return I;
- assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
- RHSKnownZero <<= ShiftAmt;
- RHSKnownOne <<= ShiftAmt;
+ assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
+ KnownZero <<= ShiftAmt;
+ KnownOne <<= ShiftAmt;
// low bits known zero.
if (ShiftAmt)
- RHSKnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
+ KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
}
break;
case Instruction::LShr:
@@ -603,15 +597,15 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// Unsigned shift right.
APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
- RHSKnownZero, RHSKnownOne, Depth+1))
+ KnownZero, KnownOne, Depth+1))
return I;
- assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
- RHSKnownZero = APIntOps::lshr(RHSKnownZero, ShiftAmt);
- RHSKnownOne = APIntOps::lshr(RHSKnownOne, ShiftAmt);
+ assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
+ KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
+ KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
if (ShiftAmt) {
// Compute the new bits that are at the top now.
APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
- RHSKnownZero |= HighBits; // high bits known zero.
+ KnownZero |= HighBits; // high bits known zero.
}
}
break;
@@ -642,13 +636,13 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
if (DemandedMask.countLeadingZeros() <= ShiftAmt)
DemandedMaskIn.set(BitWidth-1);
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
- RHSKnownZero, RHSKnownOne, Depth+1))
+ KnownZero, KnownOne, Depth+1))
return I;
- assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
+ assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
// Compute the new bits that are at the top now.
APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
- RHSKnownZero = APIntOps::lshr(RHSKnownZero, ShiftAmt);
- RHSKnownOne = APIntOps::lshr(RHSKnownOne, ShiftAmt);
+ KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
+ KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
// Handle the sign bits.
APInt SignBit(APInt::getSignBit(BitWidth));
@@ -657,14 +651,14 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// If the input sign bit is known to be zero, or if none of the top bits
// are demanded, turn this into an unsigned shift right.
- if (BitWidth <= ShiftAmt || RHSKnownZero[BitWidth-ShiftAmt-1] ||
+ if (BitWidth <= ShiftAmt || KnownZero[BitWidth-ShiftAmt-1] ||
(HighBits & ~DemandedMask) == HighBits) {
// Perform the logical shift right.
Instruction *NewVal = BinaryOperator::CreateLShr(
I->getOperand(0), SA, I->getName());
return InsertNewInstBefore(NewVal, *I);
- } else if ((RHSKnownOne & SignBit) != 0) { // New bits are known one.
- RHSKnownOne |= HighBits;
+ } else if ((KnownOne & SignBit) != 0) { // New bits are known one.
+ KnownOne |= HighBits;
}
}
break;
@@ -681,10 +675,19 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
LHSKnownZero, LHSKnownOne, Depth+1))
return I;
+ // The low bits of LHS are unchanged by the srem.
+ KnownZero = LHSKnownZero & LowBits;
+ KnownOne = LHSKnownOne & LowBits;
+
+ // If LHS is non-negative or has all low bits zero, then the upper bits
+ // are all zero.
if (LHSKnownZero[BitWidth-1] || ((LHSKnownZero & LowBits) == LowBits))
- LHSKnownZero |= ~LowBits;
+ KnownZero |= ~LowBits;
- KnownZero |= LHSKnownZero & DemandedMask;
+ // If LHS is negative and not all low bits are zero, then the upper bits
+ // are all one.
+ if (LHSKnownOne[BitWidth-1] && ((LHSKnownOne & LowBits) != 0))
+ KnownOne |= ~LowBits;
assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
}
@@ -743,15 +746,15 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
}
}
}
- ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
+ ComputeMaskedBits(V, DemandedMask, KnownZero, KnownOne, Depth);
break;
}
// If the client is only demanding bits that we know, return the known
// constant.
- if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask)
- return Constant::getIntegerValue(VTy, RHSKnownOne);
- return false;
+ if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask)
+ return Constant::getIntegerValue(VTy, KnownOne);
+ return 0;
}
@@ -764,7 +767,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
/// operation, the operation is simplified, then the resultant value is
/// returned. This returns null if no change was made.
Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
- APInt& UndefElts,
+ APInt &UndefElts,
unsigned Depth) {
unsigned VWidth = cast<VectorType>(V->getType())->getNumElements();
APInt EltMask(APInt::getAllOnesValue(VWidth));
@@ -774,13 +777,15 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
// If the entire vector is undefined, just return this info.
UndefElts = EltMask;
return 0;
- } else if (DemandedElts == 0) { // If nothing is demanded, provide undef.
+ }
+
+ if (DemandedElts == 0) { // If nothing is demanded, provide undef.
UndefElts = EltMask;
return UndefValue::get(V->getType());
}
UndefElts = 0;
- if (ConstantVector *CP = dyn_cast<ConstantVector>(V)) {
+ if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
Constant *Undef = UndefValue::get(EltTy);
@@ -789,23 +794,25 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
if (!DemandedElts[i]) { // If not demanded, set to undef.
Elts.push_back(Undef);
UndefElts.set(i);
- } else if (isa<UndefValue>(CP->getOperand(i))) { // Already undef.
+ } else if (isa<UndefValue>(CV->getOperand(i))) { // Already undef.
Elts.push_back(Undef);
UndefElts.set(i);
} else { // Otherwise, defined.
- Elts.push_back(CP->getOperand(i));
+ Elts.push_back(CV->getOperand(i));
}
// If we changed the constant, return it.
Constant *NewCP = ConstantVector::get(Elts);
- return NewCP != CP ? NewCP : 0;
- } else if (isa<ConstantAggregateZero>(V)) {
+ return NewCP != CV ? NewCP : 0;
+ }
+
+ if (isa<ConstantAggregateZero>(V)) {
// Simplify the CAZ to a ConstantVector where the non-demanded elements are
// set to undef.
// Check if this is identity. If so, return 0 since we are not simplifying
// anything.
- if (DemandedElts == ((1ULL << VWidth) -1))
+ if (DemandedElts.isAllOnesValue())
return 0;
const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
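
Most of this hunk is a bookkeeping change: SimplifyDemandedUseBits no longer aliases RHSKnownZero/RHSKnownOne onto the caller's KnownZero/KnownOne, so the per-operand results are now combined into KnownZero/KnownOne explicitly. The combination rules themselves are unchanged and can be sanity-checked with plain integer masks (a standalone sketch; each mask marks bits known to be 0 or known to be 1, never both):

  #include <cassert>
  #include <cstdint>

  int main() {
    // Two operands with some known bits; a bit not set in either mask is unknown.
    uint32_t LHSKnownZero = 0x0000FF00, LHSKnownOne = 0x000000F0;
    uint32_t RHSKnownZero = 0x00F0F000, RHSKnownOne = 0x0000000F;

    // And: result bit is 1 only if 1 in both; 0 if 0 in either.
    uint32_t AndKnownOne  = RHSKnownOne & LHSKnownOne;
    uint32_t AndKnownZero = RHSKnownZero | LHSKnownZero;

    // Or: result bit is 0 only if 0 in both; 1 if 1 in either.
    uint32_t OrKnownZero = RHSKnownZero & LHSKnownZero;
    uint32_t OrKnownOne  = RHSKnownOne | LHSKnownOne;

    // Xor: known only if known on both sides; 0 where they agree, 1 where they differ.
    uint32_t XorKnownZero = (RHSKnownZero & LHSKnownZero) | (RHSKnownOne & LHSKnownOne);
    uint32_t XorKnownOne  = (RHSKnownZero & LHSKnownOne) | (RHSKnownOne & LHSKnownZero);

    // No result may claim a bit is both zero and one.
    assert((AndKnownZero & AndKnownOne) == 0);
    assert((OrKnownZero  & OrKnownOne)  == 0);
    assert((XorKnownZero & XorKnownOne) == 0);
    return 0;
  }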
diff --git a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index f11f557..20fda1a 100644
--- a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -162,7 +162,8 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
// property.
if (EI.getOperand(0)->hasOneUse() && VectorWidth != 1) {
APInt UndefElts(VectorWidth, 0);
- APInt DemandedMask(VectorWidth, 1 << IndexVal);
+ APInt DemandedMask(VectorWidth, 0);
+ DemandedMask.set(IndexVal);
if (Value *V = SimplifyDemandedVectorElts(EI.getOperand(0),
DemandedMask, UndefElts)) {
EI.setOperand(0, V);
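
The extractelement change replaces a demanded-element mask built from "1 << IndexVal" (a 32-bit shift that goes wrong for indices of 32 and above, and whose uint64_t constructor argument cannot express bits past 63) with an all-zero APInt whose IndexVal bit is then set. A standalone analogue of the fixed pattern, using std::bitset in place of APInt (illustration only):

  #include <bitset>
  #include <cassert>

  int main() {
    const unsigned VectorWidth = 128;   // hypothetical wide vector
    const unsigned IndexVal = 100;      // element being extracted
    // Building the mask as (1 << IndexVal) in a plain 32-bit int would be
    // undefined here; setting the bit on an arbitrary-width mask, as the
    // new APInt-based code does, is always well defined.
    std::bitset<VectorWidth> DemandedMask;  // starts all zero
    DemandedMask.set(IndexVal);
    assert(DemandedMask.count() == 1 && DemandedMask.test(IndexVal));
    return 0;
  }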
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index 93b1961..96c0342 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -158,7 +158,7 @@ Value *InstCombiner::dyn_castNegVal(Value *V) const {
return ConstantExpr::getNeg(C);
if (ConstantVector *C = dyn_cast<ConstantVector>(V))
- if (C->getType()->getElementType()->isInteger())
+ if (C->getType()->getElementType()->isIntegerTy())
return ConstantExpr::getNeg(C);
return 0;
@@ -177,7 +177,7 @@ Value *InstCombiner::dyn_castFNegVal(Value *V) const {
return ConstantExpr::getFNeg(C);
if (ConstantVector *C = dyn_cast<ConstantVector>(V))
- if (C->getType()->getElementType()->isFloatingPoint())
+ if (C->getType()->getElementType()->isFloatingPointTy())
return ConstantExpr::getFNeg(C);
return 0;
@@ -226,7 +226,7 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
if (isa<Constant>(TV) || isa<Constant>(FV)) {
// Bool selects with constant operands can be folded to logical ops.
- if (SI->getType()->isInteger(1)) return 0;
+ if (SI->getType()->isIntegerTy(1)) return 0;
Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);
@@ -596,7 +596,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// (where tmp = 8*tmp2) into:
// getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
- if (TD && isa<ArrayType>(SrcElTy) && ResElTy->isInteger(8)) {
+ if (TD && isa<ArrayType>(SrcElTy) && ResElTy->isIntegerTy(8)) {
uint64_t ArrayEltSize =
TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
diff --git a/lib/Transforms/InstCombine/Makefile b/lib/Transforms/InstCombine/Makefile
index f9de42a..0c488e78 100644
--- a/lib/Transforms/InstCombine/Makefile
+++ b/lib/Transforms/InstCombine/Makefile
@@ -10,7 +10,6 @@
LEVEL = ../../..
LIBRARYNAME = LLVMInstCombine
BUILD_ARCHIVE = 1
-CXXFLAGS = -fno-rtti
include $(LEVEL)/Makefile.common
diff --git a/lib/Transforms/Instrumentation/Makefile b/lib/Transforms/Instrumentation/Makefile
index 1238896..6cbc7a9 100644
--- a/lib/Transforms/Instrumentation/Makefile
+++ b/lib/Transforms/Instrumentation/Makefile
@@ -10,7 +10,6 @@
LEVEL = ../../..
LIBRARYNAME = LLVMInstrumentation
BUILD_ARCHIVE = 1
-CXXFLAGS = -fno-rtti
include $(LEVEL)/Makefile.common
diff --git a/lib/Transforms/Instrumentation/ProfilingUtils.cpp b/lib/Transforms/Instrumentation/ProfilingUtils.cpp
index 3214c8c..8662a82 100644
--- a/lib/Transforms/Instrumentation/ProfilingUtils.cpp
+++ b/lib/Transforms/Instrumentation/ProfilingUtils.cpp
@@ -84,7 +84,7 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
AI = MainFn->arg_begin();
// If the program looked at argc, have it look at the return value of the
// init call instead.
- if (!AI->getType()->isInteger(32)) {
+ if (!AI->getType()->isIntegerTy(32)) {
Instruction::CastOps opcode;
if (!AI->use_empty()) {
opcode = CastInst::getCastOpcode(InitCall, true, AI->getType(), true);
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index c3139a5..21e6f89 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -32,7 +32,6 @@
#include "llvm/ADT/SmallSet.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CallSite.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
@@ -40,9 +39,6 @@
using namespace llvm;
using namespace llvm::PatternMatch;
-static cl::opt<bool> FactorCommonPreds("split-critical-paths-tweak",
- cl::init(false), cl::Hidden);
-
namespace {
class CodeGenPrepare : public FunctionPass {
/// TLI - Keep a pointer of a TargetLowering to consult for determining
@@ -63,6 +59,10 @@ namespace {
AU.addPreserved<ProfileInfo>();
}
+ virtual void releaseMemory() {
+ BackEdges.clear();
+ }
+
private:
bool EliminateMostlyEmptyBlocks(Function &F);
bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
@@ -297,6 +297,70 @@ void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}
+/// FindReusablePredBB - Check all of the predecessors of the block DestPHI
+/// lives in to see if there is a block that we can reuse as a critical edge
+/// from TIBB.
+static BasicBlock *FindReusablePredBB(PHINode *DestPHI, BasicBlock *TIBB) {
+ BasicBlock *Dest = DestPHI->getParent();
+
+ /// TIPHIValues - This array is lazily computed to determine the values of
+ /// PHIs in Dest that TI would provide.
+ SmallVector<Value*, 32> TIPHIValues;
+
+ /// TIBBEntryNo - This is a cache to speed up pred queries for TIBB.
+ unsigned TIBBEntryNo = 0;
+
+ // Check to see if Dest has any blocks that can be used as a split edge for
+ // this terminator.
+ for (unsigned pi = 0, e = DestPHI->getNumIncomingValues(); pi != e; ++pi) {
+ BasicBlock *Pred = DestPHI->getIncomingBlock(pi);
+ // To be usable, the pred has to end with an uncond branch to the dest.
+ BranchInst *PredBr = dyn_cast<BranchInst>(Pred->getTerminator());
+ if (!PredBr || !PredBr->isUnconditional())
+ continue;
+ // Must be empty other than the branch and debug info.
+ BasicBlock::iterator I = Pred->begin();
+ while (isa<DbgInfoIntrinsic>(I))
+ I++;
+ if (&*I != PredBr)
+ continue;
+ // Cannot be the entry block; its label does not get emitted.
+ if (Pred == &Dest->getParent()->getEntryBlock())
+ continue;
+
+ // Finally, since we know that Dest has phi nodes in it, we have to make
+ // sure that jumping to Pred will have the same effect as going to Dest in
+ // terms of PHI values.
+ PHINode *PN;
+ unsigned PHINo = 0;
+ unsigned PredEntryNo = pi;
+
+ bool FoundMatch = true;
+ for (BasicBlock::iterator I = Dest->begin();
+ (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo) {
+ if (PHINo == TIPHIValues.size()) {
+ if (PN->getIncomingBlock(TIBBEntryNo) != TIBB)
+ TIBBEntryNo = PN->getBasicBlockIndex(TIBB);
+ TIPHIValues.push_back(PN->getIncomingValue(TIBBEntryNo));
+ }
+
+ // If the PHI entry doesn't work, we can't use this pred.
+ if (PN->getIncomingBlock(PredEntryNo) != Pred)
+ PredEntryNo = PN->getBasicBlockIndex(Pred);
+
+ if (TIPHIValues[PHINo] != PN->getIncomingValue(PredEntryNo)) {
+ FoundMatch = false;
+ break;
+ }
+ }
+
+ // If we found a workable predecessor, change TI to branch to Succ.
+ if (FoundMatch)
+ return Pred;
+ }
+ return 0;
+}
+
/// SplitEdgeNicely - Split the critical edge from TI to its specified
/// successor if it will improve codegen. We only do this if the successor has
@@ -311,13 +375,12 @@ static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum,
BasicBlock *Dest = TI->getSuccessor(SuccNum);
assert(isa<PHINode>(Dest->begin()) &&
"This should only be called if Dest has a PHI!");
+ PHINode *DestPHI = cast<PHINode>(Dest->begin());
// Do not split edges to EH landing pads.
- if (InvokeInst *Invoke = dyn_cast<InvokeInst>(TI)) {
+ if (InvokeInst *Invoke = dyn_cast<InvokeInst>(TI))
if (Invoke->getSuccessor(1) == Dest)
return;
- }
-
// As a hack, never split backedges of loops. Even though the copy for any
// PHIs inserted on the backedge would be dead for exits from the loop, we
@@ -325,92 +388,16 @@ static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum,
if (BackEdges.count(std::make_pair(TIBB, Dest)))
return;
- if (!FactorCommonPreds) {
- /// TIPHIValues - This array is lazily computed to determine the values of
- /// PHIs in Dest that TI would provide.
- SmallVector<Value*, 32> TIPHIValues;
-
- // Check to see if Dest has any blocks that can be used as a split edge for
- // this terminator.
- for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) {
- BasicBlock *Pred = *PI;
- // To be usable, the pred has to end with an uncond branch to the dest.
- BranchInst *PredBr = dyn_cast<BranchInst>(Pred->getTerminator());
- if (!PredBr || !PredBr->isUnconditional())
- continue;
- // Must be empty other than the branch and debug info.
- BasicBlock::iterator I = Pred->begin();
- while (isa<DbgInfoIntrinsic>(I))
- I++;
- if (dyn_cast<Instruction>(I) != PredBr)
- continue;
- // Cannot be the entry block; its label does not get emitted.
- if (Pred == &(Dest->getParent()->getEntryBlock()))
- continue;
-
- // Finally, since we know that Dest has phi nodes in it, we have to make
- // sure that jumping to Pred will have the same effect as going to Dest in
- // terms of PHI values.
- PHINode *PN;
- unsigned PHINo = 0;
- bool FoundMatch = true;
- for (BasicBlock::iterator I = Dest->begin();
- (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo) {
- if (PHINo == TIPHIValues.size())
- TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB));
-
- // If the PHI entry doesn't work, we can't use this pred.
- if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) {
- FoundMatch = false;
- break;
- }
- }
-
- // If we found a workable predecessor, change TI to branch to Succ.
- if (FoundMatch) {
- ProfileInfo *PFI = P->getAnalysisIfAvailable<ProfileInfo>();
- if (PFI)
- PFI->splitEdge(TIBB, Dest, Pred);
- Dest->removePredecessor(TIBB);
- TI->setSuccessor(SuccNum, Pred);
- return;
- }
- }
-
- SplitCriticalEdge(TI, SuccNum, P, true);
+ if (BasicBlock *ReuseBB = FindReusablePredBB(DestPHI, TIBB)) {
+ ProfileInfo *PFI = P->getAnalysisIfAvailable<ProfileInfo>();
+ if (PFI)
+ PFI->splitEdge(TIBB, Dest, ReuseBB);
+ Dest->removePredecessor(TIBB);
+ TI->setSuccessor(SuccNum, ReuseBB);
return;
}
- PHINode *PN;
- SmallVector<Value*, 8> TIPHIValues;
- for (BasicBlock::iterator I = Dest->begin();
- (PN = dyn_cast<PHINode>(I)); ++I)
- TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB));
-
- SmallVector<BasicBlock*, 8> IdenticalPreds;
- for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) {
- BasicBlock *Pred = *PI;
- if (BackEdges.count(std::make_pair(Pred, Dest)))
- continue;
- if (PI == TIBB)
- IdenticalPreds.push_back(Pred);
- else {
- bool Identical = true;
- unsigned PHINo = 0;
- for (BasicBlock::iterator I = Dest->begin();
- (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo)
- if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) {
- Identical = false;
- break;
- }
- if (Identical)
- IdenticalPreds.push_back(Pred);
- }
- }
-
- assert(!IdenticalPreds.empty());
- SplitBlockPredecessors(Dest, &IdenticalPreds[0], IdenticalPreds.size(),
- ".critedge", P);
+ SplitCriticalEdge(TI, SuccNum, P, true);
}
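
SplitEdgeNicely now always tries the predecessor-reuse strategy (factored out above into FindReusablePredBB) and falls back to SplitCriticalEdge only when no existing predecessor can serve as the target; the old -split-critical-paths-tweak path is gone. The heart of the reuse test is that branching from TIBB to the candidate predecessor must feed every PHI node in Dest the same values as branching straight to Dest. A schematic of that comparison with simplified value types (a sketch under assumed data shapes, not the pass's real structures):

  #include <vector>
  #include <cassert>

  // PHI values a block provides to Dest, one entry per PHI node in Dest.
  using PhiValues = std::vector<int>;

  // Return the index of a predecessor whose PHI contributions match those of
  // TIBB, or -1 if none does. This mirrors the matching loop in
  // FindReusablePredBB; the emptiness/entry-block/terminator checks are omitted.
  int findReusablePred(const PhiValues &FromTIBB,
                       const std::vector<PhiValues> &FromPreds) {
    for (size_t p = 0; p != FromPreds.size(); ++p)
      if (FromPreds[p] == FromTIBB)
        return static_cast<int>(p);
    return -1;
  }

  int main() {
    PhiValues tibb = {1, 7};                       // values TIBB feeds the PHIs
    std::vector<PhiValues> preds = {{1, 5}, {1, 7}};
    assert(findReusablePred(tibb, preds) == 1);    // second pred is reusable
    return 0;
  }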
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 320afa1..09c01d3 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -44,8 +44,14 @@ namespace {
virtual bool runOnFunction(Function &F) {
bool Changed = false;
+
+ DominatorTree &DT = getAnalysis<DominatorTree>();
+
for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
- Changed |= runOnBasicBlock(*I);
+ // Only check non-dead blocks. Dead blocks may have strange pointer
+ // cycles that will confuse alias analysis.
+ if (DT.isReachableFromEntry(I))
+ Changed |= runOnBasicBlock(*I);
return Changed;
}
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index b29fe74..3ce7482 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -60,6 +60,7 @@ STATISTIC(NumPRELoad, "Number of loads PRE'd");
static cl::opt<bool> EnablePRE("enable-pre",
cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
+static cl::opt<bool> EnableFullLoadPRE("enable-full-load-pre", cl::init(false));
//===----------------------------------------------------------------------===//
// ValueTable Class
@@ -1522,8 +1523,6 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
while (TmpBB->getSinglePredecessor()) {
isSinglePred = true;
TmpBB = TmpBB->getSinglePredecessor();
- if (!TmpBB) // If haven't found any, bail now.
- return false;
if (TmpBB == LoadBB) // Infinite (unreachable) loop.
return false;
if (Blockers.count(TmpBB))
@@ -1539,10 +1538,12 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
// at least one of the values is LI. Since this means that we won't be able
// to eliminate LI even if we insert uses in the other predecessors, we will
// end up increasing code size. Reject this by scanning for LI.
- for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
- if (ValuesPerBlock[i].isSimpleValue() &&
- ValuesPerBlock[i].getSimpleValue() == LI)
- return false;
+ if (!EnableFullLoadPRE) {
+ for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
+ if (ValuesPerBlock[i].isSimpleValue() &&
+ ValuesPerBlock[i].getSimpleValue() == LI)
+ return false;
+ }
// FIXME: It is extremely unclear what this loop is doing, other than
// artificially restricting loadpre.
@@ -1566,13 +1567,9 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
return false;
}
- // Okay, we have some hope :). Check to see if the loaded value is fully
- // available in all but one predecessor.
- // FIXME: If we could restructure the CFG, we could make a common pred with
- // all the preds that don't have an available LI and insert a new load into
- // that one block.
- BasicBlock *UnavailablePred = 0;
-
+ // Check to see how many predecessors have the loaded value fully
+ // available.
+ DenseMap<BasicBlock*, Value*> PredLoads;
DenseMap<BasicBlock*, char> FullyAvailableBlocks;
for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
@@ -1581,79 +1578,93 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
PI != E; ++PI) {
- if (IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
+ BasicBlock *Pred = *PI;
+ if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
continue;
-
- // If this load is not available in multiple predecessors, reject it.
- if (UnavailablePred && UnavailablePred != *PI)
+ }
+ PredLoads[Pred] = 0;
+ // We don't currently handle critical edges :(
+ if (Pred->getTerminator()->getNumSuccessors() != 1) {
+ DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
+ << Pred->getName() << "': " << *LI << '\n');
return false;
- UnavailablePred = *PI;
+ }
}
- assert(UnavailablePred != 0 &&
+ // Decide whether PRE is profitable for this load.
+ unsigned NumUnavailablePreds = PredLoads.size();
+ assert(NumUnavailablePreds != 0 &&
"Fully available value should be eliminated above!");
-
- // We don't currently handle critical edges :(
- if (UnavailablePred->getTerminator()->getNumSuccessors() != 1) {
- DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
- << UnavailablePred->getName() << "': " << *LI << '\n');
- return false;
+ if (!EnableFullLoadPRE) {
+ // If this load is unavailable in multiple predecessors, reject it.
+ // FIXME: If we could restructure the CFG, we could make a common pred with
+ // all the preds that don't have an available LI and insert a new load into
+ // that one block.
+ if (NumUnavailablePreds != 1)
+ return false;
}
-
- // Do PHI translation to get its value in the predecessor if necessary. The
- // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
- //
+
+ // Check if the load can safely be moved to all the unavailable predecessors.
+ bool CanDoPRE = true;
SmallVector<Instruction*, 8> NewInsts;
-
- // If all preds have a single successor, then we know it is safe to insert the
- // load on the pred (?!?), so we can insert code to materialize the pointer if
- // it is not available.
- PHITransAddr Address(LI->getOperand(0), TD);
- Value *LoadPtr = 0;
- if (allSingleSucc) {
- LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
- *DT, NewInsts);
- } else {
- Address.PHITranslateValue(LoadBB, UnavailablePred);
- LoadPtr = Address.getAddr();
+ for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
+ E = PredLoads.end(); I != E; ++I) {
+ BasicBlock *UnavailablePred = I->first;
+
+ // Do PHI translation to get its value in the predecessor if necessary. The
+ // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
+
+ // If all preds have a single successor, then we know it is safe to insert
+ // the load on the pred (?!?), so we can insert code to materialize the
+ // pointer if it is not available.
+ PHITransAddr Address(LI->getOperand(0), TD);
+ Value *LoadPtr = 0;
+ if (allSingleSucc) {
+ LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
+ *DT, NewInsts);
+ } else {
+ Address.PHITranslateValue(LoadBB, UnavailablePred);
+ LoadPtr = Address.getAddr();
- // Make sure the value is live in the predecessor.
- if (Instruction *Inst = dyn_cast_or_null<Instruction>(LoadPtr))
- if (!DT->dominates(Inst->getParent(), UnavailablePred))
- LoadPtr = 0;
- }
+ // Make sure the value is live in the predecessor.
+ if (Instruction *Inst = dyn_cast_or_null<Instruction>(LoadPtr))
+ if (!DT->dominates(Inst->getParent(), UnavailablePred))
+ LoadPtr = 0;
+ }
- // If we couldn't find or insert a computation of this phi translated value,
- // we fail PRE.
- if (LoadPtr == 0) {
- assert(NewInsts.empty() && "Shouldn't insert insts on failure");
- DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
- << *LI->getOperand(0) << "\n");
- return false;
- }
+ // If we couldn't find or insert a computation of this phi translated value,
+ // we fail PRE.
+ if (LoadPtr == 0) {
+ DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
+ << *LI->getOperand(0) << "\n");
+ CanDoPRE = false;
+ break;
+ }
- // Assign value numbers to these new instructions.
- for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
- // FIXME: We really _ought_ to insert these value numbers into their
- // parent's availability map. However, in doing so, we risk getting into
- // ordering issues. If a block hasn't been processed yet, we would be
- // marking a value as AVAIL-IN, which isn't what we intend.
- VN.lookup_or_add(NewInsts[i]);
+ // Make sure it is valid to move this load here. We have to watch out for:
+ // @1 = getelementptr (i8* p, ...
+ // test p and branch if == 0
+ // load @1
+ // It is valid to have the getelementptr before the test, even if p can be 0,
+ // as getelementptr only does address arithmetic.
+ // If we are not pushing the value through any multiple-successor blocks
+ // we do not have this case. Otherwise, check that the load is safe to
+ // put anywhere; this can be improved, but should be conservatively safe.
+ if (!allSingleSucc &&
+      // FIXME: REEVALUATE THIS.
+ !isSafeToLoadUnconditionally(LoadPtr,
+ UnavailablePred->getTerminator(),
+ LI->getAlignment(), TD)) {
+ CanDoPRE = false;
+ break;
+ }
+
+ I->second = LoadPtr;
}
-
- // Make sure it is valid to move this load here. We have to watch out for:
- // @1 = getelementptr (i8* p, ...
- // test p and branch if == 0
- // load @1
- // It is valid to have the getelementptr before the test, even if p can be 0,
- // as getelementptr only does address arithmetic.
- // If we are not pushing the value through any multiple-successor blocks
- // we do not have this case. Otherwise, check that the load is safe to
- // put anywhere; this can be improved, but should be conservatively safe.
- if (!allSingleSucc &&
- // FIXME: REEVALUTE THIS.
- !isSafeToLoadUnconditionally(LoadPtr, UnavailablePred->getTerminator())) {
- assert(NewInsts.empty() && "Should not have inserted instructions");
+
+ if (!CanDoPRE) {
+ while (!NewInsts.empty())
+ NewInsts.pop_back_val()->eraseFromParent();
return false;
}
@@ -1665,12 +1676,28 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
<< *NewInsts.back() << '\n');
- Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
- LI->getAlignment(),
- UnavailablePred->getTerminator());
+ // Assign value numbers to the new instructions.
+ for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
+ // FIXME: We really _ought_ to insert these value numbers into their
+ // parent's availability map. However, in doing so, we risk getting into
+ // ordering issues. If a block hasn't been processed yet, we would be
+ // marking a value as AVAIL-IN, which isn't what we intend.
+ VN.lookup_or_add(NewInsts[i]);
+ }
- // Add the newly created load.
- ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,NewLoad));
+ for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
+ E = PredLoads.end(); I != E; ++I) {
+ BasicBlock *UnavailablePred = I->first;
+ Value *LoadPtr = I->second;
+
+ Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
+ LI->getAlignment(),
+ UnavailablePred->getTerminator());
+
+ // Add the newly created load.
+ ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
+ NewLoad));
+ }
// Perform PHI construction.
Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
@@ -1864,6 +1891,10 @@ Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
/// by inserting it into the appropriate sets
bool GVN::processInstruction(Instruction *I,
SmallVectorImpl<Instruction*> &toErase) {
+ // Ignore dbg info intrinsics.
+ if (isa<DbgInfoIntrinsic>(I))
+ return false;
+
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
bool Changed = processLoad(LI, toErase);
@@ -2075,7 +2106,7 @@ bool GVN::performPRE(Function &F) {
for (pred_iterator PI = pred_begin(CurrentBlock),
PE = pred_end(CurrentBlock); PI != PE; ++PI) {
// We're not interested in PRE where the block is its
- // own predecessor, on in blocks with predecessors
+ // own predecessor, or in blocks with predecessors
// that are not reachable.
if (*PI == CurrentBlock) {
NumWithout = 2;
@@ -2123,10 +2154,10 @@ bool GVN::performPRE(Function &F) {
continue;
}
- // Instantiate the expression the in predecessor that lacked it.
+ // Instantiate the expression in the predecessor that lacked it.
// Because we are going top-down through the block, all value numbers
// will be available in the predecessor by the time we need them. Any
- // that weren't original present will have been instantiated earlier
+ // that weren't originally present will have been instantiated earlier
// in this loop.
Instruction *PREInstr = CurInst->clone();
bool success = true;
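
The load-PRE rework replaces the single UnavailablePred with a PredLoads map over every predecessor lacking the value, so that under the new -enable-full-load-pre flag a load can be inserted into several predecessors; by default the transformation still bails when more than one predecessor is unavailable, and it always bails when an unavailable predecessor ends in a critical edge. A standalone sketch of that profitability gate (simplified inputs assumed; not GVN's real data structures):

  #include <vector>
  #include <cassert>

  struct PredInfo {
    bool ValueAvailable;    // value already fully available in this predecessor
    unsigned NumSuccessors; // successors of the predecessor's terminator
  };

  // Mirror the checks in processNonLocalLoad: every unavailable predecessor
  // must have a single successor (no critical edge), and unless full load PRE
  // is enabled there must be exactly one unavailable predecessor.
  bool canDoLoadPRE(const std::vector<PredInfo> &Preds, bool EnableFullLoadPRE) {
    unsigned NumUnavailable = 0;
    for (const PredInfo &P : Preds) {
      if (P.ValueAvailable)
        continue;
      ++NumUnavailable;
      if (P.NumSuccessors != 1)
        return false;          // we don't currently handle critical edges
    }
    if (NumUnavailable == 0)
      return false;            // fully available case is handled earlier in GVN
    return EnableFullLoadPRE || NumUnavailable == 1;
  }

  int main() {
    std::vector<PredInfo> Preds = {{true, 1}, {false, 1}, {false, 1}};
    assert(!canDoLoadPRE(Preds, /*EnableFullLoadPRE=*/false)); // two missing preds
    assert( canDoLoadPRE(Preds, /*EnableFullLoadPRE=*/true));
    return 0;
  }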
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index 17f7d98..5302fdc 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -364,23 +364,17 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
if (ExitingBlock)
NeedCannIV = true;
}
- for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
- const SCEV *Stride = IU->StrideOrder[i];
- const Type *Ty = SE->getEffectiveSCEVType(Stride->getType());
+ for (IVUsers::const_iterator I = IU->begin(), E = IU->end(); I != E; ++I) {
+ const Type *Ty =
+ SE->getEffectiveSCEVType(I->getOperandValToReplace()->getType());
if (!LargestType ||
SE->getTypeSizeInBits(Ty) >
SE->getTypeSizeInBits(LargestType))
LargestType = Ty;
-
- std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
- IU->IVUsesByStride.find(IU->StrideOrder[i]);
- assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
-
- if (!SI->second->Users.empty())
- NeedCannIV = true;
+ NeedCannIV = true;
}
- // Now that we know the largest of of the induction variable expressions
+ // Now that we know the largest of the induction variable expressions
// in this loop, insert a canonical induction variable of the largest size.
Value *IndVar = 0;
if (NeedCannIV) {
@@ -455,72 +449,64 @@ void IndVarSimplify::RewriteIVExpressions(Loop *L, const Type *LargestType,
// add the offsets to the primary induction variable and cast, avoiding
// the need for the code evaluation methods to insert induction variables
// of different sizes.
- for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
- const SCEV *Stride = IU->StrideOrder[i];
-
- std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
- IU->IVUsesByStride.find(IU->StrideOrder[i]);
- assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
- ilist<IVStrideUse> &List = SI->second->Users;
- for (ilist<IVStrideUse>::iterator UI = List.begin(),
- E = List.end(); UI != E; ++UI) {
- Value *Op = UI->getOperandValToReplace();
- const Type *UseTy = Op->getType();
- Instruction *User = UI->getUser();
-
- // Compute the final addrec to expand into code.
- const SCEV *AR = IU->getReplacementExpr(*UI);
-
- // Evaluate the expression out of the loop, if possible.
- if (!L->contains(UI->getUser())) {
- const SCEV *ExitVal = SE->getSCEVAtScope(AR, L->getParentLoop());
- if (ExitVal->isLoopInvariant(L))
- AR = ExitVal;
- }
+ for (IVUsers::iterator UI = IU->begin(), E = IU->end(); UI != E; ++UI) {
+ const SCEV *Stride = UI->getStride();
+ Value *Op = UI->getOperandValToReplace();
+ const Type *UseTy = Op->getType();
+ Instruction *User = UI->getUser();
+
+ // Compute the final addrec to expand into code.
+ const SCEV *AR = IU->getReplacementExpr(*UI);
+
+ // Evaluate the expression out of the loop, if possible.
+ if (!L->contains(UI->getUser())) {
+ const SCEV *ExitVal = SE->getSCEVAtScope(AR, L->getParentLoop());
+ if (ExitVal->isLoopInvariant(L))
+ AR = ExitVal;
+ }
- // FIXME: It is an extremely bad idea to indvar substitute anything more
- // complex than affine induction variables. Doing so will put expensive
- // polynomial evaluations inside of the loop, and the str reduction pass
- // currently can only reduce affine polynomials. For now just disable
- // indvar subst on anything more complex than an affine addrec, unless
- // it can be expanded to a trivial value.
- if (!AR->isLoopInvariant(L) && !Stride->isLoopInvariant(L))
- continue;
+ // FIXME: It is an extremely bad idea to indvar substitute anything more
+ // complex than affine induction variables. Doing so will put expensive
+ // polynomial evaluations inside of the loop, and the str reduction pass
+ // currently can only reduce affine polynomials. For now just disable
+ // indvar subst on anything more complex than an affine addrec, unless
+ // it can be expanded to a trivial value.
+ if (!AR->isLoopInvariant(L) && !Stride->isLoopInvariant(L))
+ continue;
- // Determine the insertion point for this user. By default, insert
- // immediately before the user. The SCEVExpander class will automatically
- // hoist loop invariants out of the loop. For PHI nodes, there may be
- // multiple uses, so compute the nearest common dominator for the
- // incoming blocks.
- Instruction *InsertPt = User;
- if (PHINode *PHI = dyn_cast<PHINode>(InsertPt))
- for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i)
- if (PHI->getIncomingValue(i) == Op) {
- if (InsertPt == User)
- InsertPt = PHI->getIncomingBlock(i)->getTerminator();
- else
- InsertPt =
- DT->findNearestCommonDominator(InsertPt->getParent(),
- PHI->getIncomingBlock(i))
- ->getTerminator();
- }
-
- // Now expand it into actual Instructions and patch it into place.
- Value *NewVal = Rewriter.expandCodeFor(AR, UseTy, InsertPt);
-
- // Patch the new value into place.
- if (Op->hasName())
- NewVal->takeName(Op);
- User->replaceUsesOfWith(Op, NewVal);
- UI->setOperandValToReplace(NewVal);
- DEBUG(dbgs() << "INDVARS: Rewrote IV '" << *AR << "' " << *Op << '\n'
- << " into = " << *NewVal << "\n");
- ++NumRemoved;
- Changed = true;
-
- // The old value may be dead now.
- DeadInsts.push_back(Op);
- }
+ // Determine the insertion point for this user. By default, insert
+ // immediately before the user. The SCEVExpander class will automatically
+ // hoist loop invariants out of the loop. For PHI nodes, there may be
+ // multiple uses, so compute the nearest common dominator for the
+ // incoming blocks.
+ Instruction *InsertPt = User;
+ if (PHINode *PHI = dyn_cast<PHINode>(InsertPt))
+ for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i)
+ if (PHI->getIncomingValue(i) == Op) {
+ if (InsertPt == User)
+ InsertPt = PHI->getIncomingBlock(i)->getTerminator();
+ else
+ InsertPt =
+ DT->findNearestCommonDominator(InsertPt->getParent(),
+ PHI->getIncomingBlock(i))
+ ->getTerminator();
+ }
+
+ // Now expand it into actual Instructions and patch it into place.
+ Value *NewVal = Rewriter.expandCodeFor(AR, UseTy, InsertPt);
+
+ // Patch the new value into place.
+ if (Op->hasName())
+ NewVal->takeName(Op);
+ User->replaceUsesOfWith(Op, NewVal);
+ UI->setOperandValToReplace(NewVal);
+ DEBUG(dbgs() << "INDVARS: Rewrote IV '" << *AR << "' " << *Op << '\n'
+ << " into = " << *NewVal << "\n");
+ ++NumRemoved;
+ Changed = true;
+
+ // The old value may be dead now.
+ DeadInsts.push_back(Op);
}
// Clear the rewriter cache, because values that are in the rewriter's cache
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 9531311..8f21aac 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -336,13 +336,18 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
else
InterestingVal = ConstantInt::getFalse(I->getContext());
- // Scan for the sentinel.
+ // Scan for the sentinel. If we find an undef, force it to the
+ // interesting value: x|undef -> true and x&undef -> false.
for (unsigned i = 0, e = LHSVals.size(); i != e; ++i)
- if (LHSVals[i].first == InterestingVal || LHSVals[i].first == 0)
+ if (LHSVals[i].first == InterestingVal || LHSVals[i].first == 0) {
Result.push_back(LHSVals[i]);
+ Result.back().first = InterestingVal;
+ }
for (unsigned i = 0, e = RHSVals.size(); i != e; ++i)
- if (RHSVals[i].first == InterestingVal || RHSVals[i].first == 0)
+ if (RHSVals[i].first == InterestingVal || RHSVals[i].first == 0) {
Result.push_back(RHSVals[i]);
+ Result.back().first = InterestingVal;
+ }
return !Result.empty();
}
@@ -400,7 +405,7 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
// If comparing a live-in value against a constant, see if we know the
// live-in value on any predecessors.
if (LVI && isa<Constant>(Cmp->getOperand(1)) &&
- Cmp->getType()->isInteger() && // Not vector compare.
+ Cmp->getType()->isIntegerTy() && // Not vector compare.
(!isa<Instruction>(Cmp->getOperand(0)) ||
cast<Instruction>(Cmp->getOperand(0))->getParent() != BB)) {
Constant *RHSCst = cast<Constant>(Cmp->getOperand(1));
@@ -451,6 +456,12 @@ static unsigned GetBestDestForJumpOnUndef(BasicBlock *BB) {
/// ProcessBlock - If there are any predecessors whose control can be threaded
/// through to a successor, transform them now.
bool JumpThreading::ProcessBlock(BasicBlock *BB) {
+ // If the block is trivially dead, just return and let the caller nuke it.
+ // This simplifies other transformations.
+ if (pred_begin(BB) == pred_end(BB) &&
+ BB != &BB->getParent()->getEntryBlock())
+ return false;
+
// If this block has a single predecessor, and if that pred has a single
// successor, merge the blocks. This encourages recursive jump threading
// because now the condition in this block can be threaded through
@@ -1117,6 +1128,11 @@ bool JumpThreading::ProcessBranchOnXOR(BinaryOperator *BO) {
isa<ConstantInt>(BO->getOperand(1)))
return false;
+ // If the first instruction in BB isn't a phi, we won't be able to infer
+ // anything special about any particular predecessor.
+ if (!isa<PHINode>(BB->front()))
+ return false;
+
// If we have a xor as the branch input to this block, and we know that the
// LHS or RHS of the xor in any predecessor is true/false, then we can clone
// the condition into the predecessor and fix that value to true, saving some
@@ -1174,6 +1190,26 @@ bool JumpThreading::ProcessBranchOnXOR(BinaryOperator *BO) {
BlocksToFoldInto.push_back(XorOpValues[i].second);
}
+ // If we inferred a value for all of the predecessors, then duplication won't
+ // help us. However, we can just replace the LHS or RHS with the constant.
+ if (BlocksToFoldInto.size() ==
+ cast<PHINode>(BB->front()).getNumIncomingValues()) {
+ if (SplitVal == 0) {
+ // If all preds provide undef, just nuke the xor, because it is undef too.
+ BO->replaceAllUsesWith(UndefValue::get(BO->getType()));
+ BO->eraseFromParent();
+ } else if (SplitVal->isZero()) {
+ // If all preds provide 0, replace the xor with the other input.
+ BO->replaceAllUsesWith(BO->getOperand(isLHS));
+ BO->eraseFromParent();
+ } else {
+ // If all preds provide 1, set the computed value to 1.
+ BO->setOperand(!isLHS, SplitVal);
+ }
+
+ return true;
+ }
+
// Try to duplicate BB into PredBB.
return DuplicateCondBranchOnPHIIntoPred(BB, BlocksToFoldInto);
}
@@ -1393,9 +1429,9 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
// Unless PredBB ends with an unconditional branch, split the edge so that we
// can just clone the bits from BB into the end of the new PredBB.
- BranchInst *OldPredBranch = cast<BranchInst>(PredBB->getTerminator());
+ BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator());
- if (!OldPredBranch->isUnconditional()) {
+ if (OldPredBranch == 0 || !OldPredBranch->isUnconditional()) {
PredBB = SplitEdge(PredBB, BB, this);
OldPredBranch = cast<BranchInst>(PredBB->getTerminator());
}
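
The new early-out in ProcessBranchOnXOR covers the case where the phi operand of the xor is known in every predecessor: duplicating the block would gain nothing, so the xor is simplified in place, and the simplifications are just the i1 identities (undef in all preds makes the xor undef, 0 in all preds makes it its other operand, 1 in all preds turns it into a negation of the other operand). Those identities checked with bool standing in for i1 (illustration only):

  #include <cassert>
  #include <initializer_list>

  int main() {
    for (bool other : {false, true}) {
      assert((other ^ false) == other);   // xor with 0: just the other operand
      assert((other ^ true)  == !other);  // xor with 1: negation of the other operand
    }
    return 0;
  }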
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index fa820ed..240b298 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -17,6 +17,40 @@
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
+// Terminology note: this code has a lot of handling for "post-increment" or
+// "post-inc" users. This is not talking about post-increment addressing modes;
+// it is instead talking about code like this:
+//
+// %i = phi [ 0, %entry ], [ %i.next, %latch ]
+// ...
+// %i.next = add %i, 1
+// %c = icmp eq %i.next, %n
+//
+// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
+// it's useful to think about these as the same register, with some uses using
+// the value of the register before the add and some using it after. In this
+// example, the icmp is a post-increment user, since it uses %i.next, which is
+// the value of the induction variable after the increment. The other common
+// case of post-increment users is users outside the loop.
+//
+// TODO: More sophistication in the way Formulae are generated and filtered.
+//
+// TODO: Handle multiple loops at a time.
+//
+// TODO: Should TargetLowering::AddrMode::BaseGV be changed to a ConstantExpr
+// instead of a GlobalValue?
+//
+// TODO: When truncation is free, truncate ICmp users' operands to make it a
+// smaller encoding (on x86 at least).
+//
+// TODO: When a negated register is used by an add (such as in a list of
+// multiple base registers, or as the increment expression in an addrec),
+// we may not actually need both reg and (-1 * reg) in registers; the
+// negation can be implemented by using a sub instead of an add. The
+// lack of support for taking this into consideration when making
+// register pressure decisions is partly worked around by the "Special"
+// use kind.
+//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "loop-reduce"
@@ -26,208 +60,401 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/IVUsers.h"
+#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
-#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;
-STATISTIC(NumReduced , "Number of IV uses strength reduced");
-STATISTIC(NumInserted, "Number of PHIs inserted");
-STATISTIC(NumVariable, "Number of PHIs with variable strides");
-STATISTIC(NumEliminated, "Number of strides eliminated");
-STATISTIC(NumShadow, "Number of Shadow IVs optimized");
-STATISTIC(NumImmSunk, "Number of common expr immediates sunk into uses");
-STATISTIC(NumLoopCond, "Number of loop terminating conds optimized");
-STATISTIC(NumCountZero, "Number of count iv optimized to count toward zero");
+namespace {
+
+/// RegSortData - This class holds data which is used to order reuse candidates.
+class RegSortData {
+public:
+ /// UsedByIndices - This represents the set of LSRUse indices which reference
+ /// a particular register.
+ SmallBitVector UsedByIndices;
+
+ RegSortData() {}
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+};
-static cl::opt<bool> EnableFullLSRMode("enable-full-lsr",
- cl::init(false),
- cl::Hidden);
+}
+
+void RegSortData::print(raw_ostream &OS) const {
+ OS << "[NumUses=" << UsedByIndices.count() << ']';
+}
+
+void RegSortData::dump() const {
+ print(errs()); errs() << '\n';
+}
namespace {
- struct BasedUser;
+/// RegUseTracker - Map register candidates to information about how they are
+/// used.
+class RegUseTracker {
+ typedef DenseMap<const SCEV *, RegSortData> RegUsesTy;
- /// IVInfo - This structure keeps track of one IV expression inserted during
- /// StrengthReduceStridedIVUsers. It contains the stride, the common base, as
- /// well as the PHI node and increment value created for rewrite.
- struct IVExpr {
- const SCEV *Stride;
- const SCEV *Base;
- PHINode *PHI;
+ RegUsesTy RegUses;
+ SmallVector<const SCEV *, 16> RegSequence;
- IVExpr(const SCEV *const stride, const SCEV *const base, PHINode *phi)
- : Stride(stride), Base(base), PHI(phi) {}
- };
+public:
+ void CountRegister(const SCEV *Reg, size_t LUIdx);
+
+ bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;
+
+ const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;
+
+ void clear();
+
+ typedef SmallVectorImpl<const SCEV *>::iterator iterator;
+ typedef SmallVectorImpl<const SCEV *>::const_iterator const_iterator;
+ iterator begin() { return RegSequence.begin(); }
+ iterator end() { return RegSequence.end(); }
+ const_iterator begin() const { return RegSequence.begin(); }
+ const_iterator end() const { return RegSequence.end(); }
+};
+
+}
- /// IVsOfOneStride - This structure keeps track of all IV expression inserted
- /// during StrengthReduceStridedIVUsers for a particular stride of the IV.
- struct IVsOfOneStride {
- std::vector<IVExpr> IVs;
+void
+RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
+ std::pair<RegUsesTy::iterator, bool> Pair =
+ RegUses.insert(std::make_pair(Reg, RegSortData()));
+ RegSortData &RSD = Pair.first->second;
+ if (Pair.second)
+ RegSequence.push_back(Reg);
+ RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
+ RSD.UsedByIndices.set(LUIdx);
+}
+
+bool
+RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
+ if (!RegUses.count(Reg)) return false;
+ const SmallBitVector &UsedByIndices =
+ RegUses.find(Reg)->second.UsedByIndices;
+ int i = UsedByIndices.find_first();
+ if (i == -1) return false;
+ if ((size_t)i != LUIdx) return true;
+ return UsedByIndices.find_next(i) != -1;
+}
+
+const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
+ RegUsesTy::const_iterator I = RegUses.find(Reg);
+ assert(I != RegUses.end() && "Unknown register!");
+ return I->second.UsedByIndices;
+}
+
+void RegUseTracker::clear() {
+ RegUses.clear();
+ RegSequence.clear();
+}
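
// Illustrative sketch (not part of the patch): the bookkeeping RegUseTracker
// performs -- record which use indices reference each register, and ask
// whether any use other than a given one references it -- modeled with
// standard containers instead of DenseMap/SmallBitVector. SimpleRegUses is a
// hypothetical name used only for this example.
#include <cstddef>
#include <map>
#include <string>
#include <vector>

struct SimpleRegUses {
  // Per register, one bit per use index that references it.
  std::map<std::string, std::vector<bool> > Uses;

  void countRegister(const std::string &Reg, size_t UseIdx) {
    std::vector<bool> &Bits = Uses[Reg];
    if (Bits.size() <= UseIdx)
      Bits.resize(UseIdx + 1);
    Bits[UseIdx] = true;
  }

  // True if some use other than UseIdx also references Reg.
  bool usedByOthers(const std::string &Reg, size_t UseIdx) const {
    std::map<std::string, std::vector<bool> >::const_iterator I =
        Uses.find(Reg);
    if (I == Uses.end())
      return false;
    for (size_t i = 0, e = I->second.size(); i != e; ++i)
      if (I->second[i] && i != UseIdx)
        return true;
    return false;
  }
};
// Usage: countRegister("i", 0); countRegister("i", 2);
// usedByOthers("i", 0) and usedByOthers("i", 1) are then both true.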
+
+namespace {
+
+/// Formula - This class holds information that describes a formula for
+/// computing a value satisfying a use. It may include broken-out immediates
+/// and scaled registers.
+struct Formula {
+ /// AM - This is used to represent complex addressing, as well as other kinds
+ /// of interesting uses.
+ TargetLowering::AddrMode AM;
+
+ /// BaseRegs - The list of "base" registers for this use. When this is
+ /// non-empty, AM.HasBaseReg should be set to true.
+ SmallVector<const SCEV *, 2> BaseRegs;
- void addIV(const SCEV *const Stride, const SCEV *const Base, PHINode *PHI) {
- IVs.push_back(IVExpr(Stride, Base, PHI));
+ /// ScaledReg - The 'scaled' register for this use. This should be non-null
+ /// when AM.Scale is not zero.
+ const SCEV *ScaledReg;
+
+ Formula() : ScaledReg(0) {}
+
+ void InitialMatch(const SCEV *S, Loop *L,
+ ScalarEvolution &SE, DominatorTree &DT);
+
+ unsigned getNumRegs() const;
+ const Type *getType() const;
+
+ bool referencesReg(const SCEV *S) const;
+ bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
+ const RegUseTracker &RegUses) const;
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+};
+
+}
+
+/// DoInitialMatch - Recursion helper for InitialMatch.
+static void DoInitialMatch(const SCEV *S, Loop *L,
+ SmallVectorImpl<const SCEV *> &Good,
+ SmallVectorImpl<const SCEV *> &Bad,
+ ScalarEvolution &SE, DominatorTree &DT) {
+ // Collect expressions which properly dominate the loop header.
+ if (S->properlyDominates(L->getHeader(), &DT)) {
+ Good.push_back(S);
+ return;
+ }
+
+ // Look at add operands.
+ if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
+ for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
+ I != E; ++I)
+ DoInitialMatch(*I, L, Good, Bad, SE, DT);
+ return;
+ }
+
+ // Look at addrec operands.
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
+ if (!AR->getStart()->isZero()) {
+ DoInitialMatch(AR->getStart(), L, Good, Bad, SE, DT);
+ DoInitialMatch(SE.getAddRecExpr(SE.getIntegerSCEV(0, AR->getType()),
+ AR->getStepRecurrence(SE),
+ AR->getLoop()),
+ L, Good, Bad, SE, DT);
+ return;
}
- };
- class LoopStrengthReduce : public LoopPass {
- IVUsers *IU;
- ScalarEvolution *SE;
- bool Changed;
-
- /// IVsByStride - Keep track of all IVs that have been inserted for a
- /// particular stride.
- std::map<const SCEV *, IVsOfOneStride> IVsByStride;
-
- /// DeadInsts - Keep track of instructions we may have made dead, so that
- /// we can remove them after we are done working.
- SmallVector<WeakVH, 16> DeadInsts;
-
- /// TLI - Keep a pointer of a TargetLowering to consult for determining
- /// transformation profitability.
- const TargetLowering *TLI;
-
- public:
- static char ID; // Pass ID, replacement for typeid
- explicit LoopStrengthReduce(const TargetLowering *tli = NULL) :
- LoopPass(&ID), TLI(tli) {}
-
- bool runOnLoop(Loop *L, LPPassManager &LPM);
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- // We split critical edges, so we change the CFG. However, we do update
- // many analyses if they are around.
- AU.addPreservedID(LoopSimplifyID);
- AU.addPreserved("loops");
- AU.addPreserved("domfrontier");
- AU.addPreserved("domtree");
-
- AU.addRequiredID(LoopSimplifyID);
- AU.addRequired<ScalarEvolution>();
- AU.addPreserved<ScalarEvolution>();
- AU.addRequired<IVUsers>();
- AU.addPreserved<IVUsers>();
+ // Handle a multiplication by -1 (negation) if it didn't fold.
+ if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
+ if (Mul->getOperand(0)->isAllOnesValue()) {
+ SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
+ const SCEV *NewMul = SE.getMulExpr(Ops);
+
+ SmallVector<const SCEV *, 4> MyGood;
+ SmallVector<const SCEV *, 4> MyBad;
+ DoInitialMatch(NewMul, L, MyGood, MyBad, SE, DT);
+ const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
+ SE.getEffectiveSCEVType(NewMul->getType())));
+ for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
+ E = MyGood.end(); I != E; ++I)
+ Good.push_back(SE.getMulExpr(NegOne, *I));
+ for (SmallVectorImpl<const SCEV *>::const_iterator I = MyBad.begin(),
+ E = MyBad.end(); I != E; ++I)
+ Bad.push_back(SE.getMulExpr(NegOne, *I));
+ return;
}
- private:
- void OptimizeIndvars(Loop *L);
-
- /// OptimizeLoopTermCond - Change loop terminating condition to use the
- /// postinc iv when possible.
- void OptimizeLoopTermCond(Loop *L);
-
- /// OptimizeShadowIV - If IV is used in a int-to-float cast
- /// inside the loop then try to eliminate the cast opeation.
- void OptimizeShadowIV(Loop *L);
-
- /// OptimizeMax - Rewrite the loop's terminating condition
- /// if it uses a max computation.
- ICmpInst *OptimizeMax(Loop *L, ICmpInst *Cond,
- IVStrideUse* &CondUse);
-
- /// OptimizeLoopCountIV - If, after all sharing of IVs, the IV used for
- /// deciding when to exit the loop is used only for that purpose, try to
- /// rearrange things so it counts down to a test against zero.
- bool OptimizeLoopCountIV(Loop *L);
- bool OptimizeLoopCountIVOfStride(const SCEV* &Stride,
- IVStrideUse* &CondUse, Loop *L);
-
- /// StrengthReduceIVUsersOfStride - Strength reduce all of the users of a
- /// single stride of IV. All of the users may have different starting
- /// values, and this may not be the only stride.
- void StrengthReduceIVUsersOfStride(const SCEV *Stride,
- IVUsersOfOneStride &Uses,
- Loop *L);
- void StrengthReduceIVUsers(Loop *L);
-
- ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond,
- IVStrideUse* &CondUse,
- const SCEV* &CondStride,
- bool PostPass = false);
-
- bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
- const SCEV* &CondStride);
- bool RequiresTypeConversion(const Type *Ty, const Type *NewTy);
- const SCEV *CheckForIVReuse(bool, bool, bool, const SCEV *,
- IVExpr&, const Type*,
- const std::vector<BasedUser>& UsersToProcess);
- bool ValidScale(bool, int64_t,
- const std::vector<BasedUser>& UsersToProcess);
- bool ValidOffset(bool, int64_t, int64_t,
- const std::vector<BasedUser>& UsersToProcess);
- const SCEV *CollectIVUsers(const SCEV *Stride,
- IVUsersOfOneStride &Uses,
- Loop *L,
- bool &AllUsesAreAddresses,
- bool &AllUsesAreOutsideLoop,
- std::vector<BasedUser> &UsersToProcess);
- bool StrideMightBeShared(const SCEV *Stride, Loop *L, bool CheckPreInc);
- bool ShouldUseFullStrengthReductionMode(
- const std::vector<BasedUser> &UsersToProcess,
- const Loop *L,
- bool AllUsesAreAddresses,
- const SCEV *Stride);
- void PrepareToStrengthReduceFully(
- std::vector<BasedUser> &UsersToProcess,
- const SCEV *Stride,
- const SCEV *CommonExprs,
- const Loop *L,
- SCEVExpander &PreheaderRewriter);
- void PrepareToStrengthReduceFromSmallerStride(
- std::vector<BasedUser> &UsersToProcess,
- Value *CommonBaseV,
- const IVExpr &ReuseIV,
- Instruction *PreInsertPt);
- void PrepareToStrengthReduceWithNewPhi(
- std::vector<BasedUser> &UsersToProcess,
- const SCEV *Stride,
- const SCEV *CommonExprs,
- Value *CommonBaseV,
- Instruction *IVIncInsertPt,
- const Loop *L,
- SCEVExpander &PreheaderRewriter);
-
- void DeleteTriviallyDeadInstructions();
- };
+ // Ok, we can't do anything interesting. Just stuff the whole thing into a
+ // register and hope for the best.
+ Bad.push_back(S);
}
-char LoopStrengthReduce::ID = 0;
-static RegisterPass<LoopStrengthReduce>
-X("loop-reduce", "Loop Strength Reduction");
+/// InitialMatch - Incorporate loop-variant parts of S into this Formula,
+/// attempting to keep all loop-invariant and loop-computable values in a
+/// single base register.
+void Formula::InitialMatch(const SCEV *S, Loop *L,
+ ScalarEvolution &SE, DominatorTree &DT) {
+ SmallVector<const SCEV *, 4> Good;
+ SmallVector<const SCEV *, 4> Bad;
+ DoInitialMatch(S, L, Good, Bad, SE, DT);
+ if (!Good.empty()) {
+ BaseRegs.push_back(SE.getAddExpr(Good));
+ AM.HasBaseReg = true;
+ }
+ if (!Bad.empty()) {
+ BaseRegs.push_back(SE.getAddExpr(Bad));
+ AM.HasBaseReg = true;
+ }
+}
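
// Illustrative sketch (not part of the patch): InitialMatch splits the
// expression into parts that properly dominate the loop header (and so can
// live in a loop-invariant base register) and the rest, then collapses each
// group into a single base register. Here the split is reduced to
// partitioning a flat list of terms by a boolean flag; Term and initialMatch
// are hypothetical stand-ins for SCEV operands and the routine above.
#include <cstddef>
#include <vector>

struct Term { int Value; bool LoopInvariant; };

// Returns up to two "base registers": the sum of the invariant terms and the
// sum of the remaining terms, mirroring the two BaseRegs pushed above.
std::vector<int> initialMatch(const std::vector<Term> &Terms) {
  int Good = 0, Bad = 0;
  bool HasGood = false, HasBad = false;
  for (size_t i = 0, e = Terms.size(); i != e; ++i) {
    if (Terms[i].LoopInvariant) { Good += Terms[i].Value; HasGood = true; }
    else                        { Bad  += Terms[i].Value; HasBad  = true; }
  }
  std::vector<int> BaseRegs;
  if (HasGood) BaseRegs.push_back(Good);
  if (HasBad)  BaseRegs.push_back(Bad);
  return BaseRegs;
}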
-Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
- return new LoopStrengthReduce(TLI);
+/// getNumRegs - Return the total number of register operands used by this
+/// formula. This does not include register uses implied by non-constant
+/// addrec strides.
+unsigned Formula::getNumRegs() const {
+ return !!ScaledReg + BaseRegs.size();
}
-/// DeleteTriviallyDeadInstructions - If any of the instructions is the
-/// specified set are trivially dead, delete them and see if this makes any of
-/// their operands subsequently dead.
-void LoopStrengthReduce::DeleteTriviallyDeadInstructions() {
- while (!DeadInsts.empty()) {
- Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());
+/// getType - Return the type of this formula, if it has one, or null
+/// otherwise. This type is meaningless except for the bit size.
+const Type *Formula::getType() const {
+ return !BaseRegs.empty() ? BaseRegs.front()->getType() :
+ ScaledReg ? ScaledReg->getType() :
+ AM.BaseGV ? AM.BaseGV->getType() :
+ 0;
+}
- if (I == 0 || !isInstructionTriviallyDead(I))
- continue;
+/// referencesReg - Test if this formula references the given register.
+bool Formula::referencesReg(const SCEV *S) const {
+ return S == ScaledReg ||
+ std::find(BaseRegs.begin(), BaseRegs.end(), S) != BaseRegs.end();
+}
- for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
- if (Instruction *U = dyn_cast<Instruction>(*OI)) {
- *OI = 0;
- if (U->use_empty())
- DeadInsts.push_back(U);
+/// hasRegsUsedByUsesOtherThan - Test whether this formula uses registers
+/// which are used by uses other than the use with the given index.
+bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
+ const RegUseTracker &RegUses) const {
+ if (ScaledReg)
+ if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
+ return true;
+ for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
+ E = BaseRegs.end(); I != E; ++I)
+ if (RegUses.isRegUsedByUsesOtherThan(*I, LUIdx))
+ return true;
+ return false;
+}
+
+void Formula::print(raw_ostream &OS) const {
+ bool First = true;
+ if (AM.BaseGV) {
+ if (!First) OS << " + "; else First = false;
+ WriteAsOperand(OS, AM.BaseGV, /*PrintType=*/false);
+ }
+ if (AM.BaseOffs != 0) {
+ if (!First) OS << " + "; else First = false;
+ OS << AM.BaseOffs;
+ }
+ for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
+ E = BaseRegs.end(); I != E; ++I) {
+ if (!First) OS << " + "; else First = false;
+ OS << "reg(" << **I << ')';
+ }
+ if (AM.Scale != 0) {
+ if (!First) OS << " + "; else First = false;
+ OS << AM.Scale << "*reg(";
+ if (ScaledReg)
+ OS << *ScaledReg;
+ else
+ OS << "<unknown>";
+ OS << ')';
+ }
+}
+
+void Formula::dump() const {
+ print(errs()); errs() << '\n';
+}
+
+/// getSDiv - Return an expression for LHS /s RHS, if it can be determined,
+/// or null otherwise. If IgnoreSignificantBits is true, expressions like
+/// (X * Y) /s Y are simplified to X, ignoring that the multiplication may
+/// overflow, which is useful when the result will be used in a context where
+/// the most significant bits are ignored.
+static const SCEV *getSDiv(const SCEV *LHS, const SCEV *RHS,
+ ScalarEvolution &SE,
+ bool IgnoreSignificantBits = false) {
+ // Handle the trivial case, which works for any SCEV type.
+ if (LHS == RHS)
+ return SE.getIntegerSCEV(1, LHS->getType());
+
+ // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do some
+ // folding.
+ if (RHS->isAllOnesValue())
+ return SE.getMulExpr(LHS, RHS);
+
+ // Check for a division of a constant by a constant.
+ if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
+ const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
+ if (!RC)
+ return 0;
+ if (C->getValue()->getValue().srem(RC->getValue()->getValue()) != 0)
+ return 0;
+ return SE.getConstant(C->getValue()->getValue()
+ .sdiv(RC->getValue()->getValue()));
+ }
+
+ // Distribute the sdiv over addrec operands.
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
+ const SCEV *Start = getSDiv(AR->getStart(), RHS, SE,
+ IgnoreSignificantBits);
+ if (!Start) return 0;
+ const SCEV *Step = getSDiv(AR->getStepRecurrence(SE), RHS, SE,
+ IgnoreSignificantBits);
+ if (!Step) return 0;
+ return SE.getAddRecExpr(Start, Step, AR->getLoop());
+ }
+
+ // Distribute the sdiv over add operands.
+ if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
+ SmallVector<const SCEV *, 8> Ops;
+ for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
+ I != E; ++I) {
+ const SCEV *Op = getSDiv(*I, RHS, SE,
+ IgnoreSignificantBits);
+ if (!Op) return 0;
+ Ops.push_back(Op);
+ }
+ return SE.getAddExpr(Ops);
+ }
+
+ // Check for a multiply operand that we can pull RHS out of.
+ if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS))
+ if (IgnoreSignificantBits || Mul->hasNoSignedWrap()) {
+ SmallVector<const SCEV *, 4> Ops;
+ bool Found = false;
+ for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
+ I != E; ++I) {
+ if (!Found)
+ if (const SCEV *Q = getSDiv(*I, RHS, SE, IgnoreSignificantBits)) {
+ Ops.push_back(Q);
+ Found = true;
+ continue;
+ }
+ Ops.push_back(*I);
}
+ return Found ? SE.getMulExpr(Ops) : 0;
+ }
- I->eraseFromParent();
- Changed = true;
+ // Otherwise we don't know.
+ return 0;
+}
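
// Illustrative sketch (not part of the patch): the constant and addrec cases
// of getSDiv, with a hypothetical Affine struct {Start, Step} standing in for
// a SCEVAddRecExpr and a bool return standing in for the null-on-failure
// convention above.
#include <cstdint>

struct Affine { int64_t Start; int64_t Step; };  // models {Start,+,Step}

static bool exactSDiv(int64_t LHS, int64_t RHS, int64_t &Out) {
  if (RHS == 0 || LHS % RHS != 0)  // same role as the srem exactness check
    return false;
  Out = LHS / RHS;
  return true;
}

// {Start,+,Step} /s RHS succeeds only if both components divide exactly, just
// as getSDiv requires both the start and the step to divide.
static bool exactSDiv(const Affine &LHS, int64_t RHS, Affine &Out) {
  return exactSDiv(LHS.Start, RHS, Out.Start) &&
         exactSDiv(LHS.Step, RHS, Out.Step);
}
// Example: {4,+,8} /s 4 yields {1,+,2}; {4,+,6} /s 4 fails because 6 % 4 != 0.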
+
+/// ExtractImmediate - If S involves the addition of a constant integer value,
+/// return that integer value, and mutate S to point to a new SCEV with that
+/// value excluded.
+static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
+ if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
+ if (C->getValue()->getValue().getMinSignedBits() <= 64) {
+ S = SE.getIntegerSCEV(0, C->getType());
+ return C->getValue()->getSExtValue();
+ }
+ } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
+ SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
+ int64_t Result = ExtractImmediate(NewOps.front(), SE);
+ S = SE.getAddExpr(NewOps);
+ return Result;
+ } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+ SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
+ int64_t Result = ExtractImmediate(NewOps.front(), SE);
+ S = SE.getAddRecExpr(NewOps, AR->getLoop());
+ return Result;
+ }
+ return 0;
+}
+
+/// ExtractSymbol - If S involves the addition of a GlobalValue address,
+/// return that symbol, and mutate S to point to a new SCEV with that
+/// value excluded.
+static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
+ if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
+ if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
+ S = SE.getIntegerSCEV(0, GV->getType());
+ return GV;
+ }
+ } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
+ SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
+ GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
+ S = SE.getAddExpr(NewOps);
+ return Result;
+ } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+ SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
+ GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
+ S = SE.getAddRecExpr(NewOps, AR->getLoop());
+ return Result;
}
+ return 0;
}
/// isAddressUse - Returns true if the specified instruction is using the
@@ -276,1775 +503,832 @@ static const Type *getAccessType(const Instruction *Inst) {
break;
}
}
- return AccessTy;
-}
-
-namespace {
- /// BasedUser - For a particular base value, keep information about how we've
- /// partitioned the expression so far.
- struct BasedUser {
- /// Base - The Base value for the PHI node that needs to be inserted for
- /// this use. As the use is processed, information gets moved from this
- /// field to the Imm field (below). BasedUser values are sorted by this
- /// field.
- const SCEV *Base;
-
- /// Inst - The instruction using the induction variable.
- Instruction *Inst;
-
- /// OperandValToReplace - The operand value of Inst to replace with the
- /// EmittedBase.
- Value *OperandValToReplace;
-
- /// Imm - The immediate value that should be added to the base immediately
- /// before Inst, because it will be folded into the imm field of the
- /// instruction. This is also sometimes used for loop-variant values that
- /// must be added inside the loop.
- const SCEV *Imm;
-
- /// Phi - The induction variable that performs the striding that
- /// should be used for this user.
- PHINode *Phi;
-
- // isUseOfPostIncrementedValue - True if this should use the
- // post-incremented version of this IV, not the preincremented version.
- // This can only be set in special cases, such as the terminating setcc
- // instruction for a loop and uses outside the loop that are dominated by
- // the loop.
- bool isUseOfPostIncrementedValue;
-
- BasedUser(IVStrideUse &IVSU, ScalarEvolution *se)
- : Base(IVSU.getOffset()), Inst(IVSU.getUser()),
- OperandValToReplace(IVSU.getOperandValToReplace()),
- Imm(se->getIntegerSCEV(0, Base->getType())),
- isUseOfPostIncrementedValue(IVSU.isUseOfPostIncrementedValue()) {}
-
- // Once we rewrite the code to insert the new IVs we want, update the
- // operands of Inst to use the new expression 'NewBase', with 'Imm' added
- // to it.
- void RewriteInstructionToUseNewBase(const SCEV *NewBase,
- Instruction *InsertPt,
- SCEVExpander &Rewriter, Loop *L, Pass *P,
- SmallVectorImpl<WeakVH> &DeadInsts,
- ScalarEvolution *SE);
-
- Value *InsertCodeForBaseAtPosition(const SCEV *NewBase,
- const Type *Ty,
- SCEVExpander &Rewriter,
- Instruction *IP,
- ScalarEvolution *SE);
- void dump() const;
- };
-}
-
-void BasedUser::dump() const {
- dbgs() << " Base=" << *Base;
- dbgs() << " Imm=" << *Imm;
- dbgs() << " Inst: " << *Inst;
-}
-
-Value *BasedUser::InsertCodeForBaseAtPosition(const SCEV *NewBase,
- const Type *Ty,
- SCEVExpander &Rewriter,
- Instruction *IP,
- ScalarEvolution *SE) {
- Value *Base = Rewriter.expandCodeFor(NewBase, 0, IP);
- // Wrap the base in a SCEVUnknown so that ScalarEvolution doesn't try to
- // re-analyze it.
- const SCEV *NewValSCEV = SE->getUnknown(Base);
+ // All pointers have the same requirements, so canonicalize them to an
+ // arbitrary pointer type to minimize variation.
+ if (const PointerType *PTy = dyn_cast<PointerType>(AccessTy))
+ AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
+ PTy->getAddressSpace());
- // Always emit the immediate into the same block as the user.
- NewValSCEV = SE->getAddExpr(NewValSCEV, Imm);
-
- return Rewriter.expandCodeFor(NewValSCEV, Ty, IP);
+ return AccessTy;
}
+/// DeleteTriviallyDeadInstructions - If any of the instructions in the
+/// specified set are trivially dead, delete them and see if this makes any of
+/// their operands subsequently dead.
+static bool
+DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
+ bool Changed = false;
-// Once we rewrite the code to insert the new IVs we want, update the
-// operands of Inst to use the new expression 'NewBase', with 'Imm' added
-// to it. NewBasePt is the last instruction which contributes to the
-// value of NewBase in the case that it's a diffferent instruction from
-// the PHI that NewBase is computed from, or null otherwise.
-//
-void BasedUser::RewriteInstructionToUseNewBase(const SCEV *NewBase,
- Instruction *NewBasePt,
- SCEVExpander &Rewriter, Loop *L, Pass *P,
- SmallVectorImpl<WeakVH> &DeadInsts,
- ScalarEvolution *SE) {
- if (!isa<PHINode>(Inst)) {
- // By default, insert code at the user instruction.
- BasicBlock::iterator InsertPt = Inst;
-
- // However, if the Operand is itself an instruction, the (potentially
- // complex) inserted code may be shared by many users. Because of this, we
- // want to emit code for the computation of the operand right before its old
- // computation. This is usually safe, because we obviously used to use the
- // computation when it was computed in its current block. However, in some
- // cases (e.g. use of a post-incremented induction variable) the NewBase
- // value will be pinned to live somewhere after the original computation.
- // In this case, we have to back off.
- //
- // If this is a use outside the loop (which means after, since it is based
- // on a loop indvar) we use the post-incremented value, so that we don't
- // artificially make the preinc value live out the bottom of the loop.
- if (!isUseOfPostIncrementedValue && L->contains(Inst)) {
- if (NewBasePt && isa<PHINode>(OperandValToReplace)) {
- InsertPt = NewBasePt;
- ++InsertPt;
- } else if (Instruction *OpInst
- = dyn_cast<Instruction>(OperandValToReplace)) {
- InsertPt = OpInst;
- while (isa<PHINode>(InsertPt)) ++InsertPt;
- }
- }
- Value *NewVal = InsertCodeForBaseAtPosition(NewBase,
- OperandValToReplace->getType(),
- Rewriter, InsertPt, SE);
- // Replace the use of the operand Value with the new Phi we just created.
- Inst->replaceUsesOfWith(OperandValToReplace, NewVal);
-
- DEBUG(dbgs() << " Replacing with ");
- DEBUG(WriteAsOperand(dbgs(), NewVal, /*PrintType=*/false));
- DEBUG(dbgs() << ", which has value " << *NewBase << " plus IMM "
- << *Imm << "\n");
- return;
- }
+ while (!DeadInsts.empty()) {
+ Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());
- // PHI nodes are more complex. We have to insert one copy of the NewBase+Imm
- // expression into each operand block that uses it. Note that PHI nodes can
- // have multiple entries for the same predecessor. We use a map to make sure
- // that a PHI node only has a single Value* for each predecessor (which also
- // prevents us from inserting duplicate code in some blocks).
- DenseMap<BasicBlock*, Value*> InsertedCode;
- PHINode *PN = cast<PHINode>(Inst);
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
- if (PN->getIncomingValue(i) == OperandValToReplace) {
- // If the original expression is outside the loop, put the replacement
- // code in the same place as the original expression,
- // which need not be an immediate predecessor of this PHI. This way we
- // need only one copy of it even if it is referenced multiple times in
- // the PHI. We don't do this when the original expression is inside the
- // loop because multiple copies sometimes do useful sinking of code in
- // that case(?).
- Instruction *OldLoc = dyn_cast<Instruction>(OperandValToReplace);
- BasicBlock *PHIPred = PN->getIncomingBlock(i);
- if (L->contains(OldLoc)) {
- // If this is a critical edge, split the edge so that we do not insert
- // the code on all predecessor/successor paths. We do this unless this
- // is the canonical backedge for this loop, as this can make some
- // inserted code be in an illegal position.
- if (e != 1 && PHIPred->getTerminator()->getNumSuccessors() > 1 &&
- !isa<IndirectBrInst>(PHIPred->getTerminator()) &&
- (PN->getParent() != L->getHeader() || !L->contains(PHIPred))) {
-
- // First step, split the critical edge.
- BasicBlock *NewBB = SplitCriticalEdge(PHIPred, PN->getParent(),
- P, false);
-
- // Next step: move the basic block. In particular, if the PHI node
- // is outside of the loop, and PredTI is in the loop, we want to
- // move the block to be immediately before the PHI block, not
- // immediately after PredTI.
- if (L->contains(PHIPred) && !L->contains(PN))
- NewBB->moveBefore(PN->getParent());
+ if (I == 0 || !isInstructionTriviallyDead(I))
+ continue;
- // Splitting the edge can reduce the number of PHI entries we have.
- e = PN->getNumIncomingValues();
- PHIPred = NewBB;
- i = PN->getBasicBlockIndex(PHIPred);
- }
- }
- Value *&Code = InsertedCode[PHIPred];
- if (!Code) {
- // Insert the code into the end of the predecessor block.
- Instruction *InsertPt = (L->contains(OldLoc)) ?
- PHIPred->getTerminator() :
- OldLoc->getParent()->getTerminator();
- Code = InsertCodeForBaseAtPosition(NewBase, PN->getType(),
- Rewriter, InsertPt, SE);
-
- DEBUG(dbgs() << " Changing PHI use to ");
- DEBUG(WriteAsOperand(dbgs(), Code, /*PrintType=*/false));
- DEBUG(dbgs() << ", which has value " << *NewBase << " plus IMM "
- << *Imm << "\n");
+ for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
+ if (Instruction *U = dyn_cast<Instruction>(*OI)) {
+ *OI = 0;
+ if (U->use_empty())
+ DeadInsts.push_back(U);
}
- // Replace the use of the operand Value with the new Phi we just created.
- PN->setIncomingValue(i, Code);
- Rewriter.clear();
- }
+ I->eraseFromParent();
+ Changed = true;
}
- // PHI node might have become a constant value after SplitCriticalEdge.
- DeadInsts.push_back(Inst);
+ return Changed;
}
+namespace {
-/// fitsInAddressMode - Return true if V can be subsumed within an addressing
-/// mode, and does not need to be put in a register first.
-static bool fitsInAddressMode(const SCEV *V, const Type *AccessTy,
- const TargetLowering *TLI, bool HasBaseReg) {
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(V)) {
- int64_t VC = SC->getValue()->getSExtValue();
- if (TLI) {
- TargetLowering::AddrMode AM;
- AM.BaseOffs = VC;
- AM.HasBaseReg = HasBaseReg;
- return TLI->isLegalAddressingMode(AM, AccessTy);
- } else {
- // Defaults to PPC. PPC allows a sign-extended 16-bit immediate field.
- return (VC > -(1 << 16) && VC < (1 << 16)-1);
- }
- }
-
- if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
- if (GlobalValue *GV = dyn_cast<GlobalValue>(SU->getValue())) {
- if (TLI) {
- TargetLowering::AddrMode AM;
- AM.BaseGV = GV;
- AM.HasBaseReg = HasBaseReg;
- return TLI->isLegalAddressingMode(AM, AccessTy);
- } else {
- // Default: assume global addresses are not legal.
- }
- }
+/// Cost - This class is used to measure and compare candidate formulae.
+class Cost {
+ /// TODO: Some of these could be merged. Also, a lexical ordering
+ /// isn't always optimal.
+ unsigned NumRegs;
+ unsigned AddRecCost;
+ unsigned NumIVMuls;
+ unsigned NumBaseAdds;
+ unsigned ImmCost;
+ unsigned SetupCost;
+
+public:
+ Cost()
+ : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0),
+ SetupCost(0) {}
+
+ unsigned getNumRegs() const { return NumRegs; }
+
+ bool operator<(const Cost &Other) const;
+
+ void Loose();
+
+ void RateFormula(const Formula &F,
+ SmallPtrSet<const SCEV *, 16> &Regs,
+ const DenseSet<const SCEV *> &VisitedRegs,
+ const Loop *L,
+ const SmallVectorImpl<int64_t> &Offsets,
+ ScalarEvolution &SE, DominatorTree &DT);
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+
+private:
+ void RateRegister(const SCEV *Reg,
+ SmallPtrSet<const SCEV *, 16> &Regs,
+ const Loop *L,
+ ScalarEvolution &SE, DominatorTree &DT);
+ void RatePrimaryRegister(const SCEV *Reg,
+ SmallPtrSet<const SCEV *, 16> &Regs,
+ const Loop *L,
+ ScalarEvolution &SE, DominatorTree &DT);
+};
- return false;
}
-/// MoveLoopVariantsToImmediateField - Move any subexpressions from Val that are
-/// loop varying to the Imm operand.
-static void MoveLoopVariantsToImmediateField(const SCEV *&Val, const SCEV *&Imm,
- Loop *L, ScalarEvolution *SE) {
- if (Val->isLoopInvariant(L)) return; // Nothing to do.
-
- if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
- SmallVector<const SCEV *, 4> NewOps;
- NewOps.reserve(SAE->getNumOperands());
+/// RateRegister - Tally up interesting quantities from the given register.
+void Cost::RateRegister(const SCEV *Reg,
+ SmallPtrSet<const SCEV *, 16> &Regs,
+ const Loop *L,
+ ScalarEvolution &SE, DominatorTree &DT) {
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
+ if (AR->getLoop() == L)
+ AddRecCost += 1; /// TODO: This should be a function of the stride.
+
+ // If this is an addrec for a loop that's already been visited by LSR,
+ // don't second-guess its addrec phi nodes. LSR isn't currently smart
+ // enough to reason about more than one loop at a time. Consider these
+ // registers free and leave them alone.
+ else if (L->contains(AR->getLoop()) ||
+ (!AR->getLoop()->contains(L) &&
+ DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))) {
+ for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin();
+ PHINode *PN = dyn_cast<PHINode>(I); ++I)
+ if (SE.isSCEVable(PN->getType()) &&
+ (SE.getEffectiveSCEVType(PN->getType()) ==
+ SE.getEffectiveSCEVType(AR->getType())) &&
+ SE.getSCEV(PN) == AR)
+ return;
- for (unsigned i = 0; i != SAE->getNumOperands(); ++i)
- if (!SAE->getOperand(i)->isLoopInvariant(L)) {
- // If this is a loop-variant expression, it must stay in the immediate
- // field of the expression.
- Imm = SE->getAddExpr(Imm, SAE->getOperand(i));
- } else {
- NewOps.push_back(SAE->getOperand(i));
- }
+ // If this isn't one of the addrecs that the loop already has, it
+ // would require a costly new phi and add. TODO: This isn't
+ // precisely modeled right now.
+ ++NumBaseAdds;
+ if (!Regs.count(AR->getStart()))
+ RateRegister(AR->getStart(), Regs, L, SE, DT);
+ }
- if (NewOps.empty())
- Val = SE->getIntegerSCEV(0, Val->getType());
- else
- Val = SE->getAddExpr(NewOps);
- } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
- // Try to pull immediates out of the start value of nested addrec's.
- const SCEV *Start = SARE->getStart();
- MoveLoopVariantsToImmediateField(Start, Imm, L, SE);
-
- SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
- Ops[0] = Start;
- Val = SE->getAddRecExpr(Ops, SARE->getLoop());
- } else {
- // Otherwise, all of Val is variant, move the whole thing over.
- Imm = SE->getAddExpr(Imm, Val);
- Val = SE->getIntegerSCEV(0, Val->getType());
+ // Add the step value register, if it needs one.
+ // TODO: The non-affine case isn't precisely modeled here.
+ if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1)))
+ if (!Regs.count(AR->getStart()))
+ RateRegister(AR->getOperand(1), Regs, L, SE, DT);
}
+ ++NumRegs;
+
+ // Rough heuristic; favor registers which don't require extra setup
+ // instructions in the preheader.
+ if (!isa<SCEVUnknown>(Reg) &&
+ !isa<SCEVConstant>(Reg) &&
+ !(isa<SCEVAddRecExpr>(Reg) &&
+ (isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) ||
+ isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart()))))
+ ++SetupCost;
}
+/// RatePrimaryRegister - Record this register in the set. If we haven't seen it
+/// before, rate it.
+void Cost::RatePrimaryRegister(const SCEV *Reg,
+ SmallPtrSet<const SCEV *, 16> &Regs,
+ const Loop *L,
+ ScalarEvolution &SE, DominatorTree &DT) {
+ if (Regs.insert(Reg))
+ RateRegister(Reg, Regs, L, SE, DT);
+}
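
// Illustrative sketch (not part of the patch): the "rate each register only
// the first time it is seen" idiom, relying on set insertion reporting whether
// the element was new (SmallPtrSet::insert above, std::set::insert here).
// rateOnce is a hypothetical name.
#include <set>
#include <string>

static void rateOnce(const std::string &Reg, std::set<std::string> &Seen,
                     unsigned &NumRated) {
  if (Seen.insert(Reg).second)  // true only on the first insertion
    ++NumRated;                 // stands in for RateRegister(...)
}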
-/// MoveImmediateValues - Look at Val, and pull out any additions of constants
-/// that can fit into the immediate field of instructions in the target.
-/// Accumulate these immediate values into the Imm value.
-static void MoveImmediateValues(const TargetLowering *TLI,
- const Type *AccessTy,
- const SCEV *&Val, const SCEV *&Imm,
- bool isAddress, Loop *L,
- ScalarEvolution *SE) {
- if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
- SmallVector<const SCEV *, 4> NewOps;
- NewOps.reserve(SAE->getNumOperands());
-
- for (unsigned i = 0; i != SAE->getNumOperands(); ++i) {
- const SCEV *NewOp = SAE->getOperand(i);
- MoveImmediateValues(TLI, AccessTy, NewOp, Imm, isAddress, L, SE);
-
- if (!NewOp->isLoopInvariant(L)) {
- // If this is a loop-variant expression, it must stay in the immediate
- // field of the expression.
- Imm = SE->getAddExpr(Imm, NewOp);
- } else {
- NewOps.push_back(NewOp);
- }
- }
-
- if (NewOps.empty())
- Val = SE->getIntegerSCEV(0, Val->getType());
- else
- Val = SE->getAddExpr(NewOps);
- return;
- } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
- // Try to pull immediates out of the start value of nested addrec's.
- const SCEV *Start = SARE->getStart();
- MoveImmediateValues(TLI, AccessTy, Start, Imm, isAddress, L, SE);
-
- if (Start != SARE->getStart()) {
- SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
- Ops[0] = Start;
- Val = SE->getAddRecExpr(Ops, SARE->getLoop());
+void Cost::RateFormula(const Formula &F,
+ SmallPtrSet<const SCEV *, 16> &Regs,
+ const DenseSet<const SCEV *> &VisitedRegs,
+ const Loop *L,
+ const SmallVectorImpl<int64_t> &Offsets,
+ ScalarEvolution &SE, DominatorTree &DT) {
+ // Tally up the registers.
+ if (const SCEV *ScaledReg = F.ScaledReg) {
+ if (VisitedRegs.count(ScaledReg)) {
+ Loose();
+ return;
}
- return;
- } else if (const SCEVMulExpr *SME = dyn_cast<SCEVMulExpr>(Val)) {
- // Transform "8 * (4 + v)" -> "32 + 8*V" if "32" fits in the immed field.
- if (isAddress &&
- fitsInAddressMode(SME->getOperand(0), AccessTy, TLI, false) &&
- SME->getNumOperands() == 2 && SME->isLoopInvariant(L)) {
-
- const SCEV *SubImm = SE->getIntegerSCEV(0, Val->getType());
- const SCEV *NewOp = SME->getOperand(1);
- MoveImmediateValues(TLI, AccessTy, NewOp, SubImm, isAddress, L, SE);
-
- // If we extracted something out of the subexpressions, see if we can
- // simplify this!
- if (NewOp != SME->getOperand(1)) {
- // Scale SubImm up by "8". If the result is a target constant, we are
- // good.
- SubImm = SE->getMulExpr(SubImm, SME->getOperand(0));
- if (fitsInAddressMode(SubImm, AccessTy, TLI, false)) {
- // Accumulate the immediate.
- Imm = SE->getAddExpr(Imm, SubImm);
-
- // Update what is left of 'Val'.
- Val = SE->getMulExpr(SME->getOperand(0), NewOp);
- return;
- }
- }
+ RatePrimaryRegister(ScaledReg, Regs, L, SE, DT);
+ }
+ for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
+ E = F.BaseRegs.end(); I != E; ++I) {
+ const SCEV *BaseReg = *I;
+ if (VisitedRegs.count(BaseReg)) {
+ Loose();
+ return;
}
+ RatePrimaryRegister(BaseReg, Regs, L, SE, DT);
+
+ NumIVMuls += isa<SCEVMulExpr>(BaseReg) &&
+ BaseReg->hasComputableLoopEvolution(L);
}
- // Loop-variant expressions must stay in the immediate field of the
- // expression.
- if ((isAddress && fitsInAddressMode(Val, AccessTy, TLI, false)) ||
- !Val->isLoopInvariant(L)) {
- Imm = SE->getAddExpr(Imm, Val);
- Val = SE->getIntegerSCEV(0, Val->getType());
- return;
+ if (F.BaseRegs.size() > 1)
+ NumBaseAdds += F.BaseRegs.size() - 1;
+
+ // Tally up the non-zero immediates.
+ for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
+ E = Offsets.end(); I != E; ++I) {
+ int64_t Offset = (uint64_t)*I + F.AM.BaseOffs;
+ if (F.AM.BaseGV)
+ ImmCost += 64; // Handle symbolic values conservatively.
+ // TODO: This should probably be the pointer size.
+ else if (Offset != 0)
+ ImmCost += APInt(64, Offset, true).getMinSignedBits();
}
+}
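
// Illustrative sketch (not part of the patch): what
// APInt(64, Offset, true).getMinSignedBits() contributes to ImmCost above --
// the smallest two's-complement width that can hold the combined offset.
// minSignedBits is a hypothetical standalone equivalent.
#include <cstdint>

static unsigned minSignedBits(int64_t V) {
  for (unsigned Bits = 1; Bits < 64; ++Bits) {
    int64_t Lo = -(int64_t(1) << (Bits - 1));
    int64_t Hi = (int64_t(1) << (Bits - 1)) - 1;
    if (V >= Lo && V <= Hi)
      return Bits;
  }
  return 64;
}
// Examples: minSignedBits(0) == 1, minSignedBits(127) == 8,
// minSignedBits(-128) == 8, minSignedBits(128) == 9.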
- // Otherwise, no immediates to move.
+/// Loose - Set this cost to a losing value.
+void Cost::Loose() {
+ NumRegs = ~0u;
+ AddRecCost = ~0u;
+ NumIVMuls = ~0u;
+ NumBaseAdds = ~0u;
+ ImmCost = ~0u;
+ SetupCost = ~0u;
}
-static void MoveImmediateValues(const TargetLowering *TLI,
- Instruction *User,
- const SCEV *&Val, const SCEV *&Imm,
- bool isAddress, Loop *L,
- ScalarEvolution *SE) {
- const Type *AccessTy = getAccessType(User);
- MoveImmediateValues(TLI, AccessTy, Val, Imm, isAddress, L, SE);
+/// operator< - Choose the lower cost.
+bool Cost::operator<(const Cost &Other) const {
+ if (NumRegs != Other.NumRegs)
+ return NumRegs < Other.NumRegs;
+ if (AddRecCost != Other.AddRecCost)
+ return AddRecCost < Other.AddRecCost;
+ if (NumIVMuls != Other.NumIVMuls)
+ return NumIVMuls < Other.NumIVMuls;
+ if (NumBaseAdds != Other.NumBaseAdds)
+ return NumBaseAdds < Other.NumBaseAdds;
+ if (ImmCost != Other.ImmCost)
+ return ImmCost < Other.ImmCost;
+ if (SetupCost != Other.SetupCost)
+ return SetupCost < Other.SetupCost;
+ return false;
}
-/// SeparateSubExprs - Decompose Expr into all of the subexpressions that are
-/// added together. This is used to reassociate common addition subexprs
-/// together for maximal sharing when rewriting bases.
-static void SeparateSubExprs(SmallVector<const SCEV *, 16> &SubExprs,
- const SCEV *Expr,
- ScalarEvolution *SE) {
- if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
- for (unsigned j = 0, e = AE->getNumOperands(); j != e; ++j)
- SeparateSubExprs(SubExprs, AE->getOperand(j), SE);
- } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Expr)) {
- const SCEV *Zero = SE->getIntegerSCEV(0, Expr->getType());
- if (SARE->getOperand(0) == Zero) {
- SubExprs.push_back(Expr);
- } else {
- // Compute the addrec with zero as its base.
- SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
- Ops[0] = Zero; // Start with zero base.
- SubExprs.push_back(SE->getAddRecExpr(Ops, SARE->getLoop()));
+void Cost::print(raw_ostream &OS) const {
+ OS << NumRegs << " reg" << (NumRegs == 1 ? "" : "s");
+ if (AddRecCost != 0)
+ OS << ", with addrec cost " << AddRecCost;
+ if (NumIVMuls != 0)
+ OS << ", plus " << NumIVMuls << " IV mul" << (NumIVMuls == 1 ? "" : "s");
+ if (NumBaseAdds != 0)
+ OS << ", plus " << NumBaseAdds << " base add"
+ << (NumBaseAdds == 1 ? "" : "s");
+ if (ImmCost != 0)
+ OS << ", plus " << ImmCost << " imm cost";
+ if (SetupCost != 0)
+ OS << ", plus " << SetupCost << " setup cost";
+}
+void Cost::dump() const {
+ print(errs()); errs() << '\n';
+}
- SeparateSubExprs(SubExprs, SARE->getOperand(0), SE);
- }
- } else if (!Expr->isZero()) {
- // Do not add zero.
- SubExprs.push_back(Expr);
- }
-}
-
-// This is logically local to the following function, but C++ says we have
-// to make it file scope.
-struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
-
-/// RemoveCommonExpressionsFromUseBases - Look through all of the Bases of all
-/// the Uses, removing any common subexpressions, except that if all such
-/// subexpressions can be folded into an addressing mode for all uses inside
-/// the loop (this case is referred to as "free" in comments herein) we do
-/// not remove anything. This looks for things like (a+b+c) and
-/// (a+c+d) and computes the common (a+c) subexpression. The common expression
-/// is *removed* from the Bases and returned.
-static const SCEV *
-RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
- ScalarEvolution *SE, Loop *L,
- const TargetLowering *TLI) {
- unsigned NumUses = Uses.size();
-
- // Only one use? This is a very common case, so we handle it specially and
- // cheaply.
- const SCEV *Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType());
- const SCEV *Result = Zero;
- const SCEV *FreeResult = Zero;
- if (NumUses == 1) {
- // If the use is inside the loop, use its base, regardless of what it is:
- // it is clearly shared across all the IV's. If the use is outside the loop
- // (which means after it) we don't want to factor anything *into* the loop,
- // so just use 0 as the base.
- if (L->contains(Uses[0].Inst))
- std::swap(Result, Uses[0].Base);
- return Result;
- }
+namespace {
- // To find common subexpressions, count how many of Uses use each expression.
- // If any subexpressions are used Uses.size() times, they are common.
- // Also track whether all uses of each expression can be moved into an
- // an addressing mode "for free"; such expressions are left within the loop.
- // struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
- std::map<const SCEV *, SubExprUseData> SubExpressionUseData;
-
- // UniqueSubExprs - Keep track of all of the subexpressions we see in the
- // order we see them.
- SmallVector<const SCEV *, 16> UniqueSubExprs;
-
- SmallVector<const SCEV *, 16> SubExprs;
- unsigned NumUsesInsideLoop = 0;
- for (unsigned i = 0; i != NumUses; ++i) {
- // If the user is outside the loop, just ignore it for base computation.
- // Since the user is outside the loop, it must be *after* the loop (if it
- // were before, it could not be based on the loop IV). We don't want users
- // after the loop to affect base computation of values *inside* the loop,
- // because we can always add their offsets to the result IV after the loop
- // is done, ensuring we get good code inside the loop.
- if (!L->contains(Uses[i].Inst))
- continue;
- NumUsesInsideLoop++;
+/// LSRFixup - An operand value in an instruction which is to be replaced
+/// with some equivalent, possibly strength-reduced, replacement.
+struct LSRFixup {
+ /// UserInst - The instruction which will be updated.
+ Instruction *UserInst;
- // If the base is zero (which is common), return zero now, there are no
- // CSEs we can find.
- if (Uses[i].Base == Zero) return Zero;
+ /// OperandValToReplace - The operand of the instruction which will
+ /// be replaced. The operand may be used more than once; every instance
+ /// will be replaced.
+ Value *OperandValToReplace;
- // If this use is as an address we may be able to put CSEs in the addressing
- // mode rather than hoisting them.
- bool isAddrUse = isAddressUse(Uses[i].Inst, Uses[i].OperandValToReplace);
- // We may need the AccessTy below, but only when isAddrUse, so compute it
- // only in that case.
- const Type *AccessTy = 0;
- if (isAddrUse)
- AccessTy = getAccessType(Uses[i].Inst);
-
- // Split the expression into subexprs.
- SeparateSubExprs(SubExprs, Uses[i].Base, SE);
- // Add one to SubExpressionUseData.Count for each subexpr present, and
- // if the subexpr is not a valid immediate within an addressing mode use,
- // set SubExpressionUseData.notAllUsesAreFree. We definitely want to
- // hoist these out of the loop (if they are common to all uses).
- for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
- if (++SubExpressionUseData[SubExprs[j]].Count == 1)
- UniqueSubExprs.push_back(SubExprs[j]);
- if (!isAddrUse || !fitsInAddressMode(SubExprs[j], AccessTy, TLI, false))
- SubExpressionUseData[SubExprs[j]].notAllUsesAreFree = true;
- }
- SubExprs.clear();
- }
-
- // Now that we know how many times each is used, build Result. Iterate over
- // UniqueSubexprs so that we have a stable ordering.
- for (unsigned i = 0, e = UniqueSubExprs.size(); i != e; ++i) {
- std::map<const SCEV *, SubExprUseData>::iterator I =
- SubExpressionUseData.find(UniqueSubExprs[i]);
- assert(I != SubExpressionUseData.end() && "Entry not found?");
- if (I->second.Count == NumUsesInsideLoop) { // Found CSE!
- if (I->second.notAllUsesAreFree)
- Result = SE->getAddExpr(Result, I->first);
- else
- FreeResult = SE->getAddExpr(FreeResult, I->first);
- } else
- // Remove non-cse's from SubExpressionUseData.
- SubExpressionUseData.erase(I);
- }
-
- if (FreeResult != Zero) {
- // We have some subexpressions that can be subsumed into addressing
- // modes in every use inside the loop. However, it's possible that
- // there are so many of them that the combined FreeResult cannot
- // be subsumed, or that the target cannot handle both a FreeResult
- // and a Result in the same instruction (for example because it would
- // require too many registers). Check this.
- for (unsigned i=0; i<NumUses; ++i) {
- if (!L->contains(Uses[i].Inst))
- continue;
- // We know this is an addressing mode use; if there are any uses that
- // are not, FreeResult would be Zero.
- const Type *AccessTy = getAccessType(Uses[i].Inst);
- if (!fitsInAddressMode(FreeResult, AccessTy, TLI, Result!=Zero)) {
- // FIXME: could split up FreeResult into pieces here, some hoisted
- // and some not. There is no obvious advantage to this.
- Result = SE->getAddExpr(Result, FreeResult);
- FreeResult = Zero;
- break;
- }
- }
- }
+ /// PostIncLoop - If this user is to use the post-incremented value of an
+ /// induction variable, this variable is non-null and holds the loop
+ /// associated with the induction variable.
+ const Loop *PostIncLoop;
- // If we found no CSE's, return now.
- if (Result == Zero) return Result;
+ /// LUIdx - The index of the LSRUse describing the expression which
+ /// this fixup needs, minus an offset (below).
+ size_t LUIdx;
- // If we still have a FreeResult, remove its subexpressions from
- // SubExpressionUseData. This means they will remain in the use Bases.
- if (FreeResult != Zero) {
- SeparateSubExprs(SubExprs, FreeResult, SE);
- for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
- std::map<const SCEV *, SubExprUseData>::iterator I =
- SubExpressionUseData.find(SubExprs[j]);
- SubExpressionUseData.erase(I);
- }
- SubExprs.clear();
- }
+ /// Offset - A constant offset to be added to the LSRUse expression.
+ /// This allows multiple fixups to share the same LSRUse with different
+ /// offsets, for example in an unrolled loop.
+ int64_t Offset;
- // Otherwise, remove all of the CSE's we found from each of the base values.
- for (unsigned i = 0; i != NumUses; ++i) {
- // Uses outside the loop don't necessarily include the common base, but
- // the final IV value coming into those uses does. Instead of trying to
- // remove the pieces of the common base, which might not be there,
- // subtract off the base to compensate for this.
- if (!L->contains(Uses[i].Inst)) {
- Uses[i].Base = SE->getMinusSCEV(Uses[i].Base, Result);
- continue;
- }
+ LSRFixup();
- // Split the expression into subexprs.
- SeparateSubExprs(SubExprs, Uses[i].Base, SE);
+ void print(raw_ostream &OS) const;
+ void dump() const;
+};
- // Remove any common subexpressions.
- for (unsigned j = 0, e = SubExprs.size(); j != e; ++j)
- if (SubExpressionUseData.count(SubExprs[j])) {
- SubExprs.erase(SubExprs.begin()+j);
- --j; --e;
- }
+}
- // Finally, add the non-shared expressions together.
- if (SubExprs.empty())
- Uses[i].Base = Zero;
- else
- Uses[i].Base = SE->getAddExpr(SubExprs);
- SubExprs.clear();
+LSRFixup::LSRFixup()
+ : UserInst(0), OperandValToReplace(0), PostIncLoop(0),
+ LUIdx(~size_t(0)), Offset(0) {}
+
+void LSRFixup::print(raw_ostream &OS) const {
+ OS << "UserInst=";
+ // Store is common and interesting enough to be worth special-casing.
+ if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) {
+ OS << "store ";
+ WriteAsOperand(OS, Store->getOperand(0), /*PrintType=*/false);
+ } else if (UserInst->getType()->isVoidTy())
+ OS << UserInst->getOpcodeName();
+ else
+ WriteAsOperand(OS, UserInst, /*PrintType=*/false);
+
+ OS << ", OperandValToReplace=";
+ WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false);
+
+ if (PostIncLoop) {
+ OS << ", PostIncLoop=";
+ WriteAsOperand(OS, PostIncLoop->getHeader(), /*PrintType=*/false);
}
- return Result;
-}
+ if (LUIdx != ~size_t(0))
+ OS << ", LUIdx=" << LUIdx;
-/// ValidScale - Check whether the given Scale is valid for all loads and
-/// stores in UsersToProcess.
-///
-bool LoopStrengthReduce::ValidScale(bool HasBaseReg, int64_t Scale,
- const std::vector<BasedUser>& UsersToProcess) {
- if (!TLI)
- return true;
+ if (Offset != 0)
+ OS << ", Offset=" << Offset;
+}
- for (unsigned i = 0, e = UsersToProcess.size(); i!=e; ++i) {
- // If this is a load or other access, pass the type of the access in.
- const Type *AccessTy =
- Type::getVoidTy(UsersToProcess[i].Inst->getContext());
- if (isAddressUse(UsersToProcess[i].Inst,
- UsersToProcess[i].OperandValToReplace))
- AccessTy = getAccessType(UsersToProcess[i].Inst);
- else if (isa<PHINode>(UsersToProcess[i].Inst))
- continue;
+void LSRFixup::dump() const {
+ print(errs()); errs() << '\n';
+}
- TargetLowering::AddrMode AM;
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
- AM.BaseOffs = SC->getValue()->getSExtValue();
- AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
- AM.Scale = Scale;
+namespace {
- // If load[imm+r*scale] is illegal, bail out.
- if (!TLI->isLegalAddressingMode(AM, AccessTy))
- return false;
+/// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding
+/// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*.
+struct UniquifierDenseMapInfo {
+ static SmallVector<const SCEV *, 2> getEmptyKey() {
+ SmallVector<const SCEV *, 2> V;
+ V.push_back(reinterpret_cast<const SCEV *>(-1));
+ return V;
}
- return true;
-}
-
-/// ValidOffset - Check whether the given Offset is valid for all loads and
-/// stores in UsersToProcess.
-///
-bool LoopStrengthReduce::ValidOffset(bool HasBaseReg,
- int64_t Offset,
- int64_t Scale,
- const std::vector<BasedUser>& UsersToProcess) {
- if (!TLI)
- return true;
- for (unsigned i=0, e = UsersToProcess.size(); i!=e; ++i) {
- // If this is a load or other access, pass the type of the access in.
- const Type *AccessTy =
- Type::getVoidTy(UsersToProcess[i].Inst->getContext());
- if (isAddressUse(UsersToProcess[i].Inst,
- UsersToProcess[i].OperandValToReplace))
- AccessTy = getAccessType(UsersToProcess[i].Inst);
- else if (isa<PHINode>(UsersToProcess[i].Inst))
- continue;
+ static SmallVector<const SCEV *, 2> getTombstoneKey() {
+ SmallVector<const SCEV *, 2> V;
+ V.push_back(reinterpret_cast<const SCEV *>(-2));
+ return V;
+ }
- TargetLowering::AddrMode AM;
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
- AM.BaseOffs = SC->getValue()->getSExtValue();
- AM.BaseOffs = (uint64_t)AM.BaseOffs + (uint64_t)Offset;
- AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
- AM.Scale = Scale;
+ static unsigned getHashValue(const SmallVector<const SCEV *, 2> &V) {
+ unsigned Result = 0;
+ for (SmallVectorImpl<const SCEV *>::const_iterator I = V.begin(),
+ E = V.end(); I != E; ++I)
+ Result ^= DenseMapInfo<const SCEV *>::getHashValue(*I);
+ return Result;
+ }
- // If load[imm+r*scale] is illegal, bail out.
- if (!TLI->isLegalAddressingMode(AM, AccessTy))
- return false;
+ static bool isEqual(const SmallVector<const SCEV *, 2> &LHS,
+ const SmallVector<const SCEV *, 2> &RHS) {
+ return LHS == RHS;
}
- return true;
-}
+};
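
// Illustrative sketch (not part of the patch): the uniquifier hashes a sorted
// vector of keys by XOR-combining the per-element hashes, and compares keys
// element-wise. Shown here over std::vector<int>, with std::unordered_set
// (C++11, newer than this code) standing in for DenseSet; VectorKeyInfo is a
// hypothetical name.
#include <cstddef>
#include <functional>
#include <unordered_set>
#include <vector>

struct VectorKeyInfo {
  // Hash: XOR of element hashes (order-insensitive, like the XOR above).
  std::size_t operator()(const std::vector<int> &V) const {
    std::size_t H = 0;
    for (std::size_t i = 0, e = V.size(); i != e; ++i)
      H ^= std::hash<int>()(V[i]);
    return H;
  }
  // Equality: element-wise comparison.
  bool operator()(const std::vector<int> &A, const std::vector<int> &B) const {
    return A == B;
  }
};

// Because the key vectors are kept sorted, two formulae built from the same
// set of registers map to the same key and are deduplicated.
typedef std::unordered_set<std::vector<int>, VectorKeyInfo, VectorKeyInfo>
    Uniquifier;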
+
+/// LSRUse - This class holds the state that LSR keeps for each use in
+/// IVUsers, as well as uses invented by LSR itself. It includes information
+/// about what kinds of things can be folded into the user, information about
+/// the user itself, and information about how the use may be satisfied.
+/// TODO: Represent multiple users of the same expression in common?
+class LSRUse {
+ DenseSet<SmallVector<const SCEV *, 2>, UniquifierDenseMapInfo> Uniquifier;
+
+public:
+ /// KindType - An enum for a kind of use, indicating what types of
+ /// scaled and immediate operands it might support.
+ enum KindType {
+ Basic, ///< A normal use, with no folding.
+ Special, ///< A special case of basic, allowing -1 scales.
+ Address, ///< An address use; folding according to TargetLowering
+ ICmpZero ///< An equality icmp with both operands folded into one.
+ // TODO: Add a generic icmp too?
+ };
-/// RequiresTypeConversion - Returns true if converting Ty1 to Ty2 is not
-/// a nop.
-bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1,
- const Type *Ty2) {
- if (Ty1 == Ty2)
- return false;
- Ty1 = SE->getEffectiveSCEVType(Ty1);
- Ty2 = SE->getEffectiveSCEVType(Ty2);
- if (Ty1 == Ty2)
- return false;
- if (Ty1->canLosslesslyBitCastTo(Ty2))
- return false;
- if (TLI && TLI->isTruncateFree(Ty1, Ty2))
- return false;
- return true;
-}
+ KindType Kind;
+ const Type *AccessTy;
-/// CheckForIVReuse - Returns the multiple if the stride is the multiple
-/// of a previous stride and it is a legal value for the target addressing
-/// mode scale component and optional base reg. This allows the users of
-/// this stride to be rewritten as prev iv * factor. It returns 0 if no
-/// reuse is possible. Factors can be negative on same targets, e.g. ARM.
-///
-/// If all uses are outside the loop, we don't require that all multiplies
-/// be folded into the addressing mode, nor even that the factor be constant;
-/// a multiply (executed once) outside the loop is better than another IV
-/// within. Well, usually.
-const SCEV *LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
- bool AllUsesAreAddresses,
- bool AllUsesAreOutsideLoop,
- const SCEV *Stride,
- IVExpr &IV, const Type *Ty,
- const std::vector<BasedUser>& UsersToProcess) {
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
- int64_t SInt = SC->getValue()->getSExtValue();
- for (unsigned NewStride = 0, e = IU->StrideOrder.size();
- NewStride != e; ++NewStride) {
- std::map<const SCEV *, IVsOfOneStride>::iterator SI =
- IVsByStride.find(IU->StrideOrder[NewStride]);
- if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
- continue;
- // The other stride has no uses, don't reuse it.
- std::map<const SCEV *, IVUsersOfOneStride *>::iterator UI =
- IU->IVUsesByStride.find(IU->StrideOrder[NewStride]);
- if (UI->second->Users.empty())
- continue;
- int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
- if (SI->first != Stride &&
- (unsigned(abs64(SInt)) < SSInt || (SInt % SSInt) != 0))
- continue;
- int64_t Scale = SInt / SSInt;
- // Check that this stride is valid for all the types used for loads and
- // stores; if it can be used for some and not others, we might as well use
- // the original stride everywhere, since we have to create the IV for it
- // anyway. If the scale is 1, then we don't need to worry about folding
- // multiplications.
- if (Scale == 1 ||
- (AllUsesAreAddresses &&
- ValidScale(HasBaseReg, Scale, UsersToProcess))) {
- // Prefer to reuse an IV with a base of zero.
- for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
- IE = SI->second.IVs.end(); II != IE; ++II)
- // Only reuse previous IV if it would not require a type conversion
- // and if the base difference can be folded.
- if (II->Base->isZero() &&
- !RequiresTypeConversion(II->Base->getType(), Ty)) {
- IV = *II;
- return SE->getIntegerSCEV(Scale, Stride->getType());
- }
- // Otherwise, settle for an IV with a foldable base.
- if (AllUsesAreAddresses)
- for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
- IE = SI->second.IVs.end(); II != IE; ++II)
- // Only reuse previous IV if it would not require a type conversion
- // and if the base difference can be folded.
- if (SE->getEffectiveSCEVType(II->Base->getType()) ==
- SE->getEffectiveSCEVType(Ty) &&
- isa<SCEVConstant>(II->Base)) {
- int64_t Base =
- cast<SCEVConstant>(II->Base)->getValue()->getSExtValue();
- if (Base > INT32_MIN && Base <= INT32_MAX &&
- ValidOffset(HasBaseReg, -Base * Scale,
- Scale, UsersToProcess)) {
- IV = *II;
- return SE->getIntegerSCEV(Scale, Stride->getType());
- }
- }
- }
- }
- } else if (AllUsesAreOutsideLoop) {
- // Accept nonconstant strides here; it is really really right to substitute
- // an existing IV if we can.
- for (unsigned NewStride = 0, e = IU->StrideOrder.size();
- NewStride != e; ++NewStride) {
- std::map<const SCEV *, IVsOfOneStride>::iterator SI =
- IVsByStride.find(IU->StrideOrder[NewStride]);
- if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
- continue;
- int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
- if (SI->first != Stride && SSInt != 1)
- continue;
- for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
- IE = SI->second.IVs.end(); II != IE; ++II)
- // Accept nonzero base here.
- // Only reuse previous IV if it would not require a type conversion.
- if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
- IV = *II;
- return Stride;
- }
- }
- // Special case, old IV is -1*x and this one is x. Can treat this one as
- // -1*old.
- for (unsigned NewStride = 0, e = IU->StrideOrder.size();
- NewStride != e; ++NewStride) {
- std::map<const SCEV *, IVsOfOneStride>::iterator SI =
- IVsByStride.find(IU->StrideOrder[NewStride]);
- if (SI == IVsByStride.end())
- continue;
- if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(SI->first))
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(ME->getOperand(0)))
- if (Stride == ME->getOperand(1) &&
- SC->getValue()->getSExtValue() == -1LL)
- for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
- IE = SI->second.IVs.end(); II != IE; ++II)
- // Accept nonzero base here.
- // Only reuse previous IV if it would not require type conversion.
- if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
- IV = *II;
- return SE->getIntegerSCEV(-1LL, Stride->getType());
- }
- }
- }
- return SE->getIntegerSCEV(0, Stride->getType());
-}
-
-/// PartitionByIsUseOfPostIncrementedValue - Simple boolean predicate that
-/// returns true if Val's isUseOfPostIncrementedValue is true.
-static bool PartitionByIsUseOfPostIncrementedValue(const BasedUser &Val) {
- return Val.isUseOfPostIncrementedValue;
-}
-
-/// isNonConstantNegative - Return true if the specified scev is negated, but
-/// not a constant.
-static bool isNonConstantNegative(const SCEV *Expr) {
- const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr);
- if (!Mul) return false;
-
- // If there is a constant factor, it will be first.
- const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
- if (!SC) return false;
-
- // Return true if the value is negative, this matches things like (-42 * V).
- return SC->getValue()->getValue().isNegative();
-}
-
-/// CollectIVUsers - Transform our list of users and offsets to a bit more
-/// complex table. In this new vector, each 'BasedUser' contains 'Base', the
-/// base of the strided accesses, as well as the old information from Uses. We
-/// progressively move information from the Base field to the Imm field, until
-/// we eventually have the full access expression to rewrite the use.
-const SCEV *LoopStrengthReduce::CollectIVUsers(const SCEV *Stride,
- IVUsersOfOneStride &Uses,
- Loop *L,
- bool &AllUsesAreAddresses,
- bool &AllUsesAreOutsideLoop,
- std::vector<BasedUser> &UsersToProcess) {
- // FIXME: Generalize to non-affine IV's.
- if (!Stride->isLoopInvariant(L))
- return SE->getIntegerSCEV(0, Stride->getType());
-
- UsersToProcess.reserve(Uses.Users.size());
- for (ilist<IVStrideUse>::iterator I = Uses.Users.begin(),
- E = Uses.Users.end(); I != E; ++I) {
- UsersToProcess.push_back(BasedUser(*I, SE));
-
- // Move any loop variant operands from the offset field to the immediate
- // field of the use, so that we don't try to use something before it is
- // computed.
- MoveLoopVariantsToImmediateField(UsersToProcess.back().Base,
- UsersToProcess.back().Imm, L, SE);
- assert(UsersToProcess.back().Base->isLoopInvariant(L) &&
- "Base value is not loop invariant!");
- }
-
- // We now have a whole bunch of uses of like-strided induction variables, but
- // they might all have different bases. We want to emit one PHI node for this
- // stride which we fold as many common expressions (between the IVs) into as
- // possible. Start by identifying the common expressions in the base values
- // for the strides (e.g. if we have "A+C+B" and "A+B+D" as our bases, find
- // "A+B"), emit it to the preheader, then remove the expression from the
- // UsersToProcess base values.
- const SCEV *CommonExprs =
- RemoveCommonExpressionsFromUseBases(UsersToProcess, SE, L, TLI);
-
- // Next, figure out what we can represent in the immediate fields of
- // instructions. If we can represent anything there, move it to the imm
- // fields of the BasedUsers. We do this so that it increases the commonality
- // of the remaining uses.
- unsigned NumPHI = 0;
- bool HasAddress = false;
- for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
- // If the user is not in the current loop, this means it is using the exit
- // value of the IV. Do not put anything in the base, make sure it's all in
- // the immediate field to allow as much factoring as possible.
- if (!L->contains(UsersToProcess[i].Inst)) {
- UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm,
- UsersToProcess[i].Base);
- UsersToProcess[i].Base =
- SE->getIntegerSCEV(0, UsersToProcess[i].Base->getType());
- } else {
- // Not all uses are outside the loop.
- AllUsesAreOutsideLoop = false;
-
- // Addressing modes can be folded into loads and stores. Be careful that
- // the store is through the expression, not of the expression though.
- bool isPHI = false;
- bool isAddress = isAddressUse(UsersToProcess[i].Inst,
- UsersToProcess[i].OperandValToReplace);
- if (isa<PHINode>(UsersToProcess[i].Inst)) {
- isPHI = true;
- ++NumPHI;
- }
+ SmallVector<int64_t, 8> Offsets;
+ int64_t MinOffset;
+ int64_t MaxOffset;
- if (isAddress)
- HasAddress = true;
+ /// AllFixupsOutsideLoop - This records whether all of the fixups using this
+ /// LSRUse are outside of the loop, in which case some special-case heuristics
+ /// may be used.
+ bool AllFixupsOutsideLoop;
- // If this use isn't an address, then not all uses are addresses.
- if (!isAddress && !isPHI)
- AllUsesAreAddresses = false;
+ /// Formulae - A list of ways to build a value that can satisfy this user.
+ /// After the list is populated, one of these is selected heuristically and
+ /// used to formulate a replacement for OperandValToReplace in UserInst.
+ SmallVector<Formula, 12> Formulae;
- MoveImmediateValues(TLI, UsersToProcess[i].Inst, UsersToProcess[i].Base,
- UsersToProcess[i].Imm, isAddress, L, SE);
- }
- }
+ /// Regs - The set of register candidates used by all formulae in this LSRUse.
+ SmallPtrSet<const SCEV *, 4> Regs;
-  // If one of the uses is a PHI node and all other uses are addresses, still
- // allow iv reuse. Essentially we are trading one constant multiplication
- // for one fewer iv.
- if (NumPHI > 1)
- AllUsesAreAddresses = false;
+ LSRUse(KindType K, const Type *T) : Kind(K), AccessTy(T),
+ MinOffset(INT64_MAX),
+ MaxOffset(INT64_MIN),
+ AllFixupsOutsideLoop(true) {}
- // There are no in-loop address uses.
- if (AllUsesAreAddresses && (!HasAddress && !AllUsesAreOutsideLoop))
- AllUsesAreAddresses = false;
+ bool InsertFormula(size_t LUIdx, const Formula &F);
- return CommonExprs;
-}
+ void check() const;
-/// ShouldUseFullStrengthReductionMode - Test whether full strength-reduction
-/// is valid and profitable for the given set of users of a stride. In
-/// full strength-reduction mode, all addresses at the current stride are
-/// strength-reduced all the way down to pointer arithmetic.
-///
-bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode(
- const std::vector<BasedUser> &UsersToProcess,
- const Loop *L,
- bool AllUsesAreAddresses,
- const SCEV *Stride) {
- if (!EnableFullLSRMode)
- return false;
+ void print(raw_ostream &OS) const;
+ void dump() const;
+};
- // The heuristics below aim to avoid increasing register pressure, but
- // fully strength-reducing all the addresses increases the number of
- // add instructions, so don't do this when optimizing for size.
- // TODO: If the loop is large, the savings due to simpler addresses
-  // may outweigh the costs of the extra increment instructions.
- if (L->getHeader()->getParent()->hasFnAttr(Attribute::OptimizeForSize))
- return false;
+/// InsertFormula - If the given formula has not yet been inserted, add it to
+/// the list, and return true. Return false otherwise.
+bool LSRUse::InsertFormula(size_t LUIdx, const Formula &F) {
+ SmallVector<const SCEV *, 2> Key = F.BaseRegs;
+ if (F.ScaledReg) Key.push_back(F.ScaledReg);
+ // Unstable sort by host order ok, because this is only used for uniquifying.
+ std::sort(Key.begin(), Key.end());
- // TODO: For now, don't do full strength reduction if there could
- // potentially be greater-stride multiples of the current stride
- // which could reuse the current stride IV.
- if (IU->StrideOrder.back() != Stride)
+ if (!Uniquifier.insert(Key).second)
return false;
- // Iterate through the uses to find conditions that automatically rule out
- // full-lsr mode.
- for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
- const SCEV *Base = UsersToProcess[i].Base;
- const SCEV *Imm = UsersToProcess[i].Imm;
- // If any users have a loop-variant component, they can't be fully
- // strength-reduced.
- if (Imm && !Imm->isLoopInvariant(L))
- return false;
-    // If there are two users with the same base and the difference between
- // the two Imm values can't be folded into the address, full
- // strength reduction would increase register pressure.
- do {
- const SCEV *CurImm = UsersToProcess[i].Imm;
- if ((CurImm || Imm) && CurImm != Imm) {
- if (!CurImm) CurImm = SE->getIntegerSCEV(0, Stride->getType());
- if (!Imm) Imm = SE->getIntegerSCEV(0, Stride->getType());
- const Instruction *Inst = UsersToProcess[i].Inst;
- const Type *AccessTy = getAccessType(Inst);
- const SCEV *Diff = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
- if (!Diff->isZero() &&
- (!AllUsesAreAddresses ||
- !fitsInAddressMode(Diff, AccessTy, TLI, /*HasBaseReg=*/true)))
- return false;
- }
- } while (++i != e && Base == UsersToProcess[i].Base);
- }
+ // Using a register to hold the value of 0 is not profitable.
+ assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
+ "Zero allocated in a scaled register!");
+#ifndef NDEBUG
+ for (SmallVectorImpl<const SCEV *>::const_iterator I =
+ F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I)
+ assert(!(*I)->isZero() && "Zero allocated in a base register!");
+#endif
- // If there's exactly one user in this stride, fully strength-reducing it
- // won't increase register pressure. If it's starting from a non-zero base,
- // it'll be simpler this way.
- if (UsersToProcess.size() == 1 && !UsersToProcess[0].Base->isZero())
- return true;
+ // Add the formula to the list.
+ Formulae.push_back(F);
- // Otherwise, if there are any users in this stride that don't require
- // a register for their base, full strength-reduction will increase
- // register pressure.
- for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
- if (UsersToProcess[i].Base->isZero())
- return false;
+ // Record registers now being used by this use.
+ if (F.ScaledReg) Regs.insert(F.ScaledReg);
+ Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
- // Otherwise, go for it.
return true;
}
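
Editorial aside: the uniquifying trick in InsertFormula (build a key from the formula's registers, sort it so the key is independent of insertion order, and only keep formulae whose key has not been seen) can be restated as a small standalone sketch. FormulaSketch, Reg, insertUnique and the std::set-based uniquifier below are illustrative stand-ins, not the types used by the pass:

#include <algorithm>
#include <set>
#include <vector>

typedef const void *Reg;   // stand-in for a register candidate (a SCEV in the pass)

struct FormulaSketch {
  std::vector<Reg> BaseRegs;
  Reg ScaledReg;
  FormulaSketch() : ScaledReg(0) {}
};

// Returns true if this combination of registers has not been seen before.
bool insertUnique(std::set<std::vector<Reg> > &Uniquifier, const FormulaSketch &F) {
  std::vector<Reg> Key = F.BaseRegs;
  if (F.ScaledReg)
    Key.push_back(F.ScaledReg);
  // The sort order itself is arbitrary; it only needs to be consistent so that
  // the same set of registers always produces the same key.
  std::sort(Key.begin(), Key.end());
  return Uniquifier.insert(Key).second;
}
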
-/// InsertAffinePhi - Create and insert a PHI node for an induction variable
-/// with the specified start and step values in the specified loop.
-///
-/// If NegateStride is true, the stride should be negated by using a
-/// subtract instead of an add.
-///
-/// Return the created phi node.
-///
-static PHINode *InsertAffinePhi(const SCEV *Start, const SCEV *Step,
- Instruction *IVIncInsertPt,
- const Loop *L,
- SCEVExpander &Rewriter) {
- assert(Start->isLoopInvariant(L) && "New PHI start is not loop invariant!");
- assert(Step->isLoopInvariant(L) && "New PHI stride is not loop invariant!");
-
- BasicBlock *Header = L->getHeader();
- BasicBlock *Preheader = L->getLoopPreheader();
- BasicBlock *LatchBlock = L->getLoopLatch();
- const Type *Ty = Start->getType();
- Ty = Rewriter.SE.getEffectiveSCEVType(Ty);
-
- PHINode *PN = PHINode::Create(Ty, "lsr.iv", Header->begin());
- PN->addIncoming(Rewriter.expandCodeFor(Start, Ty, Preheader->getTerminator()),
- Preheader);
-
- // If the stride is negative, insert a sub instead of an add for the
- // increment.
- bool isNegative = isNonConstantNegative(Step);
- const SCEV *IncAmount = Step;
- if (isNegative)
- IncAmount = Rewriter.SE.getNegativeSCEV(Step);
-
- // Insert an add instruction right before the terminator corresponding
- // to the back-edge or just before the only use. The location is determined
- // by the caller and passed in as IVIncInsertPt.
- Value *StepV = Rewriter.expandCodeFor(IncAmount, Ty,
- Preheader->getTerminator());
- Instruction *IncV;
- if (isNegative) {
- IncV = BinaryOperator::CreateSub(PN, StepV, "lsr.iv.next",
- IVIncInsertPt);
- } else {
- IncV = BinaryOperator::CreateAdd(PN, StepV, "lsr.iv.next",
- IVIncInsertPt);
- }
- if (!isa<ConstantInt>(StepV)) ++NumVariable;
-
- PN->addIncoming(IncV, LatchBlock);
-
- ++NumInserted;
- return PN;
-}
-
-static void SortUsersToProcess(std::vector<BasedUser> &UsersToProcess) {
- // We want to emit code for users inside the loop first. To do this, we
- // rearrange BasedUser so that the entries at the end have
- // isUseOfPostIncrementedValue = false, because we pop off the end of the
- // vector (so we handle them first).
- std::partition(UsersToProcess.begin(), UsersToProcess.end(),
- PartitionByIsUseOfPostIncrementedValue);
-
- // Sort this by base, so that things with the same base are handled
- // together. By partitioning first and stable-sorting later, we are
- // guaranteed that within each base we will pop off users from within the
- // loop before users outside of the loop with a particular base.
- //
- // We would like to use stable_sort here, but we can't. The problem is that
-  // const SCEV *'s don't have a deterministic ordering w.r.t. each other, so
- // we don't have anything to do a '<' comparison on. Because we think the
- // number of uses is small, do a horrible bubble sort which just relies on
- // ==.
- for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
- // Get a base value.
- const SCEV *Base = UsersToProcess[i].Base;
-
- // Compact everything with this base to be consecutive with this one.
- for (unsigned j = i+1; j != e; ++j) {
- if (UsersToProcess[j].Base == Base) {
- std::swap(UsersToProcess[i+1], UsersToProcess[j]);
- ++i;
- }
- }
+void LSRUse::print(raw_ostream &OS) const {
+ OS << "LSR Use: Kind=";
+ switch (Kind) {
+ case Basic: OS << "Basic"; break;
+ case Special: OS << "Special"; break;
+ case ICmpZero: OS << "ICmpZero"; break;
+ case Address:
+ OS << "Address of ";
+ if (isa<PointerType>(AccessTy))
+ OS << "pointer"; // the full pointer type could be really verbose
+ else
+ OS << *AccessTy;
}
-}
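
The grouping loop in the removed SortUsersToProcess above is worth restating on its own, since the reason for the quadratic sweep (const SCEV* values offer only ==, no usable < to hand to std::sort) is easy to miss. A hedged standalone sketch of the same trick; UserSketch and groupByBase are illustrative names, and the std::partition step is omitted:

#include <algorithm>
#include <cstddef>
#include <vector>

struct UserSketch {
  const void *Base;   // stand-in for the const SCEV* base expression
  int Payload;
};

// Group entries with equal Base pointers next to each other using only ==,
// mirroring the loop in SortUsersToProcess. Quadratic, but acceptable because
// the number of uses per stride is expected to be small.
void groupByBase(std::vector<UserSketch> &V) {
  for (size_t i = 0, e = V.size(); i != e; ++i) {
    const void *Base = V[i].Base;
    // Compact everything with this base to be consecutive with element i.
    for (size_t j = i + 1; j != e; ++j)
      if (V[j].Base == Base) {
        std::swap(V[i + 1], V[j]);
        ++i;
      }
  }
}
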
-/// PrepareToStrengthReduceFully - Prepare to fully strength-reduce
-/// UsersToProcess, meaning lowering addresses all the way down to direct
-/// pointer arithmetic.
-///
-void
-LoopStrengthReduce::PrepareToStrengthReduceFully(
- std::vector<BasedUser> &UsersToProcess,
- const SCEV *Stride,
- const SCEV *CommonExprs,
- const Loop *L,
- SCEVExpander &PreheaderRewriter) {
- DEBUG(dbgs() << " Fully reducing all users\n");
-
- // Rewrite the UsersToProcess records, creating a separate PHI for each
- // unique Base value.
- Instruction *IVIncInsertPt = L->getLoopLatch()->getTerminator();
- for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
- // TODO: The uses are grouped by base, but not sorted. We arbitrarily
- // pick the first Imm value here to start with, and adjust it for the
- // other uses.
- const SCEV *Imm = UsersToProcess[i].Imm;
- const SCEV *Base = UsersToProcess[i].Base;
- const SCEV *Start = SE->getAddExpr(CommonExprs, Base, Imm);
- PHINode *Phi = InsertAffinePhi(Start, Stride, IVIncInsertPt, L,
- PreheaderRewriter);
- // Loop over all the users with the same base.
- do {
- UsersToProcess[i].Base = SE->getIntegerSCEV(0, Stride->getType());
- UsersToProcess[i].Imm = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
- UsersToProcess[i].Phi = Phi;
- assert(UsersToProcess[i].Imm->isLoopInvariant(L) &&
- "ShouldUseFullStrengthReductionMode should reject this!");
- } while (++i != e && Base == UsersToProcess[i].Base);
- }
-}
-
-/// FindIVIncInsertPt - Return the location to insert the increment instruction.
-/// If the only use is a use of the postinc value (which must be the loop
-/// termination condition), then insert it just before the use.
-static Instruction *FindIVIncInsertPt(std::vector<BasedUser> &UsersToProcess,
- const Loop *L) {
- if (UsersToProcess.size() == 1 &&
- UsersToProcess[0].isUseOfPostIncrementedValue &&
- L->contains(UsersToProcess[0].Inst))
- return UsersToProcess[0].Inst;
- return L->getLoopLatch()->getTerminator();
-}
-
-/// PrepareToStrengthReduceWithNewPhi - Insert a new induction variable for the
-/// given users to share.
-///
-void
-LoopStrengthReduce::PrepareToStrengthReduceWithNewPhi(
- std::vector<BasedUser> &UsersToProcess,
- const SCEV *Stride,
- const SCEV *CommonExprs,
- Value *CommonBaseV,
- Instruction *IVIncInsertPt,
- const Loop *L,
- SCEVExpander &PreheaderRewriter) {
- DEBUG(dbgs() << " Inserting new PHI:\n");
-
- PHINode *Phi = InsertAffinePhi(SE->getUnknown(CommonBaseV),
- Stride, IVIncInsertPt, L,
- PreheaderRewriter);
-
- // Remember this in case a later stride is multiple of this.
- IVsByStride[Stride].addIV(Stride, CommonExprs, Phi);
-
- // All the users will share this new IV.
- for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
- UsersToProcess[i].Phi = Phi;
-
- DEBUG(dbgs() << " IV=");
- DEBUG(WriteAsOperand(dbgs(), Phi, /*PrintType=*/false));
- DEBUG(dbgs() << "\n");
-}
-
-/// PrepareToStrengthReduceFromSmallerStride - Prepare for the given users to
-/// reuse an induction variable with a stride that is a factor of the current
-/// induction variable.
-///
-void
-LoopStrengthReduce::PrepareToStrengthReduceFromSmallerStride(
- std::vector<BasedUser> &UsersToProcess,
- Value *CommonBaseV,
- const IVExpr &ReuseIV,
- Instruction *PreInsertPt) {
- DEBUG(dbgs() << " Rewriting in terms of existing IV of STRIDE "
- << *ReuseIV.Stride << " and BASE " << *ReuseIV.Base << "\n");
-
- // All the users will share the reused IV.
- for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
- UsersToProcess[i].Phi = ReuseIV.PHI;
-
- Constant *C = dyn_cast<Constant>(CommonBaseV);
- if (C &&
- (!C->isNullValue() &&
- !fitsInAddressMode(SE->getUnknown(CommonBaseV), CommonBaseV->getType(),
- TLI, false)))
- // We want the common base emitted into the preheader! This is just
- // using cast as a copy so BitCast (no-op cast) is appropriate
- CommonBaseV = new BitCastInst(CommonBaseV, CommonBaseV->getType(),
- "commonbase", PreInsertPt);
-}
-
-static bool IsImmFoldedIntoAddrMode(GlobalValue *GV, int64_t Offset,
- const Type *AccessTy,
- std::vector<BasedUser> &UsersToProcess,
- const TargetLowering *TLI) {
- SmallVector<Instruction*, 16> AddrModeInsts;
- for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
- if (UsersToProcess[i].isUseOfPostIncrementedValue)
- continue;
- ExtAddrMode AddrMode =
- AddressingModeMatcher::Match(UsersToProcess[i].OperandValToReplace,
- AccessTy, UsersToProcess[i].Inst,
- AddrModeInsts, *TLI);
- if (GV && GV != AddrMode.BaseGV)
- return false;
- if (Offset && !AddrMode.BaseOffs)
-      // FIXME: How to accurately check whether its immediate offset is folded.
- return false;
- AddrModeInsts.clear();
+ OS << ", Offsets={";
+ for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
+ E = Offsets.end(); I != E; ++I) {
+ OS << *I;
+ if (next(I) != E)
+ OS << ',';
}
- return true;
-}
+ OS << '}';
-/// StrengthReduceIVUsersOfStride - Strength reduce all of the users of a single
-/// stride of IV. All of the users may have different starting values, and this
-/// may not be the only stride.
-void
-LoopStrengthReduce::StrengthReduceIVUsersOfStride(const SCEV *Stride,
- IVUsersOfOneStride &Uses,
- Loop *L) {
- // If all the users are moved to another stride, then there is nothing to do.
- if (Uses.Users.empty())
- return;
+ if (AllFixupsOutsideLoop)
+ OS << ", all-fixups-outside-loop";
+}
- // Keep track if every use in UsersToProcess is an address. If they all are,
- // we may be able to rewrite the entire collection of them in terms of a
- // smaller-stride IV.
- bool AllUsesAreAddresses = true;
-
- // Keep track if every use of a single stride is outside the loop. If so,
- // we want to be more aggressive about reusing a smaller-stride IV; a
- // multiply outside the loop is better than another IV inside. Well, usually.
- bool AllUsesAreOutsideLoop = true;
-
- // Transform our list of users and offsets to a bit more complex table. In
-  // this new vector, each 'BasedUser' contains 'Base', the base of the
-  // strided access, as well as the old information from Uses. We progressively
- // move information from the Base field to the Imm field, until we eventually
- // have the full access expression to rewrite the use.
- std::vector<BasedUser> UsersToProcess;
- const SCEV *CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses,
- AllUsesAreOutsideLoop,
- UsersToProcess);
-
- // Sort the UsersToProcess array so that users with common bases are
- // next to each other.
- SortUsersToProcess(UsersToProcess);
-
- // If we managed to find some expressions in common, we'll need to carry
- // their value in a register and add it in for each use. This will take up
- // a register operand, which potentially restricts what stride values are
- // valid.
- bool HaveCommonExprs = !CommonExprs->isZero();
- const Type *ReplacedTy = CommonExprs->getType();
-
- // If all uses are addresses, consider sinking the immediate part of the
- // common expression back into uses if they can fit in the immediate fields.
- if (TLI && HaveCommonExprs && AllUsesAreAddresses) {
- const SCEV *NewCommon = CommonExprs;
- const SCEV *Imm = SE->getIntegerSCEV(0, ReplacedTy);
- MoveImmediateValues(TLI, Type::getVoidTy(
- L->getLoopPreheader()->getContext()),
- NewCommon, Imm, true, L, SE);
- if (!Imm->isZero()) {
- bool DoSink = true;
-
- // If the immediate part of the common expression is a GV, check if it's
- // possible to fold it into the target addressing mode.
- GlobalValue *GV = 0;
- if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(Imm))
- GV = dyn_cast<GlobalValue>(SU->getValue());
- int64_t Offset = 0;
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Imm))
- Offset = SC->getValue()->getSExtValue();
- if (GV || Offset)
- // Pass VoidTy as the AccessTy to be conservative, because
- // there could be multiple access types among all the uses.
- DoSink = IsImmFoldedIntoAddrMode(GV, Offset,
- Type::getVoidTy(L->getLoopPreheader()->getContext()),
- UsersToProcess, TLI);
-
- if (DoSink) {
- DEBUG(dbgs() << " Sinking " << *Imm << " back down into uses\n");
- for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
- UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm, Imm);
- CommonExprs = NewCommon;
- HaveCommonExprs = !CommonExprs->isZero();
- ++NumImmSunk;
- }
- }
- }
+void LSRUse::dump() const {
+ print(errs()); errs() << '\n';
+}
- // Now that we know what we need to do, insert the PHI node itself.
- //
- DEBUG(dbgs() << "LSR: Examining IVs of TYPE " << *ReplacedTy << " of STRIDE "
- << *Stride << ":\n"
- << " Common base: " << *CommonExprs << "\n");
+/// isLegalUse - Test whether the use described by AM is "legal", meaning it can
+/// be completely folded into the user instruction at isel time. This includes
+/// address-mode folding and special icmp tricks.
+static bool isLegalUse(const TargetLowering::AddrMode &AM,
+ LSRUse::KindType Kind, const Type *AccessTy,
+ const TargetLowering *TLI) {
+ switch (Kind) {
+ case LSRUse::Address:
+ // If we have low-level target information, ask the target if it can
+ // completely fold this address.
+ if (TLI) return TLI->isLegalAddressingMode(AM, AccessTy);
+
+ // Otherwise, just guess that reg+reg addressing is legal.
+ return !AM.BaseGV && AM.BaseOffs == 0 && AM.Scale <= 1;
+
+ case LSRUse::ICmpZero:
+ // There's not even a target hook for querying whether it would be legal to
+ // fold a GV into an ICmp.
+ if (AM.BaseGV)
+ return false;
- SCEVExpander Rewriter(*SE);
- SCEVExpander PreheaderRewriter(*SE);
+ // ICmp only has two operands; don't allow more than two non-trivial parts.
+ if (AM.Scale != 0 && AM.HasBaseReg && AM.BaseOffs != 0)
+ return false;
- BasicBlock *Preheader = L->getLoopPreheader();
- Instruction *PreInsertPt = Preheader->getTerminator();
- BasicBlock *LatchBlock = L->getLoopLatch();
- Instruction *IVIncInsertPt = LatchBlock->getTerminator();
-
- Value *CommonBaseV = Constant::getNullValue(ReplacedTy);
-
- const SCEV *RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
- IVExpr ReuseIV(SE->getIntegerSCEV(0,
- Type::getInt32Ty(Preheader->getContext())),
- SE->getIntegerSCEV(0,
- Type::getInt32Ty(Preheader->getContext())),
- 0);
-
- // Choose a strength-reduction strategy and prepare for it by creating
- // the necessary PHIs and adjusting the bookkeeping.
- if (ShouldUseFullStrengthReductionMode(UsersToProcess, L,
- AllUsesAreAddresses, Stride)) {
- PrepareToStrengthReduceFully(UsersToProcess, Stride, CommonExprs, L,
- PreheaderRewriter);
- } else {
- // Emit the initial base value into the loop preheader.
- CommonBaseV = PreheaderRewriter.expandCodeFor(CommonExprs, ReplacedTy,
- PreInsertPt);
-
- // If all uses are addresses, check if it is possible to reuse an IV. The
- // new IV must have a stride that is a multiple of the old stride; the
- // multiple must be a number that can be encoded in the scale field of the
- // target addressing mode; and we must have a valid instruction after this
- // substitution, including the immediate field, if any.
- RewriteFactor = CheckForIVReuse(HaveCommonExprs, AllUsesAreAddresses,
- AllUsesAreOutsideLoop,
- Stride, ReuseIV, ReplacedTy,
- UsersToProcess);
- if (!RewriteFactor->isZero())
- PrepareToStrengthReduceFromSmallerStride(UsersToProcess, CommonBaseV,
- ReuseIV, PreInsertPt);
- else {
- IVIncInsertPt = FindIVIncInsertPt(UsersToProcess, L);
- PrepareToStrengthReduceWithNewPhi(UsersToProcess, Stride, CommonExprs,
- CommonBaseV, IVIncInsertPt,
- L, PreheaderRewriter);
- }
- }
+ // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale by
+ // putting the scaled register in the other operand of the icmp.
+ if (AM.Scale != 0 && AM.Scale != -1)
+ return false;
- // Process all the users now, replacing their strided uses with
- // strength-reduced forms. This outer loop handles all bases, the inner
- // loop handles all users of a particular base.
- while (!UsersToProcess.empty()) {
- const SCEV *Base = UsersToProcess.back().Base;
- Instruction *Inst = UsersToProcess.back().Inst;
-
- // Emit the code for Base into the preheader.
- Value *BaseV = 0;
- if (!Base->isZero()) {
- BaseV = PreheaderRewriter.expandCodeFor(Base, 0, PreInsertPt);
-
- DEBUG(dbgs() << " INSERTING code for BASE = " << *Base << ":");
- if (BaseV->hasName())
- DEBUG(dbgs() << " Result value name = %" << BaseV->getName());
- DEBUG(dbgs() << "\n");
-
- // If BaseV is a non-zero constant, make sure that it gets inserted into
- // the preheader, instead of being forward substituted into the uses. We
- // do this by forcing a BitCast (noop cast) to be inserted into the
- // preheader in this case.
- if (!fitsInAddressMode(Base, getAccessType(Inst), TLI, false) &&
- isa<Constant>(BaseV)) {
- // We want this constant emitted into the preheader! This is just
- // using cast as a copy so BitCast (no-op cast) is appropriate
- BaseV = new BitCastInst(BaseV, BaseV->getType(), "preheaderinsert",
- PreInsertPt);
- }
+ // If we have low-level target information, ask the target if it can fold an
+ // integer immediate on an icmp.
+ if (AM.BaseOffs != 0) {
+ if (TLI) return TLI->isLegalICmpImmediate(-AM.BaseOffs);
+ return false;
}
- // Emit the code to add the immediate offset to the Phi value, just before
- // the instructions that we identified as using this stride and base.
- do {
- // FIXME: Use emitted users to emit other users.
- BasedUser &User = UsersToProcess.back();
-
- DEBUG(dbgs() << " Examining ");
- if (User.isUseOfPostIncrementedValue)
- DEBUG(dbgs() << "postinc");
- else
- DEBUG(dbgs() << "preinc");
- DEBUG(dbgs() << " use ");
- DEBUG(WriteAsOperand(dbgs(), UsersToProcess.back().OperandValToReplace,
- /*PrintType=*/false));
- DEBUG(dbgs() << " in Inst: " << *User.Inst);
-
- // If this instruction wants to use the post-incremented value, move it
- // after the post-inc and use its value instead of the PHI.
- Value *RewriteOp = User.Phi;
- if (User.isUseOfPostIncrementedValue) {
- RewriteOp = User.Phi->getIncomingValueForBlock(LatchBlock);
- // If this user is in the loop, make sure it is the last thing in the
- // loop to ensure it is dominated by the increment. In case it's the
- // only use of the iv, the increment instruction is already before the
- // use.
- if (L->contains(User.Inst) && User.Inst != IVIncInsertPt)
- User.Inst->moveBefore(IVIncInsertPt);
- }
-
- const SCEV *RewriteExpr = SE->getUnknown(RewriteOp);
-
- if (SE->getEffectiveSCEVType(RewriteOp->getType()) !=
- SE->getEffectiveSCEVType(ReplacedTy)) {
- assert(SE->getTypeSizeInBits(RewriteOp->getType()) >
- SE->getTypeSizeInBits(ReplacedTy) &&
- "Unexpected widening cast!");
- RewriteExpr = SE->getTruncateExpr(RewriteExpr, ReplacedTy);
- }
+ return true;
- // If we had to insert new instructions for RewriteOp, we have to
- // consider that they may not have been able to end up immediately
- // next to RewriteOp, because non-PHI instructions may never precede
- // PHI instructions in a block. In this case, remember where the last
- // instruction was inserted so that if we're replacing a different
- // PHI node, we can use the later point to expand the final
- // RewriteExpr.
- Instruction *NewBasePt = dyn_cast<Instruction>(RewriteOp);
- if (RewriteOp == User.Phi) NewBasePt = 0;
-
- // Clear the SCEVExpander's expression map so that we are guaranteed
- // to have the code emitted where we expect it.
- Rewriter.clear();
-
- // If we are reusing the iv, then it must be multiplied by a constant
- // factor to take advantage of the addressing mode scale component.
- if (!RewriteFactor->isZero()) {
- // If we're reusing an IV with a nonzero base (currently this happens
- // only when all reuses are outside the loop) subtract that base here.
- // The base has been used to initialize the PHI node but we don't want
- // it here.
- if (!ReuseIV.Base->isZero()) {
- const SCEV *typedBase = ReuseIV.Base;
- if (SE->getEffectiveSCEVType(RewriteExpr->getType()) !=
- SE->getEffectiveSCEVType(ReuseIV.Base->getType())) {
- // It's possible the original IV is a larger type than the new IV,
- // in which case we have to truncate the Base. We checked in
- // RequiresTypeConversion that this is valid.
- assert(SE->getTypeSizeInBits(RewriteExpr->getType()) <
- SE->getTypeSizeInBits(ReuseIV.Base->getType()) &&
- "Unexpected lengthening conversion!");
- typedBase = SE->getTruncateExpr(ReuseIV.Base,
- RewriteExpr->getType());
- }
- RewriteExpr = SE->getMinusSCEV(RewriteExpr, typedBase);
- }
+ case LSRUse::Basic:
+ // Only handle single-register values.
+ return !AM.BaseGV && AM.Scale == 0 && AM.BaseOffs == 0;
- // Multiply old variable, with base removed, by new scale factor.
- RewriteExpr = SE->getMulExpr(RewriteFactor,
- RewriteExpr);
-
- // The common base is emitted in the loop preheader. But since we
- // are reusing an IV, it has not been used to initialize the PHI node.
- // Add it to the expression used to rewrite the uses.
- // When this use is outside the loop, we earlier subtracted the
- // common base, and are adding it back here. Use the same expression
- // as before, rather than CommonBaseV, so DAGCombiner will zap it.
- if (!CommonExprs->isZero()) {
- if (L->contains(User.Inst))
- RewriteExpr = SE->getAddExpr(RewriteExpr,
- SE->getUnknown(CommonBaseV));
- else
- RewriteExpr = SE->getAddExpr(RewriteExpr, CommonExprs);
- }
- }
-
- // Now that we know what we need to do, insert code before User for the
- // immediate and any loop-variant expressions.
- if (BaseV)
- // Add BaseV to the PHI value if needed.
- RewriteExpr = SE->getAddExpr(RewriteExpr, SE->getUnknown(BaseV));
-
- User.RewriteInstructionToUseNewBase(RewriteExpr, NewBasePt,
- Rewriter, L, this,
- DeadInsts, SE);
-
- // Mark old value we replaced as possibly dead, so that it is eliminated
- // if we just replaced the last use of that value.
- DeadInsts.push_back(User.OperandValToReplace);
-
- UsersToProcess.pop_back();
- ++NumReduced;
-
- // If there are any more users to process with the same base, process them
- // now. We sorted by base above, so we just have to check the last elt.
- } while (!UsersToProcess.empty() && UsersToProcess.back().Base == Base);
- // TODO: Next, find out which base index is the most common, pull it out.
- }
-
- // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but
- // different starting values, into different PHIs.
-}
-
-void LoopStrengthReduce::StrengthReduceIVUsers(Loop *L) {
- // Note: this processes each stride/type pair individually. All users
- // passed into StrengthReduceIVUsersOfStride have the same type AND stride.
- // Also, note that we iterate over IVUsesByStride indirectly by using
- // StrideOrder. This extra layer of indirection makes the ordering of
- // strides deterministic - not dependent on map order.
- for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e; ++Stride) {
- std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
- IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
- assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
- // FIXME: Generalize to non-affine IV's.
- if (!SI->first->isLoopInvariant(L))
- continue;
- StrengthReduceIVUsersOfStride(SI->first, *SI->second, L);
+ case LSRUse::Special:
+ // Only handle -1 scales, or no scale.
+ return AM.Scale == 0 || AM.Scale == -1;
}
+
+ return false;
}
-/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
-/// set the IV user and stride information and return true, otherwise return
-/// false.
-bool LoopStrengthReduce::FindIVUserForCond(ICmpInst *Cond,
- IVStrideUse *&CondUse,
- const SCEV* &CondStride) {
- for (unsigned Stride = 0, e = IU->StrideOrder.size();
- Stride != e && !CondUse; ++Stride) {
- std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
- IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
- assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
-
- for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
- E = SI->second->Users.end(); UI != E; ++UI)
- if (UI->getUser() == Cond) {
- // NOTE: we could handle setcc instructions with multiple uses here, but
-        // InstCombine does it as well for simple uses; it's not clear that it
- // occurs enough in real life to handle.
- CondUse = UI;
- CondStride = SI->first;
- return true;
- }
+static bool isLegalUse(TargetLowering::AddrMode AM,
+ int64_t MinOffset, int64_t MaxOffset,
+ LSRUse::KindType Kind, const Type *AccessTy,
+ const TargetLowering *TLI) {
+ // Check for overflow.
+ if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
+ (MinOffset > 0))
+ return false;
+ AM.BaseOffs = (uint64_t)AM.BaseOffs + MinOffset;
+ if (isLegalUse(AM, Kind, AccessTy, TLI)) {
+ AM.BaseOffs = (uint64_t)AM.BaseOffs - MinOffset;
+ // Check for overflow.
+ if (((int64_t)((uint64_t)AM.BaseOffs + MaxOffset) > AM.BaseOffs) !=
+ (MaxOffset > 0))
+ return false;
+ AM.BaseOffs = (uint64_t)AM.BaseOffs + MaxOffset;
+ return isLegalUse(AM, Kind, AccessTy, TLI);
}
return false;
}
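
The overflow guard in the offset-range variant of isLegalUse is terse enough to be worth restating: the addition is performed in uint64_t, where wraparound is well defined, and the result is then compared against the original value; if the direction of the change disagrees with the sign of the offset, the signed addition would have overflowed. A standalone sketch of the same idiom (addWouldOverflow is an illustrative name, not an LLVM API):

#include <cstdint>

// Returns true if Base + Offset would overflow the int64_t range.
// Same idiom as the guard in isLegalUse: do the addition with unsigned
// wraparound, then check that the result moved in the direction the sign of
// Offset says it should. The uint64_t-to-int64_t conversion is assumed to be
// the usual two's-complement one.
bool addWouldOverflow(int64_t Base, int64_t Offset) {
  int64_t Sum = (int64_t)((uint64_t)Base + (uint64_t)Offset);
  return (Sum > Base) != (Offset > 0);
}
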
-namespace {
-  // Constant strides come first, which in turn are sorted by their absolute
-  // values. If absolute values are the same, then positive strides come first.
- // e.g.
- // 4, -1, X, 1, 2 ==> 1, -1, 2, 4, X
- struct StrideCompare {
- const ScalarEvolution *SE;
- explicit StrideCompare(const ScalarEvolution *se) : SE(se) {}
-
- bool operator()(const SCEV *LHS, const SCEV *RHS) {
- const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS);
- const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
- if (LHSC && RHSC) {
- int64_t LV = LHSC->getValue()->getSExtValue();
- int64_t RV = RHSC->getValue()->getSExtValue();
- uint64_t ALV = (LV < 0) ? -LV : LV;
- uint64_t ARV = (RV < 0) ? -RV : RV;
- if (ALV == ARV) {
- if (LV != RV)
- return LV > RV;
- } else {
- return ALV < ARV;
- }
+static bool isAlwaysFoldable(int64_t BaseOffs,
+ GlobalValue *BaseGV,
+ bool HasBaseReg,
+ LSRUse::KindType Kind, const Type *AccessTy,
+ const TargetLowering *TLI,
+ ScalarEvolution &SE) {
+ // Fast-path: zero is always foldable.
+ if (BaseOffs == 0 && !BaseGV) return true;
+
+ // Conservatively, create an address with an immediate and a
+ // base and a scale.
+ TargetLowering::AddrMode AM;
+ AM.BaseOffs = BaseOffs;
+ AM.BaseGV = BaseGV;
+ AM.HasBaseReg = HasBaseReg;
+ AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;
+
+ return isLegalUse(AM, Kind, AccessTy, TLI);
+}
- // If it's the same value but different type, sort by bit width so
- // that we emit larger induction variables before smaller
- // ones, letting the smaller be re-written in terms of larger ones.
- return SE->getTypeSizeInBits(RHS->getType()) <
- SE->getTypeSizeInBits(LHS->getType());
- }
- return LHSC && !RHSC;
- }
- };
+static bool isAlwaysFoldable(const SCEV *S,
+ int64_t MinOffset, int64_t MaxOffset,
+ bool HasBaseReg,
+ LSRUse::KindType Kind, const Type *AccessTy,
+ const TargetLowering *TLI,
+ ScalarEvolution &SE) {
+ // Fast-path: zero is always foldable.
+ if (S->isZero()) return true;
+
+ // Conservatively, create an address with an immediate and a
+ // base and a scale.
+ int64_t BaseOffs = ExtractImmediate(S, SE);
+ GlobalValue *BaseGV = ExtractSymbol(S, SE);
+
+ // If there's anything else involved, it's not foldable.
+ if (!S->isZero()) return false;
+
+ // Fast-path: zero is always foldable.
+ if (BaseOffs == 0 && !BaseGV) return true;
+
+ // Conservatively, create an address with an immediate and a
+ // base and a scale.
+ TargetLowering::AddrMode AM;
+ AM.BaseOffs = BaseOffs;
+ AM.BaseGV = BaseGV;
+ AM.HasBaseReg = HasBaseReg;
+ AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;
+
+ return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
}
-/// ChangeCompareStride - If a loop termination compare instruction is the
-/// only use of its stride, and the compaison is against a constant value,
-/// try eliminate the stride by moving the compare instruction to another
-/// stride and change its constant operand accordingly. e.g.
-///
-/// loop:
-/// ...
-/// v1 = v1 + 3
-/// v2 = v2 + 1
-/// if (v2 < 10) goto loop
-/// =>
-/// loop:
-/// ...
-/// v1 = v1 + 3
-/// if (v1 < 30) goto loop
-ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
- IVStrideUse* &CondUse,
- const SCEV* &CondStride,
- bool PostPass) {
- // If there's only one stride in the loop, there's nothing to do here.
- if (IU->StrideOrder.size() < 2)
- return Cond;
- // If there are other users of the condition's stride, don't bother
- // trying to change the condition because the stride will still
- // remain.
- std::map<const SCEV *, IVUsersOfOneStride *>::iterator I =
- IU->IVUsesByStride.find(CondStride);
- if (I == IU->IVUsesByStride.end())
- return Cond;
- if (I->second->Users.size() > 1) {
- for (ilist<IVStrideUse>::iterator II = I->second->Users.begin(),
- EE = I->second->Users.end(); II != EE; ++II) {
- if (II->getUser() == Cond)
- continue;
- if (!isInstructionTriviallyDead(II->getUser()))
- return Cond;
- }
+/// FormulaSorter - This class implements an ordering for formulae which sorts
+/// them by their standalone cost.
+class FormulaSorter {
+ /// These two sets are kept empty, so that we compute standalone costs.
+ DenseSet<const SCEV *> VisitedRegs;
+ SmallPtrSet<const SCEV *, 16> Regs;
+ Loop *L;
+ LSRUse *LU;
+ ScalarEvolution &SE;
+ DominatorTree &DT;
+
+public:
+ FormulaSorter(Loop *l, LSRUse &lu, ScalarEvolution &se, DominatorTree &dt)
+ : L(l), LU(&lu), SE(se), DT(dt) {}
+
+ bool operator()(const Formula &A, const Formula &B) {
+ Cost CostA;
+ CostA.RateFormula(A, Regs, VisitedRegs, L, LU->Offsets, SE, DT);
+ Regs.clear();
+ Cost CostB;
+ CostB.RateFormula(B, Regs, VisitedRegs, L, LU->Offsets, SE, DT);
+ Regs.clear();
+ return CostA < CostB;
+ }
+};
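
FormulaSorter deliberately keeps its VisitedRegs and Regs sets empty so that each formula is rated on its own, independent of whatever the other operand of the comparison contributed. The same shape, with a toy cost model standing in for Cost::RateFormula, looks like this; ToyFormula, StandaloneCostLess and sortByStandaloneCost are illustrative names only:

#include <algorithm>
#include <vector>

// Toy formula: how many base registers it needs and whether it also needs a
// scaled register. The real pass rates formulae with a much richer Cost model.
struct ToyFormula {
  unsigned NumBaseRegs;
  bool HasScaledReg;
};

// Comparator in the spirit of FormulaSorter: each operand is rated standalone
// (no state is carried from rating A into rating B), and the cheaper formula
// sorts first.
struct StandaloneCostLess {
  bool operator()(const ToyFormula &A, const ToyFormula &B) const {
    unsigned CostA = A.NumBaseRegs + (A.HasScaledReg ? 1 : 0);
    unsigned CostB = B.NumBaseRegs + (B.HasScaledReg ? 1 : 0);
    return CostA < CostB;
  }
};

void sortByStandaloneCost(std::vector<ToyFormula> &Formulae) {
  std::sort(Formulae.begin(), Formulae.end(), StandaloneCostLess());
}
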
+
+/// LSRInstance - This class holds state for the main loop strength reduction
+/// logic.
+class LSRInstance {
+ IVUsers &IU;
+ ScalarEvolution &SE;
+ DominatorTree &DT;
+ const TargetLowering *const TLI;
+ Loop *const L;
+ bool Changed;
+
+ /// IVIncInsertPos - This is the insert position that the current loop's
+ /// induction variable increment should be placed. In simple loops, this is
+ /// the latch block's terminator. But in more complicated cases, this is a
+ /// position which will dominate all the in-loop post-increment users.
+ Instruction *IVIncInsertPos;
+
+ /// Factors - Interesting factors between use strides.
+ SmallSetVector<int64_t, 8> Factors;
+
+ /// Types - Interesting use types, to facilitate truncation reuse.
+ SmallSetVector<const Type *, 4> Types;
+
+ /// Fixups - The list of operands which are to be replaced.
+ SmallVector<LSRFixup, 16> Fixups;
+
+ /// Uses - The list of interesting uses.
+ SmallVector<LSRUse, 16> Uses;
+
+ /// RegUses - Track which uses use which register candidates.
+ RegUseTracker RegUses;
+
+ void OptimizeShadowIV();
+ bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
+ ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
+ bool OptimizeLoopTermCond();
+
+ void CollectInterestingTypesAndFactors();
+ void CollectFixupsAndInitialFormulae();
+
+ LSRFixup &getNewFixup() {
+ Fixups.push_back(LSRFixup());
+ return Fixups.back();
}
- // Only handle constant strides for now.
- const SCEVConstant *SC = dyn_cast<SCEVConstant>(CondStride);
- if (!SC) return Cond;
-
- ICmpInst::Predicate Predicate = Cond->getPredicate();
- int64_t CmpSSInt = SC->getValue()->getSExtValue();
- unsigned BitWidth = SE->getTypeSizeInBits(CondStride->getType());
- uint64_t SignBit = 1ULL << (BitWidth-1);
- const Type *CmpTy = Cond->getOperand(0)->getType();
- const Type *NewCmpTy = NULL;
- unsigned TyBits = SE->getTypeSizeInBits(CmpTy);
- unsigned NewTyBits = 0;
- const SCEV *NewStride = NULL;
- Value *NewCmpLHS = NULL;
- Value *NewCmpRHS = NULL;
- int64_t Scale = 1;
- const SCEV *NewOffset = SE->getIntegerSCEV(0, CmpTy);
-
- if (ConstantInt *C = dyn_cast<ConstantInt>(Cond->getOperand(1))) {
- int64_t CmpVal = C->getValue().getSExtValue();
-
- // Check the relevant induction variable for conformance to
- // the pattern.
- const SCEV *IV = SE->getSCEV(Cond->getOperand(0));
- const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
- if (!AR || !AR->isAffine())
- return Cond;
-
- const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
-    // Check the stride constant and the comparison constant signs to detect
- // overflow.
- if (StartC) {
- if ((StartC->getValue()->getSExtValue() < CmpVal && CmpSSInt < 0) ||
- (StartC->getValue()->getSExtValue() > CmpVal && CmpSSInt > 0))
- return Cond;
- } else {
- // More restrictive check for the other cases.
- if ((CmpVal & SignBit) != (CmpSSInt & SignBit))
- return Cond;
- }
-
- // Look for a suitable stride / iv as replacement.
- for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
- std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
- IU->IVUsesByStride.find(IU->StrideOrder[i]);
- if (!isa<SCEVConstant>(SI->first) || SI->second->Users.empty())
- continue;
- int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
- if (SSInt == CmpSSInt ||
- abs64(SSInt) < abs64(CmpSSInt) ||
- (SSInt % CmpSSInt) != 0)
- continue;
- Scale = SSInt / CmpSSInt;
- int64_t NewCmpVal = CmpVal * Scale;
+ // Support for sharing of LSRUses between LSRFixups.
+ typedef DenseMap<const SCEV *, size_t> UseMapTy;
+ UseMapTy UseMap;
+
+ bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
+ LSRUse::KindType Kind, const Type *AccessTy);
+
+ std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
+ LSRUse::KindType Kind,
+ const Type *AccessTy);
+
+public:
+ void InsertInitialFormula(const SCEV *S, Loop *L, LSRUse &LU, size_t LUIdx);
+ void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
+ void CountRegisters(const Formula &F, size_t LUIdx);
+ bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);
+
+ void CollectLoopInvariantFixupsAndFormulae();
+
+ void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
+ unsigned Depth = 0);
+ void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
+ void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
+ void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
+ void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
+ void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
+ void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
+ void GenerateCrossUseConstantOffsets();
+ void GenerateAllReuseFormulae();
+
+ void FilterOutUndesirableDedicatedRegisters();
+ void NarrowSearchSpaceUsingHeuristics();
+
+ void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
+ Cost &SolutionCost,
+ SmallVectorImpl<const Formula *> &Workspace,
+ const Cost &CurCost,
+ const SmallPtrSet<const SCEV *, 16> &CurRegs,
+ DenseSet<const SCEV *> &VisitedRegs) const;
+ void Solve(SmallVectorImpl<const Formula *> &Solution) const;
+
+ Value *Expand(const LSRFixup &LF,
+ const Formula &F,
+ BasicBlock::iterator IP, Loop *L, Instruction *IVIncInsertPos,
+ SCEVExpander &Rewriter,
+ SmallVectorImpl<WeakVH> &DeadInsts,
+ ScalarEvolution &SE, DominatorTree &DT) const;
+ void Rewrite(const LSRFixup &LF,
+ const Formula &F,
+ Loop *L, Instruction *IVIncInsertPos,
+ SCEVExpander &Rewriter,
+ SmallVectorImpl<WeakVH> &DeadInsts,
+ ScalarEvolution &SE, DominatorTree &DT,
+ Pass *P) const;
+ void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
+ Pass *P);
+
+ LSRInstance(const TargetLowering *tli, Loop *l, Pass *P);
+
+ bool getChanged() const { return Changed; }
+
+ void print_factors_and_types(raw_ostream &OS) const;
+ void print_fixups(raw_ostream &OS) const;
+ void print_uses(raw_ostream &OS) const;
+ void print(raw_ostream &OS) const;
+ void dump() const;
+};
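
The Solve/SolveRecurse pair declared above is, at heart, a branch-and-bound search: pick one formula per use, track which registers the partial solution already requires, and prune any branch that cannot beat the best complete solution found so far. A much-simplified standalone sketch of that shape, pruning only on the number of distinct registers rather than on the pass's full Cost model; all names here are illustrative, and every use is assumed to have at least one candidate formula:

#include <cstddef>
#include <set>
#include <vector>

// A candidate formula is modelled as the set of register ids it needs.
typedef std::vector<int> ToyCandidate;

// Recursive branch-and-bound: Workspace[Depth] records which candidate was
// chosen for each use; Best and BestNumRegs hold the best complete solution.
static void solveRecurse(const std::vector<std::vector<ToyCandidate> > &Uses,
                         size_t Depth, const std::set<int> &CurRegs,
                         std::vector<size_t> &Workspace,
                         size_t &BestNumRegs, std::vector<size_t> &Best) {
  // Prune: the register count only grows, so a partial solution that already
  // matches the best complete solution cannot improve on it.
  if (CurRegs.size() >= BestNumRegs)
    return;
  if (Depth == Uses.size()) {
    BestNumRegs = CurRegs.size();
    Best = Workspace;
    return;
  }
  for (size_t i = 0; i != Uses[Depth].size(); ++i) {
    std::set<int> NewRegs = CurRegs;
    NewRegs.insert(Uses[Depth][i].begin(), Uses[Depth][i].end());
    Workspace[Depth] = i;
    solveRecurse(Uses, Depth + 1, NewRegs, Workspace, BestNumRegs, Best);
  }
}

// Returns, for each use, the index of the chosen candidate formula.
std::vector<size_t> solve(const std::vector<std::vector<ToyCandidate> > &Uses) {
  std::vector<size_t> Best(Uses.size()), Workspace(Uses.size());
  size_t BestNumRegs = (size_t)-1;
  solveRecurse(Uses, 0, std::set<int>(), Workspace, BestNumRegs, Best);
  return Best;
}
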
-      // If the old icmp value fits in the icmp immediate field but the new
-      // one doesn't, try something else.
- if (TLI &&
- TLI->isLegalICmpImmediate(CmpVal) &&
- !TLI->isLegalICmpImmediate(NewCmpVal))
- continue;
+}
- APInt Mul = APInt(BitWidth*2, CmpVal, true);
- Mul = Mul * APInt(BitWidth*2, Scale, true);
- // Check for overflow.
- if (!Mul.isSignedIntN(BitWidth))
- continue;
- // Check for overflow in the stride's type too.
- if (!Mul.isSignedIntN(SE->getTypeSizeInBits(SI->first->getType())))
- continue;
+/// OptimizeShadowIV - If IV is used in an int-to-float cast
+/// inside the loop, then try to eliminate the cast operation.
+void LSRInstance::OptimizeShadowIV() {
+ const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
+ if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
+ return;
- // Watch out for overflow.
- if (ICmpInst::isSigned(Predicate) &&
- (CmpVal & SignBit) != (NewCmpVal & SignBit))
- continue;
+ for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
+ UI != E; /* empty */) {
+ IVUsers::const_iterator CandidateUI = UI;
+ ++UI;
+ Instruction *ShadowUse = CandidateUI->getUser();
+ const Type *DestTy = NULL;
- // Pick the best iv to use trying to avoid a cast.
- NewCmpLHS = NULL;
- for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
- E = SI->second->Users.end(); UI != E; ++UI) {
- Value *Op = UI->getOperandValToReplace();
-
- // If the IVStrideUse implies a cast, check for an actual cast which
- // can be used to find the original IV expression.
- if (SE->getEffectiveSCEVType(Op->getType()) !=
- SE->getEffectiveSCEVType(SI->first->getType())) {
- CastInst *CI = dyn_cast<CastInst>(Op);
- // If it's not a simple cast, it's complicated.
- if (!CI)
- continue;
- // If it's a cast from a type other than the stride type,
- // it's complicated.
- if (CI->getOperand(0)->getType() != SI->first->getType())
- continue;
- // Ok, we found the IV expression in the stride's type.
- Op = CI->getOperand(0);
- }
+    /* If shadow use is an int->float cast, then insert a second IV
+ to eliminate this cast.
- NewCmpLHS = Op;
- if (NewCmpLHS->getType() == CmpTy)
- break;
- }
- if (!NewCmpLHS)
- continue;
+ for (unsigned i = 0; i < n; ++i)
+ foo((double)i);
- NewCmpTy = NewCmpLHS->getType();
- NewTyBits = SE->getTypeSizeInBits(NewCmpTy);
- const Type *NewCmpIntTy = IntegerType::get(Cond->getContext(), NewTyBits);
- if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
- // Check if it is possible to rewrite it using
- // an iv / stride of a smaller integer type.
- unsigned Bits = NewTyBits;
- if (ICmpInst::isSigned(Predicate))
- --Bits;
- uint64_t Mask = (1ULL << Bits) - 1;
- if (((uint64_t)NewCmpVal & Mask) != (uint64_t)NewCmpVal)
- continue;
- }
+ is transformed into
- // Don't rewrite if use offset is non-constant and the new type is
- // of a different type.
- // FIXME: too conservative?
- if (NewTyBits != TyBits && !isa<SCEVConstant>(CondUse->getOffset()))
- continue;
+ double d = 0.0;
+ for (unsigned i = 0; i < n; ++i, ++d)
+ foo(d);
+ */
+ if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser()))
+ DestTy = UCast->getDestTy();
+ else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser()))
+ DestTy = SCast->getDestTy();
+ if (!DestTy) continue;
- if (!PostPass) {
- bool AllUsesAreAddresses = true;
- bool AllUsesAreOutsideLoop = true;
- std::vector<BasedUser> UsersToProcess;
- const SCEV *CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
- AllUsesAreAddresses,
- AllUsesAreOutsideLoop,
- UsersToProcess);
- // Avoid rewriting the compare instruction with an iv of new stride
- // if it's likely the new stride uses will be rewritten using the
- // stride of the compare instruction.
- if (AllUsesAreAddresses &&
- ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess))
- continue;
- }
+ if (TLI) {
+ // If target does not support DestTy natively then do not apply
+ // this transformation.
+ EVT DVT = TLI->getValueType(DestTy);
+ if (!TLI->isTypeLegal(DVT)) continue;
+ }
- // Avoid rewriting the compare instruction with an iv which has
- // implicit extension or truncation built into it.
- // TODO: This is over-conservative.
- if (SE->getTypeSizeInBits(CondUse->getOffset()->getType()) != TyBits)
- continue;
+ PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
+ if (!PH) continue;
+ if (PH->getNumIncomingValues() != 2) continue;
- // If scale is negative, use swapped predicate unless it's testing
- // for equality.
- if (Scale < 0 && !Cond->isEquality())
- Predicate = ICmpInst::getSwappedPredicate(Predicate);
+ const Type *SrcTy = PH->getType();
+ int Mantissa = DestTy->getFPMantissaWidth();
+ if (Mantissa == -1) continue;
+ if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
+ continue;
- NewStride = IU->StrideOrder[i];
- if (!isa<PointerType>(NewCmpTy))
- NewCmpRHS = ConstantInt::get(NewCmpTy, NewCmpVal);
- else {
- Constant *CI = ConstantInt::get(NewCmpIntTy, NewCmpVal);
- NewCmpRHS = ConstantExpr::getIntToPtr(CI, NewCmpTy);
- }
- NewOffset = TyBits == NewTyBits
- ? SE->getMulExpr(CondUse->getOffset(),
- SE->getConstant(CmpTy, Scale))
- : SE->getConstant(NewCmpIntTy,
- cast<SCEVConstant>(CondUse->getOffset())->getValue()
- ->getSExtValue()*Scale);
- break;
+ unsigned Entry, Latch;
+ if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
+ Entry = 0;
+ Latch = 1;
+ } else {
+ Entry = 1;
+ Latch = 0;
}
- }
-  // Forgo this transformation if the increment happens to be
- // unfortunately positioned after the condition, and the condition
- // has multiple uses which prevent it from being moved immediately
- // before the branch. See
- // test/Transforms/LoopStrengthReduce/change-compare-stride-trickiness-*.ll
- // for an example of this situation.
- if (!Cond->hasOneUse()) {
- for (BasicBlock::iterator I = Cond, E = Cond->getParent()->end();
- I != E; ++I)
- if (I == NewCmpLHS)
- return Cond;
- }
+ ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
+ if (!Init) continue;
+ Constant *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());
- if (NewCmpRHS) {
- // Create a new compare instruction using new stride / iv.
- ICmpInst *OldCond = Cond;
- // Insert new compare instruction.
- Cond = new ICmpInst(OldCond, Predicate, NewCmpLHS, NewCmpRHS,
- L->getHeader()->getName() + ".termcond");
+ BinaryOperator *Incr =
+ dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
+ if (!Incr) continue;
+ if (Incr->getOpcode() != Instruction::Add
+ && Incr->getOpcode() != Instruction::Sub)
+ continue;
+
+ /* Initialize new IV, double d = 0.0 in above example. */
+ ConstantInt *C = NULL;
+ if (Incr->getOperand(0) == PH)
+ C = dyn_cast<ConstantInt>(Incr->getOperand(1));
+ else if (Incr->getOperand(1) == PH)
+ C = dyn_cast<ConstantInt>(Incr->getOperand(0));
+ else
+ continue;
- DEBUG(dbgs() << " Change compare stride in Inst " << *OldCond);
- DEBUG(dbgs() << " to " << *Cond << '\n');
+ if (!C) continue;
- // Remove the old compare instruction. The old indvar is probably dead too.
- DeadInsts.push_back(CondUse->getOperandValToReplace());
- OldCond->replaceAllUsesWith(Cond);
- OldCond->eraseFromParent();
+ // Ignore negative constants, as the code below doesn't handle them
+ // correctly. TODO: Remove this restriction.
+ if (!C->getValue().isStrictlyPositive()) continue;
- IU->IVUsesByStride[NewStride]->addUser(NewOffset, Cond, NewCmpLHS);
- CondUse = &IU->IVUsesByStride[NewStride]->Users.back();
- CondStride = NewStride;
- ++NumEliminated;
- Changed = true;
+ /* Add new PHINode. */
+ PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);
+
+ /* create new increment. '++d' in above example. */
+ Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
+ BinaryOperator *NewIncr =
+ BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
+ Instruction::FAdd : Instruction::FSub,
+ NewPH, CFP, "IV.S.next.", Incr);
+
+ NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
+ NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));
+
+ /* Remove cast operation */
+ ShadowUse->replaceAllUsesWith(NewPH);
+ ShadowUse->eraseFromParent();
+ break;
}
+}
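
At the source level, the transformation OptimizeShadowIV performs corresponds to the before/after pair already sketched in the block comment above; restated here as compilable C++ (foo is a placeholder callee, and the function names are illustrative):

void foo(double);

// Before: every iteration converts the integer IV to floating point.
void shadowIVBefore(unsigned n) {
  for (unsigned i = 0; i < n; ++i)
    foo((double)i);
}

// After: a second, floating-point "shadow" IV carries the converted value,
// so the int-to-float cast disappears from the loop body.
void shadowIVAfter(unsigned n) {
  double d = 0.0;
  for (unsigned i = 0; i < n; ++i, ++d)
    foo(d);
}
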
- return Cond;
+/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
+/// set the IV user and stride information and return true, otherwise return
+/// false.
+bool LSRInstance::FindIVUserForCond(ICmpInst *Cond,
+ IVStrideUse *&CondUse) {
+ for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
+ if (UI->getUser() == Cond) {
+ // NOTE: we could handle setcc instructions with multiple uses here, but
+      // InstCombine does it as well for simple uses; it's not clear that it
+ // occurs enough in real life to handle.
+ CondUse = UI;
+ return true;
+ }
+ return false;
}
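
For reference, the ChangeCompareStride rewrite deleted above (see its header comment earlier in this section) can be restated at the source level: when the exit compare is the only user of the stride-1 IV, the compare is moved onto the stride-3 IV and the constant is scaled by the stride ratio. A compilable restatement of that comment's example; body is a placeholder callee and the function names are illustrative:

void body(int);

// Before: two IVs; the exit test uses the stride-1 IV, which has no other uses.
void beforeChangeCompareStride() {
  int v1 = 0, v2 = 0;
  do {
    v1 += 3;
    v2 += 1;
    body(v1);
  } while (v2 < 10);
}

// After: the compare has been moved onto the stride-3 IV, with the constant
// scaled by the stride ratio (10 * 3 == 30); v2 and its increment are now dead.
void afterChangeCompareStride() {
  int v1 = 0;
  do {
    v1 += 3;
    body(v1);
  } while (v1 < 30);
}
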
/// OptimizeMax - Rewrite the loop's terminating condition if it uses
@@ -2087,7 +1371,7 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
/// are designed around them. The most obvious example of this is the
/// LoopInfo analysis, which doesn't remember trip count values. It
/// expects to be able to rediscover the trip count each time it is
-/// needed, and it does this using a simple analyis that only succeeds if
+/// needed, and it does this using a simple analysis that only succeeds if
/// the loop has a canonical induction variable.
///
/// However, when it comes time to generate code, the maximum operation
@@ -2097,8 +1381,7 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
/// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
/// the instructions for the maximum computation.
///
-ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond,
- IVStrideUse* &CondUse) {
+ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
// Check that the loop matches the pattern we're looking for.
if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
Cond->getPredicate() != CmpInst::ICMP_NE)
@@ -2107,19 +1390,19 @@ ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond,
SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
if (!Sel || !Sel->hasOneUse()) return Cond;
- const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
+ const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
return Cond;
- const SCEV *One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());
+ const SCEV *One = SE.getIntegerSCEV(1, BackedgeTakenCount->getType());
// Add one to the backedge-taken count to get the trip count.
- const SCEV *IterationCount = SE->getAddExpr(BackedgeTakenCount, One);
+ const SCEV *IterationCount = SE.getAddExpr(BackedgeTakenCount, One);
// Check for a max calculation that matches the pattern.
if (!isa<SCEVSMaxExpr>(IterationCount) && !isa<SCEVUMaxExpr>(IterationCount))
return Cond;
const SCEVNAryExpr *Max = cast<SCEVNAryExpr>(IterationCount);
- if (Max != SE->getSCEV(Sel)) return Cond;
+ if (Max != SE.getSCEV(Sel)) return Cond;
// To handle a max with more than two operands, this optimization would
// require additional checking and setup.
@@ -2129,14 +1412,13 @@ ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond,
const SCEV *MaxLHS = Max->getOperand(0);
const SCEV *MaxRHS = Max->getOperand(1);
if (!MaxLHS || MaxLHS != One) return Cond;
-
// Check the relevant induction variable for conformance to
// the pattern.
- const SCEV *IV = SE->getSCEV(Cond->getOperand(0));
+ const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
if (!AR || !AR->isAffine() ||
AR->getStart() != One ||
- AR->getStepRecurrence(*SE) != One)
+ AR->getStepRecurrence(SE) != One)
return Cond;
assert(AR->getLoop() == L &&
@@ -2145,9 +1427,9 @@ ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond,
// Check the right operand of the select, and remember it, as it will
// be used in the new comparison instruction.
Value *NewRHS = 0;
- if (SE->getSCEV(Sel->getOperand(1)) == MaxRHS)
+ if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
NewRHS = Sel->getOperand(1);
- else if (SE->getSCEV(Sel->getOperand(2)) == MaxRHS)
+ else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
NewRHS = Sel->getOperand(2);
if (!NewRHS) return Cond;
@@ -2174,552 +1456,1764 @@ ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond,
return NewCond;
}
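
To make the OptimizeMax discussion concrete in source terms: canonicalization can leave the exit test as an inequality against a max-based trip count, and this routine recovers the direct bounded compare and deletes the select. The following is only a rough, hedged sketch at the C level (the real code pattern-matches the SCEV trip count and the select feeding the compare, not source syntax); the two functions below run their bodies the same number of times, max(n, 1), assuming n is well below UINT_MAX so no bound wraps:

void body(unsigned);

// Roughly the shape after canonicalization: the trip count has been rewritten
// as max(n, 1) so that an equality exit test is safe even when n == 0.
void beforeOptimizeMax(unsigned n) {
  unsigned limit = (n > 1) ? n : 1;   // the select that feeds the compare
  unsigned i = 1;
  do {
    body(i);
    ++i;
  } while (i != limit + 1);           // ICMP_NE against the max-derived bound
}

// Roughly what the rewrite recovers: compare directly against n (a ULT/ULE-style
// bound), which makes the select dead.
void afterOptimizeMax(unsigned n) {
  unsigned i = 1;
  do {
    body(i);
    ++i;
  } while (i <= n);
}
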
-/// OptimizeShadowIV - If IV is used in an int-to-float cast
-/// inside the loop, then try to eliminate the cast operation.
-void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {
+/// OptimizeLoopTermCond - Change loop terminating condition to use the
+/// postinc iv when possible.
+bool
+LSRInstance::OptimizeLoopTermCond() {
+ SmallPtrSet<Instruction *, 4> PostIncs;
- const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
- if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
- return;
+ BasicBlock *LatchBlock = L->getLoopLatch();
+ SmallVector<BasicBlock*, 8> ExitingBlocks;
+ L->getExitingBlocks(ExitingBlocks);
+
+ for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
+ BasicBlock *ExitingBlock = ExitingBlocks[i];
+
+ // Get the terminating condition for the loop if possible. If we
+ // can, we want to change it to use a post-incremented version of its
+ // induction variable, to allow coalescing the live ranges for the IV into
+ // one register value.
+
+ BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
+ if (!TermBr)
+ continue;
+    // FIXME: Overly conservative; the termination condition could be an 'or', etc.
+ if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
+ continue;
- for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e;
- ++Stride) {
- std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
- IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
- assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
- if (!isa<SCEVConstant>(SI->first))
+    // Search the IV uses to find Cond's IVUse if there is one.
+ IVStrideUse *CondUse = 0;
+ ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
+ if (!FindIVUserForCond(Cond, CondUse))
continue;
- for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
- E = SI->second->Users.end(); UI != E; /* empty */) {
- ilist<IVStrideUse>::iterator CandidateUI = UI;
- ++UI;
- Instruction *ShadowUse = CandidateUI->getUser();
- const Type *DestTy = NULL;
-
- /* If shadow use is a int->float cast then insert a second IV
- to eliminate this cast.
-
- for (unsigned i = 0; i < n; ++i)
- foo((double)i);
-
- is transformed into
-
- double d = 0.0;
- for (unsigned i = 0; i < n; ++i, ++d)
- foo(d);
- */
- if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser()))
- DestTy = UCast->getDestTy();
- else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser()))
- DestTy = SCast->getDestTy();
- if (!DestTy) continue;
-
- if (TLI) {
- // If target does not support DestTy natively then do not apply
- // this transformation.
- EVT DVT = TLI->getValueType(DestTy);
- if (!TLI->isTypeLegal(DVT)) continue;
- }
+ // If the trip count is computed in terms of a max (due to ScalarEvolution
+ // being unable to find a sufficient guard, for example), change the loop
+ // comparison to use SLT or ULT instead of NE.
+ // One consequence of doing this now is that it disrupts the count-down
+ // optimization. That's not always a bad thing though, because in such
+ // cases it may still be worthwhile to avoid a max.
+ Cond = OptimizeMax(Cond, CondUse);
+
+ // If this exiting block dominates the latch block, it may also use
+ // the post-inc value if it won't be shared with other uses.
+ // Check for dominance.
+ if (!DT.dominates(ExitingBlock, LatchBlock))
+ continue;
- PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
- if (!PH) continue;
- if (PH->getNumIncomingValues() != 2) continue;
+ // Conservatively avoid trying to use the post-inc value in non-latch
+ // exits if there may be pre-inc users in intervening blocks.
+ if (LatchBlock != ExitingBlock)
+ for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
+ // Test if the use is reachable from the exiting block. This dominator
+ // query is a conservative approximation of reachability.
+ if (&*UI != CondUse &&
+ !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
+ // Conservatively assume there may be reuse if the quotient of their
+ // strides could be a legal scale.
+ const SCEV *A = CondUse->getStride();
+ const SCEV *B = UI->getStride();
+ if (SE.getTypeSizeInBits(A->getType()) !=
+ SE.getTypeSizeInBits(B->getType())) {
+ if (SE.getTypeSizeInBits(A->getType()) >
+ SE.getTypeSizeInBits(B->getType()))
+ B = SE.getSignExtendExpr(B, A->getType());
+ else
+ A = SE.getSignExtendExpr(A, B->getType());
+ }
+ if (const SCEVConstant *D =
+ dyn_cast_or_null<SCEVConstant>(getSDiv(B, A, SE))) {
+ // Stride of one or negative one can have reuse with non-addresses.
+ if (D->getValue()->isOne() ||
+ D->getValue()->isAllOnesValue())
+ goto decline_post_inc;
+ // Avoid weird situations.
+ if (D->getValue()->getValue().getMinSignedBits() >= 64 ||
+ D->getValue()->getValue().isMinSignedValue())
+ goto decline_post_inc;
+ // Without TLI, assume that any stride might be valid, and so any
+ // use might be shared.
+ if (!TLI)
+ goto decline_post_inc;
+ // Check for possible scaled-address reuse.
+ const Type *AccessTy = getAccessType(UI->getUser());
+ TargetLowering::AddrMode AM;
+ AM.Scale = D->getValue()->getSExtValue();
+ if (TLI->isLegalAddressingMode(AM, AccessTy))
+ goto decline_post_inc;
+ AM.Scale = -AM.Scale;
+ if (TLI->isLegalAddressingMode(AM, AccessTy))
+ goto decline_post_inc;
+ }
+ }
- const Type *SrcTy = PH->getType();
- int Mantissa = DestTy->getFPMantissaWidth();
- if (Mantissa == -1) continue;
- if ((int)SE->getTypeSizeInBits(SrcTy) > Mantissa)
- continue;
+ DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: "
+ << *Cond << '\n');
- unsigned Entry, Latch;
- if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
- Entry = 0;
- Latch = 1;
+ // It's possible for the setcc instruction to be anywhere in the loop, and
+ // possible for it to have multiple users. If it is not immediately before
+ // the exiting block branch, move it.
+ if (&*++BasicBlock::iterator(Cond) != TermBr) {
+ if (Cond->hasOneUse()) {
+ Cond->moveBefore(TermBr);
} else {
- Entry = 1;
- Latch = 0;
+        // Clone the terminating condition and insert it into the loop end.
+ ICmpInst *OldCond = Cond;
+ Cond = cast<ICmpInst>(Cond->clone());
+ Cond->setName(L->getHeader()->getName() + ".termcond");
+ ExitingBlock->getInstList().insert(TermBr, Cond);
+
+ // Clone the IVUse, as the old use still exists!
+ CondUse = &IU.AddUser(CondUse->getStride(), CondUse->getOffset(),
+ Cond, CondUse->getOperandValToReplace());
+ TermBr->replaceUsesOfWith(OldCond, Cond);
}
+ }
- ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
- if (!Init) continue;
- Constant *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());
+ // If we get to here, we know that we can transform the setcc instruction to
+ // use the post-incremented version of the IV, allowing us to coalesce the
+ // live ranges for the IV correctly.
+ CondUse->setOffset(SE.getMinusSCEV(CondUse->getOffset(),
+ CondUse->getStride()));
+ CondUse->setIsUseOfPostIncrementedValue(true);
+ Changed = true;
- BinaryOperator *Incr =
- dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
- if (!Incr) continue;
- if (Incr->getOpcode() != Instruction::Add
- && Incr->getOpcode() != Instruction::Sub)
- continue;
+ PostIncs.insert(Cond);
+ decline_post_inc:;
+ }
- /* Initialize new IV, double d = 0.0 in above example. */
- ConstantInt *C = NULL;
- if (Incr->getOperand(0) == PH)
- C = dyn_cast<ConstantInt>(Incr->getOperand(1));
- else if (Incr->getOperand(1) == PH)
- C = dyn_cast<ConstantInt>(Incr->getOperand(0));
- else
- continue;
+ // Determine an insertion point for the loop induction variable increment. It
+ // must dominate all the post-inc comparisons we just set up, and it must
+ // dominate the loop latch edge.
+ IVIncInsertPos = L->getLoopLatch()->getTerminator();
+ for (SmallPtrSet<Instruction *, 4>::const_iterator I = PostIncs.begin(),
+ E = PostIncs.end(); I != E; ++I) {
+ BasicBlock *BB =
+ DT.findNearestCommonDominator(IVIncInsertPos->getParent(),
+ (*I)->getParent());
+ if (BB == (*I)->getParent())
+ IVIncInsertPos = *I;
+ else if (BB != IVIncInsertPos->getParent())
+ IVIncInsertPos = BB->getTerminator();
+ }
- if (!C) continue;
+ return Changed;
+}
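
As a rough source-level picture of why the post-incremented value is preferred in the exit test (plain C++, not IR; the constant trip count is an assumed example): when the branch tests the incremented value, the pre-increment copy is not live across the comparison, so a single register can carry the IV around the loop.

#include <iostream>

int main() {
  int Trips = 0;
  for (int i = 0;;) {
    ++Trips;                // loop body
    int Next = i + 1;       // the post-incremented value
    if (Next == 4) break;   // exit test uses Next, not i
    i = Next;               // i's old value is dead before the backedge
  }
  std::cout << Trips << '\n';  // prints 4
  return 0;
}
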
+
+bool
+LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
+ LSRUse::KindType Kind, const Type *AccessTy) {
+ int64_t NewMinOffset = LU.MinOffset;
+ int64_t NewMaxOffset = LU.MaxOffset;
+ const Type *NewAccessTy = AccessTy;
+
+ // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
+  // something conservative; however, this can pessimize the case where one of
+  // the uses has all of its fixups outside the loop, for example.
+ if (LU.Kind != Kind)
+ return false;
+ // Conservatively assume HasBaseReg is true for now.
+ if (NewOffset < LU.MinOffset) {
+ if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, /*HasBaseReg=*/true,
+ Kind, AccessTy, TLI, SE))
+ return false;
+ NewMinOffset = NewOffset;
+ } else if (NewOffset > LU.MaxOffset) {
+ if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, /*HasBaseReg=*/true,
+ Kind, AccessTy, TLI, SE))
+ return false;
+ NewMaxOffset = NewOffset;
+ }
+ // Check for a mismatched access type, and fall back conservatively as needed.
+ if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
+ NewAccessTy = Type::getVoidTy(AccessTy->getContext());
+
+ // Update the use.
+ LU.MinOffset = NewMinOffset;
+ LU.MaxOffset = NewMaxOffset;
+ LU.AccessTy = NewAccessTy;
+ if (NewOffset != LU.Offsets.back())
+ LU.Offsets.push_back(NewOffset);
+ return true;
+}
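
A minimal standalone sketch of the reconciliation step, assuming a hypothetical 12-bit signed immediate limit in place of isAlwaysFoldable (the real check depends on the target, the use kind, and the access type):

#include <cstdint>
#include <iostream>

struct OffsetRange { int64_t Min, Max; };

// Hypothetical stand-in for isAlwaysFoldable: accept distances that fit a
// 12-bit signed immediate.
static bool fitsImmediate(int64_t Distance) {
  return Distance >= -2048 && Distance <= 2047;
}

// Widen [Min, Max] to cover NewOffset only if the widened distance would
// still be foldable; otherwise refuse and leave the range unchanged.
static bool reconcile(OffsetRange &R, int64_t NewOffset) {
  if (NewOffset < R.Min) {
    if (!fitsImmediate(R.Max - NewOffset)) return false;
    R.Min = NewOffset;
  } else if (NewOffset > R.Max) {
    if (!fitsImmediate(NewOffset - R.Min)) return false;
    R.Max = NewOffset;
  }
  return true;
}

int main() {
  OffsetRange R = {0, 16};
  std::cout << reconcile(R, 40) << ' ' << R.Min << ' ' << R.Max << '\n';  // 1 0 40
  std::cout << reconcile(R, 5000) << '\n';                                // 0: too far apart
  return 0;
}
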
- // Ignore negative constants, as the code below doesn't handle them
- // correctly. TODO: Remove this restriction.
- if (!C->getValue().isStrictlyPositive()) continue;
+/// getUse - Return an LSRUse index and an offset value for a fixup which
+/// needs the given expression, with the given kind and optional access type.
+/// Either reuse an existing use or create a new one, as needed.
+std::pair<size_t, int64_t>
+LSRInstance::getUse(const SCEV *&Expr,
+ LSRUse::KindType Kind, const Type *AccessTy) {
+ const SCEV *Copy = Expr;
+ int64_t Offset = ExtractImmediate(Expr, SE);
+
+ // Basic uses can't accept any offset, for example.
+ if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true,
+ Kind, AccessTy, TLI, SE)) {
+ Expr = Copy;
+ Offset = 0;
+ }
- /* Add new PHINode. */
- PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);
+ std::pair<UseMapTy::iterator, bool> P =
+ UseMap.insert(std::make_pair(Expr, 0));
+ if (!P.second) {
+ // A use already existed with this base.
+ size_t LUIdx = P.first->second;
+ LSRUse &LU = Uses[LUIdx];
+ if (reconcileNewOffset(LU, Offset, Kind, AccessTy))
+ // Reuse this use.
+ return std::make_pair(LUIdx, Offset);
+ }
- /* create new increment. '++d' in above example. */
- Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
- BinaryOperator *NewIncr =
- BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
- Instruction::FAdd : Instruction::FSub,
- NewPH, CFP, "IV.S.next.", Incr);
+ // Create a new use.
+ size_t LUIdx = Uses.size();
+ P.first->second = LUIdx;
+ Uses.push_back(LSRUse(Kind, AccessTy));
+ LSRUse &LU = Uses[LUIdx];
- NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
- NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));
+ // We don't need to track redundant offsets, but we don't need to go out
+ // of our way here to avoid them.
+ if (LU.Offsets.empty() || Offset != LU.Offsets.back())
+ LU.Offsets.push_back(Offset);
- /* Remove cast operation */
- ShadowUse->replaceAllUsesWith(NewPH);
- ShadowUse->eraseFromParent();
- NumShadow++;
- break;
+ LU.MinOffset = Offset;
+ LU.MaxOffset = Offset;
+ return std::make_pair(LUIdx, Offset);
+}
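
The UseMap keyed on the offset-stripped expression can be pictured with a toy stand-in for SCEV (the Expr struct and the string bases below are hypothetical, not LLVM types): two accesses at p+8 and p+24 land in the same use and differ only in their recorded offsets.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

struct Expr { std::string Base; int64_t Imm; };  // hypothetical toy "SCEV"

// Return a (use index, offset) pair, creating a new use if the base is unseen.
static std::pair<size_t, int64_t>
getUse(std::map<std::string, size_t> &UseMap, std::vector<std::string> &Uses,
       const Expr &E) {
  std::pair<std::map<std::string, size_t>::iterator, bool> P =
      UseMap.insert(std::make_pair(E.Base, Uses.size()));
  if (P.second)
    Uses.push_back(E.Base);                      // first time we see this base
  return std::make_pair(P.first->second, E.Imm);
}

int main() {
  std::map<std::string, size_t> UseMap;
  std::vector<std::string> Uses;
  std::pair<size_t, int64_t> A = getUse(UseMap, Uses, Expr{"p", 8});
  std::pair<size_t, int64_t> B = getUse(UseMap, Uses, Expr{"p", 24});
  std::cout << A.first << "+" << A.second << ", "
            << B.first << "+" << B.second << '\n';  // prints "0+8, 0+24"
  return 0;
}
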
+
+void LSRInstance::CollectInterestingTypesAndFactors() {
+ SmallSetVector<const SCEV *, 4> Strides;
+
+ // Collect interesting types and factors.
+ for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
+ const SCEV *Stride = UI->getStride();
+
+ // Collect interesting types.
+ Types.insert(SE.getEffectiveSCEVType(Stride->getType()));
+
+ // Collect interesting factors.
+ for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
+ Strides.begin(), SEnd = Strides.end(); NewStrideIter != SEnd;
+ ++NewStrideIter) {
+ const SCEV *OldStride = Stride;
+ const SCEV *NewStride = *NewStrideIter;
+ if (OldStride == NewStride)
+ continue;
+
+ if (SE.getTypeSizeInBits(OldStride->getType()) !=
+ SE.getTypeSizeInBits(NewStride->getType())) {
+ if (SE.getTypeSizeInBits(OldStride->getType()) >
+ SE.getTypeSizeInBits(NewStride->getType()))
+ NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
+ else
+ OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
+ }
+ if (const SCEVConstant *Factor =
+ dyn_cast_or_null<SCEVConstant>(getSDiv(NewStride, OldStride,
+ SE, true))) {
+ if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
+ Factors.insert(Factor->getValue()->getValue().getSExtValue());
+ } else if (const SCEVConstant *Factor =
+ dyn_cast_or_null<SCEVConstant>(getSDiv(OldStride, NewStride,
+ SE, true))) {
+ if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
+ Factors.insert(Factor->getValue()->getValue().getSExtValue());
+ }
}
+ Strides.insert(Stride);
}
-}
-/// OptimizeIndvars - Now that IVUsesByStride is set up with all of the indvar
-/// uses in the loop, look to see if we can eliminate some, in favor of using
-/// common indvars for the different uses.
-void LoopStrengthReduce::OptimizeIndvars(Loop *L) {
- // TODO: implement optzns here.
+ // If all uses use the same type, don't bother looking for truncation-based
+ // reuse.
+ if (Types.size() == 1)
+ Types.clear();
- OptimizeShadowIV(L);
+ DEBUG(print_factors_and_types(dbgs()));
}
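
The interesting factors are simply the exact integer ratios between pairs of strides. A standalone sketch with plain integers (real strides are SCEVs and the division is the symbolic getSDiv above; the stride values are assumed):

#include <cstdint>
#include <iostream>
#include <set>
#include <vector>

int main() {
  std::vector<int64_t> Strides = {4, 8, 12};   // assumed strides, in bytes
  std::set<int64_t> Factors;
  for (size_t i = 0; i != Strides.size(); ++i)
    for (size_t j = 0; j != Strides.size(); ++j)
      if (i != j && Strides[j] != 0 && Strides[i] % Strides[j] == 0)
        Factors.insert(Strides[i] / Strides[j]);  // record exact ratios only
  for (int64_t F : Factors)
    std::cout << F << ' ';                        // prints: 2 3
  std::cout << '\n';
  return 0;
}
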
-bool LoopStrengthReduce::StrideMightBeShared(const SCEV* Stride, Loop *L,
- bool CheckPreInc) {
- int64_t SInt = cast<SCEVConstant>(Stride)->getValue()->getSExtValue();
- for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
- std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
- IU->IVUsesByStride.find(IU->StrideOrder[i]);
- const SCEV *Share = SI->first;
- if (!isa<SCEVConstant>(SI->first) || Share == Stride)
- continue;
- int64_t SSInt = cast<SCEVConstant>(Share)->getValue()->getSExtValue();
- if (SSInt == SInt)
- return true; // This can definitely be reused.
- if (unsigned(abs64(SSInt)) < SInt || (SSInt % SInt) != 0)
- continue;
- int64_t Scale = SSInt / SInt;
- bool AllUsesAreAddresses = true;
- bool AllUsesAreOutsideLoop = true;
- std::vector<BasedUser> UsersToProcess;
- const SCEV *CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
- AllUsesAreAddresses,
- AllUsesAreOutsideLoop,
- UsersToProcess);
- if (AllUsesAreAddresses &&
- ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess)) {
- if (!CheckPreInc)
- return true;
- // Any pre-inc iv use?
- IVUsersOfOneStride &StrideUses = *IU->IVUsesByStride[Share];
- for (ilist<IVStrideUse>::iterator I = StrideUses.Users.begin(),
- E = StrideUses.Users.end(); I != E; ++I) {
- if (!I->isUseOfPostIncrementedValue())
- return true;
+void LSRInstance::CollectFixupsAndInitialFormulae() {
+ for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
+ // Record the uses.
+ LSRFixup &LF = getNewFixup();
+ LF.UserInst = UI->getUser();
+ LF.OperandValToReplace = UI->getOperandValToReplace();
+ if (UI->isUseOfPostIncrementedValue())
+ LF.PostIncLoop = L;
+
+ LSRUse::KindType Kind = LSRUse::Basic;
+ const Type *AccessTy = 0;
+ if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) {
+ Kind = LSRUse::Address;
+ AccessTy = getAccessType(LF.UserInst);
+ }
+
+ const SCEV *S = IU.getCanonicalExpr(*UI);
+
+ // Equality (== and !=) ICmps are special. We can rewrite (i == N) as
+ // (N - i == 0), and this allows (N - i) to be the expression that we work
+ // with rather than just N or i, so we can consider the register
+ // requirements for both N and i at the same time. Limiting this code to
+ // equality icmps is not a problem because all interesting loops use
+ // equality icmps, thanks to IndVarSimplify.
+ if (ICmpInst *CI = dyn_cast<ICmpInst>(LF.UserInst))
+ if (CI->isEquality()) {
+ // Swap the operands if needed to put the OperandValToReplace on the
+ // left, for consistency.
+ Value *NV = CI->getOperand(1);
+ if (NV == LF.OperandValToReplace) {
+ CI->setOperand(1, CI->getOperand(0));
+ CI->setOperand(0, NV);
+ }
+
+ // x == y --> x - y == 0
+ const SCEV *N = SE.getSCEV(NV);
+ if (N->isLoopInvariant(L)) {
+ Kind = LSRUse::ICmpZero;
+ S = SE.getMinusSCEV(N, S);
+ }
+
+ // -1 and the negations of all interesting strides (except the negation
+ // of -1) are now also interesting.
+ for (size_t i = 0, e = Factors.size(); i != e; ++i)
+ if (Factors[i] != -1)
+ Factors.insert(-(uint64_t)Factors[i]);
+ Factors.insert(-1);
}
+
+ // Set up the initial formula for this use.
+ std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy);
+ LF.LUIdx = P.first;
+ LF.Offset = P.second;
+ LSRUse &LU = Uses[LF.LUIdx];
+ LU.AllFixupsOutsideLoop &= !L->contains(LF.UserInst);
+
+ // If this is the first use of this LSRUse, give it a formula.
+ if (LU.Formulae.empty()) {
+ InsertInitialFormula(S, L, LU, LF.LUIdx);
+ CountRegisters(LU.Formulae.back(), LF.LUIdx);
}
}
- return false;
+
+ DEBUG(print_fixups(dbgs()));
}
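
The ICmpZero rewrite described in the comment above can be pictured at the source level: rather than keeping both i and N live just for the compare, track the single expression D = N - i and exit when it reaches zero. A small self-contained sketch (plain C++, not the pass itself; N and Step are assumed values):

#include <cstdint>
#include <iostream>

int main() {
  const int64_t N = 5, Step = 1;
  int64_t D = N;               // the (N - i) expression LSR works with
  int Count = 0;
  for (int64_t i = 0; ; i += Step, D -= Step) {
    if (D == 0) break;         // same exit condition as (i == N)
    ++Count;                   // loop body
  }
  std::cout << Count << '\n';  // prints 5
  return 0;
}
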
-/// isUsedByExitBranch - Return true if icmp is used by a loop terminating
-/// conditional branch or it's and / or with other conditions before being used
-/// as the condition.
-static bool isUsedByExitBranch(ICmpInst *Cond, Loop *L) {
- BasicBlock *CondBB = Cond->getParent();
- if (!L->isLoopExiting(CondBB))
- return false;
- BranchInst *TermBr = dyn_cast<BranchInst>(CondBB->getTerminator());
- if (!TermBr || !TermBr->isConditional())
+void
+LSRInstance::InsertInitialFormula(const SCEV *S, Loop *L,
+ LSRUse &LU, size_t LUIdx) {
+ Formula F;
+ F.InitialMatch(S, L, SE, DT);
+ bool Inserted = InsertFormula(LU, LUIdx, F);
+ assert(Inserted && "Initial formula already exists!"); (void)Inserted;
+}
+
+void
+LSRInstance::InsertSupplementalFormula(const SCEV *S,
+ LSRUse &LU, size_t LUIdx) {
+ Formula F;
+ F.BaseRegs.push_back(S);
+ F.AM.HasBaseReg = true;
+ bool Inserted = InsertFormula(LU, LUIdx, F);
+ assert(Inserted && "Supplemental formula already exists!"); (void)Inserted;
+}
+
+/// CountRegisters - Note which registers are used by the given formula,
+/// updating RegUses.
+void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
+ if (F.ScaledReg)
+ RegUses.CountRegister(F.ScaledReg, LUIdx);
+ for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
+ E = F.BaseRegs.end(); I != E; ++I)
+ RegUses.CountRegister(*I, LUIdx);
+}
+
+/// InsertFormula - If the given formula has not yet been inserted, add it to
+/// the list, and return true. Return false otherwise.
+bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
+ if (!LU.InsertFormula(LUIdx, F))
return false;
- Value *User = *Cond->use_begin();
- Instruction *UserInst = dyn_cast<Instruction>(User);
- while (UserInst &&
- (UserInst->getOpcode() == Instruction::And ||
- UserInst->getOpcode() == Instruction::Or)) {
- if (!UserInst->hasOneUse() || UserInst->getParent() != CondBB)
- return false;
- User = *User->use_begin();
- UserInst = dyn_cast<Instruction>(User);
+ CountRegisters(F, LUIdx);
+ return true;
+}
+
+/// CollectLoopInvariantFixupsAndFormulae - Check for other uses of
+/// loop-invariant values which we're tracking. These other uses will pin these
+/// values in registers, making them less profitable for elimination.
+/// TODO: This currently misses non-constant addrec step registers.
+/// TODO: Should this give more weight to users inside the loop?
+void
+LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
+ SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end());
+ SmallPtrSet<const SCEV *, 8> Inserted;
+
+ while (!Worklist.empty()) {
+ const SCEV *S = Worklist.pop_back_val();
+
+ if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
+ Worklist.insert(Worklist.end(), N->op_begin(), N->op_end());
+ else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
+ Worklist.push_back(C->getOperand());
+ else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
+ Worklist.push_back(D->getLHS());
+ Worklist.push_back(D->getRHS());
+ } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
+ if (!Inserted.insert(U)) continue;
+ const Value *V = U->getValue();
+ if (const Instruction *Inst = dyn_cast<Instruction>(V))
+ if (L->contains(Inst)) continue;
+ for (Value::use_const_iterator UI = V->use_begin(), UE = V->use_end();
+ UI != UE; ++UI) {
+ const Instruction *UserInst = dyn_cast<Instruction>(*UI);
+ // Ignore non-instructions.
+ if (!UserInst)
+ continue;
+ // Ignore instructions in other functions (as can happen with
+ // Constants).
+ if (UserInst->getParent()->getParent() != L->getHeader()->getParent())
+ continue;
+ // Ignore instructions not dominated by the loop.
+ const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
+ UserInst->getParent() :
+ cast<PHINode>(UserInst)->getIncomingBlock(
+ PHINode::getIncomingValueNumForOperand(UI.getOperandNo()));
+ if (!DT.dominates(L->getHeader(), UseBB))
+ continue;
+ // Ignore uses which are part of other SCEV expressions, to avoid
+ // analyzing them multiple times.
+ if (SE.isSCEVable(UserInst->getType()) &&
+ !isa<SCEVUnknown>(SE.getSCEV(const_cast<Instruction *>(UserInst))))
+ continue;
+ // Ignore icmp instructions which are already being analyzed.
+ if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
+ unsigned OtherIdx = !UI.getOperandNo();
+ Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
+ if (SE.getSCEV(OtherOp)->hasComputableLoopEvolution(L))
+ continue;
+ }
+
+ LSRFixup &LF = getNewFixup();
+ LF.UserInst = const_cast<Instruction *>(UserInst);
+ LF.OperandValToReplace = UI.getUse();
+ std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, 0);
+ LF.LUIdx = P.first;
+ LF.Offset = P.second;
+ LSRUse &LU = Uses[LF.LUIdx];
+ LU.AllFixupsOutsideLoop &= L->contains(LF.UserInst);
+ InsertSupplementalFormula(U, LU, LF.LUIdx);
+ CountRegisters(LU.Formulae.back(), Uses.size() - 1);
+ break;
+ }
+ }
}
- return User == TermBr;
}
-static bool ShouldCountToZero(ICmpInst *Cond, IVStrideUse* &CondUse,
- ScalarEvolution *SE, Loop *L,
- const TargetLowering *TLI = 0) {
- if (!L->contains(Cond))
- return false;
+/// CollectSubexprs - Split S into subexpressions which can be pulled out into
+/// separate registers. If C is non-null, multiply each subexpression by C.
+static void CollectSubexprs(const SCEV *S, const SCEVConstant *C,
+ SmallVectorImpl<const SCEV *> &Ops,
+ ScalarEvolution &SE) {
+ if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
+ // Break out add operands.
+ for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
+ I != E; ++I)
+ CollectSubexprs(*I, C, Ops, SE);
+ return;
+ } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+ // Split a non-zero base out of an addrec.
+ if (!AR->getStart()->isZero()) {
+ CollectSubexprs(SE.getAddRecExpr(SE.getIntegerSCEV(0, AR->getType()),
+ AR->getStepRecurrence(SE),
+ AR->getLoop()), C, Ops, SE);
+ CollectSubexprs(AR->getStart(), C, Ops, SE);
+ return;
+ }
+ } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
+ // Break (C * (a + b + c)) into C*a + C*b + C*c.
+ if (Mul->getNumOperands() == 2)
+ if (const SCEVConstant *Op0 =
+ dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
+ CollectSubexprs(Mul->getOperand(1),
+ C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0,
+ Ops, SE);
+ return;
+ }
+ }
- if (!isa<SCEVConstant>(CondUse->getOffset()))
- return false;
+ // Otherwise use the value itself.
+ Ops.push_back(C ? SE.getMulExpr(C, S) : S);
+}
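
The constant-distribution case can be sketched with plain integers: given C * (a + b + c), the helper emits the scaled addends C*a, C*b, C*c rather than one opaque product register. A toy version operating on integers instead of SCEVs (all values are assumed examples):

#include <cstdint>
#include <iostream>
#include <vector>

// Distribute the outer constant across the addends of a sum.
static void collectSubexprs(const std::vector<int64_t> &Addends, int64_t C,
                            std::vector<int64_t> &Ops) {
  for (int64_t A : Addends)
    Ops.push_back(C * A);          // multiply each addend by the constant
}

int main() {
  std::vector<int64_t> Addends = {3, 5, 7};
  std::vector<int64_t> Ops;
  collectSubexprs(Addends, 4, Ops);   // 4 * (3 + 5 + 7)
  for (int64_t V : Ops)
    std::cout << V << ' ';            // prints: 12 20 28
  std::cout << '\n';
  return 0;
}
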
- // Handle only tests for equality for the moment.
- if (!Cond->isEquality() || !Cond->hasOneUse())
- return false;
- if (!isUsedByExitBranch(Cond, L))
- return false;
+/// GenerateReassociations - Split out subexpressions from adds and the bases of
+/// addrecs.
+void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
+ Formula Base,
+ unsigned Depth) {
+ // Arbitrarily cap recursion to protect compile time.
+ if (Depth >= 3) return;
+
+ for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
+ const SCEV *BaseReg = Base.BaseRegs[i];
+
+ SmallVector<const SCEV *, 8> AddOps;
+ CollectSubexprs(BaseReg, 0, AddOps, SE);
+ if (AddOps.size() == 1) continue;
+
+ for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
+ JE = AddOps.end(); J != JE; ++J) {
+ // Don't pull a constant into a register if the constant could be folded
+ // into an immediate field.
+ if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset,
+ Base.getNumRegs() > 1,
+ LU.Kind, LU.AccessTy, TLI, SE))
+ continue;
- Value *CondOp0 = Cond->getOperand(0);
- const SCEV *IV = SE->getSCEV(CondOp0);
- const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
- if (!AR || !AR->isAffine())
- return false;
+ // Collect all operands except *J.
+ SmallVector<const SCEV *, 8> InnerAddOps;
+ for (SmallVectorImpl<const SCEV *>::const_iterator K = AddOps.begin(),
+ KE = AddOps.end(); K != KE; ++K)
+ if (K != J)
+ InnerAddOps.push_back(*K);
+
+ // Don't leave just a constant behind in a register if the constant could
+ // be folded into an immediate field.
+ if (InnerAddOps.size() == 1 &&
+ isAlwaysFoldable(InnerAddOps[0], LU.MinOffset, LU.MaxOffset,
+ Base.getNumRegs() > 1,
+ LU.Kind, LU.AccessTy, TLI, SE))
+ continue;
- const SCEVConstant *SC = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
- if (!SC || SC->getValue()->getSExtValue() < 0)
- // If it's already counting down, don't do anything.
- return false;
+ Formula F = Base;
+ F.BaseRegs[i] = SE.getAddExpr(InnerAddOps);
+ F.BaseRegs.push_back(*J);
+ if (InsertFormula(LU, LUIdx, F))
+ // If that formula hadn't been seen before, recurse to find more like
+ // it.
+ GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth+1);
+ }
+ }
+}
- // If the RHS of the comparison is not an loop invariant, the rewrite
- // cannot be done. Also bail out if it's already comparing against a zero.
- // If we are checking this before cmp stride optimization, check if it's
- // comparing against a already legal immediate.
- Value *RHS = Cond->getOperand(1);
- ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS);
- if (!L->isLoopInvariant(RHS) ||
- (RHSC && RHSC->isZero()) ||
- (RHSC && TLI && TLI->isLegalICmpImmediate(RHSC->getSExtValue())))
- return false;
+/// GenerateCombinations - Generate a formula consisting of all of the
+/// loop-dominating registers added into a single register.
+void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
+ Formula Base) {
+  // This method is only interesting on a plurality of registers.
+ if (Base.BaseRegs.size() <= 1) return;
+
+ Formula F = Base;
+ F.BaseRegs.clear();
+ SmallVector<const SCEV *, 4> Ops;
+ for (SmallVectorImpl<const SCEV *>::const_iterator
+ I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) {
+ const SCEV *BaseReg = *I;
+ if (BaseReg->properlyDominates(L->getHeader(), &DT) &&
+ !BaseReg->hasComputableLoopEvolution(L))
+ Ops.push_back(BaseReg);
+ else
+ F.BaseRegs.push_back(BaseReg);
+ }
+ if (Ops.size() > 1) {
+ const SCEV *Sum = SE.getAddExpr(Ops);
+ // TODO: If Sum is zero, it probably means ScalarEvolution missed an
+ // opportunity to fold something. For now, just ignore such cases
+    // rather than proceed with zero in a register.
+ if (!Sum->isZero()) {
+ F.BaseRegs.push_back(Sum);
+ (void)InsertFormula(LU, LUIdx, F);
+ }
+ }
+}
- // Make sure the IV is only used for counting. Value may be preinc or
- // postinc; 2 uses in either case.
- if (!CondOp0->hasNUses(2))
- return false;
+/// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets.
+void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
+ Formula Base) {
+ // We can't add a symbolic offset if the address already contains one.
+ if (Base.AM.BaseGV) return;
- return true;
+ for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
+ const SCEV *G = Base.BaseRegs[i];
+ GlobalValue *GV = ExtractSymbol(G, SE);
+ if (G->isZero() || !GV)
+ continue;
+ Formula F = Base;
+ F.AM.BaseGV = GV;
+ if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
+ LU.Kind, LU.AccessTy, TLI))
+ continue;
+ F.BaseRegs[i] = G;
+ (void)InsertFormula(LU, LUIdx, F);
+ }
}
-/// OptimizeLoopTermCond - Change loop terminating condition to use the
-/// postinc iv when possible.
-void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
- BasicBlock *LatchBlock = L->getLoopLatch();
- bool LatchExit = L->isLoopExiting(LatchBlock);
- SmallVector<BasicBlock*, 8> ExitingBlocks;
- L->getExitingBlocks(ExitingBlocks);
+/// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
+void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
+ Formula Base) {
+ // TODO: For now, just add the min and max offset, because it usually isn't
+  // worthwhile looking at everything in between.
+ SmallVector<int64_t, 4> Worklist;
+ Worklist.push_back(LU.MinOffset);
+ if (LU.MaxOffset != LU.MinOffset)
+ Worklist.push_back(LU.MaxOffset);
+
+ for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
+ const SCEV *G = Base.BaseRegs[i];
+
+ for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(),
+ E = Worklist.end(); I != E; ++I) {
+ Formula F = Base;
+ F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I;
+ if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I,
+ LU.Kind, LU.AccessTy, TLI)) {
+ F.BaseRegs[i] = SE.getAddExpr(G, SE.getIntegerSCEV(*I, G->getType()));
+
+ (void)InsertFormula(LU, LUIdx, F);
+ }
+ }
- for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
- BasicBlock *ExitingBlock = ExitingBlocks[i];
+ int64_t Imm = ExtractImmediate(G, SE);
+ if (G->isZero() || Imm == 0)
+ continue;
+ Formula F = Base;
+ F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Imm;
+ if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
+ LU.Kind, LU.AccessTy, TLI))
+ continue;
+ F.BaseRegs[i] = G;
+ (void)InsertFormula(LU, LUIdx, F);
+ }
+}
- // Finally, get the terminating condition for the loop if possible. If we
- // can, we want to change it to use a post-incremented version of its
- // induction variable, to allow coalescing the live ranges for the IV into
- // one register value.
+/// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up
+/// the comparison. For example, x == y -> x*c == y*c.
+void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
+ Formula Base) {
+ if (LU.Kind != LSRUse::ICmpZero) return;
- BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
- if (!TermBr)
+ // Determine the integer type for the base formula.
+ const Type *IntTy = Base.getType();
+ if (!IntTy) return;
+ if (SE.getTypeSizeInBits(IntTy) > 64) return;
+
+ // Don't do this if there is more than one offset.
+ if (LU.MinOffset != LU.MaxOffset) return;
+
+ assert(!Base.AM.BaseGV && "ICmpZero use is not legal!");
+
+ // Check each interesting stride.
+ for (SmallSetVector<int64_t, 8>::const_iterator
+ I = Factors.begin(), E = Factors.end(); I != E; ++I) {
+ int64_t Factor = *I;
+ Formula F = Base;
+
+ // Check that the multiplication doesn't overflow.
+ F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs * Factor;
+ if ((int64_t)F.AM.BaseOffs / Factor != Base.AM.BaseOffs)
continue;
- // FIXME: Overly conservative, termination condition could be an 'or' etc..
- if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
+
+ // Check that multiplying with the use offset doesn't overflow.
+ int64_t Offset = LU.MinOffset;
+ Offset = (uint64_t)Offset * Factor;
+ if ((int64_t)Offset / Factor != LU.MinOffset)
continue;
- // Search IVUsesByStride to find Cond's IVUse if there is one.
- IVStrideUse *CondUse = 0;
- const SCEV *CondStride = 0;
- ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
- if (!FindIVUserForCond(Cond, CondUse, CondStride))
+ // Check that this scale is legal.
+ if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI))
continue;
- // If the latch block is exiting and it's not a single block loop, it's
- // not safe to use postinc iv in other exiting blocks. FIXME: overly
- // conservative? How about icmp stride optimization?
- bool UsePostInc = !(e > 1 && LatchExit && ExitingBlock != LatchBlock);
- if (UsePostInc && ExitingBlock != LatchBlock) {
- if (!Cond->hasOneUse())
- // See below, we don't want the condition to be cloned.
- UsePostInc = false;
- else {
- // If exiting block is the latch block, we know it's safe and profitable
- // to transform the icmp to use post-inc iv. Otherwise do so only if it
- // would not reuse another iv and its iv would be reused by other uses.
- // We are optimizing for the case where the icmp is the only use of the
- // iv.
- IVUsersOfOneStride &StrideUses = *IU->IVUsesByStride[CondStride];
- for (ilist<IVStrideUse>::iterator I = StrideUses.Users.begin(),
- E = StrideUses.Users.end(); I != E; ++I) {
- if (I->getUser() == Cond)
- continue;
- if (!I->isUseOfPostIncrementedValue()) {
- UsePostInc = false;
- break;
- }
+ // Compensate for the use having MinOffset built into it.
+ F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Offset - LU.MinOffset;
+
+ const SCEV *FactorS = SE.getIntegerSCEV(Factor, IntTy);
+
+ // Check that multiplying with each base register doesn't overflow.
+ for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
+ F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
+ if (getSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
+ goto next;
+ }
+
+ // Check that multiplying with the scaled register doesn't overflow.
+ if (F.ScaledReg) {
+ F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
+ if (getSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
+ continue;
+ }
+
+ // If we make it here and it's legal, add it.
+ (void)InsertFormula(LU, LUIdx, F);
+ next:;
+ }
+}
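
The overflow guards above follow a common idiom: do the multiplication in unsigned arithmetic, then divide back and compare against the original operand. A standalone sketch of that check (the test values are arbitrary):

#include <cstdint>
#include <iostream>

// Multiply Base by Factor in unsigned arithmetic, then verify by dividing
// back; reject factors that would wrap the signed 64-bit result.
static bool scaleNoOverflow(int64_t Base, int64_t Factor, int64_t &Out) {
  Out = (int64_t)((uint64_t)Base * (uint64_t)Factor);
  return Factor != 0 && Out / Factor == Base;
}

int main() {
  int64_t R;
  std::cout << scaleNoOverflow(1000, 4, R) << ' ' << R << '\n';  // 1 4000
  std::cout << scaleNoOverflow(INT64_MAX / 2, 4, R) << '\n';     // 0: wrapped
  return 0;
}
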
+
+/// GenerateScales - Generate stride factor reuse formulae by making use of
+/// scaled-offset address modes, for example.
+void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx,
+ Formula Base) {
+ // Determine the integer type for the base formula.
+ const Type *IntTy = Base.getType();
+ if (!IntTy) return;
+
+ // If this Formula already has a scaled register, we can't add another one.
+ if (Base.AM.Scale != 0) return;
+
+ // Check each interesting stride.
+ for (SmallSetVector<int64_t, 8>::const_iterator
+ I = Factors.begin(), E = Factors.end(); I != E; ++I) {
+ int64_t Factor = *I;
+
+ Base.AM.Scale = Factor;
+ Base.AM.HasBaseReg = Base.BaseRegs.size() > 1;
+ // Check whether this scale is going to be legal.
+ if (!isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
+ LU.Kind, LU.AccessTy, TLI)) {
+      // As a special case, convert out-of-loop Basic users to Special users.
+ // TODO: Reconsider this special case.
+ if (LU.Kind == LSRUse::Basic &&
+ isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
+ LSRUse::Special, LU.AccessTy, TLI) &&
+ LU.AllFixupsOutsideLoop)
+ LU.Kind = LSRUse::Special;
+ else
+ continue;
+ }
+ // For an ICmpZero, negating a solitary base register won't lead to
+ // new solutions.
+ if (LU.Kind == LSRUse::ICmpZero &&
+ !Base.AM.HasBaseReg && Base.AM.BaseOffs == 0 && !Base.AM.BaseGV)
+ continue;
+ // For each addrec base reg, apply the scale, if possible.
+ for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
+ if (const SCEVAddRecExpr *AR =
+ dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i])) {
+ const SCEV *FactorS = SE.getIntegerSCEV(Factor, IntTy);
+ if (FactorS->isZero())
+ continue;
+ // Divide out the factor, ignoring high bits, since we'll be
+ // scaling the value back up in the end.
+ if (const SCEV *Quotient = getSDiv(AR, FactorS, SE, true)) {
+ // TODO: This could be optimized to avoid all the copying.
+ Formula F = Base;
+ F.ScaledReg = Quotient;
+ std::swap(F.BaseRegs[i], F.BaseRegs.back());
+ F.BaseRegs.pop_back();
+ (void)InsertFormula(LU, LUIdx, F);
}
}
+ }
+}
- // If iv for the stride might be shared and any of the users use pre-inc
- // iv might be used, then it's not safe to use post-inc iv.
- if (UsePostInc &&
- isa<SCEVConstant>(CondStride) &&
- StrideMightBeShared(CondStride, L, true))
- UsePostInc = false;
- }
+/// GenerateTruncates - Generate reuse formulae from different IV types.
+void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx,
+ Formula Base) {
+ // This requires TargetLowering to tell us which truncates are free.
+ if (!TLI) return;
+
+ // Don't bother truncating symbolic values.
+ if (Base.AM.BaseGV) return;
+
+ // Determine the integer type for the base formula.
+ const Type *DstTy = Base.getType();
+ if (!DstTy) return;
+ DstTy = SE.getEffectiveSCEVType(DstTy);
+
+ for (SmallSetVector<const Type *, 4>::const_iterator
+ I = Types.begin(), E = Types.end(); I != E; ++I) {
+ const Type *SrcTy = *I;
+ if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) {
+ Formula F = Base;
+
+ if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I);
+ for (SmallVectorImpl<const SCEV *>::iterator J = F.BaseRegs.begin(),
+ JE = F.BaseRegs.end(); J != JE; ++J)
+ *J = SE.getAnyExtendExpr(*J, SrcTy);
+
+ // TODO: This assumes we've done basic processing on all uses and
+ // have an idea what the register usage is.
+ if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses))
+ continue;
- // If the trip count is computed in terms of a max (due to ScalarEvolution
- // being unable to find a sufficient guard, for example), change the loop
- // comparison to use SLT or ULT instead of NE.
- Cond = OptimizeMax(L, Cond, CondUse);
-
- // If possible, change stride and operands of the compare instruction to
- // eliminate one stride. However, avoid rewriting the compare instruction
- // with an iv of new stride if it's likely the new stride uses will be
- // rewritten using the stride of the compare instruction.
- if (ExitingBlock == LatchBlock && isa<SCEVConstant>(CondStride)) {
- // If the condition stride is a constant and it's the only use, we might
- // want to optimize it first by turning it to count toward zero.
- if (!StrideMightBeShared(CondStride, L, false) &&
- !ShouldCountToZero(Cond, CondUse, SE, L, TLI))
- Cond = ChangeCompareStride(L, Cond, CondUse, CondStride);
+ (void)InsertFormula(LU, LUIdx, F);
}
+ }
+}
+
+namespace {
+
+/// WorkItem - Helper class for GenerateCrossUseConstantOffsets. It's used to
+/// defer modifications so that the search phase doesn't have to worry about
+/// the data structures moving underneath it.
+struct WorkItem {
+ size_t LUIdx;
+ int64_t Imm;
+ const SCEV *OrigReg;
+
+ WorkItem(size_t LI, int64_t I, const SCEV *R)
+ : LUIdx(LI), Imm(I), OrigReg(R) {}
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+};
+
+}
+
+void WorkItem::print(raw_ostream &OS) const {
+ OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
+ << " , add offset " << Imm;
+}
+
+void WorkItem::dump() const {
+ print(errs()); errs() << '\n';
+}
+
+/// GenerateCrossUseConstantOffsets - Look for registers which are a constant
+/// distance apart and try to form reuse opportunities between them.
+void LSRInstance::GenerateCrossUseConstantOffsets() {
+ // Group the registers by their value without any added constant offset.
+ typedef std::map<int64_t, const SCEV *> ImmMapTy;
+ typedef DenseMap<const SCEV *, ImmMapTy> RegMapTy;
+ RegMapTy Map;
+ DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
+ SmallVector<const SCEV *, 8> Sequence;
+ for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
+ I != E; ++I) {
+ const SCEV *Reg = *I;
+ int64_t Imm = ExtractImmediate(Reg, SE);
+ std::pair<RegMapTy::iterator, bool> Pair =
+ Map.insert(std::make_pair(Reg, ImmMapTy()));
+ if (Pair.second)
+ Sequence.push_back(Reg);
+ Pair.first->second.insert(std::make_pair(Imm, *I));
+ UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(*I);
+ }
- if (!UsePostInc)
+ // Now examine each set of registers with the same base value. Build up
+ // a list of work to do and do the work in a separate step so that we're
+ // not adding formulae and register counts while we're searching.
+ SmallVector<WorkItem, 32> WorkItems;
+ SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
+ for (SmallVectorImpl<const SCEV *>::const_iterator I = Sequence.begin(),
+ E = Sequence.end(); I != E; ++I) {
+ const SCEV *Reg = *I;
+ const ImmMapTy &Imms = Map.find(Reg)->second;
+
+ // It's not worthwhile looking for reuse if there's only one offset.
+ if (Imms.size() == 1)
continue;
- DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: "
- << *Cond << '\n');
+ DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
+ for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
+ J != JE; ++J)
+ dbgs() << ' ' << J->first;
+ dbgs() << '\n');
- // It's possible for the setcc instruction to be anywhere in the loop, and
- // possible for it to have multiple users. If it is not immediately before
- // the exiting block branch, move it.
- if (&*++BasicBlock::iterator(Cond) != (Instruction*)TermBr) {
- if (Cond->hasOneUse()) { // Condition has a single use, just move it.
- Cond->moveBefore(TermBr);
- } else {
- // Otherwise, clone the terminating condition and insert into the
- // loopend.
- Cond = cast<ICmpInst>(Cond->clone());
- Cond->setName(L->getHeader()->getName() + ".termcond");
- ExitingBlock->getInstList().insert(TermBr, Cond);
+ // Examine each offset.
+ for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
+ J != JE; ++J) {
+ const SCEV *OrigReg = J->second;
- // Clone the IVUse, as the old use still exists!
- IU->IVUsesByStride[CondStride]->addUser(CondUse->getOffset(), Cond,
- CondUse->getOperandValToReplace());
- CondUse = &IU->IVUsesByStride[CondStride]->Users.back();
+ int64_t JImm = J->first;
+ const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);
+
+ if (!isa<SCEVConstant>(OrigReg) &&
+ UsedByIndicesMap[Reg].count() == 1) {
+ DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n');
+ continue;
+ }
+
+      // Conservatively examine offsets between this orig reg and a few selected
+      // other orig regs.
+ ImmMapTy::const_iterator OtherImms[] = {
+ Imms.begin(), prior(Imms.end()),
+ Imms.upper_bound((Imms.begin()->first + prior(Imms.end())->first) / 2)
+ };
+ for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
+ ImmMapTy::const_iterator M = OtherImms[i];
+ if (M == J || M == JE) continue;
+
+ // Compute the difference between the two.
+ int64_t Imm = (uint64_t)JImm - M->first;
+ for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1;
+ LUIdx = UsedByIndices.find_next(LUIdx))
+ // Make a memo of this use, offset, and register tuple.
+ if (UniqueItems.insert(std::make_pair(LUIdx, Imm)))
+ WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
}
}
+ }
- // If we get to here, we know that we can transform the setcc instruction to
- // use the post-incremented version of the IV, allowing us to coalesce the
- // live ranges for the IV correctly.
- CondUse->setOffset(SE->getMinusSCEV(CondUse->getOffset(), CondStride));
- CondUse->setIsUseOfPostIncrementedValue(true);
- Changed = true;
+ Map.clear();
+ Sequence.clear();
+ UsedByIndicesMap.clear();
+ UniqueItems.clear();
+
+ // Now iterate through the worklist and add new formulae.
+ for (SmallVectorImpl<WorkItem>::const_iterator I = WorkItems.begin(),
+ E = WorkItems.end(); I != E; ++I) {
+ const WorkItem &WI = *I;
+ size_t LUIdx = WI.LUIdx;
+ LSRUse &LU = Uses[LUIdx];
+ int64_t Imm = WI.Imm;
+ const SCEV *OrigReg = WI.OrigReg;
+
+ const Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
+ const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
+ unsigned BitWidth = SE.getTypeSizeInBits(IntTy);
+
+    // TODO: Use a more targeted data structure.
+ for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
+ Formula F = LU.Formulae[L];
+ // Use the immediate in the scaled register.
+ if (F.ScaledReg == OrigReg) {
+ int64_t Offs = (uint64_t)F.AM.BaseOffs +
+ Imm * (uint64_t)F.AM.Scale;
+ // Don't create 50 + reg(-50).
+ if (F.referencesReg(SE.getSCEV(
+ ConstantInt::get(IntTy, -(uint64_t)Offs))))
+ continue;
+ Formula NewF = F;
+ NewF.AM.BaseOffs = Offs;
+ if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
+ LU.Kind, LU.AccessTy, TLI))
+ continue;
+ NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);
+
+ // If the new scale is a constant in a register, and adding the constant
+ // value to the immediate would produce a value closer to zero than the
+ // immediate itself, then the formula isn't worthwhile.
+ if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg))
+ if (C->getValue()->getValue().isNegative() !=
+ (NewF.AM.BaseOffs < 0) &&
+ (C->getValue()->getValue().abs() * APInt(BitWidth, F.AM.Scale))
+ .ule(APInt(BitWidth, NewF.AM.BaseOffs).abs()))
+ continue;
- ++NumLoopCond;
+ // OK, looks good.
+ (void)InsertFormula(LU, LUIdx, NewF);
+ } else {
+ // Use the immediate in a base register.
+ for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
+ const SCEV *BaseReg = F.BaseRegs[N];
+ if (BaseReg != OrigReg)
+ continue;
+ Formula NewF = F;
+ NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm;
+ if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
+ LU.Kind, LU.AccessTy, TLI))
+ continue;
+ NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);
+
+ // If the new formula has a constant in a register, and adding the
+ // constant value to the immediate would produce a value closer to
+ // zero than the immediate itself, then the formula isn't worthwhile.
+ for (SmallVectorImpl<const SCEV *>::const_iterator
+ J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end();
+ J != JE; ++J)
+ if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J))
+ if (C->getValue()->getValue().isNegative() !=
+ (NewF.AM.BaseOffs < 0) &&
+ C->getValue()->getValue().abs()
+ .ule(APInt(BitWidth, NewF.AM.BaseOffs).abs()))
+ goto skip_formula;
+
+ // Ok, looks good.
+ (void)InsertFormula(LU, LUIdx, NewF);
+ break;
+ skip_formula:;
+ }
+ }
+ }
}
}
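
The grouping step can be pictured with a toy register set: strip the immediate, bucket registers by the remaining base, and treat the constant deltas within a bucket as candidate reuse offsets. A standalone sketch (the base-name strings are hypothetical stand-ins for SCEVs):

#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

int main() {
  // Hypothetical "registers": a base name plus the immediate stripped from it.
  std::vector<std::pair<std::string, int64_t> > Regs = {
    {"p", 0}, {"p", 16}, {"p", 64}, {"q", 8}
  };

  // Group the immediates by base value.
  std::map<std::string, std::set<int64_t> > ByBase;
  for (const auto &R : Regs)
    ByBase[R.first].insert(R.second);

  // Within each group, the pairwise deltas are candidate reuse offsets.
  for (const auto &G : ByBase) {
    if (G.second.size() < 2) continue;       // nothing to reuse
    for (int64_t A : G.second)
      for (int64_t B : G.second)
        if (A < B)
          std::cout << G.first << ": delta " << (B - A) << '\n';
  }
  return 0;                                   // p: 16, 64, 48; q is skipped
}
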
-bool LoopStrengthReduce::OptimizeLoopCountIVOfStride(const SCEV* &Stride,
- IVStrideUse* &CondUse,
- Loop *L) {
- // If the only use is an icmp of a loop exiting conditional branch, then
- // attempt the optimization.
- BasedUser User = BasedUser(*CondUse, SE);
- assert(isa<ICmpInst>(User.Inst) && "Expecting an ICMPInst!");
- ICmpInst *Cond = cast<ICmpInst>(User.Inst);
+/// GenerateAllReuseFormulae - Generate formulae for each use.
+void
+LSRInstance::GenerateAllReuseFormulae() {
+ // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
+ // queries are more precise.
+ for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
+ LSRUse &LU = Uses[LUIdx];
+ for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
+ GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
+ for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
+ GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
+ }
+ for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
+ LSRUse &LU = Uses[LUIdx];
+ for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
+ GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
+ for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
+ GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
+ for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
+ GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
+ for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
+ GenerateScales(LU, LUIdx, LU.Formulae[i]);
+ }
+ for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
+ LSRUse &LU = Uses[LUIdx];
+ for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
+ GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
+ }
- // Less strict check now that compare stride optimization is done.
- if (!ShouldCountToZero(Cond, CondUse, SE, L))
- return false;
+ GenerateCrossUseConstantOffsets();
+}
- Value *CondOp0 = Cond->getOperand(0);
- PHINode *PHIExpr = dyn_cast<PHINode>(CondOp0);
- Instruction *Incr;
- if (!PHIExpr) {
- // Value tested is postinc. Find the phi node.
- Incr = dyn_cast<BinaryOperator>(CondOp0);
- // FIXME: Just use User.OperandValToReplace here?
- if (!Incr || Incr->getOpcode() != Instruction::Add)
- return false;
+/// If there are multiple formulae with the same set of registers used
+/// by other uses, pick the best one and delete the others.
+void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
+#ifndef NDEBUG
+ bool Changed = false;
+#endif
+
+ // Collect the best formula for each unique set of shared registers. This
+ // is reset for each use.
+ typedef DenseMap<SmallVector<const SCEV *, 2>, size_t, UniquifierDenseMapInfo>
+ BestFormulaeTy;
+ BestFormulaeTy BestFormulae;
+
+ for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
+ LSRUse &LU = Uses[LUIdx];
+ FormulaSorter Sorter(L, LU, SE, DT);
+
+ // Clear out the set of used regs; it will be recomputed.
+ LU.Regs.clear();
+
+ for (size_t FIdx = 0, NumForms = LU.Formulae.size();
+ FIdx != NumForms; ++FIdx) {
+ Formula &F = LU.Formulae[FIdx];
+
+ SmallVector<const SCEV *, 2> Key;
+ for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(),
+ JE = F.BaseRegs.end(); J != JE; ++J) {
+ const SCEV *Reg = *J;
+ if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
+ Key.push_back(Reg);
+ }
+ if (F.ScaledReg &&
+ RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
+ Key.push_back(F.ScaledReg);
+      // An unstable sort by host pointer order is OK here, because this key is
+      // only used for uniquifying.
+ std::sort(Key.begin(), Key.end());
+
+ std::pair<BestFormulaeTy::const_iterator, bool> P =
+ BestFormulae.insert(std::make_pair(Key, FIdx));
+ if (!P.second) {
+ Formula &Best = LU.Formulae[P.first->second];
+ if (Sorter.operator()(F, Best))
+ std::swap(F, Best);
+ DEBUG(dbgs() << "Filtering out "; F.print(dbgs());
+ dbgs() << "\n"
+ " in favor of "; Best.print(dbgs());
+ dbgs() << '\n');
+#ifndef NDEBUG
+ Changed = true;
+#endif
+ std::swap(F, LU.Formulae.back());
+ LU.Formulae.pop_back();
+ --FIdx;
+ --NumForms;
+ continue;
+ }
+ if (F.ScaledReg) LU.Regs.insert(F.ScaledReg);
+ LU.Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
+ }
+ BestFormulae.clear();
+ }
- PHIExpr = dyn_cast<PHINode>(Incr->getOperand(0));
- if (!PHIExpr)
- return false;
- // 1 use for preinc value, the increment.
- if (!PHIExpr->hasOneUse())
- return false;
- } else {
- assert(isa<PHINode>(CondOp0) &&
- "Unexpected loop exiting counting instruction sequence!");
- PHIExpr = cast<PHINode>(CondOp0);
- // Value tested is preinc. Find the increment.
- // A CmpInst is not a BinaryOperator; we depend on this.
- Instruction::use_iterator UI = PHIExpr->use_begin();
- Incr = dyn_cast<BinaryOperator>(UI);
- if (!Incr)
- Incr = dyn_cast<BinaryOperator>(++UI);
- // One use for postinc value, the phi. Unnecessarily conservative?
- if (!Incr || !Incr->hasOneUse() || Incr->getOpcode() != Instruction::Add)
- return false;
+ DEBUG(if (Changed) {
+ dbgs() << "\n"
+ "After filtering out undesirable candidates:\n";
+ print_uses(dbgs());
+ });
+}
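
The filtering pass is essentially "keep the best formula per key of shared registers". A toy sketch in which a string key and an integer cost stand in for the real register key and the FormulaSorter comparison (both are assumptions, not the pass's types):

#include <iostream>
#include <map>
#include <string>
#include <vector>

// A formula reduced to the key of registers it shares with other uses, plus
// a scalar cost used to pick between duplicates.
struct Formula { std::string SharedKey; int Cost; };

int main() {
  std::vector<Formula> Formulae = {
    {"r1", 3}, {"r1,r2", 2}, {"r1", 1}, {"r1,r2", 5}
  };
  std::map<std::string, Formula> Best;
  for (const Formula &F : Formulae) {
    std::map<std::string, Formula>::iterator It = Best.find(F.SharedKey);
    if (It == Best.end() || F.Cost < It->second.Cost)
      Best[F.SharedKey] = F;                 // keep only the better duplicate
  }
  for (const auto &P : Best)
    std::cout << P.first << " -> cost " << P.second.Cost << '\n';
  return 0;                                   // r1 -> 1, r1,r2 -> 2
}
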
+
+/// NarrowSearchSpaceUsingHeuristics - If there is an extraordinary number of
+/// formulae to choose from, use some rough heuristics to prune down the number
+/// of formulae. This keeps the main solver from taking an extraordinary amount
+/// of time in some worst-case scenarios.
+void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
+ // This is a rough guess that seems to work fairly well.
+ const size_t Limit = UINT16_MAX;
+
+ SmallPtrSet<const SCEV *, 4> Taken;
+ for (;;) {
+ // Estimate the worst-case number of solutions we might consider. We almost
+ // never consider this many solutions because we prune the search space,
+ // but the pruning isn't always sufficient.
+ uint32_t Power = 1;
+ for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
+ E = Uses.end(); I != E; ++I) {
+ size_t FSize = I->Formulae.size();
+ if (FSize >= Limit) {
+ Power = Limit;
+ break;
+ }
+ Power *= FSize;
+ if (Power >= Limit)
+ break;
+ }
+ if (Power < Limit)
+ break;
+
+    // Ok, we have too many formulae on our hands to handle conveniently.
+ // Use a rough heuristic to thin out the list.
+
+ // Pick the register which is used by the most LSRUses, which is likely
+ // to be a good reuse register candidate.
+ const SCEV *Best = 0;
+ unsigned BestNum = 0;
+ for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
+ I != E; ++I) {
+ const SCEV *Reg = *I;
+ if (Taken.count(Reg))
+ continue;
+ if (!Best)
+ Best = Reg;
+ else {
+ unsigned Count = RegUses.getUsedByIndices(Reg).count();
+ if (Count > BestNum) {
+ Best = Reg;
+ BestNum = Count;
+ }
+ }
+ }
+
+ DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
+                 << " will yield profitable reuse.\n");
+ Taken.insert(Best);
+
+    // In any use with formulae which reference this register, delete formulae
+ // which don't reference it.
+ for (SmallVectorImpl<LSRUse>::iterator I = Uses.begin(),
+ E = Uses.end(); I != E; ++I) {
+ LSRUse &LU = *I;
+ if (!LU.Regs.count(Best)) continue;
+
+ // Clear out the set of used regs; it will be recomputed.
+ LU.Regs.clear();
+
+ for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
+ Formula &F = LU.Formulae[i];
+ if (!F.referencesReg(Best)) {
+ DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n');
+ std::swap(LU.Formulae.back(), F);
+ LU.Formulae.pop_back();
+ --e;
+ --i;
+ continue;
+ }
+
+ if (F.ScaledReg) LU.Regs.insert(F.ScaledReg);
+ LU.Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
+ }
+ }
+
+ DEBUG(dbgs() << "After pre-selection:\n";
+ print_uses(dbgs()));
}
+}
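
The size estimate driving this heuristic is a saturating product of the per-use formula counts; a standalone sketch with assumed counts:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  const uint32_t Limit = 65535;                 // UINT16_MAX, as in the pass
  // Assumed per-use formula counts; their product bounds the number of
  // candidate solutions the recursive solver might visit.
  std::vector<uint32_t> FormulaCounts = {12, 40, 7, 300};
  uint32_t Power = 1;
  for (size_t i = 0; i != FormulaCounts.size(); ++i) {
    uint32_t N = FormulaCounts[i];
    if (N >= Limit) { Power = Limit; break; }   // one huge use is enough
    Power *= N;
    if (Power >= Limit) break;                  // saturate; the exact value no longer matters
  }
  std::cout << (Power >= Limit ? "prune the search space" : "solve directly")
            << '\n';
  return 0;
}
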
- // Replace the increment with a decrement.
- DEBUG(dbgs() << "LSR: Examining use ");
- DEBUG(WriteAsOperand(dbgs(), CondOp0, /*PrintType=*/false));
- DEBUG(dbgs() << " in Inst: " << *Cond << '\n');
- BinaryOperator *Decr = BinaryOperator::Create(Instruction::Sub,
- Incr->getOperand(0), Incr->getOperand(1), "tmp", Incr);
- Incr->replaceAllUsesWith(Decr);
- Incr->eraseFromParent();
-
- // Substitute endval-startval for the original startval, and 0 for the
- // original endval. Since we're only testing for equality this is OK even
- // if the computation wraps around.
- BasicBlock *Preheader = L->getLoopPreheader();
- Instruction *PreInsertPt = Preheader->getTerminator();
- unsigned InBlock = L->contains(PHIExpr->getIncomingBlock(0)) ? 1 : 0;
- Value *StartVal = PHIExpr->getIncomingValue(InBlock);
- Value *EndVal = Cond->getOperand(1);
- DEBUG(dbgs() << " Optimize loop counting iv to count down ["
- << *EndVal << " .. " << *StartVal << "]\n");
-
- // FIXME: check for case where both are constant.
- Constant* Zero = ConstantInt::get(Cond->getOperand(1)->getType(), 0);
- BinaryOperator *NewStartVal = BinaryOperator::Create(Instruction::Sub,
- EndVal, StartVal, "tmp", PreInsertPt);
- PHIExpr->setIncomingValue(InBlock, NewStartVal);
- Cond->setOperand(1, Zero);
- DEBUG(dbgs() << " New icmp: " << *Cond << "\n");
-
- int64_t SInt = cast<SCEVConstant>(Stride)->getValue()->getSExtValue();
- const SCEV *NewStride = 0;
- bool Found = false;
- for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
- const SCEV *OldStride = IU->StrideOrder[i];
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OldStride))
- if (SC->getValue()->getSExtValue() == -SInt) {
- Found = true;
- NewStride = OldStride;
+/// SolveRecurse - This is the recursive solver.
+void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
+ Cost &SolutionCost,
+ SmallVectorImpl<const Formula *> &Workspace,
+ const Cost &CurCost,
+ const SmallPtrSet<const SCEV *, 16> &CurRegs,
+ DenseSet<const SCEV *> &VisitedRegs) const {
+ // Some ideas:
+ // - prune more:
+ // - use more aggressive filtering
+  //    - sort the formulae so that the most profitable solutions are found first
+ // - sort the uses too
+ // - search faster:
+  //    - don't compute a cost and then compare; compare while computing a cost
+  //      and bail early.
+ // - track register sets with SmallBitVector
+
+ const LSRUse &LU = Uses[Workspace.size()];
+
+ // If this use references any register that's already a part of the
+ // in-progress solution, consider it a requirement that a formula must
+ // reference that register in order to be considered. This prunes out
+ // unprofitable searching.
+ SmallSetVector<const SCEV *, 4> ReqRegs;
+ for (SmallPtrSet<const SCEV *, 16>::const_iterator I = CurRegs.begin(),
+ E = CurRegs.end(); I != E; ++I)
+ if (LU.Regs.count(*I))
+ ReqRegs.insert(*I);
+
+ bool AnySatisfiedReqRegs = false;
+ SmallPtrSet<const SCEV *, 16> NewRegs;
+ Cost NewCost;
+retry:
+ for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
+ E = LU.Formulae.end(); I != E; ++I) {
+ const Formula &F = *I;
+
+ // Ignore formulae which do not use any of the required registers.
+ for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(),
+ JE = ReqRegs.end(); J != JE; ++J) {
+ const SCEV *Reg = *J;
+ if ((!F.ScaledReg || F.ScaledReg != Reg) &&
+ std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) ==
+ F.BaseRegs.end())
+ goto skip;
+ }
+ AnySatisfiedReqRegs = true;
+
+ // Evaluate the cost of the current formula. If it's already worse than
+ // the current best, prune the search at that point.
+ NewCost = CurCost;
+ NewRegs = CurRegs;
+ NewCost.RateFormula(F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT);
+ if (NewCost < SolutionCost) {
+ Workspace.push_back(&F);
+ if (Workspace.size() != Uses.size()) {
+ SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
+ NewRegs, VisitedRegs);
+ if (F.getNumRegs() == 1 && Workspace.size() == 1)
+ VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
+ } else {
+ DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
+ dbgs() << ". Regs:";
+ for (SmallPtrSet<const SCEV *, 16>::const_iterator
+ I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I)
+ dbgs() << ' ' << **I;
+ dbgs() << '\n');
+
+ SolutionCost = NewCost;
+ Solution = Workspace;
+ }
+ Workspace.pop_back();
+ }
+ skip:;
+ }
+
+ // If none of the formulae had all of the required registers, relax the
+ // constraint so that we don't exclude all formulae.
+ if (!AnySatisfiedReqRegs) {
+ ReqRegs.clear();
+ goto retry;
+ }
+}
+
+void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
+ SmallVector<const Formula *, 8> Workspace;
+ Cost SolutionCost;
+ SolutionCost.Loose();
+ Cost CurCost;
+ SmallPtrSet<const SCEV *, 16> CurRegs;
+ DenseSet<const SCEV *> VisitedRegs;
+ Workspace.reserve(Uses.size());
+
+ SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
+ CurRegs, VisitedRegs);
+
+ // Ok, we've now made all our decisions.
+ DEBUG(dbgs() << "\n"
+ "The chosen solution requires "; SolutionCost.print(dbgs());
+ dbgs() << ":\n";
+ for (size_t i = 0, e = Uses.size(); i != e; ++i) {
+ dbgs() << " ";
+ Uses[i].print(dbgs());
+ dbgs() << "\n"
+ " ";
+ Solution[i]->print(dbgs());
+ dbgs() << '\n';
+ });
+}
+
+/// getImmediateDominator - A handy utility for the specific DominatorTree
+/// query that we need here.
+///
+static BasicBlock *getImmediateDominator(BasicBlock *BB, DominatorTree &DT) {
+ DomTreeNode *Node = DT.getNode(BB);
+ if (!Node) return 0;
+ Node = Node->getIDom();
+ if (!Node) return 0;
+ return Node->getBlock();
+}
+
+Value *LSRInstance::Expand(const LSRFixup &LF,
+ const Formula &F,
+ BasicBlock::iterator IP,
+ Loop *L, Instruction *IVIncInsertPos,
+ SCEVExpander &Rewriter,
+ SmallVectorImpl<WeakVH> &DeadInsts,
+ ScalarEvolution &SE, DominatorTree &DT) const {
+ const LSRUse &LU = Uses[LF.LUIdx];
+
+  // First, collect the instructions which the expansion must remain dominated
+  // by; any operand that will be required in the expansion has to dominate
+  // the insertion point.
+ SmallVector<Instruction *, 4> Inputs;
+ if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
+ Inputs.push_back(I);
+ if (LU.Kind == LSRUse::ICmpZero)
+ if (Instruction *I =
+ dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
+ Inputs.push_back(I);
+ if (LF.PostIncLoop && !L->contains(LF.UserInst))
+ Inputs.push_back(L->getLoopLatch()->getTerminator());
+
+ // Then, climb up the immediate dominator tree as far as we can go while
+ // still being dominated by the input positions.
+ for (;;) {
+ bool AllDominate = true;
+ Instruction *BetterPos = 0;
+ BasicBlock *IDom = getImmediateDominator(IP->getParent(), DT);
+ if (!IDom) break;
+ Instruction *Tentative = IDom->getTerminator();
+ for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
+ E = Inputs.end(); I != E; ++I) {
+ Instruction *Inst = *I;
+ if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
+ AllDominate = false;
break;
}
+ if (IDom == Inst->getParent() &&
+ (!BetterPos || DT.dominates(BetterPos, Inst)))
+ BetterPos = next(BasicBlock::iterator(Inst));
+ }
+ if (!AllDominate)
+ break;
+ if (BetterPos)
+ IP = BetterPos;
+ else
+ IP = Tentative;
}
+ while (isa<PHINode>(IP)) ++IP;
+
+ // Inform the Rewriter if we have a post-increment use, so that it can
+ // perform an advantageous expansion.
+ Rewriter.setPostInc(LF.PostIncLoop);
+
+ // This is the type that the user actually needs.
+ const Type *OpTy = LF.OperandValToReplace->getType();
+ // This will be the type that we'll initially expand to.
+ const Type *Ty = F.getType();
+ if (!Ty)
+ // No type known; just expand directly to the ultimate type.
+ Ty = OpTy;
+ else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
+ // Expand directly to the ultimate type if it's the right size.
+ Ty = OpTy;
+ // This is the type to do integer arithmetic in.
+ const Type *IntTy = SE.getEffectiveSCEVType(Ty);
+
+ // Build up a list of operands to add together to form the full base.
+ SmallVector<const SCEV *, 8> Ops;
+
+ // Expand the BaseRegs portion.
+ for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
+ E = F.BaseRegs.end(); I != E; ++I) {
+ const SCEV *Reg = *I;
+ assert(!Reg->isZero() && "Zero allocated in a base register!");
+
+ // If we're expanding for a post-inc user for the add-rec's loop, make the
+ // post-inc adjustment.
+ const SCEV *Start = Reg;
+ while (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Start)) {
+ if (AR->getLoop() == LF.PostIncLoop) {
+ Reg = SE.getAddExpr(Reg, AR->getStepRecurrence(SE));
+ // If the user is inside the loop, insert the code after the increment
+ // so that it is dominated by its operand.
+ if (L->contains(LF.UserInst))
+ IP = IVIncInsertPos;
+ break;
+ }
+ Start = AR->getStart();
+ }
- if (!Found)
- NewStride = SE->getIntegerSCEV(-SInt, Stride->getType());
- IU->AddUser(NewStride, CondUse->getOffset(), Cond, Cond->getOperand(0));
- IU->IVUsesByStride[Stride]->removeUser(CondUse);
+ Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP)));
+ }
- CondUse = &IU->IVUsesByStride[NewStride]->Users.back();
- Stride = NewStride;
+ // Expand the ScaledReg portion.
+ Value *ICmpScaledV = 0;
+ if (F.AM.Scale != 0) {
+ const SCEV *ScaledS = F.ScaledReg;
+
+ // If we're expanding for a post-inc user for the add-rec's loop, make the
+ // post-inc adjustment.
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ScaledS))
+ if (AR->getLoop() == LF.PostIncLoop)
+ ScaledS = SE.getAddExpr(ScaledS, AR->getStepRecurrence(SE));
+
+ if (LU.Kind == LSRUse::ICmpZero) {
+ // An interesting way of "folding" with an icmp is to use a negated
+ // scale, which we'll implement by inserting it into the other operand
+ // of the icmp.
+ assert(F.AM.Scale == -1 &&
+ "The only scale supported by ICmpZero uses is -1!");
+ ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP);
+ } else {
+ // Otherwise just expand the scaled register and an explicit scale,
+ // which is expected to be matched as part of the address.
+ ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP));
+ ScaledS = SE.getMulExpr(ScaledS,
+ SE.getIntegerSCEV(F.AM.Scale,
+ ScaledS->getType()));
+ Ops.push_back(ScaledS);
+ }
+ }
- ++NumCountZero;
+ // Expand the immediate portions.
+ if (F.AM.BaseGV)
+ Ops.push_back(SE.getSCEV(F.AM.BaseGV));
+ int64_t Offset = (uint64_t)F.AM.BaseOffs + LF.Offset;
+ if (Offset != 0) {
+ if (LU.Kind == LSRUse::ICmpZero) {
+ // The other interesting way of "folding" with an ICmpZero is to use a
+ // negated immediate.
+ if (!ICmpScaledV)
+ ICmpScaledV = ConstantInt::get(IntTy, -Offset);
+ else {
+ Ops.push_back(SE.getUnknown(ICmpScaledV));
+ ICmpScaledV = ConstantInt::get(IntTy, Offset);
+ }
+ } else {
+ // Just add the immediate values. These again are expected to be matched
+ // as part of the address.
+ Ops.push_back(SE.getIntegerSCEV(Offset, IntTy));
+ }
+ }
- return true;
+ // Emit instructions summing all the operands.
+ const SCEV *FullS = Ops.empty() ?
+ SE.getIntegerSCEV(0, IntTy) :
+ SE.getAddExpr(Ops);
+ Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP);
+
+ // We're done expanding now, so reset the rewriter.
+ Rewriter.setPostInc(0);
+
+ // An ICmpZero Formula represents an ICmp which we're handling as a
+ // comparison against zero. Now that we've expanded an expression for that
+ // form, update the ICmp's other operand.
+ if (LU.Kind == LSRUse::ICmpZero) {
+ ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
+ DeadInsts.push_back(CI->getOperand(1));
+ assert(!F.AM.BaseGV && "ICmp does not support folding a global value and "
+ "a scale at the same time!");
+ if (F.AM.Scale == -1) {
+ if (ICmpScaledV->getType() != OpTy) {
+ Instruction *Cast =
+ CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
+ OpTy, false),
+ ICmpScaledV, OpTy, "tmp", CI);
+ ICmpScaledV = Cast;
+ }
+ CI->setOperand(1, ICmpScaledV);
+ } else {
+ assert(F.AM.Scale == 0 &&
+ "ICmp does not support folding a global value and "
+ "a scale at the same time!");
+ Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
+ -(uint64_t)Offset);
+ if (C->getType() != OpTy)
+ C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
+ OpTy, false),
+ C, OpTy);
+
+ CI->setOperand(1, C);
+ }
+ }
+
+ return FullV;
}
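Ignoring the post-increment and ICmpZero special cases, the value Expand builds for a fixup is the sum BaseGV + BaseOffs + fixup offset + sum(BaseRegs) + Scale * ScaledReg. A small numeric sketch of that formula shape, with hypothetical plain-integer fields standing in for SCEVs:

#include <cstddef>
#include <cstdint>
#include <vector>

struct ToyFormula {
  std::vector<int64_t> BaseRegs; // current values of the base registers
  int64_t ScaledReg;             // value of the scaled register
  int64_t Scale;                 // multiplier for ScaledReg (0 = unused)
  int64_t BaseOffs;              // immediate folded into the formula
};

// What the expanded code computes for one use, before any reuse-by-cast.
static int64_t evaluate(const ToyFormula &F, int64_t FixupOffset) {
  int64_t Result = F.BaseOffs + FixupOffset;
  for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i)
    Result += F.BaseRegs[i];
  if (F.Scale != 0)
    Result += F.Scale * F.ScaledReg;
  return Result;
}

For an ICmpZero use, the scaled part (with Scale == -1) or the negated offset is moved into the other icmp operand instead of being added into the sum, which is what the ICmpScaledV handling above implements.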
-/// OptimizeLoopCountIV - If, after all sharing of IVs, the IV used for deciding
-/// when to exit the loop is used only for that purpose, try to rearrange things
-/// so it counts down to a test against zero.
-bool LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) {
- bool ThisChanged = false;
- for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
- const SCEV *Stride = IU->StrideOrder[i];
- std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
- IU->IVUsesByStride.find(Stride);
- assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
- // FIXME: Generalize to non-affine IV's.
- if (!SI->first->isLoopInvariant(L))
- continue;
- // If stride is a constant and it has an icmpinst use, check if we can
- // optimize the loop to count down.
- if (isa<SCEVConstant>(Stride) && SI->second->Users.size() == 1) {
- Instruction *User = SI->second->Users.begin()->getUser();
- if (!isa<ICmpInst>(User))
- continue;
- const SCEV *CondStride = Stride;
- IVStrideUse *Use = &*SI->second->Users.begin();
- if (!OptimizeLoopCountIVOfStride(CondStride, Use, L))
- continue;
- ThisChanged = true;
+/// Rewrite - Emit instructions for the leading candidate expression for this
+/// LSRUse (this is called "expanding"), and update the UserInst to reference
+/// the newly expanded value.
+void LSRInstance::Rewrite(const LSRFixup &LF,
+ const Formula &F,
+ Loop *L, Instruction *IVIncInsertPos,
+ SCEVExpander &Rewriter,
+ SmallVectorImpl<WeakVH> &DeadInsts,
+ ScalarEvolution &SE, DominatorTree &DT,
+ Pass *P) const {
+ const Type *OpTy = LF.OperandValToReplace->getType();
+
+ // First, find an insertion point that dominates UserInst. For PHI nodes,
+ // find the nearest block which dominates all the relevant uses.
+ if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
+ DenseMap<BasicBlock *, Value *> Inserted;
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
+ if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
+ BasicBlock *BB = PN->getIncomingBlock(i);
- // Now check if it's possible to reuse this iv for other stride uses.
- for (unsigned j = 0, ee = IU->StrideOrder.size(); j != ee; ++j) {
- const SCEV *SStride = IU->StrideOrder[j];
- if (SStride == CondStride)
- continue;
- std::map<const SCEV *, IVUsersOfOneStride *>::iterator SII =
- IU->IVUsesByStride.find(SStride);
- assert(SII != IU->IVUsesByStride.end() && "Stride doesn't exist!");
- // FIXME: Generalize to non-affine IV's.
- if (!SII->first->isLoopInvariant(L))
- continue;
- // FIXME: Rewrite other stride using CondStride.
+ // If this is a critical edge, split the edge so that we do not insert
+ // the code on all predecessor/successor paths. We do this unless this
+ // is the canonical backedge for this loop, which complicates post-inc
+ // users.
+ if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
+ !isa<IndirectBrInst>(BB->getTerminator()) &&
+ (PN->getParent() != L->getHeader() || !L->contains(BB))) {
+ // Split the critical edge.
+ BasicBlock *NewBB = SplitCriticalEdge(BB, PN->getParent(), P);
+
+ // If PN is outside of the loop and BB is in the loop, we want to
+ // move the block to be immediately before the PHI block, not
+ // immediately after BB.
+ if (L->contains(BB) && !L->contains(PN))
+ NewBB->moveBefore(PN->getParent());
+
+ // Splitting the edge can reduce the number of PHI entries we have.
+ e = PN->getNumIncomingValues();
+ BB = NewBB;
+ i = PN->getBasicBlockIndex(BB);
+ }
+
+ std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
+ Inserted.insert(std::make_pair(BB, static_cast<Value *>(0)));
+ if (!Pair.second)
+ PN->setIncomingValue(i, Pair.first->second);
+ else {
+ Value *FullV = Expand(LF, F, BB->getTerminator(), L, IVIncInsertPos,
+ Rewriter, DeadInsts, SE, DT);
+
+ // If this is reuse-by-noop-cast, insert the noop cast.
+ if (FullV->getType() != OpTy)
+ FullV =
+ CastInst::Create(CastInst::getCastOpcode(FullV, false,
+ OpTy, false),
+ FullV, LF.OperandValToReplace->getType(),
+ "tmp", BB->getTerminator());
+
+ PN->setIncomingValue(i, FullV);
+ Pair.first->second = FullV;
+ }
}
+ } else {
+ Value *FullV = Expand(LF, F, LF.UserInst, L, IVIncInsertPos,
+ Rewriter, DeadInsts, SE, DT);
+
+ // If this is reuse-by-noop-cast, insert the noop cast.
+ if (FullV->getType() != OpTy) {
+ Instruction *Cast =
+ CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
+ FullV, OpTy, "tmp", LF.UserInst);
+ FullV = Cast;
}
+
+ // Update the user. ICmpZero is handled specially here (for now) because
+ // Expand may have updated one of the operands of the icmp already, and
+ // its new value may happen to be equal to LF.OperandValToReplace, in
+ // which case doing replaceUsesOfWith leads to replacing both operands
+ // with the same value. TODO: Reorganize this.
+ if (Uses[LF.LUIdx].Kind == LSRUse::ICmpZero)
+ LF.UserInst->setOperand(0, FullV);
+ else
+ LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
}
- Changed |= ThisChanged;
- return ThisChanged;
+ DeadInsts.push_back(LF.OperandValToReplace);
}
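For a PHI user, Rewrite expands the replacement once per incoming block and memoizes the result in the Inserted map, so multiple PHI entries arriving over the same (possibly just-split) edge share one expansion. The memoization pattern in miniature, with a hypothetical expandAt callback standing in for Expand and plain ints standing in for blocks and values:

#include <cstddef>
#include <map>
#include <utility>
#include <vector>

typedef std::pair<int, int> PhiEntry; // (incoming block id, incoming value id)

template <typename ExpandFn>
void rewritePhiEntries(std::vector<PhiEntry> &Entries, int OldValue,
                       ExpandFn expandAt) {
  std::map<int, int> Inserted; // block id -> value already expanded there
  for (size_t i = 0, e = Entries.size(); i != e; ++i) {
    if (Entries[i].second != OldValue)
      continue; // this entry does not use the value being replaced
    int BB = Entries[i].first;
    std::map<int, int>::iterator It = Inserted.find(BB);
    if (It == Inserted.end())
      // First entry from this predecessor: expand at its terminator.
      It = Inserted.insert(std::make_pair(BB, expandAt(BB))).first;
    Entries[i].second = It->second;
  }
}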
-bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) {
- IU = &getAnalysis<IVUsers>();
- SE = &getAnalysis<ScalarEvolution>();
- Changed = false;
+void
+LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
+ Pass *P) {
+ // Keep track of instructions we may have made dead, so that
+ // we can remove them after we are done working.
+ SmallVector<WeakVH, 16> DeadInsts;
+
+ SCEVExpander Rewriter(SE);
+ Rewriter.disableCanonicalMode();
+ Rewriter.setIVIncInsertPos(L, IVIncInsertPos);
- // If LoopSimplify form is not available, stay out of trouble.
- if (!L->getLoopPreheader() || !L->getLoopLatch())
- return false;
+ // Expand the new value definitions and update the users.
+ for (size_t i = 0, e = Fixups.size(); i != e; ++i) {
+ size_t LUIdx = Fixups[i].LUIdx;
+
+ Rewrite(Fixups[i], *Solution[LUIdx], L, IVIncInsertPos, Rewriter,
+ DeadInsts, SE, DT, P);
+
+ Changed = true;
+ }
- if (!IU->IVUsesByStride.empty()) {
- DEBUG(dbgs() << "\nLSR on \"" << L->getHeader()->getParent()->getName()
- << "\" ";
- L->print(dbgs()));
+ // Clean up after ourselves. This must be done before deleting any
+ // instructions.
+ Rewriter.clear();
- // Sort the StrideOrder so we process larger strides first.
- std::stable_sort(IU->StrideOrder.begin(), IU->StrideOrder.end(),
- StrideCompare(SE));
+ Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
+}
- // Optimize induction variables. Some indvar uses can be transformed to use
- // strides that will be needed for other purposes. A common example of this
- // is the exit test for the loop, which can often be rewritten to use the
- // computation of some other indvar to decide when to terminate the loop.
- OptimizeIndvars(L);
+LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
+ : IU(P->getAnalysis<IVUsers>()),
+ SE(P->getAnalysis<ScalarEvolution>()),
+ DT(P->getAnalysis<DominatorTree>()),
+ TLI(tli), L(l), Changed(false), IVIncInsertPos(0) {
- // Change loop terminating condition to use the postinc iv when possible
- // and optimize loop terminating compare. FIXME: Move this after
- // StrengthReduceIVUsersOfStride?
- OptimizeLoopTermCond(L);
+ // If LoopSimplify form is not available, stay out of trouble.
+ if (!L->isLoopSimplifyForm()) return;
+
+ // If there's no interesting work to be done, bail early.
+ if (IU.empty()) return;
+
+ DEBUG(dbgs() << "\nLSR on loop ";
+ WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
+ dbgs() << ":\n");
+
+  /// OptimizeShadowIV - If the IV is used in an int-to-float cast
+  /// inside the loop, then try to eliminate the cast operation.
+ OptimizeShadowIV();
+
+ // Change loop terminating condition to use the postinc iv when possible.
+ Changed |= OptimizeLoopTermCond();
+
+ CollectInterestingTypesAndFactors();
+ CollectFixupsAndInitialFormulae();
+ CollectLoopInvariantFixupsAndFormulae();
+
+ DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
+ print_uses(dbgs()));
+
+ // Now use the reuse data to generate a bunch of interesting ways
+ // to formulate the values needed for the uses.
+ GenerateAllReuseFormulae();
+
+ DEBUG(dbgs() << "\n"
+ "After generating reuse formulae:\n";
+ print_uses(dbgs()));
+
+ FilterOutUndesirableDedicatedRegisters();
+ NarrowSearchSpaceUsingHeuristics();
+
+ SmallVector<const Formula *, 8> Solution;
+ Solve(Solution);
+ assert(Solution.size() == Uses.size() && "Malformed solution!");
+
+ // Release memory that is no longer needed.
+ Factors.clear();
+ Types.clear();
+ RegUses.clear();
+
+#ifndef NDEBUG
+ // Formulae should be legal.
+ for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
+ E = Uses.end(); I != E; ++I) {
+ const LSRUse &LU = *I;
+ for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
+ JE = LU.Formulae.end(); J != JE; ++J)
+ assert(isLegalUse(J->AM, LU.MinOffset, LU.MaxOffset,
+ LU.Kind, LU.AccessTy, TLI) &&
+ "Illegal formula generated!");
+ };
+#endif
- // FIXME: We can shrink overlarge IV's here. e.g. if the code has
- // computation in i64 values and the target doesn't support i64, demote
- // the computation to 32-bit if safe.
+ // Now that we've decided what we want, make it so.
+ ImplementSolution(Solution, P);
+}
- // FIXME: Attempt to reuse values across multiple IV's. In particular, we
- // could have something like "for(i) { foo(i*8); bar(i*16) }", which should
- // be codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC.
- // Need to be careful that IV's are all the same type. Only works for
- // intptr_t indvars.
+void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
+ if (Factors.empty() && Types.empty()) return;
- // IVsByStride keeps IVs for one particular loop.
- assert(IVsByStride.empty() && "Stale entries in IVsByStride?");
+ OS << "LSR has identified the following interesting factors and types: ";
+ bool First = true;
- StrengthReduceIVUsers(L);
+ for (SmallSetVector<int64_t, 8>::const_iterator
+ I = Factors.begin(), E = Factors.end(); I != E; ++I) {
+ if (!First) OS << ", ";
+ First = false;
+ OS << '*' << *I;
+ }
- // After all sharing is done, see if we can adjust the loop to test against
- // zero instead of counting up to a maximum. This is usually faster.
- OptimizeLoopCountIV(L);
+ for (SmallSetVector<const Type *, 4>::const_iterator
+ I = Types.begin(), E = Types.end(); I != E; ++I) {
+ if (!First) OS << ", ";
+ First = false;
+ OS << '(' << **I << ')';
+ }
+ OS << '\n';
+}
- // We're done analyzing this loop; release all the state we built up for it.
- IVsByStride.clear();
+void LSRInstance::print_fixups(raw_ostream &OS) const {
+ OS << "LSR is examining the following fixup sites:\n";
+ for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
+ E = Fixups.end(); I != E; ++I) {
+ const LSRFixup &LF = *I;
+ dbgs() << " ";
+ LF.print(OS);
+ OS << '\n';
+ }
+}
- // Clean up after ourselves
- DeleteTriviallyDeadInstructions();
+void LSRInstance::print_uses(raw_ostream &OS) const {
+ OS << "LSR is examining the following uses:\n";
+ for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
+ E = Uses.end(); I != E; ++I) {
+ const LSRUse &LU = *I;
+ dbgs() << " ";
+ LU.print(OS);
+ OS << '\n';
+ for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
+ JE = LU.Formulae.end(); J != JE; ++J) {
+ OS << " ";
+ J->print(OS);
+ OS << '\n';
+ }
}
+}
+
+void LSRInstance::print(raw_ostream &OS) const {
+ print_factors_and_types(OS);
+ print_fixups(OS);
+ print_uses(OS);
+}
+
+void LSRInstance::dump() const {
+ print(errs()); errs() << '\n';
+}
+
+namespace {
+
+class LoopStrengthReduce : public LoopPass {
+ /// TLI - Keep a pointer of a TargetLowering to consult for determining
+ /// transformation profitability.
+ const TargetLowering *const TLI;
+
+public:
+ static char ID; // Pass ID, replacement for typeid
+ explicit LoopStrengthReduce(const TargetLowering *tli = 0);
+
+private:
+ bool runOnLoop(Loop *L, LPPassManager &LPM);
+ void getAnalysisUsage(AnalysisUsage &AU) const;
+};
+
+}
+
+char LoopStrengthReduce::ID = 0;
+static RegisterPass<LoopStrengthReduce>
+X("loop-reduce", "Loop Strength Reduction");
+
+Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
+ return new LoopStrengthReduce(TLI);
+}
+
+LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli)
+ : LoopPass(&ID), TLI(tli) {}
+
+void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
+ // We split critical edges, so we change the CFG. However, we do update
+ // many analyses if they are around.
+ AU.addPreservedID(LoopSimplifyID);
+ AU.addPreserved<LoopInfo>();
+ AU.addPreserved("domfrontier");
+
+ AU.addRequiredID(LoopSimplifyID);
+ AU.addRequired<DominatorTree>();
+ AU.addPreserved<DominatorTree>();
+ AU.addRequired<ScalarEvolution>();
+ AU.addPreserved<ScalarEvolution>();
+ AU.addRequired<IVUsers>();
+ AU.addPreserved<IVUsers>();
+}
+
+bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
+ bool Changed = false;
+
+ // Run the main LSR transformation.
+ Changed |= LSRInstance(TLI, L, this).getChanged();
// At this point, it is worth checking to see if any recurrence PHIs are also
// dead, so that we can remove them as well.
diff --git a/lib/Transforms/Scalar/LoopUnrollPass.cpp b/lib/Transforms/Scalar/LoopUnrollPass.cpp
index ee8cb4f..a355ec3 100644
--- a/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -76,11 +76,12 @@ static RegisterPass<LoopUnroll> X("loop-unroll", "Unroll loops");
Pass *llvm::createLoopUnrollPass() { return new LoopUnroll(); }
/// ApproximateLoopSize - Approximate the size of the loop.
-static unsigned ApproximateLoopSize(const Loop *L) {
+static unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls) {
CodeMetrics Metrics;
for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
I != E; ++I)
Metrics.analyzeBasicBlock(*I);
+ NumCalls = Metrics.NumCalls;
return Metrics.NumInsts;
}
@@ -110,8 +111,13 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
// Enforce the threshold.
if (UnrollThreshold != NoThreshold) {
- unsigned LoopSize = ApproximateLoopSize(L);
+ unsigned NumCalls;
+ unsigned LoopSize = ApproximateLoopSize(L, NumCalls);
DEBUG(dbgs() << " Loop Size = " << LoopSize << "\n");
+ if (NumCalls != 0) {
+ DEBUG(dbgs() << " Not unrolling loop with function calls.\n");
+ return false;
+ }
uint64_t Size = (uint64_t)LoopSize*Count;
if (TripCount != 1 && Size > UnrollThreshold) {
DEBUG(dbgs() << " Too large to fully unroll with count: " << Count
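The new threshold logic bails out of unrolling as soon as the loop contains a call, since the size estimate is not trustworthy in that case, and otherwise weighs the unrolled size (size times count) against the threshold. A sketch of that decision with made-up parameter names; the real pass may reduce the unroll count instead of giving up when the product is too large:

#include <cstdint>

static bool mayFullyUnroll(unsigned LoopSize, unsigned NumCalls,
                           unsigned Count, unsigned TripCount,
                           unsigned Threshold /* 0 = no threshold */) {
  if (Threshold == 0)
    return true;              // Threshold checking disabled.
  if (NumCalls != 0)
    return false;             // Never unroll loops containing calls.
  uint64_t Size = (uint64_t)LoopSize * Count;
  return TripCount == 1 || Size <= Threshold;
}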
diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp
index 527a7b5..990e0c4 100644
--- a/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -169,6 +169,10 @@ Pass *llvm::createLoopUnswitchPass(bool Os) {
/// invariant in the loop, or has an invariant piece, return the invariant.
/// Otherwise, return null.
static Value *FindLIVLoopCondition(Value *Cond, Loop *L, bool &Changed) {
+ // We can never unswitch on vector conditions.
+ if (isa<VectorType>(Cond->getType()))
+ return 0;
+
// Constants should be folded, not unswitched on!
if (isa<Constant>(Cond)) return 0;
@@ -401,7 +405,7 @@ bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val,
/// UnswitchIfProfitable - We have found that we can unswitch currentLoop when
/// LoopCond == Val to simplify the loop. If we decide that this is profitable,
/// unswitch the loop, reprocess the pieces, then return true.
-bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val){
+bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
initLoopData();
@@ -867,7 +871,7 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
// If we know that LIC == Val, or that LIC == NotVal, just replace uses of LIC
// in the loop with the appropriate one directly.
if (IsEqual || (isa<ConstantInt>(Val) &&
- Val->getType()->isInteger(1))) {
+ Val->getType()->isIntegerTy(1))) {
Value *Replacement;
if (IsEqual)
Replacement = Val;
@@ -993,10 +997,10 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
case Instruction::And:
if (isa<ConstantInt>(I->getOperand(0)) &&
// constant -> RHS
- I->getOperand(0)->getType()->isInteger(1))
+ I->getOperand(0)->getType()->isIntegerTy(1))
cast<BinaryOperator>(I)->swapOperands();
if (ConstantInt *CB = dyn_cast<ConstantInt>(I->getOperand(1)))
- if (CB->getType()->isInteger(1)) {
+ if (CB->getType()->isIntegerTy(1)) {
if (CB->isOne()) // X & 1 -> X
ReplaceUsesOfWith(I, I->getOperand(0), Worklist, L, LPM);
else // X & 0 -> 0
@@ -1007,10 +1011,10 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
case Instruction::Or:
if (isa<ConstantInt>(I->getOperand(0)) &&
// constant -> RHS
- I->getOperand(0)->getType()->isInteger(1))
+ I->getOperand(0)->getType()->isIntegerTy(1))
cast<BinaryOperator>(I)->swapOperands();
if (ConstantInt *CB = dyn_cast<ConstantInt>(I->getOperand(1)))
- if (CB->getType()->isInteger(1)) {
+ if (CB->getType()->isIntegerTy(1)) {
if (CB->isOne()) // X | 1 -> 1
ReplaceUsesOfWith(I, I->getOperand(1), Worklist, L, LPM);
else // X | 0 -> X
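The isIntegerTy(1) guards matter because the identities applied here only hold for the boolean type: X & 1 -> X, X & 0 -> 0, X | 1 -> 1, X | 0 -> X. A tiny standalone check of those identities over both possible i1 values:

#include <cassert>

int main() {
  for (int x = 0; x <= 1; ++x) {
    assert((x & 1) == x); // X & 1 -> X
    assert((x & 0) == 0); // X & 0 -> 0
    assert((x | 1) == 1); // X | 1 -> 1
    assert((x | 0) == x); // X | 0 -> X
  }
  return 0;
}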
diff --git a/lib/Transforms/Scalar/Makefile b/lib/Transforms/Scalar/Makefile
index e18f30f..cc42fd0 100644
--- a/lib/Transforms/Scalar/Makefile
+++ b/lib/Transforms/Scalar/Makefile
@@ -10,7 +10,6 @@
LEVEL = ../../..
LIBRARYNAME = LLVMScalarOpts
BUILD_ARCHIVE = 1
-CXXFLAGS = -fno-rtti
include $(LEVEL)/Makefile.common
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index e0aa491..62e2977 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -42,7 +42,7 @@ static Value *isBytewiseValue(Value *V) {
LLVMContext &Context = V->getContext();
// All byte-wide stores are splatable, even of arbitrary variables.
- if (V->getType()->isInteger(8)) return V;
+ if (V->getType()->isIntegerTy(8)) return V;
// Constant float and double values can be handled as integer values if the
// corresponding integer value is "byteable". An important case is 0.0.
diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp
index 4a99f4a..187216a 100644
--- a/lib/Transforms/Scalar/Reassociate.cpp
+++ b/lib/Transforms/Scalar/Reassociate.cpp
@@ -182,7 +182,7 @@ unsigned Reassociate::getRank(Value *V) {
// If this is a not or neg instruction, do not count it for rank. This
// assures us that X and ~X will have the same rank.
- if (!I->getType()->isInteger() ||
+ if (!I->getType()->isIntegerTy() ||
(!BinaryOperator::isNot(I) && !BinaryOperator::isNeg(I)))
++Rank;
@@ -249,7 +249,7 @@ void Reassociate::LinearizeExpr(BinaryOperator *I) {
/// LinearizeExprTree - Given an associative binary expression tree, traverse
/// all of the uses putting it into canonical form. This forces a left-linear
-/// form of the the expression (((a+b)+c)+d), and collects information about the
+/// form of the expression (((a+b)+c)+d), and collects information about the
/// rank of the non-tree operands.
///
/// NOTE: This intentionally destroys the expression tree operands (turning
@@ -299,7 +299,7 @@ void Reassociate::LinearizeExprTree(BinaryOperator *I,
Success = false;
MadeChange = true;
} else if (RHSBO) {
- // Turn (A+B)+(C+D) -> (((A+B)+C)+D). This guarantees the the RHS is not
+ // Turn (A+B)+(C+D) -> (((A+B)+C)+D). This guarantees the RHS is not
// part of the expression tree.
LinearizeExpr(I);
LHS = LHSBO = cast<BinaryOperator>(I->getOperand(0));
@@ -929,10 +929,19 @@ void Reassociate::ReassociateBB(BasicBlock *BB) {
}
// Reject cases where it is pointless to do this.
- if (!isa<BinaryOperator>(BI) || BI->getType()->isFloatingPoint() ||
+ if (!isa<BinaryOperator>(BI) || BI->getType()->isFloatingPointTy() ||
isa<VectorType>(BI->getType()))
continue; // Floating point ops are not associative.
+ // Do not reassociate boolean (i1) expressions. We want to preserve the
+ // original order of evaluation for short-circuited comparisons that
+ // SimplifyCFG has folded to AND/OR expressions. If the expression
+ // is not further optimized, it is likely to be transformed back to a
+ // short-circuited form for code gen, and the source order may have been
+ // optimized for the most likely conditions.
+ if (BI->getType()->isIntegerTy(1))
+ continue;
+
// If this is a subtract instruction which is not already in negate form,
// see if we can convert it to X+-Y.
if (BI->getOpcode() == Instruction::Sub) {
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index f473480..822712e 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -202,12 +202,18 @@ bool SROA::performPromotion(Function &F) {
return Changed;
}
-/// getNumSAElements - Return the number of elements in the specific struct or
-/// array.
-static uint64_t getNumSAElements(const Type *T) {
+/// ShouldAttemptScalarRepl - Decide if an alloca is a good candidate for
+/// SROA. It must be a struct or array type with a small number of elements.
+static bool ShouldAttemptScalarRepl(AllocaInst *AI) {
+ const Type *T = AI->getAllocatedType();
+ // Do not promote any struct into more than 32 separate vars.
if (const StructType *ST = dyn_cast<StructType>(T))
- return ST->getNumElements();
- return cast<ArrayType>(T)->getNumElements();
+ return ST->getNumElements() <= 32;
+ // Arrays are much less likely to be safe for SROA; only consider
+ // them if they are very small.
+ if (const ArrayType *AT = dyn_cast<ArrayType>(T))
+ return AT->getNumElements() <= 8;
+ return false;
}
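The rewritten heuristic replaces the old shared element-count limit (SRThreshold/4 for both structs and arrays) with fixed caps: split structs of up to 32 fields, but arrays only up to 8 elements. The same decision over a toy type descriptor (hypothetical, not the LLVM type system):

struct ToyAggregate {
  enum Kind { Struct, Array, Other };
  Kind TypeKind;
  unsigned NumElements;
};

static bool shouldAttemptScalarRepl(const ToyAggregate &T) {
  if (T.TypeKind == ToyAggregate::Struct)
    return T.NumElements <= 32; // never split into more than 32 variables
  if (T.TypeKind == ToyAggregate::Array)
    return T.NumElements <= 8;  // arrays rarely profit; only very small ones
  return false;                 // scalars etc. are handled elsewhere
}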
// performScalarRepl - This algorithm is a simple worklist driven algorithm,
@@ -266,22 +272,18 @@ bool SROA::performScalarRepl(Function &F) {
// Do not promote [0 x %struct].
if (AllocaSize == 0) continue;
+ // If the alloca looks like a good candidate for scalar replacement, and if
+ // all its users can be transformed, then split up the aggregate into its
+ // separate elements.
+ if (ShouldAttemptScalarRepl(AI) && isSafeAllocaToScalarRepl(AI)) {
+ DoScalarReplacement(AI, WorkList);
+ Changed = true;
+ continue;
+ }
+
// Do not promote any struct whose size is too big.
if (AllocaSize > SRThreshold) continue;
- if ((isa<StructType>(AI->getAllocatedType()) ||
- isa<ArrayType>(AI->getAllocatedType())) &&
- // Do not promote any struct into more than "32" separate vars.
- getNumSAElements(AI->getAllocatedType()) <= SRThreshold/4) {
- // Check that all of the users of the allocation are capable of being
- // transformed.
- if (isSafeAllocaToScalarRepl(AI)) {
- DoScalarReplacement(AI, WorkList);
- Changed = true;
- continue;
- }
- }
-
// If we can turn this aggregate value (potentially with casts) into a
// simple scalar value that can be mem2reg'd into a register value.
// IsNotTrivial tracks whether this is something that mem2reg could have
@@ -681,7 +683,7 @@ void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
Val->takeName(GEPI);
}
if (Val->getType() != GEPI->getType())
- Val = new BitCastInst(Val, GEPI->getType(), Val->getNameStr(), GEPI);
+ Val = new BitCastInst(Val, GEPI->getType(), Val->getName(), GEPI);
GEPI->replaceAllUsesWith(Val);
DeadInsts.push_back(GEPI);
}
@@ -769,7 +771,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
Value *Idx[2] = { Zero,
ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) };
OtherElt = GetElementPtrInst::CreateInBounds(OtherPtr, Idx, Idx + 2,
- OtherPtr->getNameStr()+"."+Twine(i),
+ OtherPtr->getName()+"."+Twine(i),
MI);
uint64_t EltOffset;
const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
@@ -833,7 +835,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
StoreVal = ConstantInt::get(Context, TotalVal);
if (isa<PointerType>(ValTy))
StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
- else if (ValTy->isFloatingPoint())
+ else if (ValTy->isFloatingPointTy())
StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
assert(StoreVal->getType() == ValTy && "Type mismatch!");
@@ -853,12 +855,11 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// Cast the element pointer to BytePtrTy.
if (EltPtr->getType() != BytePtrTy)
- EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);
+ EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getName(), MI);
// Cast the other pointer (if we have one) to BytePtrTy.
if (OtherElt && OtherElt->getType() != BytePtrTy)
- OtherElt = new BitCastInst(OtherElt, BytePtrTy,OtherElt->getNameStr(),
- MI);
+ OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getName(), MI);
unsigned EltSize = TD->getTypeAllocSize(EltTy);
@@ -938,7 +939,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
Value *DestField = NewElts[i];
if (EltVal->getType() == FieldTy) {
// Storing to an integer field of this size, just do it.
- } else if (FieldTy->isFloatingPoint() || isa<VectorType>(FieldTy)) {
+ } else if (FieldTy->isFloatingPointTy() || isa<VectorType>(FieldTy)) {
// Bitcast to the right element type (for fp/vector values).
EltVal = new BitCastInst(EltVal, FieldTy, "", SI);
} else {
@@ -982,7 +983,8 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
Value *DestField = NewElts[i];
if (EltVal->getType() == ArrayEltTy) {
// Storing to an integer field of this size, just do it.
- } else if (ArrayEltTy->isFloatingPoint() || isa<VectorType>(ArrayEltTy)) {
+ } else if (ArrayEltTy->isFloatingPointTy() ||
+ isa<VectorType>(ArrayEltTy)) {
// Bitcast to the right element type (for fp/vector values).
EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI);
} else {
@@ -1042,7 +1044,7 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
const IntegerType *FieldIntTy = IntegerType::get(LI->getContext(),
FieldSizeBits);
- if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPoint() &&
+ if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPointTy() &&
!isa<VectorType>(FieldTy))
SrcField = new BitCastInst(SrcField,
PointerType::getUnqual(FieldIntTy),
@@ -1384,9 +1386,9 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) {
// If the source and destination are both to the same alloca, then this is
// a noop copy-to-self, just delete it. Otherwise, emit a load and store
// as appropriate.
- AllocaInst *OrigAI = cast<AllocaInst>(Ptr->getUnderlyingObject());
+ AllocaInst *OrigAI = cast<AllocaInst>(Ptr->getUnderlyingObject(0));
- if (MTI->getSource()->getUnderlyingObject() != OrigAI) {
+ if (MTI->getSource()->getUnderlyingObject(0) != OrigAI) {
// Dest must be OrigAI, change this to be a load from the original
// pointer (bitcasted), then a store to our new alloca.
assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
@@ -1396,7 +1398,7 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) {
LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
SrcVal->setAlignment(MTI->getAlignment());
Builder.CreateStore(SrcVal, NewAI);
- } else if (MTI->getDest()->getUnderlyingObject() != OrigAI) {
+ } else if (MTI->getDest()->getUnderlyingObject(0) != OrigAI) {
// Src must be OrigAI, change this to be a load from NewAI then a store
// through the original dest pointer (bitcasted).
assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
@@ -1521,7 +1523,7 @@ Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
// If the result is an integer, this is a trunc or bitcast.
if (isa<IntegerType>(ToType)) {
// Should be done.
- } else if (ToType->isFloatingPoint() || isa<VectorType>(ToType)) {
+ } else if (ToType->isFloatingPointTy() || isa<VectorType>(ToType)) {
// Just do a bitcast, we know the sizes match up.
FromVal = Builder.CreateBitCast(FromVal, ToType, "tmp");
} else {
@@ -1599,7 +1601,7 @@ Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
unsigned DestWidth = TD->getTypeSizeInBits(AllocaType);
unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
- if (SV->getType()->isFloatingPoint() || isa<VectorType>(SV->getType()))
+ if (SV->getType()->isFloatingPointTy() || isa<VectorType>(SV->getType()))
SV = Builder.CreateBitCast(SV,
IntegerType::get(SV->getContext(),SrcWidth), "tmp");
else if (isa<PointerType>(SV->getType()))
diff --git a/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index 43447de..62f34a2 100644
--- a/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -30,6 +30,7 @@
#include "llvm/Attributes.h"
#include "llvm/Support/CFG.h"
#include "llvm/Pass.h"
+#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
@@ -261,7 +262,7 @@ static bool MergeEmptyReturnBlocks(Function &F) {
/// IterativeSimplifyCFG - Call SimplifyCFG on all the blocks in the function,
/// iterating until no more changes are made.
-static bool IterativeSimplifyCFG(Function &F) {
+static bool IterativeSimplifyCFG(Function &F, const TargetData *TD) {
bool Changed = false;
bool LocalChange = true;
while (LocalChange) {
@@ -271,7 +272,7 @@ static bool IterativeSimplifyCFG(Function &F) {
// if they are unneeded...
//
for (Function::iterator BBIt = ++F.begin(); BBIt != F.end(); ) {
- if (SimplifyCFG(BBIt++)) {
+ if (SimplifyCFG(BBIt++, TD)) {
LocalChange = true;
++NumSimpl;
}
@@ -285,10 +286,11 @@ static bool IterativeSimplifyCFG(Function &F) {
// simplify the CFG.
//
bool CFGSimplifyPass::runOnFunction(Function &F) {
+ const TargetData *TD = getAnalysisIfAvailable<TargetData>();
bool EverChanged = RemoveUnreachableBlocksFromFn(F);
EverChanged |= MergeEmptyReturnBlocks(F);
- EverChanged |= IterativeSimplifyCFG(F);
-
+ EverChanged |= IterativeSimplifyCFG(F, TD);
+
// If neither pass changed anything, we're done.
if (!EverChanged) return false;
@@ -299,11 +301,11 @@ bool CFGSimplifyPass::runOnFunction(Function &F) {
// RemoveUnreachableBlocksFromFn doesn't do anything.
if (!RemoveUnreachableBlocksFromFn(F))
return true;
-
+
do {
- EverChanged = IterativeSimplifyCFG(F);
+ EverChanged = IterativeSimplifyCFG(F, TD);
EverChanged |= RemoveUnreachableBlocksFromFn(F);
} while (EverChanged);
-
+
return true;
}
diff --git a/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp b/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp
index 5acd6aa..4464961 100644
--- a/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp
+++ b/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp
@@ -68,7 +68,7 @@ InlineHalfPowrs(const std::vector<Instruction *> &HalfPowrs,
Function *Callee = Call->getCalledFunction();
// Minimally sanity-check the CFG of half_powr to ensure that it contains
- // the the kind of code we expect. If we're running this pass, we have
+ // the kind of code we expect. If we're running this pass, we have
// reason to believe it will be what we expect.
Function::iterator I = Callee->begin();
BasicBlock *Prologue = I++;
diff --git a/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index a49da9c..54b4380 100644
--- a/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -152,7 +152,7 @@ Value *LibCallOptimization::EmitStrLen(Value *Ptr, IRBuilder<> &B) {
Constant *StrLen =M->getOrInsertFunction("strlen", AttrListPtr::get(AWI, 2),
TD->getIntPtrType(*Context),
- Type::getInt8PtrTy(*Context),
+ Type::getInt8PtrTy(*Context),
NULL);
CallInst *CI = B.CreateCall(StrLen, CastToCStr(Ptr, B), "strlen");
if (const Function *F = dyn_cast<Function>(StrLen->stripPointerCasts()))
@@ -232,10 +232,10 @@ Value *LibCallOptimization::EmitMemChr(Value *Ptr, Value *Val,
AWI = AttributeWithIndex::get(~0u, Attribute::ReadOnly | Attribute::NoUnwind);
Value *MemChr = M->getOrInsertFunction("memchr", AttrListPtr::get(&AWI, 1),
- Type::getInt8PtrTy(*Context),
- Type::getInt8PtrTy(*Context),
+ Type::getInt8PtrTy(*Context),
+ Type::getInt8PtrTy(*Context),
Type::getInt32Ty(*Context),
- TD->getIntPtrType(*Context),
+ TD->getIntPtrType(*Context),
NULL);
CallInst *CI = B.CreateCall3(MemChr, CastToCStr(Ptr, B), Val, Len, "memchr");
@@ -321,9 +321,9 @@ Value *LibCallOptimization::EmitPutChar(Value *Char, IRBuilder<> &B) {
Type::getInt32Ty(*Context), NULL);
CallInst *CI = B.CreateCall(PutChar,
B.CreateIntCast(Char,
- Type::getInt32Ty(*Context),
- /*isSigned*/true,
- "chari"),
+ Type::getInt32Ty(*Context),
+ /*isSigned*/true,
+ "chari"),
"putchar");
if (const Function *F = dyn_cast<Function>(PutChar->stripPointerCasts()))
@@ -341,7 +341,7 @@ void LibCallOptimization::EmitPutS(Value *Str, IRBuilder<> &B) {
Value *PutS = M->getOrInsertFunction("puts", AttrListPtr::get(AWI, 2),
Type::getInt32Ty(*Context),
- Type::getInt8PtrTy(*Context),
+ Type::getInt8PtrTy(*Context),
NULL);
CallInst *CI = B.CreateCall(PutS, CastToCStr(Str, B), "puts");
if (const Function *F = dyn_cast<Function>(PutS->stripPointerCasts()))
@@ -359,13 +359,13 @@ void LibCallOptimization::EmitFPutC(Value *Char, Value *File, IRBuilder<> &B) {
Constant *F;
if (isa<PointerType>(File->getType()))
F = M->getOrInsertFunction("fputc", AttrListPtr::get(AWI, 2),
- Type::getInt32Ty(*Context),
+ Type::getInt32Ty(*Context),
Type::getInt32Ty(*Context), File->getType(),
- NULL);
+ NULL);
else
F = M->getOrInsertFunction("fputc",
- Type::getInt32Ty(*Context),
- Type::getInt32Ty(*Context),
+ Type::getInt32Ty(*Context),
+ Type::getInt32Ty(*Context),
File->getType(), NULL);
Char = B.CreateIntCast(Char, Type::getInt32Ty(*Context), /*isSigned*/true,
"chari");
@@ -386,7 +386,7 @@ void LibCallOptimization::EmitFPutS(Value *Str, Value *File, IRBuilder<> &B) {
Constant *F;
if (isa<PointerType>(File->getType()))
F = M->getOrInsertFunction("fputs", AttrListPtr::get(AWI, 3),
- Type::getInt32Ty(*Context),
+ Type::getInt32Ty(*Context),
Type::getInt8PtrTy(*Context),
File->getType(), NULL);
else
@@ -414,13 +414,13 @@ void LibCallOptimization::EmitFWrite(Value *Ptr, Value *Size, Value *File,
TD->getIntPtrType(*Context),
Type::getInt8PtrTy(*Context),
TD->getIntPtrType(*Context),
- TD->getIntPtrType(*Context),
+ TD->getIntPtrType(*Context),
File->getType(), NULL);
else
F = M->getOrInsertFunction("fwrite", TD->getIntPtrType(*Context),
Type::getInt8PtrTy(*Context),
TD->getIntPtrType(*Context),
- TD->getIntPtrType(*Context),
+ TD->getIntPtrType(*Context),
File->getType(), NULL);
CallInst *CI = B.CreateCall4(F, CastToCStr(Ptr, B), Size,
ConstantInt::get(TD->getIntPtrType(*Context), 1), File);
@@ -525,7 +525,7 @@ static uint64_t GetStringLengthH(Value *V, SmallPtrSet<PHINode*, 32> &PHIs) {
// Must be a Constant Array
ConstantArray *Array = dyn_cast<ConstantArray>(GlobalInit);
- if (!Array || !Array->getType()->getElementType()->isInteger(8))
+ if (!Array || !Array->getType()->getElementType()->isIntegerTy(8))
return false;
// Get the number of elements in the array
@@ -697,7 +697,7 @@ struct StrChrOpt : public LibCallOptimization {
if (!TD) return 0;
uint64_t Len = GetStringLength(SrcStr);
- if (Len == 0 || !FT->getParamType(1)->isInteger(32)) // memchr needs i32.
+ if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32))// memchr needs i32.
return 0;
return EmitMemChr(SrcStr, CI->getOperand(2), // include nul.
@@ -739,7 +739,7 @@ struct StrCmpOpt : public LibCallOptimization {
// Verify the "strcmp" function prototype.
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
- !FT->getReturnType()->isInteger(32) ||
+ !FT->getReturnType()->isIntegerTy(32) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(*Context))
return 0;
@@ -787,7 +787,7 @@ struct StrNCmpOpt : public LibCallOptimization {
// Verify the "strncmp" function prototype.
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 ||
- !FT->getReturnType()->isInteger(32) ||
+ !FT->getReturnType()->isIntegerTy(32) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(*Context) ||
!isa<IntegerType>(FT->getParamType(2)))
@@ -1008,7 +1008,7 @@ struct MemCmpOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || !isa<PointerType>(FT->getParamType(0)) ||
!isa<PointerType>(FT->getParamType(1)) ||
- !FT->getReturnType()->isInteger(32))
+ !FT->getReturnType()->isIntegerTy(32))
return 0;
Value *LHS = CI->getOperand(1), *RHS = CI->getOperand(2);
@@ -1203,22 +1203,23 @@ struct MemMoveChkOpt : public LibCallOptimization {
struct StrCpyChkOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // These optimizations require TargetData.
- if (!TD) return 0;
-
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!isa<PointerType>(FT->getParamType(0)) ||
- !isa<PointerType>(FT->getParamType(1)) ||
- !isa<IntegerType>(FT->getParamType(2)))
+ !isa<PointerType>(FT->getParamType(1)))
return 0;
ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(3));
if (!SizeCI)
return 0;
- // We don't have any length information, just lower to a plain strcpy.
- if (SizeCI->isAllOnesValue())
+  // If a) we don't have any length information, or b) we know the string will
+  // fit, then just lower to a plain strcpy. Otherwise keep the strcpy_chk
+  // call, which may fail at runtime if the string is too long for the
+  // destination. TODO: It might be nice to get a maximum length out of the
+  // possible string lengths for varying (non-constant) strings.
+ if (SizeCI->isAllOnesValue() ||
+ SizeCI->getZExtValue() >= GetStringLength(CI->getOperand(2)))
return EmitStrCpy(CI->getOperand(1), CI->getOperand(2), B);
return 0;
@@ -1240,7 +1241,7 @@ struct PowOpt : public LibCallOptimization {
// result type.
if (FT->getNumParams() != 2 || FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
- !FT->getParamType(0)->isFloatingPoint())
+ !FT->getParamType(0)->isFloatingPointTy())
return 0;
Value *Op1 = CI->getOperand(1), *Op2 = CI->getOperand(2);
@@ -1294,7 +1295,7 @@ struct Exp2Opt : public LibCallOptimization {
// Just make sure this has 1 argument of FP type, which matches the
// result type.
if (FT->getNumParams() != 1 || FT->getReturnType() != FT->getParamType(0) ||
- !FT->getParamType(0)->isFloatingPoint())
+ !FT->getParamType(0)->isFloatingPointTy())
return 0;
Value *Op = CI->getOperand(1);
@@ -1327,7 +1328,7 @@ struct Exp2Opt : public LibCallOptimization {
Module *M = Caller->getParent();
Value *Callee = M->getOrInsertFunction(Name, Op->getType(),
Op->getType(),
- Type::getInt32Ty(*Context),NULL);
+ Type::getInt32Ty(*Context),NULL);
CallInst *CI = B.CreateCall2(Callee, One, LdExpArg);
if (const Function *F = dyn_cast<Function>(Callee->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
@@ -1374,7 +1375,7 @@ struct FFSOpt : public LibCallOptimization {
    // Just make sure this has one integer argument (of any width) and an
    // i32 result type.
if (FT->getNumParams() != 1 ||
- !FT->getReturnType()->isInteger(32) ||
+ !FT->getReturnType()->isIntegerTy(32) ||
!isa<IntegerType>(FT->getParamType(0)))
return 0;
@@ -1410,7 +1411,7 @@ struct IsDigitOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
// We require integer(i32)
if (FT->getNumParams() != 1 || !isa<IntegerType>(FT->getReturnType()) ||
- !FT->getParamType(0)->isInteger(32))
+ !FT->getParamType(0)->isIntegerTy(32))
return 0;
// isdigit(c) -> (c-'0') <u 10
@@ -1431,7 +1432,7 @@ struct IsAsciiOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
// We require integer(i32)
if (FT->getNumParams() != 1 || !isa<IntegerType>(FT->getReturnType()) ||
- !FT->getParamType(0)->isInteger(32))
+ !FT->getParamType(0)->isIntegerTy(32))
return 0;
// isascii(c) -> c <u 128
@@ -1472,7 +1473,7 @@ struct ToAsciiOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
// We require i32(i32)
if (FT->getNumParams() != 1 || FT->getReturnType() != FT->getParamType(0) ||
- !FT->getParamType(0)->isInteger(32))
+ !FT->getParamType(0)->isIntegerTy(32))
return 0;
    // toascii(c) -> c & 0x7f
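The ctype simplifications rest on plain arithmetic in the C locale: isdigit(c) is the unsigned range check (c - '0') <u 10, isascii(c) is c <u 128, and toascii(c) is c & 0x7f. A short program checking the isdigit identity against the host C library (default "C" locale assumed); the other two follow directly from ASCII being the 7-bit range 0..127:

#include <cassert>
#include <cctype>

int main() {
  for (unsigned c = 0; c < 256; ++c) {
    // isdigit(c) -> (c-'0') <u 10: the unsigned compare performs the range check.
    bool ViaCompare = (c - '0') < 10u;
    bool ViaLibrary = std::isdigit((int)c) != 0;
    assert(ViaCompare == ViaLibrary);
  }
  return 0;
}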
diff --git a/lib/Transforms/Scalar/TailRecursionElimination.cpp b/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 4119cb9..162d902 100644
--- a/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -211,7 +211,8 @@ bool TailCallElim::CanMoveAboveCall(Instruction *I, CallInst *CI) {
// FIXME: Writes to memory only matter if they may alias the pointer
// being loaded from.
if (CI->mayWriteToMemory() ||
- !isSafeToLoadUnconditionally(L->getPointerOperand(), L))
+ !isSafeToLoadUnconditionally(L->getPointerOperand(), L,
+ L->getAlignment()))
return false;
}
}
diff --git a/lib/Transforms/Utils/BreakCriticalEdges.cpp b/lib/Transforms/Utils/BreakCriticalEdges.cpp
index 19c7206..3657390 100644
--- a/lib/Transforms/Utils/BreakCriticalEdges.cpp
+++ b/lib/Transforms/Utils/BreakCriticalEdges.cpp
@@ -179,7 +179,7 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
// Create a new basic block, linking it into the CFG.
BasicBlock *NewBB = BasicBlock::Create(TI->getContext(),
TIBB->getName() + "." + DestBB->getName() + "_crit_edge");
- // Create our unconditional branch...
+ // Create our unconditional branch.
BranchInst::Create(DestBB, NewBB);
// Branch to the new block, breaking the edge.
@@ -192,16 +192,47 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
// If there are any PHI nodes in DestBB, we need to update them so that they
// merge incoming values from NewBB instead of from TIBB.
- //
- for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
- PHINode *PN = cast<PHINode>(I);
- // We no longer enter through TIBB, now we come in through NewBB. Revector
- // exactly one entry in the PHI node that used to come from TIBB to come
- // from NewBB.
- int BBIdx = PN->getBasicBlockIndex(TIBB);
- PN->setIncomingBlock(BBIdx, NewBB);
+ if (PHINode *APHI = dyn_cast<PHINode>(DestBB->begin())) {
+ // This conceptually does:
+ // foreach (PHINode *PN in DestBB)
+ // PN->setIncomingBlock(PN->getIncomingBlock(TIBB), NewBB);
+ // but is optimized for two cases.
+
+ if (APHI->getNumIncomingValues() <= 8) { // Small # preds case.
+ unsigned BBIdx = 0;
+ for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
+ // We no longer enter through TIBB, now we come in through NewBB.
+ // Revector exactly one entry in the PHI node that used to come from
+ // TIBB to come from NewBB.
+ PHINode *PN = cast<PHINode>(I);
+
+ // Reuse the previous value of BBIdx if it lines up. In cases where we
+ // have multiple phi nodes with *lots* of predecessors, this is a speed
+ // win because we don't have to scan the PHI looking for TIBB. This
+        // happens because the incoming-block lists of PHI nodes are usually
+        // in the same order.
+ if (PN->getIncomingBlock(BBIdx) != TIBB)
+ BBIdx = PN->getBasicBlockIndex(TIBB);
+ PN->setIncomingBlock(BBIdx, NewBB);
+ }
+ } else {
+ // However, the foreach loop is slow for blocks with lots of predecessors
+ // because PHINode::getIncomingBlock is O(n) in # preds. Instead, walk
+ // the user list of TIBB to find the PHI nodes.
+ SmallPtrSet<PHINode*, 16> UpdatedPHIs;
+
+ for (Value::use_iterator UI = TIBB->use_begin(), E = TIBB->use_end();
+ UI != E; ) {
+ Value::use_iterator Use = UI++;
+ if (PHINode *PN = dyn_cast<PHINode>(Use)) {
+ // Remove one entry from each PHI.
+ if (PN->getParent() == DestBB && UpdatedPHIs.insert(PN))
+ PN->setOperand(Use.getOperandNo(), NewBB);
+ }
+ }
+ }
}
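The small-predecessor path avoids a quadratic pattern by guessing that each PHI in the block lists its incoming blocks in the same order as the previous one, only paying for a linear getBasicBlockIndex scan when the guess misses; the large-predecessor path sidesteps the scan entirely by walking TIBB's use list. The index-reuse trick on a toy PHI representation (hypothetical types, not the LLVM classes):

#include <cstddef>
#include <vector>

struct ToyPhi {
  std::vector<int> IncomingBlocks; // one entry per predecessor
  size_t indexOf(int BB) const {   // linear scan, O(#preds)
    for (size_t i = 0, e = IncomingBlocks.size(); i != e; ++i)
      if (IncomingBlocks[i] == BB)
        return i;
    return IncomingBlocks.size();  // not found
  }
};

// Retarget every entry coming from OldBB to NewBB in all PHIs of one block.
// Assumes each PHI normally has an entry for OldBB, as in SplitCriticalEdge.
static void retargetPhis(std::vector<ToyPhi> &Phis, int OldBB, int NewBB) {
  size_t BBIdx = 0;
  for (size_t p = 0, e = Phis.size(); p != e; ++p) {
    ToyPhi &PN = Phis[p];
    // Reuse the previous index if it still lines up; otherwise rescan.
    if (BBIdx >= PN.IncomingBlocks.size() ||
        PN.IncomingBlocks[BBIdx] != OldBB)
      BBIdx = PN.indexOf(OldBB);
    if (BBIdx < PN.IncomingBlocks.size())
      PN.IncomingBlocks[BBIdx] = NewBB;
  }
}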
-
+
// If there are any other edges from TIBB to DestBB, update those to go
// through the split block, making those edges non-critical as well (and
// reducing the number of phi entries in the DestBB if relevant).
@@ -221,6 +252,15 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
// If we don't have a pass object, we can't update anything...
if (P == 0) return NewBB;
+
+ DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>();
+ DominanceFrontier *DF = P->getAnalysisIfAvailable<DominanceFrontier>();
+ LoopInfo *LI = P->getAnalysisIfAvailable<LoopInfo>();
+ ProfileInfo *PI = P->getAnalysisIfAvailable<ProfileInfo>();
+
+ // If we have nothing to update, just return.
+ if (DT == 0 && DF == 0 && LI == 0 && PI == 0)
+ return NewBB;
// Now update analysis information. Since the only predecessor of NewBB is
// the TIBB, TIBB clearly dominates NewBB. TIBB usually doesn't dominate
@@ -229,14 +269,23 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
// loop header) then NewBB dominates DestBB.
SmallVector<BasicBlock*, 8> OtherPreds;
- for (pred_iterator I = pred_begin(DestBB), E = pred_end(DestBB); I != E; ++I)
- if (*I != NewBB)
- OtherPreds.push_back(*I);
+ // If there is a PHI in the block, loop over predecessors with it, which is
+ // faster than iterating pred_begin/end.
+ if (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
+ if (PN->getIncomingBlock(i) != NewBB)
+ OtherPreds.push_back(PN->getIncomingBlock(i));
+ } else {
+ for (pred_iterator I = pred_begin(DestBB), E = pred_end(DestBB);
+ I != E; ++I)
+ if (*I != NewBB)
+ OtherPreds.push_back(*I);
+ }
bool NewBBDominatesDestBB = true;
// Should we update DominatorTree information?
- if (DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>()) {
+ if (DT) {
DomTreeNode *TINode = DT->getNode(TIBB);
// The new block is not the immediate dominator for any other nodes, but
@@ -267,7 +316,7 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
}
// Should we update DominanceFrontier information?
- if (DominanceFrontier *DF = P->getAnalysisIfAvailable<DominanceFrontier>()) {
+ if (DF) {
// If NewBBDominatesDestBB hasn't been computed yet, do so with DF.
if (!OtherPreds.empty()) {
// FIXME: IMPLEMENT THIS!
@@ -301,7 +350,7 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
}
// Update LoopInfo if it is around.
- if (LoopInfo *LI = P->getAnalysisIfAvailable<LoopInfo>()) {
+ if (LI) {
if (Loop *TIL = LI->getLoopFor(TIBB)) {
// If one or the other blocks were not in a loop, the new block is not
// either, and thus LI doesn't need to be updated.
@@ -382,9 +431,8 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
}
// Update ProfileInfo if it is around.
- if (ProfileInfo *PI = P->getAnalysisIfAvailable<ProfileInfo>()) {
- PI->splitEdge(TIBB,DestBB,NewBB,MergeIdenticalEdges);
- }
+ if (PI)
+ PI->splitEdge(TIBB, DestBB, NewBB, MergeIdenticalEdges);
return NewBB;
}
diff --git a/lib/Transforms/Utils/CloneFunction.cpp b/lib/Transforms/Utils/CloneFunction.cpp
index bd750cc..c80827d 100644
--- a/lib/Transforms/Utils/CloneFunction.cpp
+++ b/lib/Transforms/Utils/CloneFunction.cpp
@@ -33,7 +33,7 @@ using namespace llvm;
// CloneBasicBlock - See comments in Cloning.h
BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB,
DenseMap<const Value*, Value*> &ValueMap,
- const char *NameSuffix, Function *F,
+ const Twine &NameSuffix, Function *F,
ClonedCodeInfo *CodeInfo) {
BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "", F);
if (BB->hasName()) NewBB->setName(BB->getName()+NameSuffix);
diff --git a/lib/Transforms/Utils/Local.cpp b/lib/Transforms/Utils/Local.cpp
index 92bdf2d..57ad459 100644
--- a/lib/Transforms/Utils/Local.cpp
+++ b/lib/Transforms/Utils/Local.cpp
@@ -38,20 +38,82 @@ using namespace llvm;
// Local analysis.
//
+/// getUnderlyingObjectWithOffset - Strip off up to MaxLookup GEPs and
+/// bitcasts to get back to the underlying object being addressed, keeping
+/// track of the offset in bytes from the GEPs relative to the result.
+/// This is closely related to Value::getUnderlyingObject but is located
+/// here to avoid making VMCore depend on TargetData.
+static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD,
+ uint64_t &ByteOffset,
+ unsigned MaxLookup = 6) {
+ if (!isa<PointerType>(V->getType()))
+ return V;
+ for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+ if (!GEP->hasAllConstantIndices())
+ return V;
+ SmallVector<Value*, 8> Indices(GEP->op_begin() + 1, GEP->op_end());
+ ByteOffset += TD->getIndexedOffset(GEP->getPointerOperandType(),
+ &Indices[0], Indices.size());
+ V = GEP->getPointerOperand();
+ } else if (Operator::getOpcode(V) == Instruction::BitCast) {
+ V = cast<Operator>(V)->getOperand(0);
+ } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ if (GA->mayBeOverridden())
+ return V;
+ V = GA->getAliasee();
+ } else {
+ return V;
+ }
+ assert(isa<PointerType>(V->getType()) && "Unexpected operand type!");
+ }
+ return V;
+}
+
/// isSafeToLoadUnconditionally - Return true if we know that executing a load
/// from this value cannot trap. If it is not obviously safe to load from the
/// specified pointer, we do a quick local scan of the basic block containing
/// ScanFrom, to determine if the address is already accessed.
-bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom) {
- // If it is an alloca it is always safe to load from.
- if (isa<AllocaInst>(V)) return true;
+bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
+ unsigned Align, const TargetData *TD) {
+ uint64_t ByteOffset = 0;
+ Value *Base = V;
+ if (TD)
+ Base = getUnderlyingObjectWithOffset(V, TD, ByteOffset);
+
+ const Type *BaseType = 0;
+ unsigned BaseAlign = 0;
+ if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
+ // An alloca is safe to load from as long as it is suitably aligned.
+ BaseType = AI->getAllocatedType();
+ BaseAlign = AI->getAlignment();
+ } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(Base)) {
+ // Global variables are safe to load from but their size cannot be
+ // guaranteed if they are overridden.
+ if (!isa<GlobalAlias>(GV) && !GV->mayBeOverridden()) {
+ BaseType = GV->getType()->getElementType();
+ BaseAlign = GV->getAlignment();
+ }
+ }
- // If it is a global variable it is mostly safe to load from.
- if (const GlobalValue *GV = dyn_cast<GlobalVariable>(V))
- // Don't try to evaluate aliases. External weak GV can be null.
- return !isa<GlobalAlias>(GV) && !GV->hasExternalWeakLinkage();
+ if (BaseType && BaseType->isSized()) {
+ if (TD && BaseAlign == 0)
+ BaseAlign = TD->getPrefTypeAlignment(BaseType);
- // Otherwise, be a little bit agressive by scanning the local block where we
+ if (Align <= BaseAlign) {
+ if (!TD)
+ return true; // Loading directly from an alloca or global is OK.
+
+ // Check if the load is within the bounds of the underlying object.
+ const PointerType *AddrTy = cast<PointerType>(V->getType());
+ uint64_t LoadSize = TD->getTypeStoreSize(AddrTy->getElementType());
+ if (ByteOffset + LoadSize <= TD->getTypeAllocSize(BaseType) &&
+ (Align == 0 || (ByteOffset % Align) == 0))
+ return true;
+ }
+ }
+
+ // Otherwise, be a little bit aggressive by scanning the local block where we
// want to check to see if the pointer is already being loaded or stored
// from/to. If so, the previous load or store would have already trapped,
// so there is no harm doing an extra load (also, CSE will later eliminate
@@ -428,6 +490,17 @@ void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, Pass *P) {
// Splice all the instructions from PredBB to DestBB.
PredBB->getTerminator()->eraseFromParent();
DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
+
+ // Zap anything that took the address of DestBB. Not doing this will give the
+ // address an invalid value.
+ if (DestBB->hasAddressTaken()) {
+ BlockAddress *BA = BlockAddress::get(DestBB);
+ Constant *Replacement =
+ ConstantInt::get(llvm::Type::getInt32Ty(BA->getContext()), 1);
+ BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
+ BA->getType()));
+ BA->destroyConstant();
+ }
// Anything that branched to PredBB now branches to DestBB.
PredBB->replaceAllUsesWith(DestBB);
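
isSafeToLoadUnconditionally now also takes the load's alignment and an optional TargetData, which enables the in-bounds check against the underlying alloca or global. A minimal caller sketch, assuming the declaration lives in llvm/Transforms/Utils/Local.h in this tree (the helper name is hypothetical):

#include "llvm/Instructions.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// True if LI could be executed speculatively just before ScanFrom. Passing
// the load's own alignment enables the offset/alignment check; TD may be
// null, in which case only the simple alloca/global fast path applies.
static bool canSpeculateLoad(LoadInst *LI, Instruction *ScanFrom,
                             const TargetData *TD) {
  return isSafeToLoadUnconditionally(LI->getPointerOperand(), ScanFrom,
                                     LI->getAlignment(), TD);
}
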
diff --git a/lib/Transforms/Utils/LoopSimplify.cpp b/lib/Transforms/Utils/LoopSimplify.cpp
index e81b779..57bab60 100644
--- a/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/lib/Transforms/Utils/LoopSimplify.cpp
@@ -176,8 +176,9 @@ ReprocessLoop:
SmallVector<BasicBlock*, 8> ExitBlocks;
L->getExitBlocks(ExitBlocks);
- SetVector<BasicBlock*> ExitBlockSet(ExitBlocks.begin(), ExitBlocks.end());
- for (SetVector<BasicBlock*>::iterator I = ExitBlockSet.begin(),
+ SmallSetVector<BasicBlock *, 8> ExitBlockSet(ExitBlocks.begin(),
+ ExitBlocks.end());
+ for (SmallSetVector<BasicBlock *, 8>::iterator I = ExitBlockSet.begin(),
E = ExitBlockSet.end(); I != E; ++I) {
BasicBlock *ExitBlock = *I;
for (pred_iterator PI = pred_begin(ExitBlock), PE = pred_end(ExitBlock);
diff --git a/lib/Transforms/Utils/LoopUnroll.cpp b/lib/Transforms/Utils/LoopUnroll.cpp
index 53117a0..e47c86d 100644
--- a/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/lib/Transforms/Utils/LoopUnroll.cpp
@@ -29,7 +29,6 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
-#include <cstdio>
using namespace llvm;
@@ -204,15 +203,12 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM)
Latches.push_back(LatchBlock);
for (unsigned It = 1; It != Count; ++It) {
- char SuffixBuffer[100];
- sprintf(SuffixBuffer, ".%d", It);
-
std::vector<BasicBlock*> NewBlocks;
for (std::vector<BasicBlock*>::iterator BB = LoopBlocks.begin(),
E = LoopBlocks.end(); BB != E; ++BB) {
ValueMapTy ValueMap;
- BasicBlock *New = CloneBasicBlock(*BB, ValueMap, SuffixBuffer);
+ BasicBlock *New = CloneBasicBlock(*BB, ValueMap, "." + Twine(It));
Header->getParent()->getBasicBlockList().push_back(New);
// Loop over all of the PHI nodes in the block, changing them to use the
diff --git a/lib/Transforms/Utils/Makefile b/lib/Transforms/Utils/Makefile
index b9761df..d1e9336 100644
--- a/lib/Transforms/Utils/Makefile
+++ b/lib/Transforms/Utils/Makefile
@@ -10,7 +10,6 @@
LEVEL = ../../..
LIBRARYNAME = LLVMTransformUtils
BUILD_ARCHIVE = 1
-CXXFLAGS = -fno-rtti
include $(LEVEL)/Makefile.common
diff --git a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index d9261ac..544e20b 100644
--- a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -23,6 +23,7 @@
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/Metadata.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasSetTracker.h"
@@ -84,6 +85,18 @@ bool llvm::isAllocaPromotable(const AllocaInst *AI) {
return true;
}
+/// FindAllocaDbgDeclare - Finds the llvm.dbg.declare intrinsic describing the
+/// alloca 'V', if any.
+static DbgDeclareInst *FindAllocaDbgDeclare(Value *V) {
+ if (MDNode *DebugNode = MDNode::getIfExists(V->getContext(), &V, 1))
+ for (Value::use_iterator UI = DebugNode->use_begin(),
+ E = DebugNode->use_end(); UI != E; ++UI)
+ if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
+ return DDI;
+
+ return 0;
+}
+
namespace {
struct AllocaInfo;
@@ -188,6 +201,11 @@ namespace {
///
std::vector<Value*> PointerAllocaValues;
+ /// AllocaDbgDeclares - For each alloca, we keep track of the dbg.declare
+ /// intrinsic that describes it, if any, so that we can convert it to a
+ /// dbg.value intrinsic if the alloca gets promoted.
+ SmallVector<DbgDeclareInst*, 8> AllocaDbgDeclares;
+
/// Visited - The set of basic blocks the renamer has already visited.
///
SmallPtrSet<BasicBlock*, 16> Visited;
@@ -202,6 +220,9 @@ namespace {
PromoteMem2Reg(const std::vector<AllocaInst*> &A, DominatorTree &dt,
DominanceFrontier &df, AliasSetTracker *ast)
: Allocas(A), DT(dt), DF(df), DIF(0), AST(ast) {}
+ ~PromoteMem2Reg() {
+ delete DIF;
+ }
void run();
@@ -243,9 +264,9 @@ namespace {
LargeBlockInfo &LBI);
void PromoteSingleBlockAlloca(AllocaInst *AI, AllocaInfo &Info,
LargeBlockInfo &LBI);
- void ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI, StoreInst* SI,
- uint64_t Offset);
-
+ void ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI, StoreInst *SI);
+
+
void RenamePass(BasicBlock *BB, BasicBlock *Pred,
RenamePassData::ValVector &IncVals,
std::vector<RenamePassData> &Worklist);
@@ -262,6 +283,7 @@ namespace {
bool OnlyUsedInOneBlock;
Value *AllocaPointerVal;
+ DbgDeclareInst *DbgDeclare;
void clear() {
DefiningBlocks.clear();
@@ -270,6 +292,7 @@ namespace {
OnlyBlock = 0;
OnlyUsedInOneBlock = true;
AllocaPointerVal = 0;
+ DbgDeclare = 0;
}
/// AnalyzeAlloca - Scan the uses of the specified alloca, filling in our
@@ -304,28 +327,18 @@ namespace {
OnlyUsedInOneBlock = false;
}
}
+
+ DbgDeclare = FindAllocaDbgDeclare(AI);
}
};
} // end of anonymous namespace
-/// Finds the llvm.dbg.declare intrinsic corresponding to an alloca if any.
-static DbgDeclareInst *findDbgDeclare(AllocaInst *AI) {
- Function *F = AI->getParent()->getParent();
- for (Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
- for (BasicBlock::iterator BI = (*FI).begin(), BE = (*FI).end();
- BI != BE; ++BI)
- if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(BI))
- if (DDI->getAddress() == AI)
- return DDI;
-
- return 0;
-}
-
void PromoteMem2Reg::run() {
Function &F = *DF.getRoot()->getParent();
if (AST) PointerAllocaValues.resize(Allocas.size());
+ AllocaDbgDeclares.resize(Allocas.size());
AllocaInfo Info;
LargeBlockInfo LBI;
@@ -360,8 +373,11 @@ void PromoteMem2Reg::run() {
// Finally, after the scan, check to see if the store is all that is left.
if (Info.UsingBlocks.empty()) {
- // Record debuginfo for the store before removing it.
- ConvertDebugDeclareToDebugValue(findDbgDeclare(AI), Info.OnlyStore, 0);
+ // Record debuginfo for the store and remove the declaration's debuginfo.
+ if (DbgDeclareInst *DDI = Info.DbgDeclare) {
+ ConvertDebugDeclareToDebugValue(DDI, Info.OnlyStore);
+ DDI->eraseFromParent();
+ }
// Remove the (now dead) store and alloca.
Info.OnlyStore->eraseFromParent();
LBI.deleteValue(Info.OnlyStore);
@@ -388,11 +404,11 @@ void PromoteMem2Reg::run() {
if (Info.UsingBlocks.empty()) {
// Remove the (now dead) stores and alloca.
- DbgDeclareInst *DDI = findDbgDeclare(AI);
while (!AI->use_empty()) {
StoreInst *SI = cast<StoreInst>(AI->use_back());
// Record debuginfo for the store before removing it.
- ConvertDebugDeclareToDebugValue(DDI, SI, 0);
+ if (DbgDeclareInst *DDI = Info.DbgDeclare)
+ ConvertDebugDeclareToDebugValue(DDI, SI);
SI->eraseFromParent();
LBI.deleteValue(SI);
}
@@ -404,6 +420,10 @@ void PromoteMem2Reg::run() {
// The alloca has been processed, move on.
RemoveFromAllocasList(AllocaNum);
+ // The alloca's debuginfo can be removed as well.
+ if (DbgDeclareInst *DDI = Info.DbgDeclare)
+ DDI->eraseFromParent();
+
++NumLocalPromoted;
continue;
}
@@ -421,6 +441,9 @@ void PromoteMem2Reg::run() {
// stored into the alloca.
if (AST)
PointerAllocaValues[AllocaNum] = Info.AllocaPointerVal;
+
+ // Remember the dbg.declare intrinsic describing this alloca, if any.
+ if (Info.DbgDeclare) AllocaDbgDeclares[AllocaNum] = Info.DbgDeclare;
// Keep the reverse mapping of the 'Allocas' array for the rename pass.
AllocaLookup[Allocas[AllocaNum]] = AllocaNum;
@@ -476,7 +499,11 @@ void PromoteMem2Reg::run() {
A->eraseFromParent();
}
-
+ // Remove the allocas' dbg.declare intrinsics from the function.
+ for (unsigned i = 0, e = AllocaDbgDeclares.size(); i != e; ++i)
+ if (DbgDeclareInst *DDI = AllocaDbgDeclares[i])
+ DDI->eraseFromParent();
+
// Loop over all of the PHI nodes and see if there are any that we can get
// rid of because they merge all of the same incoming values. This can
// happen due to undef values coming into the PHI nodes. This process is
@@ -857,14 +884,19 @@ void PromoteMem2Reg::PromoteSingleBlockAlloca(AllocaInst *AI, AllocaInfo &Info,
// Inserts an llvm.dbg.value intrinsic before the stores to an alloca'd value
// that has an associated llvm.dbg.declare intrinsic.
void PromoteMem2Reg::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
- StoreInst* SI,
- uint64_t Offset) {
- if (!DDI) return;
+ StoreInst *SI) {
+ DIVariable DIVar(DDI->getVariable());
+ if (!DIVar.getNode())
+ return;
if (!DIF)
DIF = new DIFactory(*SI->getParent()->getParent()->getParent());
- DIF->InsertDbgValueIntrinsic(SI->getOperand(0), Offset,
- DIVariable(DDI->getVariable()), SI);
+ Instruction *DbgVal = DIF->InsertDbgValueIntrinsic(SI->getOperand(0), 0,
+ DIVar, SI);
+
+ // Propagate any debug metadata from the store onto the dbg.value.
+ if (MDNode *SIMD = SI->getMetadata("dbg"))
+ DbgVal->setMetadata("dbg", SIMD);
}
// QueuePhiNode - queues a phi-node to be added to a basic-block for a specific
@@ -980,7 +1012,8 @@ NextIteration:
// what value were we writing?
IncomingVals[ai->second] = SI->getOperand(0);
// Record debuginfo for the store before removing it.
- ConvertDebugDeclareToDebugValue(findDbgDeclare(Dest), SI, 0);
+ if (DbgDeclareInst *DDI = AllocaDbgDeclares[ai->second])
+ ConvertDebugDeclareToDebugValue(DDI, SI);
BB->getInstList().erase(SI);
}
}
diff --git a/lib/Transforms/Utils/SSAUpdater.cpp b/lib/Transforms/Utils/SSAUpdater.cpp
index 161bf21..a31235a 100644
--- a/lib/Transforms/Utils/SSAUpdater.cpp
+++ b/lib/Transforms/Utils/SSAUpdater.cpp
@@ -71,6 +71,50 @@ void SSAUpdater::AddAvailableValue(BasicBlock *BB, Value *V) {
getAvailableVals(AV)[BB] = V;
}
+/// IsEquivalentPHI - Check if PHI has the same incoming value as specified
+/// in ValueMapping for each predecessor block.
+static bool IsEquivalentPHI(PHINode *PHI,
+ DenseMap<BasicBlock*, Value*> &ValueMapping) {
+ unsigned PHINumValues = PHI->getNumIncomingValues();
+ if (PHINumValues != ValueMapping.size())
+ return false;
+
+ // Scan the phi to see if it matches.
+ for (unsigned i = 0, e = PHINumValues; i != e; ++i)
+ if (ValueMapping[PHI->getIncomingBlock(i)] !=
+ PHI->getIncomingValue(i)) {
+ return false;
+ }
+
+ return true;
+}
+
+/// GetExistingPHI - Check if BB already contains a phi node that is equivalent
+/// to the specified mapping from predecessor blocks to incoming values.
+static Value *GetExistingPHI(BasicBlock *BB,
+ DenseMap<BasicBlock*, Value*> &ValueMapping) {
+ PHINode *SomePHI;
+ for (BasicBlock::iterator It = BB->begin();
+ (SomePHI = dyn_cast<PHINode>(It)); ++It) {
+ if (IsEquivalentPHI(SomePHI, ValueMapping))
+ return SomePHI;
+ }
+ return 0;
+}
+
+/// GetExistingPHI - Check if BB already contains an equivalent phi node.
+/// The InputIt type must be an iterator over std::pair<BasicBlock*, Value*>
+/// objects that specify the mapping from predecessor blocks to incoming values.
+template<typename InputIt>
+static Value *GetExistingPHI(BasicBlock *BB, const InputIt &I,
+ const InputIt &E) {
+ // Avoid creating the mapping if BB has no phi nodes at all.
+ if (!isa<PHINode>(BB->begin()))
+ return 0;
+ DenseMap<BasicBlock*, Value*> ValueMapping(I, E);
+ return GetExistingPHI(BB, ValueMapping);
+}
+
/// GetValueAtEndOfBlock - Construct SSA form, materializing a value that is
/// live at the end of the specified block.
Value *SSAUpdater::GetValueAtEndOfBlock(BasicBlock *BB) {
@@ -149,28 +193,11 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
if (SingularValue != 0)
return SingularValue;
- // Otherwise, we do need a PHI: check to see if we already have one available
- // in this block that produces the right value.
- if (isa<PHINode>(BB->begin())) {
- DenseMap<BasicBlock*, Value*> ValueMapping(PredValues.begin(),
- PredValues.end());
- PHINode *SomePHI;
- for (BasicBlock::iterator It = BB->begin();
- (SomePHI = dyn_cast<PHINode>(It)); ++It) {
- // Scan this phi to see if it is what we need.
- bool Equal = true;
- for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i)
- if (ValueMapping[SomePHI->getIncomingBlock(i)] !=
- SomePHI->getIncomingValue(i)) {
- Equal = false;
- break;
- }
-
- if (Equal)
- return SomePHI;
- }
- }
-
+ // Otherwise, we do need a PHI.
+ if (Value *ExistingPHI = GetExistingPHI(BB, PredValues.begin(),
+ PredValues.end()))
+ return ExistingPHI;
+
// Ok, we have no way out, insert a new one now.
PHINode *InsertedPHI = PHINode::Create(PrototypeValue->getType(),
PrototypeValue->getName(),
@@ -255,7 +282,7 @@ Value *SSAUpdater::GetValueAtEndOfBlockInternal(BasicBlock *BB) {
// producing the same value. If so, this value will capture it, if not, it
// will get reset to null. We distinguish the no-predecessor case explicitly
// below.
- TrackingVH<Value> SingularValue;
+ TrackingVH<Value> ExistingValue;
// We can get our predecessor info by walking the pred_iterator list, but it
// is relatively slow. If we already have PHI nodes in this block, walk one
@@ -266,11 +293,11 @@ Value *SSAUpdater::GetValueAtEndOfBlockInternal(BasicBlock *BB) {
Value *PredVal = GetValueAtEndOfBlockInternal(PredBB);
IncomingPredInfo.push_back(std::make_pair(PredBB, PredVal));
- // Compute SingularValue.
+ // Set ExistingValue to singular value from all predecessors so far.
if (i == 0)
- SingularValue = PredVal;
- else if (PredVal != SingularValue)
- SingularValue = 0;
+ ExistingValue = PredVal;
+ else if (PredVal != ExistingValue)
+ ExistingValue = 0;
}
} else {
bool isFirstPred = true;
@@ -279,12 +306,12 @@ Value *SSAUpdater::GetValueAtEndOfBlockInternal(BasicBlock *BB) {
Value *PredVal = GetValueAtEndOfBlockInternal(PredBB);
IncomingPredInfo.push_back(std::make_pair(PredBB, PredVal));
- // Compute SingularValue.
+ // Set ExistingValue to singular value from all predecessors so far.
if (isFirstPred) {
- SingularValue = PredVal;
+ ExistingValue = PredVal;
isFirstPred = false;
- } else if (PredVal != SingularValue)
- SingularValue = 0;
+ } else if (PredVal != ExistingValue)
+ ExistingValue = 0;
}
}
@@ -300,31 +327,38 @@ Value *SSAUpdater::GetValueAtEndOfBlockInternal(BasicBlock *BB) {
/// above.
TrackingVH<Value> &InsertedVal = AvailableVals[BB];
- // If all the predecessor values are the same then we don't need to insert a
+ // If the predecessor values are not all the same, then check to see if there
+ // is an existing PHI that can be used.
+ if (!ExistingValue)
+ ExistingValue = GetExistingPHI(BB,
+ IncomingPredInfo.begin()+FirstPredInfoEntry,
+ IncomingPredInfo.end());
+
+ // If there is an existing value we can use, then we don't need to insert a
// PHI. This is the simple and common case.
- if (SingularValue) {
- // If a PHI node got inserted, replace it with the singlar value and delete
+ if (ExistingValue) {
+ // If a PHI node got inserted, replace it with the existing value and delete
// it.
if (InsertedVal) {
PHINode *OldVal = cast<PHINode>(InsertedVal);
// Be careful about dead loops. These RAUW's also update InsertedVal.
- if (InsertedVal != SingularValue)
- OldVal->replaceAllUsesWith(SingularValue);
+ if (InsertedVal != ExistingValue)
+ OldVal->replaceAllUsesWith(ExistingValue);
else
OldVal->replaceAllUsesWith(UndefValue::get(InsertedVal->getType()));
OldVal->eraseFromParent();
} else {
- InsertedVal = SingularValue;
+ InsertedVal = ExistingValue;
}
- // Either path through the 'if' should have set insertedVal -> SingularVal.
- assert((InsertedVal == SingularValue || isa<UndefValue>(InsertedVal)) &&
- "RAUW didn't change InsertedVal to be SingularVal");
+ // Either path through the 'if' should have set InsertedVal -> ExistingVal.
+ assert((InsertedVal == ExistingValue || isa<UndefValue>(InsertedVal)) &&
+ "RAUW didn't change InsertedVal to be ExistingValue");
// Drop the entries we added in IncomingPredInfo to restore the stack.
IncomingPredInfo.erase(IncomingPredInfo.begin()+FirstPredInfoEntry,
IncomingPredInfo.end());
- return SingularValue;
+ return ExistingValue;
}
// Otherwise, we do need a PHI: insert one now if we don't already have one.
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index cb53296..2215059 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -23,6 +23,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
@@ -36,6 +37,28 @@ using namespace llvm;
STATISTIC(NumSpeculations, "Number of speculative executed instructions");
+namespace {
+class SimplifyCFGOpt {
+ const TargetData *const TD;
+
+ ConstantInt *GetConstantInt(Value *V);
+ Value *GatherConstantSetEQs(Value *V, std::vector<ConstantInt*> &Values);
+ Value *GatherConstantSetNEs(Value *V, std::vector<ConstantInt*> &Values);
+ bool GatherValueComparisons(Instruction *Cond, Value *&CompVal,
+ std::vector<ConstantInt*> &Values);
+ Value *isValueEqualityComparison(TerminatorInst *TI);
+ BasicBlock *GetValueEqualityComparisonCases(TerminatorInst *TI,
+ std::vector<std::pair<ConstantInt*, BasicBlock*> > &Cases);
+ bool SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
+ BasicBlock *Pred);
+ bool FoldValueComparisonIntoPredecessors(TerminatorInst *TI);
+
+public:
+ explicit SimplifyCFGOpt(const TargetData *td) : TD(td) {}
+ bool run(BasicBlock *BB);
+};
+}
+
/// SafeToMergeTerminators - Return true if it is safe to merge these two
/// terminator instructions together.
///
@@ -243,17 +266,48 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
return true;
}
+/// GetConstantInt - Extract ConstantInt from value, looking through IntToPtr
+/// and PointerNullValue. Return NULL if value is not a constant int.
+ConstantInt *SimplifyCFGOpt::GetConstantInt(Value *V) {
+ // Normal constant int.
+ ConstantInt *CI = dyn_cast<ConstantInt>(V);
+ if (CI || !TD || !isa<Constant>(V) || !isa<PointerType>(V->getType()))
+ return CI;
+
+ // This is some kind of pointer constant. Turn it into a pointer-sized
+ // ConstantInt if possible.
+ const IntegerType *PtrTy = TD->getIntPtrType(V->getContext());
+
+ // Null pointer means 0, see SelectionDAGBuilder::getValue(const Value*).
+ if (isa<ConstantPointerNull>(V))
+ return ConstantInt::get(PtrTy, 0);
+
+ // IntToPtr const int.
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+ if (CE->getOpcode() == Instruction::IntToPtr)
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(0))) {
+ // The constant is very likely to have the right type already.
+ if (CI->getType() == PtrTy)
+ return CI;
+ else
+ return cast<ConstantInt>
+ (ConstantExpr::getIntegerCast(CI, PtrTy, /*isSigned=*/false));
+ }
+ return 0;
+}
+
/// GatherConstantSetEQs - Given a potentially 'or'd together collection of
/// icmp_eq instructions that compare a value against a constant, return the
/// value being compared, and stick the constant into the Values vector.
-static Value *GatherConstantSetEQs(Value *V, std::vector<ConstantInt*> &Values){
+Value *SimplifyCFGOpt::
+GatherConstantSetEQs(Value *V, std::vector<ConstantInt*> &Values) {
if (Instruction *Inst = dyn_cast<Instruction>(V)) {
if (Inst->getOpcode() == Instruction::ICmp &&
cast<ICmpInst>(Inst)->getPredicate() == ICmpInst::ICMP_EQ) {
- if (ConstantInt *C = dyn_cast<ConstantInt>(Inst->getOperand(1))) {
+ if (ConstantInt *C = GetConstantInt(Inst->getOperand(1))) {
Values.push_back(C);
return Inst->getOperand(0);
- } else if (ConstantInt *C = dyn_cast<ConstantInt>(Inst->getOperand(0))) {
+ } else if (ConstantInt *C = GetConstantInt(Inst->getOperand(0))) {
Values.push_back(C);
return Inst->getOperand(1);
}
@@ -270,14 +324,15 @@ static Value *GatherConstantSetEQs(Value *V, std::vector<ConstantInt*> &Values){
/// GatherConstantSetNEs - Given a potentially 'and'd together collection of
/// setne instructions that compare a value against a constant, return the value
/// being compared, and stick the constant into the Values vector.
-static Value *GatherConstantSetNEs(Value *V, std::vector<ConstantInt*> &Values){
+Value *SimplifyCFGOpt::
+GatherConstantSetNEs(Value *V, std::vector<ConstantInt*> &Values) {
if (Instruction *Inst = dyn_cast<Instruction>(V)) {
if (Inst->getOpcode() == Instruction::ICmp &&
cast<ICmpInst>(Inst)->getPredicate() == ICmpInst::ICMP_NE) {
- if (ConstantInt *C = dyn_cast<ConstantInt>(Inst->getOperand(1))) {
+ if (ConstantInt *C = GetConstantInt(Inst->getOperand(1))) {
Values.push_back(C);
return Inst->getOperand(0);
- } else if (ConstantInt *C = dyn_cast<ConstantInt>(Inst->getOperand(0))) {
+ } else if (ConstantInt *C = GetConstantInt(Inst->getOperand(0))) {
Values.push_back(C);
return Inst->getOperand(1);
}
@@ -294,8 +349,8 @@ static Value *GatherConstantSetNEs(Value *V, std::vector<ConstantInt*> &Values){
/// GatherValueComparisons - If the specified Cond is an 'and' or 'or' of a
/// bunch of comparisons of one value against constants, return the value and
/// the constants being compared.
-static bool GatherValueComparisons(Instruction *Cond, Value *&CompVal,
- std::vector<ConstantInt*> &Values) {
+bool SimplifyCFGOpt::GatherValueComparisons(Instruction *Cond, Value *&CompVal,
+ std::vector<ConstantInt*> &Values) {
if (Cond->getOpcode() == Instruction::Or) {
CompVal = GatherConstantSetEQs(Cond, Values);
@@ -327,29 +382,32 @@ static void EraseTerminatorInstAndDCECond(TerminatorInst *TI) {
/// isValueEqualityComparison - Return true if the specified terminator checks
/// to see if a value is equal to constant integer value.
-static Value *isValueEqualityComparison(TerminatorInst *TI) {
+Value *SimplifyCFGOpt::isValueEqualityComparison(TerminatorInst *TI) {
+ Value *CV = 0;
if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
// Do not permit merging of large switch instructions into their
// predecessors unless there is only one predecessor.
- if (SI->getNumSuccessors() * std::distance(pred_begin(SI->getParent()),
- pred_end(SI->getParent())) > 128)
- return 0;
-
- return SI->getCondition();
- }
- if (BranchInst *BI = dyn_cast<BranchInst>(TI))
+ if (SI->getNumSuccessors()*std::distance(pred_begin(SI->getParent()),
+ pred_end(SI->getParent())) <= 128)
+ CV = SI->getCondition();
+ } else if (BranchInst *BI = dyn_cast<BranchInst>(TI))
if (BI->isConditional() && BI->getCondition()->hasOneUse())
if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition()))
if ((ICI->getPredicate() == ICmpInst::ICMP_EQ ||
ICI->getPredicate() == ICmpInst::ICMP_NE) &&
- isa<ConstantInt>(ICI->getOperand(1)))
- return ICI->getOperand(0);
- return 0;
+ GetConstantInt(ICI->getOperand(1)))
+ CV = ICI->getOperand(0);
+
+ // Unwrap any lossless ptrtoint cast.
+ if (TD && CV && CV->getType() == TD->getIntPtrType(CV->getContext()))
+ if (PtrToIntInst *PTII = dyn_cast<PtrToIntInst>(CV))
+ CV = PTII->getOperand(0);
+ return CV;
}
/// GetValueEqualityComparisonCases - Given a value comparison instruction,
/// decode all of the 'cases' that it represents and return the 'default' block.
-static BasicBlock *
+BasicBlock *SimplifyCFGOpt::
GetValueEqualityComparisonCases(TerminatorInst *TI,
std::vector<std::pair<ConstantInt*,
BasicBlock*> > &Cases) {
@@ -362,7 +420,7 @@ GetValueEqualityComparisonCases(TerminatorInst *TI,
BranchInst *BI = cast<BranchInst>(TI);
ICmpInst *ICI = cast<ICmpInst>(BI->getCondition());
- Cases.push_back(std::make_pair(cast<ConstantInt>(ICI->getOperand(1)),
+ Cases.push_back(std::make_pair(GetConstantInt(ICI->getOperand(1)),
BI->getSuccessor(ICI->getPredicate() ==
ICmpInst::ICMP_NE)));
return BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_EQ);
@@ -421,8 +479,9 @@ ValuesOverlap(std::vector<std::pair<ConstantInt*, BasicBlock*> > &C1,
/// comparison with the same value, and if that comparison determines the
/// outcome of this comparison. If so, simplify TI. This does a very limited
/// form of jump threading.
-static bool SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
- BasicBlock *Pred) {
+bool SimplifyCFGOpt::
+SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
+ BasicBlock *Pred) {
Value *PredVal = isValueEqualityComparison(Pred->getTerminator());
if (!PredVal) return false; // Not a value comparison in predecessor.
@@ -548,7 +607,7 @@ namespace {
/// equality comparison instruction (either a switch or a branch on "X == c").
/// See if any of the predecessors of the terminator block are value comparisons
/// on the same value. If so, and if safe to do so, fold them together.
-static bool FoldValueComparisonIntoPredecessors(TerminatorInst *TI) {
+bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI) {
BasicBlock *BB = TI->getParent();
Value *CV = isValueEqualityComparison(TI); // CondVal
assert(CV && "Not a comparison?");
@@ -641,6 +700,13 @@ static bool FoldValueComparisonIntoPredecessors(TerminatorInst *TI) {
for (unsigned i = 0, e = NewSuccessors.size(); i != e; ++i)
AddPredecessorToBlock(NewSuccessors[i], Pred, BB);
+ // Convert pointer to int before we switch.
+ if (isa<PointerType>(CV->getType())) {
+ assert(TD && "Cannot switch on pointer without TargetData");
+ CV = new PtrToIntInst(CV, TD->getIntPtrType(CV->getContext()),
+ "magicptr", PTI);
+ }
+
// Now that the successors are updated, create the new Switch instruction.
SwitchInst *NewSI = SwitchInst::Create(CV, PredDefault,
PredCases.size(), PTI);
@@ -1011,7 +1077,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI) {
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
ConstantInt *CB;
if ((CB = dyn_cast<ConstantInt>(PN->getIncomingValue(i))) &&
- CB->getType()->isInteger(1)) {
+ CB->getType()->isIntegerTy(1)) {
// Okay, we now know that all edges from PredBB should be revectored to
// branch to RealDest.
BasicBlock *PredBB = PN->getIncomingBlock(i);
@@ -1589,14 +1655,7 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
return true;
}
-/// SimplifyCFG - This function is used to do simplification of a CFG. For
-/// example, it adjusts branches to branches to eliminate the extra hop, it
-/// eliminates unreachable basic blocks, and does other "peephole" optimization
-/// of the CFG. It returns true if a modification was made.
-///
-/// WARNING: The entry node of a function may not be simplified.
-///
-bool llvm::SimplifyCFG(BasicBlock *BB) {
+bool SimplifyCFGOpt::run(BasicBlock *BB) {
bool Changed = false;
Function *M = BB->getParent();
@@ -1997,7 +2056,7 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
Value *CompVal = 0;
std::vector<ConstantInt*> Values;
bool TrueWhenEqual = GatherValueComparisons(Cond, CompVal, Values);
- if (CompVal && CompVal->getType()->isInteger()) {
+ if (CompVal) {
// There might be duplicate constants in the list, which the switch
// instruction can't handle, remove them now.
std::sort(Values.begin(), Values.end(), ConstantIntOrdering());
@@ -2008,6 +2067,14 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
BasicBlock *EdgeBB = BI->getSuccessor(0);
if (!TrueWhenEqual) std::swap(DefaultBB, EdgeBB);
+ // Convert pointer to int before we switch.
+ if (isa<PointerType>(CompVal->getType())) {
+ assert(TD && "Cannot switch on pointer without TargetData");
+ CompVal = new PtrToIntInst(CompVal,
+ TD->getIntPtrType(CompVal->getContext()),
+ "magicptr", BI);
+ }
+
// Create the new switch instruction now.
SwitchInst *New = SwitchInst::Create(CompVal, DefaultBB,
Values.size(), BI);
@@ -2035,3 +2102,14 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
return Changed;
}
+
+/// SimplifyCFG - This function is used to do simplification of a CFG. For
+/// example, it adjusts branches to branches to eliminate the extra hop, it
+/// eliminates unreachable basic blocks, and does other "peephole" optimization
+/// of the CFG. It returns true if a modification was made.
+///
+/// WARNING: The entry node of a function may not be simplified.
+///
+bool llvm::SimplifyCFG(BasicBlock *BB, const TargetData *TD) {
+ return SimplifyCFGOpt(TD).run(BB);
+}
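
With the TargetData parameter threaded through, SimplifyCFG can turn chains of equality comparisons over pointer values into switches via ptrtoint. A hedged sketch of how a calling pass might supply it, assuming SimplifyCFG is declared in llvm/Transforms/Utils/Local.h (illustrative; not taken from the tree):

#include "llvm/BasicBlock.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// TargetData is optional: fetch it if the pass manager has it, otherwise
// pass null and SimplifyCFG simply skips the pointer-switch transformations.
static bool simplifyBlock(Pass &P, BasicBlock *BB) {
  const TargetData *TD = P.getAnalysisIfAvailable<TargetData>();
  return SimplifyCFG(BB, TD);
}
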
diff --git a/lib/Transforms/Utils/ValueMapper.cpp b/lib/Transforms/Utils/ValueMapper.cpp
index a6e6701..6045048 100644
--- a/lib/Transforms/Utils/ValueMapper.cpp
+++ b/lib/Transforms/Utils/ValueMapper.cpp
@@ -35,7 +35,7 @@ Value *llvm::MapValue(const Value *V, ValueMapTy &VM) {
if (const MDNode *MD = dyn_cast<MDNode>(V)) {
SmallVector<Value*, 4> Elts;
- for (unsigned i = 0; i != MD->getNumOperands(); i++)
+ for (unsigned i = 0, e = MD->getNumOperands(); i != e; ++i)
Elts.push_back(MD->getOperand(i) ? MapValue(MD->getOperand(i), VM) : 0);
return VM[V] = MDNode::get(V->getContext(), Elts.data(), Elts.size());
}