Diffstat (limited to 'lib/Analysis')
-rw-r--r--  lib/Analysis/ConstantFolding.cpp | 25
-rw-r--r--  lib/Analysis/InlineCost.cpp      | 11
-rw-r--r--  lib/Analysis/LoopInfo.cpp        | 11
-rw-r--r--  lib/Analysis/ValueTracking.cpp   |  2
4 files changed, 34 insertions, 15 deletions
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 114db2d..96bb027 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -589,6 +589,30 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
   APInt Offset = APInt(BitWidth,
                        TD->getIndexedOffset(Ptr->getType(),
                                             (Value**)Ops+1, NumOps-1));
+  Ptr = cast<Constant>(Ptr->stripPointerCasts());
+
+  // If this is a GEP of a GEP, fold it all into a single GEP.
+  while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
+    SmallVector<Value *, 4> NestedOps(GEP->op_begin()+1, GEP->op_end());
+
+    // Do not try the incorporate the sub-GEP if some index is not a number.
+    bool AllConstantInt = true;
+    for (unsigned i = 0, e = NestedOps.size(); i != e; ++i)
+      if (!isa<ConstantInt>(NestedOps[i])) {
+        AllConstantInt = false;
+        break;
+      }
+    if (!AllConstantInt)
+      break;
+
+    Ptr = cast<Constant>(GEP->getOperand(0));
+    Offset += APInt(BitWidth,
+                    TD->getIndexedOffset(Ptr->getType(),
+                                         (Value**)NestedOps.data(),
+                                         NestedOps.size()));
+    Ptr = cast<Constant>(Ptr->stripPointerCasts());
+  }
+
   // If the base value for this address is a literal integer value, fold the
   // getelementptr to the resulting integer value casted to the pointer type.
   if (BaseIsInt) {
@@ -600,7 +624,6 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
   // we eliminate over-indexing of the notional static type array bounds.
   // This makes it easy to determine if the getelementptr is "inbounds".
   // Also, this helps GlobalOpt do SROA on GlobalVariables.
-  Ptr = cast<Constant>(Ptr->stripPointerCasts());
   const Type *Ty = Ptr->getType();
   SmallVector<Constant*, 32> NewIdxs;
   do {
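Note on the ConstantFolding.cpp change: SymbolicallyEvaluateGEP now looks through a chain of getelementptrs whose indices are all ConstantInts, folding each inner GEP's byte offset into a single base-plus-offset form before rebuilding the indices. Below is a minimal standalone sketch of that folding loop; it is not the LLVM API (GepNode, ElemSize and foldGepChain are invented for illustration, and the real offset arithmetic goes through TargetData::getIndexedOffset with an APInt accumulator):

    // Hypothetical model, not LLVM code: each GepNode is "base + sum(index * element size)",
    // with std::nullopt standing in for a non-constant index.
    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <utility>
    #include <vector>

    struct GepNode {
      const GepNode *Base = nullptr;            // inner GEP, or null for the root object
      std::vector<std::optional<int64_t>> Idx;  // nullopt = index is not a constant
      int64_t ElemSize = 1;                     // bytes each index step contributes
    };

    // Mirror the new while-loop: as long as the pointer is itself a GEP whose
    // indices are all constants, strip it and fold its offset into the total.
    static std::pair<const GepNode *, int64_t> foldGepChain(const GepNode *G) {
      int64_t Offset = 0;
      while (G->Base) {
        int64_t Local = 0;
        bool AllConstant = true;
        for (const auto &I : G->Idx) {
          if (!I) { AllConstant = false; break; }
          Local += *I * G->ElemSize;
        }
        if (!AllConstant)
          break;                                // same bail-out as "if (!AllConstantInt)"
        Offset += Local;
        G = G->Base;
      }
      return {G, Offset};
    }

    int main() {
      GepNode Root;                             // the underlying object
      GepNode Inner{&Root, {4}, 8};             // gep root, 4     -> 32 bytes
      GepNode Outer{&Inner, {2, 1}, 4};         // gep inner, 2, 1 -> 12 bytes
      auto [Base, Off] = foldGepChain(&Outer);
      std::cout << "reached root base? " << (Base == &Root)
                << ", single offset = " << Off << " bytes\n";   // 1, 44 bytes
    }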
diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp
index 0f1f93b..5b8b534 100644
--- a/lib/Analysis/InlineCost.cpp
+++ b/lib/Analysis/InlineCost.cpp
@@ -22,7 +22,7 @@ using namespace llvm;
 // instructions will be constant folded if the specified value is constant.
 //
 unsigned InlineCostAnalyzer::FunctionInfo::
-  CountCodeReductionForConstant(Value *V) {
+CountCodeReductionForConstant(Value *V) {
   unsigned Reduction = 0;
   for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
     if (isa<BranchInst>(*UI) || isa<SwitchInst>(*UI)) {
@@ -31,7 +31,7 @@ unsigned InlineCostAnalyzer::FunctionInfo::
       const unsigned NumSucc = TI.getNumSuccessors();
       unsigned Instrs = 0;
       for (unsigned I = 0; I != NumSucc; ++I)
-        Instrs += TI.getSuccessor(I)->size();
+        Instrs += Metrics.NumBBInsts[TI.getSuccessor(I)];
       // We don't know which blocks will be eliminated, so use the average size.
       Reduction += InlineConstants::InstrCost*Instrs*(NumSucc-1)/NumSucc;
     } else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
@@ -120,7 +120,7 @@ static bool callIsSmall(const Function *F) {
   StringRef Name = F->getName();
 
   // These will all likely lower to a single selection DAG node.
-  if (Name == "copysign" || Name == "copysignf" ||
+  if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
       Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
       Name == "sin" || Name == "sinf" || Name == "sinl" ||
       Name == "cos" || Name == "cosf" || Name == "cosl" ||
@@ -142,7 +142,7 @@ static bool callIsSmall(const Function *F) {
 /// from the specified block.
 void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
   ++NumBlocks;
-  
+  unsigned NumInstsBeforeThisBB = NumInsts;
   for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
        II != E; ++II) {
     if (isa<PHINode>(II)) continue;           // PHI nodes don't count.
@@ -208,6 +208,9 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
   // function which is extremely undefined behavior.
   if (isa<IndirectBrInst>(BB->getTerminator()))
     NeverInline = true;
+
+  // Remember NumInsts for this BB.
+  NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
 }
 
 /// analyzeFunction - Fill in the current structure with information gleaned
diff --git a/lib/Analysis/LoopInfo.cpp b/lib/Analysis/LoopInfo.cpp
index 2139c29..1001d2b 100644
--- a/lib/Analysis/LoopInfo.cpp
+++ b/lib/Analysis/LoopInfo.cpp
@@ -263,14 +263,7 @@ unsigned Loop::getSmallConstantTripMultiple() const {
 }
 
 /// isLCSSAForm - Return true if the Loop is in LCSSA form
-bool Loop::isLCSSAForm() const {
-  // Collect all the reachable blocks in the function, for fast lookups.
-  SmallPtrSet<BasicBlock *, 32> ReachableBBs;
-  BasicBlock *EntryBB = getHeader()->getParent()->begin();
-  for (df_iterator<BasicBlock *> NI = df_begin(EntryBB),
-       NE = df_end(EntryBB); NI != NE; ++NI)
-    ReachableBBs.insert(*NI);
-
+bool Loop::isLCSSAForm(DominatorTree &DT) const {
   // Sort the blocks vector so that we can use binary search to do quick
   // lookups.
   SmallPtrSet<BasicBlock *, 16> LoopBBs(block_begin(), block_end());
@@ -290,7 +283,7 @@ bool Loop::isLCSSAForm() const {
       // entry are special; uses in them don't need to go through PHIs.
       if (UserBB != BB &&
           !LoopBBs.count(UserBB) &&
-          ReachableBBs.count(UserBB))
+          DT.isReachableFromEntry(UserBB))
         return false;
     }
   }
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 92cbb7c..5ae72f7 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -779,7 +779,7 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
     for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
       if (Tmp == 1) return Tmp;
       Tmp = std::min(Tmp,
-                     ComputeNumSignBits(PN->getIncomingValue(1), TD, Depth+1));
+                     ComputeNumSignBits(PN->getIncomingValue(i), TD, Depth+1));
     }
     return Tmp;
   }
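Note on the InlineCost.cpp change: analyzeBasicBlock now records a per-block instruction count in NumBBInsts, and CountCodeReductionForConstant uses those counts instead of the raw BasicBlock::size(), which also counts instructions such as PHIs that CodeMetrics deliberately skips. The heuristic itself is unchanged: if a branch or switch on the argument folds to a constant, only one of its NumSucc successors survives, so roughly (NumSucc-1)/NumSucc of the instructions counted across the successors are expected to disappear. A rough standalone illustration of that arithmetic, with made-up numbers standing in for InlineConstants::InstrCost and Metrics.NumBBInsts:

    // Rough standalone illustration, not LLVM code.
    #include <cstdio>
    #include <vector>

    int main() {
      const unsigned InstrCost = 5;               // assumed per-instruction cost
      std::vector<unsigned> SuccInsts = {12, 7};  // assumed instruction count per successor block
      unsigned NumSucc = static_cast<unsigned>(SuccInsts.size());
      unsigned Instrs = 0;
      for (unsigned N : SuccInsts)
        Instrs += N;                              // analogue of Metrics.NumBBInsts[successor]
      // We don't know which successor survives, so assume the average case.
      unsigned Reduction = InstrCost * Instrs * (NumSucc - 1) / NumSucc;
      std::printf("estimated code-size reduction: %u\n", Reduction);  // prints 47
    }

The LoopInfo.cpp and ValueTracking.cpp hunks are smaller: isLCSSAForm now asks the caller-supplied DominatorTree whether a use's block is reachable instead of recomputing reachability with its own depth-first walk, and ComputeNumSignBits now takes the minimum over each PHI incoming value (index i) rather than repeatedly examining incoming value 1.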