summary | refs | log | tree | commit | diff | stats
path: root/contrib/llvm/lib/Transforms
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/llvm/lib/Transforms')
-rw-r--r--contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp10
-rw-r--r--contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/IPO/Inliner.cpp17
-rw-r--r--contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/IPO/LowerBitSets.cpp10
-rw-r--r--contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/IPO/PruneEH.cpp65
-rw-r--r--contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp8
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp28
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp37
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp19
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp17
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/SafeStack.cpp7
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp7
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/BlotMapVector.h2
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp5
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp20
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp8
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ADCE.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/BDCE.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/DCE.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp12
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/FlattenCFGPass.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Float2Int.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/GVN.cpp20
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp15
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LICM.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoadCombine.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopDistribute.cpp54
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp18
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp194
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/NaryReassociate.cpp69
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp14
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp183
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SROA.cpp12
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SampleProfile.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp13
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Utils/ASanStackFrameLayout.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp7
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CloneModule.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CtorUtils.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/FlattenCFG.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/InstructionNamer.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LCSSA.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp5
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/MetaRenamer.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp201
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SymbolRewriter.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp61
-rw-r--r--contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp2
101 files changed, 783 insertions(+), 537 deletions(-)
diff --git a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index 86b3faa..f754363 100644
--- a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -92,7 +92,7 @@ namespace {
unsigned maxElements;
DenseMap<const Function *, DISubprogram *> FunctionDIs;
};
-} // namespace
+}
char ArgPromotion::ID = 0;
INITIALIZE_PASS_BEGIN(ArgPromotion, "argpromotion",
diff --git a/contrib/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp b/contrib/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp
index 7585fdc..6af1043 100644
--- a/contrib/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp
@@ -38,7 +38,7 @@ public:
bool runOnModule(Module &M) override { return false; }
};
-} // namespace
+}
ModulePass *llvm::createBarrierNoopPass() { return new BarrierNoop(); }
diff --git a/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp b/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
index 3b68743..8ce7646 100644
--- a/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
@@ -53,7 +53,7 @@ namespace {
unsigned getAlignment(GlobalVariable *GV) const;
};
-} // namespace
+}
char ConstantMerge::ID = 0;
INITIALIZE_PASS(ConstantMerge, "constmerge",
diff --git a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 6bfd3d1..76898f2 100644
--- a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -159,7 +159,7 @@ namespace {
bool DeleteDeadVarargs(Function &Fn);
bool RemoveDeadArgumentsFromCallers(Function &Fn);
};
-} // namespace
+}
char DAE::ID = 0;
@@ -175,7 +175,7 @@ namespace {
bool ShouldHackArguments() const override { return true; }
};
-} // namespace
+}
char DAH::ID = 0;
INITIALIZE_PASS(DAH, "deadarghaX0r",
diff --git a/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp b/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp
index 7e0dddc..2f8c7d9 100644
--- a/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp
@@ -146,7 +146,7 @@ namespace {
};
char GVExtractorPass::ID = 0;
-} // namespace
+}
ModulePass *llvm::createGVExtractionPass(std::vector<GlobalValue *> &GVs,
bool deleteFn) {
diff --git a/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 749ff99..bb5e64a 100644
--- a/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -132,7 +132,7 @@ namespace {
AliasAnalysis *AA;
TargetLibraryInfo *TLI;
};
-} // namespace
+}
char FunctionAttrs::ID = 0;
INITIALIZE_PASS_BEGIN(FunctionAttrs, "functionattrs",
@@ -379,7 +379,7 @@ namespace {
const SmallPtrSet<Function*, 8> &SCCNodes;
};
-} // namespace
+}
namespace llvm {
template<> struct GraphTraits<ArgumentGraphNode*> {
@@ -406,7 +406,7 @@ namespace llvm {
return AG->end();
}
};
-} // namespace llvm
+}
// Returns Attribute::None, Attribute::ReadOnly or Attribute::ReadNone.
static Attribute::AttrKind
diff --git a/contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp b/contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp
index 7983104..61d0ff9 100644
--- a/contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp
@@ -57,7 +57,7 @@ namespace {
bool RemoveUnusedGlobalValue(GlobalValue &GV);
};
-} // namespace
+}
/// Returns true if F contains only a single "ret" instruction.
static bool isEmptyFunction(Function *F) {
diff --git a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 0d83c82..5ffe15d 100644
--- a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -89,7 +89,7 @@ namespace {
TargetLibraryInfo *TLI;
SmallSet<const Comdat *, 8> NotDiscardableComdats;
};
-} // namespace
+}
char GlobalOpt::ID = 0;
INITIALIZE_PASS_BEGIN(GlobalOpt, "globalopt",
@@ -1992,11 +1992,9 @@ isSimpleEnoughValueToCommitHelper(Constant *C,
// Aggregate values are safe if all their elements are.
if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) ||
isa<ConstantVector>(C)) {
- for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
- Constant *Op = cast<Constant>(C->getOperand(i));
- if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, DL))
+ for (Value *Op : C->operands())
+ if (!isSimpleEnoughValueToCommit(cast<Constant>(Op), SimpleConstants, DL))
return false;
- }
return true;
}
@@ -2786,7 +2784,7 @@ public:
setUsedInitializer(*CompilerUsedV, CompilerUsed);
}
};
-} // namespace
+}
static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) {
if (GA.use_empty()) // No use at all.
diff --git a/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp b/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
index d717b25..af541d1 100644
--- a/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
@@ -45,7 +45,7 @@ namespace {
bool PropagateConstantsIntoArguments(Function &F);
bool PropagateConstantReturn(Function &F);
};
-} // namespace
+}
char IPCP::ID = 0;
INITIALIZE_PASS(IPCP, "ipconstprop",
diff --git a/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp b/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
index 37ff091..dc56a02 100644
--- a/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
@@ -62,7 +62,7 @@ public:
}
};
-} // namespace
+}
char AlwaysInliner::ID = 0;
INITIALIZE_PASS_BEGIN(AlwaysInliner, "always-inline",
diff --git a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
index 93cdba6..5273c3d 100644
--- a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -199,8 +199,7 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
// set to keep track of which "available" allocas are being used by this
// function. Also, AllocasForType can be empty of course!
bool MergedAwayAlloca = false;
- for (unsigned i = 0, e = AllocasForType.size(); i != e; ++i) {
- AllocaInst *AvailableAlloca = AllocasForType[i];
+ for (AllocaInst *AvailableAlloca : AllocasForType) {
unsigned Align1 = AI->getAlignment(),
Align2 = AvailableAlloca->getAlignment();
@@ -482,7 +481,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
// If there are no calls in this function, exit early.
if (CallSites.empty())
return false;
-
+
// Now that we have all of the call sites, move the ones to functions in the
// current SCC to the end of the list.
unsigned FirstCallInSCC = CallSites.size();
@@ -592,7 +591,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
DEBUG(dbgs() << " -> Deleting dead function: "
<< Callee->getName() << "\n");
CallGraphNode *CalleeNode = CG[Callee];
-
+
// Remove any call graph edges from the callee to its callees.
CalleeNode->removeAllCalledFunctions();
@@ -648,8 +647,8 @@ bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
// Scan for all of the functions, looking for ones that should now be removed
// from the program. Insert the dead ones in the FunctionsToRemove set.
- for (CallGraph::iterator I = CG.begin(), E = CG.end(); I != E; ++I) {
- CallGraphNode *CGN = I->second;
+ for (auto I : CG) {
+ CallGraphNode *CGN = I.second;
Function *F = CGN->getFunction();
if (!F || F->isDeclaration())
continue;
@@ -724,10 +723,8 @@ bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
FunctionsToRemove.erase(std::unique(FunctionsToRemove.begin(),
FunctionsToRemove.end()),
FunctionsToRemove.end());
- for (SmallVectorImpl<CallGraphNode *>::iterator I = FunctionsToRemove.begin(),
- E = FunctionsToRemove.end();
- I != E; ++I) {
- delete CG.removeFunctionFromModule(*I);
+ for (CallGraphNode *CGN : FunctionsToRemove) {
+ delete CG.removeFunctionFromModule(CGN);
++NumDeleted;
}
return true;
diff --git a/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp b/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp
index ada4a76..41334ca 100644
--- a/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp
@@ -51,7 +51,7 @@ namespace {
AU.addRequired<DominatorTreeWrapperPass>();
}
};
-} // namespace
+}
char LoopExtractor::ID = 0;
INITIALIZE_PASS_BEGIN(LoopExtractor, "loop-extract",
@@ -183,7 +183,7 @@ namespace {
bool runOnModule(Module &M) override;
};
-} // namespace
+}
char BlockExtractorPass::ID = 0;
INITIALIZE_PASS(BlockExtractorPass, "extract-blocks",
diff --git a/contrib/llvm/lib/Transforms/IPO/LowerBitSets.cpp b/contrib/llvm/lib/Transforms/IPO/LowerBitSets.cpp
index bffeebb..c6795c6 100644
--- a/contrib/llvm/lib/Transforms/IPO/LowerBitSets.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/LowerBitSets.cpp
@@ -271,8 +271,10 @@ BitSetInfo LowerBitSets::buildBitSet(
for (MDNode *Op : BitSetNM->operands()) {
if (Op->getOperand(0) != BitSet || !Op->getOperand(1))
continue;
- auto OpGlobal = cast<GlobalVariable>(
+ auto OpGlobal = dyn_cast<GlobalVariable>(
cast<ConstantAsMetadata>(Op->getOperand(1))->getValue());
+ if (!OpGlobal)
+ continue;
uint64_t Offset =
cast<ConstantInt>(cast<ConstantAsMetadata>(Op->getOperand(2))
->getValue())->getZExtValue();
@@ -621,7 +623,7 @@ bool LowerBitSets::buildBitSets() {
report_fatal_error("Bit set element must be a constant");
auto OpGlobal = dyn_cast<GlobalVariable>(OpConstMD->getValue());
if (!OpGlobal)
- report_fatal_error("Bit set element must refer to global");
+ continue;
auto OffsetConstMD = dyn_cast<ConstantAsMetadata>(Op->getOperand(2));
if (!OffsetConstMD)
@@ -675,8 +677,10 @@ bool LowerBitSets::buildBitSets() {
if (I == BitSetIndices.end())
continue;
- auto OpGlobal = cast<GlobalVariable>(
+ auto OpGlobal = dyn_cast<GlobalVariable>(
cast<ConstantAsMetadata>(Op->getOperand(1))->getValue());
+ if (!OpGlobal)
+ continue;
BitSetMembers[I->second].insert(GlobalIndices[OpGlobal]);
}
}
diff --git a/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp b/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
index 5e41798..2e3519e 100644
--- a/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
@@ -409,7 +409,7 @@ public:
return (FunctionComparator(F, RHS.getFunc()).compare()) == -1;
}
};
-} // namespace
+}
int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
if (L < R) return -1;
diff --git a/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp b/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp
index 7a7065c..4a7cb7b 100644
--- a/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp
@@ -40,7 +40,7 @@ namespace {
private:
Function* unswitchFunction(Function* F);
};
-} // namespace
+}
char PartialInliner::ID = 0;
INITIALIZE_PASS(PartialInliner, "partial-inliner",
diff --git a/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp b/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
index a5ba9ee..b2f1010 100644
--- a/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
@@ -25,6 +25,7 @@
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
@@ -49,7 +50,7 @@ namespace {
bool SimplifyFunction(Function *F);
void DeleteBasicBlock(BasicBlock *BB);
};
-} // namespace
+}
char PruneEH::ID = 0;
INITIALIZE_PASS_BEGIN(PruneEH, "prune-eh",
@@ -97,42 +98,54 @@ bool PruneEH::runOnSCC(CallGraphSCC &SCC) {
} else {
bool CheckUnwind = !SCCMightUnwind && !F->doesNotThrow();
bool CheckReturn = !SCCMightReturn && !F->doesNotReturn();
+ // Determine if we should scan for InlineAsm in a naked function as it
+ // is the only way to return without a ReturnInst. Only do this for
+ // no-inline functions as functions which may be inlined cannot
+ // meaningfully return via assembly.
+ bool CheckReturnViaAsm = CheckReturn &&
+ F->hasFnAttribute(Attribute::Naked) &&
+ F->hasFnAttribute(Attribute::NoInline);
if (!CheckUnwind && !CheckReturn)
continue;
- // Check to see if this function performs an unwind or calls an
- // unwinding function.
- for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
- if (CheckUnwind && isa<ResumeInst>(BB->getTerminator())) {
- // Uses unwind / resume!
+ for (const BasicBlock &BB : *F) {
+ const TerminatorInst *TI = BB.getTerminator();
+ if (CheckUnwind && TI->mayThrow()) {
SCCMightUnwind = true;
- } else if (CheckReturn && isa<ReturnInst>(BB->getTerminator())) {
+ } else if (CheckReturn && isa<ReturnInst>(TI)) {
SCCMightReturn = true;
}
- // Invoke instructions don't allow unwinding to continue, so we are
- // only interested in call instructions.
- if (CheckUnwind && !SCCMightUnwind)
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
- if (CallInst *CI = dyn_cast<CallInst>(I)) {
- if (CI->doesNotThrow()) {
- // This call cannot throw.
- } else if (Function *Callee = CI->getCalledFunction()) {
+ for (const Instruction &I : BB) {
+ if ((!CheckUnwind || SCCMightUnwind) &&
+ (!CheckReturnViaAsm || SCCMightReturn))
+ break;
+
+ // Check to see if this function performs an unwind or calls an
+ // unwinding function.
+ if (CheckUnwind && !SCCMightUnwind && I.mayThrow()) {
+ bool InstMightUnwind = true;
+ if (const auto *CI = dyn_cast<CallInst>(&I)) {
+ if (Function *Callee = CI->getCalledFunction()) {
CallGraphNode *CalleeNode = CG[Callee];
- // If the callee is outside our current SCC then we may
- // throw because it might.
- if (!SCCNodes.count(CalleeNode)) {
- SCCMightUnwind = true;
- break;
- }
- } else {
- // Indirect call, it might throw.
- SCCMightUnwind = true;
- break;
+ // If the callee is outside our current SCC then we may throw
+ // because it might. If it is inside, do nothing.
+ if (SCCNodes.count(CalleeNode) > 0)
+ InstMightUnwind = false;
}
}
- if (SCCMightUnwind && SCCMightReturn) break;
+ SCCMightUnwind |= InstMightUnwind;
+ }
+ if (CheckReturnViaAsm && !SCCMightReturn)
+ if (auto ICS = ImmutableCallSite(&I))
+ if (const auto *IA = dyn_cast<InlineAsm>(ICS.getCalledValue()))
+ if (IA->hasSideEffects())
+ SCCMightReturn = true;
+ }
+
+ if (SCCMightUnwind && SCCMightReturn)
+ break;
}
}
}
diff --git a/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp b/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
index 6f9af1d..a4f30c5 100644
--- a/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
@@ -95,7 +95,7 @@ namespace {
AU.setPreservesAll();
}
};
-} // namespace
+}
char StripSymbols::ID = 0;
INITIALIZE_PASS(StripSymbols, "strip",
@@ -142,9 +142,9 @@ static bool OnlyUsedBy(Value *V, Value *Usr) {
static void RemoveDeadConstant(Constant *C) {
assert(C->use_empty() && "Constant is not dead!");
SmallPtrSet<Constant*, 4> Operands;
- for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i)
- if (OnlyUsedBy(C->getOperand(i), C))
- Operands.insert(cast<Constant>(C->getOperand(i)));
+ for (Value *Op : C->operands())
+ if (OnlyUsedBy(Op, C))
+ Operands.insert(cast<Constant>(Op));
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) {
if (!GV->hasLocalLinkage()) return; // Don't delete non-static globals.
GV->eraseFromParent();
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 29ecc1d..2d2c109f 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -193,7 +193,7 @@ namespace {
void incCreateInstNum() {}
#endif
};
-} // namespace
+}
//===----------------------------------------------------------------------===//
//
@@ -1611,6 +1611,32 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
return BinaryOperator::CreateAnd(A, B);
}
+ // (sub (select (a, c, b)), (select (a, d, b))) -> (select (a, (sub c, d), 0))
+ // (sub (select (a, b, c)), (select (a, b, d))) -> (select (a, 0, (sub c, d)))
+ if (auto *SI0 = dyn_cast<SelectInst>(Op0)) {
+ if (auto *SI1 = dyn_cast<SelectInst>(Op1)) {
+ if (SI0->getCondition() == SI1->getCondition()) {
+ if (Value *V = SimplifySubInst(
+ SI0->getFalseValue(), SI1->getFalseValue(), I.hasNoSignedWrap(),
+ I.hasNoUnsignedWrap(), DL, TLI, DT, AC))
+ return SelectInst::Create(
+ SI0->getCondition(),
+ Builder->CreateSub(SI0->getTrueValue(), SI1->getTrueValue(), "",
+ /*HasNUW=*/I.hasNoUnsignedWrap(),
+ /*HasNSW=*/I.hasNoSignedWrap()),
+ V);
+ if (Value *V = SimplifySubInst(SI0->getTrueValue(), SI1->getTrueValue(),
+ I.hasNoSignedWrap(),
+ I.hasNoUnsignedWrap(), DL, TLI, DT, AC))
+ return SelectInst::Create(
+ SI0->getCondition(), V,
+ Builder->CreateSub(SI0->getFalseValue(), SI1->getFalseValue(), "",
+ /*HasNUW=*/I.hasNoUnsignedWrap(),
+ /*HasNSW=*/I.hasNoSignedWrap()));
+ }
+ }
+ }
+
if (Op0->hasOneUse()) {
Value *Y = nullptr;
// ((X | Y) - X) --> (~X & Y)
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index f53eeef..010b7b5 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -2646,7 +2646,8 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
Changed = true;
}
- if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL, TLI, DT, AC))
+ if (Value *V =
+ SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL, TLI, DT, AC, &I))
return ReplaceInstUsesWith(I, V);
// comparing -val or val with non-zero is the same as just comparing val
@@ -3927,7 +3928,8 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, DL, TLI, DT, AC))
+ if (Value *V =
+ SimplifyFCmpInst(I.getPredicate(), Op0, Op1, DL, TLI, DT, AC, &I))
return ReplaceInstUsesWith(I, V);
// Simplify 'fcmp pred X, X'
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 6b384b4..a554e9f 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -948,7 +948,7 @@ struct UDivFoldAction {
UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand, size_t SLHS)
: FoldAction(FA), OperandToFold(InputOperand), SelectLHSIdx(SLHS) {}
};
-} // namespace
+}
// X udiv 2^C -> X >> C
static Instruction *foldUDivPow2Cst(Value *Op0, Value *Op1,
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
index a93ffbe..460f6eb 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -19,9 +19,8 @@ using namespace llvm;
#define DEBUG_TYPE "instcombine"
-/// FoldPHIArgBinOpIntoPHI - If we have something like phi [add (a,b), add(a,c)]
-/// and if a/b/c and the add's all have a single use, turn this into a phi
-/// and a single binop.
+/// If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the
+/// adds all have a single use, turn this into a phi and a single binop.
Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
@@ -238,10 +237,9 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
}
-/// isSafeAndProfitableToSinkLoad - Return true if we know that it is safe to
-/// sink the load out of the block that defines it. This means that it must be
-/// obvious the value of the load is not changed from the point of the load to
-/// the end of the block it is in.
+/// Return true if we know that it is safe to sink the load out of the block
+/// that defines it. This means that it must be obvious the value of the load is
+/// not changed from the point of the load to the end of the block it is in.
///
/// Finally, it is safe, but not profitable, to sink a load targeting a
/// non-address-taken alloca. Doing so will cause us to not promote the alloca
@@ -385,9 +383,9 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
-/// FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
-/// operator and they all are only used by the PHI, PHI together their
-/// inputs, and do the operation once, to the result of the PHI.
+/// If all operands to a PHI node are the same "unary" operator and they all are
+/// only used by the PHI, PHI together their inputs, and do the operation once,
+/// to the result of the PHI.
Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
@@ -503,8 +501,7 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
return NewCI;
}
-/// DeadPHICycle - Return true if this PHI node is only used by a PHI node cycle
-/// that is dead.
+/// Return true if this PHI node is only used by a PHI node cycle that is dead.
static bool DeadPHICycle(PHINode *PN,
SmallPtrSetImpl<PHINode*> &PotentiallyDeadPHIs) {
if (PN->use_empty()) return true;
@@ -524,8 +521,8 @@ static bool DeadPHICycle(PHINode *PN,
return false;
}
-/// PHIsEqualValue - Return true if this phi node is always equal to
-/// NonPhiInVal. This happens with mutually cyclic phi nodes like:
+/// Return true if this phi node is always equal to NonPhiInVal.
+/// This happens with mutually cyclic phi nodes like:
/// z = some value; x = phi (y, z); y = phi (x, z)
static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
SmallPtrSetImpl<PHINode*> &ValueEqualPHIs) {
@@ -582,7 +579,7 @@ struct LoweredPHIRecord {
LoweredPHIRecord(PHINode *pn, unsigned Sh)
: PN(pn), Shift(Sh), Width(0) {}
};
-} // namespace
+}
namespace llvm {
template<>
@@ -603,13 +600,13 @@ namespace llvm {
LHS.Width == RHS.Width;
}
};
-} // namespace llvm
+}
-/// SliceUpIllegalIntegerPHI - This is an integer PHI and we know that it has an
-/// illegal type: see if it is only used by trunc or trunc(lshr) operations. If
-/// so, we split the PHI into the various pieces being extracted. This sort of
-/// thing is introduced when SROA promotes an aggregate to large integer values.
+/// This is an integer PHI and we know that it has an illegal type: see if it is
+/// only used by trunc or trunc(lshr) operations. If so, we split the PHI into
+/// the various pieces being extracted. This sort of thing is introduced when
+/// SROA promotes an aggregate to large integer values.
///
/// TODO: The user of the trunc may be an bitcast to float/double/vector or an
/// inttoptr. We should produce new PHIs in the right type.
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 53950ae..2a81689 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2125,7 +2125,7 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
// Truncate the condition operand if the new type is equal to or larger than
// the largest legal integer type. We need to be conservative here since
- // x86 generates redundant zero-extenstion instructions if the operand is
+ // x86 generates redundant zero-extension instructions if the operand is
// truncated to i8 or i16.
bool TruncCond = false;
if (NewWidth > 0 && BitWidth > NewWidth &&
@@ -3046,7 +3046,7 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnFunction(Function &F) override;
};
-} // namespace
+}
void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 2dd2fe6..e7ef9f9 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1144,6 +1144,8 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
// Globals from llvm.metadata aren't emitted, do not instrument them.
if (Section == "llvm.metadata") return false;
+ // Do not instrument globals from special LLVM sections.
+ if (Section.find("__llvm") != StringRef::npos) return false;
// Callbacks put into the CRT initializer/terminator sections
// should not be instrumented.
@@ -1672,12 +1674,6 @@ void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined(
}
}
-static DebugLoc getFunctionEntryDebugLocation(Function &F) {
- for (const auto &Inst : F.getEntryBlock())
- if (!isa<AllocaInst>(Inst)) return Inst.getDebugLoc();
- return DebugLoc();
-}
-
PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
Value *ValueIfTrue,
Instruction *ThenTerm,
@@ -1730,7 +1726,9 @@ void FunctionStackPoisoner::poisonStack() {
if (AllocaVec.size() == 0) return;
int StackMallocIdx = -1;
- DebugLoc EntryDebugLocation = getFunctionEntryDebugLocation(F);
+ DebugLoc EntryDebugLocation;
+ if (auto SP = getDISubprogram(&F))
+ EntryDebugLocation = DebugLoc::get(SP->getScopeLine(), 0, SP);
Instruction *InsBefore = AllocaVec[0];
IRBuilder<> IRB(InsBefore);
@@ -1753,11 +1751,10 @@ void FunctionStackPoisoner::poisonStack() {
uint64_t LocalStackSize = L.FrameSize;
bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&
LocalStackSize <= kMaxStackMallocSize;
- // Don't do dynamic alloca in presence of inline asm: too often it makes
- // assumptions on which registers are available. Don't do stack malloc in the
- // presence of inline asm on 32-bit platforms for the same reason.
+ // Don't do dynamic alloca or stack malloc in presence of inline asm:
+ // too often it makes assumptions on which registers are available.
bool DoDynamicAlloca = ClDynamicAllocaStack && !HasNonEmptyInlineAsm;
- DoStackMalloc &= !HasNonEmptyInlineAsm || ASan.LongSize != 32;
+ DoStackMalloc &= !HasNonEmptyInlineAsm;
Value *StaticAlloca =
DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp b/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
index a887425..f685803 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -63,7 +63,7 @@ namespace {
void emitBranchToTrap(Value *Cmp = nullptr);
bool instrument(Value *Ptr, Value *Val, const DataLayout &DL);
};
-} // namespace
+}
char BoundsChecking::ID = 0;
INITIALIZE_PASS(BoundsChecking, "bounds-checking", "Run-time bounds checking",
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 4309157..2de6e1a 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -346,7 +346,7 @@ class DFSanVisitor : public InstVisitor<DFSanVisitor> {
void visitMemTransferInst(MemTransferInst &I);
};
-} // namespace
+}
char DataFlowSanitizer::ID;
INITIALIZE_PASS(DataFlowSanitizer, "dfsan",
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 43caf1f..9a3ed5c 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -139,7 +139,7 @@ namespace {
LLVMContext *Ctx;
SmallVector<std::unique_ptr<GCOVFunction>, 16> Funcs;
};
-} // namespace
+}
char GCOVProfiler::ID = 0;
INITIALIZE_PASS(GCOVProfiler, "insert-gcov-profiling",
@@ -419,7 +419,7 @@ namespace {
DenseMap<BasicBlock *, GCOVBlock> Blocks;
GCOVBlock ReturnBlock;
};
-} // namespace
+}
std::string GCOVProfiler::mangleName(const DICompileUnit *CU,
const char *NewStem) {
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
index 05a9c8a..712bf8e 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
@@ -362,7 +362,7 @@ void InstrProfiling::emitInitialization() {
Function::Create(SetNameTy, GlobalValue::ExternalLinkage,
"__llvm_profile_override_default_filename", M);
- // Create variable for profile name
+ // Create variable for profile name.
Constant *ProfileNameConst =
ConstantDataArray::getString(M->getContext(), InstrProfileOutput, true);
GlobalVariable *ProfileName =
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 63eee2f..286a563 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -236,6 +236,14 @@ static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
0x002000000000, // OriginBase
};
+// ppc64 Linux
+static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
+ 0x200000000000, // AndMask
+ 0x100000000000, // XorMask
+ 0x080000000000, // ShadowBase
+ 0x1C0000000000, // OriginBase
+};
+
// i386 FreeBSD
static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
0x000180000000, // AndMask
@@ -262,6 +270,11 @@ static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
&Linux_MIPS64_MemoryMapParams,
};
+static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
+ NULL,
+ &Linux_PowerPC64_MemoryMapParams,
+};
+
static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
&FreeBSD_I386_MemoryMapParams,
&FreeBSD_X86_64_MemoryMapParams,
@@ -479,6 +492,10 @@ bool MemorySanitizer::doInitialization(Module &M) {
case Triple::mips64el:
MapParams = Linux_MIPS_MemoryMapParams.bits64;
break;
+ case Triple::ppc64:
+ case Triple::ppc64le:
+ MapParams = Linux_PowerPC_MemoryMapParams.bits64;
+ break;
default:
report_fatal_error("unsupported architecture");
}
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/SafeStack.cpp b/contrib/llvm/lib/Transforms/Instrumentation/SafeStack.cpp
index 13c5412..6b185a2 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/SafeStack.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/SafeStack.cpp
@@ -165,7 +165,7 @@ class SafeStack : public FunctionPass {
Type *Int32Ty;
Type *Int8Ty;
- Constant *UnsafeStackPtr;
+ Constant *UnsafeStackPtr = nullptr;
/// Unsafe stack alignment. Each stack frame must ensure that the stack is
/// aligned to this value. We need to re-align the unsafe stack if the
@@ -232,8 +232,6 @@ public:
Int32Ty = Type::getInt32Ty(M.getContext());
Int8Ty = Type::getInt8Ty(M.getContext());
- UnsafeStackPtr = getOrCreateUnsafeStackPtr(M);
-
return false;
}
@@ -576,6 +574,9 @@ bool SafeStack::runOnFunction(Function &F) {
if (!StackRestorePoints.empty())
++NumUnsafeStackRestorePointsFunctions;
+ if (!UnsafeStackPtr)
+ UnsafeStackPtr = getOrCreateUnsafeStackPtr(*F.getParent());
+
// The top of the unsafe stack after all unsafe static allocas are allocated.
Value *StaticTop = moveStaticAllocasToUnsafeStack(F, StaticAllocas, Returns);
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index dff39ef..7a5b4cb 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -375,6 +375,13 @@ void SanitizerCoverageModule::SetNoSanitizeMetadata(Instruction *I) {
void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
bool UseCalls) {
+ // Don't insert coverage for unreachable blocks: we will never call
+ // __sanitizer_cov() for them, so counting them in
+ // NumberOfInstrumentedBlocks() might complicate calculation of code coverage
+ // percentage. Also, unreachable instructions frequently have no debug
+ // locations.
+ if (isa<UnreachableInst>(BB.getTerminator()))
+ return;
BasicBlock::iterator IP = BB.getFirstInsertionPt(), BE = BB.end();
// Skip static allocas at the top of the entry block so they don't become
// dynamic when we split the block. If we used our optimized stack layout,
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/BlotMapVector.h b/contrib/llvm/lib/Transforms/ObjCARC/BlotMapVector.h
index f9fde26..d6439b6 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/BlotMapVector.h
+++ b/contrib/llvm/lib/Transforms/ObjCARC/BlotMapVector.h
@@ -105,4 +105,4 @@ public:
return Map.empty();
}
};
-} // namespace llvm
+} //
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
index c7c77ec..d318643 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
@@ -50,7 +50,7 @@ namespace {
initializeObjCARCAPElimPass(*PassRegistry::getPassRegistry());
}
};
-} // namespace
+}
char ObjCARCAPElim::ID = 0;
INITIALIZE_PASS(ObjCARCAPElim,
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp
index 94b092c..3893aab 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp
@@ -57,9 +57,8 @@ ObjCARCAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AliasAnalysis::getAnalysisUsage(AU);
}
-AliasAnalysis::AliasResult
-ObjCARCAliasAnalysis::alias(const MemoryLocation &LocA,
- const MemoryLocation &LocB) {
+AliasResult ObjCARCAliasAnalysis::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
if (!EnableARCOpts)
return AliasAnalysis::alias(LocA, LocB);
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
index 080dbc0..baca76b 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
@@ -101,7 +101,7 @@ namespace {
initializeObjCARCContractPass(*PassRegistry::getPassRegistry());
}
};
-} // namespace
+}
//===----------------------------------------------------------------------===//
// Implementation
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
index 4f2f7da..53c19c3 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
@@ -63,7 +63,7 @@ namespace {
initializeObjCARCExpandPass(*PassRegistry::getPassRegistry());
}
};
-} // namespace
+}
char ObjCARCExpand::ID = 0;
INITIALIZE_PASS(ObjCARCExpand,
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
index cdbbfac..9edbb17 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
@@ -313,7 +313,7 @@ namespace {
};
const unsigned BBState::OverflowOccurredValue = 0xffffffff;
-} // namespace
+}
namespace llvm {
raw_ostream &operator<<(raw_ostream &OS,
@@ -551,7 +551,7 @@ namespace {
initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
}
};
-} // namespace
+}
char ObjCARCOpt::ID = 0;
INITIALIZE_PASS_BEGIN(ObjCARCOpt,
@@ -1846,7 +1846,7 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
Value *Arg = Call->getArgOperand(0);
Value *EarlierArg = EarlierCall->getArgOperand(0);
switch (PA.getAA()->alias(Arg, EarlierArg)) {
- case AliasAnalysis::MustAlias:
+ case MustAlias:
Changed = true;
// If the load has a builtin retain, insert a plain retain for it.
if (Class == ARCInstKind::LoadWeakRetained) {
@@ -1858,10 +1858,10 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
Call->replaceAllUsesWith(EarlierCall);
Call->eraseFromParent();
goto clobbered;
- case AliasAnalysis::MayAlias:
- case AliasAnalysis::PartialAlias:
+ case MayAlias:
+ case PartialAlias:
goto clobbered;
- case AliasAnalysis::NoAlias:
+ case NoAlias:
break;
}
break;
@@ -1875,7 +1875,7 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
Value *Arg = Call->getArgOperand(0);
Value *EarlierArg = EarlierCall->getArgOperand(0);
switch (PA.getAA()->alias(Arg, EarlierArg)) {
- case AliasAnalysis::MustAlias:
+ case MustAlias:
Changed = true;
// If the load has a builtin retain, insert a plain retain for it.
if (Class == ARCInstKind::LoadWeakRetained) {
@@ -1887,10 +1887,10 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
Call->eraseFromParent();
goto clobbered;
- case AliasAnalysis::MayAlias:
- case AliasAnalysis::PartialAlias:
+ case MayAlias:
+ case PartialAlias:
goto clobbered;
- case AliasAnalysis::NoAlias:
+ case NoAlias:
break;
}
break;
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp
index 8346345..9ffdfb4 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp
@@ -116,12 +116,12 @@ bool ProvenanceAnalysis::relatedCheck(const Value *A, const Value *B,
// Ask regular AliasAnalysis, for a first approximation.
switch (AA->alias(A, B)) {
- case AliasAnalysis::NoAlias:
+ case NoAlias:
return false;
- case AliasAnalysis::MustAlias:
- case AliasAnalysis::PartialAlias:
+ case MustAlias:
+ case PartialAlias:
return true;
- case AliasAnalysis::MayAlias:
+ case MayAlias:
break;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp b/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp
index fe0224b..d6fc916 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp
@@ -44,7 +44,7 @@ struct ADCE : public FunctionPass {
AU.setPreservesCFG();
}
};
-} // namespace
+}
char ADCE::ID = 0;
INITIALIZE_PASS(ADCE, "adce", "Aggressive Dead Code Elimination", false, false)
diff --git a/contrib/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/contrib/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
index a4e5446..8918909 100644
--- a/contrib/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -76,7 +76,7 @@ struct AlignmentFromAssumptions : public FunctionPass {
const SCEV *&OffSCEV);
bool processAssumption(CallInst *I);
};
-} // namespace
+}
char AlignmentFromAssumptions::ID = 0;
static const char aip_name[] = "Alignment from assumptions";
diff --git a/contrib/llvm/lib/Transforms/Scalar/BDCE.cpp b/contrib/llvm/lib/Transforms/Scalar/BDCE.cpp
index 8ffbacd..09c605e 100644
--- a/contrib/llvm/lib/Transforms/Scalar/BDCE.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/BDCE.cpp
@@ -66,7 +66,7 @@ struct BDCE : public FunctionPass {
AssumptionCache *AC;
DominatorTree *DT;
};
-} // namespace
+}
char BDCE::ID = 0;
INITIALIZE_PASS_BEGIN(BDCE, "bdce", "Bit-Tracking Dead Code Elimination",
diff --git a/contrib/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp b/contrib/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
index cc1dc94..4288742 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -171,7 +171,7 @@ private:
void deleteDeadCastInst() const;
bool optimizeConstants(Function &Fn);
};
-} // namespace
+}
char ConstantHoisting::ID = 0;
INITIALIZE_PASS_BEGIN(ConstantHoisting, "consthoist", "Constant Hoisting",
diff --git a/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp b/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp
index e3df86e..c974ebb 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp
@@ -47,7 +47,7 @@ namespace {
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
};
-} // namespace
+}
char ConstantPropagation::ID = 0;
INITIALIZE_PASS_BEGIN(ConstantPropagation, "constprop",
diff --git a/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index b1809b7..79624b2 100644
--- a/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -56,7 +56,7 @@ namespace {
AU.addRequired<LazyValueInfo>();
}
};
-} // namespace
+}
char CorrelatedValuePropagation::ID = 0;
INITIALIZE_PASS_BEGIN(CorrelatedValuePropagation, "correlated-propagation",
diff --git a/contrib/llvm/lib/Transforms/Scalar/DCE.cpp b/contrib/llvm/lib/Transforms/Scalar/DCE.cpp
index aa628e5..3b262a2 100644
--- a/contrib/llvm/lib/Transforms/Scalar/DCE.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/DCE.cpp
@@ -60,7 +60,7 @@ namespace {
AU.setPreservesCFG();
}
};
-} // namespace
+}
char DeadInstElimination::ID = 0;
INITIALIZE_PASS(DeadInstElimination, "die",
@@ -87,7 +87,7 @@ namespace {
AU.setPreservesCFG();
}
};
-} // namespace
+}
char DCE::ID = 0;
INITIALIZE_PASS(DCE, "dce", "Dead Code Elimination", false, false)
diff --git a/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index c99dc5f..c505584 100644
--- a/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -92,7 +92,7 @@ namespace {
AU.addPreserved<MemoryDependenceAnalysis>();
}
};
-} // namespace
+}
char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
diff --git a/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 8b629ea..d536a93 100644
--- a/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -72,7 +72,7 @@ struct SimpleValue {
isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
}
};
-} // namespace
+}
namespace llvm {
template <> struct DenseMapInfo<SimpleValue> {
@@ -85,7 +85,7 @@ template <> struct DenseMapInfo<SimpleValue> {
static unsigned getHashValue(SimpleValue Val);
static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};
-} // namespace llvm
+}
unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
Instruction *Inst = Val.Inst;
@@ -219,7 +219,7 @@ struct CallValue {
return true;
}
};
-} // namespace
+}
namespace llvm {
template <> struct DenseMapInfo<CallValue> {
@@ -232,7 +232,7 @@ template <> struct DenseMapInfo<CallValue> {
static unsigned getHashValue(CallValue Val);
static bool isEqual(CallValue LHS, CallValue RHS);
};
-} // namespace llvm
+}
unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
Instruction *Inst = Val.Inst;
@@ -447,7 +447,7 @@ private:
ExpectedType);
}
};
-} // namespace
+}
bool EarlyCSE::processNode(DomTreeNode *Node) {
BasicBlock *BB = Node->getBlock();
@@ -764,7 +764,7 @@ public:
AU.setPreservesCFG();
}
};
-} // namespace
+}
char EarlyCSELegacyPass::ID = 0;
diff --git a/contrib/llvm/lib/Transforms/Scalar/FlattenCFGPass.cpp b/contrib/llvm/lib/Transforms/Scalar/FlattenCFGPass.cpp
index dd6ea8d..0430c18 100644
--- a/contrib/llvm/lib/Transforms/Scalar/FlattenCFGPass.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/FlattenCFGPass.cpp
@@ -36,7 +36,7 @@ public:
private:
AliasAnalysis *AA;
};
-} // namespace
+}
char FlattenCFGPass::ID = 0;
INITIALIZE_PASS_BEGIN(FlattenCFGPass, "flattencfg", "Flatten the CFG", false,
diff --git a/contrib/llvm/lib/Transforms/Scalar/Float2Int.cpp b/contrib/llvm/lib/Transforms/Scalar/Float2Int.cpp
index bb90c5f..c931422 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Float2Int.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Float2Int.cpp
@@ -79,7 +79,7 @@ namespace {
MapVector<Instruction*, Value*> ConvertedInsts;
LLVMContext *Ctx;
};
-} // namespace
+}
char Float2Int::ID = 0;
INITIALIZE_PASS(Float2Int, "float2int", "Float to int", false, false)
diff --git a/contrib/llvm/lib/Transforms/Scalar/GVN.cpp b/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
index d9308c4..60903c8 100644
--- a/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -138,7 +138,7 @@ namespace {
uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
void verifyRemoved(const Value *) const;
};
-} // namespace
+}
namespace llvm {
template <> struct DenseMapInfo<Expression> {
@@ -159,7 +159,7 @@ template <> struct DenseMapInfo<Expression> {
}
};
-} // namespace llvm
+}
//===----------------------------------------------------------------------===//
// ValueTable Internal Functions
@@ -723,7 +723,7 @@ namespace {
};
char GVN::ID = 0;
-} // namespace
+}
// The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
@@ -1783,13 +1783,9 @@ static void patchReplacementInstruction(Instruction *I, Value *Repl) {
// being replaced.
BinaryOperator *Op = dyn_cast<BinaryOperator>(I);
BinaryOperator *ReplOp = dyn_cast<BinaryOperator>(Repl);
- if (Op && ReplOp && isa<OverflowingBinaryOperator>(Op) &&
- isa<OverflowingBinaryOperator>(ReplOp)) {
- if (ReplOp->hasNoSignedWrap() && !Op->hasNoSignedWrap())
- ReplOp->setHasNoSignedWrap(false);
- if (ReplOp->hasNoUnsignedWrap() && !Op->hasNoUnsignedWrap())
- ReplOp->setHasNoUnsignedWrap(false);
- }
+ if (Op && ReplOp)
+ ReplOp->andIRFlags(Op);
+
if (Instruction *ReplInst = dyn_cast<Instruction>(Repl)) {
// FIXME: If both the original and replacement value are part of the
// same control-flow region (meaning that the execution of one
@@ -2808,6 +2804,10 @@ bool GVN::processFoldableCondBr(BranchInst *BI) {
if (!BI || BI->isUnconditional())
return false;
+ // If a branch has two identical successors, we cannot declare either dead.
+ if (BI->getSuccessor(0) == BI->getSuccessor(1))
+ return false;
+
ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
if (!Cond)
return false;
diff --git a/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index e931382..6f03754 100644
--- a/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -136,7 +136,7 @@ namespace {
void SinkUnusedInvariants(Loop *L);
};
-} // namespace
+}
char IndVarSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(IndVarSimplify, "indvars",
@@ -494,7 +494,7 @@ struct RewritePhi {
RewritePhi(PHINode *P, unsigned I, Value *V, bool H, bool S)
: PN(P), Ith(I), Val(V), HighCost(H), SafePhi(S) {}
};
-} // namespace
+}
//===----------------------------------------------------------------------===//
// RewriteLoopExitValues - Optimize IV users outside the loop.
@@ -758,7 +758,7 @@ namespace {
WideIVInfo() : NarrowIV(nullptr), WidestNativeType(nullptr),
IsSigned(false) {}
};
-} // namespace
+}
/// visitCast - Update information about the induction variable that is
/// extended by this sign or zero extend operation. This is used to determine
@@ -1321,7 +1321,7 @@ namespace {
// Implement the interface used by simplifyUsersOfIV.
void visitCast(CastInst *Cast) override { visitIVCast(Cast, WI, SE, TTI); }
};
-} // namespace
+}
/// SimplifyAndExtend - Iteratively perform simplification on a worklist of IV
/// users. Each successive simplification may push more users which may
@@ -2013,11 +2013,10 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
// Now that we're done iterating through lists, clean up any instructions
// which are now dead.
- while (!DeadInsts.empty()) {
- Value *V = static_cast<Value *>(DeadInsts.pop_back_val());
- if (Instruction *Inst = dyn_cast_or_null<Instruction>(V))
+ while (!DeadInsts.empty())
+ if (Instruction *Inst =
+ dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val()))
RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI);
- }
// The Rewriter may not be used from this point on.
diff --git a/contrib/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/contrib/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index ce1a0ca..cbdacad 100644
--- a/contrib/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -222,7 +222,7 @@ public:
};
char InductiveRangeCheckElimination::ID = 0;
-} // namespace
+}
INITIALIZE_PASS(InductiveRangeCheckElimination, "irce",
"Inductive range check elimination", false, false)
@@ -618,7 +618,7 @@ public:
bool run();
};
-} // namespace
+}
void LoopConstrainer::replacePHIBlock(PHINode *PN, BasicBlock *Block,
BasicBlock *ReplaceBy) {
diff --git a/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index 7316db6..1130d22 100644
--- a/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -138,7 +138,7 @@ namespace {
bool SimplifyPartiallyRedundantLoad(LoadInst *LI);
bool TryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB);
};
-} // namespace
+}
char JumpThreading::ID = 0;
INITIALIZE_PASS_BEGIN(JumpThreading, "jump-threading",
diff --git a/contrib/llvm/lib/Transforms/Scalar/LICM.cpp b/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
index e501946..f0e6d64 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -156,7 +156,7 @@ namespace {
/// Simple Analysis hook. Delete loop L from alias set map.
void deleteAnalysisLoop(Loop *L) override;
};
-} // namespace
+}
char LICM::ID = 0;
INITIALIZE_PASS_BEGIN(LICM, "licm", "Loop Invariant Code Motion", false, false)
@@ -777,7 +777,7 @@ namespace {
AST.deleteValue(I);
}
};
-} // namespace
+} // end anon namespace
/// Try to promote memory values to scalars by sinking stores out of the
/// loop and moving loads to before the loop. We do this by looping over
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoadCombine.cpp b/contrib/llvm/lib/Transforms/Scalar/LoadCombine.cpp
index 3dbf6ac..c19cd19 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoadCombine.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoadCombine.cpp
@@ -77,7 +77,7 @@ private:
bool aggregateLoads(SmallVectorImpl<LoadPOPPair> &);
bool combineLoads(SmallVectorImpl<LoadPOPPair> &);
};
-} // namespace
+}
bool LoadCombine::doInitialization(Function &F) {
DEBUG(dbgs() << "LoadCombine function: " << F.getName() << "\n");
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
index 02760ff..98b068e 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -57,7 +57,7 @@ namespace {
bool &Changed, BasicBlock *Preheader);
};
-} // namespace
+}
char LoopDeletion::ID = 0;
INITIALIZE_PASS_BEGIN(LoopDeletion, "loop-deletion",
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopDistribute.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
index d21a7db..0325d26 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
@@ -635,10 +635,11 @@ public:
LoopVersioning(const LoopAccessInfo &LAI, Loop *L, LoopInfo *LI,
DominatorTree *DT,
const SmallVector<int, 8> *PtrToPartition = nullptr)
- : OrigLoop(L), NonDistributedLoop(nullptr),
+ : VersionedLoop(L), NonVersionedLoop(nullptr),
PtrToPartition(PtrToPartition), LAI(LAI), LI(LI), DT(DT) {}
- /// \brief Returns true if we need memchecks to distribute the loop.
+ /// \brief Returns true if we need memchecks to disambiguate may-aliasing
+ /// accesses.
bool needsRuntimeChecks() const {
return LAI.getRuntimePointerCheck()->needsAnyChecking(PtrToPartition);
}
@@ -649,49 +650,51 @@ public:
Instruction *FirstCheckInst;
Instruction *MemRuntimeCheck;
// Add the memcheck in the original preheader (this is empty initially).
- BasicBlock *MemCheckBB = OrigLoop->getLoopPreheader();
+ BasicBlock *MemCheckBB = VersionedLoop->getLoopPreheader();
std::tie(FirstCheckInst, MemRuntimeCheck) =
LAI.addRuntimeCheck(MemCheckBB->getTerminator(), PtrToPartition);
assert(MemRuntimeCheck && "called even though needsAnyChecking = false");
// Rename the block to make the IR more readable.
- MemCheckBB->setName(OrigLoop->getHeader()->getName() + ".ldist.memcheck");
+ MemCheckBB->setName(VersionedLoop->getHeader()->getName() +
+ ".lver.memcheck");
// Create empty preheader for the loop (and after cloning for the
- // original/nondist loop).
+ // non-versioned loop).
BasicBlock *PH =
SplitBlock(MemCheckBB, MemCheckBB->getTerminator(), DT, LI);
- PH->setName(OrigLoop->getHeader()->getName() + ".ph");
+ PH->setName(VersionedLoop->getHeader()->getName() + ".ph");
// Clone the loop including the preheader.
//
// FIXME: This does not currently preserve SimplifyLoop because the exit
// block is a join between the two loops.
- SmallVector<BasicBlock *, 8> NonDistributedLoopBlocks;
- NonDistributedLoop =
- cloneLoopWithPreheader(PH, MemCheckBB, OrigLoop, VMap, ".ldist.nondist",
- LI, DT, NonDistributedLoopBlocks);
- remapInstructionsInLoop(NonDistributedLoopBlocks, VMap);
+ SmallVector<BasicBlock *, 8> NonVersionedLoopBlocks;
+ NonVersionedLoop =
+ cloneLoopWithPreheader(PH, MemCheckBB, VersionedLoop, VMap,
+ ".lver.orig", LI, DT, NonVersionedLoopBlocks);
+ remapInstructionsInLoop(NonVersionedLoopBlocks, VMap);
// Insert the conditional branch based on the result of the memchecks.
Instruction *OrigTerm = MemCheckBB->getTerminator();
- BranchInst::Create(NonDistributedLoop->getLoopPreheader(),
- OrigLoop->getLoopPreheader(), MemRuntimeCheck, OrigTerm);
+ BranchInst::Create(NonVersionedLoop->getLoopPreheader(),
+ VersionedLoop->getLoopPreheader(), MemRuntimeCheck,
+ OrigTerm);
OrigTerm->eraseFromParent();
// The loops merge in the original exit block. This is now dominated by the
// memchecking block.
- DT->changeImmediateDominator(OrigLoop->getExitBlock(), MemCheckBB);
+ DT->changeImmediateDominator(VersionedLoop->getExitBlock(), MemCheckBB);
}
/// \brief Adds the necessary PHI nodes for the versioned loops based on the
/// loop-defined values used outside of the loop.
void addPHINodes(const SmallVectorImpl<Instruction *> &DefsUsedOutside) {
- BasicBlock *PHIBlock = OrigLoop->getExitBlock();
+ BasicBlock *PHIBlock = VersionedLoop->getExitBlock();
assert(PHIBlock && "No single successor to loop exit block");
for (auto *Inst : DefsUsedOutside) {
- auto *NonDistInst = cast<Instruction>(VMap[Inst]);
+ auto *NonVersionedLoopInst = cast<Instruction>(VMap[Inst]);
PHINode *PN;
// First see if we have a single-operand PHI with the value defined by the
@@ -704,24 +707,25 @@ public:
}
// If not create it.
if (!PN) {
- PN = PHINode::Create(Inst->getType(), 2, Inst->getName() + ".ldist",
+ PN = PHINode::Create(Inst->getType(), 2, Inst->getName() + ".lver",
PHIBlock->begin());
for (auto *User : Inst->users())
- if (!OrigLoop->contains(cast<Instruction>(User)->getParent()))
+ if (!VersionedLoop->contains(cast<Instruction>(User)->getParent()))
User->replaceUsesOfWith(Inst, PN);
- PN->addIncoming(Inst, OrigLoop->getExitingBlock());
+ PN->addIncoming(Inst, VersionedLoop->getExitingBlock());
}
- // Add the new incoming value from the non-distributed loop.
- PN->addIncoming(NonDistInst, NonDistributedLoop->getExitingBlock());
+ // Add the new incoming value from the non-versioned loop.
+ PN->addIncoming(NonVersionedLoopInst,
+ NonVersionedLoop->getExitingBlock());
}
}
private:
/// \brief The original loop. This becomes the "versioned" one, i.e. control
/// goes if the memchecks all pass.
- Loop *OrigLoop;
+ Loop *VersionedLoop;
/// \brief The fall-back loop, i.e. if any of the memchecks fail.
- Loop *NonDistributedLoop;
+ Loop *NonVersionedLoop;
/// \brief For each memory pointer it contains the partitionId it is used in.
/// If nullptr, no partitioning is used.
@@ -730,8 +734,8 @@ private:
/// If the pointer is used in multiple partitions the entry is set to -1.
const SmallVector<int, 8> *PtrToPartition;
- /// \brief This maps the instructions from OrigLoop to their counterpart in
- /// NonDistributedLoop.
+ /// \brief This maps the instructions from VersionedLoop to their counterpart
+ /// in NonVersionedLoop.
ValueToValueMapTy VMap;
/// \brief Analyses used.
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 3de1333..714ce91 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -209,7 +209,7 @@ namespace {
bool runOnNoncountableLoop();
bool runOnCountableLoop();
};
-} // namespace
+}
char LoopIdiomRecognize::ID = 0;
INITIALIZE_PASS_BEGIN(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
index 4c40f24..e125026 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
@@ -52,7 +52,7 @@ namespace {
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
};
-} // namespace
+}
char LoopInstSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(LoopInstSimplify, "loop-instsimplify",
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
index f6db9b1..ed103e6 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
@@ -438,7 +438,7 @@ namespace {
bool reroll(Instruction *IV, Loop *L, BasicBlock *Header, const SCEV *IterCount,
ReductionTracker &Reductions);
};
-} // namespace
+}
char LoopReroll::ID = 0;
INITIALIZE_PASS_BEGIN(LoopReroll, "loop-reroll", "Reroll loops", false, false)
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
index 2ba70ad..a675e12 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
@@ -79,7 +79,7 @@ namespace {
AssumptionCache *AC;
DominatorTree *DT;
};
-} // namespace
+}
char LoopRotate::ID = 0;
INITIALIZE_PASS_BEGIN(LoopRotate, "loop-rotate", "Rotate Loops", false, false)
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index ee72486..4b59f3d 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -116,7 +116,7 @@ public:
void dump() const;
};
-} // namespace
+}
void RegSortData::print(raw_ostream &OS) const {
OS << "[NumUses=" << UsedByIndices.count() << ']';
@@ -157,7 +157,7 @@ public:
const_iterator end() const { return RegSequence.end(); }
};
-} // namespace
+}
void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
@@ -281,7 +281,7 @@ struct Formula {
void dump() const;
};
-} // namespace
+}
/// DoInitialMatch - Recursion helper for InitialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
@@ -903,7 +903,7 @@ private:
SmallPtrSetImpl<const SCEV *> *LoserRegs);
};
-} // namespace
+}
/// RateRegister - Tally up interesting quantities from the given register.
void Cost::RateRegister(const SCEV *Reg,
@@ -1102,7 +1102,7 @@ struct LSRFixup {
void dump() const;
};
-} // namespace
+}
LSRFixup::LSRFixup()
: UserInst(nullptr), OperandValToReplace(nullptr), LUIdx(~size_t(0)),
@@ -1252,7 +1252,7 @@ public:
void dump() const;
};
-} // namespace
+}
/// HasFormula - Test whether this use as a formula which has the same
/// registers as the given formula.
@@ -1791,7 +1791,7 @@ public:
void dump() const;
};
-} // namespace
+}
/// OptimizeShadowIV - If IV is used in a int-to-float cast
/// inside the loop then try to eliminate the cast operation.
@@ -3644,7 +3644,7 @@ struct WorkItem {
void dump() const;
};
-} // namespace
+}
void WorkItem::print(raw_ostream &OS) const {
OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
@@ -4949,7 +4949,7 @@ private:
void getAnalysisUsage(AnalysisUsage &AU) const override;
};
-} // namespace
+}
char LoopStrengthReduce::ID = 0;
INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index d702dc0..9e7558d 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -229,7 +229,7 @@ namespace {
unsigned DynamicCostSavingsDiscount,
uint64_t UnrolledCost, uint64_t RolledDynamicCost);
};
-} // namespace
+}
char LoopUnroll::ID = 0;
INITIALIZE_PASS_BEGIN(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
index 5bdc2ec..cbc563b 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -43,6 +43,7 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -80,6 +81,7 @@ namespace {
struct LoopProperties {
unsigned CanBeUnswitchedCount;
+ unsigned WasUnswitchedCount;
unsigned SizeEstimation;
UnswitchedValsMap UnswitchedVals;
};
@@ -93,37 +95,52 @@ namespace {
UnswitchedValsMap *CurLoopInstructions;
LoopProperties *CurrentLoopProperties;
- // Max size of code we can produce on remained iterations.
+ // A loop unswitching with an estimated cost above this threshold
+ // is not performed. MaxSize is turned into unswitching quota for
+ // the current loop, and reduced correspondingly, though note that
+ // the quota is returned by releaseMemory() when the loop has been
+ // processed, so that MaxSize will return to its previous
+ // value. So in most cases MaxSize will equal the Threshold flag
+ // when a new loop is processed. An exception to that is that
+ // MaxSize will have a smaller value while processing nested loops
+ // that were introduced due to loop unswitching of an outer loop.
+ //
+ // FIXME: The way that MaxSize works is subtle and depends on the
+ // pass manager processing loops and calling releaseMemory() in a
+ // specific order. It would be good to find a more straightforward
+ // way of doing what MaxSize does.
unsigned MaxSize;
- public:
-
- LUAnalysisCache() :
- CurLoopInstructions(nullptr), CurrentLoopProperties(nullptr),
- MaxSize(Threshold)
- {}
-
- // Analyze loop. Check its size, calculate is it possible to unswitch
- // it. Returns true if we can unswitch this loop.
- bool countLoop(const Loop *L, const TargetTransformInfo &TTI,
- AssumptionCache *AC);
-
- // Clean all data related to given loop.
- void forgetLoop(const Loop *L);
-
- // Mark case value as unswitched.
- // Since SI instruction can be partly unswitched, in order to avoid
- // extra unswitching in cloned loops keep track all unswitched values.
- void setUnswitched(const SwitchInst *SI, const Value *V);
-
- // Check was this case value unswitched before or not.
- bool isUnswitched(const SwitchInst *SI, const Value *V);
-
- // Clone all loop-unswitch related loop properties.
- // Redistribute unswitching quotas.
- // Note, that new loop data is stored inside the VMap.
- void cloneData(const Loop *NewLoop, const Loop *OldLoop,
- const ValueToValueMapTy &VMap);
+ public:
+ LUAnalysisCache()
+ : CurLoopInstructions(nullptr), CurrentLoopProperties(nullptr),
+ MaxSize(Threshold) {}
+
+ // Analyze loop. Check its size, calculate is it possible to unswitch
+ // it. Returns true if we can unswitch this loop.
+ bool countLoop(const Loop *L, const TargetTransformInfo &TTI,
+ AssumptionCache *AC);
+
+ // Clean all data related to given loop.
+ void forgetLoop(const Loop *L);
+
+ // Mark case value as unswitched.
+ // Since SI instruction can be partly unswitched, in order to avoid
+ // extra unswitching in cloned loops keep track all unswitched values.
+ void setUnswitched(const SwitchInst *SI, const Value *V);
+
+ // Check was this case value unswitched before or not.
+ bool isUnswitched(const SwitchInst *SI, const Value *V);
+
+ // Returns true if another unswitching could be done within the cost
+ // threshold.
+ bool CostAllowsUnswitching();
+
+ // Clone all loop-unswitch related loop properties.
+ // Redistribute unswitching quotas.
+ // Note, that new loop data is stored inside the VMap.
+ void cloneData(const Loop *NewLoop, const Loop *OldLoop,
+ const ValueToValueMapTy &VMap);
};
class LoopUnswitch : public LoopPass {
@@ -195,10 +212,12 @@ namespace {
/// Update the appropriate Phi nodes as we do so.
void SplitExitEdges(Loop *L, const SmallVectorImpl<BasicBlock *> &ExitBlocks);
- bool UnswitchIfProfitable(Value *LoopCond, Constant *Val);
+ bool UnswitchIfProfitable(Value *LoopCond, Constant *Val,
+ TerminatorInst *TI = nullptr);
void UnswitchTrivialCondition(Loop *L, Value *Cond, Constant *Val,
- BasicBlock *ExitBlock);
- void UnswitchNontrivialCondition(Value *LIC, Constant *OnVal, Loop *L);
+ BasicBlock *ExitBlock, TerminatorInst *TI);
+ void UnswitchNontrivialCondition(Value *LIC, Constant *OnVal, Loop *L,
+ TerminatorInst *TI);
void RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
Constant *Val, bool isEqual);
@@ -206,14 +225,15 @@ namespace {
void EmitPreheaderBranchOnCondition(Value *LIC, Constant *Val,
BasicBlock *TrueDest,
BasicBlock *FalseDest,
- Instruction *InsertPt);
+ Instruction *InsertPt,
+ TerminatorInst *TI);
void SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L);
bool IsTrivialUnswitchCondition(Value *Cond, Constant **Val = nullptr,
BasicBlock **LoopExit = nullptr);
};
-} // namespace
+}
// Analyze loop. Check its size, calculate is it possible to unswitch
// it. Returns true if we can unswitch this loop.
@@ -242,12 +262,13 @@ bool LUAnalysisCache::countLoop(const Loop *L, const TargetTransformInfo &TTI,
// consideration code simplification opportunities and code that can
// be shared by the resultant unswitched loops.
CodeMetrics Metrics;
- for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
- I != E; ++I)
+ for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E;
+ ++I)
Metrics.analyzeBasicBlock(*I, TTI, EphValues);
- Props.SizeEstimation = std::min(Metrics.NumInsts, Metrics.NumBlocks * 5);
+ Props.SizeEstimation = Metrics.NumInsts;
Props.CanBeUnswitchedCount = MaxSize / (Props.SizeEstimation);
+ Props.WasUnswitchedCount = 0;
MaxSize -= Props.SizeEstimation * Props.CanBeUnswitchedCount;
if (Metrics.notDuplicatable) {
@@ -258,13 +279,6 @@ bool LUAnalysisCache::countLoop(const Loop *L, const TargetTransformInfo &TTI,
}
}
- if (!Props.CanBeUnswitchedCount) {
- DEBUG(dbgs() << "NOT unswitching loop %"
- << L->getHeader()->getName() << ", cost too high: "
- << L->getBlocks().size() << "\n");
- return false;
- }
-
// Be careful. This links are good only before new loop addition.
CurrentLoopProperties = &Props;
CurLoopInstructions = &Props.UnswitchedVals;
@@ -279,7 +293,8 @@ void LUAnalysisCache::forgetLoop(const Loop *L) {
if (LIt != LoopsProperties.end()) {
LoopProperties &Props = LIt->second;
- MaxSize += Props.CanBeUnswitchedCount * Props.SizeEstimation;
+ MaxSize += (Props.CanBeUnswitchedCount + Props.WasUnswitchedCount) *
+ Props.SizeEstimation;
LoopsProperties.erase(LIt);
}
@@ -299,6 +314,10 @@ bool LUAnalysisCache::isUnswitched(const SwitchInst *SI, const Value *V) {
return (*CurLoopInstructions)[SI].count(V);
}
+bool LUAnalysisCache::CostAllowsUnswitching() {
+ return CurrentLoopProperties->CanBeUnswitchedCount > 0;
+}
+
// Clone all loop-unswitch related loop properties.
// Redistribute unswitching quotas.
// Note, that new loop data is stored inside the VMap.
@@ -312,6 +331,8 @@ void LUAnalysisCache::cloneData(const Loop *NewLoop, const Loop *OldLoop,
// Reallocate "can-be-unswitched quota"
--OldLoopProps.CanBeUnswitchedCount;
+ ++OldLoopProps.WasUnswitchedCount;
+ NewLoopProps.WasUnswitchedCount = 0;
unsigned Quota = OldLoopProps.CanBeUnswitchedCount;
NewLoopProps.CanBeUnswitchedCount = Quota / 2;
OldLoopProps.CanBeUnswitchedCount = Quota - Quota / 2;
@@ -453,8 +474,8 @@ bool LoopUnswitch::processCurrentLoop() {
// unswitch on it if we desire.
Value *LoopCond = FindLIVLoopCondition(BI->getCondition(),
currentLoop, Changed);
- if (LoopCond && UnswitchIfProfitable(LoopCond,
- ConstantInt::getTrue(Context))) {
+ if (LoopCond &&
+ UnswitchIfProfitable(LoopCond, ConstantInt::getTrue(Context), TI)) {
++NumBranches;
return true;
}
@@ -643,7 +664,8 @@ bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val,
/// UnswitchIfProfitable - We have found that we can unswitch currentLoop when
/// LoopCond == Val to simplify the loop. If we decide that this is profitable,
/// unswitch the loop, reprocess the pieces, then return true.
-bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
+bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val,
+ TerminatorInst *TI) {
Function *F = loopHeader->getParent();
Constant *CondVal = nullptr;
BasicBlock *ExitBlock = nullptr;
@@ -651,17 +673,25 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
if (IsTrivialUnswitchCondition(LoopCond, &CondVal, &ExitBlock)) {
// If the condition is trivial, always unswitch. There is no code growth
// for this case.
- UnswitchTrivialCondition(currentLoop, LoopCond, CondVal, ExitBlock);
+ UnswitchTrivialCondition(currentLoop, LoopCond, CondVal, ExitBlock, TI);
return true;
}
// Check to see if it would be profitable to unswitch current loop.
+ if (!BranchesInfo.CostAllowsUnswitching()) {
+ DEBUG(dbgs() << "NOT unswitching loop %"
+ << currentLoop->getHeader()->getName()
+ << " at non-trivial condition '" << *Val
+ << "' == " << *LoopCond << "\n"
+ << ". Cost too high.\n");
+ return false;
+ }
// Do not do non-trivial unswitch while optimizing for size.
if (OptimizeForSize || F->hasFnAttribute(Attribute::OptimizeForSize))
return false;
- UnswitchNontrivialCondition(LoopCond, Val, currentLoop);
+ UnswitchNontrivialCondition(LoopCond, Val, currentLoop, TI);
return true;
}
@@ -685,25 +715,65 @@ static Loop *CloneLoop(Loop *L, Loop *PL, ValueToValueMapTy &VM,
return New;
}
+static void copyMetadata(Instruction *DstInst, const Instruction *SrcInst,
+ bool Swapped) {
+ if (!SrcInst || !SrcInst->hasMetadata())
+ return;
+
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+ SrcInst->getAllMetadata(MDs);
+ for (auto &MD : MDs) {
+ switch (MD.first) {
+ default:
+ break;
+ case LLVMContext::MD_prof:
+ if (Swapped && MD.second->getNumOperands() == 3 &&
+ isa<MDString>(MD.second->getOperand(0))) {
+ MDString *MDName = cast<MDString>(MD.second->getOperand(0));
+ if (MDName->getString() == "branch_weights") {
+ auto *ValT = cast_or_null<ConstantAsMetadata>(
+ MD.second->getOperand(1))->getValue();
+ auto *ValF = cast_or_null<ConstantAsMetadata>(
+ MD.second->getOperand(2))->getValue();
+ assert(ValT && ValF && "Invalid Operands of branch_weights");
+ auto NewMD =
+ MDBuilder(DstInst->getParent()->getContext())
+ .createBranchWeights(cast<ConstantInt>(ValF)->getZExtValue(),
+ cast<ConstantInt>(ValT)->getZExtValue());
+ MD.second = NewMD;
+ }
+ }
+ // fallthrough.
+ case LLVMContext::MD_dbg:
+ DstInst->setMetadata(MD.first, MD.second);
+ }
+ }
+}
+
/// EmitPreheaderBranchOnCondition - Emit a conditional branch on two values
/// if LIC == Val, branch to TrueDst, otherwise branch to FalseDest. Insert the
/// code immediately before InsertPt.
void LoopUnswitch::EmitPreheaderBranchOnCondition(Value *LIC, Constant *Val,
BasicBlock *TrueDest,
BasicBlock *FalseDest,
- Instruction *InsertPt) {
+ Instruction *InsertPt,
+ TerminatorInst *TI) {
// Insert a conditional branch on LIC to the two preheaders. The original
// code is the true version and the new code is the false version.
Value *BranchVal = LIC;
+ bool Swapped = false;
if (!isa<ConstantInt>(Val) ||
Val->getType() != Type::getInt1Ty(LIC->getContext()))
BranchVal = new ICmpInst(InsertPt, ICmpInst::ICMP_EQ, LIC, Val);
- else if (Val != ConstantInt::getTrue(Val->getContext()))
+ else if (Val != ConstantInt::getTrue(Val->getContext())) {
// We want to enter the new loop when the condition is true.
std::swap(TrueDest, FalseDest);
+ Swapped = true;
+ }
// Insert the new branch.
BranchInst *BI = BranchInst::Create(TrueDest, FalseDest, BranchVal, InsertPt);
+ copyMetadata(BI, TI, Swapped);
// If either edge is critical, split it. This helps preserve LoopSimplify
// form for enclosing loops.
@@ -717,13 +787,14 @@ void LoopUnswitch::EmitPreheaderBranchOnCondition(Value *LIC, Constant *Val,
/// where the path through the loop that doesn't execute its body has no
/// side-effects), unswitch it. This doesn't involve any code duplication, just
/// moving the conditional branch outside of the loop and updating loop info.
-void LoopUnswitch::UnswitchTrivialCondition(Loop *L, Value *Cond,
- Constant *Val,
- BasicBlock *ExitBlock) {
+void LoopUnswitch::UnswitchTrivialCondition(Loop *L, Value *Cond, Constant *Val,
+ BasicBlock *ExitBlock,
+ TerminatorInst *TI) {
DEBUG(dbgs() << "loop-unswitch: Trivial-Unswitch loop %"
- << loopHeader->getName() << " [" << L->getBlocks().size()
- << " blocks] in Function " << L->getHeader()->getParent()->getName()
- << " on cond: " << *Val << " == " << *Cond << "\n");
+ << loopHeader->getName() << " [" << L->getBlocks().size()
+ << " blocks] in Function "
+ << L->getHeader()->getParent()->getName() << " on cond: " << *Val
+ << " == " << *Cond << "\n");
// First step, split the preheader, so that we know that there is a safe place
// to insert the conditional branch. We will change loopPreheader to have a
@@ -744,7 +815,7 @@ void LoopUnswitch::UnswitchTrivialCondition(Loop *L, Value *Cond,
// Okay, now we have a position to branch from and a position to branch to,
// insert the new conditional branch.
EmitPreheaderBranchOnCondition(Cond, Val, NewExit, NewPH,
- loopPreheader->getTerminator());
+ loopPreheader->getTerminator(), TI);
LPM->deleteSimpleAnalysisValue(loopPreheader->getTerminator(), L);
loopPreheader->getTerminator()->eraseFromParent();
@@ -780,7 +851,7 @@ void LoopUnswitch::SplitExitEdges(Loop *L,
/// to unswitch when LIC equal Val. Split it into loop versions and test the
/// condition outside of either loop. Return the loops created as Out1/Out2.
void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
- Loop *L) {
+ Loop *L, TerminatorInst *TI) {
Function *F = loopHeader->getParent();
DEBUG(dbgs() << "loop-unswitch: Unswitching loop %"
<< loopHeader->getName() << " [" << L->getBlocks().size()
@@ -897,7 +968,8 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
"Preheader splitting did not work correctly!");
// Emit the new branch that selects between the two versions of this loop.
- EmitPreheaderBranchOnCondition(LIC, Val, NewBlocks[0], LoopBlocks[0], OldBR);
+ EmitPreheaderBranchOnCondition(LIC, Val, NewBlocks[0], LoopBlocks[0], OldBR,
+ TI);
LPM->deleteSimpleAnalysisValue(OldBR, L);
OldBR->eraseFromParent();
diff --git a/contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp b/contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
index b8b35d4..3314e1e 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -138,7 +138,7 @@ namespace {
return Changed;
}
};
-} // namespace
+}
char LowerAtomic::ID = 0;
INITIALIZE_PASS(LowerAtomic, "loweratomic",
diff --git a/contrib/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp b/contrib/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
index b845c03..0c47cbd 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
@@ -181,7 +181,7 @@ public:
bool runOnFunction(Function &F) override { return lowerExpectIntrinsic(F); }
};
-} // namespace
+}
char LowerExpectIntrinsic::ID = 0;
INITIALIZE_PASS(LowerExpectIntrinsic, "lower-expect",
diff --git a/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 2c9f935..85012af 100644
--- a/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -153,7 +153,7 @@ struct MemsetRange {
bool isProfitableToUseMemset(const DataLayout &DL) const;
};
-} // namespace
+} // end anon namespace
bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
// If we found more than 4 stores to merge or 16 bytes, use memset.
@@ -237,7 +237,7 @@ public:
};
-} // namespace
+} // end anon namespace
/// addRange - Add a new store to the MemsetRanges data structure. This adds a
@@ -355,7 +355,7 @@ namespace {
};
char MemCpyOpt::ID = 0;
-} // namespace
+}
// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }
diff --git a/contrib/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp b/contrib/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
index 886b6f5..243db8d 100644
--- a/contrib/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
@@ -156,7 +156,7 @@ private:
};
char MergedLoadStoreMotion::ID = 0;
-} // namespace
+}
///
/// \brief createMergedLoadStoreMotionPass - The public interface to this file.
diff --git a/contrib/llvm/lib/Transforms/Scalar/NaryReassociate.cpp b/contrib/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
index 4cf68b0..f42f830 100644
--- a/contrib/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -74,21 +74,18 @@
// 1) We only considers n-ary adds for now. This should be extended and
// generalized.
//
-// 2) Besides arithmetic operations, similar reassociation can be applied to
-// GEPs. For example, if
-// X = &arr[a]
-// dominates
-// Y = &arr[a + b]
-// we may rewrite Y into X + b.
-//
//===----------------------------------------------------------------------===//
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
@@ -115,6 +112,7 @@ public:
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<ScalarEvolution>();
AU.addPreserved<TargetLibraryInfoWrapperPass>();
+ AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<ScalarEvolution>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
@@ -163,12 +161,18 @@ private:
// GEP's pointer size, i.e., whether Index needs to be sign-extended in order
// to be an index of GEP.
bool requiresSignExtension(Value *Index, GetElementPtrInst *GEP);
+ // Returns whether V is known to be non-negative at context \c Ctxt.
+ bool isKnownNonNegative(Value *V, Instruction *Ctxt);
+ // Returns whether AO may sign overflow at context \c Ctxt. It computes a
+ // conservative result -- it answers true when not sure.
+ bool maySignOverflow(AddOperator *AO, Instruction *Ctxt);
+ AssumptionCache *AC;
+ const DataLayout *DL;
DominatorTree *DT;
ScalarEvolution *SE;
TargetLibraryInfo *TLI;
TargetTransformInfo *TTI;
- const DataLayout *DL;
// A lookup table quickly telling which instructions compute the given SCEV.
// Note that there can be multiple instructions at different locations
// computing to the same SCEV, so we map a SCEV to an instruction list. For
@@ -185,6 +189,7 @@ private:
char NaryReassociate::ID = 0;
INITIALIZE_PASS_BEGIN(NaryReassociate, "nary-reassociate", "Nary reassociation",
false, false)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
@@ -200,6 +205,7 @@ bool NaryReassociate::runOnFunction(Function &F) {
if (skipOptnoneFunction(F))
return false;
+ AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &getAnalysis<ScalarEvolution>();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
@@ -346,18 +352,44 @@ bool NaryReassociate::requiresSignExtension(Value *Index,
return cast<IntegerType>(Index->getType())->getBitWidth() < PointerSizeInBits;
}
+bool NaryReassociate::isKnownNonNegative(Value *V, Instruction *Ctxt) {
+ bool NonNegative, Negative;
+ // TODO: ComputeSignBits is expensive. Consider caching the results.
+ ComputeSignBit(V, NonNegative, Negative, *DL, 0, AC, Ctxt, DT);
+ return NonNegative;
+}
+
+bool NaryReassociate::maySignOverflow(AddOperator *AO, Instruction *Ctxt) {
+ if (AO->hasNoSignedWrap())
+ return false;
+
+ Value *LHS = AO->getOperand(0), *RHS = AO->getOperand(1);
+ // If LHS or RHS has the same sign as the sum, AO doesn't sign overflow.
+ // TODO: handle the negative case as well.
+ if (isKnownNonNegative(AO, Ctxt) &&
+ (isKnownNonNegative(LHS, Ctxt) || isKnownNonNegative(RHS, Ctxt)))
+ return false;
+
+ return true;
+}
+
GetElementPtrInst *
NaryReassociate::tryReassociateGEPAtIndex(GetElementPtrInst *GEP, unsigned I,
Type *IndexedType) {
Value *IndexToSplit = GEP->getOperand(I + 1);
- if (SExtInst *SExt = dyn_cast<SExtInst>(IndexToSplit))
+ if (SExtInst *SExt = dyn_cast<SExtInst>(IndexToSplit)) {
IndexToSplit = SExt->getOperand(0);
+ } else if (ZExtInst *ZExt = dyn_cast<ZExtInst>(IndexToSplit)) {
+ // zext can be treated as sext if the source is non-negative.
+ if (isKnownNonNegative(ZExt->getOperand(0), GEP))
+ IndexToSplit = ZExt->getOperand(0);
+ }
if (AddOperator *AO = dyn_cast<AddOperator>(IndexToSplit)) {
// If the I-th index needs sext and the underlying add is not equipped with
// nsw, we cannot split the add because
// sext(LHS + RHS) != sext(LHS) + sext(RHS).
- if (requiresSignExtension(IndexToSplit, GEP) && !AO->hasNoSignedWrap())
+ if (requiresSignExtension(IndexToSplit, GEP) && maySignOverflow(AO, GEP))
return nullptr;
Value *LHS = AO->getOperand(0), *RHS = AO->getOperand(1);
// IndexToSplit = LHS + RHS.
@@ -373,10 +405,9 @@ NaryReassociate::tryReassociateGEPAtIndex(GetElementPtrInst *GEP, unsigned I,
return nullptr;
}
-GetElementPtrInst *
-NaryReassociate::tryReassociateGEPAtIndex(GetElementPtrInst *GEP, unsigned I,
- Value *LHS, Value *RHS,
- Type *IndexedType) {
+GetElementPtrInst *NaryReassociate::tryReassociateGEPAtIndex(
+ GetElementPtrInst *GEP, unsigned I, Value *LHS, Value *RHS,
+ Type *IndexedType) {
// Look for GEP's closest dominator that has the same SCEV as GEP except that
// the I-th index is replaced with LHS.
SmallVector<const SCEV *, 4> IndexExprs;
@@ -384,6 +415,16 @@ NaryReassociate::tryReassociateGEPAtIndex(GetElementPtrInst *GEP, unsigned I,
IndexExprs.push_back(SE->getSCEV(*Index));
// Replace the I-th index with LHS.
IndexExprs[I] = SE->getSCEV(LHS);
+ if (isKnownNonNegative(LHS, GEP) &&
+ DL->getTypeSizeInBits(LHS->getType()) <
+ DL->getTypeSizeInBits(GEP->getOperand(I)->getType())) {
+ // Zero-extend LHS if it is non-negative. InstCombine canonicalizes sext to
+ // zext if the source operand is proved non-negative. We should do that
+ // consistently so that CandidateExpr more likely appears before. See
+ // @reassociate_gep_assume for an example of this canonicalization.
+ IndexExprs[I] =
+ SE->getZeroExtendExpr(IndexExprs[I], GEP->getOperand(I)->getType());
+ }
const SCEV *CandidateExpr = SE->getGEPExpr(
GEP->getSourceElementType(), SE->getSCEV(GEP->getPointerOperand()),
IndexExprs, GEP->isInBounds());
diff --git a/contrib/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp b/contrib/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
index 5423499..31d7df3 100644
--- a/contrib/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
@@ -46,7 +46,7 @@ namespace {
};
char PartiallyInlineLibCalls::ID = 0;
-} // namespace
+}
INITIALIZE_PASS(PartiallyInlineLibCalls, "partially-inline-libcalls",
"Partially inline calls to library functions", false, false)
diff --git a/contrib/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp b/contrib/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
index 670dcd2..9ecaf10 100644
--- a/contrib/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
@@ -160,7 +160,7 @@ struct PlaceBackedgeSafepointsImpl : public FunctionPass {
AU.setPreservesAll();
}
};
-} // namespace
+}
static cl::opt<bool> NoEntry("spp-no-entry", cl::Hidden, cl::init(false));
static cl::opt<bool> NoCall("spp-no-call", cl::Hidden, cl::init(false));
@@ -181,7 +181,7 @@ struct PlaceSafepoints : public FunctionPass {
// if that was worth doing
}
};
-} // namespace
+}
// Insert a safepoint poll immediately before the given instruction. Does
// not handle the parsability of state at the runtime call, that's the
diff --git a/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp b/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
index 9842fd7..d1acf78 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -154,7 +154,7 @@ namespace {
unsigned SymbolicRank;
bool isOr;
};
-} // namespace
+}
namespace {
class Reassociate : public FunctionPass {
@@ -197,7 +197,7 @@ namespace {
void OptimizeInst(Instruction *I);
Instruction *canonicalizeNegConstExpr(Instruction *I);
};
-} // namespace
+}
XorOpnd::XorOpnd(Value *V) {
assert(!isa<ConstantInt>(V) && "No ConstantInt");
@@ -936,6 +936,10 @@ static Value *NegateValue(Value *V, Instruction *BI) {
// Push the negates through the add.
I->setOperand(0, NegateValue(I->getOperand(0), BI));
I->setOperand(1, NegateValue(I->getOperand(1), BI));
+ if (I->getOpcode() == Instruction::Add) {
+ I->setHasNoUnsignedWrap(false);
+ I->setHasNoSignedWrap(false);
+ }
// We must move the add instruction here, because the neg instructions do
// not dominate the old add instruction in general. By moving it, we are
@@ -976,6 +980,12 @@ static Value *NegateValue(Value *V, Instruction *BI) {
InsertPt = TheNeg->getParent()->getParent()->getEntryBlock().begin();
}
TheNeg->moveBefore(InsertPt);
+ if (TheNeg->getOpcode() == Instruction::Sub) {
+ TheNeg->setHasNoUnsignedWrap(false);
+ TheNeg->setHasNoSignedWrap(false);
+ } else {
+ TheNeg->andIRFlags(BI);
+ }
return TheNeg;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp b/contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
index 2ff56e6..1b46727 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
@@ -58,7 +58,7 @@ namespace {
bool runOnFunction(Function &F) override;
};
-} // namespace
+}
char RegToMem::ID = 0;
INITIALIZE_PASS_BEGIN(RegToMem, "reg2mem", "Demote all values to stack slots",
diff --git a/contrib/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/contrib/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index c15bc1b..ae2ae3a 100644
--- a/contrib/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -183,7 +183,7 @@ struct PartiallyConstructedSafepointRecord {
/// Maps rematerialized copy to it's original value.
RematerializedValueMapTy RematerializedValues;
};
-} // namespace
+}
/// Compute the live-in set for every basic block in the function
static void computeLiveInValues(DominatorTree &DT, Function &F,
@@ -294,12 +294,17 @@ static void analyzeParsePointLiveness(
static Value *findBaseDefiningValue(Value *I);
-/// If we can trivially determine that the index specified in the given vector
-/// is a base pointer, return it. In cases where the entire vector is known to
-/// consist of base pointers, the entire vector will be returned. This
-/// indicates that the relevant extractelement is a valid base pointer and
-/// should be used directly.
-static Value *findBaseOfVector(Value *I, Value *Index) {
+/// Return a base defining value for the 'Index' element of the given vector
+/// instruction 'I'. If Index is null, returns a BDV for the entire vector
+/// 'I'. As an optimization, this method will try to determine when the
+/// element is known to already be a base pointer. If this can be established,
+/// the second value in the returned pair will be true. Note that either a
+/// vector or a pointer typed value can be returned. For the former, the
+/// vector returned is a BDV (and possibly a base) of the entire vector 'I'.
+/// If the later, the return pointer is a BDV (or possibly a base) for the
+/// particular element in 'I'.
+static std::pair<Value *, bool>
+findBaseDefiningValueOfVector(Value *I, Value *Index = nullptr) {
assert(I->getType()->isVectorTy() &&
cast<VectorType>(I->getType())->getElementType()->isPointerTy() &&
"Illegal to ask for the base pointer of a non-pointer type");
@@ -309,7 +314,7 @@ static Value *findBaseOfVector(Value *I, Value *Index) {
if (isa<Argument>(I))
// An incoming argument to the function is a base pointer
- return I;
+ return std::make_pair(I, true);
// We shouldn't see the address of a global as a vector value?
assert(!isa<GlobalVariable>(I) &&
@@ -320,7 +325,7 @@ static Value *findBaseOfVector(Value *I, Value *Index) {
if (isa<UndefValue>(I))
// utterly meaningless, but useful for dealing with partially optimized
// code.
- return I;
+ return std::make_pair(I, true);
// Due to inheritance, this must be _after_ the global variable and undef
// checks
@@ -328,38 +333,56 @@ static Value *findBaseOfVector(Value *I, Value *Index) {
assert(!isa<GlobalVariable>(I) && !isa<UndefValue>(I) &&
"order of checks wrong!");
assert(Con->isNullValue() && "null is the only case which makes sense");
- return Con;
+ return std::make_pair(Con, true);
}
-
+
if (isa<LoadInst>(I))
- return I;
-
+ return std::make_pair(I, true);
+
// For an insert element, we might be able to look through it if we know
- // something about the indexes, but if the indices are arbitrary values, we
- // can't without much more extensive scalarization.
+ // something about the indexes.
if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(I)) {
- Value *InsertIndex = IEI->getOperand(2);
- // This index is inserting the value, look for it's base
- if (InsertIndex == Index)
- return findBaseDefiningValue(IEI->getOperand(1));
- // Both constant, and can't be equal per above. This insert is definitely
- // not relevant, look back at the rest of the vector and keep trying.
- if (isa<ConstantInt>(Index) && isa<ConstantInt>(InsertIndex))
- return findBaseOfVector(IEI->getOperand(0), Index);
- }
-
- // Note: This code is currently rather incomplete. We are essentially only
- // handling cases where the vector element is trivially a base pointer. We
- // need to update the entire base pointer construction algorithm to know how
- // to track vector elements and potentially scalarize, but the case which
- // would motivate the work hasn't shown up in real workloads yet.
- llvm_unreachable("no base found for vector element");
+ if (Index) {
+ Value *InsertIndex = IEI->getOperand(2);
+ // This index is inserting the value, look for its BDV
+ if (InsertIndex == Index)
+ return std::make_pair(findBaseDefiningValue(IEI->getOperand(1)), false);
+ // Both constant, and can't be equal per above. This insert is definitely
+ // not relevant, look back at the rest of the vector and keep trying.
+ if (isa<ConstantInt>(Index) && isa<ConstantInt>(InsertIndex))
+ return findBaseDefiningValueOfVector(IEI->getOperand(0), Index);
+ }
+
+ // We don't know whether this vector contains entirely base pointers or
+ // not. To be conservatively correct, we treat it as a BDV and will
+ // duplicate code as needed to construct a parallel vector of bases.
+ return std::make_pair(IEI, false);
+ }
+
+ if (isa<ShuffleVectorInst>(I))
+ // We don't know whether this vector contains entirely base pointers or
+ // not. To be conservatively correct, we treat it as a BDV and will
+ // duplicate code as needed to construct a parallel vector of bases.
+  // TODO: There are a number of local optimizations which could be applied here
+  // for particular shufflevector patterns.
+ return std::make_pair(I, false);
+
+ // A PHI or Select is a base defining value. The outer findBasePointer
+ // algorithm is responsible for constructing a base value for this BDV.
+ assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
+ "unknown vector instruction - no base found for vector element");
+ return std::make_pair(I, false);
}
+static bool isKnownBaseResult(Value *V);
+
/// Helper function for findBasePointer - Will return a value which either a)
/// defines the base pointer for the input or b) blocks the simple search
/// (i.e. a PHI or Select of two derived pointers)
static Value *findBaseDefiningValue(Value *I) {
+ if (I->getType()->isVectorTy())
+ return findBaseDefiningValueOfVector(I).first;
+
assert(I->getType()->isPointerTy() &&
"Illegal to ask for the base pointer of a non-pointer type");
@@ -370,16 +393,39 @@ static Value *findBaseDefiningValue(Value *I) {
if (auto *EEI = dyn_cast<ExtractElementInst>(I)) {
Value *VectorOperand = EEI->getVectorOperand();
Value *Index = EEI->getIndexOperand();
- Value *VectorBase = findBaseOfVector(VectorOperand, Index);
- // If the result returned is a vector, we know the entire vector must
- // contain base pointers. In that case, the extractelement is a valid base
- // for this value.
- if (VectorBase->getType()->isVectorTy())
- return EEI;
- // Otherwise, we needed to look through the vector to find the base for
- // this particular element.
- assert(VectorBase->getType()->isPointerTy());
- return VectorBase;
+ std::pair<Value *, bool> pair =
+ findBaseDefiningValueOfVector(VectorOperand, Index);
+ Value *VectorBase = pair.first;
+ if (VectorBase->getType()->isPointerTy())
+      // We found a BDV for this specific element within the vector. This is an
+ // optimization, but in practice it covers most of the useful cases
+ // created via scalarization.
+ return VectorBase;
+ else {
+ assert(VectorBase->getType()->isVectorTy());
+ if (pair.second)
+ // If the entire vector returned is known to be entirely base pointers,
+        // then the extractelement is a valid base for this value.
+ return EEI;
+ else {
+ // Otherwise, we have an instruction which potentially produces a
+ // derived pointer and we need findBasePointers to clone code for us
+ // such that we can create an instruction which produces the
+ // accompanying base pointer.
+ // Note: This code is currently rather incomplete. We don't currently
+ // support the general form of shufflevector of insertelement.
+ // Conceptually, these are just 'base defining values' of the same
+ // variety as phi or select instructions. We need to update the
+ // findBasePointers algorithm to insert new 'base-only' versions of the
+        // original instructions. This is relatively straightforward to do, but
+ // the case which would motivate the work hasn't shown up in real
+ // workloads yet.
+ assert((isa<PHINode>(VectorBase) || isa<SelectInst>(VectorBase)) &&
+ "need to extend findBasePointers for generic vector"
+ "instruction cases");
+ return VectorBase;
+ }
+ }
}
if (isa<Argument>(I))
@@ -646,7 +692,7 @@ private:
llvm_unreachable("only three states!");
}
};
-} // namespace
+}
/// For a given value or instruction, figure out what base ptr it's derived
/// from. For gc objects, this is simply itself. On success, returns a value
/// which is the base pointer. (This is reliable and can be used for
@@ -1712,7 +1758,9 @@ static void findLiveReferences(
/// slightly non-trivial since it requires a format change. Given how rare
/// such cases are (for the moment?) scalarizing is an acceptable comprimise.
static void splitVectorValues(Instruction *StatepointInst,
- StatepointLiveSetTy &LiveSet, DominatorTree &DT) {
+ StatepointLiveSetTy &LiveSet,
+ DenseMap<Value *, Value *>& PointerToBase,
+ DominatorTree &DT) {
SmallVector<Value *, 16> ToSplit;
for (Value *V : LiveSet)
if (isa<VectorType>(V->getType()))
@@ -1721,14 +1769,14 @@ static void splitVectorValues(Instruction *StatepointInst,
if (ToSplit.empty())
return;
+ DenseMap<Value *, SmallVector<Value *, 16>> ElementMapping;
+
Function &F = *(StatepointInst->getParent()->getParent());
DenseMap<Value *, AllocaInst *> AllocaMap;
// First is normal return, second is exceptional return (invoke only)
DenseMap<Value *, std::pair<Value *, Value *>> Replacements;
for (Value *V : ToSplit) {
- LiveSet.erase(V);
-
AllocaInst *Alloca =
new AllocaInst(V->getType(), "", F.getEntryBlock().getFirstNonPHI());
AllocaMap[V] = Alloca;
@@ -1738,7 +1786,7 @@ static void splitVectorValues(Instruction *StatepointInst,
SmallVector<Value *, 16> Elements;
for (unsigned i = 0; i < VT->getNumElements(); i++)
Elements.push_back(Builder.CreateExtractElement(V, Builder.getInt32(i)));
- LiveSet.insert(Elements.begin(), Elements.end());
+ ElementMapping[V] = Elements;
auto InsertVectorReform = [&](Instruction *IP) {
Builder.SetInsertPoint(IP);
@@ -1771,6 +1819,7 @@ static void splitVectorValues(Instruction *StatepointInst,
Replacements[V].second = InsertVectorReform(IP);
}
}
+
for (Value *V : ToSplit) {
AllocaInst *Alloca = AllocaMap[V];
@@ -1814,6 +1863,25 @@ static void splitVectorValues(Instruction *StatepointInst,
for (Value *V : ToSplit)
Allocas.push_back(AllocaMap[V]);
PromoteMemToReg(Allocas, DT);
+
+ // Update our tracking of live pointers and base mappings to account for the
+ // changes we just made.
+ for (Value *V : ToSplit) {
+ auto &Elements = ElementMapping[V];
+
+ LiveSet.erase(V);
+ LiveSet.insert(Elements.begin(), Elements.end());
+ // We need to update the base mapping as well.
+ assert(PointerToBase.count(V));
+ Value *OldBase = PointerToBase[V];
+ auto &BaseElements = ElementMapping[OldBase];
+ PointerToBase.erase(V);
+ assert(Elements.size() == BaseElements.size());
+ for (unsigned i = 0; i < Elements.size(); i++) {
+ Value *Elem = Elements[i];
+ PointerToBase[Elem] = BaseElements[i];
+ }
+ }
}
// Helper function for the "rematerializeLiveValues". It walks use chain
@@ -2075,17 +2143,6 @@ static bool insertParsePoints(Function &F, DominatorTree &DT, Pass *P,
// site.
findLiveReferences(F, DT, P, toUpdate, records);
- // Do a limited scalarization of any live at safepoint vector values which
- // contain pointers. This enables this pass to run after vectorization at
- // the cost of some possible performance loss. TODO: it would be nice to
- // natively support vectors all the way through the backend so we don't need
- // to scalarize here.
- for (size_t i = 0; i < records.size(); i++) {
- struct PartiallyConstructedSafepointRecord &info = records[i];
- Instruction *statepoint = toUpdate[i].getInstruction();
- splitVectorValues(cast<Instruction>(statepoint), info.liveset, DT);
- }
-
// B) Find the base pointers for each live pointer
/* scope for caching */ {
// Cache the 'defining value' relation used in the computation and
@@ -2146,6 +2203,18 @@ static bool insertParsePoints(Function &F, DominatorTree &DT, Pass *P,
}
holders.clear();
+ // Do a limited scalarization of any live at safepoint vector values which
+ // contain pointers. This enables this pass to run after vectorization at
+ // the cost of some possible performance loss. TODO: it would be nice to
+ // natively support vectors all the way through the backend so we don't need
+ // to scalarize here.
+ for (size_t i = 0; i < records.size(); i++) {
+ struct PartiallyConstructedSafepointRecord &info = records[i];
+ Instruction *statepoint = toUpdate[i].getInstruction();
+ splitVectorValues(cast<Instruction>(statepoint), info.liveset,
+ info.PointerToBase, DT);
+ }
+
// In order to reduce live set of statepoint we might choose to rematerialize
// some values instead of relocating them. This is purelly an optimization and
// does not influence correctness.
diff --git a/contrib/llvm/lib/Transforms/Scalar/SROA.cpp b/contrib/llvm/lib/Transforms/Scalar/SROA.cpp
index f38b2b1..056dd11 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -127,7 +127,7 @@ typedef llvm::IRBuilder<true, ConstantFolder, IRBuilderPrefixedInserter<true>>
typedef llvm::IRBuilder<false, ConstantFolder, IRBuilderPrefixedInserter<false>>
IRBuilderTy;
#endif
-} // namespace
+}
namespace {
/// \brief A used slice of an alloca.
@@ -595,7 +595,7 @@ private:
/// the alloca.
SmallVector<Use *, 8> DeadOperands;
};
-} // namespace
+}
static Value *foldSelectInst(SelectInst &SI) {
// If the condition being selected on is a constant or the same value is
@@ -1173,7 +1173,7 @@ public:
}
}
};
-} // namespace
+} // end anon namespace
namespace {
/// \brief An optimization pass providing Scalar Replacement of Aggregates.
@@ -1268,7 +1268,7 @@ private:
void deleteDeadInstructions(SmallPtrSetImpl<AllocaInst *> &DeletedAllocas);
bool promoteAllocas(Function &F);
};
-} // namespace
+}
char SROA::ID = 0;
@@ -3119,7 +3119,7 @@ private:
return true;
}
};
-} // namespace
+}
namespace {
/// \brief Visitor to rewrite aggregate loads and stores as scalar.
@@ -3327,7 +3327,7 @@ private:
return false;
}
};
-} // namespace
+}
/// \brief Strip aggregate type wrapping.
///
diff --git a/contrib/llvm/lib/Transforms/Scalar/SampleProfile.cpp b/contrib/llvm/lib/Transforms/Scalar/SampleProfile.cpp
index 69e3a67..c8dfa54 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SampleProfile.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SampleProfile.cpp
@@ -174,7 +174,7 @@ protected:
/// \brief Flag indicating whether the profile input loaded successfully.
bool ProfileIsValid;
};
-} // namespace
+}
/// \brief Print the weight of edge \p E on stream \p OS.
///
diff --git a/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index e42c3da..d955da7 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -221,7 +221,7 @@ namespace {
}
};
-} // namespace
+}
char SROA_DT::ID = 0;
char SROA_SSAUp::ID = 0;
@@ -1123,7 +1123,7 @@ public:
}
}
};
-} // namespace
+} // end anon namespace
/// isSafeSelectToSpeculate - Select instructions that use an alloca and are
/// subsequently loaded can be rewritten to load both input pointers and then
diff --git a/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index 0733daf..231411a 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -48,8 +48,8 @@ UserBonusInstThreshold("bonus-inst-threshold", cl::Hidden, cl::init(1),
STATISTIC(NumSimpl, "Number of blocks simplified");
-/// mergeEmptyReturnBlocks - If we have more than one empty (other than phi
-/// node) return blocks, merge them together to promote recursive block merging.
+/// If we have more than one empty (other than phi node) return blocks,
+/// merge them together to promote recursive block merging.
static bool mergeEmptyReturnBlocks(Function &F) {
bool Changed = false;
@@ -124,7 +124,7 @@ static bool mergeEmptyReturnBlocks(Function &F) {
return Changed;
}
-/// iterativelySimplifyCFG - Call SimplifyCFG on all the blocks in the function,
+/// Call SimplifyCFG on all the blocks in the function,
/// iterating until no more changes are made.
static bool iterativelySimplifyCFG(Function &F, const TargetTransformInfo &TTI,
AssumptionCache *AC,
@@ -134,8 +134,7 @@ static bool iterativelySimplifyCFG(Function &F, const TargetTransformInfo &TTI,
while (LocalChange) {
LocalChange = false;
- // Loop over all of the basic blocks and remove them if they are unneeded...
- //
+ // Loop over all of the basic blocks and remove them if they are unneeded.
for (Function::iterator BBIt = F.begin(); BBIt != F.end(); ) {
if (SimplifyCFG(BBIt++, TTI, BonusInstThreshold, AC)) {
LocalChange = true;
@@ -159,7 +158,7 @@ static bool simplifyFunctionCFG(Function &F, const TargetTransformInfo &TTI,
// iterativelySimplifyCFG can (rarely) make some loops dead. If this happens,
// removeUnreachableBlocks is needed to nuke them, which means we should
// iterate between the two optimizations. We structure the code like this to
- // avoid reruning iterativelySimplifyCFG if the second pass of
+ // avoid rerunning iterativelySimplifyCFG if the second pass of
// removeUnreachableBlocks doesn't do anything.
if (!removeUnreachableBlocks(F))
return true;
@@ -220,7 +219,7 @@ struct CFGSimplifyPass : public FunctionPass {
AU.addRequired<TargetTransformInfoWrapperPass>();
}
};
-} // namespace
+}
char CFGSimplifyPass::ID = 0;
INITIALIZE_PASS_BEGIN(CFGSimplifyPass, "simplifycfg", "Simplify the CFG", false,
diff --git a/contrib/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp b/contrib/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
index f32769c..6d9d417 100644
--- a/contrib/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
@@ -224,11 +224,13 @@ FunctionPass *llvm::createStraightLineStrengthReducePass() {
bool StraightLineStrengthReduce::isBasisFor(const Candidate &Basis,
const Candidate &C) {
return (Basis.Ins != C.Ins && // skip the same instruction
+ // They must have the same type too. Basis.Base == C.Base doesn't
+ // guarantee their types are the same (PR23975).
+ Basis.Ins->getType() == C.Ins->getType() &&
// Basis must dominate C in order to rewrite C with respect to Basis.
DT->dominates(Basis.Ins->getParent(), C.Ins->getParent()) &&
// They share the same base, stride, and candidate kind.
- Basis.Base == C.Base &&
- Basis.Stride == C.Stride &&
+ Basis.Base == C.Base && Basis.Stride == C.Stride &&
Basis.CandidateKind == C.CandidateKind);
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index d23f515..c7de2e2 100644
--- a/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -120,7 +120,7 @@ namespace {
bool CanMoveAboveCall(Instruction *I, CallInst *CI);
Value *CanTransformAccumulatorRecursion(Instruction *I, CallInst *CI);
};
-} // namespace
+}
char TailCallElim::ID = 0;
INITIALIZE_PASS_BEGIN(TailCallElim, "tailcallelim",
@@ -246,7 +246,7 @@ struct AllocaDerivedValueTracker {
SmallPtrSet<Instruction *, 32> AllocaUsers;
SmallPtrSet<Instruction *, 32> EscapePoints;
};
-} // namespace
+}
bool TailCallElim::markTails(Function &F, bool &AllCallsAreTailCalls) {
if (F.callsFunctionThatReturnsTwice())
diff --git a/contrib/llvm/lib/Transforms/Utils/ASanStackFrameLayout.cpp b/contrib/llvm/lib/Transforms/Utils/ASanStackFrameLayout.cpp
index 72cdfa4..03c3a80 100644
--- a/contrib/llvm/lib/Transforms/Utils/ASanStackFrameLayout.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/ASanStackFrameLayout.cpp
@@ -107,4 +107,4 @@ ComputeASanStackFrameLayout(SmallVectorImpl<ASanStackVariableDescription> &Vars,
assert(Layout->FrameSize / Granularity == Layout->ShadowBytes.size());
}
-} // namespace llvm
+} // llvm namespace
diff --git a/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
index 798376e..53471de 100644
--- a/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -211,6 +211,11 @@ void llvm::ReplaceInstWithInst(BasicBlock::InstListType &BIL,
assert(I->getParent() == nullptr &&
"ReplaceInstWithInst: Instruction already inserted into basic block!");
+ // Copy debug location to newly added instruction, if it wasn't already set
+ // by the caller.
+ if (!I->getDebugLoc())
+ I->setDebugLoc(BI->getDebugLoc());
+
// Insert the new instruction into the basic block...
BasicBlock::iterator New = BIL.insert(BI, I);
@@ -716,7 +721,6 @@ TerminatorInst *llvm::SplitBlockAndInsertIfThen(Value *Cond,
CheckTerm->setDebugLoc(SplitBefore->getDebugLoc());
BranchInst *HeadNewTerm =
BranchInst::Create(/*ifTrue*/ThenBlock, /*ifFalse*/Tail, Cond);
- HeadNewTerm->setDebugLoc(SplitBefore->getDebugLoc());
HeadNewTerm->setMetadata(LLVMContext::MD_prof, BranchWeights);
ReplaceInstWithInst(HeadOldTerm, HeadNewTerm);
@@ -766,7 +770,6 @@ void llvm::SplitBlockAndInsertIfThenElse(Value *Cond, Instruction *SplitBefore,
(*ElseTerm)->setDebugLoc(SplitBefore->getDebugLoc());
BranchInst *HeadNewTerm =
BranchInst::Create(/*ifTrue*/ThenBlock, /*ifFalse*/ElseBlock, Cond);
- HeadNewTerm->setDebugLoc(SplitBefore->getDebugLoc());
HeadNewTerm->setMetadata(LLVMContext::MD_prof, BranchWeights);
ReplaceInstWithInst(HeadOldTerm, HeadNewTerm);
}
diff --git a/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp b/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
index 362cd9b..7e83c9e 100644
--- a/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
@@ -60,7 +60,7 @@ namespace {
AU.addPreservedID(LoopSimplifyID);
}
};
-} // namespace
+}
char BreakCriticalEdges::ID = 0;
INITIALIZE_PASS(BreakCriticalEdges, "break-crit-edges",
diff --git a/contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp b/contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
index 0771b29..f2d5e07 100644
--- a/contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
@@ -42,7 +42,7 @@ namespace {
DivPhiNodes(PHINode *InQuotient, PHINode *InRemainder)
: Quotient(InQuotient), Remainder(InRemainder) {}
};
-} // namespace
+}
namespace llvm {
template<>
@@ -69,7 +69,7 @@ namespace llvm {
};
typedef DenseMap<DivOpInfo, DivPhiNodes> DivCacheTy;
-} // namespace llvm
+}
// insertFastDiv - Substitutes the div/rem instruction with code that checks the
// value of the operands and uses a shorter-faster div/rem instruction when
diff --git a/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp b/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
index e623445..4f8d1df 100644
--- a/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -289,7 +289,7 @@ namespace {
BasicBlock::const_iterator StartingInst,
std::vector<const BasicBlock*> &ToClone);
};
-} // namespace
+}
/// The specified block is found to be reachable, clone it and
/// anything that it can reach.
diff --git a/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp b/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp
index 2693322..61f1811 100644
--- a/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp
@@ -99,7 +99,11 @@ Module *llvm::CloneModule(const Module *M, ValueToValueMapTy &VMap) {
SmallVector<ReturnInst*, 8> Returns; // Ignore returns cloned.
CloneFunctionInto(F, I, VMap, /*ModuleLevelChanges=*/true, Returns);
+
}
+
+ if (I->hasPersonalityFn())
+ F->setPersonalityFn(MapValue(I->getPersonalityFn(), VMap));
}
// And aliases
diff --git a/contrib/llvm/lib/Transforms/Utils/CtorUtils.cpp b/contrib/llvm/lib/Transforms/Utils/CtorUtils.cpp
index 4bbded8..dc95089 100644
--- a/contrib/llvm/lib/Transforms/Utils/CtorUtils.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CtorUtils.cpp
@@ -162,4 +162,4 @@ bool optimizeGlobalCtorsList(Module &M,
return true;
}
-} // namespace llvm
+} // End llvm namespace
diff --git a/contrib/llvm/lib/Transforms/Utils/FlattenCFG.cpp b/contrib/llvm/lib/Transforms/Utils/FlattenCFG.cpp
index 40a48c0..4eb3e3d 100644
--- a/contrib/llvm/lib/Transforms/Utils/FlattenCFG.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/FlattenCFG.cpp
@@ -46,7 +46,7 @@ public:
FlattenCFGOpt(AliasAnalysis *AA) : AA(AA) {}
bool run(BasicBlock *BB);
};
-} // namespace
+}
/// If \param [in] BB has more than one predecessor that is a conditional
/// branch, attempt to use parallel and/or for the branch condition. \returns
diff --git a/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp b/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
index ea84e7c..d2d60d7 100644
--- a/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -121,7 +121,7 @@ namespace {
}
}
};
-} // namespace
+}
/// Get or create a target for the branch from ResumeInsts.
BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
diff --git a/contrib/llvm/lib/Transforms/Utils/InstructionNamer.cpp b/contrib/llvm/lib/Transforms/Utils/InstructionNamer.cpp
index c9bec9a..da890a2 100644
--- a/contrib/llvm/lib/Transforms/Utils/InstructionNamer.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/InstructionNamer.cpp
@@ -50,7 +50,7 @@ namespace {
};
char InstNamer::ID = 0;
-} // namespace
+}
INITIALIZE_PASS(InstNamer, "instnamer",
"Assign names to anonymous instructions", false, false)
diff --git a/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp b/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
index fcc7986..9d40b69 100644
--- a/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
@@ -300,7 +300,7 @@ struct LCSSA : public FunctionPass {
AU.addPreserved<ScalarEvolution>();
}
};
-} // namespace
+}
char LCSSA::ID = 0;
INITIALIZE_PASS_BEGIN(LCSSA, "lcssa", "Loop-Closed SSA Form Pass", false, false)
diff --git a/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp b/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
index 8b0afa6..2e7d21c 100644
--- a/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
@@ -386,8 +386,9 @@ static BasicBlock *insertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader,
// Create and insert the new backedge block...
BasicBlock *BEBlock = BasicBlock::Create(Header->getContext(),
- Header->getName()+".backedge", F);
+ Header->getName() + ".backedge", F);
BranchInst *BETerminator = BranchInst::Create(Header, BEBlock);
+ BETerminator->setDebugLoc(Header->getFirstNonPHI()->getDebugLoc());
DEBUG(dbgs() << "LoopSimplify: Inserting unique backedge block "
<< BEBlock->getName() << "\n");
@@ -776,7 +777,7 @@ namespace {
/// verifyAnalysis() - Verify LoopSimplifyForm's guarantees.
void verifyAnalysis() const override;
};
-} // namespace
+}
char LoopSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(LoopSimplify, "loop-simplify",
diff --git a/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp b/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp
index c1b0645..4acd988 100644
--- a/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp
@@ -101,7 +101,7 @@ namespace {
return CI1->getValue().slt(CI2->getValue());
}
};
-} // namespace
+}
char LowerSwitch::ID = 0;
INITIALIZE_PASS(LowerSwitch, "lowerswitch",
diff --git a/contrib/llvm/lib/Transforms/Utils/MetaRenamer.cpp b/contrib/llvm/lib/Transforms/Utils/MetaRenamer.cpp
index 46dd65e..395a46b 100644
--- a/contrib/llvm/lib/Transforms/Utils/MetaRenamer.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/MetaRenamer.cpp
@@ -131,7 +131,7 @@ namespace {
return true;
}
};
-} // namespace
+}
char MetaRenamer::ID = 0;
INITIALIZE_PASS(MetaRenamer, "metarenamer",
diff --git a/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp b/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
index c0988987..88b39dd 100644
--- a/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
@@ -303,7 +303,7 @@ public:
}
};
-} // namespace llvm
+} // End llvm namespace
/// Check to see if AvailableVals has an entry for the specified BB and if so,
/// return it. If not, construct SSA form by first calculating the required
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 3d7ab0f..36781c1 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -136,11 +136,10 @@ public:
: TTI(TTI), DL(DL), BonusInstThreshold(BonusInstThreshold), AC(AC) {}
bool run(BasicBlock *BB);
};
-} // namespace
+}
-/// SafeToMergeTerminators - Return true if it is safe to merge these two
+/// Return true if it is safe to merge these two
/// terminator instructions together.
-///
static bool SafeToMergeTerminators(TerminatorInst *SI1, TerminatorInst *SI2) {
if (SI1 == SI2) return false; // Can't merge with self!
@@ -164,11 +163,9 @@ static bool SafeToMergeTerminators(TerminatorInst *SI1, TerminatorInst *SI2) {
return true;
}
-/// isProfitableToFoldUnconditional - Return true if it is safe and profitable
-/// to merge these two terminator instructions together, where SI1 is an
-/// unconditional branch. PhiNodes will store all PHI nodes in common
-/// successors.
-///
+/// Return true if it is safe and profitable to merge these two terminator
+/// instructions together, where SI1 is an unconditional branch. PhiNodes will
+/// store all PHI nodes in common successors.
static bool isProfitableToFoldUnconditional(BranchInst *SI1,
BranchInst *SI2,
Instruction *Cond,
@@ -205,10 +202,10 @@ static bool isProfitableToFoldUnconditional(BranchInst *SI1,
return true;
}
-/// AddPredecessorToBlock - Update PHI nodes in Succ to indicate that there will
-/// now be entries in it from the 'NewPred' block. The values that will be
-/// flowing into the PHI nodes will be the same as those coming in from
-/// ExistPred, an existing predecessor of Succ.
+/// Update PHI nodes in Succ to indicate that there will now be entries in it
+/// from the 'NewPred' block. The values that will be flowing into the PHI nodes
+/// will be the same as those coming in from ExistPred, an existing predecessor
+/// of Succ.
static void AddPredecessorToBlock(BasicBlock *Succ, BasicBlock *NewPred,
BasicBlock *ExistPred) {
if (!isa<PHINode>(Succ->begin())) return; // Quick exit if nothing to do
@@ -219,9 +216,9 @@ static void AddPredecessorToBlock(BasicBlock *Succ, BasicBlock *NewPred,
PN->addIncoming(PN->getIncomingValueForBlock(ExistPred), NewPred);
}
-/// ComputeSpeculationCost - Compute an abstract "cost" of speculating the
-/// given instruction, which is assumed to be safe to speculate. TCC_Free means
-/// cheap, TCC_Basic means less cheap, and TCC_Expensive means prohibitively
+/// Compute an abstract "cost" of speculating the given instruction,
+/// which is assumed to be safe to speculate. TCC_Free means cheap,
+/// TCC_Basic means less cheap, and TCC_Expensive means prohibitively
/// expensive.
static unsigned ComputeSpeculationCost(const User *I,
const TargetTransformInfo &TTI) {
@@ -229,8 +226,8 @@ static unsigned ComputeSpeculationCost(const User *I,
"Instruction is not safe to speculatively execute!");
return TTI.getUserCost(I);
}
-/// DominatesMergePoint - If we have a merge point of an "if condition" as
-/// accepted above, return true if the specified value dominates the block. We
+/// If we have a merge point of an "if condition" as accepted above,
+/// return true if the specified value dominates the block. We
/// don't handle the true generality of domination here, just a special case
/// which works well enough for us.
///
@@ -302,7 +299,7 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
return true;
}
-/// GetConstantInt - Extract ConstantInt from value, looking through IntToPtr
+/// Extract ConstantInt from value, looking through IntToPtr
/// and PointerNullValue. Return NULL if value is not a constant int.
static ConstantInt *GetConstantInt(Value *V, const DataLayout &DL) {
// Normal constant int.
@@ -456,7 +453,7 @@ private:
}
- /// gather - Given a potentially 'or'd or 'and'd together collection of icmp
+ /// Given a potentially 'or'd or 'and'd together collection of icmp
/// eq/ne/lt/gt instructions that compare a value against a constant, extract
/// the value being compared, and stick the list constants into the Vals
/// vector.
@@ -502,7 +499,7 @@ private:
}
};
-} // namespace
+}
static void EraseTerminatorInstAndDCECond(TerminatorInst *TI) {
Instruction *Cond = nullptr;
@@ -519,7 +516,7 @@ static void EraseTerminatorInstAndDCECond(TerminatorInst *TI) {
if (Cond) RecursivelyDeleteTriviallyDeadInstructions(Cond);
}
-/// isValueEqualityComparison - Return true if the specified terminator checks
+/// Return true if the specified terminator checks
/// to see if a value is equal to constant integer value.
Value *SimplifyCFGOpt::isValueEqualityComparison(TerminatorInst *TI) {
Value *CV = nullptr;
@@ -547,7 +544,7 @@ Value *SimplifyCFGOpt::isValueEqualityComparison(TerminatorInst *TI) {
return CV;
}
-/// GetValueEqualityComparisonCases - Given a value comparison instruction,
+/// Given a value comparison instruction,
/// decode all of the 'cases' that it represents and return the 'default' block.
BasicBlock *SimplifyCFGOpt::
GetValueEqualityComparisonCases(TerminatorInst *TI,
@@ -571,15 +568,14 @@ GetValueEqualityComparisonCases(TerminatorInst *TI,
}
-/// EliminateBlockCases - Given a vector of bb/value pairs, remove any entries
+/// Given a vector of bb/value pairs, remove any entries
/// in the list that match the specified block.
static void EliminateBlockCases(BasicBlock *BB,
std::vector<ValueEqualityComparisonCase> &Cases) {
Cases.erase(std::remove(Cases.begin(), Cases.end(), BB), Cases.end());
}
-/// ValuesOverlap - Return true if there are any keys in C1 that exist in C2 as
-/// well.
+/// Return true if there are any keys in C1 that exist in C2 as well.
static bool
ValuesOverlap(std::vector<ValueEqualityComparisonCase> &C1,
std::vector<ValueEqualityComparisonCase > &C2) {
@@ -613,12 +609,11 @@ ValuesOverlap(std::vector<ValueEqualityComparisonCase> &C1,
return false;
}
-/// SimplifyEqualityComparisonWithOnlyPredecessor - If TI is known to be a
-/// terminator instruction and its block is known to only have a single
-/// predecessor block, check to see if that predecessor is also a value
-/// comparison with the same value, and if that comparison determines the
-/// outcome of this comparison. If so, simplify TI. This does a very limited
-/// form of jump threading.
+/// If TI is known to be a terminator instruction and its block is known to
+/// only have a single predecessor block, check to see if that predecessor is
+/// also a value comparison with the same value, and if that comparison
+/// determines the outcome of this comparison. If so, simplify TI. This does a
+/// very limited form of jump threading.
bool SimplifyCFGOpt::
SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
BasicBlock *Pred,
@@ -754,7 +749,7 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
}
namespace {
- /// ConstantIntOrdering - This class implements a stable ordering of constant
+ /// This class implements a stable ordering of constant
/// integers that does not depend on their address. This is important for
/// applications that sort ConstantInt's to ensure uniqueness.
struct ConstantIntOrdering {
@@ -817,8 +812,8 @@ static void FitWeights(MutableArrayRef<uint64_t> Weights) {
}
}
-/// FoldValueComparisonIntoPredecessors - The specified terminator is a value
-/// equality comparison instruction (either a switch or a branch on "X == c").
+/// The specified terminator is a value equality comparison instruction
+/// (either a switch or a branch on "X == c").
/// See if any of the predecessors of the terminator block are value comparisons
/// on the same value. If so, and if safe to do so, fold them together.
bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
@@ -1027,10 +1022,9 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
return Changed;
}
-// isSafeToHoistInvoke - If we would need to insert a select that uses the
-// value of this invoke (comments in HoistThenElseCodeToIf explain why we
-// would need to do this), we can't hoist the invoke, as there is nowhere
-// to put the select in this case.
+// If we would need to insert a select that uses the value of this invoke
+// (comments in HoistThenElseCodeToIf explain why we would need to do this), we
+// can't hoist the invoke, as there is nowhere to put the select in this case.
static bool isSafeToHoistInvoke(BasicBlock *BB1, BasicBlock *BB2,
Instruction *I1, Instruction *I2) {
for (succ_iterator SI = succ_begin(BB1), E = succ_end(BB1); SI != E; ++SI) {
@@ -1049,9 +1043,9 @@ static bool isSafeToHoistInvoke(BasicBlock *BB1, BasicBlock *BB2,
static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I);
-/// HoistThenElseCodeToIf - Given a conditional branch that goes to BB1 and
-/// BB2, hoist any common code in the two blocks up into the branch block. The
-/// caller of this function guarantees that BI's block dominates BB1 and BB2.
+/// Given a conditional branch that goes to BB1 and BB2, hoist any common code
+/// in the two blocks up into the branch block. The caller of this function
+/// guarantees that BI's block dominates BB1 and BB2.
static bool HoistThenElseCodeToIf(BranchInst *BI,
const TargetTransformInfo &TTI) {
// This does very trivial matching, with limited scanning, to find identical
@@ -1197,7 +1191,7 @@ HoistTerminator:
return true;
}
-/// SinkThenElseCodeToEnd - Given an unconditional branch that goes to BBEnd,
+/// Given an unconditional branch that goes to BBEnd,
/// check whether BBEnd has only two predecessors and the other predecessor
/// ends with an unconditional branch. If it is true, sink any common code
/// in the two predecessors to BBEnd.
@@ -1656,8 +1650,7 @@ static bool HasNoDuplicateCall(const BasicBlock *BB) {
return false;
}
-/// BlockIsSimpleEnoughToThreadThrough - Return true if we can thread a branch
-/// across this block.
+/// Return true if we can thread a branch across this block.
static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) {
BranchInst *BI = cast<BranchInst>(BB->getTerminator());
unsigned Size = 0;
@@ -1681,10 +1674,9 @@ static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) {
return true;
}
-/// FoldCondBranchOnPHI - If we have a conditional branch on a PHI node value
-/// that is defined in the same block as the branch and if any PHI entries are
-/// constants, thread edges corresponding to that entry to be branches to their
-/// ultimate destination.
+/// If we have a conditional branch on a PHI node value that is defined in the
+/// same block as the branch and if any PHI entries are constants, thread edges
+/// corresponding to that entry to be branches to their ultimate destination.
static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout &DL) {
BasicBlock *BB = BI->getParent();
PHINode *PN = dyn_cast<PHINode>(BI->getCondition());
@@ -1781,8 +1773,8 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout &DL) {
return false;
}
-/// FoldTwoEntryPHINode - Given a BB that starts with the specified two-entry
-/// PHI node, see if we can eliminate it.
+/// Given a BB that starts with the specified two-entry PHI node,
+/// see if we can eliminate it.
static bool FoldTwoEntryPHINode(PHINode *PN, const TargetTransformInfo &TTI,
const DataLayout &DL) {
// Ok, this is a two entry PHI node. Check to see if this is a simple "if
@@ -1920,8 +1912,8 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetTransformInfo &TTI,
return true;
}
-/// SimplifyCondBranchToTwoReturns - If we found a conditional branch that goes
-/// to two returning blocks, try to merge them together into one return,
+/// If we found a conditional branch that goes to two returning blocks,
+/// try to merge them together into one return,
/// introducing a select if the return values disagree.
static bool SimplifyCondBranchToTwoReturns(BranchInst *BI,
IRBuilder<> &Builder) {
@@ -2008,10 +2000,9 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI,
return true;
}
-/// ExtractBranchMetadata - Given a conditional BranchInstruction, retrieve the
-/// probabilities of the branch taking each edge. Fills in the two APInt
-/// parameters and return true, or returns false if no or invalid metadata was
-/// found.
+/// Given a conditional BranchInstruction, retrieve the probabilities of the
+/// branch taking each edge. Fills in the two APInt parameters and returns true,
+/// or returns false if no or invalid metadata was found.
static bool ExtractBranchMetadata(BranchInst *BI,
uint64_t &ProbTrue, uint64_t &ProbFalse) {
assert(BI->isConditional() &&
@@ -2028,9 +2019,8 @@ static bool ExtractBranchMetadata(BranchInst *BI,
return true;
}
-/// checkCSEInPredecessor - Return true if the given instruction is available
+/// Return true if the given instruction is available
/// in its predecessor block. If yes, the instruction will be removed.
-///
static bool checkCSEInPredecessor(Instruction *Inst, BasicBlock *PB) {
if (!isa<BinaryOperator>(Inst) && !isa<CmpInst>(Inst))
return false;
@@ -2046,9 +2036,9 @@ static bool checkCSEInPredecessor(Instruction *Inst, BasicBlock *PB) {
return false;
}
-/// FoldBranchToCommonDest - If this basic block is simple enough, and if a
-/// predecessor branches to us and one of our successors, fold the block into
-/// the predecessor and use logical operations to pick the right destination.
+/// If this basic block is simple enough, and if a predecessor branches to us
+/// and one of our successors, fold the block into the predecessor and use
+/// logical operations to pick the right destination.
bool llvm::FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold) {
BasicBlock *BB = BI->getParent();
@@ -2190,11 +2180,11 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold) {
}
// If we have bonus instructions, clone them into the predecessor block.
- // Note that there may be mutliple predecessor blocks, so we cannot move
+ // Note that there may be multiple predecessor blocks, so we cannot move
// bonus instructions to a predecessor block.
ValueToValueMapTy VMap; // maps original values to cloned values
// We already make sure Cond is the last instruction before BI. Therefore,
- // every instructions before Cond other than DbgInfoIntrinsic are bonus
+ // all instructions before Cond other than DbgInfoIntrinsic are bonus
// instructions.
for (auto BonusInst = BB->begin(); Cond != BonusInst; ++BonusInst) {
if (isa<DbgInfoIntrinsic>(BonusInst))
@@ -2342,8 +2332,8 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold) {
return false;
}
-/// SimplifyCondBranchToCondBranch - If we have a conditional branch as a
-/// predecessor of another block, this function tries to simplify it. We know
+/// If we have a conditional branch as a predecessor of another block,
+/// this function tries to simplify it. We know
/// that PBI and BI are both conditional branches, and BI is in one of the
/// successor blocks of PBI - PBI branches to BI.
static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
@@ -2558,8 +2548,8 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
return true;
}
-// SimplifyTerminatorOnSelect - Simplifies a terminator by replacing it with a
-// branch to TrueBB if Cond is true or to FalseBB if Cond is false.
+// Simplifies a terminator by replacing it with a branch to TrueBB if Cond is
+// true or to FalseBB if Cond is false.
// Takes care of updating the successors and removing the old terminator.
// Also makes sure not to introduce new successors by assuming that edges to
// non-successor TrueBBs and FalseBBs aren't reachable.
@@ -2624,7 +2614,7 @@ static bool SimplifyTerminatorOnSelect(TerminatorInst *OldTerm, Value *Cond,
return true;
}
-// SimplifySwitchOnSelect - Replaces
+// Replaces
// (switch (select cond, X, Y)) on constant X, Y
// with a branch - conditional if X and Y lead to distinct BBs,
// unconditional otherwise.
@@ -2659,7 +2649,7 @@ static bool SimplifySwitchOnSelect(SwitchInst *SI, SelectInst *Select) {
TrueWeight, FalseWeight);
}
-// SimplifyIndirectBrOnSelect - Replaces
+// Replaces
// (indirectbr (select cond, blockaddress(@fn, BlockA),
// blockaddress(@fn, BlockB)))
// with
@@ -2680,8 +2670,8 @@ static bool SimplifyIndirectBrOnSelect(IndirectBrInst *IBI, SelectInst *SI) {
0, 0);
}
-/// TryToSimplifyUncondBranchWithICmpInIt - This is called when we find an icmp
-/// instruction (a seteq/setne with a constant) as the only instruction in a
+/// This is called when we find an icmp instruction
+/// (a seteq/setne with a constant) as the only instruction in a
/// block that ends with an uncond branch. We are looking for a very specific
/// pattern that occurs when "A == 1 || A == 2 || A == 3" gets simplified. In
/// this case, we merge the first two "or's of icmp" into a switch, but then the
@@ -2802,7 +2792,7 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(
return true;
}
-/// SimplifyBranchOnICmpChain - The specified branch is a conditional branch.
+/// The specified branch is a conditional branch.
/// Check to see if it is branching on an or/and chain of icmp instructions, and
/// fold it into a switch instruction if so.
static bool SimplifyBranchOnICmpChain(BranchInst *BI, IRBuilder<> &Builder,
@@ -3239,7 +3229,7 @@ static bool TurnSwitchRangeIntoICmp(SwitchInst *SI, IRBuilder<> &Builder) {
return true;
}
-/// EliminateDeadSwitchCases - Compute masked bits for the condition of a switch
+/// Compute masked bits for the condition of a switch
/// and use it to remove dead cases.
static bool EliminateDeadSwitchCases(SwitchInst *SI, AssumptionCache *AC,
const DataLayout &DL) {
@@ -3290,8 +3280,8 @@ static bool EliminateDeadSwitchCases(SwitchInst *SI, AssumptionCache *AC,
return !DeadCases.empty();
}
-/// FindPHIForConditionForwarding - If BB would be eligible for simplification
-/// by TryToSimplifyUncondBranchFromEmptyBlock (i.e. it is empty and terminated
+/// If BB would be eligible for simplification by
+/// TryToSimplifyUncondBranchFromEmptyBlock (i.e. it is empty and terminated
/// by an unconditional branch), look at the phi node for BB in the successor
/// block and see if the incoming value is equal to CaseValue. If so, return
/// the phi node, and set PhiIndex to BB's index in the phi node.
@@ -3324,9 +3314,9 @@ static PHINode *FindPHIForConditionForwarding(ConstantInt *CaseValue,
return nullptr;
}
-/// ForwardSwitchConditionToPHI - Try to forward the condition of a switch
-/// instruction to a phi node dominated by the switch, if that would mean that
-/// some of the destination blocks of the switch can be folded away.
+/// Try to forward the condition of a switch instruction to a phi node
+/// dominated by the switch, if that would mean that some of the destination
+/// blocks of the switch can be folded away.
/// Returns true if a change is made.
static bool ForwardSwitchConditionToPHI(SwitchInst *SI) {
typedef DenseMap<PHINode*, SmallVector<int,4> > ForwardingNodesMap;
@@ -3361,7 +3351,7 @@ static bool ForwardSwitchConditionToPHI(SwitchInst *SI) {
return Changed;
}
-/// ValidLookupTableConstant - Return true if the backend will be able to handle
+/// Return true if the backend will be able to handle
/// initializing an array of constants like C.
static bool ValidLookupTableConstant(Constant *C) {
if (C->isThreadDependent())
@@ -3379,7 +3369,7 @@ static bool ValidLookupTableConstant(Constant *C) {
isa<UndefValue>(C);
}
-/// LookupConstant - If V is a Constant, return it. Otherwise, try to look up
+/// If V is a Constant, return it. Otherwise, try to look up
/// its constant value in ConstantPool, returning 0 if it's not there.
static Constant *LookupConstant(Value *V,
const SmallDenseMap<Value*, Constant*>& ConstantPool) {
@@ -3388,7 +3378,7 @@ static Constant *LookupConstant(Value *V,
return ConstantPool.lookup(V);
}
-/// ConstantFold - Try to fold instruction I into a constant. This works for
+/// Try to fold instruction I into a constant. This works for
/// simple instructions such as binary operations where both operands are
/// constant or can be replaced by constants from the ConstantPool. Returns the
/// resulting constant on success, 0 otherwise.
@@ -3422,7 +3412,7 @@ ConstantFold(Instruction *I, const DataLayout &DL,
return ConstantFoldInstOperands(I->getOpcode(), I->getType(), COps, DL);
}
-/// GetCaseResults - Try to determine the resulting constant values in phi nodes
+/// Try to determine the resulting constant values in phi nodes
/// at the common destination basic block, *CommonDest, for one of the case
/// destionations CaseDest corresponding to value CaseVal (0 for the default
/// case), of a switch instruction SI.
@@ -3501,8 +3491,8 @@ GetCaseResults(SwitchInst *SI, ConstantInt *CaseVal, BasicBlock *CaseDest,
return Res.size() > 0;
}
-// MapCaseToResult - Helper function used to
-// add CaseVal to the list of cases that generate Result.
+// Helper function used to add CaseVal to the list of cases that generate
+// Result.
static void MapCaseToResult(ConstantInt *CaseVal,
SwitchCaseResultVectorTy &UniqueResults,
Constant *Result) {
@@ -3516,7 +3506,7 @@ static void MapCaseToResult(ConstantInt *CaseVal,
SmallVector<ConstantInt*, 4>(1, CaseVal)));
}
-// InitializeUniqueCases - Helper function that initializes a map containing
+// Helper function that initializes a map containing
// results for the PHI node of the common destination block for a switch
// instruction. Returns false if multiple PHI nodes have been found or if
// there is not a common destination block for the switch.
@@ -3561,9 +3551,8 @@ static bool InitializeUniqueCases(SwitchInst *SI, PHINode *&PHI,
return true;
}
-// ConvertTwoCaseSwitch - Helper function that checks if it is possible to
-// transform a switch with only two cases (or two cases + default)
-// that produces a result into a value select.
+// Helper function that checks if it is possible to transform a switch with only
+// two cases (or two cases + default) that produces a result into a select.
// Example:
// switch (a) {
// case 10: %0 = icmp eq i32 %a, 10
@@ -3603,9 +3592,8 @@ ConvertTwoCaseSwitch(const SwitchCaseResultVectorTy &ResultVector,
return nullptr;
}
-// RemoveSwitchAfterSelectConversion - Helper function to cleanup a switch
-// instruction that has been converted into a select, fixing up PHI nodes and
-// basic blocks.
+// Helper function to cleanup a switch instruction that has been converted into
+// a select, fixing up PHI nodes and basic blocks.
static void RemoveSwitchAfterSelectConversion(SwitchInst *SI, PHINode *PHI,
Value *SelectValue,
IRBuilder<> &Builder) {
@@ -3627,7 +3615,7 @@ static void RemoveSwitchAfterSelectConversion(SwitchInst *SI, PHINode *PHI,
SI->eraseFromParent();
}
-/// SwitchToSelect - If the switch is only used to initialize one or more
+/// If the switch is only used to initialize one or more
/// phi nodes in a common successor block with only two different
/// constant values, replace the switch with select.
static bool SwitchToSelect(SwitchInst *SI, IRBuilder<> &Builder,
@@ -3659,23 +3647,21 @@ static bool SwitchToSelect(SwitchInst *SI, IRBuilder<> &Builder,
}
namespace {
- /// SwitchLookupTable - This class represents a lookup table that can be used
- /// to replace a switch.
+ /// This class represents a lookup table that can be used to replace a switch.
class SwitchLookupTable {
public:
- /// SwitchLookupTable - Create a lookup table to use as a switch replacement
- /// with the contents of Values, using DefaultValue to fill any holes in the
- /// table.
+ /// Create a lookup table to use as a switch replacement with the contents
+ /// of Values, using DefaultValue to fill any holes in the table.
SwitchLookupTable(
Module &M, uint64_t TableSize, ConstantInt *Offset,
const SmallVectorImpl<std::pair<ConstantInt *, Constant *>> &Values,
Constant *DefaultValue, const DataLayout &DL);
- /// BuildLookup - Build instructions with Builder to retrieve the value at
+ /// Build instructions with Builder to retrieve the value at
/// the position given by Index in the lookup table.
Value *BuildLookup(Value *Index, IRBuilder<> &Builder);
- /// WouldFitInRegister - Return true if a table with TableSize elements of
+ /// Return true if a table with TableSize elements of
/// type ElementType would fit in a target-legal register.
static bool WouldFitInRegister(const DataLayout &DL, uint64_t TableSize,
const Type *ElementType);
@@ -3717,7 +3703,7 @@ namespace {
// For ArrayKind, this is the array.
GlobalVariable *Array;
};
-} // namespace
+}
SwitchLookupTable::SwitchLookupTable(
Module &M, uint64_t TableSize, ConstantInt *Offset,
@@ -3907,9 +3893,8 @@ bool SwitchLookupTable::WouldFitInRegister(const DataLayout &DL,
return DL.fitsInLegalInteger(TableSize * IT->getBitWidth());
}
-/// ShouldBuildLookupTable - Determine whether a lookup table should be built
-/// for this switch, based on the number of cases, size of the table and the
-/// types of the results.
+/// Determine whether a lookup table should be built for this switch, based on
+/// the number of cases, size of the table, and the types of the results.
static bool
ShouldBuildLookupTable(SwitchInst *SI, uint64_t TableSize,
const TargetTransformInfo &TTI, const DataLayout &DL,
@@ -4033,9 +4018,9 @@ static void reuseTableCompare(User *PhiUser, BasicBlock *PhiBlock,
}
}
-/// SwitchToLookupTable - If the switch is only used to initialize one or more
-/// phi nodes in a common successor block with different constant values,
-/// replace the switch with lookup tables.
+/// If the switch is only used to initialize one or more phi nodes in a common
+/// successor block with different constant values, replace the switch with
+/// lookup tables.
static bool SwitchToLookupTable(SwitchInst *SI, IRBuilder<> &Builder,
const DataLayout &DL,
const TargetTransformInfo &TTI) {
@@ -4691,8 +4676,8 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
return Changed;
}
-/// SimplifyCFG - This function is used to do simplification of a CFG. For
-/// example, it adjusts branches to branches to eliminate the extra hop, it
+/// This function is used to do simplification of a CFG.
+/// For example, it adjusts branches to branches to eliminate the extra hop,
/// eliminates unreachable basic blocks, and does other "peephole" optimization
/// of the CFG. It returns true if a modification was made.
///
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
index 68986ac..ab30aa1 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -77,7 +77,7 @@ namespace {
Instruction *splitOverflowIntrinsic(Instruction *IVUser,
const DominatorTree *DT);
};
-} // namespace
+}
/// Fold an IV operand into its use. This removes increments of an
/// aligned IV when used by a instruction that ignores the low bits.
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
index 0a583a5..c499c87 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
@@ -100,7 +100,7 @@ namespace {
return Changed;
}
};
-} // namespace
+}
char InstSimplifier::ID = 0;
INITIALIZE_PASS_BEGIN(InstSimplifier, "instsimplify",
diff --git a/contrib/llvm/lib/Transforms/Utils/SymbolRewriter.cpp b/contrib/llvm/lib/Transforms/Utils/SymbolRewriter.cpp
index 4cc278f..a2a54da 100644
--- a/contrib/llvm/lib/Transforms/Utils/SymbolRewriter.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SymbolRewriter.cpp
@@ -538,7 +538,7 @@ void RewriteSymbols::loadAndParseMapFiles() {
for (const auto &MapFile : MapFiles)
parser.parse(MapFile, &Descriptors);
}
-} // namespace
+}
INITIALIZE_PASS(RewriteSymbols, "rewrite-symbols", "Rewrite Symbols", false,
false)
diff --git a/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp b/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
index fd7661f..215d6f9 100644
--- a/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
+++ b/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
@@ -3192,7 +3192,7 @@ namespace {
DEBUG(dbgs() << "BBV: final: \n" << BB << "\n");
}
-} // namespace
+}
char BBVectorize::ID = 0;
static const char bb_vectorize_name[] = "Basic-Block Vectorization";
diff --git a/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index b7faa20..5ba1417 100644
--- a/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -96,7 +96,7 @@
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Transforms/Utils/VectorUtils.h"
+#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <algorithm>
#include <map>
@@ -850,6 +850,8 @@ public:
return B.CreateAdd(StartValue, Index);
case IK_PtrInduction:
+ assert(Index->getType() == StepValue->getType() &&
+ "Index type does not match StepValue type");
if (StepValue->isMinusOne())
Index = B.CreateNeg(Index);
else if (!StepValue->isOne())
@@ -2413,9 +2415,8 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, bool IfPredic
LoopVectorBody.push_back(NewIfBlock);
VectorLp->addBasicBlockToLoop(NewIfBlock, *LI);
Builder.SetInsertPoint(InsertPt);
- Instruction *OldBr = IfBlock->getTerminator();
- BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
- OldBr->eraseFromParent();
+ ReplaceInstWithInst(IfBlock->getTerminator(),
+ BranchInst::Create(CondBlock, NewIfBlock, Cmp));
IfBlock = NewIfBlock;
}
}
@@ -2658,9 +2659,9 @@ void InnerLoopVectorizer::createEmptyLoop() {
if (ParentLoop)
ParentLoop->addBasicBlockToLoop(CheckBlock, *LI);
LoopBypassBlocks.push_back(CheckBlock);
- Instruction *OldTerm = LastBypassBlock->getTerminator();
- BranchInst::Create(ScalarPH, CheckBlock, CheckBCOverflow, OldTerm);
- OldTerm->eraseFromParent();
+ ReplaceInstWithInst(
+ LastBypassBlock->getTerminator(),
+ BranchInst::Create(ScalarPH, CheckBlock, CheckBCOverflow));
LastBypassBlock = CheckBlock;
}
@@ -2682,9 +2683,8 @@ void InnerLoopVectorizer::createEmptyLoop() {
// Replace the branch into the memory check block with a conditional branch
// for the "few elements case".
- Instruction *OldTerm = LastBypassBlock->getTerminator();
- BranchInst::Create(MiddleBlock, CheckBlock, Cmp, OldTerm);
- OldTerm->eraseFromParent();
+ ReplaceInstWithInst(LastBypassBlock->getTerminator(),
+ BranchInst::Create(MiddleBlock, CheckBlock, Cmp));
Cmp = StrideCheck;
LastBypassBlock = CheckBlock;
@@ -2707,17 +2707,15 @@ void InnerLoopVectorizer::createEmptyLoop() {
// Replace the branch into the memory check block with a conditional branch
// for the "few elements case".
- Instruction *OldTerm = LastBypassBlock->getTerminator();
- BranchInst::Create(MiddleBlock, CheckBlock, Cmp, OldTerm);
- OldTerm->eraseFromParent();
+ ReplaceInstWithInst(LastBypassBlock->getTerminator(),
+ BranchInst::Create(MiddleBlock, CheckBlock, Cmp));
Cmp = MemRuntimeCheck;
LastBypassBlock = CheckBlock;
}
- LastBypassBlock->getTerminator()->eraseFromParent();
- BranchInst::Create(MiddleBlock, VectorPH, Cmp,
- LastBypassBlock);
+ ReplaceInstWithInst(LastBypassBlock->getTerminator(),
+ BranchInst::Create(MiddleBlock, VectorPH, Cmp));
// We are going to resume the execution of the scalar loop.
// Go over all of the induction variables that we found and fix the
@@ -2798,7 +2796,10 @@ void InnerLoopVectorizer::createEmptyLoop() {
break;
}
case LoopVectorizationLegality::IK_PtrInduction: {
- EndValue = II.transform(BypassBuilder, CountRoundDown);
+ Value *CRD = BypassBuilder.CreateSExtOrTrunc(CountRoundDown,
+ II.StepValue->getType(),
+ "cast.crd");
+ EndValue = II.transform(BypassBuilder, CRD);
EndValue->setName("ptr.ind.end");
break;
}
@@ -2851,10 +2852,8 @@ void InnerLoopVectorizer::createEmptyLoop() {
Value *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, IdxEnd,
ResumeIndex, "cmp.n",
MiddleBlock->getTerminator());
-
- BranchInst::Create(ExitBlock, ScalarPH, CmpN, MiddleBlock->getTerminator());
- // Remove the old terminator.
- MiddleBlock->getTerminator()->eraseFromParent();
+ ReplaceInstWithInst(MiddleBlock->getTerminator(),
+ BranchInst::Create(ExitBlock, ScalarPH, CmpN));
// Create i+1 and fill the PHINode.
Value *NextIdx = Builder.CreateAdd(Induction, Step, "index.next");
@@ -2906,7 +2905,7 @@ struct CSEDenseMapInfo {
return LHS->isIdenticalTo(RHS);
}
};
-} // namespace
+}
/// \brief Check whether this block is a predicated block.
/// Due to if predication of stores we might create a sequence of "if(pred) a[i]
@@ -3448,12 +3447,14 @@ void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
// This is the normalized GEP that starts counting at zero.
Value *NormalizedIdx =
Builder.CreateSub(Induction, ExtendedIdx, "normalized.idx");
+ NormalizedIdx =
+ Builder.CreateSExtOrTrunc(NormalizedIdx, II.StepValue->getType());
// This is the vector of results. Notice that we don't generate
// vector geps because scalar geps result in better code.
for (unsigned part = 0; part < UF; ++part) {
if (VF == 1) {
int EltIndex = part;
- Constant *Idx = ConstantInt::get(Induction->getType(), EltIndex);
+ Constant *Idx = ConstantInt::get(NormalizedIdx->getType(), EltIndex);
Value *GlobalIdx = Builder.CreateAdd(NormalizedIdx, Idx);
Value *SclrGep = II.transform(Builder, GlobalIdx);
SclrGep->setName("next.gep");
@@ -3464,7 +3465,7 @@ void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
Value *VecVal = UndefValue::get(VectorType::get(P->getType(), VF));
for (unsigned int i = 0; i < VF; ++i) {
int EltIndex = i + part * VF;
- Constant *Idx = ConstantInt::get(Induction->getType(), EltIndex);
+ Constant *Idx = ConstantInt::get(NormalizedIdx->getType(), EltIndex);
Value *GlobalIdx = Builder.CreateAdd(NormalizedIdx, Idx);
Value *SclrGep = II.transform(Builder, GlobalIdx);
SclrGep->setName("next.gep");
@@ -4642,10 +4643,9 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
if (VF == 0)
VF = MaxVectorSize;
-
- // If the trip count that we found modulo the vectorization factor is not
- // zero then we require a tail.
- if (VF < 2) {
+ else {
+ // If the trip count that we found modulo the vectorization factor is not
+ // zero then we require a tail.
emitAnalysis(VectorizationReport() <<
"cannot optimize for size and vectorize at the "
"same time. Enable vectorization of this loop "
@@ -5507,9 +5507,8 @@ void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
LoopVectorBody.push_back(NewIfBlock);
VectorLp->addBasicBlockToLoop(NewIfBlock, *LI);
Builder.SetInsertPoint(InsertPt);
- Instruction *OldBr = IfBlock->getTerminator();
- BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
- OldBr->eraseFromParent();
+ ReplaceInstWithInst(IfBlock->getTerminator(),
+ BranchInst::Create(CondBlock, NewIfBlock, Cmp));
IfBlock = NewIfBlock;
}
}
diff --git a/contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 370e295..7c4c279 100644
--- a/contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -43,7 +43,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Transforms/Utils/VectorUtils.h"
+#include "llvm/Analysis/VectorUtils.h"
#include <algorithm>
#include <map>
#include <memory>
OpenPOWER on IntegriCloud