summaryrefslogtreecommitdiffstats
path: root/lib/Transforms
diff options
context:
space:
mode:
authorrdivacky <rdivacky@FreeBSD.org>2009-11-04 14:58:56 +0000
committerrdivacky <rdivacky@FreeBSD.org>2009-11-04 14:58:56 +0000
commit7ff99155c39edd73ebf1c6adfa023b1048fee9a4 (patch)
treeb4dc751bcee540346911aa4115729eff2f991657 /lib/Transforms
parentd1f06de484602e72707476a6152974847bac1570 (diff)
downloadFreeBSD-src-7ff99155c39edd73ebf1c6adfa023b1048fee9a4.zip
FreeBSD-src-7ff99155c39edd73ebf1c6adfa023b1048fee9a4.tar.gz
Update LLVM to r86025.
Diffstat (limited to 'lib/Transforms')
-rw-r--r--lib/Transforms/IPO/ArgumentPromotion.cpp3
-rw-r--r--lib/Transforms/IPO/CMakeLists.txt1
-rw-r--r--lib/Transforms/IPO/ConstantMerge.cpp3
-rw-r--r--lib/Transforms/IPO/DeadArgumentElimination.cpp3
-rw-r--r--lib/Transforms/IPO/DeadTypeElimination.cpp3
-rw-r--r--lib/Transforms/IPO/ExtractGV.cpp3
-rw-r--r--lib/Transforms/IPO/FunctionAttrs.cpp5
-rw-r--r--lib/Transforms/IPO/GlobalDCE.cpp38
-rw-r--r--lib/Transforms/IPO/GlobalOpt.cpp43
-rw-r--r--lib/Transforms/IPO/IPConstantPropagation.cpp6
-rw-r--r--lib/Transforms/IPO/IPO.cpp2
-rw-r--r--lib/Transforms/IPO/InlineAlways.cpp3
-rw-r--r--lib/Transforms/IPO/InlineSimple.cpp3
-rw-r--r--lib/Transforms/IPO/Internalize.cpp3
-rw-r--r--lib/Transforms/IPO/LoopExtractor.cpp3
-rw-r--r--lib/Transforms/IPO/LowerSetJmp.cpp37
-rw-r--r--lib/Transforms/IPO/MergeFunctions.cpp3
-rw-r--r--lib/Transforms/IPO/PartialInlining.cpp3
-rw-r--r--lib/Transforms/IPO/PartialSpecialization.cpp3
-rw-r--r--lib/Transforms/IPO/PruneEH.cpp3
-rw-r--r--lib/Transforms/IPO/RaiseAllocations.cpp173
-rw-r--r--lib/Transforms/IPO/StripDeadPrototypes.cpp3
-rw-r--r--lib/Transforms/IPO/StripSymbols.cpp18
-rw-r--r--lib/Transforms/IPO/StructRetPromotion.cpp3
-rw-r--r--lib/Transforms/Instrumentation/BlockProfiling.cpp3
-rw-r--r--lib/Transforms/Instrumentation/EdgeProfiling.cpp3
-rw-r--r--lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp3
-rw-r--r--lib/Transforms/Instrumentation/RSProfiling.cpp13
-rw-r--r--lib/Transforms/Scalar/ABCD.cpp1104
-rw-r--r--lib/Transforms/Scalar/CMakeLists.txt6
-rw-r--r--lib/Transforms/Scalar/CodeGenLICM.cpp112
-rw-r--r--lib/Transforms/Scalar/CodeGenPrepare.cpp3
-rw-r--r--lib/Transforms/Scalar/CondPropagate.cpp26
-rw-r--r--lib/Transforms/Scalar/DeadStoreElimination.cpp33
-rw-r--r--lib/Transforms/Scalar/GEPSplitter.cpp81
-rw-r--r--lib/Transforms/Scalar/GVN.cpp42
-rw-r--r--lib/Transforms/Scalar/IndVarSimplify.cpp4
-rw-r--r--lib/Transforms/Scalar/InstructionCombining.cpp753
-rw-r--r--lib/Transforms/Scalar/LoopDeletion.cpp30
-rw-r--r--lib/Transforms/Scalar/LoopIndexSplit.cpp8
-rw-r--r--lib/Transforms/Scalar/LoopRotation.cpp300
-rw-r--r--lib/Transforms/Scalar/LoopStrengthReduce.cpp8
-rw-r--r--lib/Transforms/Scalar/LoopUnrollPass.cpp151
-rw-r--r--lib/Transforms/Scalar/LoopUnswitch.cpp3
-rw-r--r--lib/Transforms/Scalar/SCCP.cpp1762
-rw-r--r--lib/Transforms/Scalar/SCCVN.cpp721
-rw-r--r--lib/Transforms/Scalar/ScalarReplAggregates.cpp61
-rw-r--r--lib/Transforms/Scalar/SimplifyCFGPass.cpp3
-rw-r--r--lib/Transforms/Scalar/SimplifyLibCalls.cpp25
-rw-r--r--lib/Transforms/Scalar/TailDuplication.cpp2
-rw-r--r--lib/Transforms/Utils/BasicBlockUtils.cpp17
-rw-r--r--lib/Transforms/Utils/BasicInliner.cpp2
-rw-r--r--lib/Transforms/Utils/BreakCriticalEdges.cpp30
-rw-r--r--lib/Transforms/Utils/CMakeLists.txt3
-rw-r--r--lib/Transforms/Utils/CloneFunction.cpp10
-rw-r--r--lib/Transforms/Utils/CloneModule.cpp5
-rw-r--r--lib/Transforms/Utils/CodeExtractor.cpp3
-rw-r--r--lib/Transforms/Utils/InlineFunction.cpp9
-rw-r--r--lib/Transforms/Utils/Local.cpp60
-rw-r--r--lib/Transforms/Utils/LoopSimplify.cpp3
-rw-r--r--lib/Transforms/Utils/LoopUnroll.cpp (renamed from lib/Transforms/Utils/UnrollLoop.cpp)4
-rw-r--r--lib/Transforms/Utils/LowerInvoke.cpp4
-rw-r--r--lib/Transforms/Utils/LowerSwitch.cpp5
-rw-r--r--lib/Transforms/Utils/Mem2Reg.cpp4
-rw-r--r--lib/Transforms/Utils/PromoteMemoryToRegister.cpp14
-rw-r--r--lib/Transforms/Utils/SSI.cpp2
-rw-r--r--lib/Transforms/Utils/SimplifyCFG.cpp66
-rw-r--r--lib/Transforms/Utils/ValueMapper.cpp158
68 files changed, 4010 insertions, 2017 deletions
diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp
index 5b91f3d..dd5a6d8 100644
--- a/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -41,7 +41,6 @@
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CallSite.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -59,7 +58,7 @@ STATISTIC(NumArgumentsDead , "Number of dead pointer args eliminated");
namespace {
/// ArgPromotion - The 'by reference' to 'by value' argument promotion pass.
///
- struct VISIBILITY_HIDDEN ArgPromotion : public CallGraphSCCPass {
+ struct ArgPromotion : public CallGraphSCCPass {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>();
CallGraphSCCPass::getAnalysisUsage(AU);
diff --git a/lib/Transforms/IPO/CMakeLists.txt b/lib/Transforms/IPO/CMakeLists.txt
index 5c28801..92bef3b 100644
--- a/lib/Transforms/IPO/CMakeLists.txt
+++ b/lib/Transforms/IPO/CMakeLists.txt
@@ -19,7 +19,6 @@ add_llvm_library(LLVMipo
PartialInlining.cpp
PartialSpecialization.cpp
PruneEH.cpp
- RaiseAllocations.cpp
StripDeadPrototypes.cpp
StripSymbols.cpp
StructRetPromotion.cpp
diff --git a/lib/Transforms/IPO/ConstantMerge.cpp b/lib/Transforms/IPO/ConstantMerge.cpp
index c1a1045..4972687 100644
--- a/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/lib/Transforms/IPO/ConstantMerge.cpp
@@ -22,14 +22,13 @@
#include "llvm/Module.h"
#include "llvm/Pass.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/Compiler.h"
#include <map>
using namespace llvm;
STATISTIC(NumMerged, "Number of global constants merged");
namespace {
- struct VISIBILITY_HIDDEN ConstantMerge : public ModulePass {
+ struct ConstantMerge : public ModulePass {
static char ID; // Pass identification, replacement for typeid
ConstantMerge() : ModulePass(&ID) {}
diff --git a/lib/Transforms/IPO/DeadArgumentElimination.cpp b/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 79a32f0..a3db836 100644
--- a/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -33,7 +33,6 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Support/Compiler.h"
#include <map>
#include <set>
using namespace llvm;
@@ -44,7 +43,7 @@ STATISTIC(NumRetValsEliminated , "Number of unused return values removed");
namespace {
/// DAE - The dead argument elimination pass.
///
- class VISIBILITY_HIDDEN DAE : public ModulePass {
+ class DAE : public ModulePass {
public:
/// Struct that represents (part of) either a return value or a function
diff --git a/lib/Transforms/IPO/DeadTypeElimination.cpp b/lib/Transforms/IPO/DeadTypeElimination.cpp
index 85aed2b..025d77e 100644
--- a/lib/Transforms/IPO/DeadTypeElimination.cpp
+++ b/lib/Transforms/IPO/DeadTypeElimination.cpp
@@ -19,13 +19,12 @@
#include "llvm/TypeSymbolTable.h"
#include "llvm/DerivedTypes.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/Compiler.h"
using namespace llvm;
STATISTIC(NumKilled, "Number of unused typenames removed from symtab");
namespace {
- struct VISIBILITY_HIDDEN DTE : public ModulePass {
+ struct DTE : public ModulePass {
static char ID; // Pass identification, replacement for typeid
DTE() : ModulePass(&ID) {}
diff --git a/lib/Transforms/IPO/ExtractGV.cpp b/lib/Transforms/IPO/ExtractGV.cpp
index 191100c..7f67e48 100644
--- a/lib/Transforms/IPO/ExtractGV.cpp
+++ b/lib/Transforms/IPO/ExtractGV.cpp
@@ -17,13 +17,12 @@
#include "llvm/Pass.h"
#include "llvm/Constants.h"
#include "llvm/Transforms/IPO.h"
-#include "llvm/Support/Compiler.h"
#include <algorithm>
using namespace llvm;
namespace {
/// @brief A pass to extract specific functions and their dependencies.
- class VISIBILITY_HIDDEN GVExtractorPass : public ModulePass {
+ class GVExtractorPass : public ModulePass {
std::vector<GlobalValue*> Named;
bool deleteStuff;
bool reLink;
diff --git a/lib/Transforms/IPO/FunctionAttrs.cpp b/lib/Transforms/IPO/FunctionAttrs.cpp
index 0701b94..b3a832f 100644
--- a/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -26,11 +26,10 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
-#include "llvm/Analysis/MallocHelper.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/UniqueVector.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/InstIterator.h"
using namespace llvm;
@@ -40,7 +39,7 @@ STATISTIC(NumNoCapture, "Number of arguments marked nocapture");
STATISTIC(NumNoAlias, "Number of function returns marked noalias");
namespace {
- struct VISIBILITY_HIDDEN FunctionAttrs : public CallGraphSCCPass {
+ struct FunctionAttrs : public CallGraphSCCPass {
static char ID; // Pass identification, replacement for typeid
FunctionAttrs() : CallGraphSCCPass(&ID) {}
diff --git a/lib/Transforms/IPO/GlobalDCE.cpp b/lib/Transforms/IPO/GlobalDCE.cpp
index 8f4e8b3..44216a6 100644
--- a/lib/Transforms/IPO/GlobalDCE.cpp
+++ b/lib/Transforms/IPO/GlobalDCE.cpp
@@ -20,9 +20,8 @@
#include "llvm/Constants.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/Compiler.h"
-#include <set>
using namespace llvm;
STATISTIC(NumAliases , "Number of global aliases removed");
@@ -30,7 +29,7 @@ STATISTIC(NumFunctions, "Number of functions removed");
STATISTIC(NumVariables, "Number of global variables removed");
namespace {
- struct VISIBILITY_HIDDEN GlobalDCE : public ModulePass {
+ struct GlobalDCE : public ModulePass {
static char ID; // Pass identification, replacement for typeid
GlobalDCE() : ModulePass(&ID) {}
@@ -40,7 +39,7 @@ namespace {
bool runOnModule(Module &M);
private:
- std::set<GlobalValue*> AliveGlobals;
+ SmallPtrSet<GlobalValue*, 32> AliveGlobals;
/// GlobalIsNeeded - mark the specific global value as needed, and
/// recursively mark anything that it uses as also needed.
@@ -92,7 +91,8 @@ bool GlobalDCE::runOnModule(Module &M) {
// The first pass is to drop initializers of global variables which are dead.
std::vector<GlobalVariable*> DeadGlobalVars; // Keep track of dead globals
- for (Module::global_iterator I = M.global_begin(), E = M.global_end(); I != E; ++I)
+ for (Module::global_iterator I = M.global_begin(), E = M.global_end();
+ I != E; ++I)
if (!AliveGlobals.count(I)) {
DeadGlobalVars.push_back(I); // Keep track of dead globals
I->setInitializer(0);
@@ -155,14 +155,10 @@ bool GlobalDCE::runOnModule(Module &M) {
/// GlobalIsNeeded - the specific global value as needed, and
/// recursively mark anything that it uses as also needed.
void GlobalDCE::GlobalIsNeeded(GlobalValue *G) {
- std::set<GlobalValue*>::iterator I = AliveGlobals.find(G);
-
// If the global is already in the set, no need to reprocess it.
- if (I != AliveGlobals.end()) return;
-
- // Otherwise insert it now, so we do not infinitely recurse
- AliveGlobals.insert(I, G);
-
+ if (!AliveGlobals.insert(G))
+ return;
+
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(G)) {
// If this is a global variable, we must make sure to add any global values
// referenced by the initializer to the alive set.
@@ -177,11 +173,9 @@ void GlobalDCE::GlobalIsNeeded(GlobalValue *G) {
// operands. Any operands of these types must be processed to ensure that
// any globals used will be marked as needed.
Function *F = cast<Function>(G);
- // For all basic blocks...
+
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
- // For all instructions...
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
- // For all operands...
for (User::op_iterator U = I->op_begin(), E = I->op_end(); U != E; ++U)
if (GlobalValue *GV = dyn_cast<GlobalValue>(*U))
GlobalIsNeeded(GV);
@@ -192,13 +186,13 @@ void GlobalDCE::GlobalIsNeeded(GlobalValue *G) {
void GlobalDCE::MarkUsedGlobalsAsNeeded(Constant *C) {
if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
- GlobalIsNeeded(GV);
- else {
- // Loop over all of the operands of the constant, adding any globals they
- // use to the list of needed globals.
- for (User::op_iterator I = C->op_begin(), E = C->op_end(); I != E; ++I)
- MarkUsedGlobalsAsNeeded(cast<Constant>(*I));
- }
+ return GlobalIsNeeded(GV);
+
+ // Loop over all of the operands of the constant, adding any globals they
+ // use to the list of needed globals.
+ for (User::op_iterator I = C->op_begin(), E = C->op_end(); I != E; ++I)
+ if (Constant *OpC = dyn_cast<Constant>(*I))
+ MarkUsedGlobalsAsNeeded(OpC);
}
// RemoveUnusedGlobalValue - Loop over all of the uses of the specified
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index 9ced2e8..5dab9ef 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -24,10 +24,9 @@
#include "llvm/Module.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/MallocHelper.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CallSite.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
@@ -57,7 +56,7 @@ STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
namespace {
- struct VISIBILITY_HIDDEN GlobalOpt : public ModulePass {
+ struct GlobalOpt : public ModulePass {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
}
static char ID; // Pass identification, replacement for typeid
@@ -85,7 +84,7 @@ namespace {
/// GlobalStatus - As we analyze each global, keep track of some information
/// about it. If we find out that the address of the global is taken, none of
/// this info will be accurate.
-struct VISIBILITY_HIDDEN GlobalStatus {
+struct GlobalStatus {
/// isLoaded - True if the global is ever loaded. If the global isn't ever
/// loaded it can be deleted.
bool isLoaded;
@@ -824,6 +823,7 @@ static void ConstantPropUsersOf(Value *V, LLVMContext &Context) {
static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
CallInst *CI,
BitCastInst *BCI,
+ Value* NElems,
LLVMContext &Context,
TargetData* TD) {
DEBUG(errs() << "PROMOTING MALLOC GLOBAL: " << *GV
@@ -831,9 +831,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
const Type *IntPtrTy = TD->getIntPtrType(Context);
- Value* ArraySize = getMallocArraySize(CI, Context, TD);
- assert(ArraySize && "not a malloc whose array size can be determined");
- ConstantInt *NElements = cast<ConstantInt>(ArraySize);
+ ConstantInt *NElements = cast<ConstantInt>(NElems);
if (NElements->getZExtValue() != 1) {
// If we have an array allocation, transform it to a single element
// allocation to make the code below simpler.
@@ -1276,15 +1274,14 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
/// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break
/// it up into multiple allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV,
- CallInst *CI, BitCastInst* BCI,
+ CallInst *CI, BitCastInst* BCI,
+ Value* NElems,
LLVMContext &Context,
- TargetData *TD){
+ TargetData *TD) {
DEBUG(errs() << "SROA HEAP ALLOC: " << *GV << " MALLOC CALL = " << *CI
<< " BITCAST = " << *BCI << '\n');
const Type* MAT = getMallocAllocatedType(CI);
const StructType *STy = cast<StructType>(MAT);
- Value* ArraySize = getMallocArraySize(CI, Context, TD);
- assert(ArraySize && "not a malloc whose array size can be determined");
// There is guaranteed to be at least one use of the malloc (storing
// it into GV). If there are other uses, change them to be uses of
@@ -1310,7 +1307,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV,
FieldGlobals.push_back(NGV);
Value *NMI = CallInst::CreateMalloc(CI, TD->getIntPtrType(Context),
- FieldTy, ArraySize,
+ FieldTy, NElems,
BCI->getName() + ".f" + Twine(FieldNo));
FieldMallocs.push_back(NMI);
new StoreInst(NMI, NGV, BCI);
@@ -1364,10 +1361,11 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV,
OrigBB->getParent());
BasicBlock *NextBlock = BasicBlock::Create(Context, "next",
OrigBB->getParent());
- BranchInst::Create(FreeBlock, NextBlock, Cmp, NullPtrBlock);
+ Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
+ Cmp, NullPtrBlock);
// Fill in FreeBlock.
- new FreeInst(GVVal, FreeBlock);
+ CallInst::CreateFree(GVVal, BI);
new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
FreeBlock);
BranchInst::Create(NextBlock, FreeBlock);
@@ -1510,7 +1508,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// something.
if (TD &&
NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) {
- GVI = OptimizeGlobalAddressOfMalloc(GV, CI, BCI, Context, TD);
+ GVI = OptimizeGlobalAddressOfMalloc(GV, CI, BCI, NElems, Context, TD);
return true;
}
@@ -1520,7 +1518,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// If this is an allocation of a fixed size array of structs, analyze as a
// variable size array. malloc [100 x struct],1 -> malloc struct, 100
- if (!isArrayMalloc(CI, Context, TD))
+ if (NElems == ConstantInt::get(CI->getOperand(1)->getType(), 1))
if (const ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
AllocTy = AT->getElementType();
@@ -1547,7 +1545,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
CI = extractMallocCallFromBitCast(NewMI);
}
- GVI = PerformHeapAllocSRoA(GV, CI, BCI, Context, TD);
+ GVI = PerformHeapAllocSRoA(GV, CI, BCI, NElems, Context, TD);
return true;
}
}
@@ -1878,9 +1876,8 @@ bool GlobalOpt::OptimizeFunctions(Module &M) {
if (!F->hasName() && !F->isDeclaration())
F->setLinkage(GlobalValue::InternalLinkage);
F->removeDeadConstantUsers();
- if (F->use_empty() && (F->hasLocalLinkage() ||
- F->hasLinkOnceLinkage())) {
- M.getFunctionList().erase(F);
+ if (F->use_empty() && (F->hasLocalLinkage() || F->hasLinkOnceLinkage())) {
+ F->eraseFromParent();
Changed = true;
++NumFnDeleted;
} else if (F->hasLocalLinkage()) {
@@ -2343,6 +2340,12 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
dyn_cast<ConstantInt>(getVal(Values, SI->getCondition()));
if (!Val) return false; // Cannot determine.
NewBB = SI->getSuccessor(SI->findCaseValue(Val));
+ } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) {
+ Value *Val = getVal(Values, IBI->getAddress())->stripPointerCasts();
+ if (BlockAddress *BA = dyn_cast<BlockAddress>(Val))
+ NewBB = BA->getBasicBlock();
+ else
+ return false; // Cannot determine.
} else if (ReturnInst *RI = dyn_cast<ReturnInst>(CurInst)) {
if (RI->getNumOperands())
RetVal = getVal(Values, RI->getOperand(0));
diff --git a/lib/Transforms/IPO/IPConstantPropagation.cpp b/lib/Transforms/IPO/IPConstantPropagation.cpp
index 7b0e9c7..023e642 100644
--- a/lib/Transforms/IPO/IPConstantPropagation.cpp
+++ b/lib/Transforms/IPO/IPConstantPropagation.cpp
@@ -24,7 +24,6 @@
#include "llvm/Pass.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/CallSite.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;
@@ -35,7 +34,7 @@ STATISTIC(NumReturnValProped, "Number of return values turned into constants");
namespace {
/// IPCP - The interprocedural constant propagation pass
///
- struct VISIBILITY_HIDDEN IPCP : public ModulePass {
+ struct IPCP : public ModulePass {
static char ID; // Pass identification, replacement for typeid
IPCP() : ModulePass(&ID) {}
@@ -87,6 +86,9 @@ bool IPCP::PropagateConstantsIntoArguments(Function &F) {
unsigned NumNonconstant = 0;
for (Value::use_iterator UI = F.use_begin(), E = F.use_end(); UI != E; ++UI) {
+ // Ignore blockaddress uses.
+ if (isa<BlockAddress>(*UI)) continue;
+
// Used by a non-instruction, or not the callee of a function, do not
// transform.
if (!isa<CallInst>(*UI) && !isa<InvokeInst>(*UI))
diff --git a/lib/Transforms/IPO/IPO.cpp b/lib/Transforms/IPO/IPO.cpp
index 43066076..83e8624 100644
--- a/lib/Transforms/IPO/IPO.cpp
+++ b/lib/Transforms/IPO/IPO.cpp
@@ -63,7 +63,7 @@ void LLVMAddPruneEHPass(LLVMPassManagerRef PM) {
}
void LLVMAddRaiseAllocationsPass(LLVMPassManagerRef PM) {
- unwrap(PM)->add(createRaiseAllocationsPass());
+ // FIXME: Remove in LLVM 3.0.
}
void LLVMAddStripDeadPrototypesPass(LLVMPassManagerRef PM) {
diff --git a/lib/Transforms/IPO/InlineAlways.cpp b/lib/Transforms/IPO/InlineAlways.cpp
index 2344403..f11ecae 100644
--- a/lib/Transforms/IPO/InlineAlways.cpp
+++ b/lib/Transforms/IPO/InlineAlways.cpp
@@ -21,7 +21,6 @@
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Support/CallSite.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/InlinerPass.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -31,7 +30,7 @@ using namespace llvm;
namespace {
// AlwaysInliner only inlines functions that are mark as "always inline".
- class VISIBILITY_HIDDEN AlwaysInliner : public Inliner {
+ class AlwaysInliner : public Inliner {
// Functions that are never inlined
SmallPtrSet<const Function*, 16> NeverInline;
InlineCostAnalyzer CA;
diff --git a/lib/Transforms/IPO/InlineSimple.cpp b/lib/Transforms/IPO/InlineSimple.cpp
index b1c643b..598043d 100644
--- a/lib/Transforms/IPO/InlineSimple.cpp
+++ b/lib/Transforms/IPO/InlineSimple.cpp
@@ -20,7 +20,6 @@
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Support/CallSite.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/InlinerPass.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -29,7 +28,7 @@ using namespace llvm;
namespace {
- class VISIBILITY_HIDDEN SimpleInliner : public Inliner {
+ class SimpleInliner : public Inliner {
// Functions that are never inlined
SmallPtrSet<const Function*, 16> NeverInline;
InlineCostAnalyzer CA;
diff --git a/lib/Transforms/IPO/Internalize.cpp b/lib/Transforms/IPO/Internalize.cpp
index e3c3c67..20ae0d5 100644
--- a/lib/Transforms/IPO/Internalize.cpp
+++ b/lib/Transforms/IPO/Internalize.cpp
@@ -19,7 +19,6 @@
#include "llvm/Pass.h"
#include "llvm/Module.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
@@ -44,7 +43,7 @@ APIList("internalize-public-api-list", cl::value_desc("list"),
cl::CommaSeparated);
namespace {
- class VISIBILITY_HIDDEN InternalizePass : public ModulePass {
+ class InternalizePass : public ModulePass {
std::set<std::string> ExternalNames;
/// If no api symbols were specified and a main function is defined,
/// assume the main function is the only API
diff --git a/lib/Transforms/IPO/LoopExtractor.cpp b/lib/Transforms/IPO/LoopExtractor.cpp
index 02ac3bb..fd69aeb 100644
--- a/lib/Transforms/IPO/LoopExtractor.cpp
+++ b/lib/Transforms/IPO/LoopExtractor.cpp
@@ -22,7 +22,6 @@
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/FunctionUtils.h"
#include "llvm/ADT/Statistic.h"
@@ -33,7 +32,7 @@ using namespace llvm;
STATISTIC(NumExtracted, "Number of loops extracted");
namespace {
- struct VISIBILITY_HIDDEN LoopExtractor : public LoopPass {
+ struct LoopExtractor : public LoopPass {
static char ID; // Pass identification, replacement for typeid
unsigned NumLoops;
diff --git a/lib/Transforms/IPO/LowerSetJmp.cpp b/lib/Transforms/IPO/LowerSetJmp.cpp
index 55194b3..4d61e83 100644
--- a/lib/Transforms/IPO/LowerSetJmp.cpp
+++ b/lib/Transforms/IPO/LowerSetJmp.cpp
@@ -43,14 +43,10 @@
#include "llvm/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CFG.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/VectorExtras.h"
-#include "llvm/ADT/SmallVector.h"
#include <map>
using namespace llvm;
@@ -62,8 +58,7 @@ STATISTIC(InvokesTransformed , "Number of invokes modified");
namespace {
//===--------------------------------------------------------------------===//
// LowerSetJmp pass implementation.
- class VISIBILITY_HIDDEN LowerSetJmp : public ModulePass,
- public InstVisitor<LowerSetJmp> {
+ class LowerSetJmp : public ModulePass, public InstVisitor<LowerSetJmp> {
// LLVM library functions...
Constant *InitSJMap; // __llvm_sjljeh_init_setjmpmap
Constant *DestroySJMap; // __llvm_sjljeh_destroy_setjmpmap
@@ -110,7 +105,7 @@ namespace {
void TransformLongJmpCall(CallInst* Inst);
void TransformSetJmpCall(CallInst* Inst);
- bool IsTransformableFunction(const std::string& Name);
+ bool IsTransformableFunction(StringRef Name);
public:
static char ID; // Pass identification, replacement for typeid
LowerSetJmp() : ModulePass(&ID) {}
@@ -251,13 +246,8 @@ bool LowerSetJmp::doInitialization(Module& M)
// "llvm.{setjmp,longjmp}" functions and none of the setjmp/longjmp error
// handling functions (beginning with __llvm_sjljeh_...they don't throw
// exceptions).
-bool LowerSetJmp::IsTransformableFunction(const std::string& Name) {
- std::string SJLJEh("__llvm_sjljeh");
-
- if (Name.size() > SJLJEh.size())
- return std::string(Name.begin(), Name.begin() + SJLJEh.size()) != SJLJEh;
-
- return true;
+bool LowerSetJmp::IsTransformableFunction(StringRef Name) {
+ return !Name.startswith("__llvm_sjljeh_");
}
// TransformLongJmpCall - Transform a longjmp call into a call to the
@@ -265,8 +255,7 @@ bool LowerSetJmp::IsTransformableFunction(const std::string& Name) {
// throwing the exception for us.
void LowerSetJmp::TransformLongJmpCall(CallInst* Inst)
{
- const Type* SBPTy =
- Type::getInt8PtrTy(Inst->getContext());
+ const Type* SBPTy = Type::getInt8PtrTy(Inst->getContext());
// Create the call to "__llvm_sjljeh_throw_longjmp". This takes the
// same parameters as "longjmp", except that the buffer is cast to a
@@ -274,10 +263,8 @@ void LowerSetJmp::TransformLongJmpCall(CallInst* Inst)
// Inst's uses and doesn't get a name.
CastInst* CI =
new BitCastInst(Inst->getOperand(1), SBPTy, "LJBuf", Inst);
- SmallVector<Value *, 2> Args;
- Args.push_back(CI);
- Args.push_back(Inst->getOperand(2));
- CallInst::Create(ThrowLongJmp, Args.begin(), Args.end(), "", Inst);
+ Value *Args[] = { CI, Inst->getOperand(2) };
+ CallInst::Create(ThrowLongJmp, Args, Args + 2, "", Inst);
SwitchValuePair& SVP = SwitchValMap[Inst->getParent()->getParent()];
@@ -392,11 +379,11 @@ void LowerSetJmp::TransformSetJmpCall(CallInst* Inst)
Type::getInt8PtrTy(Inst->getContext());
CastInst* BufPtr =
new BitCastInst(Inst->getOperand(1), SBPTy, "SBJmpBuf", Inst);
- std::vector<Value*> Args =
- make_vector<Value*>(GetSetJmpMap(Func), BufPtr,
- ConstantInt::get(Type::getInt32Ty(Inst->getContext()),
- SetJmpIDMap[Func]++), 0);
- CallInst::Create(AddSJToMap, Args.begin(), Args.end(), "", Inst);
+ Value *Args[] = {
+ GetSetJmpMap(Func), BufPtr,
+ ConstantInt::get(Type::getInt32Ty(Inst->getContext()), SetJmpIDMap[Func]++)
+ };
+ CallInst::Create(AddSJToMap, Args, Args + 3, "", Inst);
// We are guaranteed that there are no values live across basic blocks
// (because we are "not in SSA form" yet), but there can still be values live
diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp
index 13bbf9c..b2bdabc 100644
--- a/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/lib/Transforms/IPO/MergeFunctions.cpp
@@ -51,7 +51,6 @@
#include "llvm/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CallSite.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -62,7 +61,7 @@ using namespace llvm;
STATISTIC(NumFunctionsMerged, "Number of functions merged");
namespace {
- struct VISIBILITY_HIDDEN MergeFunctions : public ModulePass {
+ struct MergeFunctions : public ModulePass {
static char ID; // Pass identification, replacement for typeid
MergeFunctions() : ModulePass(&ID) {}
diff --git a/lib/Transforms/IPO/PartialInlining.cpp b/lib/Transforms/IPO/PartialInlining.cpp
index 8f858d3..b955b97 100644
--- a/lib/Transforms/IPO/PartialInlining.cpp
+++ b/lib/Transforms/IPO/PartialInlining.cpp
@@ -21,14 +21,13 @@
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/FunctionUtils.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/CFG.h"
using namespace llvm;
STATISTIC(NumPartialInlined, "Number of functions partially inlined");
namespace {
- struct VISIBILITY_HIDDEN PartialInliner : public ModulePass {
+ struct PartialInliner : public ModulePass {
virtual void getAnalysisUsage(AnalysisUsage &AU) const { }
static char ID; // Pass identification, replacement for typeid
PartialInliner() : ModulePass(&ID) {}
diff --git a/lib/Transforms/IPO/PartialSpecialization.cpp b/lib/Transforms/IPO/PartialSpecialization.cpp
index 0e1fdb9..084b94e 100644
--- a/lib/Transforms/IPO/PartialSpecialization.cpp
+++ b/lib/Transforms/IPO/PartialSpecialization.cpp
@@ -27,7 +27,6 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Support/CallSite.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/ADT/DenseSet.h"
#include <map>
using namespace llvm;
@@ -41,7 +40,7 @@ static const int CallsMin = 5;
static const double ConstValPercent = .1;
namespace {
- class VISIBILITY_HIDDEN PartSpec : public ModulePass {
+ class PartSpec : public ModulePass {
void scanForInterest(Function&, SmallVector<int, 6>&);
int scanDistribution(Function&, int, std::map<Constant*, int>&);
public :
diff --git a/lib/Transforms/IPO/PruneEH.cpp b/lib/Transforms/IPO/PruneEH.cpp
index daf81e9..3ae771c 100644
--- a/lib/Transforms/IPO/PruneEH.cpp
+++ b/lib/Transforms/IPO/PruneEH.cpp
@@ -27,7 +27,6 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CFG.h"
-#include "llvm/Support/Compiler.h"
#include <set>
#include <algorithm>
using namespace llvm;
@@ -36,7 +35,7 @@ STATISTIC(NumRemoved, "Number of invokes removed");
STATISTIC(NumUnreach, "Number of noreturn calls optimized");
namespace {
- struct VISIBILITY_HIDDEN PruneEH : public CallGraphSCCPass {
+ struct PruneEH : public CallGraphSCCPass {
static char ID; // Pass identification, replacement for typeid
PruneEH() : CallGraphSCCPass(&ID) {}
diff --git a/lib/Transforms/IPO/RaiseAllocations.cpp b/lib/Transforms/IPO/RaiseAllocations.cpp
deleted file mode 100644
index deb4405..0000000
--- a/lib/Transforms/IPO/RaiseAllocations.cpp
+++ /dev/null
@@ -1,173 +0,0 @@
-//===- RaiseAllocations.cpp - Convert @free calls to insts ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the RaiseAllocations pass which convert free calls to free
-// instructions.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "raiseallocs"
-#include "llvm/Transforms/IPO.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
-#include "llvm/Instructions.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/ADT/Statistic.h"
-#include <algorithm>
-using namespace llvm;
-
-STATISTIC(NumRaised, "Number of allocations raised");
-
-namespace {
- // RaiseAllocations - Turn @free calls into the appropriate
- // instruction.
- //
- class VISIBILITY_HIDDEN RaiseAllocations : public ModulePass {
- Function *FreeFunc; // Functions in the module we are processing
- // Initialized by doPassInitializationVirt
- public:
- static char ID; // Pass identification, replacement for typeid
- RaiseAllocations()
- : ModulePass(&ID), FreeFunc(0) {}
-
- // doPassInitialization - For the raise allocations pass, this finds a
- // declaration for free if it exists.
- //
- void doInitialization(Module &M);
-
- // run - This method does the actual work of converting instructions over.
- //
- bool runOnModule(Module &M);
- };
-} // end anonymous namespace
-
-char RaiseAllocations::ID = 0;
-static RegisterPass<RaiseAllocations>
-X("raiseallocs", "Raise allocations from calls to instructions");
-
-// createRaiseAllocationsPass - The interface to this file...
-ModulePass *llvm::createRaiseAllocationsPass() {
- return new RaiseAllocations();
-}
-
-
-// If the module has a symbol table, they might be referring to the free
-// function. If this is the case, grab the method pointers that the module is
-// using.
-//
-// Lookup @free in the symbol table, for later use. If they don't
-// exist, or are not external, we do not worry about converting calls to that
-// function into the appropriate instruction.
-//
-void RaiseAllocations::doInitialization(Module &M) {
- // Get free prototype if it exists!
- FreeFunc = M.getFunction("free");
- if (FreeFunc) {
- const FunctionType* TyWeHave = FreeFunc->getFunctionType();
-
- // Get the expected prototype for void free(i8*)
- const FunctionType *Free1Type =
- FunctionType::get(Type::getVoidTy(M.getContext()),
- std::vector<const Type*>(1, PointerType::getUnqual(
- Type::getInt8Ty(M.getContext()))),
- false);
-
- if (TyWeHave != Free1Type) {
- // Check to see if the prototype was forgotten, giving us
- // void (...) * free
- // This handles the common forward declaration of: 'void free();'
- const FunctionType* Free2Type =
- FunctionType::get(Type::getVoidTy(M.getContext()), true);
-
- if (TyWeHave != Free2Type) {
- // One last try, check to see if we can find free as
- // int (...)* free. This handles the case where NOTHING was declared.
- const FunctionType* Free3Type =
- FunctionType::get(Type::getInt32Ty(M.getContext()), true);
-
- if (TyWeHave != Free3Type) {
- // Give up.
- FreeFunc = 0;
- }
- }
- }
- }
-
- // Don't mess with locally defined versions of these functions...
- if (FreeFunc && !FreeFunc->isDeclaration()) FreeFunc = 0;
-}
-
-// run - Transform calls into instructions...
-//
-bool RaiseAllocations::runOnModule(Module &M) {
- // Find the free prototype...
- doInitialization(M);
-
- bool Changed = false;
-
- // Process all free calls...
- if (FreeFunc) {
- std::vector<User*> Users(FreeFunc->use_begin(), FreeFunc->use_end());
- std::vector<Value*> EqPointers; // Values equal to FreeFunc
-
- while (!Users.empty()) {
- User *U = Users.back();
- Users.pop_back();
-
- if (Instruction *I = dyn_cast<Instruction>(U)) {
- if (isa<InvokeInst>(I))
- continue;
- CallSite CS = CallSite::get(I);
- if (CS.getInstruction() && !CS.arg_empty() &&
- (CS.getCalledFunction() == FreeFunc ||
- std::find(EqPointers.begin(), EqPointers.end(),
- CS.getCalledValue()) != EqPointers.end())) {
-
- // If no prototype was provided for free, we may need to cast the
- // source pointer. This should be really uncommon, but it's necessary
- // just in case we are dealing with weird code like this:
- // free((long)ptr);
- //
- Value *Source = *CS.arg_begin();
- if (!isa<PointerType>(Source->getType()))
- Source = new IntToPtrInst(Source,
- Type::getInt8PtrTy(M.getContext()),
- "FreePtrCast", I);
- new FreeInst(Source, I);
-
- // If the old instruction was an invoke, add an unconditional branch
- // before the invoke, which will become the new terminator.
- if (InvokeInst *II = dyn_cast<InvokeInst>(I))
- BranchInst::Create(II->getNormalDest(), I);
-
- // Delete the old call site
- if (I->getType() != Type::getVoidTy(M.getContext()))
- I->replaceAllUsesWith(UndefValue::get(I->getType()));
- I->eraseFromParent();
- Changed = true;
- ++NumRaised;
- }
- } else if (GlobalValue *GV = dyn_cast<GlobalValue>(U)) {
- Users.insert(Users.end(), GV->use_begin(), GV->use_end());
- EqPointers.push_back(GV);
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
- if (CE->isCast()) {
- Users.insert(Users.end(), CE->use_begin(), CE->use_end());
- EqPointers.push_back(CE);
- }
- }
- }
- }
-
- return Changed;
-}
diff --git a/lib/Transforms/IPO/StripDeadPrototypes.cpp b/lib/Transforms/IPO/StripDeadPrototypes.cpp
index a94d78e..4566a76 100644
--- a/lib/Transforms/IPO/StripDeadPrototypes.cpp
+++ b/lib/Transforms/IPO/StripDeadPrototypes.cpp
@@ -19,7 +19,6 @@
#include "llvm/Pass.h"
#include "llvm/Module.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/Compiler.h"
using namespace llvm;
STATISTIC(NumDeadPrototypes, "Number of dead prototypes removed");
@@ -27,7 +26,7 @@ STATISTIC(NumDeadPrototypes, "Number of dead prototypes removed");
namespace {
/// @brief Pass to remove unused function declarations.
-class VISIBILITY_HIDDEN StripDeadPrototypesPass : public ModulePass {
+class StripDeadPrototypesPass : public ModulePass {
public:
static char ID; // Pass identification, replacement for typeid
StripDeadPrototypesPass() : ModulePass(&ID) { }
diff --git a/lib/Transforms/IPO/StripSymbols.cpp b/lib/Transforms/IPO/StripSymbols.cpp
index 57aaf43..4f6369e 100644
--- a/lib/Transforms/IPO/StripSymbols.cpp
+++ b/lib/Transforms/IPO/StripSymbols.cpp
@@ -112,11 +112,11 @@ static bool OnlyUsedBy(Value *V, Value *Usr) {
static void RemoveDeadConstant(Constant *C) {
assert(C->use_empty() && "Constant is not dead!");
- SmallPtrSet<Constant *, 4> Operands;
+ SmallPtrSet<Constant*, 4> Operands;
for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i)
if (isa<DerivedType>(C->getOperand(i)->getType()) &&
OnlyUsedBy(C->getOperand(i), C))
- Operands.insert(C->getOperand(i));
+ Operands.insert(cast<Constant>(C->getOperand(i)));
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) {
if (!GV->hasLocalLinkage()) return; // Don't delete non static globals.
GV->eraseFromParent();
@@ -126,7 +126,7 @@ static void RemoveDeadConstant(Constant *C) {
C->destroyConstant();
// If the constant referenced anything, see if we can delete it as well.
- for (SmallPtrSet<Constant *, 4>::iterator OI = Operands.begin(),
+ for (SmallPtrSet<Constant*, 4>::iterator OI = Operands.begin(),
OE = Operands.end(); OI != OE; ++OI)
RemoveDeadConstant(*OI);
}
@@ -290,23 +290,13 @@ bool StripDebugDeclare::runOnModule(Module &M) {
Declare->eraseFromParent();
}
- // Delete all llvm.dbg.global_variables.
- for (Module::global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I) {
- GlobalVariable *GV = dyn_cast<GlobalVariable>(I);
- if (!GV) continue;
- if (GV->use_empty() && GV->getName().startswith("llvm.dbg.global_variable"))
- DeadConstants.push_back(GV);
- }
-
while (!DeadConstants.empty()) {
Constant *C = DeadConstants.back();
DeadConstants.pop_back();
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) {
if (GV->hasLocalLinkage())
RemoveDeadConstant(GV);
- }
- else
+ } else
RemoveDeadConstant(C);
}
diff --git a/lib/Transforms/IPO/StructRetPromotion.cpp b/lib/Transforms/IPO/StructRetPromotion.cpp
index 4442820..67fc934 100644
--- a/lib/Transforms/IPO/StructRetPromotion.cpp
+++ b/lib/Transforms/IPO/StructRetPromotion.cpp
@@ -34,7 +34,6 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -44,7 +43,7 @@ namespace {
/// SRETPromotion - This pass removes sret parameter and updates
/// function to use multiple return value.
///
- struct VISIBILITY_HIDDEN SRETPromotion : public CallGraphSCCPass {
+ struct SRETPromotion : public CallGraphSCCPass {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
CallGraphSCCPass::getAnalysisUsage(AU);
}
diff --git a/lib/Transforms/Instrumentation/BlockProfiling.cpp b/lib/Transforms/Instrumentation/BlockProfiling.cpp
index eb8f225..211a6d6 100644
--- a/lib/Transforms/Instrumentation/BlockProfiling.cpp
+++ b/lib/Transforms/Instrumentation/BlockProfiling.cpp
@@ -22,7 +22,6 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "RSProfiling.h"
@@ -30,7 +29,7 @@
using namespace llvm;
namespace {
- class VISIBILITY_HIDDEN FunctionProfiler : public RSProfilers_std {
+ class FunctionProfiler : public RSProfilers_std {
public:
static char ID;
bool runOnModule(Module &M);
diff --git a/lib/Transforms/Instrumentation/EdgeProfiling.cpp b/lib/Transforms/Instrumentation/EdgeProfiling.cpp
index b9cb275..9ae3786 100644
--- a/lib/Transforms/Instrumentation/EdgeProfiling.cpp
+++ b/lib/Transforms/Instrumentation/EdgeProfiling.cpp
@@ -20,7 +20,6 @@
#include "ProfilingUtils.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Instrumentation.h"
@@ -31,7 +30,7 @@ using namespace llvm;
STATISTIC(NumEdgesInserted, "The # of edges inserted.");
namespace {
- class VISIBILITY_HIDDEN EdgeProfiler : public ModulePass {
+ class EdgeProfiler : public ModulePass {
bool runOnModule(Module &M);
public:
static char ID; // Pass identification, replacement for typeid
diff --git a/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp b/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
index b2e6747..0a46fe5 100644
--- a/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
+++ b/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
@@ -19,7 +19,6 @@
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Analysis/ProfileInfoLoader.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -33,7 +32,7 @@ using namespace llvm;
STATISTIC(NumEdgesInserted, "The # of edges inserted.");
namespace {
- class VISIBILITY_HIDDEN OptimalEdgeProfiler : public ModulePass {
+ class OptimalEdgeProfiler : public ModulePass {
bool runOnModule(Module &M);
public:
static char ID; // Pass identification, replacement for typeid
diff --git a/lib/Transforms/Instrumentation/RSProfiling.cpp b/lib/Transforms/Instrumentation/RSProfiling.cpp
index 3b72260..c08efc1 100644
--- a/lib/Transforms/Instrumentation/RSProfiling.cpp
+++ b/lib/Transforms/Instrumentation/RSProfiling.cpp
@@ -42,7 +42,6 @@
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -72,7 +71,7 @@ namespace {
/// NullProfilerRS - The basic profiler that does nothing. It is the default
/// profiler and thus terminates RSProfiler chains. It is useful for
/// measuring framework overhead
- class VISIBILITY_HIDDEN NullProfilerRS : public RSProfilers {
+ class NullProfilerRS : public RSProfilers {
public:
static char ID; // Pass identification, replacement for typeid
bool isProfiling(Value* v) {
@@ -94,7 +93,7 @@ static RegisterAnalysisGroup<RSProfilers, true> NPT(NP);
namespace {
/// Chooser - Something that chooses when to make a sample of the profiled code
- class VISIBILITY_HIDDEN Chooser {
+ class Chooser {
public:
/// ProcessChoicePoint - is called for each basic block inserted to choose
/// between normal and sample code
@@ -108,7 +107,7 @@ namespace {
//Things that implement sampling policies
//A global value that is read-mod-stored to choose when to sample.
//A sample is taken when the global counter hits 0
- class VISIBILITY_HIDDEN GlobalRandomCounter : public Chooser {
+ class GlobalRandomCounter : public Chooser {
GlobalVariable* Counter;
Value* ResetValue;
const IntegerType* T;
@@ -120,7 +119,7 @@ namespace {
};
//Same is GRC, but allow register allocation of the global counter
- class VISIBILITY_HIDDEN GlobalRandomCounterOpt : public Chooser {
+ class GlobalRandomCounterOpt : public Chooser {
GlobalVariable* Counter;
Value* ResetValue;
AllocaInst* AI;
@@ -134,7 +133,7 @@ namespace {
//Use the cycle counter intrinsic as a source of pseudo randomness when
//deciding when to sample.
- class VISIBILITY_HIDDEN CycleCounter : public Chooser {
+ class CycleCounter : public Chooser {
uint64_t rm;
Constant *F;
public:
@@ -145,7 +144,7 @@ namespace {
};
/// ProfilerRS - Insert the random sampling framework
- struct VISIBILITY_HIDDEN ProfilerRS : public FunctionPass {
+ struct ProfilerRS : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
ProfilerRS() : FunctionPass(&ID) {}
diff --git a/lib/Transforms/Scalar/ABCD.cpp b/lib/Transforms/Scalar/ABCD.cpp
new file mode 100644
index 0000000..c8541d7
--- /dev/null
+++ b/lib/Transforms/Scalar/ABCD.cpp
@@ -0,0 +1,1104 @@
+//===------- ABCD.cpp - Removes redundant conditional branches ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass removes redundant branch instructions. This algorithm was
+// described by Rastislav Bodik, Rajiv Gupta and Vivek Sarkar in their paper
+// "ABCD: Eliminating Array Bounds Checks on Demand (2000)". The original
+// Algorithm was created to remove array bound checks for strongly typed
+// languages. This implementation expands the idea and removes any conditional
+// branches that can be proved redundant, not only those used in array bound
+// checks. With the SSI representation, each variable has a
+// constraint. By analyzing these constraints we can prove that a branch is
+// redundant. When a branch is proved redundant it means that
+// one direction will always be taken; thus, we can change this branch into an
+// unconditional jump.
+// It is advisable to run SimplifyCFG and Aggressive Dead Code Elimination
+// after ABCD to clean up the code.
+// This implementation was created based on the implementation of the ABCD
+// algorithm implemented for the compiler Jitrino.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "abcd"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/SSI.h"
+
+using namespace llvm;
+
+STATISTIC(NumBranchTested, "Number of conditional branches analyzed");
+STATISTIC(NumBranchRemoved, "Number of conditional branches removed");
+
+namespace {
+
+class ABCD : public FunctionPass {
+ public:
+ static char ID; // Pass identification, replacement for typeid.
+ ABCD() : FunctionPass(&ID) {}
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<SSI>();
+ }
+
+ bool runOnFunction(Function &F);
+
+ private:
+ /// Keep track of whether we've modified the program yet.
+ bool modified;
+
+ enum ProveResult {
+ False = 0,
+ Reduced = 1,
+ True = 2
+ };
+
+ typedef ProveResult (*meet_function)(ProveResult, ProveResult);
+ static ProveResult max(ProveResult res1, ProveResult res2) {
+ return (ProveResult) std::max(res1, res2);
+ }
+ static ProveResult min(ProveResult res1, ProveResult res2) {
+ return (ProveResult) std::min(res1, res2);
+ }
+
+ class Bound {
+ public:
+ Bound(APInt v, bool upper) : value(v), upper_bound(upper) {}
+ Bound(const Bound *b, int cnst)
+ : value(b->value - cnst), upper_bound(b->upper_bound) {}
+ Bound(const Bound *b, const APInt &cnst)
+ : value(b->value - cnst), upper_bound(b->upper_bound) {}
+
+ /// Test if Bound is an upper bound
+ bool isUpperBound() const { return upper_bound; }
+
+ /// Get the bitwidth of this bound
+ int32_t getBitWidth() const { return value.getBitWidth(); }
+
+ /// Creates a Bound incrementing the one received
+ static Bound *createIncrement(const Bound *b) {
+ return new Bound(b->isUpperBound() ? b->value+1 : b->value-1,
+ b->upper_bound);
+ }
+
+ /// Creates a Bound decrementing the one received
+ static Bound *createDecrement(const Bound *b) {
+ return new Bound(b->isUpperBound() ? b->value-1 : b->value+1,
+ b->upper_bound);
+ }
+
+ /// Test if two bounds are equal
+ static bool eq(const Bound *a, const Bound *b) {
+ if (!a || !b) return false;
+
+ assert(a->isUpperBound() == b->isUpperBound());
+ return a->value == b->value;
+ }
+
+ /// Test if val is less than or equal to Bound b
+ static bool leq(APInt val, const Bound *b) {
+ if (!b) return false;
+ return b->isUpperBound() ? val.sle(b->value) : val.sge(b->value);
+ }
+
+    /// Test if Bound a is less than or equal to Bound b
+ static bool leq(const Bound *a, const Bound *b) {
+ if (!a || !b) return false;
+
+ assert(a->isUpperBound() == b->isUpperBound());
+ return a->isUpperBound() ? a->value.sle(b->value) :
+ a->value.sge(b->value);
+ }
+
+    /// Test if Bound a is less than Bound b
+ static bool lt(const Bound *a, const Bound *b) {
+ if (!a || !b) return false;
+
+ assert(a->isUpperBound() == b->isUpperBound());
+ return a->isUpperBound() ? a->value.slt(b->value) :
+ a->value.sgt(b->value);
+ }
+
+    /// Test if Bound b is greater than or equal to val
+ static bool geq(const Bound *b, APInt val) {
+ return leq(val, b);
+ }
+
+    /// Test if Bound a is greater than or equal to Bound b
+ static bool geq(const Bound *a, const Bound *b) {
+ return leq(b, a);
+ }
+
+ private:
+ APInt value;
+ bool upper_bound;
+ };
+
+ /// This class is used to store results some parts of the graph,
+ /// so information does not need to be recalculated. The maximum false,
+ /// minimum true and minimum reduced results are stored
+ class MemoizedResultChart {
+ public:
+ MemoizedResultChart()
+ : max_false(NULL), min_true(NULL), min_reduced(NULL) {}
+
+ /// Returns the max false
+ Bound *getFalse() const { return max_false; }
+
+ /// Returns the min true
+ Bound *getTrue() const { return min_true; }
+
+ /// Returns the min reduced
+ Bound *getReduced() const { return min_reduced; }
+
+ /// Return the stored result for this bound
+ ProveResult getResult(const Bound *bound) const;
+
+ /// Stores a false found
+ void addFalse(Bound *bound);
+
+ /// Stores a true found
+ void addTrue(Bound *bound);
+
+ /// Stores a Reduced found
+ void addReduced(Bound *bound);
+
+ /// Clears redundant reduced
+ /// If a min_true is smaller than a min_reduced then the min_reduced
+ /// is unnecessary and then removed. It also works for min_reduced
+    /// being smaller than max_false.
+ void clearRedundantReduced();
+
+ void clear() {
+ delete max_false;
+ delete min_true;
+ delete min_reduced;
+ }
+
+ private:
+ Bound *max_false, *min_true, *min_reduced;
+ };
+
+ /// This class stores the result found for a node of the graph,
+ /// so these results do not need to be recalculated, only searched for.
+ class MemoizedResult {
+ public:
+ /// Test if there is true result stored from b to a
+    /// that is less than the bound
+ bool hasTrue(Value *b, const Bound *bound) const {
+ Bound *trueBound = map.lookup(b).getTrue();
+ return trueBound && Bound::leq(trueBound, bound);
+ }
+
+ /// Test if there is false result stored from b to a
+    /// that is less than the bound
+ bool hasFalse(Value *b, const Bound *bound) const {
+ Bound *falseBound = map.lookup(b).getFalse();
+ return falseBound && Bound::leq(falseBound, bound);
+ }
+
+ /// Test if there is reduced result stored from b to a
+    /// that is less than the bound
+ bool hasReduced(Value *b, const Bound *bound) const {
+ Bound *reducedBound = map.lookup(b).getReduced();
+ return reducedBound && Bound::leq(reducedBound, bound);
+ }
+
+ /// Returns the stored bound for b
+ ProveResult getBoundResult(Value *b, Bound *bound) {
+ return map[b].getResult(bound);
+ }
+
+ /// Clears the map
+ void clear() {
+ DenseMapIterator<Value*, MemoizedResultChart> begin = map.begin();
+ DenseMapIterator<Value*, MemoizedResultChart> end = map.end();
+ for (; begin != end; ++begin) {
+ begin->second.clear();
+ }
+ map.clear();
+ }
+
+ /// Stores the bound found
+ void updateBound(Value *b, Bound *bound, const ProveResult res);
+
+ private:
+    // Maps a node in the graph to the results found for it.
+ DenseMap<Value*, MemoizedResultChart> map;
+ };
+
+ /// This class represents an edge in the inequality graph used by the
+ /// ABCD algorithm. An edge connects node v to node u with a value c if
+ /// we could infer a constraint v <= u + c in the source program.
+ class Edge {
+ public:
+ Edge(Value *V, APInt val, bool upper)
+ : vertex(V), value(val), upper_bound(upper) {}
+
+ Value *getVertex() const { return vertex; }
+ const APInt &getValue() const { return value; }
+ bool isUpperBound() const { return upper_bound; }
+
+ private:
+ Value *vertex;
+ APInt value;
+ bool upper_bound;
+ };
+
+ /// Weighted and Directed graph to represent constraints.
+ /// There is one type of constraint, a <= b + X, which will generate an
+ /// edge from b to a with weight X.
+ class InequalityGraph {
+ public:
+
+ /// Adds an edge from V_from to V_to with weight value
+ void addEdge(Value *V_from, Value *V_to, APInt value, bool upper);
+
+ /// Test if there is a node V
+ bool hasNode(Value *V) const { return graph.count(V); }
+
+ /// Test if there is any edge from V in the upper direction
+ bool hasEdge(Value *V, bool upper) const;
+
+ /// Returns all edges pointed by vertex V
+ SmallPtrSet<Edge *, 16> getEdges(Value *V) const {
+ return graph.lookup(V);
+ }
+
+ /// Prints the graph in dot format.
+ /// Blue edges represent upper bound and Red lower bound.
+ void printGraph(raw_ostream &OS, Function &F) const {
+ printHeader(OS, F);
+ printBody(OS);
+ printFooter(OS);
+ }
+
+ /// Clear the graph
+ void clear() {
+ graph.clear();
+ }
+
+ private:
+ DenseMap<Value *, SmallPtrSet<Edge *, 16> > graph;
+
+ /// Adds a Node to the graph.
+ DenseMap<Value *, SmallPtrSet<Edge *, 16> >::iterator addNode(Value *V) {
+ SmallPtrSet<Edge *, 16> p;
+ return graph.insert(std::make_pair(V, p)).first;
+ }
+
+ /// Prints the header of the dot file
+ void printHeader(raw_ostream &OS, Function &F) const;
+
+ /// Prints the footer of the dot file
+ void printFooter(raw_ostream &OS) const {
+ OS << "}\n";
+ }
+
+ /// Prints the body of the dot file
+ void printBody(raw_ostream &OS) const;
+
+ /// Prints vertex source to the dot file
+ void printVertex(raw_ostream &OS, Value *source) const;
+
+ /// Prints the edge to the dot file
+ void printEdge(raw_ostream &OS, Value *source, Edge *edge) const;
+
+ void printName(raw_ostream &OS, Value *info) const;
+ };
+
+ /// Iterates through all BasicBlocks, if the Terminator Instruction
+  /// uses a Comparator Instruction, all operands of this comparator
+ /// are sent to be transformed to SSI. Only Instruction operands are
+ /// transformed.
+ void createSSI(Function &F);
+
+ /// Creates the graphs for this function.
+ /// It will look for all comparators used in branches, and create them.
+ /// These comparators will create constraints for any instruction as an
+ /// operand.
+ void executeABCD(Function &F);
+
+ /// Seeks redundancies in the comparator instruction CI.
+ /// If the ABCD algorithm can prove that the comparator CI always
+ /// takes one way, then the Terminator Instruction TI is substituted from
+  /// a conditional branch to an unconditional one.
+ /// This code basically receives a comparator, and verifies which kind of
+ /// instruction it is. Depending on the kind of instruction, we use different
+ /// strategies to prove its redundancy.
+ void seekRedundancy(ICmpInst *ICI, TerminatorInst *TI);
+
+ /// Substitutes Terminator Instruction TI, that is a conditional branch,
+ /// with one unconditional branch. Succ_edge determines if the new
+ /// unconditional edge will be the first or second edge of the former TI
+ /// instruction.
+ void removeRedundancy(TerminatorInst *TI, bool Succ_edge);
+
+  /// When a conditional branch is removed, the BasicBlock that is no longer
+ /// reachable will have problems in phi functions. This method fixes these
+ /// phis removing the former BasicBlock from the list of incoming BasicBlocks
+ /// of all phis. In case the phi remains with no predecessor it will be
+ /// marked to be removed later.
+ void fixPhi(BasicBlock *BB, BasicBlock *Succ);
+
+ /// Removes phis that have no predecessor
+ void removePhis();
+
+ /// Creates constraints for Instructions.
+ /// If the constraint for this instruction has already been created
+ /// nothing is done.
+ void createConstraintInstruction(Instruction *I);
+
+ /// Creates constraints for Binary Operators.
+ /// It will create constraints only for addition and subtraction,
+ /// the other binary operations are not treated by ABCD.
+ /// For additions in the form a = b + X and a = X + b, where X is a constant,
+ /// the constraint a <= b + X can be obtained. For this constraint, an edge
+ /// a->b with weight X is added to the lower bound graph, and an edge
+ /// b->a with weight -X is added to the upper bound graph.
+ /// Only subtractions in the format a = b - X is used by ABCD.
+ /// Edges are created using the same semantic as addition.
+ void createConstraintBinaryOperator(BinaryOperator *BO);
+
+ /// Creates constraints for Comparator Instructions.
+ /// Only comparators that have any of the following operators
+ /// are used to create constraints: >=, >, <=, <. And only if
+ /// at least one operand is an Instruction. In a Comparator Instruction
+ /// a op b, there will be 4 sigma functions a_t, a_f, b_t and b_f. Where
+ /// t and f represent sigma for operands in true and false branches. The
+ /// following constraints can be obtained. a_t <= a, a_f <= a, b_t <= b and
+ /// b_f <= b. There are two more constraints that depend on the operator.
+ /// For the operator <= : a_t <= b_t and b_f <= a_f-1
+ /// For the operator < : a_t <= b_t-1 and b_f <= a_f
+ /// For the operator >= : b_t <= a_t and a_f <= b_f-1
+ /// For the operator > : b_t <= a_t-1 and a_f <= b_f
+ void createConstraintCmpInst(ICmpInst *ICI, TerminatorInst *TI);
+
+ /// Creates constraints for PHI nodes.
+ /// In a PHI node a = phi(b,c) we can create the constraint
+ /// a<= max(b,c). With this constraint there will be the edges,
+ /// b->a and c->a with weight 0 in the lower bound graph, and the edges
+ /// a->b and a->c with weight 0 in the upper bound graph.
+ void createConstraintPHINode(PHINode *PN);
+
+  /// Given a binary operator, we are only interested in the case
+ /// that one operand is an Instruction and the other is a ConstantInt. In
+ /// this case the method returns true, otherwise false. It also obtains the
+ /// Instruction and ConstantInt from the BinaryOperator and returns it.
+ bool createBinaryOperatorInfo(BinaryOperator *BO, Instruction **I1,
+ Instruction **I2, ConstantInt **C1,
+ ConstantInt **C2);
+
+ /// This method creates a constraint between a Sigma and an Instruction.
+ /// These constraints are created as soon as we find a comparator that uses a
+ /// SSI variable.
+ void createConstraintSigInst(Instruction *I_op, BasicBlock *BB_succ_t,
+ BasicBlock *BB_succ_f, PHINode **SIG_op_t,
+ PHINode **SIG_op_f);
+
+  /// If PN_op1 and PN_op2 are different from NULL, create a constraint
+ /// PN_op2 -> PN_op1 with value. In case any of them is NULL, replace
+ /// with the respective V_op#, if V_op# is a ConstantInt.
+ void createConstraintSigSig(PHINode *SIG_op1, PHINode *SIG_op2, APInt value);
+
+ /// Returns the sigma representing the Instruction I in BasicBlock BB.
+ /// Returns NULL in case there is no sigma for this Instruction in this
+ /// Basic Block. This methods assume that sigmas are the first instructions
+ /// in a block, and that there can be only two sigmas in a block. So it will
+ /// only look on the first two instructions of BasicBlock BB.
+ PHINode *findSigma(BasicBlock *BB, Instruction *I);
+
+ /// Original ABCD algorithm to prove redundant checks.
+ /// This implementation works on any kind of inequality branch.
+ bool demandProve(Value *a, Value *b, int c, bool upper_bound);
+
+ /// Prove that distance between b and a is <= bound
+ ProveResult prove(Value *a, Value *b, Bound *bound, unsigned level);
+
+ /// Updates the distance value for a and b
+ void updateMemDistance(Value *a, Value *b, Bound *bound, unsigned level,
+ meet_function meet);
+
+ InequalityGraph inequality_graph;
+ MemoizedResult mem_result;
+ DenseMap<Value*, Bound*> active;
+ SmallPtrSet<Value*, 16> created;
+ SmallVector<PHINode *, 16> phis_to_remove;
+};
+
+} // end anonymous namespace.
+
+char ABCD::ID = 0;
+static RegisterPass<ABCD> X("abcd", "ABCD: Eliminating Array Bounds Checks on Demand");
+
+
+bool ABCD::runOnFunction(Function &F) {
+ modified = false;
+ createSSI(F);
+ executeABCD(F);
+ DEBUG(inequality_graph.printGraph(errs(), F));
+ removePhis();
+
+ inequality_graph.clear();
+ mem_result.clear();
+ active.clear();
+ created.clear();
+ phis_to_remove.clear();
+ return modified;
+}
+
+/// Iterates through all BasicBlocks, if the Terminator Instruction
+/// uses a Comparator Instruction, all operands of this comparator
+/// are sent to be transformed to SSI. Only Instruction operands are
+/// transformed.
+void ABCD::createSSI(Function &F) {
+ SSI *ssi = &getAnalysis<SSI>();
+
+ SmallVector<Instruction *, 16> Insts;
+
+ for (Function::iterator begin = F.begin(), end = F.end();
+ begin != end; ++begin) {
+ BasicBlock *BB = begin;
+ TerminatorInst *TI = BB->getTerminator();
+ if (TI->getNumOperands() == 0)
+ continue;
+
+ if (ICmpInst *ICI = dyn_cast<ICmpInst>(TI->getOperand(0))) {
+ if (Instruction *I = dyn_cast<Instruction>(ICI->getOperand(0))) {
+ modified = true; // XXX: but yet createSSI might do nothing
+ Insts.push_back(I);
+ }
+ if (Instruction *I = dyn_cast<Instruction>(ICI->getOperand(1))) {
+ modified = true;
+ Insts.push_back(I);
+ }
+ }
+ }
+ ssi->createSSI(Insts);
+}
+
+/// Creates the graphs for this function.
+/// It will look for all comparators used in branches, and create them.
+/// These comparators will create constraints for any instruction as an
+/// operand.
+void ABCD::executeABCD(Function &F) {
+ for (Function::iterator begin = F.begin(), end = F.end();
+ begin != end; ++begin) {
+ BasicBlock *BB = begin;
+ TerminatorInst *TI = BB->getTerminator();
+ if (TI->getNumOperands() == 0)
+ continue;
+
+ ICmpInst *ICI = dyn_cast<ICmpInst>(TI->getOperand(0));
+ if (!ICI || !isa<IntegerType>(ICI->getOperand(0)->getType()))
+ continue;
+
+ createConstraintCmpInst(ICI, TI);
+ seekRedundancy(ICI, TI);
+ }
+}
+
+/// Seeks redundancies in the comparator instruction CI.
+/// If the ABCD algorithm can prove that the comparator CI always
+/// takes one way, then the Terminator Instruction TI is substituted from
+/// a conditional branch to a unconditional one.
+/// This code basically receives a comparator, and verifies which kind of
+/// instruction it is. Depending on the kind of instruction, we use different
+/// strategies to prove its redundancy.
+void ABCD::seekRedundancy(ICmpInst *ICI, TerminatorInst *TI) {
+ CmpInst::Predicate Pred = ICI->getPredicate();
+
+ Value *source, *dest;
+ int distance1, distance2;
+ bool upper;
+
+ switch(Pred) {
+ case CmpInst::ICMP_SGT: // signed greater than
+ upper = false;
+ distance1 = 1;
+ distance2 = 0;
+ break;
+
+ case CmpInst::ICMP_SGE: // signed greater or equal
+ upper = false;
+ distance1 = 0;
+ distance2 = -1;
+ break;
+
+ case CmpInst::ICMP_SLT: // signed less than
+ upper = true;
+ distance1 = -1;
+ distance2 = 0;
+ break;
+
+ case CmpInst::ICMP_SLE: // signed less or equal
+ upper = true;
+ distance1 = 0;
+ distance2 = 1;
+ break;
+
+ default:
+ return;
+ }
+
+ ++NumBranchTested;
+ source = ICI->getOperand(0);
+ dest = ICI->getOperand(1);
+ if (demandProve(dest, source, distance1, upper)) {
+ removeRedundancy(TI, true);
+ } else if (demandProve(dest, source, distance2, !upper)) {
+ removeRedundancy(TI, false);
+ }
+}
+
+/// Substitutes Terminator Instruction TI, that is a conditional branch,
+/// with one unconditional branch. Succ_edge determines if the new
+/// unconditional edge will be the first or second edge of the former TI
+/// instruction.
+void ABCD::removeRedundancy(TerminatorInst *TI, bool Succ_edge) {
+ BasicBlock *Succ;
+ if (Succ_edge) {
+ Succ = TI->getSuccessor(0);
+ fixPhi(TI->getParent(), TI->getSuccessor(1));
+ } else {
+ Succ = TI->getSuccessor(1);
+ fixPhi(TI->getParent(), TI->getSuccessor(0));
+ }
+
+ BranchInst::Create(Succ, TI);
+ TI->eraseFromParent(); // XXX: invoke
+ ++NumBranchRemoved;
+ modified = true;
+}
+
+/// When an conditional branch is removed, the BasicBlock that is no longer
+/// reachable will have problems in phi functions. This method fixes these
+/// phis removing the former BasicBlock from the list of incoming BasicBlocks
+/// of all phis. In case the phi remains with no predecessor it will be
+/// marked to be removed later.
+void ABCD::fixPhi(BasicBlock *BB, BasicBlock *Succ) {
+ BasicBlock::iterator begin = Succ->begin();
+ while (PHINode *PN = dyn_cast<PHINode>(begin++)) {
+ PN->removeIncomingValue(BB, false);
+ if (PN->getNumIncomingValues() == 0)
+ phis_to_remove.push_back(PN);
+ }
+}
+
+/// Removes phis that have no predecessor
+void ABCD::removePhis() {
+ for (unsigned i = 0, e = phis_to_remove.size(); i != e; ++i) {
+ PHINode *PN = phis_to_remove[i];
+ PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
+ PN->eraseFromParent();
+ }
+}
+
+/// Creates constraints for Instructions.
+/// If the constraint for this instruction has already been created
+/// nothing is done.
+void ABCD::createConstraintInstruction(Instruction *I) {
+ // Test if this instruction has not been created before
+ if (created.insert(I)) {
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
+ createConstraintBinaryOperator(BO);
+ } else if (PHINode *PN = dyn_cast<PHINode>(I)) {
+ createConstraintPHINode(PN);
+ }
+ }
+}
+
+/// Creates constraints for Binary Operators.
+/// It will create constraints only for addition and subtraction,
+/// the other binary operations are not treated by ABCD.
+/// For additions in the form a = b + X and a = X + b, where X is a constant,
+/// the constraint a <= b + X can be obtained. For this constraint, an edge
+/// a->b with weight X is added to the lower bound graph, and an edge
+/// b->a with weight -X is added to the upper bound graph.
+/// Only subtractions in the format a = b - X is used by ABCD.
+/// Edges are created using the same semantic as addition.
+void ABCD::createConstraintBinaryOperator(BinaryOperator *BO) {
+ Instruction *I1 = NULL, *I2 = NULL;
+ ConstantInt *CI1 = NULL, *CI2 = NULL;
+
+ // Test if an operand is an Instruction and the other is a Constant
+ if (!createBinaryOperatorInfo(BO, &I1, &I2, &CI1, &CI2))
+ return;
+
+ Instruction *I = 0;
+ APInt value;
+
+ switch (BO->getOpcode()) {
+ case Instruction::Add:
+ if (I1) {
+ I = I1;
+ value = CI2->getValue();
+ } else if (I2) {
+ I = I2;
+ value = CI1->getValue();
+ }
+ break;
+
+ case Instruction::Sub:
+ // Instructions like a = X-b, where X is a constant are not represented
+ // in the graph.
+ if (!I1)
+ return;
+
+ I = I1;
+ value = -CI2->getValue();
+ break;
+
+ default:
+ return;
+ }
+
+ inequality_graph.addEdge(I, BO, value, true);
+ inequality_graph.addEdge(BO, I, -value, false);
+ createConstraintInstruction(I);
+}
+
+/// Given a binary operator, we are only interest in the case
+/// that one operand is an Instruction and the other is a ConstantInt. In
+/// this case the method returns true, otherwise false. It also obtains the
+/// Instruction and ConstantInt from the BinaryOperator and returns it.
+bool ABCD::createBinaryOperatorInfo(BinaryOperator *BO, Instruction **I1,
+ Instruction **I2, ConstantInt **C1,
+ ConstantInt **C2) {
+ Value *op1 = BO->getOperand(0);
+ Value *op2 = BO->getOperand(1);
+
+ if ((*I1 = dyn_cast<Instruction>(op1))) {
+ if ((*C2 = dyn_cast<ConstantInt>(op2)))
+ return true; // First is Instruction and second ConstantInt
+
+ return false; // Both are Instruction
+ } else {
+ if ((*C1 = dyn_cast<ConstantInt>(op1)) &&
+ (*I2 = dyn_cast<Instruction>(op2)))
+ return true; // First is ConstantInt and second Instruction
+
+ return false; // Both are not Instruction
+ }
+}
+
+/// Creates constraints for Comparator Instructions.
+/// Only comparators that have any of the following operators
+/// are used to create constraints: >=, >, <=, <. And only if
+/// at least one operand is an Instruction. In a Comparator Instruction
+/// a op b, there will be 4 sigma functions a_t, a_f, b_t and b_f. Where
+/// t and f represent sigma for operands in true and false branches. The
+/// following constraints can be obtained. a_t <= a, a_f <= a, b_t <= b and
+/// b_f <= b. There are two more constraints that depend on the operator.
+/// For the operator <= : a_t <= b_t and b_f <= a_f-1
+/// For the operator < : a_t <= b_t-1 and b_f <= a_f
+/// For the operator >= : b_t <= a_t and a_f <= b_f-1
+/// For the operator > : b_t <= a_t-1 and a_f <= b_f
+void ABCD::createConstraintCmpInst(ICmpInst *ICI, TerminatorInst *TI) {
+ Value *V_op1 = ICI->getOperand(0);
+ Value *V_op2 = ICI->getOperand(1);
+
+ if (!isa<IntegerType>(V_op1->getType()))
+ return;
+
+ Instruction *I_op1 = dyn_cast<Instruction>(V_op1);
+ Instruction *I_op2 = dyn_cast<Instruction>(V_op2);
+
+ // Test if at least one operand is an Instruction
+ if (!I_op1 && !I_op2)
+ return;
+
+ BasicBlock *BB_succ_t = TI->getSuccessor(0);
+ BasicBlock *BB_succ_f = TI->getSuccessor(1);
+
+ PHINode *SIG_op1_t = NULL, *SIG_op1_f = NULL,
+ *SIG_op2_t = NULL, *SIG_op2_f = NULL;
+
+ createConstraintSigInst(I_op1, BB_succ_t, BB_succ_f, &SIG_op1_t, &SIG_op1_f);
+ createConstraintSigInst(I_op2, BB_succ_t, BB_succ_f, &SIG_op2_t, &SIG_op2_f);
+
+ int32_t width = cast<IntegerType>(V_op1->getType())->getBitWidth();
+ APInt MinusOne = APInt::getAllOnesValue(width);
+ APInt Zero = APInt::getNullValue(width);
+
+ CmpInst::Predicate Pred = ICI->getPredicate();
+ switch (Pred) {
+ case CmpInst::ICMP_SGT: // signed greater than
+ createConstraintSigSig(SIG_op2_t, SIG_op1_t, MinusOne);
+ createConstraintSigSig(SIG_op1_f, SIG_op2_f, Zero);
+ break;
+
+ case CmpInst::ICMP_SGE: // signed greater or equal
+ createConstraintSigSig(SIG_op2_t, SIG_op1_t, Zero);
+ createConstraintSigSig(SIG_op1_f, SIG_op2_f, MinusOne);
+ break;
+
+ case CmpInst::ICMP_SLT: // signed less than
+ createConstraintSigSig(SIG_op1_t, SIG_op2_t, MinusOne);
+ createConstraintSigSig(SIG_op2_f, SIG_op1_f, Zero);
+ break;
+
+ case CmpInst::ICMP_SLE: // signed less or equal
+ createConstraintSigSig(SIG_op1_t, SIG_op2_t, Zero);
+ createConstraintSigSig(SIG_op2_f, SIG_op1_f, MinusOne);
+ break;
+
+ default:
+ break;
+ }
+
+ if (I_op1)
+ createConstraintInstruction(I_op1);
+ if (I_op2)
+ createConstraintInstruction(I_op2);
+}
+
+/// Creates constraints for PHI nodes.
+/// In a PHI node a = phi(b,c) we can create the constraint
+/// a<= max(b,c). With this constraint there will be the edges,
+/// b->a and c->a with weight 0 in the lower bound graph, and the edges
+/// a->b and a->c with weight 0 in the upper bound graph.
+void ABCD::createConstraintPHINode(PHINode *PN) {
+ int32_t width = cast<IntegerType>(PN->getType())->getBitWidth();
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ Value *V = PN->getIncomingValue(i);
+ if (Instruction *I = dyn_cast<Instruction>(V)) {
+ createConstraintInstruction(I);
+ }
+ inequality_graph.addEdge(V, PN, APInt(width, 0), true);
+ inequality_graph.addEdge(V, PN, APInt(width, 0), false);
+ }
+}
+
+/// This method creates a constraint between a Sigma and an Instruction.
+/// These constraints are created as soon as we find a comparator that uses a
+/// SSI variable.
+void ABCD::createConstraintSigInst(Instruction *I_op, BasicBlock *BB_succ_t,
+ BasicBlock *BB_succ_f, PHINode **SIG_op_t,
+ PHINode **SIG_op_f) {
+ *SIG_op_t = findSigma(BB_succ_t, I_op);
+ *SIG_op_f = findSigma(BB_succ_f, I_op);
+
+ if (*SIG_op_t) {
+ int32_t width = cast<IntegerType>((*SIG_op_t)->getType())->getBitWidth();
+ inequality_graph.addEdge(I_op, *SIG_op_t, APInt(width, 0), true);
+ inequality_graph.addEdge(*SIG_op_t, I_op, APInt(width, 0), false);
+ created.insert(*SIG_op_t);
+ }
+ if (*SIG_op_f) {
+ int32_t width = cast<IntegerType>((*SIG_op_f)->getType())->getBitWidth();
+ inequality_graph.addEdge(I_op, *SIG_op_f, APInt(width, 0), true);
+ inequality_graph.addEdge(*SIG_op_f, I_op, APInt(width, 0), false);
+ created.insert(*SIG_op_f);
+ }
+}
+
+/// If PN_op1 and PN_o2 are different from NULL, create a constraint
+/// PN_op2 -> PN_op1 with value. In case any of them is NULL, replace
+/// with the respective V_op#, if V_op# is a ConstantInt.
+void ABCD::createConstraintSigSig(PHINode *SIG_op1, PHINode *SIG_op2,
+ APInt value) {
+ if (SIG_op1 && SIG_op2) {
+ inequality_graph.addEdge(SIG_op2, SIG_op1, value, true);
+ inequality_graph.addEdge(SIG_op1, SIG_op2, -value, false);
+ }
+}
+
+/// Returns the sigma representing the Instruction I in BasicBlock BB.
+/// Returns NULL in case there is no sigma for this Instruction in this
+/// Basic Block. This methods assume that sigmas are the first instructions
+/// in a block, and that there can be only two sigmas in a block. So it will
+/// only look on the first two instructions of BasicBlock BB.
+PHINode *ABCD::findSigma(BasicBlock *BB, Instruction *I) {
+ // BB has more than one predecessor, BB cannot have sigmas.
+ if (I == NULL || BB->getSinglePredecessor() == NULL)
+ return NULL;
+
+ BasicBlock::iterator begin = BB->begin();
+ BasicBlock::iterator end = BB->end();
+
+ for (unsigned i = 0; i < 2 && begin != end; ++i, ++begin) {
+ Instruction *I_succ = begin;
+ if (PHINode *PN = dyn_cast<PHINode>(I_succ))
+ if (PN->getIncomingValue(0) == I)
+ return PN;
+ }
+
+ return NULL;
+}
+
+/// Original ABCD algorithm to prove redundant checks.
+/// This implementation works on any kind of inequality branch.
+bool ABCD::demandProve(Value *a, Value *b, int c, bool upper_bound) {
+ int32_t width = cast<IntegerType>(a->getType())->getBitWidth();
+ Bound *bound = new Bound(APInt(width, c), upper_bound);
+
+ mem_result.clear();
+ active.clear();
+
+ ProveResult res = prove(a, b, bound, 0);
+ return res != False;
+}
+
+/// Prove that distance between b and a is <= bound
+ABCD::ProveResult ABCD::prove(Value *a, Value *b, Bound *bound,
+ unsigned level) {
+ // if (C[b-a<=e] == True for some e <= bound
+ // Same or stronger difference was already proven
+ if (mem_result.hasTrue(b, bound))
+ return True;
+
+ // if (C[b-a<=e] == False for some e >= bound
+ // Same or weaker difference was already disproved
+ if (mem_result.hasFalse(b, bound))
+ return False;
+
+ // if (C[b-a<=e] == Reduced for some e <= bound
+ // b is on a cycle that was reduced for same or stronger difference
+ if (mem_result.hasReduced(b, bound))
+ return Reduced;
+
+ // traversal reached the source vertex
+ if (a == b && Bound::geq(bound, APInt(bound->getBitWidth(), 0, true)))
+ return True;
+
+ // if b has no predecessor then fail
+ if (!inequality_graph.hasEdge(b, bound->isUpperBound()))
+ return False;
+
+ // a cycle was encountered
+ if (active.count(b)) {
+ if (Bound::leq(active.lookup(b), bound))
+ return Reduced; // a "harmless" cycle
+
+ return False; // an amplifying cycle
+ }
+
+ active[b] = bound;
+ PHINode *PN = dyn_cast<PHINode>(b);
+
+ // Test if a Value is a Phi. If it is a PHINode with more than 1 incoming
+ // value, then it is a phi, if it has 1 incoming value it is a sigma.
+ if (PN && PN->getNumIncomingValues() > 1)
+ updateMemDistance(a, b, bound, level, min);
+ else
+ updateMemDistance(a, b, bound, level, max);
+
+ active.erase(b);
+
+ ABCD::ProveResult res = mem_result.getBoundResult(b, bound);
+ return res;
+}
+
+/// Updates the distance value for a and b
+void ABCD::updateMemDistance(Value *a, Value *b, Bound *bound, unsigned level,
+ meet_function meet) {
+ ABCD::ProveResult res = (meet == max) ? False : True;
+
+ SmallPtrSet<Edge *, 16> Edges = inequality_graph.getEdges(b);
+ SmallPtrSet<Edge *, 16>::iterator begin = Edges.begin(), end = Edges.end();
+
+ for (; begin != end ; ++begin) {
+ if (((res >= Reduced) && (meet == max)) ||
+ ((res == False) && (meet == min))) {
+ break;
+ }
+ Edge *in = *begin;
+ if (in->isUpperBound() == bound->isUpperBound()) {
+ Value *succ = in->getVertex();
+ res = meet(res, prove(a, succ, new Bound(bound, in->getValue()),
+ level+1));
+ }
+ }
+
+ mem_result.updateBound(b, bound, res);
+}
+
+/// Return the stored result for this bound
+ABCD::ProveResult ABCD::MemoizedResultChart::getResult(const Bound *bound)const{
+ if (max_false && Bound::leq(bound, max_false))
+ return False;
+ if (min_true && Bound::leq(min_true, bound))
+ return True;
+ if (min_reduced && Bound::leq(min_reduced, bound))
+ return Reduced;
+ return False;
+}
+
+/// Stores a false found
+void ABCD::MemoizedResultChart::addFalse(Bound *bound) {
+ if (!max_false || Bound::leq(max_false, bound))
+ max_false = bound;
+
+ if (Bound::eq(max_false, min_reduced))
+ min_reduced = Bound::createIncrement(min_reduced);
+ if (Bound::eq(max_false, min_true))
+ min_true = Bound::createIncrement(min_true);
+ if (Bound::eq(min_reduced, min_true))
+ min_reduced = NULL;
+ clearRedundantReduced();
+}
+
+/// Stores a true found
+void ABCD::MemoizedResultChart::addTrue(Bound *bound) {
+ if (!min_true || Bound::leq(bound, min_true))
+ min_true = bound;
+
+ if (Bound::eq(min_true, min_reduced))
+ min_reduced = Bound::createDecrement(min_reduced);
+ if (Bound::eq(min_true, max_false))
+ max_false = Bound::createDecrement(max_false);
+ if (Bound::eq(max_false, min_reduced))
+ min_reduced = NULL;
+ clearRedundantReduced();
+}
+
+/// Stores a Reduced found
+void ABCD::MemoizedResultChart::addReduced(Bound *bound) {
+ if (!min_reduced || Bound::leq(bound, min_reduced))
+ min_reduced = bound;
+
+ if (Bound::eq(min_reduced, min_true))
+ min_true = Bound::createIncrement(min_true);
+ if (Bound::eq(min_reduced, max_false))
+ max_false = Bound::createDecrement(max_false);
+}
+
+/// Clears redundant reduced
+/// If a min_true is smaller than a min_reduced then the min_reduced
+/// is unnecessary and then removed. It also works for min_reduced
+/// begin smaller than max_false.
+void ABCD::MemoizedResultChart::clearRedundantReduced() {
+ if (min_true && min_reduced && Bound::lt(min_true, min_reduced))
+ min_reduced = NULL;
+ if (max_false && min_reduced && Bound::lt(min_reduced, max_false))
+ min_reduced = NULL;
+}
+
+/// Stores the bound found
+void ABCD::MemoizedResult::updateBound(Value *b, Bound *bound,
+ const ProveResult res) {
+ if (res == False) {
+ map[b].addFalse(bound);
+ } else if (res == True) {
+ map[b].addTrue(bound);
+ } else {
+ map[b].addReduced(bound);
+ }
+}
+
+/// Adds an edge from V_from to V_to with weight value
+void ABCD::InequalityGraph::addEdge(Value *V_to, Value *V_from,
+ APInt value, bool upper) {
+ assert(V_from->getType() == V_to->getType());
+ assert(cast<IntegerType>(V_from->getType())->getBitWidth() ==
+ value.getBitWidth());
+
+ DenseMap<Value *, SmallPtrSet<Edge *, 16> >::iterator from;
+ from = addNode(V_from);
+ from->second.insert(new Edge(V_to, value, upper));
+}
+
+/// Test if there is any edge from V in the upper direction
+bool ABCD::InequalityGraph::hasEdge(Value *V, bool upper) const {
+ SmallPtrSet<Edge *, 16> it = graph.lookup(V);
+
+ SmallPtrSet<Edge *, 16>::iterator begin = it.begin();
+ SmallPtrSet<Edge *, 16>::iterator end = it.end();
+ for (; begin != end; ++begin) {
+ if ((*begin)->isUpperBound() == upper) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/// Prints the header of the dot file
+void ABCD::InequalityGraph::printHeader(raw_ostream &OS, Function &F) const {
+ OS << "digraph dotgraph {\n";
+ OS << "label=\"Inequality Graph for \'";
+ OS << F.getNameStr() << "\' function\";\n";
+ OS << "node [shape=record,fontname=\"Times-Roman\",fontsize=14];\n";
+}
+
+/// Prints the body of the dot file
+void ABCD::InequalityGraph::printBody(raw_ostream &OS) const {
+ DenseMap<Value *, SmallPtrSet<Edge *, 16> >::iterator begin =
+ graph.begin(), end = graph.end();
+
+ for (; begin != end ; ++begin) {
+ SmallPtrSet<Edge *, 16>::iterator begin_par =
+ begin->second.begin(), end_par = begin->second.end();
+ Value *source = begin->first;
+
+ printVertex(OS, source);
+
+ for (; begin_par != end_par ; ++begin_par) {
+ Edge *edge = *begin_par;
+ printEdge(OS, source, edge);
+ }
+ }
+}
+
+/// Prints vertex source to the dot file
+///
+void ABCD::InequalityGraph::printVertex(raw_ostream &OS, Value *source) const {
+ OS << "\"";
+ printName(OS, source);
+ OS << "\"";
+ OS << " [label=\"{";
+ printName(OS, source);
+ OS << "}\"];\n";
+}
+
+/// Prints the edge to the dot file
+void ABCD::InequalityGraph::printEdge(raw_ostream &OS, Value *source,
+ Edge *edge) const {
+ Value *dest = edge->getVertex();
+ APInt value = edge->getValue();
+ bool upper = edge->isUpperBound();
+
+ OS << "\"";
+ printName(OS, source);
+ OS << "\"";
+ OS << " -> ";
+ OS << "\"";
+ printName(OS, dest);
+ OS << "\"";
+ OS << " [label=\"" << value << "\"";
+ if (upper) {
+ OS << "color=\"blue\"";
+ } else {
+ OS << "color=\"red\"";
+ }
+ OS << "];\n";
+}
+
+void ABCD::InequalityGraph::printName(raw_ostream &OS, Value *info) const {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(info)) {
+ OS << *CI;
+ } else {
+ if (!info->hasName()) {
+ info->setName("V");
+ }
+ OS << info->getNameStr();
+ }
+}
+
+/// createABCDPass - The public interface to this file...
+FunctionPass *llvm::createABCDPass() {
+ return new ABCD();
+}
diff --git a/lib/Transforms/Scalar/CMakeLists.txt b/lib/Transforms/Scalar/CMakeLists.txt
index cbeed4c..e048518 100644
--- a/lib/Transforms/Scalar/CMakeLists.txt
+++ b/lib/Transforms/Scalar/CMakeLists.txt
@@ -1,12 +1,13 @@
add_llvm_library(LLVMScalarOpts
+ ABCD.cpp
ADCE.cpp
BasicBlockPlacement.cpp
- CodeGenLICM.cpp
CodeGenPrepare.cpp
CondPropagate.cpp
ConstantProp.cpp
DCE.cpp
DeadStoreElimination.cpp
+ GEPSplitter.cpp
GVN.cpp
IndVarSimplify.cpp
InstructionCombining.cpp
@@ -16,12 +17,13 @@ add_llvm_library(LLVMScalarOpts
LoopIndexSplit.cpp
LoopRotation.cpp
LoopStrengthReduce.cpp
- LoopUnroll.cpp
+ LoopUnrollPass.cpp
LoopUnswitch.cpp
MemCpyOptimizer.cpp
Reassociate.cpp
Reg2Mem.cpp
SCCP.cpp
+ SCCVN.cpp
Scalar.cpp
ScalarReplAggregates.cpp
SimplifyCFGPass.cpp
diff --git a/lib/Transforms/Scalar/CodeGenLICM.cpp b/lib/Transforms/Scalar/CodeGenLICM.cpp
deleted file mode 100644
index 10f950e..0000000
--- a/lib/Transforms/Scalar/CodeGenLICM.cpp
+++ /dev/null
@@ -1,112 +0,0 @@
-//===- CodeGenLICM.cpp - LICM a function for code generation --------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This function performs late LICM, hoisting constants out of loops that
-// are not valid immediates. It should not be followed by instcombine,
-// because instcombine would quickly stuff the constants back into the loop.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "codegen-licm"
-#include "llvm/Transforms/Scalar.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Instructions.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Analysis/LoopPass.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/ADT/DenseMap.h"
-using namespace llvm;
-
-namespace {
- class CodeGenLICM : public LoopPass {
- virtual bool runOnLoop(Loop *L, LPPassManager &LPM);
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- public:
- static char ID; // Pass identification, replacement for typeid
- explicit CodeGenLICM() : LoopPass(&ID) {}
- };
-}
-
-char CodeGenLICM::ID = 0;
-static RegisterPass<CodeGenLICM> X("codegen-licm",
- "hoist constants out of loops");
-
-Pass *llvm::createCodeGenLICMPass() {
- return new CodeGenLICM();
-}
-
-bool CodeGenLICM::runOnLoop(Loop *L, LPPassManager &) {
- bool Changed = false;
-
- // Only visit outermost loops.
- if (L->getParentLoop()) return Changed;
-
- Instruction *PreheaderTerm = L->getLoopPreheader()->getTerminator();
- DenseMap<Constant *, BitCastInst *> HoistedConstants;
-
- for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
- I != E; ++I) {
- BasicBlock *BB = *I;
- for (BasicBlock::iterator BBI = BB->begin(), BBE = BB->end();
- BBI != BBE; ++BBI) {
- Instruction *I = BBI;
- // TODO: For now, skip all intrinsic instructions, because some of them
- // can require their operands to be constants, and we don't want to
- // break that.
- if (isa<IntrinsicInst>(I))
- continue;
- // LLVM represents fneg as -0.0-x; don't hoist the -0.0 out.
- if (BinaryOperator::isFNeg(I) ||
- BinaryOperator::isNeg(I) ||
- BinaryOperator::isNot(I))
- continue;
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
- // Don't hoist out switch case constants.
- if (isa<SwitchInst>(I) && i == 1)
- break;
- // Don't hoist out shuffle masks.
- if (isa<ShuffleVectorInst>(I) && i == 2)
- break;
- Value *Op = I->getOperand(i);
- Constant *C = dyn_cast<Constant>(Op);
- if (!C) continue;
- // TODO: Ask the target which constants are legal. This would allow
- // us to add support for hoisting ConstantInts and GlobalValues too.
- if (isa<ConstantFP>(C) ||
- isa<ConstantVector>(C) ||
- isa<ConstantAggregateZero>(C)) {
- BitCastInst *&BC = HoistedConstants[C];
- if (!BC)
- BC = new BitCastInst(C, C->getType(), "hoist", PreheaderTerm);
- I->setOperand(i, BC);
- Changed = true;
- }
- }
- }
- }
-
- return Changed;
-}
-
-void CodeGenLICM::getAnalysisUsage(AnalysisUsage &AU) const {
- // This pass preserves just about everything. List some popular things here.
- AU.setPreservesCFG();
- AU.addPreservedID(LoopSimplifyID);
- AU.addPreserved<LoopInfo>();
- AU.addPreserved<AliasAnalysis>();
- AU.addPreserved("scalar-evolution");
- AU.addPreserved("iv-users");
- AU.addPreserved("lda");
- AU.addPreserved("live-values");
-
- // Hoisting requires a loop preheader.
- AU.addRequiredID(LoopSimplifyID);
-}
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 42209b8..9ca90c3 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -318,6 +318,7 @@ static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum,
if (Invoke->getSuccessor(1) == Dest)
return;
}
+
// As a hack, never split backedges of loops. Even though the copy for any
// PHIs inserted on the backedge would be dead for exits from the loop, we
@@ -852,7 +853,7 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
// Split all critical edges where the dest block has a PHI.
TerminatorInst *BBTI = BB.getTerminator();
- if (BBTI->getNumSuccessors() > 1) {
+ if (BBTI->getNumSuccessors() > 1 && !isa<IndirectBrInst>(BBTI)) {
for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i) {
BasicBlock *SuccBB = BBTI->getSuccessor(i);
if (isa<PHINode>(SuccBB->begin()) && isCriticalEdge(BBTI, i, true))
diff --git a/lib/Transforms/Scalar/CondPropagate.cpp b/lib/Transforms/Scalar/CondPropagate.cpp
index 5b573f4..8a6c556 100644
--- a/lib/Transforms/Scalar/CondPropagate.cpp
+++ b/lib/Transforms/Scalar/CondPropagate.cpp
@@ -196,18 +196,20 @@ void CondProp::SimplifyPredecessors(SwitchInst *SI) {
// possible, and to avoid invalidating "i".
for (unsigned i = PN->getNumIncomingValues(); i != 0; --i)
if (ConstantInt *CI = dyn_cast<ConstantInt>(PN->getIncomingValue(i-1))) {
- // If we have a constant, forward the edge from its current to its
- // ultimate destination.
- unsigned DestCase = SI->findCaseValue(CI);
- RevectorBlockTo(PN->getIncomingBlock(i-1),
- SI->getSuccessor(DestCase));
- ++NumSwThread;
-
- // If there were two predecessors before this simplification, or if the
- // PHI node contained all the same value except for the one we just
- // substituted, the PHI node may be deleted. Don't iterate through it the
- // last time.
- if (SI->getCondition() != PN) return;
+ BasicBlock *PredBB = PN->getIncomingBlock(i-1);
+ if (isa<BranchInst>(PredBB->getTerminator())) {
+ // If we have a constant, forward the edge from its current to its
+ // ultimate destination.
+ unsigned DestCase = SI->findCaseValue(CI);
+ RevectorBlockTo(PredBB, SI->getSuccessor(DestCase));
+ ++NumSwThread;
+
+ // If there were two predecessors before this simplification, or if the
+ // PHI node contained all the same value except for the one we just
+ // substituted, the PHI node may be deleted. Don't iterate through it the
+ // last time.
+ if (SI->getCondition() != PN) return;
+ }
}
}
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index a7b3e75..60b12fd 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -26,6 +26,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
@@ -49,7 +50,7 @@ namespace {
}
bool runOnBasicBlock(BasicBlock &BB);
- bool handleFreeWithNonTrivialDependency(FreeInst *F, MemDepResult Dep);
+ bool handleFreeWithNonTrivialDependency(Instruction *F, MemDepResult Dep);
bool handleEndBlock(BasicBlock &BB);
bool RemoveUndeadPointers(Value* Ptr, uint64_t killPointerSize,
BasicBlock::iterator& BBI,
@@ -88,7 +89,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
Instruction *Inst = BBI++;
// If we find a store or a free, get its memory dependence.
- if (!isa<StoreInst>(Inst) && !isa<FreeInst>(Inst))
+ if (!isa<StoreInst>(Inst) && !isFreeCall(Inst))
continue;
// Don't molest volatile stores or do queries that will return "clobber".
@@ -103,8 +104,8 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
if (InstDep.isNonLocal()) continue;
// Handle frees whose dependencies are non-trivial.
- if (FreeInst *FI = dyn_cast<FreeInst>(Inst)) {
- MadeChange |= handleFreeWithNonTrivialDependency(FI, InstDep);
+ if (isFreeCall(Inst)) {
+ MadeChange |= handleFreeWithNonTrivialDependency(Inst, InstDep);
continue;
}
@@ -153,6 +154,26 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
continue;
}
}
+
+ // If this is a lifetime end marker, we can throw away the store.
+ if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(InstDep.getInst())) {
+ if (II->getIntrinsicID() == Intrinsic::lifetime_end) {
+ // Delete the store and now-dead instructions that feed it.
+ // DeleteDeadInstruction can delete the current instruction. Save BBI
+ // in case we need it.
+ WeakVH NextInst(BBI);
+
+ DeleteDeadInstruction(SI);
+
+ if (NextInst == 0) // Next instruction deleted.
+ BBI = BB.begin();
+ else if (BBI != BB.begin()) // Revisit this instruction if possible.
+ --BBI;
+ NumFastStores++;
+ MadeChange = true;
+ continue;
+ }
+ }
}
// If this block ends in a return, unwind, or unreachable, all allocas are
@@ -165,7 +186,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
/// handleFreeWithNonTrivialDependency - Handle frees of entire structures whose
/// dependency is a store to a field of that structure.
-bool DSE::handleFreeWithNonTrivialDependency(FreeInst *F, MemDepResult Dep) {
+bool DSE::handleFreeWithNonTrivialDependency(Instruction *F, MemDepResult Dep) {
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
StoreInst *Dependency = dyn_cast_or_null<StoreInst>(Dep.getInst());
@@ -175,7 +196,7 @@ bool DSE::handleFreeWithNonTrivialDependency(FreeInst *F, MemDepResult Dep) {
Value *DepPointer = Dependency->getPointerOperand()->getUnderlyingObject();
// Check for aliasing.
- if (AA.alias(F->getPointerOperand(), 1, DepPointer, 1) !=
+ if (AA.alias(F->getOperand(1), 1, DepPointer, 1) !=
AliasAnalysis::MustAlias)
return false;
diff --git a/lib/Transforms/Scalar/GEPSplitter.cpp b/lib/Transforms/Scalar/GEPSplitter.cpp
new file mode 100644
index 0000000..610a41d
--- /dev/null
+++ b/lib/Transforms/Scalar/GEPSplitter.cpp
@@ -0,0 +1,81 @@
+//===- GEPSplitter.cpp - Split complex GEPs into simple ones --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This function breaks GEPs with more than 2 non-zero operands into smaller
+// GEPs each with no more than 2 non-zero operands. This exposes redundancy
+// between GEPs with common initial operand sequences.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "split-geps"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/Pass.h"
+using namespace llvm;
+
+namespace {
+ class GEPSplitter : public FunctionPass {
+ virtual bool runOnFunction(Function &F);
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ explicit GEPSplitter() : FunctionPass(&ID) {}
+ };
+}
+
+char GEPSplitter::ID = 0;
+static RegisterPass<GEPSplitter> X("split-geps",
+ "split complex GEPs into simple GEPs");
+
+FunctionPass *llvm::createGEPSplitterPass() {
+ return new GEPSplitter();
+}
+
+bool GEPSplitter::runOnFunction(Function &F) {
+ bool Changed = false;
+
+ // Visit each GEP instruction.
+ for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
+ for (BasicBlock::iterator II = I->begin(), IE = I->end(); II != IE; )
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(II++)) {
+ unsigned NumOps = GEP->getNumOperands();
+ // Ignore GEPs which are already simple.
+ if (NumOps <= 2)
+ continue;
+ bool FirstIndexIsZero = isa<ConstantInt>(GEP->getOperand(1)) &&
+ cast<ConstantInt>(GEP->getOperand(1))->isZero();
+ if (NumOps == 3 && FirstIndexIsZero)
+ continue;
+ // The first index is special and gets expanded with a 2-operand GEP
+ // (unless it's zero, in which case we can skip this).
+ Value *NewGEP = FirstIndexIsZero ?
+ GEP->getOperand(0) :
+ GetElementPtrInst::Create(GEP->getOperand(0), GEP->getOperand(1),
+ "tmp", GEP);
+ // All remaining indices get expanded with a 3-operand GEP with zero
+ // as the second operand.
+ Value *Idxs[2];
+ Idxs[0] = ConstantInt::get(Type::getInt64Ty(F.getContext()), 0);
+ for (unsigned i = 2; i != NumOps; ++i) {
+ Idxs[1] = GEP->getOperand(i);
+ NewGEP = GetElementPtrInst::Create(NewGEP, Idxs, Idxs+2, "tmp", GEP);
+ }
+ GEP->replaceAllUsesWith(NewGEP);
+ GEP->eraseFromParent();
+ Changed = true;
+ }
+
+ return Changed;
+}
+
+void GEPSplitter::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+}
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 8859324..0e3f750 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -33,7 +33,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/MallocHelper.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
@@ -669,9 +669,10 @@ namespace {
bool runOnFunction(Function &F);
public:
static char ID; // Pass identification, replacement for typeid
- GVN() : FunctionPass(&ID) { }
+ GVN(bool nopre = false) : FunctionPass(&ID), NoPRE(nopre) { }
private:
+ bool NoPRE;
MemoryDependenceAnalysis *MD;
DominatorTree *DT;
@@ -710,7 +711,7 @@ namespace {
}
// createGVNPass - The public interface to this file...
-FunctionPass *llvm::createGVNPass() { return new GVN(); }
+FunctionPass *llvm::createGVNPass(bool NoPRE) { return new GVN(NoPRE); }
static RegisterPass<GVN> X("gvn",
"Global Value Numbering");
@@ -1243,11 +1244,20 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
Instruction *DepInst = DepInfo.getInst();
// Loading the allocation -> undef.
- if (isa<AllocationInst>(DepInst) || isMalloc(DepInst)) {
+ if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
UndefValue::get(LI->getType())));
continue;
}
+
+ // Loading immediately after lifetime begin or end -> undef.
+ if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) {
+ if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
+ II->getIntrinsicID() == Intrinsic::lifetime_end) {
+ ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
+ UndefValue::get(LI->getType())));
+ }
+ }
if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
// Reject loads and stores that are to the same address but are of
@@ -1585,12 +1595,24 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
// If this load really doesn't depend on anything, then we must be loading an
// undef value. This can happen when loading for a fresh allocation with no
// intervening stores, for example.
- if (isa<AllocationInst>(DepInst) || isMalloc(DepInst)) {
+ if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
L->replaceAllUsesWith(UndefValue::get(L->getType()));
toErase.push_back(L);
NumGVNLoad++;
return true;
}
+
+ // If this load occurs either right after a lifetime begin or a lifetime end,
+ // then the loaded value is undefined.
+ if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) {
+ if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
+ II->getIntrinsicID() == Intrinsic::lifetime_end) {
+ L->replaceAllUsesWith(UndefValue::get(L->getType()));
+ toErase.push_back(L);
+ NumGVNLoad++;
+ return true;
+ }
+ }
return false;
}
@@ -1653,7 +1675,7 @@ bool GVN::processInstruction(Instruction *I,
// Allocations are always uniquely numbered, so we can save time and memory
// by fast failing them.
- } else if (isa<AllocationInst>(I) || isa<TerminatorInst>(I)) {
+ } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
return false;
}
@@ -1788,7 +1810,7 @@ bool GVN::processBlock(BasicBlock *BB) {
/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
-bool GVN::performPRE(Function& F) {
+bool GVN::performPRE(Function &F) {
bool Changed = false;
SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
DenseMap<BasicBlock*, Value*> predMap;
@@ -1803,7 +1825,7 @@ bool GVN::performPRE(Function& F) {
BE = CurrentBlock->end(); BI != BE; ) {
Instruction *CurInst = BI++;
- if (isa<AllocationInst>(CurInst) ||
+ if (isa<AllocaInst>(CurInst) ||
isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
CurInst->getType()->isVoidTy() ||
CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
@@ -1853,6 +1875,10 @@ bool GVN::performPRE(Function& F) {
// we would need to insert instructions in more than one pred.
if (NumWithout != 1 || NumWith == 0)
continue;
+
+ // Don't do PRE across indirect branch.
+ if (isa<IndirectBrInst>(PREPred->getTerminator()))
+ continue;
// We can't do PRE safely on a critical edge, so instead we schedule
// the edge to be split and perform the PRE the next time we iterate
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index e2d9e0b..b0bc70c 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -292,7 +292,7 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L,
if (NumPreds != 1) {
// Clone the PHI and delete the original one. This lets IVUsers and
// any other maps purge the original user from their records.
- PHINode *NewPN = PN->clone();
+ PHINode *NewPN = cast<PHINode>(PN->clone());
NewPN->takeName(PN);
NewPN->insertBefore(PN);
PN->replaceAllUsesWith(NewPN);
@@ -322,7 +322,7 @@ void IndVarSimplify::RewriteNonIntegerIVs(Loop *L) {
// may not have been able to compute a trip count. Now that we've done some
// re-writing, the trip count may be computable.
if (Changed)
- SE->forgetLoopBackedgeTakenCount(L);
+ SE->forgetLoop(L);
}
bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index b41b5d4..7e75cfb 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -42,7 +42,7 @@
#include "llvm/GlobalVariable.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/MallocHelper.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -217,6 +217,7 @@ namespace {
//
Instruction *visitAdd(BinaryOperator &I);
Instruction *visitFAdd(BinaryOperator &I);
+ Value *OptimizePointerDifference(Value *LHS, Value *RHS, const Type *Ty);
Instruction *visitSub(BinaryOperator &I);
Instruction *visitFSub(BinaryOperator &I);
Instruction *visitMul(BinaryOperator &I);
@@ -284,8 +285,8 @@ namespace {
Instruction *visitInvokeInst(InvokeInst &II);
Instruction *visitPHINode(PHINode &PN);
Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
- Instruction *visitAllocationInst(AllocationInst &AI);
- Instruction *visitFreeInst(FreeInst &FI);
+ Instruction *visitAllocaInst(AllocaInst &AI);
+ Instruction *visitFree(Instruction &FI);
Instruction *visitLoadInst(LoadInst &LI);
Instruction *visitStoreInst(StoreInst &SI);
Instruction *visitBranchInst(BranchInst &BI);
@@ -416,6 +417,7 @@ namespace {
Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);
Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);
+ Instruction *FoldPHIArgLoadIntoPHI(PHINode &PN);
Instruction *OptAndOp(Instruction *Op, ConstantInt *OpRHS,
@@ -425,7 +427,7 @@ namespace {
bool isSub, Instruction &I);
Instruction *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
bool isSigned, bool Inside, Instruction &IB);
- Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocationInst &AI);
+ Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
Instruction *MatchBSwap(BinaryOperator &I);
bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
@@ -630,9 +632,32 @@ static inline Value *dyn_castFNegVal(Value *V) {
return 0;
}
-static inline Value *dyn_castNotVal(Value *V) {
+/// isFreeToInvert - Return true if the specified value is free to invert (apply
+/// ~ to). This happens in cases where the ~ can be eliminated.
+static inline bool isFreeToInvert(Value *V) {
+ // ~(~(X)) -> X.
if (BinaryOperator::isNot(V))
- return BinaryOperator::getNotArgument(V);
+ return true;
+
+ // Constants can be considered to be not'ed values.
+ if (isa<ConstantInt>(V))
+ return true;
+
+ // Compares can be inverted if they have a single use.
+ if (CmpInst *CI = dyn_cast<CmpInst>(V))
+ return CI->hasOneUse();
+
+ return false;
+}
+
+static inline Value *dyn_castNotVal(Value *V) {
+ // If this is not(not(x)) don't return that this is a not: we want the two
+ // not's to be folded first.
+ if (BinaryOperator::isNot(V)) {
+ Value *Operand = BinaryOperator::getNotArgument(V);
+ if (!isFreeToInvert(Operand))
+ return Operand;
+ }
// Constants can be considered to be not'ed values...
if (ConstantInt *C = dyn_cast<ConstantInt>(V))
@@ -640,6 +665,8 @@ static inline Value *dyn_castNotVal(Value *V) {
return 0;
}
+
+
// dyn_castFoldableMul - If this value is a multiply that can be folded into
// other computations (because it has a constant operand), return the
// non-constant operand of the multiply, and set CST to point to the multiplier.
@@ -2394,8 +2421,8 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
ConstantExpr::getSExt(CI, I.getType()) == RHSC &&
WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
// Insert the new, smaller add.
- Value *NewAdd = Builder->CreateAdd(LHSConv->getOperand(0),
- CI, "addconv");
+ Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
+ CI, "addconv");
return new SExtInst(NewAdd, I.getType());
}
}
@@ -2410,8 +2437,8 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
WillNotOverflowSignedAdd(LHSConv->getOperand(0),
RHSConv->getOperand(0))) {
// Insert the new integer add.
- Value *NewAdd = Builder->CreateAdd(LHSConv->getOperand(0),
- RHSConv->getOperand(0), "addconv");
+ Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
+ RHSConv->getOperand(0), "addconv");
return new SExtInst(NewAdd, I.getType());
}
}
@@ -2467,8 +2494,8 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
// Insert the new integer add.
- Value *NewAdd = Builder->CreateAdd(LHSConv->getOperand(0),
- CI, "addconv");
+ Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
+ CI, "addconv");
return new SIToFPInst(NewAdd, I.getType());
}
}
@@ -2483,8 +2510,8 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
WillNotOverflowSignedAdd(LHSConv->getOperand(0),
RHSConv->getOperand(0))) {
// Insert the new integer add.
- Value *NewAdd = Builder->CreateAdd(LHSConv->getOperand(0),
- RHSConv->getOperand(0), "addconv");
+ Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
+ RHSConv->getOperand(0),"addconv");
return new SIToFPInst(NewAdd, I.getType());
}
}
@@ -2493,13 +2520,210 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
return Changed ? &I : 0;
}
+
+/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
+/// code necessary to compute the offset from the base pointer (without adding
+/// in the base pointer). Return the result as a signed integer of intptr size.
+static Value *EmitGEPOffset(User *GEP, InstCombiner &IC) {
+ TargetData &TD = *IC.getTargetData();
+ gep_type_iterator GTI = gep_type_begin(GEP);
+ const Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
+ Value *Result = Constant::getNullValue(IntPtrTy);
+
+ // Build a mask for high order bits.
+ unsigned IntPtrWidth = TD.getPointerSizeInBits();
+ uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
+
+ for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
+ ++i, ++GTI) {
+ Value *Op = *i;
+ uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
+ if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
+ if (OpC->isZero()) continue;
+
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
+
+ Result = IC.Builder->CreateAdd(Result,
+ ConstantInt::get(IntPtrTy, Size),
+ GEP->getName()+".offs");
+ continue;
+ }
+
+ Constant *Scale = ConstantInt::get(IntPtrTy, Size);
+ Constant *OC =
+ ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
+ Scale = ConstantExpr::getMul(OC, Scale);
+ // Emit an add instruction.
+ Result = IC.Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
+ continue;
+ }
+ // Convert to correct type.
+ if (Op->getType() != IntPtrTy)
+ Op = IC.Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
+ if (Size != 1) {
+ Constant *Scale = ConstantInt::get(IntPtrTy, Size);
+ // We'll let instcombine(mul) convert this to a shl if possible.
+ Op = IC.Builder->CreateMul(Op, Scale, GEP->getName()+".idx");
+ }
+
+ // Emit an add instruction.
+ Result = IC.Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
+ }
+ return Result;
+}
+
+
+/// EvaluateGEPOffsetExpression - Return a value that can be used to compare
+/// the *offset* implied by a GEP to zero. For example, if we have &A[i], we
+/// want to return 'i' for "icmp ne i, 0". Note that, in general, indices can
+/// be complex, and scales are involved. The above expression would also be
+/// legal to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32).
+/// This later form is less amenable to optimization though, and we are allowed
+/// to generate the first by knowing that pointer arithmetic doesn't overflow.
+///
+/// If we can't emit an optimized form for this expression, this returns null.
+///
+static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
+ InstCombiner &IC) {
+ TargetData &TD = *IC.getTargetData();
+ gep_type_iterator GTI = gep_type_begin(GEP);
+
+ // Check to see if this gep only has a single variable index. If so, and if
+ // any constant indices are a multiple of its scale, then we can compute this
+ // in terms of the scale of the variable index. For example, if the GEP
+ // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
+ // because the expression will cross zero at the same point.
+ unsigned i, e = GEP->getNumOperands();
+ int64_t Offset = 0;
+ for (i = 1; i != e; ++i, ++GTI) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
+ // Compute the aggregate offset of constant indices.
+ if (CI->isZero()) continue;
+
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
+ } else {
+ uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
+ Offset += Size*CI->getSExtValue();
+ }
+ } else {
+ // Found our variable index.
+ break;
+ }
+ }
+
+ // If there are no variable indices, we must have a constant offset, just
+ // evaluate it the general way.
+ if (i == e) return 0;
+
+ Value *VariableIdx = GEP->getOperand(i);
+ // Determine the scale factor of the variable element. For example, this is
+ // 4 if the variable index is into an array of i32.
+ uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());
+
+ // Verify that there are no other variable indices. If so, emit the hard way.
+ for (++i, ++GTI; i != e; ++i, ++GTI) {
+ ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
+ if (!CI) return 0;
+
+ // Compute the aggregate offset of constant indices.
+ if (CI->isZero()) continue;
+
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
+ } else {
+ uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
+ Offset += Size*CI->getSExtValue();
+ }
+ }
+
+ // Okay, we know we have a single variable index, which must be a
+ // pointer/array/vector index. If there is no offset, life is simple, return
+ // the index.
+ unsigned IntPtrWidth = TD.getPointerSizeInBits();
+ if (Offset == 0) {
+ // Cast to intptrty in case a truncation occurs. If an extension is needed,
+ // we don't need to bother extending: the extension won't affect where the
+ // computation crosses zero.
+ if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth)
+ VariableIdx = new TruncInst(VariableIdx,
+ TD.getIntPtrType(VariableIdx->getContext()),
+ VariableIdx->getName(), &I);
+ return VariableIdx;
+ }
+
+ // Otherwise, there is an index. The computation we will do will be modulo
+ // the pointer size, so get it.
+ uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
+
+ Offset &= PtrSizeMask;
+ VariableScale &= PtrSizeMask;
+
+ // To do this transformation, any constant index must be a multiple of the
+ // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
+ // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
+ // multiple of the variable scale.
+ int64_t NewOffs = Offset / (int64_t)VariableScale;
+ if (Offset != NewOffs*(int64_t)VariableScale)
+ return 0;
+
+ // Okay, we can do this evaluation. Start by converting the index to intptr.
+ const Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
+ if (VariableIdx->getType() != IntPtrTy)
+ VariableIdx = CastInst::CreateIntegerCast(VariableIdx, IntPtrTy,
+ true /*SExt*/,
+ VariableIdx->getName(), &I);
+ Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
+ return BinaryOperator::CreateAdd(VariableIdx, OffsetVal, "offset", &I);
+}
+
+
+/// Optimize pointer differences into the same array into a size. Consider:
+/// &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer
+/// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
+///
+Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
+ const Type *Ty) {
+ assert(TD && "Must have target data info for this");
+
+ // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
+ // this.
+ bool Swapped;
+ GetElementPtrInst *GEP;
+
+ if ((GEP = dyn_cast<GetElementPtrInst>(LHS)) &&
+ GEP->getOperand(0) == RHS)
+ Swapped = false;
+ else if ((GEP = dyn_cast<GetElementPtrInst>(RHS)) &&
+ GEP->getOperand(0) == LHS)
+ Swapped = true;
+ else
+ return 0;
+
+ // TODO: Could also optimize &A[i] - &A[j] -> "i-j".
+
+ // Emit the offset of the GEP and an intptr_t.
+ Value *Result = EmitGEPOffset(GEP, *this);
+
+ // If we have p - gep(p, ...) then we have to negate the result.
+ if (Swapped)
+ Result = Builder->CreateNeg(Result, "diff.neg");
+
+ return Builder->CreateIntCast(Result, Ty, true);
+}
+
+
Instruction *InstCombiner::visitSub(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
if (Op0 == Op1) // sub X, X -> 0
return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
- // If this is a 'B = x-(-A)', change to B = x+A...
+ // If this is a 'B = x-(-A)', change to B = x+A.
if (Value *V = dyn_castNegVal(Op1))
return BinaryOperator::CreateAdd(Op0, V);
@@ -2507,9 +2731,11 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
return ReplaceInstUsesWith(I, Op0); // undef - X -> undef
if (isa<UndefValue>(Op1))
return ReplaceInstUsesWith(I, Op1); // X - undef -> undef
-
+ if (I.getType() == Type::getInt1Ty(*Context))
+ return BinaryOperator::CreateXor(Op0, Op1);
+
if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) {
- // Replace (-1 - A) with (~A)...
+ // Replace (-1 - A) with (~A).
if (C->isAllOnesValue())
return BinaryOperator::CreateNot(Op1);
@@ -2532,8 +2758,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
SI->getOperand(0), CU, SI->getName());
}
}
- }
- else if (SI->getOpcode() == Instruction::AShr) {
+ } else if (SI->getOpcode() == Instruction::AShr) {
if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
// Check to see if we are shifting out everything but the sign bit.
if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
@@ -2558,9 +2783,6 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
return SelectInst::Create(ZI->getOperand(0), SubOne(C), C);
}
- if (I.getType() == Type::getInt1Ty(*Context))
- return BinaryOperator::CreateXor(Op0, Op1);
-
if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
if (Op1I->getOpcode() == Instruction::Add) {
if (Op1I->getOperand(0) == Op0) // X-(X+Y) == -Y
@@ -2642,6 +2864,28 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
if (X == dyn_castFoldableMul(Op1, C2))
return BinaryOperator::CreateMul(X, ConstantExpr::getSub(C1, C2));
}
+
+ // Optimize pointer differences into the same array into a size. Consider:
+ // &A[10] - &A[0]: we should compile this to "10".
+ if (TD) {
+ if (PtrToIntInst *LHS = dyn_cast<PtrToIntInst>(Op0))
+ if (PtrToIntInst *RHS = dyn_cast<PtrToIntInst>(Op1))
+ if (Value *Res = OptimizePointerDifference(LHS->getOperand(0),
+ RHS->getOperand(0),
+ I.getType()))
+ return ReplaceInstUsesWith(I, Res);
+
+ // trunc(p)-trunc(q) -> trunc(p-q)
+ if (TruncInst *LHST = dyn_cast<TruncInst>(Op0))
+ if (TruncInst *RHST = dyn_cast<TruncInst>(Op1))
+ if (PtrToIntInst *LHS = dyn_cast<PtrToIntInst>(LHST->getOperand(0)))
+ if (PtrToIntInst *RHS = dyn_cast<PtrToIntInst>(RHST->getOperand(0)))
+ if (Value *Res = OptimizePointerDifference(LHS->getOperand(0),
+ RHS->getOperand(0),
+ I.getType()))
+ return ReplaceInstUsesWith(I, Res);
+ }
+
return 0;
}
@@ -3510,9 +3754,9 @@ static Value *getFCmpValue(bool isordered, unsigned code,
/// PredicatesFoldable - Return true if both predicates match sign or if at
/// least one of them is an equality comparison (which is signless).
static bool PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
- return (ICmpInst::isSignedPredicate(p1) == ICmpInst::isSignedPredicate(p2)) ||
- (ICmpInst::isSignedPredicate(p1) && ICmpInst::isEquality(p2)) ||
- (ICmpInst::isSignedPredicate(p2) && ICmpInst::isEquality(p1));
+ return (CmpInst::isSigned(p1) == CmpInst::isSigned(p2)) ||
+ (CmpInst::isSigned(p1) && ICmpInst::isEquality(p2)) ||
+ (CmpInst::isSigned(p2) && ICmpInst::isEquality(p1));
}
namespace {
@@ -3549,9 +3793,7 @@ struct FoldICmpLogical {
default: llvm_unreachable("Illegal logical opcode!"); return 0;
}
- bool isSigned = ICmpInst::isSignedPredicate(RHSICI->getPredicate()) ||
- ICmpInst::isSignedPredicate(ICI->getPredicate());
-
+ bool isSigned = RHSICI->isSigned() || ICI->isSigned();
Value *RV = getICmpValue(isSigned, Code, LHS, RHS, IC.getContext());
if (Instruction *I = dyn_cast<Instruction>(RV))
return I;
@@ -3848,9 +4090,9 @@ Instruction *InstCombiner::FoldAndOfICmps(Instruction &I,
// Ensure that the larger constant is on the RHS.
bool ShouldSwap;
- if (ICmpInst::isSignedPredicate(LHSCC) ||
+ if (CmpInst::isSigned(LHSCC) ||
(ICmpInst::isEquality(LHSCC) &&
- ICmpInst::isSignedPredicate(RHSCC)))
+ CmpInst::isSigned(RHSCC)))
ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
else
ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
@@ -4167,7 +4409,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
if (Instruction *CastOp = dyn_cast<Instruction>(CI->getOperand(0))) {
if ((isa<TruncInst>(CI) || isa<BitCastInst>(CI)) &&
CastOp->getNumOperands() == 2)
- if (ConstantInt *AndCI = dyn_cast<ConstantInt>(CastOp->getOperand(1))) {
+ if (ConstantInt *AndCI =dyn_cast<ConstantInt>(CastOp->getOperand(1))){
if (CastOp->getOpcode() == Instruction::And) {
// Change: and (cast (and X, C1) to T), C2
// into : and (cast X to T), trunc_or_bitcast(C1)&C2
@@ -4536,9 +4778,9 @@ Instruction *InstCombiner::FoldOrOfICmps(Instruction &I,
// Ensure that the larger constant is on the RHS.
bool ShouldSwap;
- if (ICmpInst::isSignedPredicate(LHSCC) ||
+ if (CmpInst::isSigned(LHSCC) ||
(ICmpInst::isEquality(LHSCC) &&
- ICmpInst::isSignedPredicate(RHSCC)))
+ CmpInst::isSigned(RHSCC)))
ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
else
ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
@@ -4961,14 +5203,14 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if (Ret) return Ret;
}
- if (match(Op0, m_Not(m_Value(A)))) { // ~A | Op1
+ if ((A = dyn_castNotVal(Op0))) { // ~A | Op1
if (A == Op1) // ~A | A == -1
return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
} else {
A = 0;
}
// Note, A is still live here!
- if (match(Op1, m_Not(m_Value(B)))) { // Op0 | ~B
+ if ((B = dyn_castNotVal(Op1))) { // Op0 | ~B
if (Op0 == B)
return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
@@ -5065,12 +5307,13 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
// Is this a ~ operation?
if (Value *NotOp = dyn_castNotVal(&I)) {
- // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
- // ~(~X | Y) === (X & ~Y) - De Morgan's Law
if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
if (Op0I->getOpcode() == Instruction::And ||
Op0I->getOpcode() == Instruction::Or) {
- if (dyn_castNotVal(Op0I->getOperand(1))) Op0I->swapOperands();
+ // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
+ // ~(~X | Y) === (X & ~Y) - De Morgan's Law
+ if (dyn_castNotVal(Op0I->getOperand(1)))
+ Op0I->swapOperands();
if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) {
Value *NotY =
Builder->CreateNot(Op0I->getOperand(1),
@@ -5079,6 +5322,19 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
return BinaryOperator::CreateOr(Op0NotVal, NotY);
return BinaryOperator::CreateAnd(Op0NotVal, NotY);
}
+
+ // ~(X & Y) --> (~X | ~Y) - De Morgan's Law
+ // ~(X | Y) === (~X & ~Y) - De Morgan's Law
+ if (isFreeToInvert(Op0I->getOperand(0)) &&
+ isFreeToInvert(Op0I->getOperand(1))) {
+ Value *NotX =
+ Builder->CreateNot(Op0I->getOperand(0), "notlhs");
+ Value *NotY =
+ Builder->CreateNot(Op0I->getOperand(1), "notrhs");
+ if (Op0I->getOpcode() == Instruction::And)
+ return BinaryOperator::CreateOr(NotX, NotY);
+ return BinaryOperator::CreateAnd(NotX, NotY);
+ }
}
}
}
@@ -5379,166 +5635,6 @@ static bool SubWithOverflow(Constant *&Result, Constant *In1,
IsSigned);
}
-/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
-/// code necessary to compute the offset from the base pointer (without adding
-/// in the base pointer). Return the result as a signed integer of intptr size.
-static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) {
- TargetData &TD = *IC.getTargetData();
- gep_type_iterator GTI = gep_type_begin(GEP);
- const Type *IntPtrTy = TD.getIntPtrType(I.getContext());
- Value *Result = Constant::getNullValue(IntPtrTy);
-
- // Build a mask for high order bits.
- unsigned IntPtrWidth = TD.getPointerSizeInBits();
- uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
-
- for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
- ++i, ++GTI) {
- Value *Op = *i;
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
- if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
- if (OpC->isZero()) continue;
-
- // Handle a struct index, which adds its field offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
- Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
-
- Result = IC.Builder->CreateAdd(Result,
- ConstantInt::get(IntPtrTy, Size),
- GEP->getName()+".offs");
- continue;
- }
-
- Constant *Scale = ConstantInt::get(IntPtrTy, Size);
- Constant *OC =
- ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
- Scale = ConstantExpr::getMul(OC, Scale);
- // Emit an add instruction.
- Result = IC.Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
- continue;
- }
- // Convert to correct type.
- if (Op->getType() != IntPtrTy)
- Op = IC.Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
- if (Size != 1) {
- Constant *Scale = ConstantInt::get(IntPtrTy, Size);
- // We'll let instcombine(mul) convert this to a shl if possible.
- Op = IC.Builder->CreateMul(Op, Scale, GEP->getName()+".idx");
- }
-
- // Emit an add instruction.
- Result = IC.Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
- }
- return Result;
-}
-
-
-/// EvaluateGEPOffsetExpression - Return a value that can be used to compare
-/// the *offset* implied by a GEP to zero. For example, if we have &A[i], we
-/// want to return 'i' for "icmp ne i, 0". Note that, in general, indices can
-/// be complex, and scales are involved. The above expression would also be
-/// legal to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32).
-/// This later form is less amenable to optimization though, and we are allowed
-/// to generate the first by knowing that pointer arithmetic doesn't overflow.
-///
-/// If we can't emit an optimized form for this expression, this returns null.
-///
-static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
- InstCombiner &IC) {
- TargetData &TD = *IC.getTargetData();
- gep_type_iterator GTI = gep_type_begin(GEP);
-
- // Check to see if this gep only has a single variable index. If so, and if
- // any constant indices are a multiple of its scale, then we can compute this
- // in terms of the scale of the variable index. For example, if the GEP
- // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
- // because the expression will cross zero at the same point.
- unsigned i, e = GEP->getNumOperands();
- int64_t Offset = 0;
- for (i = 1; i != e; ++i, ++GTI) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
- // Compute the aggregate offset of constant indices.
- if (CI->isZero()) continue;
-
- // Handle a struct index, which adds its field offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
- Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
- } else {
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
- Offset += Size*CI->getSExtValue();
- }
- } else {
- // Found our variable index.
- break;
- }
- }
-
- // If there are no variable indices, we must have a constant offset, just
- // evaluate it the general way.
- if (i == e) return 0;
-
- Value *VariableIdx = GEP->getOperand(i);
- // Determine the scale factor of the variable element. For example, this is
- // 4 if the variable index is into an array of i32.
- uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());
-
- // Verify that there are no other variable indices. If so, emit the hard way.
- for (++i, ++GTI; i != e; ++i, ++GTI) {
- ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
- if (!CI) return 0;
-
- // Compute the aggregate offset of constant indices.
- if (CI->isZero()) continue;
-
- // Handle a struct index, which adds its field offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
- Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
- } else {
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
- Offset += Size*CI->getSExtValue();
- }
- }
-
- // Okay, we know we have a single variable index, which must be a
- // pointer/array/vector index. If there is no offset, life is simple, return
- // the index.
- unsigned IntPtrWidth = TD.getPointerSizeInBits();
- if (Offset == 0) {
- // Cast to intptrty in case a truncation occurs. If an extension is needed,
- // we don't need to bother extending: the extension won't affect where the
- // computation crosses zero.
- if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth)
- VariableIdx = new TruncInst(VariableIdx,
- TD.getIntPtrType(VariableIdx->getContext()),
- VariableIdx->getName(), &I);
- return VariableIdx;
- }
-
- // Otherwise, there is an index. The computation we will do will be modulo
- // the pointer size, so get it.
- uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
-
- Offset &= PtrSizeMask;
- VariableScale &= PtrSizeMask;
-
- // To do this transformation, any constant index must be a multiple of the
- // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
- // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
- // multiple of the variable scale.
- int64_t NewOffs = Offset / (int64_t)VariableScale;
- if (Offset != NewOffs*(int64_t)VariableScale)
- return 0;
-
- // Okay, we can do this evaluation. Start by converting the index to intptr.
- const Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
- if (VariableIdx->getType() != IntPtrTy)
- VariableIdx = CastInst::CreateIntegerCast(VariableIdx, IntPtrTy,
- true /*SExt*/,
- VariableIdx->getName(), &I);
- Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
- return BinaryOperator::CreateAdd(VariableIdx, OffsetVal, "offset", &I);
-}
-
/// FoldGEPICmp - Fold comparisons between a GEP instruction and something
/// else. At this point we know that the GEP is on the LHS of the comparison.
@@ -5559,7 +5655,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
// If not, synthesize the offset the hard way.
if (Offset == 0)
- Offset = EmitGEPOffset(GEPLHS, I, *this);
+ Offset = EmitGEPOffset(GEPLHS, *this);
return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
Constant::getNullValue(Offset->getType()));
} else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
@@ -5645,8 +5741,8 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
(isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
(isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
// ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2) ---> (OFFSET1 cmp OFFSET2)
- Value *L = EmitGEPOffset(GEPLHS, I, *this);
- Value *R = EmitGEPOffset(GEPRHS, I, *this);
+ Value *L = EmitGEPOffset(GEPLHS, *this);
+ Value *R = EmitGEPOffset(GEPRHS, *this);
return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
}
}
@@ -6087,7 +6183,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// EQ and NE we use unsigned values.
APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
- if (ICmpInst::isSignedPredicate(I.getPredicate())) {
+ if (I.isSigned()) {
ComputeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
Op0Min, Op0Max);
ComputeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
@@ -6217,7 +6313,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// Turn a signed comparison into an unsigned one if both operands
// are known to have the same sign.
- if (I.isSignedPredicate() &&
+ if (I.isSigned() &&
((Op0KnownZero.isNegative() && Op1KnownZero.isNegative()) ||
(Op0KnownOne.isNegative() && Op1KnownOne.isNegative())))
return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
@@ -6397,7 +6493,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
if (CI->getValue().isSignBit()) {
- ICmpInst::Predicate Pred = I.isSignedPredicate()
+ ICmpInst::Predicate Pred = I.isSigned()
? I.getUnsignedPredicate()
: I.getSignedPredicate();
return new ICmpInst(Pred, Op0I->getOperand(0),
@@ -6405,7 +6501,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
}
if (CI->getValue().isMaxSignedValue()) {
- ICmpInst::Predicate Pred = I.isSignedPredicate()
+ ICmpInst::Predicate Pred = I.isSigned()
? I.getUnsignedPredicate()
: I.getSignedPredicate();
Pred = I.getSwappedPredicate(Pred);
@@ -6542,7 +6638,7 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
// work. :( The if statement below tests that condition and bails
// if it finds it.
bool DivIsSigned = DivI->getOpcode() == Instruction::SDiv;
- if (!ICI.isEquality() && DivIsSigned != ICI.isSignedPredicate())
+ if (!ICI.isEquality() && DivIsSigned != ICI.isSigned())
return 0;
if (DivRHS->isZero())
return 0; // The ProdOV computation fails on divide by zero.
@@ -6741,7 +6837,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// (icmp u/s (xor A SignBit), C) -> (icmp s/u A, (xor C SignBit))
if (!ICI.isEquality() && XorCST->getValue().isSignBit()) {
const APInt &SignBit = XorCST->getValue();
- ICmpInst::Predicate Pred = ICI.isSignedPredicate()
+ ICmpInst::Predicate Pred = ICI.isSigned()
? ICI.getUnsignedPredicate()
: ICI.getSignedPredicate();
return new ICmpInst(Pred, LHSI->getOperand(0),
@@ -6751,7 +6847,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// (icmp u/s (xor A ~SignBit), C) -> (icmp s/u (xor C ~SignBit), A)
if (!ICI.isEquality() && XorCST->getValue().isMaxSignedValue()) {
const APInt &NotSignBit = XorCST->getValue();
- ICmpInst::Predicate Pred = ICI.isSignedPredicate()
+ ICmpInst::Predicate Pred = ICI.isSigned()
? ICI.getUnsignedPredicate()
: ICI.getSignedPredicate();
Pred = ICI.getSwappedPredicate(Pred);
@@ -7009,7 +7105,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
ConstantRange CR = ICI.makeConstantRange(ICI.getPredicate(), RHSV)
.subtract(LHSV);
- if (ICI.isSignedPredicate()) {
+ if (ICI.isSigned()) {
if (CR.getLower().isSignBit()) {
return new ICmpInst(ICmpInst::ICMP_SLT, LHSI->getOperand(0),
ConstantInt::get(*Context, CR.getUpper()));
@@ -7184,7 +7280,7 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
return 0;
bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
- bool isSignedCmp = ICI.isSignedPredicate();
+ bool isSignedCmp = ICI.isSigned();
if (CastInst *CI = dyn_cast<CastInst>(ICI.getOperand(1))) {
// Not an extension from the same type?
@@ -7745,7 +7841,7 @@ static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
/// PromoteCastOfAllocation - If we find a cast of an allocation instruction,
/// try to eliminate the cast by moving the type information into the alloc.
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
- AllocationInst &AI) {
+ AllocaInst &AI) {
const PointerType *PTy = cast<PointerType>(CI.getType());
BuilderTy AllocaBuilder(*Builder);
@@ -7817,7 +7913,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
Amt = AllocaBuilder.CreateAdd(Amt, Off, "tmp");
}
- AllocationInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
+ AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
New->setAlignment(AI.getAlignment());
New->takeName(&AI);
@@ -8163,8 +8259,7 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
if (TD && GEP->hasOneUse() && isa<BitCastInst>(GEP->getOperand(0))) {
if (GEP->hasAllConstantIndices()) {
// We are guaranteed to get a constant from EmitGEPOffset.
- ConstantInt *OffsetV =
- cast<ConstantInt>(EmitGEPOffset(GEP, CI, *this));
+ ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(GEP, *this));
int64_t Offset = OffsetV->getSExtValue();
// Get the base pointer input of the bitcast, and the type it points to.
@@ -8878,7 +8973,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
// size, rewrite the allocation instruction to allocate the "right" type.
// There is no need to modify malloc calls because it is their bitcast that
// needs to be cleaned up.
- if (AllocationInst *AI = dyn_cast<AllocationInst>(Src))
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
return V;
@@ -9747,6 +9842,9 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
+ if (isFreeCall(&CI))
+ return visitFree(CI);
+
// If the caller function is nounwind, mark the call as nounwind, even if the
// callee isn't.
if (CI.getParent()->getParent()->doesNotThrow() &&
@@ -10691,6 +10789,96 @@ static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
return true;
}
+Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
+ LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));
+
+ // When processing loads, we need to propagate two bits of information to the
+ // sunk load: whether it is volatile, and what its alignment is. We currently
+ // don't sink loads when some have their alignment specified and some don't.
+ // visitLoadInst will propagate an alignment onto the load when TD is around,
+ // and if TD isn't around, we can't handle the mixed case.
+ bool isVolatile = FirstLI->isVolatile();
+ unsigned LoadAlignment = FirstLI->getAlignment();
+
+ // We can't sink the load if the loaded value could be modified between the
+ // load and the PHI.
+ if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
+ !isSafeAndProfitableToSinkLoad(FirstLI))
+ return 0;
+
+ // If the PHI is of volatile loads and the load block has multiple
+ // successors, sinking it would remove a load of the volatile value from
+ // the path through the other successor.
+ if (isVolatile &&
+ FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
+ return 0;
+
+ // Check to see if all arguments are the same operation.
+ for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
+ LoadInst *LI = dyn_cast<LoadInst>(PN.getIncomingValue(i));
+ if (!LI || !LI->hasOneUse())
+ return 0;
+
+ // We can't sink the load if the loaded value could be modified between
+ // the load and the PHI.
+ if (LI->isVolatile() != isVolatile ||
+ LI->getParent() != PN.getIncomingBlock(i) ||
+ !isSafeAndProfitableToSinkLoad(LI))
+ return 0;
+
+ // If some of the loads have an alignment specified but not all of them,
+ // we can't do the transformation.
+ if ((LoadAlignment != 0) != (LI->getAlignment() != 0))
+ return 0;
+
+ LoadAlignment = std::min(LoadAlignment, LI->getAlignment());
+
+ // If the PHI is of volatile loads and the load block has multiple
+ // successors, sinking it would remove a load of the volatile value from
+ // the path through the other successor.
+ if (isVolatile &&
+ LI->getParent()->getTerminator()->getNumSuccessors() != 1)
+ return 0;
+ }
+
+ // Okay, they are all the same operation. Create a new PHI node of the
+ // correct type, and PHI together all of the LHS's of the instructions.
+ PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
+ PN.getName()+".in");
+ NewPN->reserveOperandSpace(PN.getNumOperands()/2);
+
+ Value *InVal = FirstLI->getOperand(0);
+ NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
+
+ // Add all operands to the new PHI.
+ for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
+ Value *NewInVal = cast<LoadInst>(PN.getIncomingValue(i))->getOperand(0);
+ if (NewInVal != InVal)
+ InVal = 0;
+ NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
+ }
+
+ Value *PhiVal;
+ if (InVal) {
+ // The new PHI unions all of the same values together. This is really
+ // common, so we handle it intelligently here for compile-time speed.
+ PhiVal = InVal;
+ delete NewPN;
+ } else {
+ InsertNewInstBefore(NewPN, PN);
+ PhiVal = NewPN;
+ }
+
+ // If this was a volatile load that we are merging, make sure to loop through
+ // and mark all the input loads as non-volatile. If we don't do this, we will
+ // insert a new volatile load and the old ones will not be deletable.
+ if (isVolatile)
+ for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
+ cast<LoadInst>(PN.getIncomingValue(i))->setVolatile(false);
+
+ return new LoadInst(PhiVal, "", isVolatile, LoadAlignment);
+}
+
// FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
// operator and they all are only used by the PHI, PHI together their
@@ -10698,13 +10886,18 @@ static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
+ if (isa<GetElementPtrInst>(FirstInst))
+ return FoldPHIArgGEPIntoPHI(PN);
+ if (isa<LoadInst>(FirstInst))
+ return FoldPHIArgLoadIntoPHI(PN);
+
// Scan the instruction, looking for input operations that can be folded away.
// If all input operands to the phi are the same instruction (e.g. a cast from
// the same type or "+42") we can pull the operation through the PHI, reducing
// code size and simplifying code.
Constant *ConstantOp = 0;
const Type *CastSrcTy = 0;
- bool isVolatile = false;
+
if (isa<CastInst>(FirstInst)) {
CastSrcTy = FirstInst->getOperand(0)->getType();
} else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
@@ -10713,51 +10906,18 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
if (ConstantOp == 0)
return FoldPHIArgBinOpIntoPHI(PN);
- } else if (LoadInst *LI = dyn_cast<LoadInst>(FirstInst)) {
- isVolatile = LI->isVolatile();
- // We can't sink the load if the loaded value could be modified between the
- // load and the PHI.
- if (LI->getParent() != PN.getIncomingBlock(0) ||
- !isSafeAndProfitableToSinkLoad(LI))
- return 0;
-
- // If the PHI is of volatile loads and the load block has multiple
- // successors, sinking it would remove a load of the volatile value from
- // the path through the other successor.
- if (isVolatile &&
- LI->getParent()->getTerminator()->getNumSuccessors() != 1)
- return 0;
-
- } else if (isa<GetElementPtrInst>(FirstInst)) {
- return FoldPHIArgGEPIntoPHI(PN);
} else {
return 0; // Cannot fold this operation.
}
// Check to see if all arguments are the same operation.
for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
- if (!isa<Instruction>(PN.getIncomingValue(i))) return 0;
- Instruction *I = cast<Instruction>(PN.getIncomingValue(i));
- if (!I->hasOneUse() || !I->isSameOperationAs(FirstInst))
+ Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
+ if (I == 0 || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
return 0;
if (CastSrcTy) {
if (I->getOperand(0)->getType() != CastSrcTy)
return 0; // Cast operation must match.
- } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
- // We can't sink the load if the loaded value could be modified between
- // the load and the PHI.
- if (LI->isVolatile() != isVolatile ||
- LI->getParent() != PN.getIncomingBlock(i) ||
- !isSafeAndProfitableToSinkLoad(LI))
- return 0;
-
- // If the PHI is of volatile loads and the load block has multiple
- // successors, sinking it would remove a load of the volatile value from
- // the path through the other successor.
- if (isVolatile &&
- LI->getParent()->getTerminator()->getNumSuccessors() != 1)
- return 0;
-
} else if (I->getOperand(1) != ConstantOp) {
return 0;
}
@@ -10792,23 +10952,15 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
}
// Insert and return the new operation.
- if (CastInst* FirstCI = dyn_cast<CastInst>(FirstInst))
+ if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst))
return CastInst::Create(FirstCI->getOpcode(), PhiVal, PN.getType());
+
if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
return BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
- if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst))
- return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
- PhiVal, ConstantOp);
- assert(isa<LoadInst>(FirstInst) && "Unknown operation");
-
- // If this was a volatile load that we are merging, make sure to loop through
- // and mark all the input loads as non-volatile. If we don't do this, we will
- // insert a new volatile load and the old ones will not be deletable.
- if (isVolatile)
- for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
- cast<LoadInst>(PN.getIncomingValue(i))->setVolatile(false);
- return new LoadInst(PhiVal, "", isVolatile);
+ CmpInst *CIOp = cast<CmpInst>(FirstInst);
+ return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
+ PhiVal, ConstantOp);
}
/// DeadPHICycle - Return true if this PHI node is only used by a PHI node cycle
@@ -10940,6 +11092,31 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
}
}
}
+
+ // If there are multiple PHIs, sort their operands so that they all list
+ // the blocks in the same order. This will help identical PHIs be eliminated
+ // by other passes. Other passes shouldn't depend on this for correctness
+ // however.
+ PHINode *FirstPN = cast<PHINode>(PN.getParent()->begin());
+ if (&PN != FirstPN)
+ for (unsigned i = 0, e = FirstPN->getNumIncomingValues(); i != e; ++i) {
+ BasicBlock *BBA = PN.getIncomingBlock(i);
+ BasicBlock *BBB = FirstPN->getIncomingBlock(i);
+ if (BBA != BBB) {
+ Value *VA = PN.getIncomingValue(i);
+ unsigned j = PN.getBasicBlockIndex(BBB);
+ Value *VB = PN.getIncomingValue(j);
+ PN.setIncomingBlock(i, BBB);
+ PN.setIncomingValue(i, VB);
+ PN.setIncomingBlock(j, BBA);
+ PN.setIncomingValue(j, VA);
+ // NOTE: Instcombine normally would want us to "return &PN" if we
+ // modified any of the operands of an instruction. However, since we
+ // aren't adding or removing uses (just rearranging them) we don't do
+ // this in this case.
+ }
+ }
+
return 0;
}
@@ -11190,8 +11367,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
!isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices()) {
// Determine how much the GEP moves the pointer. We are guaranteed to get
// a constant back from EmitGEPOffset.
- ConstantInt *OffsetV =
- cast<ConstantInt>(EmitGEPOffset(&GEP, GEP, *this));
+ ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP, *this));
int64_t Offset = OffsetV->getSExtValue();
// If this GEP instruction doesn't move the pointer, just replace the GEP
@@ -11199,7 +11375,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (Offset == 0) {
// If the bitcast is of an allocation, and the allocation will be
// converted to match the type of the cast, don't touch this.
- if (isa<AllocationInst>(BCI->getOperand(0)) ||
+ if (isa<AllocaInst>(BCI->getOperand(0)) ||
isMalloc(BCI->getOperand(0))) {
// See if the bitcast simplifies, if so, don't nuke this GEP yet.
if (Instruction *I = visitBitCast(*BCI)) {
@@ -11238,21 +11414,21 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
return 0;
}
-Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
- // Convert: malloc Ty, C - where C is a constant != 1 into: malloc [C x Ty], 1
+Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
+ // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
if (AI.isArrayAllocation()) { // Check C != 1
if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
const Type *NewTy =
ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
- AllocationInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
+ AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
New->setAlignment(AI.getAlignment());
// Scan to the end of the allocation instructions, to skip over a block of
// allocas if possible...also skip interleaved debug info
//
BasicBlock::iterator It = New;
- while (isa<AllocationInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;
+ while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;
// Now that I is pointing to the first non-allocation-inst in the block,
// insert our getelementptr instruction...
@@ -11287,8 +11463,8 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
return 0;
}
-Instruction *InstCombiner::visitFreeInst(FreeInst &FI) {
- Value *Op = FI.getOperand(0);
+Instruction *InstCombiner::visitFree(Instruction &FI) {
+ Value *Op = FI.getOperand(1);
// free undef -> unreachable.
if (isa<UndefValue>(Op)) {
@@ -11302,22 +11478,8 @@ Instruction *InstCombiner::visitFreeInst(FreeInst &FI) {
// when lots of inlining happens.
if (isa<ConstantPointerNull>(Op))
return EraseInstFromFunction(FI);
-
- // Change free <ty>* (cast <ty2>* X to <ty>*) into free <ty2>* X
- if (BitCastInst *CI = dyn_cast<BitCastInst>(Op)) {
- FI.setOperand(0, CI->getOperand(0));
- return &FI;
- }
-
- // Change free (gep X, 0,0,0,0) into free(X)
- if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
- if (GEPI->hasAllZeroIndices()) {
- Worklist.Add(GEPI);
- FI.setOperand(0, GEPI->getOperand(0));
- return &FI;
- }
- }
-
+
+ // If we have a malloc call whose only use is a free call, delete both.
if (isMalloc(Op)) {
if (CallInst* CI = extractMallocCallFromBitCast(Op)) {
if (Op->hasOneUse() && CI->hasOneUse()) {
@@ -11337,7 +11499,6 @@ Instruction *InstCombiner::visitFreeInst(FreeInst &FI) {
return 0;
}
-
/// InstCombineLoadCast - Fold 'load (cast P)' -> cast (load P)' when possible.
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
const TargetData *TD) {
@@ -11838,9 +11999,11 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
return false;
--BBI;
}
- // If this isn't a store, or isn't a store to the same location, bail out.
+ // If this isn't a store, isn't a store to the same location, or if the
+ // alignments differ, bail out.
OtherStore = dyn_cast<StoreInst>(BBI);
- if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1))
+ if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
+ OtherStore->getAlignment() != SI.getAlignment())
return false;
} else {
// Otherwise, the other block ended with a conditional branch. If one of the
@@ -11855,7 +12018,8 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
for (;; --BBI) {
// Check to see if we find the matching store.
if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
- if (OtherStore->getOperand(1) != SI.getOperand(1))
+ if (OtherStore->getOperand(1) != SI.getOperand(1) ||
+ OtherStore->getAlignment() != SI.getAlignment())
return false;
break;
}
@@ -11890,7 +12054,8 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
// insert it.
BBI = DestBB->getFirstNonPHI();
InsertNewInstBefore(new StoreInst(MergedVal, SI.getOperand(1),
- OtherStore->isVolatile()), *BBI);
+ OtherStore->isVolatile(),
+ SI.getAlignment()), *BBI);
// Nuke the old stores.
EraseInstFromFunction(SI);
diff --git a/lib/Transforms/Scalar/LoopDeletion.cpp b/lib/Transforms/Scalar/LoopDeletion.cpp
index 5f93756..866d8b4 100644
--- a/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -33,8 +33,6 @@ namespace {
// Possibly eliminate loop L if it is dead.
bool runOnLoop(Loop* L, LPPassManager& LPM);
- bool SingleDominatingExit(Loop* L,
- SmallVector<BasicBlock*, 4>& exitingBlocks);
bool IsLoopDead(Loop* L, SmallVector<BasicBlock*, 4>& exitingBlocks,
SmallVector<BasicBlock*, 4>& exitBlocks,
bool &Changed, BasicBlock *Preheader);
@@ -63,25 +61,6 @@ Pass* llvm::createLoopDeletionPass() {
return new LoopDeletion();
}
-/// SingleDominatingExit - Checks that there is only a single blocks that
-/// branches out of the loop, and that it also g the latch block. Loops
-/// with multiple or non-latch-dominating exiting blocks could be dead, but we'd
-/// have to do more extensive analysis to make sure, for instance, that the
-/// control flow logic involved was or could be made loop-invariant.
-bool LoopDeletion::SingleDominatingExit(Loop* L,
- SmallVector<BasicBlock*, 4>& exitingBlocks) {
-
- if (exitingBlocks.size() != 1)
- return false;
-
- BasicBlock* latch = L->getLoopLatch();
- if (!latch)
- return false;
-
- DominatorTree& DT = getAnalysis<DominatorTree>();
- return DT.dominates(exitingBlocks[0], latch);
-}
-
/// IsLoopDead - Determined if a loop is dead. This assumes that we've already
/// checked for unique exit and exiting blocks, and that the code is in LCSSA
/// form.
@@ -154,9 +133,8 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) {
if (exitBlocks.size() != 1)
return false;
- // Loops with multiple exits or exits that don't dominate the latch
- // are too complicated to handle correctly.
- if (!SingleDominatingExit(L, exitingBlocks))
+ // Loops with multiple exits are too complicated to handle correctly.
+ if (exitingBlocks.size() != 1)
return false;
// Finally, we have to check that the loop really is dead.
@@ -167,7 +145,7 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) {
// Don't remove loops for which we can't solve the trip count.
// They could be infinite, in which case we'd be changing program behavior.
ScalarEvolution& SE = getAnalysis<ScalarEvolution>();
- const SCEV *S = SE.getBackedgeTakenCount(L);
+ const SCEV *S = SE.getMaxBackedgeTakenCount(L);
if (isa<SCEVCouldNotCompute>(S))
return Changed;
@@ -183,7 +161,7 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) {
// Tell ScalarEvolution that the loop is deleted. Do this before
// deleting the loop so that ScalarEvolution can look at the loop
// to determine what it needs to clean up.
- SE.forgetLoopBackedgeTakenCount(L);
+ SE.forgetLoop(L);
// Connect the preheader directly to the exit block.
TerminatorInst* TI = preheader->getTerminator();
diff --git a/lib/Transforms/Scalar/LoopIndexSplit.cpp b/lib/Transforms/Scalar/LoopIndexSplit.cpp
index 5f9d370..920d85c 100644
--- a/lib/Transforms/Scalar/LoopIndexSplit.cpp
+++ b/lib/Transforms/Scalar/LoopIndexSplit.cpp
@@ -426,7 +426,7 @@ bool LoopIndexSplit::processOneIterationLoop() {
// c1 = icmp uge i32 SplitValue, StartValue
// c2 = icmp ult i32 SplitValue, ExitValue
// and i32 c1, c2
- Instruction *C1 = new ICmpInst(BR, ExitCondition->isSignedPredicate() ?
+ Instruction *C1 = new ICmpInst(BR, ExitCondition->isSigned() ?
ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE,
SplitValue, StartValue, "lisplit");
@@ -478,7 +478,7 @@ bool LoopIndexSplit::processOneIterationLoop() {
/// with a loop invariant value. Update loop's lower and upper bound based on
/// the loop invariant value.
bool LoopIndexSplit::restrictLoopBound(ICmpInst &Op) {
- bool Sign = Op.isSignedPredicate();
+ bool Sign = Op.isSigned();
Instruction *PHTerm = L->getLoopPreheader()->getTerminator();
if (IVisGT(*ExitCondition) || IVisGE(*ExitCondition)) {
@@ -933,7 +933,7 @@ bool LoopIndexSplit::splitLoop() {
return false;
// If the predicate sign does not match then skip.
- if (ExitCondition->isSignedPredicate() != SplitCondition->isSignedPredicate())
+ if (ExitCondition->isSigned() != SplitCondition->isSigned())
return false;
unsigned EVOpNum = (ExitCondition->getOperand(1) == IVExitValue);
@@ -963,7 +963,7 @@ bool LoopIndexSplit::splitLoop() {
//[*] Calculate new loop bounds.
Value *AEV = SplitValue;
Value *BSV = SplitValue;
- bool Sign = SplitCondition->isSignedPredicate();
+ bool Sign = SplitCondition->isSigned();
Instruction *PHTerm = L->getLoopPreheader()->getTerminator();
if (IVisLT(*ExitCondition)) {
diff --git a/lib/Transforms/Scalar/LoopRotation.cpp b/lib/Transforms/Scalar/LoopRotation.cpp
index 70c69bb..7a4bb35 100644
--- a/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/lib/Transforms/Scalar/LoopRotation.cpp
@@ -21,6 +21,7 @@
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
@@ -32,16 +33,6 @@ using namespace llvm;
STATISTIC(NumRotated, "Number of loops rotated");
namespace {
- class RenameData {
- public:
- RenameData(Instruction *O, Value *P, Instruction *H)
- : Original(O), PreHeader(P), Header(H) { }
- public:
- Instruction *Original; // Original instruction
- Value *PreHeader; // Original pre-header replacement
- Instruction *Header; // New header replacement
- };
-
class LoopRotate : public LoopPass {
public:
static char ID; // Pass ID, replacement for typeid
@@ -71,25 +62,12 @@ namespace {
/// Initialize local data
void initialize();
- /// Make sure all Exit block PHINodes have required incoming values.
- /// If incoming value is constant or defined outside the loop then
- /// PHINode may not have an entry for original pre-header.
- void updateExitBlock();
-
- /// Return true if this instruction is used outside original header.
- bool usedOutsideOriginalHeader(Instruction *In);
-
- /// Find Replacement information for instruction. Return NULL if it is
- /// not available.
- const RenameData *findReplacementData(Instruction *I);
-
/// After loop rotation, loop pre-header has multiple sucessors.
/// Insert one forwarding basic block to ensure that loop pre-header
/// has only one successor.
void preserveCanonicalLoopForm(LPPassManager &LPM);
private:
-
Loop *L;
BasicBlock *OrigHeader;
BasicBlock *OrigPreHeader;
@@ -97,7 +75,6 @@ namespace {
BasicBlock *NewHeader;
BasicBlock *Exit;
LPPassManager *LPM_Ptr;
- SmallVector<RenameData, MAX_HEADER_SIZE> LoopHeaderInfo;
};
}
@@ -141,7 +118,7 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
// If the loop header is not one of the loop exiting blocks then
// either this loop is already rotated or it is not
// suitable for loop rotation transformations.
- if (!L->isLoopExit(OrigHeader))
+ if (!L->isLoopExiting(OrigHeader))
return false;
BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
@@ -180,7 +157,7 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
// Anything ScalarEvolution may know about this loop or the PHI nodes
// in its header will soon be invalidated.
if (ScalarEvolution *SE = getAnalysisIfAvailable<ScalarEvolution>())
- SE->forgetLoopBackedgeTakenCount(L);
+ SE->forgetLoop(L);
// Find new Loop header. NewHeader is a Header's one and only successor
// that is inside loop. Header's other successor is outside the
@@ -199,168 +176,88 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
"New header doesn't have one pred!");
FoldSingleEntryPHINodes(NewHeader);
- // Copy PHI nodes and other instructions from the original header
- // into the original pre-header. Unlike the original header, the original
- // pre-header is not a member of the loop.
- //
- // The new loop header is the one and only successor of original header that
- // is inside the loop. All other original header successors are outside
- // the loop. Copy PHI Nodes from the original header into the new loop header.
- // Add second incoming value, from original loop pre-header into these phi
- // nodes. If a value defined in original header is used outside original
- // header then new loop header will need new phi nodes with two incoming
- // values, one definition from original header and second definition is
- // from original loop pre-header.
-
- // Remove terminator from Original pre-header. Original pre-header will
- // receive a clone of original header terminator as a new terminator.
- OrigPreHeader->getInstList().pop_back();
+ // Begin by walking OrigHeader and populating ValueMap with an entry for
+ // each Instruction.
BasicBlock::iterator I = OrigHeader->begin(), E = OrigHeader->end();
- PHINode *PN = 0;
- for (; (PN = dyn_cast<PHINode>(I)); ++I) {
- // PHI nodes are not copied into original pre-header. Instead their values
- // are directly propagated.
- Value *NPV = PN->getIncomingValueForBlock(OrigPreHeader);
-
- // Create a new PHI node with two incoming values for NewHeader.
- // One incoming value is from OrigLatch (through OrigHeader) and the
- // second incoming value is from original pre-header.
- PHINode *NH = PHINode::Create(PN->getType(), PN->getName(),
- NewHeader->begin());
- NH->addIncoming(PN->getIncomingValueForBlock(OrigLatch), OrigHeader);
- NH->addIncoming(NPV, OrigPreHeader);
-
- // "In" can be replaced by NH at various places.
- LoopHeaderInfo.push_back(RenameData(PN, NPV, NH));
- }
+ DenseMap<const Value *, Value *> ValueMap;
- // Now, handle non-phi instructions.
- for (; I != E; ++I) {
- Instruction *In = I;
- assert(!isa<PHINode>(In) && "PHINode is not expected here");
-
- // This is not a PHI instruction. Insert its clone into original pre-header.
- // If this instruction is using a value from same basic block then
- // update it to use value from cloned instruction.
- Instruction *C = In->clone();
- C->setName(In->getName());
- OrigPreHeader->getInstList().push_back(C);
-
- for (unsigned opi = 0, e = In->getNumOperands(); opi != e; ++opi) {
- Instruction *OpInsn = dyn_cast<Instruction>(In->getOperand(opi));
- if (!OpInsn) continue; // Ignore non-instruction values.
- if (const RenameData *D = findReplacementData(OpInsn))
- C->setOperand(opi, D->PreHeader);
- }
+ // For PHI nodes, the value available in OldPreHeader is just the
+ // incoming value from OldPreHeader.
+ for (; PHINode *PN = dyn_cast<PHINode>(I); ++I)
+ ValueMap[PN] = PN->getIncomingValue(PN->getBasicBlockIndex(OrigPreHeader));
- // If this instruction is used outside this basic block then
- // create new PHINode for this instruction.
- Instruction *NewHeaderReplacement = NULL;
- if (usedOutsideOriginalHeader(In)) {
- PHINode *PN = PHINode::Create(In->getType(), In->getName(),
- NewHeader->begin());
- PN->addIncoming(In, OrigHeader);
- PN->addIncoming(C, OrigPreHeader);
- NewHeaderReplacement = PN;
- }
- LoopHeaderInfo.push_back(RenameData(In, C, NewHeaderReplacement));
+ // For the rest of the instructions, create a clone in the OldPreHeader.
+ TerminatorInst *LoopEntryBranch = OrigPreHeader->getTerminator();
+ for (; I != E; ++I) {
+ Instruction *C = I->clone();
+ C->setName(I->getName());
+ C->insertBefore(LoopEntryBranch);
+ ValueMap[I] = C;
}
- // Rename uses of original header instructions to reflect their new
- // definitions (either from original pre-header node or from newly created
- // new header PHINodes.
- //
- // Original header instructions are used in
- // 1) Original header:
- //
- // If instruction is used in non-phi instructions then it is using
- // defintion from original heder iteself. Do not replace this use
- // with definition from new header or original pre-header.
- //
- // If instruction is used in phi node then it is an incoming
- // value. Rename its use to reflect new definition from new-preheader
- // or new header.
- //
- // 2) Inside loop but not in original header
- //
- // Replace this use to reflect definition from new header.
- for (unsigned LHI = 0, LHI_E = LoopHeaderInfo.size(); LHI != LHI_E; ++LHI) {
- const RenameData &ILoopHeaderInfo = LoopHeaderInfo[LHI];
-
- if (!ILoopHeaderInfo.Header)
- continue;
-
- Instruction *OldPhi = ILoopHeaderInfo.Original;
- Instruction *NewPhi = ILoopHeaderInfo.Header;
-
- // Before replacing uses, collect them first, so that iterator is
- // not invalidated.
- SmallVector<Instruction *, 16> AllUses;
- for (Value::use_iterator UI = OldPhi->use_begin(), UE = OldPhi->use_end();
- UI != UE; ++UI)
- AllUses.push_back(cast<Instruction>(UI));
-
- for (SmallVector<Instruction *, 16>::iterator UI = AllUses.begin(),
- UE = AllUses.end(); UI != UE; ++UI) {
- Instruction *U = *UI;
- BasicBlock *Parent = U->getParent();
-
- // Used inside original header
- if (Parent == OrigHeader) {
- // Do not rename uses inside original header non-phi instructions.
- PHINode *PU = dyn_cast<PHINode>(U);
- if (!PU)
+ // Along with all the other instructions, we just cloned OrigHeader's
+ // terminator into OrigPreHeader. Fix up the PHI nodes in each of OrigHeader's
+ // successors by duplicating their incoming values for OrigHeader.
+ TerminatorInst *TI = OrigHeader->getTerminator();
+ for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
+ for (BasicBlock::iterator BI = TI->getSuccessor(i)->begin();
+ PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
+ PN->addIncoming(PN->getIncomingValueForBlock(OrigHeader), OrigPreHeader);
+
+ // Now that OrigPreHeader has a clone of OrigHeader's terminator, remove
+ // OrigPreHeader's old terminator (the original branch into the loop), and
+ // remove the corresponding incoming values from the PHI nodes in OrigHeader.
+ LoopEntryBranch->eraseFromParent();
+ for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I)
+ PN->removeIncomingValue(PN->getBasicBlockIndex(OrigPreHeader));
+
+ // Now fix up users of the instructions in OrigHeader, inserting PHI nodes
+ // as necessary.
+ SSAUpdater SSA;
+ for (I = OrigHeader->begin(); I != E; ++I) {
+ Value *OrigHeaderVal = I;
+ Value *OrigPreHeaderVal = ValueMap[OrigHeaderVal];
+
+ // The value now exists in two versions: the initial value in the preheader
+ // and the loop "next" value in the original header.
+ SSA.Initialize(OrigHeaderVal);
+ SSA.AddAvailableValue(OrigHeader, OrigHeaderVal);
+ SSA.AddAvailableValue(OrigPreHeader, OrigPreHeaderVal);
+
+ // Visit each use of the OrigHeader instruction.
+ for (Value::use_iterator UI = OrigHeaderVal->use_begin(),
+ UE = OrigHeaderVal->use_end(); UI != UE; ) {
+ // Grab the use before incrementing the iterator.
+ Use &U = UI.getUse();
+
+ // Increment the iterator before removing the use from the list.
+ ++UI;
+
+ // SSAUpdater can't handle a non-PHI use in the same block as an
+ // earlier def. We can easily handle those cases manually.
+ Instruction *UserInst = cast<Instruction>(U.getUser());
+ if (!isa<PHINode>(UserInst)) {
+ BasicBlock *UserBB = UserInst->getParent();
+
+ // The original users in the OrigHeader are already using the
+ // original definitions.
+ if (UserBB == OrigHeader)
continue;
- // Do not rename uses inside original header phi nodes, if the
- // incoming value is for new header.
- if (PU->getBasicBlockIndex(NewHeader) != -1
- && PU->getIncomingValueForBlock(NewHeader) == U)
+ // Users in the OrigPreHeader need to use the value to which the
+ // original definitions are mapped.
+ if (UserBB == OrigPreHeader) {
+ U = OrigPreHeaderVal;
continue;
-
- U->replaceUsesOfWith(OldPhi, NewPhi);
- continue;
+ }
}
- // Used inside loop, but not in original header.
- if (L->contains(U->getParent())) {
- if (U != NewPhi)
- U->replaceUsesOfWith(OldPhi, NewPhi);
- continue;
- }
-
- // Used inside Exit Block. Since we are in LCSSA form, U must be PHINode.
- if (U->getParent() == Exit) {
- assert(isa<PHINode>(U) && "Use in Exit Block that is not PHINode");
-
- PHINode *UPhi = cast<PHINode>(U);
- // UPhi already has one incoming argument from original header.
- // Add second incoming argument from new Pre header.
- UPhi->addIncoming(ILoopHeaderInfo.PreHeader, OrigPreHeader);
- } else {
- // Used outside Exit block. Create a new PHI node in the exit block
- // to receive the value from the new header and pre-header.
- PHINode *PN = PHINode::Create(U->getType(), U->getName(),
- Exit->begin());
- PN->addIncoming(ILoopHeaderInfo.PreHeader, OrigPreHeader);
- PN->addIncoming(OldPhi, OrigHeader);
- U->replaceUsesOfWith(OldPhi, PN);
- }
+ // Anything else can be handled by SSAUpdater.
+ SSA.RewriteUse(U);
}
}
-
- /// Make sure all Exit block PHINodes have required incoming values.
- updateExitBlock();
-
- // Update CFG
- // Removing incoming branch from loop preheader to original header.
- // Now original header is inside the loop.
- for (BasicBlock::iterator I = OrigHeader->begin();
- (PN = dyn_cast<PHINode>(I)); ++I)
- PN->removeIncomingValue(OrigPreHeader);
-
- // Make NewHeader as the new header for the loop.
+ // NewHeader is now the header of the loop.
L->moveToHeader(NewHeader);
preserveCanonicalLoopForm(LPM);
@@ -369,31 +266,6 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
return true;
}
-/// Make sure all Exit block PHINodes have required incoming values.
-/// If an incoming value is constant or defined outside the loop then
-/// PHINode may not have an entry for the original pre-header.
-void LoopRotate::updateExitBlock() {
-
- PHINode *PN;
- for (BasicBlock::iterator I = Exit->begin();
- (PN = dyn_cast<PHINode>(I)); ++I) {
-
- // There is already one incoming value from original pre-header block.
- if (PN->getBasicBlockIndex(OrigPreHeader) != -1)
- continue;
-
- const RenameData *ILoopHeaderInfo;
- Value *V = PN->getIncomingValueForBlock(OrigHeader);
- if (isa<Instruction>(V) &&
- (ILoopHeaderInfo = findReplacementData(cast<Instruction>(V)))) {
- assert(ILoopHeaderInfo->PreHeader && "Missing New Preheader Instruction");
- PN->addIncoming(ILoopHeaderInfo->PreHeader, OrigPreHeader);
- } else {
- PN->addIncoming(V, OrigPreHeader);
- }
- }
-}
-
/// Initialize local data
void LoopRotate::initialize() {
L = NULL;
@@ -401,34 +273,6 @@ void LoopRotate::initialize() {
OrigPreHeader = NULL;
NewHeader = NULL;
Exit = NULL;
-
- LoopHeaderInfo.clear();
-}
-
-/// Return true if this instruction is used by any instructions in the loop that
-/// aren't in original header.
-bool LoopRotate::usedOutsideOriginalHeader(Instruction *In) {
- for (Value::use_iterator UI = In->use_begin(), UE = In->use_end();
- UI != UE; ++UI) {
- BasicBlock *UserBB = cast<Instruction>(UI)->getParent();
- if (UserBB != OrigHeader && L->contains(UserBB))
- return true;
- }
-
- return false;
-}
-
-/// Find Replacement information for instruction. Return NULL if it is
-/// not available.
-const RenameData *LoopRotate::findReplacementData(Instruction *In) {
-
- // Since LoopHeaderInfo is small, linear walk is OK.
- for (unsigned LHI = 0, LHI_E = LoopHeaderInfo.size(); LHI != LHI_E; ++LHI) {
- const RenameData &ILoopHeaderInfo = LoopHeaderInfo[LHI];
- if (ILoopHeaderInfo.Original == In)
- return &ILoopHeaderInfo;
- }
- return NULL;
}
/// After loop rotation, loop pre-header has multiple sucessors.
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index d8f6cc1..e20fb16 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -1914,7 +1914,7 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
continue;
// Watch out for overflow.
- if (ICmpInst::isSignedPredicate(Predicate) &&
+ if (ICmpInst::isSigned(Predicate) &&
(CmpVal & SignBit) != (NewCmpVal & SignBit))
continue;
@@ -1956,7 +1956,7 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
// Check if it is possible to rewrite it using
// an iv / stride of a smaller integer type.
unsigned Bits = NewTyBits;
- if (ICmpInst::isSignedPredicate(Predicate))
+ if (ICmpInst::isSigned(Predicate))
--Bits;
uint64_t Mask = (1ULL << Bits) - 1;
if (((uint64_t)NewCmpVal & Mask) != (uint64_t)NewCmpVal)
@@ -2262,6 +2262,10 @@ void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {
if (!C) continue;
+ // Ignore negative constants, as the code below doesn't handle them
+ // correctly. TODO: Remove this restriction.
+ if (!C->getValue().isStrictlyPositive()) continue;
+
/* Add new PHINode. */
PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);
diff --git a/lib/Transforms/Scalar/LoopUnrollPass.cpp b/lib/Transforms/Scalar/LoopUnrollPass.cpp
new file mode 100644
index 0000000..c2bf9f2
--- /dev/null
+++ b/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -0,0 +1,151 @@
+//===-- LoopUnroll.cpp - Loop unroller pass -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass implements a simple loop unroller. It works best when loops have
+// been canonicalized by the -indvars pass, allowing it to determine the trip
+// counts of loops easily.
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "loop-unroll"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/InlineCost.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/UnrollLoop.h"
+#include <climits>
+
+using namespace llvm;
+
+static cl::opt<unsigned>
+UnrollThreshold("unroll-threshold", cl::init(100), cl::Hidden,
+ cl::desc("The cut-off point for automatic loop unrolling"));
+
+static cl::opt<unsigned>
+UnrollCount("unroll-count", cl::init(0), cl::Hidden,
+ cl::desc("Use this unroll count for all loops, for testing purposes"));
+
+static cl::opt<bool>
+UnrollAllowPartial("unroll-allow-partial", cl::init(false), cl::Hidden,
+ cl::desc("Allows loops to be partially unrolled until "
+ "-unroll-threshold loop size is reached."));
+
+namespace {
+ class LoopUnroll : public LoopPass {
+ public:
+ static char ID; // Pass ID, replacement for typeid
+ LoopUnroll() : LoopPass(&ID) {}
+
+ /// A magic value for use with the Threshold parameter to indicate
+ /// that the loop unroll should be performed regardless of how much
+ /// code expansion would result.
+ static const unsigned NoThreshold = UINT_MAX;
+
+ bool runOnLoop(Loop *L, LPPassManager &LPM);
+
+ /// This transformation requires natural loop information & requires that
+ /// loop preheaders be inserted into the CFG...
+ ///
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequiredID(LoopSimplifyID);
+ AU.addRequiredID(LCSSAID);
+ AU.addRequired<LoopInfo>();
+ AU.addPreservedID(LCSSAID);
+ AU.addPreserved<LoopInfo>();
+ // FIXME: Loop unroll requires LCSSA. And LCSSA requires dom info.
+ // If loop unroll does not preserve dom info then LCSSA pass on next
+ // loop will receive invalid dom info.
+ // For now, recreate dom info, if loop is unrolled.
+ AU.addPreserved<DominatorTree>();
+ AU.addPreserved<DominanceFrontier>();
+ }
+ };
+}
+
+char LoopUnroll::ID = 0;
+static RegisterPass<LoopUnroll> X("loop-unroll", "Unroll loops");
+
+Pass *llvm::createLoopUnrollPass() { return new LoopUnroll(); }
+
+/// ApproximateLoopSize - Approximate the size of the loop.
+static unsigned ApproximateLoopSize(const Loop *L) {
+ CodeMetrics Metrics;
+ for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
+ I != E; ++I)
+ Metrics.analyzeBasicBlock(*I);
+ return Metrics.NumInsts;
+}
+
+bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
+ assert(L->isLCSSAForm());
+ LoopInfo *LI = &getAnalysis<LoopInfo>();
+
+ BasicBlock *Header = L->getHeader();
+ DEBUG(errs() << "Loop Unroll: F[" << Header->getParent()->getName()
+ << "] Loop %" << Header->getName() << "\n");
+ (void)Header;
+
+ // Find trip count
+ unsigned TripCount = L->getSmallConstantTripCount();
+ unsigned Count = UnrollCount;
+
+ // Automatically select an unroll count.
+ if (Count == 0) {
+ // Conservative heuristic: if we know the trip count, see if we can
+ // completely unroll (subject to the threshold, checked below); otherwise
+ // try to find greatest modulo of the trip count which is still under
+ // threshold value.
+ if (TripCount == 0)
+ return false;
+ Count = TripCount;
+ }
+
+ // Enforce the threshold.
+ if (UnrollThreshold != NoThreshold) {
+ unsigned LoopSize = ApproximateLoopSize(L);
+ DEBUG(errs() << " Loop Size = " << LoopSize << "\n");
+ uint64_t Size = (uint64_t)LoopSize*Count;
+ if (TripCount != 1 && Size > UnrollThreshold) {
+ DEBUG(errs() << " Too large to fully unroll with count: " << Count
+ << " because size: " << Size << ">" << UnrollThreshold << "\n");
+ if (!UnrollAllowPartial) {
+ DEBUG(errs() << " will not try to unroll partially because "
+ << "-unroll-allow-partial not given\n");
+ return false;
+ }
+ // Reduce unroll count to be modulo of TripCount for partial unrolling
+ Count = UnrollThreshold / LoopSize;
+ while (Count != 0 && TripCount%Count != 0) {
+ Count--;
+ }
+ if (Count < 2) {
+ DEBUG(errs() << " could not unroll partially\n");
+ return false;
+ }
+ DEBUG(errs() << " partially unrolling with count: " << Count << "\n");
+ }
+ }
+
+ // Unroll the loop.
+ Function *F = L->getHeader()->getParent();
+ if (!UnrollLoop(L, Count, LI, &LPM))
+ return false;
+
+ // FIXME: Reconstruct dom info, because it is not preserved properly.
+ DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>();
+ if (DT) {
+ DT->runOnFunction(*F);
+ DominanceFrontier *DF = getAnalysisIfAvailable<DominanceFrontier>();
+ if (DF)
+ DF->runOnFunction(*F);
+ }
+ return true;
+}
diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp
index 223d2b9..c7b00da 100644
--- a/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -430,7 +430,8 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val){
// large numbers of branches which cause loop unswitching to go crazy.
// This is a very ad-hoc heuristic.
if (Metrics.NumInsts > Threshold ||
- Metrics.NumBlocks * 5 > Threshold) {
+ Metrics.NumBlocks * 5 > Threshold ||
+ Metrics.NeverInline) {
DEBUG(errs() << "NOT unswitching loop %"
<< currentLoop->getHeader()->getName() << ", cost too high: "
<< currentLoop->getBlocks().size() << "\n");
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index b745097..05a0eee 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -15,10 +15,6 @@
// * Proves values to be constant, and replaces them with constants
// * Proves conditional branches to be unconditional
//
-// Notice that:
-// * This pass has a habit of making definitions be dead. It is a good idea
-// to to run a DCE pass sometime after running this pass.
-//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "sccp"
@@ -27,11 +23,11 @@
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
-#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Target/TargetData.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -39,7 +35,8 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
@@ -51,7 +48,6 @@ STATISTIC(NumInstRemoved, "Number of instructions removed");
STATISTIC(NumDeadBlocks , "Number of basic blocks unreachable");
STATISTIC(IPNumInstRemoved, "Number of instructions removed by IPSCCP");
-STATISTIC(IPNumDeadBlocks , "Number of basic blocks unreachable by IPSCCP");
STATISTIC(IPNumArgsElimed ,"Number of arguments constant propagated by IPSCCP");
STATISTIC(IPNumGlobalConst, "Number of globals found to be constant by IPSCCP");
@@ -60,7 +56,7 @@ namespace {
/// an LLVM value may occupy. It is a simple class with value semantics.
///
class LatticeVal {
- enum {
+ enum LatticeValueTy {
/// undefined - This LLVM Value has no known value yet.
undefined,
@@ -76,63 +72,82 @@ class LatticeVal {
/// overdefined - This instruction is not known to be constant, and we know
/// it has a value.
overdefined
- } LatticeValue; // The current lattice position
+ };
+
+ /// Val: This stores the current lattice value along with the Constant* for
+ /// the constant if this is a 'constant' or 'forcedconstant' value.
+ PointerIntPair<Constant *, 2, LatticeValueTy> Val;
+
+ LatticeValueTy getLatticeValue() const {
+ return Val.getInt();
+ }
- Constant *ConstantVal; // If Constant value, the current value
public:
- inline LatticeVal() : LatticeValue(undefined), ConstantVal(0) {}
+ LatticeVal() : Val(0, undefined) {}
- // markOverdefined - Return true if this is a new status to be in...
- inline bool markOverdefined() {
- if (LatticeValue != overdefined) {
- LatticeValue = overdefined;
- return true;
- }
- return false;
+ bool isUndefined() const { return getLatticeValue() == undefined; }
+ bool isConstant() const {
+ return getLatticeValue() == constant || getLatticeValue() == forcedconstant;
+ }
+ bool isOverdefined() const { return getLatticeValue() == overdefined; }
+
+ Constant *getConstant() const {
+ assert(isConstant() && "Cannot get the constant of a non-constant!");
+ return Val.getPointer();
+ }
+
+ /// markOverdefined - Return true if this is a change in status.
+ bool markOverdefined() {
+ if (isOverdefined())
+ return false;
+
+ Val.setInt(overdefined);
+ return true;
}
- // markConstant - Return true if this is a new status for us.
- inline bool markConstant(Constant *V) {
- if (LatticeValue != constant) {
- if (LatticeValue == undefined) {
- LatticeValue = constant;
- assert(V && "Marking constant with NULL");
- ConstantVal = V;
- } else {
- assert(LatticeValue == forcedconstant &&
- "Cannot move from overdefined to constant!");
- // Stay at forcedconstant if the constant is the same.
- if (V == ConstantVal) return false;
-
- // Otherwise, we go to overdefined. Assumptions made based on the
- // forced value are possibly wrong. Assuming this is another constant
- // could expose a contradiction.
- LatticeValue = overdefined;
- }
- return true;
+ /// markConstant - Return true if this is a change in status.
+ bool markConstant(Constant *V) {
+ if (getLatticeValue() == constant) { // Constant but not forcedconstant.
+ assert(getConstant() == V && "Marking constant with different value");
+ return false;
+ }
+
+ if (isUndefined()) {
+ Val.setInt(constant);
+ assert(V && "Marking constant with NULL");
+ Val.setPointer(V);
} else {
- assert(ConstantVal == V && "Marking constant with different value");
+ assert(getLatticeValue() == forcedconstant &&
+ "Cannot move from overdefined to constant!");
+ // Stay at forcedconstant if the constant is the same.
+ if (V == getConstant()) return false;
+
+ // Otherwise, we go to overdefined. Assumptions made based on the
+ // forced value are possibly wrong. Assuming this is another constant
+ // could expose a contradiction.
+ Val.setInt(overdefined);
}
- return false;
+ return true;
}
- inline void markForcedConstant(Constant *V) {
- assert(LatticeValue == undefined && "Can't force a defined value!");
- LatticeValue = forcedconstant;
- ConstantVal = V;
+ /// getConstantInt - If this is a constant with a ConstantInt value, return it
+ /// otherwise return null.
+ ConstantInt *getConstantInt() const {
+ if (isConstant())
+ return dyn_cast<ConstantInt>(getConstant());
+ return 0;
}
- inline bool isUndefined() const { return LatticeValue == undefined; }
- inline bool isConstant() const {
- return LatticeValue == constant || LatticeValue == forcedconstant;
- }
- inline bool isOverdefined() const { return LatticeValue == overdefined; }
-
- inline Constant *getConstant() const {
- assert(isConstant() && "Cannot get the constant of a non-constant!");
- return ConstantVal;
+ void markForcedConstant(Constant *V) {
+ assert(isUndefined() && "Can't force a defined value!");
+ Val.setInt(forcedconstant);
+ Val.setPointer(V);
}
};
+} // end anonymous namespace.
+
+
+namespace {
//===----------------------------------------------------------------------===//
//
@@ -140,10 +155,15 @@ public:
/// Constant Propagation.
///
class SCCPSolver : public InstVisitor<SCCPSolver> {
- LLVMContext *Context;
- DenseSet<BasicBlock*> BBExecutable;// The basic blocks that are executable
- std::map<Value*, LatticeVal> ValueState; // The state each value is in.
+ const TargetData *TD;
+ SmallPtrSet<BasicBlock*, 8> BBExecutable;// The BBs that are executable.
+ DenseMap<Value*, LatticeVal> ValueState; // The state each value is in.
+ /// StructValueState - This maintains ValueState for values that have
+ /// StructType, for example for formal arguments, calls, insertelement, etc.
+ ///
+ DenseMap<std::pair<Value*, unsigned>, LatticeVal> StructValueState;
+
/// GlobalValue - If we are tracking any values for the contents of a global
/// variable, we keep a mapping from the constant accessor to the element of
/// the global, to the currently known value. If the value becomes
@@ -158,13 +178,23 @@ class SCCPSolver : public InstVisitor<SCCPSolver> {
/// TrackedMultipleRetVals - Same as TrackedRetVals, but used for functions
/// that return multiple values.
DenseMap<std::pair<Function*, unsigned>, LatticeVal> TrackedMultipleRetVals;
-
- // The reason for two worklists is that overdefined is the lowest state
- // on the lattice, and moving things to overdefined as fast as possible
- // makes SCCP converge much faster.
- // By having a separate worklist, we accomplish this because everything
- // possibly overdefined will become overdefined at the soonest possible
- // point.
+
+ /// MRVFunctionsTracked - Each function in TrackedMultipleRetVals is
+ /// represented here for efficient lookup.
+ SmallPtrSet<Function*, 16> MRVFunctionsTracked;
+
+ /// TrackingIncomingArguments - This is the set of functions for whose
+ /// arguments we make optimistic assumptions about and try to prove as
+ /// constants.
+ SmallPtrSet<Function*, 16> TrackingIncomingArguments;
+
+ /// The reason for two worklists is that overdefined is the lowest state
+ /// on the lattice, and moving things to overdefined as fast as possible
+ /// makes SCCP converge much faster.
+ ///
+ /// By having a separate worklist, we accomplish this because everything
+ /// possibly overdefined will become overdefined at the soonest possible
+ /// point.
SmallVector<Value*, 64> OverdefinedInstWorkList;
SmallVector<Value*, 64> InstWorkList;
@@ -180,14 +210,17 @@ class SCCPSolver : public InstVisitor<SCCPSolver> {
typedef std::pair<BasicBlock*, BasicBlock*> Edge;
DenseSet<Edge> KnownFeasibleEdges;
public:
- void setContext(LLVMContext *C) { Context = C; }
+ SCCPSolver(const TargetData *td) : TD(td) {}
/// MarkBlockExecutable - This method can be used by clients to mark all of
/// the blocks that are known to be intrinsically live in the processed unit.
- void MarkBlockExecutable(BasicBlock *BB) {
+ ///
+ /// This returns true if the block was not considered live before.
+ bool MarkBlockExecutable(BasicBlock *BB) {
+ if (!BBExecutable.insert(BB)) return false;
DEBUG(errs() << "Marking Block Executable: " << BB->getName() << "\n");
- BBExecutable.insert(BB); // Basic block is executable!
BBWorkList.push_back(BB); // Add the block to the work list!
+ return true;
}
/// TrackValueOfGlobalVariable - Clients can use this method to
@@ -195,8 +228,8 @@ public:
/// specified global variable if it can. This is only legal to call if
/// performing Interprocedural SCCP.
void TrackValueOfGlobalVariable(GlobalVariable *GV) {
- const Type *ElTy = GV->getType()->getElementType();
- if (ElTy->isFirstClassType()) {
+ // We only track the contents of scalar globals.
+ if (GV->getType()->getElementType()->isSingleValueType()) {
LatticeVal &IV = TrackedGlobals[GV];
if (!isa<UndefValue>(GV->getInitializer()))
IV.markConstant(GV->getInitializer());
@@ -207,9 +240,9 @@ public:
/// and out of the specified function (which cannot have its address taken),
/// this method must be called.
void AddTrackedFunction(Function *F) {
- assert(F->hasLocalLinkage() && "Can only track internal functions!");
// Add an entry, F -> undef.
if (const StructType *STy = dyn_cast<StructType>(F->getReturnType())) {
+ MRVFunctionsTracked.insert(F);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
TrackedMultipleRetVals.insert(std::make_pair(std::make_pair(F, i),
LatticeVal()));
@@ -217,6 +250,10 @@ public:
TrackedRetVals.insert(std::make_pair(F, LatticeVal()));
}
+ void AddArgumentTrackedFunction(Function *F) {
+ TrackingIncomingArguments.insert(F);
+ }
+
/// Solve - Solve for constants and executable blocks.
///
void Solve();
@@ -232,10 +269,17 @@ public:
return BBExecutable.count(BB);
}
- /// getValueMapping - Once we have solved for constants, return the mapping of
- /// LLVM values to LatticeVals.
- std::map<Value*, LatticeVal> &getValueMapping() {
- return ValueState;
+ LatticeVal getLatticeValueFor(Value *V) const {
+ DenseMap<Value*, LatticeVal>::const_iterator I = ValueState.find(V);
+ assert(I != ValueState.end() && "V is not in valuemap!");
+ return I->second;
+ }
+
+ LatticeVal getStructLatticeValueFor(Value *V, unsigned i) const {
+ DenseMap<std::pair<Value*, unsigned>, LatticeVal>::const_iterator I =
+ StructValueState.find(std::make_pair(V, i));
+ assert(I != StructValueState.end() && "V is not in valuemap!");
+ return I->second;
}
/// getTrackedRetVals - Get the inferred return value map.
@@ -250,48 +294,61 @@ public:
return TrackedGlobals;
}
- inline void markOverdefined(Value *V) {
+ void markOverdefined(Value *V) {
+ assert(!isa<StructType>(V->getType()) && "Should use other method");
markOverdefined(ValueState[V], V);
}
+ /// markAnythingOverdefined - Mark the specified value overdefined. This
+ /// works with both scalars and structs.
+ void markAnythingOverdefined(Value *V) {
+ if (const StructType *STy = dyn_cast<StructType>(V->getType()))
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
+ markOverdefined(getStructValueState(V, i), V);
+ else
+ markOverdefined(V);
+ }
+
private:
// markConstant - Make a value be marked as "constant". If the value
// is not already a constant, add it to the instruction work list so that
// the users of the instruction are updated later.
//
- inline void markConstant(LatticeVal &IV, Value *V, Constant *C) {
- if (IV.markConstant(C)) {
- DEBUG(errs() << "markConstant: " << *C << ": " << *V << '\n');
- InstWorkList.push_back(V);
- }
- }
-
- inline void markForcedConstant(LatticeVal &IV, Value *V, Constant *C) {
- IV.markForcedConstant(C);
- DEBUG(errs() << "markForcedConstant: " << *C << ": " << *V << '\n');
+ void markConstant(LatticeVal &IV, Value *V, Constant *C) {
+ if (!IV.markConstant(C)) return;
+ DEBUG(errs() << "markConstant: " << *C << ": " << *V << '\n');
InstWorkList.push_back(V);
}
- inline void markConstant(Value *V, Constant *C) {
+ void markConstant(Value *V, Constant *C) {
+ assert(!isa<StructType>(V->getType()) && "Should use other method");
markConstant(ValueState[V], V, C);
}
+ void markForcedConstant(Value *V, Constant *C) {
+ assert(!isa<StructType>(V->getType()) && "Should use other method");
+ ValueState[V].markForcedConstant(C);
+ DEBUG(errs() << "markForcedConstant: " << *C << ": " << *V << '\n');
+ InstWorkList.push_back(V);
+ }
+
+
// markOverdefined - Make a value be marked as "overdefined". If the
// value is not already overdefined, add it to the overdefined instruction
// work list so that the users of the instruction are updated later.
- inline void markOverdefined(LatticeVal &IV, Value *V) {
- if (IV.markOverdefined()) {
- DEBUG(errs() << "markOverdefined: ";
- if (Function *F = dyn_cast<Function>(V))
- errs() << "Function '" << F->getName() << "'\n";
- else
- errs() << *V << '\n');
- // Only instructions go on the work list
- OverdefinedInstWorkList.push_back(V);
- }
+ void markOverdefined(LatticeVal &IV, Value *V) {
+ if (!IV.markOverdefined()) return;
+
+ DEBUG(errs() << "markOverdefined: ";
+ if (Function *F = dyn_cast<Function>(V))
+ errs() << "Function '" << F->getName() << "'\n";
+ else
+ errs() << *V << '\n');
+ // Only instructions go on the work list
+ OverdefinedInstWorkList.push_back(V);
}
- inline void mergeInValue(LatticeVal &IV, Value *V, LatticeVal &MergeWithV) {
+ void mergeInValue(LatticeVal &IV, Value *V, LatticeVal MergeWithV) {
if (IV.isOverdefined() || MergeWithV.isUndefined())
return; // Noop.
if (MergeWithV.isOverdefined())
@@ -302,53 +359,85 @@ private:
markOverdefined(IV, V);
}
- inline void mergeInValue(Value *V, LatticeVal &MergeWithV) {
- return mergeInValue(ValueState[V], V, MergeWithV);
+ void mergeInValue(Value *V, LatticeVal MergeWithV) {
+ assert(!isa<StructType>(V->getType()) && "Should use other method");
+ mergeInValue(ValueState[V], V, MergeWithV);
}
- // getValueState - Return the LatticeVal object that corresponds to the value.
- // This function is necessary because not all values should start out in the
- // underdefined state... Argument's should be overdefined, and
- // constants should be marked as constants. If a value is not known to be an
- // Instruction object, then use this accessor to get its value from the map.
- //
- inline LatticeVal &getValueState(Value *V) {
- std::map<Value*, LatticeVal>::iterator I = ValueState.find(V);
- if (I != ValueState.end()) return I->second; // Common case, in the map
+ /// getValueState - Return the LatticeVal object that corresponds to the
+ /// value. This function handles the case when the value hasn't been seen yet
+ /// by properly seeding constants etc.
+ LatticeVal &getValueState(Value *V) {
+ assert(!isa<StructType>(V->getType()) && "Should use getStructValueState");
+
+ // TODO: Change to do insert+find in one operation.
+ DenseMap<Value*, LatticeVal>::iterator I = ValueState.find(V);
+ if (I != ValueState.end())
+ return I->second; // Common case, already in the map.
+
+ LatticeVal &LV = ValueState[V];
if (Constant *C = dyn_cast<Constant>(V)) {
- if (isa<UndefValue>(V)) {
- // Nothing to do, remain undefined.
- } else {
- LatticeVal &LV = ValueState[C];
+ // Undef values remain undefined.
+ if (!isa<UndefValue>(V))
LV.markConstant(C); // Constants are constant
- return LV;
- }
}
- // All others are underdefined by default...
- return ValueState[V];
+
+ // All others are underdefined by default.
+ return LV;
}
- // markEdgeExecutable - Mark a basic block as executable, adding it to the BB
- // work list if it is not already executable...
- //
+ /// getStructValueState - Return the LatticeVal object that corresponds to the
+ /// value/field pair. This function handles the case when the value hasn't
+ /// been seen yet by properly seeding constants etc.
+ LatticeVal &getStructValueState(Value *V, unsigned i) {
+ assert(isa<StructType>(V->getType()) && "Should use getValueState");
+ assert(i < cast<StructType>(V->getType())->getNumElements() &&
+ "Invalid element #");
+
+ // TODO: Change to do insert+find in one operation.
+ DenseMap<std::pair<Value*, unsigned>, LatticeVal>::iterator
+ I = StructValueState.find(std::make_pair(V, i));
+ if (I != StructValueState.end())
+ return I->second; // Common case, already in the map.
+
+ LatticeVal &LV = StructValueState[std::make_pair(V, i)];
+
+ if (Constant *C = dyn_cast<Constant>(V)) {
+ if (isa<UndefValue>(C))
+ ; // Undef values remain undefined.
+ else if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C))
+ LV.markConstant(CS->getOperand(i)); // Constants are constant.
+ else if (isa<ConstantAggregateZero>(C)) {
+ const Type *FieldTy = cast<StructType>(V->getType())->getElementType(i);
+ LV.markConstant(Constant::getNullValue(FieldTy));
+ } else
+ LV.markOverdefined(); // Unknown sort of constant.
+ }
+
+ // All others are underdefined by default.
+ return LV;
+ }
+
+
+ /// markEdgeExecutable - Mark a basic block as executable, adding it to the BB
+ /// work list if it is not already executable.
void markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) {
if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second)
return; // This edge is already known to be executable!
- if (BBExecutable.count(Dest)) {
- DEBUG(errs() << "Marking Edge Executable: " << Source->getName()
- << " -> " << Dest->getName() << "\n");
-
- // The destination is already executable, but we just made an edge
+ if (!MarkBlockExecutable(Dest)) {
+ // If the destination is already executable, we just made an *edge*
// feasible that wasn't before. Revisit the PHI nodes in the block
// because they have potentially new operands.
- for (BasicBlock::iterator I = Dest->begin(); isa<PHINode>(I); ++I)
- visitPHINode(*cast<PHINode>(I));
+ DEBUG(errs() << "Marking Edge Executable: " << Source->getName()
+ << " -> " << Dest->getName() << "\n");
- } else {
- MarkBlockExecutable(Dest);
+ PHINode *PN;
+ for (BasicBlock::iterator I = Dest->begin();
+ (PN = dyn_cast<PHINode>(I)); ++I)
+ visitPHINode(*PN);
}
}
@@ -358,28 +447,39 @@ private:
void getFeasibleSuccessors(TerminatorInst &TI, SmallVector<bool, 16> &Succs);
// isEdgeFeasible - Return true if the control flow edge from the 'From' basic
- // block to the 'To' basic block is currently feasible...
+ // block to the 'To' basic block is currently feasible.
//
bool isEdgeFeasible(BasicBlock *From, BasicBlock *To);
// OperandChangedState - This method is invoked on all of the users of an
- // instruction that was just changed state somehow.... Based on this
+ // instruction that was just changed state somehow. Based on this
// information, we need to update the specified user of this instruction.
//
- void OperandChangedState(User *U) {
- // Only instructions use other variable values!
- Instruction &I = cast<Instruction>(*U);
- if (BBExecutable.count(I.getParent())) // Inst is executable?
- visit(I);
+ void OperandChangedState(Instruction *I) {
+ if (BBExecutable.count(I->getParent())) // Inst is executable?
+ visit(*I);
+ }
+
+ /// RemoveFromOverdefinedPHIs - If I has any entries in the
+ /// UsersOfOverdefinedPHIs map for PN, remove them now.
+ void RemoveFromOverdefinedPHIs(Instruction *I, PHINode *PN) {
+ if (UsersOfOverdefinedPHIs.empty()) return;
+ std::multimap<PHINode*, Instruction*>::iterator It, E;
+ tie(It, E) = UsersOfOverdefinedPHIs.equal_range(PN);
+ while (It != E) {
+ if (It->second == I)
+ UsersOfOverdefinedPHIs.erase(It++);
+ else
+ ++It;
+ }
}
private:
friend class InstVisitor<SCCPSolver>;
- // visit implementations - Something changed in this instruction... Either an
+ // visit implementations - Something changed in this instruction. Either an
// operand made a transition, or the instruction is newly executable. Change
// the value type of I to reflect these changes if appropriate.
- //
void visitPHINode(PHINode &I);
// Terminators
@@ -396,11 +496,11 @@ private:
void visitExtractValueInst(ExtractValueInst &EVI);
void visitInsertValueInst(InsertValueInst &IVI);
- // Instructions that cannot be folded away...
- void visitStoreInst (Instruction &I);
+ // Instructions that cannot be folded away.
+ void visitStoreInst (StoreInst &I);
void visitLoadInst (LoadInst &I);
void visitGetElementPtrInst(GetElementPtrInst &I);
- void visitCallInst (CallInst &I) {
+ void visitCallInst (CallInst &I) {
visitCallSite(CallSite::get(&I));
}
void visitInvokeInst (InvokeInst &II) {
@@ -410,15 +510,14 @@ private:
void visitCallSite (CallSite CS);
void visitUnwindInst (TerminatorInst &I) { /*returns void*/ }
void visitUnreachableInst(TerminatorInst &I) { /*returns void*/ }
- void visitAllocationInst(Instruction &I) { markOverdefined(&I); }
+ void visitAllocaInst (Instruction &I) { markOverdefined(&I); }
void visitVANextInst (Instruction &I) { markOverdefined(&I); }
- void visitVAArgInst (Instruction &I) { markOverdefined(&I); }
- void visitFreeInst (Instruction &I) { /*returns void*/ }
+ void visitVAArgInst (Instruction &I) { markAnythingOverdefined(&I); }
void visitInstruction(Instruction &I) {
- // If a new instruction is added to LLVM that we don't handle...
+ // If a new instruction is added to LLVM that we don't handle.
errs() << "SCCP: Don't know how to handle: " << I;
- markOverdefined(&I); // Just in case
+ markAnythingOverdefined(&I); // Just in case
}
};
@@ -434,37 +533,61 @@ void SCCPSolver::getFeasibleSuccessors(TerminatorInst &TI,
if (BranchInst *BI = dyn_cast<BranchInst>(&TI)) {
if (BI->isUnconditional()) {
Succs[0] = true;
- } else {
- LatticeVal &BCValue = getValueState(BI->getCondition());
- if (BCValue.isOverdefined() ||
- (BCValue.isConstant() && !isa<ConstantInt>(BCValue.getConstant()))) {
- // Overdefined condition variables, and branches on unfoldable constant
- // conditions, mean the branch could go either way.
+ return;
+ }
+
+ LatticeVal BCValue = getValueState(BI->getCondition());
+ ConstantInt *CI = BCValue.getConstantInt();
+ if (CI == 0) {
+ // Overdefined condition variables, and branches on unfoldable constant
+ // conditions, mean the branch could go either way.
+ if (!BCValue.isUndefined())
Succs[0] = Succs[1] = true;
- } else if (BCValue.isConstant()) {
- // Constant condition variables mean the branch can only go a single way
- Succs[BCValue.getConstant() == ConstantInt::getFalse(*Context)] = true;
- }
+ return;
}
- } else if (isa<InvokeInst>(&TI)) {
+
+ // Constant condition variables mean the branch can only go a single way.
+ Succs[CI->isZero()] = true;
+ return;
+ }
+
+ if (isa<InvokeInst>(TI)) {
// Invoke instructions successors are always executable.
Succs[0] = Succs[1] = true;
- } else if (SwitchInst *SI = dyn_cast<SwitchInst>(&TI)) {
- LatticeVal &SCValue = getValueState(SI->getCondition());
- if (SCValue.isOverdefined() || // Overdefined condition?
- (SCValue.isConstant() && !isa<ConstantInt>(SCValue.getConstant()))) {
+ return;
+ }
+
+ if (SwitchInst *SI = dyn_cast<SwitchInst>(&TI)) {
+ LatticeVal SCValue = getValueState(SI->getCondition());
+ ConstantInt *CI = SCValue.getConstantInt();
+
+ if (CI == 0) { // Overdefined or undefined condition?
// All destinations are executable!
- Succs.assign(TI.getNumSuccessors(), true);
- } else if (SCValue.isConstant())
- Succs[SI->findCaseValue(cast<ConstantInt>(SCValue.getConstant()))] = true;
- } else {
- llvm_unreachable("SCCP: Don't know how to handle this terminator!");
+ if (!SCValue.isUndefined())
+ Succs.assign(TI.getNumSuccessors(), true);
+ return;
+ }
+
+ Succs[SI->findCaseValue(CI)] = true;
+ return;
+ }
+
+ // TODO: This could be improved if the operand is a [cast of a] BlockAddress.
+ if (isa<IndirectBrInst>(&TI)) {
+ // Just mark all destinations executable!
+ Succs.assign(TI.getNumSuccessors(), true);
+ return;
}
+
+#ifndef NDEBUG
+ errs() << "Unknown terminator instruction: " << TI << '\n';
+#endif
+ llvm_unreachable("SCCP: Don't know how to handle this terminator!");
}
// isEdgeFeasible - Return true if the control flow edge from the 'From' basic
-// block to the 'To' basic block is currently feasible...
+// block to the 'To' basic block is currently feasible.
//
bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
assert(BBExecutable.count(To) && "Dest should always be alive!");
@@ -472,58 +595,57 @@ bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
// Make sure the source basic block is executable!!
if (!BBExecutable.count(From)) return false;
- // Check to make sure this edge itself is actually feasible now...
+ // Check to make sure this edge itself is actually feasible now.
TerminatorInst *TI = From->getTerminator();
if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
if (BI->isUnconditional())
return true;
- else {
- LatticeVal &BCValue = getValueState(BI->getCondition());
- if (BCValue.isOverdefined()) {
- // Overdefined condition variables mean the branch could go either way.
- return true;
- } else if (BCValue.isConstant()) {
- // Not branching on an evaluatable constant?
- if (!isa<ConstantInt>(BCValue.getConstant())) return true;
+
+ LatticeVal BCValue = getValueState(BI->getCondition());
- // Constant condition variables mean the branch can only go a single way
- return BI->getSuccessor(BCValue.getConstant() ==
- ConstantInt::getFalse(*Context)) == To;
- }
- return false;
- }
- } else if (isa<InvokeInst>(TI)) {
- // Invoke instructions successors are always executable.
+ // Overdefined condition variables mean the branch could go either way,
+ // undef conditions mean that neither edge is feasible yet.
+ ConstantInt *CI = BCValue.getConstantInt();
+ if (CI == 0)
+ return !BCValue.isUndefined();
+
+ // Constant condition variables mean the branch can only go a single way.
+ return BI->getSuccessor(CI->isZero()) == To;
+ }
+
+ // Invoke instructions successors are always executable.
+ if (isa<InvokeInst>(TI))
return true;
- } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
- LatticeVal &SCValue = getValueState(SI->getCondition());
- if (SCValue.isOverdefined()) { // Overdefined condition?
- // All destinations are executable!
- return true;
- } else if (SCValue.isConstant()) {
- Constant *CPV = SCValue.getConstant();
- if (!isa<ConstantInt>(CPV))
- return true; // not a foldable constant?
-
- // Make sure to skip the "default value" which isn't a value
- for (unsigned i = 1, E = SI->getNumSuccessors(); i != E; ++i)
- if (SI->getSuccessorValue(i) == CPV) // Found the taken branch...
- return SI->getSuccessor(i) == To;
-
- // Constant value not equal to any of the branches... must execute
- // default branch then...
- return SI->getDefaultDest() == To;
- }
- return false;
- } else {
+
+ if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
+ LatticeVal SCValue = getValueState(SI->getCondition());
+ ConstantInt *CI = SCValue.getConstantInt();
+
+ if (CI == 0)
+ return !SCValue.isUndefined();
+
+ // Make sure to skip the "default value" which isn't a value
+ for (unsigned i = 1, E = SI->getNumSuccessors(); i != E; ++i)
+ if (SI->getSuccessorValue(i) == CI) // Found the taken branch.
+ return SI->getSuccessor(i) == To;
+
+ // If the constant value is not equal to any of the branches, we must
+ // execute default branch.
+ return SI->getDefaultDest() == To;
+ }
+
+ // Just mark all destinations executable!
+ // TODO: This could be improved if the operand is a [cast of a] BlockAddress.
+ if (isa<IndirectBrInst>(&TI))
+ return true;
+
#ifndef NDEBUG
- errs() << "Unknown terminator instruction: " << *TI << '\n';
+ errs() << "Unknown terminator instruction: " << *TI << '\n';
#endif
- llvm_unreachable(0);
- }
+ llvm_unreachable(0);
}
-// visit Implementations - Something changed in this instruction... Either an
+// visit Implementations - Something changed in this instruction, either an
// operand made a transition, or the instruction is newly executable. Change
// the value type of I to reflect these changes if appropriate. This method
// makes sure to do the following actions:
@@ -542,31 +664,33 @@ bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
// successors executable.
//
void SCCPSolver::visitPHINode(PHINode &PN) {
- LatticeVal &PNIV = getValueState(&PN);
- if (PNIV.isOverdefined()) {
+ // If this PN returns a struct, just mark the result overdefined.
+ // TODO: We could do a lot better than this if code actually uses this.
+ if (isa<StructType>(PN.getType()))
+ return markAnythingOverdefined(&PN);
+
+ if (getValueState(&PN).isOverdefined()) {
// There may be instructions using this PHI node that are not overdefined
// themselves. If so, make sure that they know that the PHI node operand
// changed.
std::multimap<PHINode*, Instruction*>::iterator I, E;
tie(I, E) = UsersOfOverdefinedPHIs.equal_range(&PN);
- if (I != E) {
- SmallVector<Instruction*, 16> Users;
- for (; I != E; ++I) Users.push_back(I->second);
- while (!Users.empty()) {
- visit(Users.back());
- Users.pop_back();
- }
- }
+ if (I == E)
+ return;
+
+ SmallVector<Instruction*, 16> Users;
+ for (; I != E; ++I)
+ Users.push_back(I->second);
+ while (!Users.empty())
+ visit(Users.pop_back_val());
return; // Quick exit
}
// Super-extra-high-degree PHI nodes are unlikely to ever be marked constant,
// and slow us down a lot. Just mark them overdefined.
- if (PN.getNumIncomingValues() > 64) {
- markOverdefined(PNIV, &PN);
- return;
- }
-
+ if (PN.getNumIncomingValues() > 64)
+ return markOverdefined(&PN);
+
// Look at all of the executable operands of the PHI node. If any of them
// are overdefined, the PHI becomes overdefined as well. If they are all
// constant, and they agree with each other, the PHI becomes the identical
@@ -575,32 +699,28 @@ void SCCPSolver::visitPHINode(PHINode &PN) {
//
Constant *OperandVal = 0;
for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
- LatticeVal &IV = getValueState(PN.getIncomingValue(i));
+ LatticeVal IV = getValueState(PN.getIncomingValue(i));
if (IV.isUndefined()) continue; // Doesn't influence PHI node.
- if (isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent())) {
- if (IV.isOverdefined()) { // PHI node becomes overdefined!
- markOverdefined(&PN);
- return;
- }
+ if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent()))
+ continue;
+
+ if (IV.isOverdefined()) // PHI node becomes overdefined!
+ return markOverdefined(&PN);
- if (OperandVal == 0) { // Grab the first value...
- OperandVal = IV.getConstant();
- } else { // Another value is being merged in!
- // There is already a reachable operand. If we conflict with it,
- // then the PHI node becomes overdefined. If we agree with it, we
- // can continue on.
-
- // Check to see if there are two different constants merging...
- if (IV.getConstant() != OperandVal) {
- // Yes there is. This means the PHI node is not constant.
- // You must be overdefined poor PHI.
- //
- markOverdefined(&PN); // The PHI node now becomes overdefined
- return; // I'm done analyzing you
- }
- }
+ if (OperandVal == 0) { // Grab the first value.
+ OperandVal = IV.getConstant();
+ continue;
}
+
+ // There is already a reachable operand. If we conflict with it,
+ // then the PHI node becomes overdefined. If we agree with it, we
+ // can continue on.
+
+ // Check to see if there are two different constants merging, if so, the PHI
+ // node is overdefined.
+ if (IV.getConstant() != OperandVal)
+ return markOverdefined(&PN);
}
// If we exited the loop, this means that the PHI node only has constant
@@ -612,44 +732,33 @@ void SCCPSolver::visitPHINode(PHINode &PN) {
markConstant(&PN, OperandVal); // Acquire operand value
}
+
+
+
void SCCPSolver::visitReturnInst(ReturnInst &I) {
- if (I.getNumOperands() == 0) return; // Ret void
+ if (I.getNumOperands() == 0) return; // ret void
Function *F = I.getParent()->getParent();
+ Value *ResultOp = I.getOperand(0);
+
// If we are tracking the return value of this function, merge it in.
- if (!F->hasLocalLinkage())
- return;
-
- if (!TrackedRetVals.empty() && I.getNumOperands() == 1) {
+ if (!TrackedRetVals.empty() && !isa<StructType>(ResultOp->getType())) {
DenseMap<Function*, LatticeVal>::iterator TFRVI =
TrackedRetVals.find(F);
- if (TFRVI != TrackedRetVals.end() &&
- !TFRVI->second.isOverdefined()) {
- LatticeVal &IV = getValueState(I.getOperand(0));
- mergeInValue(TFRVI->second, F, IV);
+ if (TFRVI != TrackedRetVals.end()) {
+ mergeInValue(TFRVI->second, F, getValueState(ResultOp));
return;
}
}
// Handle functions that return multiple values.
- if (!TrackedMultipleRetVals.empty() && I.getNumOperands() > 1) {
- for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
- DenseMap<std::pair<Function*, unsigned>, LatticeVal>::iterator
- It = TrackedMultipleRetVals.find(std::make_pair(F, i));
- if (It == TrackedMultipleRetVals.end()) break;
- mergeInValue(It->second, F, getValueState(I.getOperand(i)));
- }
- } else if (!TrackedMultipleRetVals.empty() &&
- I.getNumOperands() == 1 &&
- isa<StructType>(I.getOperand(0)->getType())) {
- for (unsigned i = 0, e = I.getOperand(0)->getType()->getNumContainedTypes();
- i != e; ++i) {
- DenseMap<std::pair<Function*, unsigned>, LatticeVal>::iterator
- It = TrackedMultipleRetVals.find(std::make_pair(F, i));
- if (It == TrackedMultipleRetVals.end()) break;
- if (Value *Val = FindInsertedValue(I.getOperand(0), i, I.getContext()))
- mergeInValue(It->second, F, getValueState(Val));
- }
+ if (!TrackedMultipleRetVals.empty()) {
+ if (const StructType *STy = dyn_cast<StructType>(ResultOp->getType()))
+ if (MRVFunctionsTracked.count(F))
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
+ mergeInValue(TrackedMultipleRetVals[std::make_pair(F, i)], F,
+ getStructValueState(ResultOp, i));
+
}
}
@@ -659,356 +768,306 @@ void SCCPSolver::visitTerminatorInst(TerminatorInst &TI) {
BasicBlock *BB = TI.getParent();
- // Mark all feasible successors executable...
+ // Mark all feasible successors executable.
for (unsigned i = 0, e = SuccFeasible.size(); i != e; ++i)
if (SuccFeasible[i])
markEdgeExecutable(BB, TI.getSuccessor(i));
}
void SCCPSolver::visitCastInst(CastInst &I) {
- Value *V = I.getOperand(0);
- LatticeVal &VState = getValueState(V);
- if (VState.isOverdefined()) // Inherit overdefinedness of operand
+ LatticeVal OpSt = getValueState(I.getOperand(0));
+ if (OpSt.isOverdefined()) // Inherit overdefinedness of operand
markOverdefined(&I);
- else if (VState.isConstant()) // Propagate constant value
+ else if (OpSt.isConstant()) // Propagate constant value
markConstant(&I, ConstantExpr::getCast(I.getOpcode(),
- VState.getConstant(), I.getType()));
+ OpSt.getConstant(), I.getType()));
}
-void SCCPSolver::visitExtractValueInst(ExtractValueInst &EVI) {
- Value *Aggr = EVI.getAggregateOperand();
-
- // If the operand to the extractvalue is an undef, the result is undef.
- if (isa<UndefValue>(Aggr))
- return;
- // Currently only handle single-index extractvalues.
- if (EVI.getNumIndices() != 1) {
- markOverdefined(&EVI);
- return;
- }
-
- Function *F = 0;
- if (CallInst *CI = dyn_cast<CallInst>(Aggr))
- F = CI->getCalledFunction();
- else if (InvokeInst *II = dyn_cast<InvokeInst>(Aggr))
- F = II->getCalledFunction();
-
- // TODO: If IPSCCP resolves the callee of this function, we could propagate a
- // result back!
- if (F == 0 || TrackedMultipleRetVals.empty()) {
- markOverdefined(&EVI);
- return;
- }
-
- // See if we are tracking the result of the callee. If not tracking this
- // function (for example, it is a declaration) just move to overdefined.
- if (!TrackedMultipleRetVals.count(std::make_pair(F, *EVI.idx_begin()))) {
- markOverdefined(&EVI);
- return;
- }
-
- // Otherwise, the value will be merged in here as a result of CallSite
- // handling.
+void SCCPSolver::visitExtractValueInst(ExtractValueInst &EVI) {
+ // If this returns a struct, mark all elements over defined, we don't track
+ // structs in structs.
+ if (isa<StructType>(EVI.getType()))
+ return markAnythingOverdefined(&EVI);
+
+ // If this is extracting from more than one level of struct, we don't know.
+ if (EVI.getNumIndices() != 1)
+ return markOverdefined(&EVI);
+
+ Value *AggVal = EVI.getAggregateOperand();
+ unsigned i = *EVI.idx_begin();
+ LatticeVal EltVal = getStructValueState(AggVal, i);
+ mergeInValue(getValueState(&EVI), &EVI, EltVal);
}
void SCCPSolver::visitInsertValueInst(InsertValueInst &IVI) {
+ const StructType *STy = dyn_cast<StructType>(IVI.getType());
+ if (STy == 0)
+ return markOverdefined(&IVI);
+
+ // If this has more than one index, we can't handle it, drive all results to
+ // undef.
+ if (IVI.getNumIndices() != 1)
+ return markAnythingOverdefined(&IVI);
+
Value *Aggr = IVI.getAggregateOperand();
- Value *Val = IVI.getInsertedValueOperand();
-
- // If the operands to the insertvalue are undef, the result is undef.
- if (isa<UndefValue>(Aggr) && isa<UndefValue>(Val))
- return;
-
- // Currently only handle single-index insertvalues.
- if (IVI.getNumIndices() != 1) {
- markOverdefined(&IVI);
- return;
- }
-
- // Currently only handle insertvalue instructions that are in a single-use
- // chain that builds up a return value.
- for (const InsertValueInst *TmpIVI = &IVI; ; ) {
- if (!TmpIVI->hasOneUse()) {
- markOverdefined(&IVI);
- return;
+ unsigned Idx = *IVI.idx_begin();
+
+ // Compute the result based on what we're inserting.
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ // This passes through all values that aren't the inserted element.
+ if (i != Idx) {
+ LatticeVal EltVal = getStructValueState(Aggr, i);
+ mergeInValue(getStructValueState(&IVI, i), &IVI, EltVal);
+ continue;
}
- const Value *V = *TmpIVI->use_begin();
- if (isa<ReturnInst>(V))
- break;
- TmpIVI = dyn_cast<InsertValueInst>(V);
- if (!TmpIVI) {
- markOverdefined(&IVI);
- return;
+
+ Value *Val = IVI.getInsertedValueOperand();
+ if (isa<StructType>(Val->getType()))
+ // We don't track structs in structs.
+ markOverdefined(getStructValueState(&IVI, i), &IVI);
+ else {
+ LatticeVal InVal = getValueState(Val);
+ mergeInValue(getStructValueState(&IVI, i), &IVI, InVal);
}
}
-
- // See if we are tracking the result of the callee.
- Function *F = IVI.getParent()->getParent();
- DenseMap<std::pair<Function*, unsigned>, LatticeVal>::iterator
- It = TrackedMultipleRetVals.find(std::make_pair(F, *IVI.idx_begin()));
-
- // Merge in the inserted member value.
- if (It != TrackedMultipleRetVals.end())
- mergeInValue(It->second, F, getValueState(Val));
-
- // Mark the aggregate result of the IVI overdefined; any tracking that we do
- // will be done on the individual member values.
- markOverdefined(&IVI);
}
void SCCPSolver::visitSelectInst(SelectInst &I) {
- LatticeVal &CondValue = getValueState(I.getCondition());
+ // If this select returns a struct, just mark the result overdefined.
+ // TODO: We could do a lot better than this if code actually uses this.
+ if (isa<StructType>(I.getType()))
+ return markAnythingOverdefined(&I);
+
+ LatticeVal CondValue = getValueState(I.getCondition());
if (CondValue.isUndefined())
return;
- if (CondValue.isConstant()) {
- if (ConstantInt *CondCB = dyn_cast<ConstantInt>(CondValue.getConstant())){
- mergeInValue(&I, getValueState(CondCB->getZExtValue() ? I.getTrueValue()
- : I.getFalseValue()));
- return;
- }
+
+ if (ConstantInt *CondCB = CondValue.getConstantInt()) {
+ Value *OpVal = CondCB->isZero() ? I.getFalseValue() : I.getTrueValue();
+ mergeInValue(&I, getValueState(OpVal));
+ return;
}
// Otherwise, the condition is overdefined or a constant we can't evaluate.
// See if we can produce something better than overdefined based on the T/F
// value.
- LatticeVal &TVal = getValueState(I.getTrueValue());
- LatticeVal &FVal = getValueState(I.getFalseValue());
+ LatticeVal TVal = getValueState(I.getTrueValue());
+ LatticeVal FVal = getValueState(I.getFalseValue());
// select ?, C, C -> C.
if (TVal.isConstant() && FVal.isConstant() &&
- TVal.getConstant() == FVal.getConstant()) {
- markConstant(&I, FVal.getConstant());
- return;
- }
+ TVal.getConstant() == FVal.getConstant())
+ return markConstant(&I, FVal.getConstant());
- if (TVal.isUndefined()) { // select ?, undef, X -> X.
- mergeInValue(&I, FVal);
- } else if (FVal.isUndefined()) { // select ?, X, undef -> X.
- mergeInValue(&I, TVal);
- } else {
- markOverdefined(&I);
- }
+ if (TVal.isUndefined()) // select ?, undef, X -> X.
+ return mergeInValue(&I, FVal);
+ if (FVal.isUndefined()) // select ?, X, undef -> X.
+ return mergeInValue(&I, TVal);
+ markOverdefined(&I);
}
-// Handle BinaryOperators and Shift Instructions...
+// Handle Binary Operators.
void SCCPSolver::visitBinaryOperator(Instruction &I) {
+ LatticeVal V1State = getValueState(I.getOperand(0));
+ LatticeVal V2State = getValueState(I.getOperand(1));
+
LatticeVal &IV = ValueState[&I];
if (IV.isOverdefined()) return;
- LatticeVal &V1State = getValueState(I.getOperand(0));
- LatticeVal &V2State = getValueState(I.getOperand(1));
-
- if (V1State.isOverdefined() || V2State.isOverdefined()) {
- // If this is an AND or OR with 0 or -1, it doesn't matter that the other
- // operand is overdefined.
- if (I.getOpcode() == Instruction::And || I.getOpcode() == Instruction::Or) {
- LatticeVal *NonOverdefVal = 0;
- if (!V1State.isOverdefined()) {
- NonOverdefVal = &V1State;
- } else if (!V2State.isOverdefined()) {
- NonOverdefVal = &V2State;
+ if (V1State.isConstant() && V2State.isConstant())
+ return markConstant(IV, &I,
+ ConstantExpr::get(I.getOpcode(), V1State.getConstant(),
+ V2State.getConstant()));
+
+ // If something is undef, wait for it to resolve.
+ if (!V1State.isOverdefined() && !V2State.isOverdefined())
+ return;
+
+ // Otherwise, one of our operands is overdefined. Try to produce something
+ // better than overdefined with some tricks.
+
+ // If this is an AND or OR with 0 or -1, it doesn't matter that the other
+ // operand is overdefined.
+ if (I.getOpcode() == Instruction::And || I.getOpcode() == Instruction::Or) {
+ LatticeVal *NonOverdefVal = 0;
+ if (!V1State.isOverdefined())
+ NonOverdefVal = &V1State;
+ else if (!V2State.isOverdefined())
+ NonOverdefVal = &V2State;
+
+ if (NonOverdefVal) {
+ if (NonOverdefVal->isUndefined()) {
+ // Could annihilate value.
+ if (I.getOpcode() == Instruction::And)
+ markConstant(IV, &I, Constant::getNullValue(I.getType()));
+ else if (const VectorType *PT = dyn_cast<VectorType>(I.getType()))
+ markConstant(IV, &I, Constant::getAllOnesValue(PT));
+ else
+ markConstant(IV, &I,
+ Constant::getAllOnesValue(I.getType()));
+ return;
}
-
- if (NonOverdefVal) {
- if (NonOverdefVal->isUndefined()) {
- // Could annihilate value.
- if (I.getOpcode() == Instruction::And)
- markConstant(IV, &I, Constant::getNullValue(I.getType()));
- else if (const VectorType *PT = dyn_cast<VectorType>(I.getType()))
- markConstant(IV, &I, Constant::getAllOnesValue(PT));
- else
- markConstant(IV, &I,
- Constant::getAllOnesValue(I.getType()));
- return;
- } else {
- if (I.getOpcode() == Instruction::And) {
- if (NonOverdefVal->getConstant()->isNullValue()) {
- markConstant(IV, &I, NonOverdefVal->getConstant());
- return; // X and 0 = 0
- }
- } else {
- if (ConstantInt *CI =
- dyn_cast<ConstantInt>(NonOverdefVal->getConstant()))
- if (CI->isAllOnesValue()) {
- markConstant(IV, &I, NonOverdefVal->getConstant());
- return; // X or -1 = -1
- }
- }
- }
+
+ if (I.getOpcode() == Instruction::And) {
+ // X and 0 = 0
+ if (NonOverdefVal->getConstant()->isNullValue())
+ return markConstant(IV, &I, NonOverdefVal->getConstant());
+ } else {
+ if (ConstantInt *CI = NonOverdefVal->getConstantInt())
+ if (CI->isAllOnesValue()) // X or -1 = -1
+ return markConstant(IV, &I, NonOverdefVal->getConstant());
}
}
+ }
- // If both operands are PHI nodes, it is possible that this instruction has
- // a constant value, despite the fact that the PHI node doesn't. Check for
- // this condition now.
- if (PHINode *PN1 = dyn_cast<PHINode>(I.getOperand(0)))
- if (PHINode *PN2 = dyn_cast<PHINode>(I.getOperand(1)))
- if (PN1->getParent() == PN2->getParent()) {
- // Since the two PHI nodes are in the same basic block, they must have
- // entries for the same predecessors. Walk the predecessor list, and
- // if all of the incoming values are constants, and the result of
- // evaluating this expression with all incoming value pairs is the
- // same, then this expression is a constant even though the PHI node
- // is not a constant!
- LatticeVal Result;
- for (unsigned i = 0, e = PN1->getNumIncomingValues(); i != e; ++i) {
- LatticeVal &In1 = getValueState(PN1->getIncomingValue(i));
- BasicBlock *InBlock = PN1->getIncomingBlock(i);
- LatticeVal &In2 =
- getValueState(PN2->getIncomingValueForBlock(InBlock));
-
- if (In1.isOverdefined() || In2.isOverdefined()) {
+ // If both operands are PHI nodes, it is possible that this instruction has
+ // a constant value, despite the fact that the PHI node doesn't. Check for
+ // this condition now.
+ if (PHINode *PN1 = dyn_cast<PHINode>(I.getOperand(0)))
+ if (PHINode *PN2 = dyn_cast<PHINode>(I.getOperand(1)))
+ if (PN1->getParent() == PN2->getParent()) {
+ // Since the two PHI nodes are in the same basic block, they must have
+ // entries for the same predecessors. Walk the predecessor list, and
+ // if all of the incoming values are constants, and the result of
+ // evaluating this expression with all incoming value pairs is the
+ // same, then this expression is a constant even though the PHI node
+ // is not a constant!
+ LatticeVal Result;
+ for (unsigned i = 0, e = PN1->getNumIncomingValues(); i != e; ++i) {
+ LatticeVal In1 = getValueState(PN1->getIncomingValue(i));
+ BasicBlock *InBlock = PN1->getIncomingBlock(i);
+ LatticeVal In2 =getValueState(PN2->getIncomingValueForBlock(InBlock));
+
+ if (In1.isOverdefined() || In2.isOverdefined()) {
+ Result.markOverdefined();
+ break; // Cannot fold this operation over the PHI nodes!
+ }
+
+ if (In1.isConstant() && In2.isConstant()) {
+ Constant *V = ConstantExpr::get(I.getOpcode(), In1.getConstant(),
+ In2.getConstant());
+ if (Result.isUndefined())
+ Result.markConstant(V);
+ else if (Result.isConstant() && Result.getConstant() != V) {
Result.markOverdefined();
- break; // Cannot fold this operation over the PHI nodes!
- } else if (In1.isConstant() && In2.isConstant()) {
- Constant *V =
- ConstantExpr::get(I.getOpcode(), In1.getConstant(),
- In2.getConstant());
- if (Result.isUndefined())
- Result.markConstant(V);
- else if (Result.isConstant() && Result.getConstant() != V) {
- Result.markOverdefined();
- break;
- }
+ break;
}
}
+ }
- // If we found a constant value here, then we know the instruction is
- // constant despite the fact that the PHI nodes are overdefined.
- if (Result.isConstant()) {
- markConstant(IV, &I, Result.getConstant());
- // Remember that this instruction is virtually using the PHI node
- // operands.
- UsersOfOverdefinedPHIs.insert(std::make_pair(PN1, &I));
- UsersOfOverdefinedPHIs.insert(std::make_pair(PN2, &I));
- return;
- } else if (Result.isUndefined()) {
- return;
- }
-
- // Okay, this really is overdefined now. Since we might have
- // speculatively thought that this was not overdefined before, and
- // added ourselves to the UsersOfOverdefinedPHIs list for the PHIs,
- // make sure to clean out any entries that we put there, for
- // efficiency.
- std::multimap<PHINode*, Instruction*>::iterator It, E;
- tie(It, E) = UsersOfOverdefinedPHIs.equal_range(PN1);
- while (It != E) {
- if (It->second == &I) {
- UsersOfOverdefinedPHIs.erase(It++);
- } else
- ++It;
- }
- tie(It, E) = UsersOfOverdefinedPHIs.equal_range(PN2);
- while (It != E) {
- if (It->second == &I) {
- UsersOfOverdefinedPHIs.erase(It++);
- } else
- ++It;
- }
+ // If we found a constant value here, then we know the instruction is
+ // constant despite the fact that the PHI nodes are overdefined.
+ if (Result.isConstant()) {
+ markConstant(IV, &I, Result.getConstant());
+ // Remember that this instruction is virtually using the PHI node
+ // operands.
+ UsersOfOverdefinedPHIs.insert(std::make_pair(PN1, &I));
+ UsersOfOverdefinedPHIs.insert(std::make_pair(PN2, &I));
+ return;
}
+
+ if (Result.isUndefined())
+ return;
- markOverdefined(IV, &I);
- } else if (V1State.isConstant() && V2State.isConstant()) {
- markConstant(IV, &I,
- ConstantExpr::get(I.getOpcode(), V1State.getConstant(),
- V2State.getConstant()));
- }
+ // Okay, this really is overdefined now. Since we might have
+ // speculatively thought that this was not overdefined before, and
+ // added ourselves to the UsersOfOverdefinedPHIs list for the PHIs,
+ // make sure to clean out any entries that we put there, for
+ // efficiency.
+ RemoveFromOverdefinedPHIs(&I, PN1);
+ RemoveFromOverdefinedPHIs(&I, PN2);
+ }
+
+ markOverdefined(&I);
}
-// Handle ICmpInst instruction...
+// Handle ICmpInst instruction.
void SCCPSolver::visitCmpInst(CmpInst &I) {
+ LatticeVal V1State = getValueState(I.getOperand(0));
+ LatticeVal V2State = getValueState(I.getOperand(1));
+
LatticeVal &IV = ValueState[&I];
if (IV.isOverdefined()) return;
- LatticeVal &V1State = getValueState(I.getOperand(0));
- LatticeVal &V2State = getValueState(I.getOperand(1));
-
- if (V1State.isOverdefined() || V2State.isOverdefined()) {
- // If both operands are PHI nodes, it is possible that this instruction has
- // a constant value, despite the fact that the PHI node doesn't. Check for
- // this condition now.
- if (PHINode *PN1 = dyn_cast<PHINode>(I.getOperand(0)))
- if (PHINode *PN2 = dyn_cast<PHINode>(I.getOperand(1)))
- if (PN1->getParent() == PN2->getParent()) {
- // Since the two PHI nodes are in the same basic block, they must have
- // entries for the same predecessors. Walk the predecessor list, and
- // if all of the incoming values are constants, and the result of
- // evaluating this expression with all incoming value pairs is the
- // same, then this expression is a constant even though the PHI node
- // is not a constant!
- LatticeVal Result;
- for (unsigned i = 0, e = PN1->getNumIncomingValues(); i != e; ++i) {
- LatticeVal &In1 = getValueState(PN1->getIncomingValue(i));
- BasicBlock *InBlock = PN1->getIncomingBlock(i);
- LatticeVal &In2 =
- getValueState(PN2->getIncomingValueForBlock(InBlock));
-
- if (In1.isOverdefined() || In2.isOverdefined()) {
+ if (V1State.isConstant() && V2State.isConstant())
+ return markConstant(IV, &I, ConstantExpr::getCompare(I.getPredicate(),
+ V1State.getConstant(),
+ V2State.getConstant()));
+
+ // If operands are still undefined, wait for it to resolve.
+ if (!V1State.isOverdefined() && !V2State.isOverdefined())
+ return;
+
+ // If something is overdefined, use some tricks to avoid ending up and over
+ // defined if we can.
+
+ // If both operands are PHI nodes, it is possible that this instruction has
+ // a constant value, despite the fact that the PHI node doesn't. Check for
+ // this condition now.
+ if (PHINode *PN1 = dyn_cast<PHINode>(I.getOperand(0)))
+ if (PHINode *PN2 = dyn_cast<PHINode>(I.getOperand(1)))
+ if (PN1->getParent() == PN2->getParent()) {
+ // Since the two PHI nodes are in the same basic block, they must have
+ // entries for the same predecessors. Walk the predecessor list, and
+ // if all of the incoming values are constants, and the result of
+ // evaluating this expression with all incoming value pairs is the
+ // same, then this expression is a constant even though the PHI node
+ // is not a constant!
+ LatticeVal Result;
+ for (unsigned i = 0, e = PN1->getNumIncomingValues(); i != e; ++i) {
+ LatticeVal In1 = getValueState(PN1->getIncomingValue(i));
+ BasicBlock *InBlock = PN1->getIncomingBlock(i);
+ LatticeVal In2 =getValueState(PN2->getIncomingValueForBlock(InBlock));
+
+ if (In1.isOverdefined() || In2.isOverdefined()) {
+ Result.markOverdefined();
+ break; // Cannot fold this operation over the PHI nodes!
+ }
+
+ if (In1.isConstant() && In2.isConstant()) {
+ Constant *V = ConstantExpr::getCompare(I.getPredicate(),
+ In1.getConstant(),
+ In2.getConstant());
+ if (Result.isUndefined())
+ Result.markConstant(V);
+ else if (Result.isConstant() && Result.getConstant() != V) {
Result.markOverdefined();
- break; // Cannot fold this operation over the PHI nodes!
- } else if (In1.isConstant() && In2.isConstant()) {
- Constant *V = ConstantExpr::getCompare(I.getPredicate(),
- In1.getConstant(),
- In2.getConstant());
- if (Result.isUndefined())
- Result.markConstant(V);
- else if (Result.isConstant() && Result.getConstant() != V) {
- Result.markOverdefined();
- break;
- }
+ break;
}
}
+ }
- // If we found a constant value here, then we know the instruction is
- // constant despite the fact that the PHI nodes are overdefined.
- if (Result.isConstant()) {
- markConstant(IV, &I, Result.getConstant());
- // Remember that this instruction is virtually using the PHI node
- // operands.
- UsersOfOverdefinedPHIs.insert(std::make_pair(PN1, &I));
- UsersOfOverdefinedPHIs.insert(std::make_pair(PN2, &I));
- return;
- } else if (Result.isUndefined()) {
- return;
- }
-
- // Okay, this really is overdefined now. Since we might have
- // speculatively thought that this was not overdefined before, and
- // added ourselves to the UsersOfOverdefinedPHIs list for the PHIs,
- // make sure to clean out any entries that we put there, for
- // efficiency.
- std::multimap<PHINode*, Instruction*>::iterator It, E;
- tie(It, E) = UsersOfOverdefinedPHIs.equal_range(PN1);
- while (It != E) {
- if (It->second == &I) {
- UsersOfOverdefinedPHIs.erase(It++);
- } else
- ++It;
- }
- tie(It, E) = UsersOfOverdefinedPHIs.equal_range(PN2);
- while (It != E) {
- if (It->second == &I) {
- UsersOfOverdefinedPHIs.erase(It++);
- } else
- ++It;
- }
+ // If we found a constant value here, then we know the instruction is
+ // constant despite the fact that the PHI nodes are overdefined.
+ if (Result.isConstant()) {
+ markConstant(&I, Result.getConstant());
+ // Remember that this instruction is virtually using the PHI node
+ // operands.
+ UsersOfOverdefinedPHIs.insert(std::make_pair(PN1, &I));
+ UsersOfOverdefinedPHIs.insert(std::make_pair(PN2, &I));
+ return;
}
+
+ if (Result.isUndefined())
+ return;
- markOverdefined(IV, &I);
- } else if (V1State.isConstant() && V2State.isConstant()) {
- markConstant(IV, &I, ConstantExpr::getCompare(I.getPredicate(),
- V1State.getConstant(),
- V2State.getConstant()));
- }
+ // Okay, this really is overdefined now. Since we might have
+ // speculatively thought that this was not overdefined before, and
+ // added ourselves to the UsersOfOverdefinedPHIs list for the PHIs,
+ // make sure to clean out any entries that we put there, for
+ // efficiency.
+ RemoveFromOverdefinedPHIs(&I, PN1);
+ RemoveFromOverdefinedPHIs(&I, PN2);
+ }
+
+ markOverdefined(&I);
}
void SCCPSolver::visitExtractElementInst(ExtractElementInst &I) {
- // FIXME : SCCP does not handle vectors properly.
- markOverdefined(&I);
- return;
+ // TODO : SCCP does not handle vectors properly.
+ return markOverdefined(&I);
#if 0
LatticeVal &ValState = getValueState(I.getOperand(0));
@@ -1023,9 +1082,8 @@ void SCCPSolver::visitExtractElementInst(ExtractElementInst &I) {
}
void SCCPSolver::visitInsertElementInst(InsertElementInst &I) {
- // FIXME : SCCP does not handle vectors properly.
- markOverdefined(&I);
- return;
+ // TODO : SCCP does not handle vectors properly.
+ return markOverdefined(&I);
#if 0
LatticeVal &ValState = getValueState(I.getOperand(0));
LatticeVal &EltState = getValueState(I.getOperand(1));
@@ -1048,9 +1106,8 @@ void SCCPSolver::visitInsertElementInst(InsertElementInst &I) {
}
void SCCPSolver::visitShuffleVectorInst(ShuffleVectorInst &I) {
- // FIXME : SCCP does not handle vectors properly.
- markOverdefined(&I);
- return;
+ // TODO : SCCP does not handle vectors properly.
+ return markOverdefined(&I);
#if 0
LatticeVal &V1State = getValueState(I.getOperand(0));
LatticeVal &V2State = getValueState(I.getOperand(1));
@@ -1076,46 +1133,46 @@ void SCCPSolver::visitShuffleVectorInst(ShuffleVectorInst &I) {
#endif
}
-// Handle getelementptr instructions... if all operands are constants then we
+// Handle getelementptr instructions. If all operands are constants then we
// can turn this into a getelementptr ConstantExpr.
//
void SCCPSolver::visitGetElementPtrInst(GetElementPtrInst &I) {
- LatticeVal &IV = ValueState[&I];
- if (IV.isOverdefined()) return;
+ if (ValueState[&I].isOverdefined()) return;
SmallVector<Constant*, 8> Operands;
Operands.reserve(I.getNumOperands());
for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
- LatticeVal &State = getValueState(I.getOperand(i));
+ LatticeVal State = getValueState(I.getOperand(i));
if (State.isUndefined())
- return; // Operands are not resolved yet...
- else if (State.isOverdefined()) {
- markOverdefined(IV, &I);
- return;
- }
+ return; // Operands are not resolved yet.
+
+ if (State.isOverdefined())
+ return markOverdefined(&I);
+
assert(State.isConstant() && "Unknown state!");
Operands.push_back(State.getConstant());
}
Constant *Ptr = Operands[0];
- Operands.erase(Operands.begin()); // Erase the pointer from idx list...
-
- markConstant(IV, &I, ConstantExpr::getGetElementPtr(Ptr, &Operands[0],
- Operands.size()));
+ markConstant(&I, ConstantExpr::getGetElementPtr(Ptr, &Operands[0]+1,
+ Operands.size()-1));
}
-void SCCPSolver::visitStoreInst(Instruction &SI) {
+void SCCPSolver::visitStoreInst(StoreInst &SI) {
+ // If this store is of a struct, ignore it.
+ if (isa<StructType>(SI.getOperand(0)->getType()))
+ return;
+
if (TrackedGlobals.empty() || !isa<GlobalVariable>(SI.getOperand(1)))
return;
+
GlobalVariable *GV = cast<GlobalVariable>(SI.getOperand(1));
DenseMap<GlobalVariable*, LatticeVal>::iterator I = TrackedGlobals.find(GV);
if (I == TrackedGlobals.end() || I->second.isOverdefined()) return;
- // Get the value we are storing into the global.
- LatticeVal &PtrVal = getValueState(SI.getOperand(0));
-
- mergeInValue(I->second, GV, PtrVal);
+ // Get the value we are storing into the global, then merge it.
+ mergeInValue(I->second, GV, getValueState(SI.getOperand(0)));
if (I->second.isOverdefined())
TrackedGlobals.erase(I); // No need to keep tracking this!
}
@@ -1124,50 +1181,42 @@ void SCCPSolver::visitStoreInst(Instruction &SI) {
// Handle load instructions. If the operand is a constant pointer to a constant
// global, we can replace the load with the loaded constant value!
void SCCPSolver::visitLoadInst(LoadInst &I) {
+ // If this load is of a struct, just mark the result overdefined.
+ if (isa<StructType>(I.getType()))
+ return markAnythingOverdefined(&I);
+
+ LatticeVal PtrVal = getValueState(I.getOperand(0));
+ if (PtrVal.isUndefined()) return; // The pointer is not resolved yet!
+
LatticeVal &IV = ValueState[&I];
if (IV.isOverdefined()) return;
- LatticeVal &PtrVal = getValueState(I.getOperand(0));
- if (PtrVal.isUndefined()) return; // The pointer is not resolved yet!
- if (PtrVal.isConstant() && !I.isVolatile()) {
- Value *Ptr = PtrVal.getConstant();
- // TODO: Consider a target hook for valid address spaces for this xform.
- if (isa<ConstantPointerNull>(Ptr) && I.getPointerAddressSpace() == 0) {
- // load null -> null
- markConstant(IV, &I, Constant::getNullValue(I.getType()));
- return;
- }
+ if (!PtrVal.isConstant() || I.isVolatile())
+ return markOverdefined(IV, &I);
+
+ Constant *Ptr = PtrVal.getConstant();
- // Transform load (constant global) into the value loaded.
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
- if (GV->isConstant()) {
- if (GV->hasDefinitiveInitializer()) {
- markConstant(IV, &I, GV->getInitializer());
- return;
- }
- } else if (!TrackedGlobals.empty()) {
- // If we are tracking this global, merge in the known value for it.
- DenseMap<GlobalVariable*, LatticeVal>::iterator It =
- TrackedGlobals.find(GV);
- if (It != TrackedGlobals.end()) {
- mergeInValue(IV, &I, It->second);
- return;
- }
+ // load null -> null
+ if (isa<ConstantPointerNull>(Ptr) && I.getPointerAddressSpace() == 0)
+ return markConstant(IV, &I, Constant::getNullValue(I.getType()));
+
+ // Transform load (constant global) into the value loaded.
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
+ if (!TrackedGlobals.empty()) {
+ // If we are tracking this global, merge in the known value for it.
+ DenseMap<GlobalVariable*, LatticeVal>::iterator It =
+ TrackedGlobals.find(GV);
+ if (It != TrackedGlobals.end()) {
+ mergeInValue(IV, &I, It->second);
+ return;
}
}
-
- // Transform load (constantexpr_GEP global, 0, ...) into the value loaded.
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
- if (CE->getOpcode() == Instruction::GetElementPtr)
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
- if (GV->isConstant() && GV->hasDefinitiveInitializer())
- if (Constant *V =
- ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE)) {
- markConstant(IV, &I, V);
- return;
- }
}
+ // Transform load from a constant into a constant if possible.
+ if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, TD))
+ return markConstant(IV, &I, C);
+
// Otherwise we cannot say for certain what value this load will produce.
// Bail out.
markOverdefined(IV, &I);
@@ -1180,97 +1229,83 @@ void SCCPSolver::visitCallSite(CallSite CS) {
// The common case is that we aren't tracking the callee, either because we
// are not doing interprocedural analysis or the callee is indirect, or is
// external. Handle these cases first.
- if (F == 0 || !F->hasLocalLinkage()) {
+ if (F == 0 || F->isDeclaration()) {
CallOverdefined:
// Void return and not tracking callee, just bail.
if (I->getType()->isVoidTy()) return;
// Otherwise, if we have a single return value case, and if the function is
// a declaration, maybe we can constant fold it.
- if (!isa<StructType>(I->getType()) && F && F->isDeclaration() &&
+ if (F && F->isDeclaration() && !isa<StructType>(I->getType()) &&
canConstantFoldCallTo(F)) {
SmallVector<Constant*, 8> Operands;
for (CallSite::arg_iterator AI = CS.arg_begin(), E = CS.arg_end();
AI != E; ++AI) {
- LatticeVal &State = getValueState(*AI);
+ LatticeVal State = getValueState(*AI);
+
if (State.isUndefined())
return; // Operands are not resolved yet.
- else if (State.isOverdefined()) {
- markOverdefined(I);
- return;
- }
+ if (State.isOverdefined())
+ return markOverdefined(I);
assert(State.isConstant() && "Unknown state!");
Operands.push_back(State.getConstant());
}
// If we can constant fold this, mark the result of the call as a
// constant.
- if (Constant *C = ConstantFoldCall(F, Operands.data(), Operands.size())) {
- markConstant(I, C);
- return;
- }
+ if (Constant *C = ConstantFoldCall(F, Operands.data(), Operands.size()))
+ return markConstant(I, C);
}
// Otherwise, we don't know anything about this call, mark it overdefined.
- markOverdefined(I);
- return;
+ return markAnythingOverdefined(I);
}
- // If this is a single/zero retval case, see if we're tracking the function.
- DenseMap<Function*, LatticeVal>::iterator TFRVI = TrackedRetVals.find(F);
- if (TFRVI != TrackedRetVals.end()) {
- // If so, propagate the return value of the callee into this call result.
- mergeInValue(I, TFRVI->second);
- } else if (isa<StructType>(I->getType())) {
- // Check to see if we're tracking this callee, if not, handle it in the
- // common path above.
- DenseMap<std::pair<Function*, unsigned>, LatticeVal>::iterator
- TMRVI = TrackedMultipleRetVals.find(std::make_pair(F, 0));
- if (TMRVI == TrackedMultipleRetVals.end())
- goto CallOverdefined;
-
- // Need to mark as overdefined, otherwise it stays undefined which
- // creates extractvalue undef, <idx>
- markOverdefined(I);
- // If we are tracking this callee, propagate the return values of the call
- // into this call site. We do this by walking all the uses. Single-index
- // ExtractValueInst uses can be tracked; anything more complicated is
- // currently handled conservatively.
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
- UI != E; ++UI) {
- if (ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(*UI)) {
- if (EVI->getNumIndices() == 1) {
- mergeInValue(EVI,
- TrackedMultipleRetVals[std::make_pair(F, *EVI->idx_begin())]);
- continue;
- }
+ // If this is a local function that doesn't have its address taken, mark its
+ // entry block executable and merge in the actual arguments to the call into
+ // the formal arguments of the function.
+ if (!TrackingIncomingArguments.empty() && TrackingIncomingArguments.count(F)){
+ MarkBlockExecutable(F->begin());
+
+ // Propagate information from this call site into the callee.
+ CallSite::arg_iterator CAI = CS.arg_begin();
+ for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
+ AI != E; ++AI, ++CAI) {
+ // If this argument is byval, and if the function is not readonly, there
+ // will be an implicit copy formed of the input aggregate.
+ if (AI->hasByValAttr() && !F->onlyReadsMemory()) {
+ markOverdefined(AI);
+ continue;
+ }
+
+ if (const StructType *STy = dyn_cast<StructType>(AI->getType())) {
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
+ mergeInValue(getStructValueState(AI, i), AI,
+ getStructValueState(*CAI, i));
+ } else {
+ mergeInValue(AI, getValueState(*CAI));
}
- // The aggregate value is used in a way not handled here. Assume nothing.
- markOverdefined(*UI);
}
- } else {
- // Otherwise we're not tracking this callee, so handle it in the
- // common path above.
- goto CallOverdefined;
}
-
- // Finally, if this is the first call to the function hit, mark its entry
- // block executable.
- if (!BBExecutable.count(F->begin()))
- MarkBlockExecutable(F->begin());
- // Propagate information from this call site into the callee.
- CallSite::arg_iterator CAI = CS.arg_begin();
- for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
- AI != E; ++AI, ++CAI) {
- LatticeVal &IV = ValueState[AI];
- if (AI->hasByValAttr() && !F->onlyReadsMemory()) {
- IV.markOverdefined();
- continue;
- }
- if (!IV.isOverdefined())
- mergeInValue(IV, AI, getValueState(*CAI));
+ // If this is a single/zero retval case, see if we're tracking the function.
+ if (const StructType *STy = dyn_cast<StructType>(F->getReturnType())) {
+ if (!MRVFunctionsTracked.count(F))
+ goto CallOverdefined; // Not tracking this callee.
+
+ // If we are tracking this callee, propagate the result of the function
+ // into this call site.
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
+ mergeInValue(getStructValueState(I, i), I,
+ TrackedMultipleRetVals[std::make_pair(F, i)]);
+ } else {
+ DenseMap<Function*, LatticeVal>::iterator TFRVI = TrackedRetVals.find(F);
+ if (TFRVI == TrackedRetVals.end())
+ goto CallOverdefined; // Not tracking this callee.
+
+ // If so, propagate the return value of the callee into this call result.
+ mergeInValue(I, TFRVI->second);
}
}
@@ -1278,10 +1313,10 @@ void SCCPSolver::Solve() {
// Process the work lists until they are empty!
while (!BBWorkList.empty() || !InstWorkList.empty() ||
!OverdefinedInstWorkList.empty()) {
- // Process the instruction work list...
+ // Process the overdefined instruction's work list first, which drives other
+ // things to overdefined more quickly.
while (!OverdefinedInstWorkList.empty()) {
- Value *I = OverdefinedInstWorkList.back();
- OverdefinedInstWorkList.pop_back();
+ Value *I = OverdefinedInstWorkList.pop_back_val();
DEBUG(errs() << "\nPopped off OI-WL: " << *I << '\n');
@@ -1290,33 +1325,35 @@ void SCCPSolver::Solve() {
//
// Anything on this worklist that is overdefined need not be visited
// since all of its users will have already been marked as overdefined
- // Update all of the users of this instruction's value...
+ // Update all of the users of this instruction's value.
//
for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
UI != E; ++UI)
- OperandChangedState(*UI);
+ if (Instruction *I = dyn_cast<Instruction>(*UI))
+ OperandChangedState(I);
}
- // Process the instruction work list...
+
+ // Process the instruction work list.
while (!InstWorkList.empty()) {
- Value *I = InstWorkList.back();
- InstWorkList.pop_back();
+ Value *I = InstWorkList.pop_back_val();
DEBUG(errs() << "\nPopped off I-WL: " << *I << '\n');
- // "I" got into the work list because it either made the transition from
- // bottom to constant
+ // "I" got into the work list because it made the transition from undef to
+ // constant.
//
// Anything on this worklist that is overdefined need not be visited
// since all of its users will have already been marked as overdefined.
- // Update all of the users of this instruction's value...
+ // Update all of the users of this instruction's value.
//
- if (!getValueState(I).isOverdefined())
+ if (isa<StructType>(I->getType()) || !getValueState(I).isOverdefined())
for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
UI != E; ++UI)
- OperandChangedState(*UI);
+ if (Instruction *I = dyn_cast<Instruction>(*UI))
+ OperandChangedState(I);
}
- // Process the basic block work list...
+ // Process the basic block work list.
while (!BBWorkList.empty()) {
BasicBlock *BB = BBWorkList.back();
BBWorkList.pop_back();
@@ -1357,13 +1394,35 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
// Look for instructions which produce undef values.
if (I->getType()->isVoidTy()) continue;
+ if (const StructType *STy = dyn_cast<StructType>(I->getType())) {
+ // Only a few things that can be structs matter for undef. Just send
+ // all their results to overdefined. We could be more precise than this
+ // but it isn't worth bothering.
+ if (isa<CallInst>(I) || isa<SelectInst>(I)) {
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ LatticeVal &LV = getStructValueState(I, i);
+ if (LV.isUndefined())
+ markOverdefined(LV, I);
+ }
+ }
+ continue;
+ }
+
LatticeVal &LV = getValueState(I);
if (!LV.isUndefined()) continue;
+ // No instructions using structs need disambiguation.
+ if (isa<StructType>(I->getOperand(0)->getType()))
+ continue;
+
// Get the lattice values of the first two operands for use below.
- LatticeVal &Op0LV = getValueState(I->getOperand(0));
+ LatticeVal Op0LV = getValueState(I->getOperand(0));
LatticeVal Op1LV;
if (I->getNumOperands() == 2) {
+ // No instructions using structs need disambiguation.
+ if (isa<StructType>(I->getOperand(1)->getType()))
+ continue;
+
// If this is a two-operand instruction, and if both operands are
// undefs, the result stays undef.
Op1LV = getValueState(I->getOperand(1));
@@ -1380,23 +1439,18 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
// After a zero extend, we know the top part is zero. SExt doesn't have
// to be handled here, because we don't know whether the top part is 1's
// or 0's.
- assert(Op0LV.isUndefined());
- markForcedConstant(LV, I, Constant::getNullValue(ITy));
+ markForcedConstant(I, Constant::getNullValue(ITy));
return true;
case Instruction::Mul:
case Instruction::And:
// undef * X -> 0. X could be zero.
// undef & X -> 0. X could be zero.
- markForcedConstant(LV, I, Constant::getNullValue(ITy));
+ markForcedConstant(I, Constant::getNullValue(ITy));
return true;
case Instruction::Or:
// undef | X -> -1. X could be -1.
- if (const VectorType *PTy = dyn_cast<VectorType>(ITy))
- markForcedConstant(LV, I,
- Constant::getAllOnesValue(PTy));
- else
- markForcedConstant(LV, I, Constant::getAllOnesValue(ITy));
+ markForcedConstant(I, Constant::getAllOnesValue(ITy));
return true;
case Instruction::SDiv:
@@ -1409,7 +1463,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
// undef / X -> 0. X could be maxint.
// undef % X -> 0. X could be 1.
- markForcedConstant(LV, I, Constant::getNullValue(ITy));
+ markForcedConstant(I, Constant::getNullValue(ITy));
return true;
case Instruction::AShr:
@@ -1418,9 +1472,9 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
// X >>s undef -> X. X could be 0, X could have the high-bit known set.
if (Op0LV.isConstant())
- markForcedConstant(LV, I, Op0LV.getConstant());
+ markForcedConstant(I, Op0LV.getConstant());
else
- markOverdefined(LV, I);
+ markOverdefined(I);
return true;
case Instruction::LShr:
case Instruction::Shl:
@@ -1430,7 +1484,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
// X >> undef -> 0. X could be 0.
// X << undef -> 0. X could be 0.
- markForcedConstant(LV, I, Constant::getNullValue(ITy));
+ markForcedConstant(I, Constant::getNullValue(ITy));
return true;
case Instruction::Select:
// undef ? X : Y -> X or Y. There could be commonality between X/Y.
@@ -1448,15 +1502,15 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
}
if (Op1LV.isConstant())
- markForcedConstant(LV, I, Op1LV.getConstant());
+ markForcedConstant(I, Op1LV.getConstant());
else
- markOverdefined(LV, I);
+ markOverdefined(I);
return true;
case Instruction::Call:
// If a call has an undef result, it is because it is constant foldable
// but one of the inputs was undef. Just force the result to
// overdefined.
- markOverdefined(LV, I);
+ markOverdefined(I);
return true;
}
}
@@ -1467,7 +1521,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
if (!getValueState(BI->getCondition()).isUndefined())
continue;
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
- if (SI->getNumSuccessors()<2) // no cases
+ if (SI->getNumSuccessors() < 2) // no cases
continue;
if (!getValueState(SI->getCondition()).isUndefined())
continue;
@@ -1493,7 +1547,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
// as undef, then further analysis could think the undef went another way
// leading to an inconsistent set of conclusions.
if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
- BI->setCondition(ConstantInt::getFalse(*Context));
+ BI->setCondition(ConstantInt::getFalse(BI->getContext()));
} else {
SwitchInst *SI = cast<SwitchInst>(TI);
SI->setCondition(SI->getCaseValue(1));
@@ -1531,26 +1585,40 @@ char SCCP::ID = 0;
static RegisterPass<SCCP>
X("sccp", "Sparse Conditional Constant Propagation");
-// createSCCPPass - This is the public interface to this file...
+// createSCCPPass - This is the public interface to this file.
FunctionPass *llvm::createSCCPPass() {
return new SCCP();
}
+static void DeleteInstructionInBlock(BasicBlock *BB) {
+ DEBUG(errs() << " BasicBlock Dead:" << *BB);
+ ++NumDeadBlocks;
+
+ // Delete the instructions backwards, as it has a reduced likelihood of
+ // having to update as many def-use and use-def chains.
+ while (!isa<TerminatorInst>(BB->begin())) {
+ Instruction *I = --BasicBlock::iterator(BB->getTerminator());
+
+ if (!I->use_empty())
+ I->replaceAllUsesWith(UndefValue::get(I->getType()));
+ BB->getInstList().erase(I);
+ ++NumInstRemoved;
+ }
+}
// runOnFunction() - Run the Sparse Conditional Constant Propagation algorithm,
// and return true if the function was modified.
//
bool SCCP::runOnFunction(Function &F) {
DEBUG(errs() << "SCCP on function '" << F.getName() << "'\n");
- SCCPSolver Solver;
- Solver.setContext(&F.getContext());
+ SCCPSolver Solver(getAnalysisIfAvailable<TargetData>());
// Mark the first block of the function as being executable.
Solver.MarkBlockExecutable(F.begin());
// Mark all arguments to the function as being overdefined.
for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end(); AI != E;++AI)
- Solver.markOverdefined(AI);
+ Solver.markAnythingOverdefined(AI);
// Solve for constants.
bool ResolvedUndefs = true;
@@ -1565,57 +1633,45 @@ bool SCCP::runOnFunction(Function &F) {
// If we decided that there are basic blocks that are dead in this function,
// delete their contents now. Note that we cannot actually delete the blocks,
// as we cannot modify the CFG of the function.
- //
- SmallVector<Instruction*, 512> Insts;
- std::map<Value*, LatticeVal> &Values = Solver.getValueMapping();
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
+ for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
if (!Solver.isBlockExecutable(BB)) {
- DEBUG(errs() << " BasicBlock Dead:" << *BB);
- ++NumDeadBlocks;
-
- // Delete the instructions backwards, as it has a reduced likelihood of
- // having to update as many def-use and use-def chains.
- for (BasicBlock::iterator I = BB->begin(), E = BB->getTerminator();
- I != E; ++I)
- Insts.push_back(I);
- while (!Insts.empty()) {
- Instruction *I = Insts.back();
- Insts.pop_back();
- if (!I->use_empty())
- I->replaceAllUsesWith(UndefValue::get(I->getType()));
- BB->getInstList().erase(I);
- MadeChanges = true;
- ++NumInstRemoved;
- }
- } else {
- // Iterate over all of the instructions in a function, replacing them with
- // constants if we have found them to be of constant values.
- //
- for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E; ) {
- Instruction *Inst = BI++;
- if (Inst->getType()->isVoidTy() || isa<TerminatorInst>(Inst))
- continue;
-
- LatticeVal &IV = Values[Inst];
- if (!IV.isConstant() && !IV.isUndefined())
- continue;
-
- Constant *Const = IV.isConstant()
- ? IV.getConstant() : UndefValue::get(Inst->getType());
- DEBUG(errs() << " Constant: " << *Const << " = " << *Inst);
+ DeleteInstructionInBlock(BB);
+ MadeChanges = true;
+ continue;
+ }
+
+ // Iterate over all of the instructions in a function, replacing them with
+ // constants if we have found them to be of constant values.
+ //
+ for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E; ) {
+ Instruction *Inst = BI++;
+ if (Inst->getType()->isVoidTy() || isa<TerminatorInst>(Inst))
+ continue;
+
+ // TODO: Reconstruct structs from their elements.
+ if (isa<StructType>(Inst->getType()))
+ continue;
+
+ LatticeVal IV = Solver.getLatticeValueFor(Inst);
+ if (IV.isOverdefined())
+ continue;
+
+ Constant *Const = IV.isConstant()
+ ? IV.getConstant() : UndefValue::get(Inst->getType());
+ DEBUG(errs() << " Constant: " << *Const << " = " << *Inst);
- // Replaces all of the uses of a variable with uses of the constant.
- Inst->replaceAllUsesWith(Const);
-
- // Delete the instruction.
- Inst->eraseFromParent();
-
- // Hey, we just changed something!
- MadeChanges = true;
- ++NumInstRemoved;
- }
+ // Replaces all of the uses of a variable with uses of the constant.
+ Inst->replaceAllUsesWith(Const);
+
+ // Delete the instruction.
+ Inst->eraseFromParent();
+
+ // Hey, we just changed something!
+ MadeChanges = true;
+ ++NumInstRemoved;
}
+ }
return MadeChanges;
}
@@ -1637,7 +1693,7 @@ char IPSCCP::ID = 0;
static RegisterPass<IPSCCP>
Y("ipsccp", "Interprocedural Sparse Conditional Constant Propagation");
-// createIPSCCPPass - This is the public interface to this file...
+// createIPSCCPPass - This is the public interface to this file.
ModulePass *llvm::createIPSCCPPass() {
return new IPSCCP();
}
@@ -1654,12 +1710,14 @@ static bool AddressIsTaken(GlobalValue *GV) {
return true; // Storing addr of GV.
} else if (isa<InvokeInst>(*UI) || isa<CallInst>(*UI)) {
// Make sure we are calling the function, not passing the address.
- CallSite CS = CallSite::get(cast<Instruction>(*UI));
- if (CS.hasArgument(GV))
+ if (UI.getOperandNo() != 0)
return true;
} else if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
if (LI->isVolatile())
return true;
+ } else if (isa<BlockAddress>(*UI)) {
+ // blockaddress doesn't take the address of the function, it takes addr
+ // of label.
} else {
return true;
}
@@ -1667,25 +1725,37 @@ static bool AddressIsTaken(GlobalValue *GV) {
}
bool IPSCCP::runOnModule(Module &M) {
- LLVMContext *Context = &M.getContext();
-
- SCCPSolver Solver;
- Solver.setContext(Context);
+ SCCPSolver Solver(getAnalysisIfAvailable<TargetData>());
// Loop over all functions, marking arguments to those with their addresses
// taken or that are external as overdefined.
//
- for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F)
- if (!F->hasLocalLinkage() || AddressIsTaken(F)) {
- if (!F->isDeclaration())
- Solver.MarkBlockExecutable(F->begin());
- for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
- AI != E; ++AI)
- Solver.markOverdefined(AI);
- } else {
+ for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
+ if (F->isDeclaration())
+ continue;
+
+ // If this is a strong or ODR definition of this function, then we can
+ // propagate information about its result into callsites of it.
+ if (!F->mayBeOverridden())
Solver.AddTrackedFunction(F);
+
+ // If this function only has direct calls that we can see, we can track its
+ // arguments and return value aggressively, and can assume it is not called
+ // unless we see evidence to the contrary.
+ if (F->hasLocalLinkage() && !AddressIsTaken(F)) {
+ Solver.AddArgumentTrackedFunction(F);
+ continue;
}
+ // Assume the function is called.
+ Solver.MarkBlockExecutable(F->begin());
+
+ // Assume nothing about the incoming arguments.
+ for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
+ AI != E; ++AI)
+ Solver.markAnythingOverdefined(AI);
+ }
+
// Loop over global variables. We inform the solver about any internal global
// variables that do not have their 'addresses taken'. If they don't have
// their addresses taken, we can propagate constants through them.
@@ -1710,48 +1780,37 @@ bool IPSCCP::runOnModule(Module &M) {
// Iterate over all of the instructions in the module, replacing them with
// constants if we have found them to be of constant values.
//
- SmallVector<Instruction*, 512> Insts;
SmallVector<BasicBlock*, 512> BlocksToErase;
- std::map<Value*, LatticeVal> &Values = Solver.getValueMapping();
for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
- for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
- AI != E; ++AI)
- if (!AI->use_empty()) {
- LatticeVal &IV = Values[AI];
- if (IV.isConstant() || IV.isUndefined()) {
- Constant *CST = IV.isConstant() ?
- IV.getConstant() : UndefValue::get(AI->getType());
- DEBUG(errs() << "*** Arg " << *AI << " = " << *CST <<"\n");
-
- // Replaces all of the uses of a variable with uses of the
- // constant.
- AI->replaceAllUsesWith(CST);
- ++IPNumArgsElimed;
- }
+ if (Solver.isBlockExecutable(F->begin())) {
+ for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
+ AI != E; ++AI) {
+ if (AI->use_empty() || isa<StructType>(AI->getType())) continue;
+
+ // TODO: Could use getStructLatticeValueFor to find out if the entire
+ // result is a constant and replace it entirely if so.
+
+ LatticeVal IV = Solver.getLatticeValueFor(AI);
+ if (IV.isOverdefined()) continue;
+
+ Constant *CST = IV.isConstant() ?
+ IV.getConstant() : UndefValue::get(AI->getType());
+ DEBUG(errs() << "*** Arg " << *AI << " = " << *CST <<"\n");
+
+ // Replaces all of the uses of a variable with uses of the
+ // constant.
+ AI->replaceAllUsesWith(CST);
+ ++IPNumArgsElimed;
}
+ }
- for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
+ for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
if (!Solver.isBlockExecutable(BB)) {
- DEBUG(errs() << " BasicBlock Dead:" << *BB);
- ++IPNumDeadBlocks;
+ DeleteInstructionInBlock(BB);
+ MadeChanges = true;
- // Delete the instructions backwards, as it has a reduced likelihood of
- // having to update as many def-use and use-def chains.
TerminatorInst *TI = BB->getTerminator();
- for (BasicBlock::iterator I = BB->begin(), E = TI; I != E; ++I)
- Insts.push_back(I);
-
- while (!Insts.empty()) {
- Instruction *I = Insts.back();
- Insts.pop_back();
- if (!I->use_empty())
- I->replaceAllUsesWith(UndefValue::get(I->getType()));
- BB->getInstList().erase(I);
- MadeChanges = true;
- ++IPNumInstRemoved;
- }
-
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
BasicBlock *Succ = TI->getSuccessor(i);
if (!Succ->empty() && isa<PHINode>(Succ->begin()))
@@ -1759,40 +1818,44 @@ bool IPSCCP::runOnModule(Module &M) {
}
if (!TI->use_empty())
TI->replaceAllUsesWith(UndefValue::get(TI->getType()));
- BB->getInstList().erase(TI);
+ TI->eraseFromParent();
if (&*BB != &F->front())
BlocksToErase.push_back(BB);
else
new UnreachableInst(M.getContext(), BB);
+ continue;
+ }
+
+ for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E; ) {
+ Instruction *Inst = BI++;
+ if (Inst->getType()->isVoidTy() || isa<StructType>(Inst->getType()))
+ continue;
+
+ // TODO: Could use getStructLatticeValueFor to find out if the entire
+ // result is a constant and replace it entirely if so.
+
+ LatticeVal IV = Solver.getLatticeValueFor(Inst);
+ if (IV.isOverdefined())
+ continue;
+
+ Constant *Const = IV.isConstant()
+ ? IV.getConstant() : UndefValue::get(Inst->getType());
+ DEBUG(errs() << " Constant: " << *Const << " = " << *Inst);
- } else {
- for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E; ) {
- Instruction *Inst = BI++;
- if (Inst->getType()->isVoidTy())
- continue;
-
- LatticeVal &IV = Values[Inst];
- if (!IV.isConstant() && !IV.isUndefined())
- continue;
-
- Constant *Const = IV.isConstant()
- ? IV.getConstant() : UndefValue::get(Inst->getType());
- DEBUG(errs() << " Constant: " << *Const << " = " << *Inst);
-
- // Replaces all of the uses of a variable with uses of the
- // constant.
- Inst->replaceAllUsesWith(Const);
-
- // Delete the instruction.
- if (!isa<CallInst>(Inst) && !isa<TerminatorInst>(Inst))
- Inst->eraseFromParent();
+ // Replaces all of the uses of a variable with uses of the
+ // constant.
+ Inst->replaceAllUsesWith(Const);
+
+ // Delete the instruction.
+ if (!isa<CallInst>(Inst) && !isa<TerminatorInst>(Inst))
+ Inst->eraseFromParent();
- // Hey, we just changed something!
- MadeChanges = true;
- ++IPNumInstRemoved;
- }
+ // Hey, we just changed something!
+ MadeChanges = true;
+ ++IPNumInstRemoved;
}
+ }
// Now that all instructions in the function are constant folded, erase dead
// blocks, because we can now use ConstantFoldTerminator to get rid of
@@ -1844,16 +1907,21 @@ bool IPSCCP::runOnModule(Module &M) {
// TODO: Process multiple value ret instructions also.
const DenseMap<Function*, LatticeVal> &RV = Solver.getTrackedRetVals();
for (DenseMap<Function*, LatticeVal>::const_iterator I = RV.begin(),
- E = RV.end(); I != E; ++I)
- if (!I->second.isOverdefined() &&
- !I->first->getReturnType()->isVoidTy()) {
- Function *F = I->first;
- for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
- if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator()))
- if (!isa<UndefValue>(RI->getOperand(0)))
- RI->setOperand(0, UndefValue::get(F->getReturnType()));
- }
-
+ E = RV.end(); I != E; ++I) {
+ Function *F = I->first;
+ if (I->second.isOverdefined() || F->getReturnType()->isVoidTy())
+ continue;
+
+ // We can only do this if we know that nothing else can call the function.
+ if (!F->hasLocalLinkage() || AddressIsTaken(F))
+ continue;
+
+ for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
+ if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator()))
+ if (!isa<UndefValue>(RI->getOperand(0)))
+ RI->setOperand(0, UndefValue::get(F->getReturnType()));
+ }
+
// If we infered constant or undef values for globals variables, we can delete
// the global and any stores that remain to it.
const DenseMap<GlobalVariable*, LatticeVal> &TG = Solver.getTrackedGlobals();
diff --git a/lib/Transforms/Scalar/SCCVN.cpp b/lib/Transforms/Scalar/SCCVN.cpp
new file mode 100644
index 0000000..c047fca
--- /dev/null
+++ b/lib/Transforms/Scalar/SCCVN.cpp
@@ -0,0 +1,721 @@
+//===- SCCVN.cpp - Eliminate redundant values -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass performs global value numbering to eliminate fully redundant
+// instructions. This is based on the paper "SCC-based Value Numbering"
+// by Cooper, et al.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "sccvn"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/BasicBlock.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Operator.h"
+#include "llvm/Value.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseBitVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Transforms/Utils/SSAUpdater.h"
+#include <cstdio>
+using namespace llvm;
+
+STATISTIC(NumSCCVNInstr, "Number of instructions deleted by SCCVN");
+STATISTIC(NumSCCVNPhi, "Number of phis deleted by SCCVN");
+
+//===----------------------------------------------------------------------===//
+// ValueTable Class
+//===----------------------------------------------------------------------===//
+
+/// This class holds the mapping between values and value numbers. It is used
+/// as an efficient mechanism to determine the expression-wise equivalence of
+/// two values.
+namespace {
+ struct Expression {
+ enum ExpressionOpcode { ADD, FADD, SUB, FSUB, MUL, FMUL,
+ UDIV, SDIV, FDIV, UREM, SREM,
+ FREM, SHL, LSHR, ASHR, AND, OR, XOR, ICMPEQ,
+ ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
+ ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
+ FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
+ FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
+ FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
+ SHUFFLE, SELECT, TRUNC, ZEXT, SEXT, FPTOUI,
+ FPTOSI, UITOFP, SITOFP, FPTRUNC, FPEXT,
+ PTRTOINT, INTTOPTR, BITCAST, GEP, CALL, CONSTANT,
+ INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };
+
+ ExpressionOpcode opcode;
+ const Type* type;
+ SmallVector<uint32_t, 4> varargs;
+
+ Expression() { }
+ Expression(ExpressionOpcode o) : opcode(o) { }
+
+ bool operator==(const Expression &other) const {
+ if (opcode != other.opcode)
+ return false;
+ else if (opcode == EMPTY || opcode == TOMBSTONE)
+ return true;
+ else if (type != other.type)
+ return false;
+ else {
+ if (varargs.size() != other.varargs.size())
+ return false;
+
+ for (size_t i = 0; i < varargs.size(); ++i)
+ if (varargs[i] != other.varargs[i])
+ return false;
+
+ return true;
+ }
+ }
+
+ bool operator!=(const Expression &other) const {
+ return !(*this == other);
+ }
+ };
+
+ class ValueTable {
+ private:
+ DenseMap<Value*, uint32_t> valueNumbering;
+ DenseMap<Expression, uint32_t> expressionNumbering;
+ DenseMap<Value*, uint32_t> constantsNumbering;
+
+ uint32_t nextValueNumber;
+
+ Expression::ExpressionOpcode getOpcode(BinaryOperator* BO);
+ Expression::ExpressionOpcode getOpcode(CmpInst* C);
+ Expression::ExpressionOpcode getOpcode(CastInst* C);
+ Expression create_expression(BinaryOperator* BO);
+ Expression create_expression(CmpInst* C);
+ Expression create_expression(ShuffleVectorInst* V);
+ Expression create_expression(ExtractElementInst* C);
+ Expression create_expression(InsertElementInst* V);
+ Expression create_expression(SelectInst* V);
+ Expression create_expression(CastInst* C);
+ Expression create_expression(GetElementPtrInst* G);
+ Expression create_expression(CallInst* C);
+ Expression create_expression(Constant* C);
+ Expression create_expression(ExtractValueInst* C);
+ Expression create_expression(InsertValueInst* C);
+ public:
+ ValueTable() : nextValueNumber(1) { }
+ uint32_t computeNumber(Value *V);
+ uint32_t lookup(Value *V);
+ void add(Value *V, uint32_t num);
+ void clear();
+ void clearExpressions();
+ void erase(Value *v);
+ unsigned size();
+ void verifyRemoved(const Value *) const;
+ };
+}
+
+namespace llvm {
+template <> struct DenseMapInfo<Expression> {
+ static inline Expression getEmptyKey() {
+ return Expression(Expression::EMPTY);
+ }
+
+ static inline Expression getTombstoneKey() {
+ return Expression(Expression::TOMBSTONE);
+ }
+
+ static unsigned getHashValue(const Expression e) {
+ unsigned hash = e.opcode;
+
+ hash = ((unsigned)((uintptr_t)e.type >> 4) ^
+ (unsigned)((uintptr_t)e.type >> 9));
+
+ for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
+ E = e.varargs.end(); I != E; ++I)
+ hash = *I + hash * 37;
+
+ return hash;
+ }
+ static bool isEqual(const Expression &LHS, const Expression &RHS) {
+ return LHS == RHS;
+ }
+ static bool isPod() { return true; }
+};
+}
+
+//===----------------------------------------------------------------------===//
+// ValueTable Internal Functions
+//===----------------------------------------------------------------------===//
+Expression::ExpressionOpcode ValueTable::getOpcode(BinaryOperator* BO) {
+ switch(BO->getOpcode()) {
+ default: // THIS SHOULD NEVER HAPPEN
+ llvm_unreachable("Binary operator with unknown opcode?");
+ case Instruction::Add: return Expression::ADD;
+ case Instruction::FAdd: return Expression::FADD;
+ case Instruction::Sub: return Expression::SUB;
+ case Instruction::FSub: return Expression::FSUB;
+ case Instruction::Mul: return Expression::MUL;
+ case Instruction::FMul: return Expression::FMUL;
+ case Instruction::UDiv: return Expression::UDIV;
+ case Instruction::SDiv: return Expression::SDIV;
+ case Instruction::FDiv: return Expression::FDIV;
+ case Instruction::URem: return Expression::UREM;
+ case Instruction::SRem: return Expression::SREM;
+ case Instruction::FRem: return Expression::FREM;
+ case Instruction::Shl: return Expression::SHL;
+ case Instruction::LShr: return Expression::LSHR;
+ case Instruction::AShr: return Expression::ASHR;
+ case Instruction::And: return Expression::AND;
+ case Instruction::Or: return Expression::OR;
+ case Instruction::Xor: return Expression::XOR;
+ }
+}
+
+Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
+ if (isa<ICmpInst>(C)) {
+ switch (C->getPredicate()) {
+ default: // THIS SHOULD NEVER HAPPEN
+ llvm_unreachable("Comparison with unknown predicate?");
+ case ICmpInst::ICMP_EQ: return Expression::ICMPEQ;
+ case ICmpInst::ICMP_NE: return Expression::ICMPNE;
+ case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
+ case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
+ case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
+ case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
+ case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
+ case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
+ case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
+ case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
+ }
+ } else {
+ switch (C->getPredicate()) {
+ default: // THIS SHOULD NEVER HAPPEN
+ llvm_unreachable("Comparison with unknown predicate?");
+ case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
+ case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
+ case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
+ case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
+ case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
+ case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
+ case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
+ case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
+ case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
+ case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
+ case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
+ case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
+ case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
+ case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
+ }
+ }
+}
+
+Expression::ExpressionOpcode ValueTable::getOpcode(CastInst* C) {
+ switch(C->getOpcode()) {
+ default: // THIS SHOULD NEVER HAPPEN
+ llvm_unreachable("Cast operator with unknown opcode?");
+ case Instruction::Trunc: return Expression::TRUNC;
+ case Instruction::ZExt: return Expression::ZEXT;
+ case Instruction::SExt: return Expression::SEXT;
+ case Instruction::FPToUI: return Expression::FPTOUI;
+ case Instruction::FPToSI: return Expression::FPTOSI;
+ case Instruction::UIToFP: return Expression::UITOFP;
+ case Instruction::SIToFP: return Expression::SITOFP;
+ case Instruction::FPTrunc: return Expression::FPTRUNC;
+ case Instruction::FPExt: return Expression::FPEXT;
+ case Instruction::PtrToInt: return Expression::PTRTOINT;
+ case Instruction::IntToPtr: return Expression::INTTOPTR;
+ case Instruction::BitCast: return Expression::BITCAST;
+ }
+}
+
+Expression ValueTable::create_expression(CallInst* C) {
+ Expression e;
+
+ e.type = C->getType();
+ e.opcode = Expression::CALL;
+
+ e.varargs.push_back(lookup(C->getCalledFunction()));
+ for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
+ I != E; ++I)
+ e.varargs.push_back(lookup(*I));
+
+ return e;
+}
+
+Expression ValueTable::create_expression(BinaryOperator* BO) {
+ Expression e;
+ e.varargs.push_back(lookup(BO->getOperand(0)));
+ e.varargs.push_back(lookup(BO->getOperand(1)));
+ e.type = BO->getType();
+ e.opcode = getOpcode(BO);
+
+ return e;
+}
+
+Expression ValueTable::create_expression(CmpInst* C) {
+ Expression e;
+
+ e.varargs.push_back(lookup(C->getOperand(0)));
+ e.varargs.push_back(lookup(C->getOperand(1)));
+ e.type = C->getType();
+ e.opcode = getOpcode(C);
+
+ return e;
+}
+
+Expression ValueTable::create_expression(CastInst* C) {
+ Expression e;
+
+ e.varargs.push_back(lookup(C->getOperand(0)));
+ e.type = C->getType();
+ e.opcode = getOpcode(C);
+
+ return e;
+}
+
+Expression ValueTable::create_expression(ShuffleVectorInst* S) {
+ Expression e;
+
+ e.varargs.push_back(lookup(S->getOperand(0)));
+ e.varargs.push_back(lookup(S->getOperand(1)));
+ e.varargs.push_back(lookup(S->getOperand(2)));
+ e.type = S->getType();
+ e.opcode = Expression::SHUFFLE;
+
+ return e;
+}
+
+Expression ValueTable::create_expression(ExtractElementInst* E) {
+ Expression e;
+
+ e.varargs.push_back(lookup(E->getOperand(0)));
+ e.varargs.push_back(lookup(E->getOperand(1)));
+ e.type = E->getType();
+ e.opcode = Expression::EXTRACT;
+
+ return e;
+}
+
+Expression ValueTable::create_expression(InsertElementInst* I) {
+ Expression e;
+
+ e.varargs.push_back(lookup(I->getOperand(0)));
+ e.varargs.push_back(lookup(I->getOperand(1)));
+ e.varargs.push_back(lookup(I->getOperand(2)));
+ e.type = I->getType();
+ e.opcode = Expression::INSERT;
+
+ return e;
+}
+
+Expression ValueTable::create_expression(SelectInst* I) {
+ Expression e;
+
+ e.varargs.push_back(lookup(I->getCondition()));
+ e.varargs.push_back(lookup(I->getTrueValue()));
+ e.varargs.push_back(lookup(I->getFalseValue()));
+ e.type = I->getType();
+ e.opcode = Expression::SELECT;
+
+ return e;
+}
+
+Expression ValueTable::create_expression(GetElementPtrInst* G) {
+ Expression e;
+
+ e.varargs.push_back(lookup(G->getPointerOperand()));
+ e.type = G->getType();
+ e.opcode = Expression::GEP;
+
+ for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
+ I != E; ++I)
+ e.varargs.push_back(lookup(*I));
+
+ return e;
+}
+
+Expression ValueTable::create_expression(ExtractValueInst* E) {
+ Expression e;
+
+ e.varargs.push_back(lookup(E->getAggregateOperand()));
+ for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
+ II != IE; ++II)
+ e.varargs.push_back(*II);
+ e.type = E->getType();
+ e.opcode = Expression::EXTRACTVALUE;
+
+ return e;
+}
+
+Expression ValueTable::create_expression(InsertValueInst* E) {
+ Expression e;
+
+ e.varargs.push_back(lookup(E->getAggregateOperand()));
+ e.varargs.push_back(lookup(E->getInsertedValueOperand()));
+ for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
+ II != IE; ++II)
+ e.varargs.push_back(*II);
+ e.type = E->getType();
+ e.opcode = Expression::INSERTVALUE;
+
+ return e;
+}
+
+//===----------------------------------------------------------------------===//
+// ValueTable External Functions
+//===----------------------------------------------------------------------===//
+
+/// add - Insert a value into the table with a specified value number.
+void ValueTable::add(Value *V, uint32_t num) {
+ valueNumbering[V] = num;
+}
+
+/// computeNumber - Returns the value number for the specified value, assigning
+/// it a new number if it did not have one before.
+uint32_t ValueTable::computeNumber(Value *V) {
+ if (uint32_t v = valueNumbering[V])
+ return v;
+ else if (uint32_t v= constantsNumbering[V])
+ return v;
+
+ if (!isa<Instruction>(V)) {
+ constantsNumbering[V] = nextValueNumber;
+ return nextValueNumber++;
+ }
+
+ Instruction* I = cast<Instruction>(V);
+ Expression exp;
+ switch (I->getOpcode()) {
+ case Instruction::Add:
+ case Instruction::FAdd:
+ case Instruction::Sub:
+ case Instruction::FSub:
+ case Instruction::Mul:
+ case Instruction::FMul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::FDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::FRem:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::And:
+ case Instruction::Or :
+ case Instruction::Xor:
+ exp = create_expression(cast<BinaryOperator>(I));
+ break;
+ case Instruction::ICmp:
+ case Instruction::FCmp:
+ exp = create_expression(cast<CmpInst>(I));
+ break;
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::BitCast:
+ exp = create_expression(cast<CastInst>(I));
+ break;
+ case Instruction::Select:
+ exp = create_expression(cast<SelectInst>(I));
+ break;
+ case Instruction::ExtractElement:
+ exp = create_expression(cast<ExtractElementInst>(I));
+ break;
+ case Instruction::InsertElement:
+ exp = create_expression(cast<InsertElementInst>(I));
+ break;
+ case Instruction::ShuffleVector:
+ exp = create_expression(cast<ShuffleVectorInst>(I));
+ break;
+ case Instruction::ExtractValue:
+ exp = create_expression(cast<ExtractValueInst>(I));
+ break;
+ case Instruction::InsertValue:
+ exp = create_expression(cast<InsertValueInst>(I));
+ break;
+ case Instruction::GetElementPtr:
+ exp = create_expression(cast<GetElementPtrInst>(I));
+ break;
+ default:
+ valueNumbering[V] = nextValueNumber;
+ return nextValueNumber++;
+ }
+
+ uint32_t& e = expressionNumbering[exp];
+ if (!e) e = nextValueNumber++;
+ valueNumbering[V] = e;
+
+ return e;
+}
+
+/// lookup - Returns the value number of the specified value. Returns 0 if
+/// the value has not yet been numbered.
+uint32_t ValueTable::lookup(Value *V) {
+ if (!isa<Instruction>(V)) {
+ if (!constantsNumbering.count(V))
+ constantsNumbering[V] = nextValueNumber++;
+ return constantsNumbering[V];
+ }
+
+ return valueNumbering[V];
+}
+
+/// clear - Remove all entries from the ValueTable
+void ValueTable::clear() {
+ valueNumbering.clear();
+ expressionNumbering.clear();
+ constantsNumbering.clear();
+ nextValueNumber = 1;
+}
+
+void ValueTable::clearExpressions() {
+ expressionNumbering.clear();
+ constantsNumbering.clear();
+ nextValueNumber = 1;
+}
+
+/// erase - Remove a value from the value numbering
+void ValueTable::erase(Value *V) {
+ valueNumbering.erase(V);
+}
+
+/// verifyRemoved - Verify that the value is removed from all internal data
+/// structures.
+void ValueTable::verifyRemoved(const Value *V) const {
+ for (DenseMap<Value*, uint32_t>::iterator
+ I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
+ assert(I->first != V && "Inst still occurs in value numbering map!");
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// SCCVN Pass
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+ struct ValueNumberScope {
+ ValueNumberScope* parent;
+ DenseMap<uint32_t, Value*> table;
+ SparseBitVector<128> availIn;
+ SparseBitVector<128> availOut;
+
+ ValueNumberScope(ValueNumberScope* p) : parent(p) { }
+ };
+
+ class SCCVN : public FunctionPass {
+ bool runOnFunction(Function &F);
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ SCCVN() : FunctionPass(&ID) { }
+
+ private:
+ ValueTable VT;
+ DenseMap<BasicBlock*, ValueNumberScope*> BBMap;
+
+ // This transformation requires dominator postdominator info
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<DominatorTree>();
+
+ AU.addPreserved<DominatorTree>();
+ AU.setPreservesCFG();
+ }
+ };
+
+ char SCCVN::ID = 0;
+}
+
+// createSCCVNPass - The public interface to this file...
+FunctionPass *llvm::createSCCVNPass() { return new SCCVN(); }
+
+static RegisterPass<SCCVN> X("sccvn",
+ "SCC Value Numbering");
+
+static Value *lookupNumber(ValueNumberScope *Locals, uint32_t num) {
+ while (Locals) {
+ DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num);
+ if (I != Locals->table.end())
+ return I->second;
+ Locals = Locals->parent;
+ }
+
+ return 0;
+}
+
+bool SCCVN::runOnFunction(Function& F) {
+ // Implement the RPO version of the SCCVN algorithm. Conceptually,
+  // we optimistically assume that all instructions with the same opcode have
+ // the same VN. Then we deepen our comparison by one level, to all
+ // instructions whose operands have the same opcodes get the same VN. We
+ // iterate this process until the partitioning stops changing, at which
+ // point we have computed a full numbering.
+ ReversePostOrderTraversal<Function*> RPOT(&F);
+ bool done = false;
+ while (!done) {
+ done = true;
+ VT.clearExpressions();
+ for (ReversePostOrderTraversal<Function*>::rpo_iterator I = RPOT.begin(),
+ E = RPOT.end(); I != E; ++I) {
+ BasicBlock* BB = *I;
+ for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
+ BI != BE; ++BI) {
+ uint32_t origVN = VT.lookup(BI);
+ uint32_t newVN = VT.computeNumber(BI);
+ if (origVN != newVN)
+ done = false;
+ }
+ }
+ }
+
+ // Now, do a dominator walk, eliminating simple, dominated redundancies as we
+ // go. Also, build the ValueNumberScope structure that will be used for
+ // computing full availability.
+ DominatorTree& DT = getAnalysis<DominatorTree>();
+ bool changed = false;
+ for (df_iterator<DomTreeNode*> DI = df_begin(DT.getRootNode()),
+ DE = df_end(DT.getRootNode()); DI != DE; ++DI) {
+ BasicBlock* BB = DI->getBlock();
+ if (DI->getIDom())
+ BBMap[BB] = new ValueNumberScope(BBMap[DI->getIDom()->getBlock()]);
+ else
+ BBMap[BB] = new ValueNumberScope(0);
+
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
+ uint32_t num = VT.lookup(I);
+ Value* repl = lookupNumber(BBMap[BB], num);
+
+ if (repl) {
+ if (isa<PHINode>(I))
+ ++NumSCCVNPhi;
+ else
+ ++NumSCCVNInstr;
+ I->replaceAllUsesWith(repl);
+ Instruction* OldInst = I;
+ ++I;
+ BBMap[BB]->table[num] = repl;
+ OldInst->eraseFromParent();
+ changed = true;
+ } else {
+ BBMap[BB]->table[num] = I;
+ BBMap[BB]->availOut.set(num);
+
+ ++I;
+ }
+ }
+ }
+
+ // FIXME: This code is commented out for now, because it can lead to the
+ // insertion of a lot of redundant PHIs being inserted by SSAUpdater.
+#if 0
+ // Perform a forward data-flow to compute availability at all points on
+ // the CFG.
+ do {
+ changed = false;
+ for (ReversePostOrderTraversal<Function*>::rpo_iterator I = RPOT.begin(),
+ E = RPOT.end(); I != E; ++I) {
+ BasicBlock* BB = *I;
+ ValueNumberScope *VNS = BBMap[BB];
+
+ SparseBitVector<128> preds;
+ bool first = true;
+ for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
+ PI != PE; ++PI) {
+ if (first) {
+ preds = BBMap[*PI]->availOut;
+ first = false;
+ } else {
+ preds &= BBMap[*PI]->availOut;
+ }
+ }
+
+ changed |= (VNS->availIn |= preds);
+ changed |= (VNS->availOut |= preds);
+ }
+ } while (changed);
+
+ // Use full availability information to perform non-dominated replacements.
+ SSAUpdater SSU;
+ for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
+ if (!BBMap.count(FI)) continue;
+ for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
+ BI != BE; ) {
+ uint32_t num = VT.lookup(BI);
+ if (!BBMap[FI]->availIn.test(num)) {
+ ++BI;
+ continue;
+ }
+
+ SSU.Initialize(BI);
+
+ SmallPtrSet<BasicBlock*, 8> visited;
+ SmallVector<BasicBlock*, 8> stack;
+ visited.insert(FI);
+ for (pred_iterator PI = pred_begin(FI), PE = pred_end(FI);
+ PI != PE; ++PI)
+ if (!visited.count(*PI))
+ stack.push_back(*PI);
+
+ while (!stack.empty()) {
+ BasicBlock* CurrBB = stack.back();
+ stack.pop_back();
+ visited.insert(CurrBB);
+
+ ValueNumberScope* S = BBMap[CurrBB];
+ if (S->table.count(num)) {
+ SSU.AddAvailableValue(CurrBB, S->table[num]);
+ } else {
+ for (pred_iterator PI = pred_begin(CurrBB), PE = pred_end(CurrBB);
+ PI != PE; ++PI)
+ if (!visited.count(*PI))
+ stack.push_back(*PI);
+ }
+ }
+
+ Value* repl = SSU.GetValueInMiddleOfBlock(FI);
+ BI->replaceAllUsesWith(repl);
+ Instruction* CurInst = BI;
+ ++BI;
+ BBMap[FI]->table[num] = repl;
+ if (isa<PHINode>(CurInst))
+ ++NumSCCVNPhi;
+ else
+ ++NumSCCVNInstr;
+
+ CurInst->eraseFromParent();
+ }
+ }
+#endif
+
+ VT.clear();
+ for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
+ I = BBMap.begin(), E = BBMap.end(); I != E; ++I)
+ delete I->second;
+ BBMap.clear();
+
+ return changed;
+}
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 610d874..2e3b694 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -100,32 +100,32 @@ namespace {
void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }
- int isSafeAllocaToScalarRepl(AllocationInst *AI);
+ int isSafeAllocaToScalarRepl(AllocaInst *AI);
- void isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
+ void isSafeUseOfAllocation(Instruction *User, AllocaInst *AI,
AllocaInfo &Info);
- void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
+ void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocaInst *AI,
AllocaInfo &Info);
- void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
+ void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocaInst *AI,
unsigned OpNo, AllocaInfo &Info);
- void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI,
+ void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocaInst *AI,
AllocaInfo &Info);
- void DoScalarReplacement(AllocationInst *AI,
- std::vector<AllocationInst*> &WorkList);
+ void DoScalarReplacement(AllocaInst *AI,
+ std::vector<AllocaInst*> &WorkList);
void CleanupGEP(GetElementPtrInst *GEP);
- void CleanupAllocaUsers(AllocationInst *AI);
- AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);
+ void CleanupAllocaUsers(AllocaInst *AI);
+ AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocaInst *Base);
- void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
+ void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts);
void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
- AllocationInst *AI,
+ AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts);
- void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocationInst *AI,
+ void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts);
- void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
+ void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts);
bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
@@ -135,7 +135,7 @@ namespace {
uint64_t Offset, IRBuilder<> &Builder);
Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
uint64_t Offset, IRBuilder<> &Builder);
- static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
+ static Instruction *isOnlyCopiedFromConstantGlobal(AllocaInst *AI);
};
}
@@ -213,18 +213,18 @@ static uint64_t getNumSAElements(const Type *T) {
// them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
- std::vector<AllocationInst*> WorkList;
+ std::vector<AllocaInst*> WorkList;
// Scan the entry basic block, adding any alloca's and mallocs to the worklist
BasicBlock &BB = F.getEntryBlock();
for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
- if (AllocationInst *A = dyn_cast<AllocationInst>(I))
+ if (AllocaInst *A = dyn_cast<AllocaInst>(I))
WorkList.push_back(A);
// Process the worklist
bool Changed = false;
while (!WorkList.empty()) {
- AllocationInst *AI = WorkList.back();
+ AllocaInst *AI = WorkList.back();
WorkList.pop_back();
// Handle dead allocas trivially. These can be formed by SROA'ing arrays
@@ -335,8 +335,8 @@ bool SROA::performScalarRepl(Function &F) {
/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate, do SROA now.
-void SROA::DoScalarReplacement(AllocationInst *AI,
- std::vector<AllocationInst*> &WorkList) {
+void SROA::DoScalarReplacement(AllocaInst *AI,
+ std::vector<AllocaInst*> &WorkList) {
DEBUG(errs() << "Found inst to SROA: " << *AI << '\n');
SmallVector<AllocaInst*, 32> ElementAllocas;
if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
@@ -455,7 +455,7 @@ void SROA::DoScalarReplacement(AllocationInst *AI,
/// getelementptr instruction of an array aggregate allocation. isFirstElt
/// indicates whether Ptr is known to the start of the aggregate.
///
-void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
+void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocaInst *AI,
AllocaInfo &Info) {
for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
I != E; ++I) {
@@ -520,7 +520,7 @@ static bool AllUsersAreLoads(Value *Ptr) {
/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
///
-void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
+void SROA::isSafeUseOfAllocation(Instruction *User, AllocaInst *AI,
AllocaInfo &Info) {
if (BitCastInst *C = dyn_cast<BitCastInst>(User))
return isSafeUseOfBitCastedAllocation(C, AI, Info);
@@ -605,7 +605,7 @@ void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
/// isSafeMemIntrinsicOnAllocation - Return true if the specified memory
/// intrinsic can be promoted by SROA. At this point, we know that the operand
/// of the memintrinsic is a pointer to the beginning of the allocation.
-void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
+void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocaInst *AI,
unsigned OpNo, AllocaInfo &Info) {
// If not constant length, give up.
ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
@@ -632,7 +632,7 @@ void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
/// isSafeUseOfBitCastedAllocation - Return true if all users of this bitcast
/// are
-void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
+void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocaInst *AI,
AllocaInfo &Info) {
for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
UI != E; ++UI) {
@@ -690,7 +690,7 @@ void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element. Transform users of the cast to use the new values
/// instead.
-void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
+void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts) {
Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
while (UI != UE) {
@@ -729,7 +729,7 @@ void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
/// Rewrite it to copy or set the elements of the scalarized memory.
void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
- AllocationInst *AI,
+ AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts) {
// If this is a memcpy/memmove, construct the other pointer as the
@@ -905,8 +905,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
/// RewriteStoreUserOfWholeAlloca - We found an store of an integer that
/// overwrites the entire allocation. Extract out the pieces of the stored
/// integer and store them individually.
-void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
- AllocationInst *AI,
+void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts){
// Extract each element out of the integer according to its structure offset
// and store the element value to the individual alloca.
@@ -1029,7 +1028,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
/// RewriteLoadUserOfWholeAlloca - We found an load of the entire allocation to
/// an integer. Load the individual pieces to form the aggregate value.
-void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
+void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts) {
// Extract each element out of the NewElts according to its structure offset
// and form the result value.
@@ -1162,7 +1161,7 @@ static bool HasPadding(const Type *Ty, const TargetData &TD) {
/// an aggregate can be broken down into elements. Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
-int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
+int SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
// Loop over the use list of the alloca. We can only transform it if all of
// the users are safe to transform.
AllocaInfo Info;
@@ -1245,7 +1244,7 @@ void SROA::CleanupGEP(GetElementPtrInst *GEPI) {
/// CleanupAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
-void SROA::CleanupAllocaUsers(AllocationInst *AI) {
+void SROA::CleanupAllocaUsers(AllocaInst *AI) {
// At this point, we know that the end result will be SROA'd and promoted, so
// we can insert ugly code if required so long as sroa+mem2reg will clean it
// up.
@@ -1853,7 +1852,7 @@ static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
/// modified by a copy from a constant global. If we can prove this, we can
/// replace any uses of the alloca with uses of the global directly.
-Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
+Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocaInst *AI) {
Instruction *TheCopy = 0;
if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
return TheCopy;
diff --git a/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index 29712b3..6a81480 100644
--- a/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -126,6 +126,9 @@ static bool MarkAliveBlocks(BasicBlock *BB,
}
}
+ // Store to undef and store to null are undefined and used to signal that
+ // they should be changed to unreachable by passes that can't modify the
+ // CFG.
if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
Value *Ptr = SI->getOperand(1);
diff --git a/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index e1866015..575c93b 100644
--- a/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -509,6 +509,27 @@ static bool IsOnlyUsedInZeroEqualityComparison(Value *V) {
}
//===----------------------------------------------------------------------===//
+// Miscellaneous LibCall/Intrinsic Optimizations
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct SizeOpt : public LibCallOptimization {
+ virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ // TODO: We can do more with this, but delaying to here should be no change
+ // in behavior.
+ ConstantInt *Const = dyn_cast<ConstantInt>(CI->getOperand(2));
+
+ if (!Const) return 0;
+
+ if (Const->getZExtValue() < 2)
+ return Constant::getAllOnesValue(Const->getType());
+ else
+ return ConstantInt::get(Const->getType(), 0);
+ }
+};
+}
+
+//===----------------------------------------------------------------------===//
// String and Memory LibCall Optimizations
//===----------------------------------------------------------------------===//
@@ -1548,6 +1569,7 @@ namespace {
// Formatting and IO Optimizations
SPrintFOpt SPrintF; PrintFOpt PrintF;
FWriteOpt FWrite; FPutsOpt FPuts; FPrintFOpt FPrintF;
+ SizeOpt ObjectSize;
bool Modified; // This is only used by doInitialization.
public:
@@ -1653,6 +1675,9 @@ void SimplifyLibCalls::InitOptimizations() {
Optimizations["fwrite"] = &FWrite;
Optimizations["fputs"] = &FPuts;
Optimizations["fprintf"] = &FPrintF;
+
+ // Miscellaneous
+ Optimizations["llvm.objectsize"] = &ObjectSize;
}
diff --git a/lib/Transforms/Scalar/TailDuplication.cpp b/lib/Transforms/Scalar/TailDuplication.cpp
index 68689d6..4864e23 100644
--- a/lib/Transforms/Scalar/TailDuplication.cpp
+++ b/lib/Transforms/Scalar/TailDuplication.cpp
@@ -129,7 +129,7 @@ bool TailDup::shouldEliminateUnconditionalBranch(TerminatorInst *TI,
if (isa<CallInst>(I) || isa<InvokeInst>(I)) return false;
// Also alloca and malloc.
- if (isa<AllocationInst>(I)) return false;
+ if (isa<AllocaInst>(I)) return false;
// Some vector instructions can expand into a number of instructions.
if (isa<ShuffleVectorInst>(I) || isa<ExtractElementInst>(I) ||
diff --git a/lib/Transforms/Utils/BasicBlockUtils.cpp b/lib/Transforms/Utils/BasicBlockUtils.cpp
index 35907fd..c728c0b 100644
--- a/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -65,9 +65,6 @@ void llvm::DeleteDeadBlock(BasicBlock *BB) {
/// when all entries to the PHI nodes in a block are guaranteed equal, such as
/// when the block has exactly one predecessor.
void llvm::FoldSingleEntryPHINodes(BasicBlock *BB) {
- if (!isa<PHINode>(BB->begin()))
- return;
-
while (PHINode *PN = dyn_cast<PHINode>(BB->begin())) {
if (PN->getIncomingValue(0) != PN)
PN->replaceAllUsesWith(PN->getIncomingValue(0));
@@ -97,10 +94,14 @@ void llvm::DeleteDeadPHIs(BasicBlock *BB) {
/// MergeBlockIntoPredecessor - Attempts to merge a block into its predecessor,
/// if possible. The return value indicates success or failure.
-bool llvm::MergeBlockIntoPredecessor(BasicBlock* BB, Pass* P) {
+bool llvm::MergeBlockIntoPredecessor(BasicBlock *BB, Pass *P) {
pred_iterator PI(pred_begin(BB)), PE(pred_end(BB));
- // Can't merge the entry block.
- if (pred_begin(BB) == pred_end(BB)) return false;
+ // Can't merge the entry block. Don't merge away blocks who have their
+ // address taken: this is a bug if the predecessor block is the entry node
+ // (because we'd end up taking the address of the entry) and undesirable in
+ // any case.
+ if (pred_begin(BB) == pred_end(BB) ||
+ BB->hasAddressTaken()) return false;
BasicBlock *PredBB = *PI++;
for (; PI != PE; ++PI) // Search all predecessors, see if they are all same
@@ -274,6 +275,8 @@ void llvm::RemoveSuccessor(TerminatorInst *TI, unsigned SuccNum) {
/// SplitEdge - Split the edge connecting specified block. Pass P must
/// not be NULL.
BasicBlock *llvm::SplitEdge(BasicBlock *BB, BasicBlock *Succ, Pass *P) {
+ assert(!isa<IndirectBrInst>(BB->getTerminator()) &&
+ "Cannot split an edge from an IndirectBrInst");
TerminatorInst *LatchTerm = BB->getTerminator();
unsigned SuccNum = 0;
#ifndef NDEBUG
@@ -675,7 +678,7 @@ void llvm::CopyPrecedingStopPoint(Instruction *I,
if (I != I->getParent()->begin()) {
BasicBlock::iterator BBI = I; --BBI;
if (DbgStopPointInst *DSPI = dyn_cast<DbgStopPointInst>(BBI)) {
- CallInst *newDSPI = DSPI->clone();
+ CallInst *newDSPI = cast<CallInst>(DSPI->clone());
newDSPI->insertBefore(InsertPos);
}
}
diff --git a/lib/Transforms/Utils/BasicInliner.cpp b/lib/Transforms/Utils/BasicInliner.cpp
index 4b720b1..b5ffe06 100644
--- a/lib/Transforms/Utils/BasicInliner.cpp
+++ b/lib/Transforms/Utils/BasicInliner.cpp
@@ -34,7 +34,7 @@ namespace llvm {
/// BasicInlinerImpl - BasicInliner implemantation class. This hides
/// container info, used by basic inliner, from public interface.
- struct VISIBILITY_HIDDEN BasicInlinerImpl {
+ struct BasicInlinerImpl {
BasicInlinerImpl(const BasicInlinerImpl&); // DO NOT IMPLEMENT
void operator=(const BasicInlinerImpl&); // DO NO IMPLEMENT
diff --git a/lib/Transforms/Utils/BreakCriticalEdges.cpp b/lib/Transforms/Utils/BreakCriticalEdges.cpp
index 849b2b5..ccd97c8 100644
--- a/lib/Transforms/Utils/BreakCriticalEdges.cpp
+++ b/lib/Transforms/Utils/BreakCriticalEdges.cpp
@@ -26,7 +26,6 @@
#include "llvm/Instructions.h"
#include "llvm/Type.h"
#include "llvm/Support/CFG.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
@@ -35,7 +34,7 @@ using namespace llvm;
STATISTIC(NumBroken, "Number of blocks inserted");
namespace {
- struct VISIBILITY_HIDDEN BreakCriticalEdges : public FunctionPass {
+ struct BreakCriticalEdges : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
BreakCriticalEdges() : FunctionPass(&ID) {}
@@ -70,7 +69,7 @@ bool BreakCriticalEdges::runOnFunction(Function &F) {
bool Changed = false;
for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
TerminatorInst *TI = I->getTerminator();
- if (TI->getNumSuccessors() > 1)
+ if (TI->getNumSuccessors() > 1 && !isa<IndirectBrInst>(TI))
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
if (SplitCriticalEdge(TI, i, this)) {
++NumBroken;
@@ -151,14 +150,29 @@ static void CreatePHIsForSplitLoopExit(SmallVectorImpl<BasicBlock *> &Preds,
/// SplitCriticalEdge - If this edge is a critical edge, insert a new node to
/// split the critical edge. This will update DominatorTree and
-/// DominatorFrontier information if it is available, thus calling this pass
-/// will not invalidate any of them. This returns true if the edge was split,
-/// false otherwise. This ensures that all edges to that dest go to one block
-/// instead of each going to a different block.
-//
+/// DominatorFrontier information if it is available, thus calling this pass
+/// will not invalidate either of them. This returns the new block if the edge
+/// was split, null otherwise.
+///
+/// If MergeIdenticalEdges is true (not the default), *all* edges from TI to the
+/// specified successor will be merged into the same critical edge block.
+/// This is most commonly interesting with switch instructions, which may
+/// have many edges to any one destination. This ensures that all edges to that
+/// dest go to one block instead of each going to a different block, but isn't
+/// the standard definition of a "critical edge".
+///
+/// It is invalid to call this function on a critical edge that starts at an
+/// IndirectBrInst. Splitting these edges will almost always create an invalid
+/// program because the address of the new block won't be the one that is jumped
+/// to.
+///
BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
Pass *P, bool MergeIdenticalEdges) {
if (!isCriticalEdge(TI, SuccNum, MergeIdenticalEdges)) return 0;
+
+ assert(!isa<IndirectBrInst>(TI) &&
+ "Cannot split critical edge from IndirectBrInst");
+
BasicBlock *TIBB = TI->getParent();
BasicBlock *DestBB = TI->getSuccessor(SuccNum);
diff --git a/lib/Transforms/Utils/CMakeLists.txt b/lib/Transforms/Utils/CMakeLists.txt
index f4394ea..93577b4 100644
--- a/lib/Transforms/Utils/CMakeLists.txt
+++ b/lib/Transforms/Utils/CMakeLists.txt
@@ -13,7 +13,7 @@ add_llvm_library(LLVMTransformUtils
LCSSA.cpp
Local.cpp
LoopSimplify.cpp
- LowerAllocations.cpp
+ LoopUnroll.cpp
LowerInvoke.cpp
LowerSwitch.cpp
Mem2Reg.cpp
@@ -22,7 +22,6 @@ add_llvm_library(LLVMTransformUtils
SSI.cpp
SimplifyCFG.cpp
UnifyFunctionExitNodes.cpp
- UnrollLoop.cpp
ValueMapper.cpp
)
diff --git a/lib/Transforms/Utils/CloneFunction.cpp b/lib/Transforms/Utils/CloneFunction.cpp
index 30130fa..fd8862c 100644
--- a/lib/Transforms/Utils/CloneFunction.cpp
+++ b/lib/Transforms/Utils/CloneFunction.cpp
@@ -20,9 +20,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
-#include "llvm/LLVMContext.h"
#include "llvm/Support/CFG.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DebugInfo.h"
@@ -176,7 +174,7 @@ Function *llvm::CloneFunction(const Function *F,
namespace {
/// PruningFunctionCloner - This class is a private class used to implement
/// the CloneAndPruneFunctionInto method.
- struct VISIBILITY_HIDDEN PruningFunctionCloner {
+ struct PruningFunctionCloner {
Function *NewFunc;
const Function *OldFunc;
DenseMap<const Value*, Value*> &ValueMap;
@@ -329,8 +327,7 @@ ConstantFoldMappedInstruction(const Instruction *I) {
SmallVector<Constant*, 8> Ops;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
if (Constant *Op = dyn_cast_or_null<Constant>(MapValue(I->getOperand(i),
- ValueMap,
- Context)))
+ ValueMap)))
Ops.push_back(Op);
else
return 0; // All operands not constant!
@@ -366,7 +363,6 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
ClonedCodeInfo *CodeInfo,
const TargetData *TD) {
assert(NameSuffix && "NameSuffix cannot be null!");
- LLVMContext &Context = OldFunc->getContext();
#ifndef NDEBUG
for (Function::const_arg_iterator II = OldFunc->arg_begin(),
@@ -437,7 +433,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
if (BasicBlock *MappedBlock =
cast_or_null<BasicBlock>(ValueMap[PN->getIncomingBlock(pred)])) {
Value *InVal = MapValue(PN->getIncomingValue(pred),
- ValueMap, Context);
+ ValueMap);
assert(InVal && "Unknown input value?");
PN->setIncomingValue(pred, InVal);
PN->setIncomingBlock(pred, MappedBlock);
diff --git a/lib/Transforms/Utils/CloneModule.cpp b/lib/Transforms/Utils/CloneModule.cpp
index 0285f8c..a163f89 100644
--- a/lib/Transforms/Utils/CloneModule.cpp
+++ b/lib/Transforms/Utils/CloneModule.cpp
@@ -89,8 +89,7 @@ Module *llvm::CloneModule(const Module *M,
GlobalVariable *GV = cast<GlobalVariable>(ValueMap[I]);
if (I->hasInitializer())
GV->setInitializer(cast<Constant>(MapValue(I->getInitializer(),
- ValueMap,
- M->getContext())));
+ ValueMap)));
GV->setLinkage(I->getLinkage());
GV->setThreadLocal(I->isThreadLocal());
GV->setConstant(I->isConstant());
@@ -121,7 +120,7 @@ Module *llvm::CloneModule(const Module *M,
GlobalAlias *GA = cast<GlobalAlias>(ValueMap[I]);
GA->setLinkage(I->getLinkage());
if (const Constant* C = I->getAliasee())
- GA->setAliasee(cast<Constant>(MapValue(C, ValueMap, M->getContext())));
+ GA->setAliasee(cast<Constant>(MapValue(C, ValueMap)));
}
return New;
diff --git a/lib/Transforms/Utils/CodeExtractor.cpp b/lib/Transforms/Utils/CodeExtractor.cpp
index c39ccf7..f966681 100644
--- a/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/lib/Transforms/Utils/CodeExtractor.cpp
@@ -26,7 +26,6 @@
#include "llvm/Analysis/Verifier.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -44,7 +43,7 @@ AggregateArgsOpt("aggregate-extracted-args", cl::Hidden,
cl::desc("Aggregate arguments to code-extracted functions"));
namespace {
- class VISIBILITY_HIDDEN CodeExtractor {
+ class CodeExtractor {
typedef std::vector<Value*> Values;
std::set<BasicBlock*> BlocksToExtract;
DominatorTree* DT;
diff --git a/lib/Transforms/Utils/InlineFunction.cpp b/lib/Transforms/Utils/InlineFunction.cpp
index 619c939..20f5a4a 100644
--- a/lib/Transforms/Utils/InlineFunction.cpp
+++ b/lib/Transforms/Utils/InlineFunction.cpp
@@ -619,8 +619,17 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
"Ret value not consistent in function!");
PHI->addIncoming(RI->getReturnValue(), RI->getParent());
}
+
+ // Now that we inserted the PHI, check to see if it has a single value
+ // (e.g. all the entries are the same or undef). If so, remove the PHI so
+ // it doesn't block other optimizations.
+ if (Value *V = PHI->hasConstantValue()) {
+ PHI->replaceAllUsesWith(V);
+ PHI->eraseFromParent();
+ }
}
+
// Add a branch to the merge points and remove return instructions.
for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
ReturnInst *RI = Returns[i];
diff --git a/lib/Transforms/Utils/Local.cpp b/lib/Transforms/Utils/Local.cpp
index b622611..543ddf1 100644
--- a/lib/Transforms/Utils/Local.cpp
+++ b/lib/Transforms/Utils/Local.cpp
@@ -59,9 +59,8 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom) {
// If we see a free or a call which may write to memory (i.e. which might do
// a free) the pointer could be marked invalid.
- if (isa<FreeInst>(BBI) ||
- (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
- !isa<DbgInfoIntrinsic>(BBI)))
+ if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
+ !isa<DbgInfoIntrinsic>(BBI))
return false;
if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
@@ -110,7 +109,9 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB) {
// unconditional branch.
BI->setUnconditionalDest(Destination);
return true;
- } else if (Dest2 == Dest1) { // Conditional branch to same location?
+ }
+
+ if (Dest2 == Dest1) { // Conditional branch to same location?
// This branch matches something like this:
// br bool %cond, label %Dest, label %Dest
// and changes it into: br label %Dest
@@ -123,7 +124,10 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB) {
BI->setUnconditionalDest(Dest1);
return true;
}
- } else if (SwitchInst *SI = dyn_cast<SwitchInst>(T)) {
+ return false;
+ }
+
+ if (SwitchInst *SI = dyn_cast<SwitchInst>(T)) {
// If we are switching on a constant, we can convert the switch into a
// single branch instruction!
ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition());
@@ -132,7 +136,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB) {
assert(TheOnlyDest == SI->getDefaultDest() &&
"Default destination is not successor #0?");
- // Figure out which case it goes to...
+ // Figure out which case it goes to.
for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i) {
// Found case matching a constant operand?
if (SI->getSuccessorValue(i) == CI) {
@@ -143,7 +147,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB) {
// Check to see if this branch is going to the same place as the default
// dest. If so, eliminate it as an explicit compare.
if (SI->getSuccessor(i) == DefaultDest) {
- // Remove this entry...
+ // Remove this entry.
DefaultDest->removePredecessor(SI->getParent());
SI->removeCase(i);
--i; --e; // Don't skip an entry...
@@ -165,7 +169,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB) {
// If we found a single destination that we can fold the switch into, do so
// now.
if (TheOnlyDest) {
- // Insert the new branch..
+ // Insert the new branch.
BranchInst::Create(TheOnlyDest, SI);
BasicBlock *BB = SI->getParent();
@@ -179,22 +183,54 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB) {
Succ->removePredecessor(BB);
}
- // Delete the old switch...
+ // Delete the old switch.
BB->getInstList().erase(SI);
return true;
- } else if (SI->getNumSuccessors() == 2) {
+ }
+
+ if (SI->getNumSuccessors() == 2) {
// Otherwise, we can fold this switch into a conditional branch
// instruction if it has only one non-default destination.
Value *Cond = new ICmpInst(SI, ICmpInst::ICMP_EQ, SI->getCondition(),
SI->getSuccessorValue(1), "cond");
- // Insert the new branch...
+ // Insert the new branch.
BranchInst::Create(SI->getSuccessor(1), SI->getSuccessor(0), Cond, SI);
- // Delete the old switch...
+ // Delete the old switch.
SI->eraseFromParent();
return true;
}
+ return false;
}
+
+ if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(T)) {
+ // indirectbr blockaddress(@F, @BB) -> br label @BB
+ if (BlockAddress *BA =
+ dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
+ BasicBlock *TheOnlyDest = BA->getBasicBlock();
+ // Insert the new branch.
+ BranchInst::Create(TheOnlyDest, IBI);
+
+ for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
+ if (IBI->getDestination(i) == TheOnlyDest)
+ TheOnlyDest = 0;
+ else
+ IBI->getDestination(i)->removePredecessor(IBI->getParent());
+ }
+ IBI->eraseFromParent();
+
+ // If we didn't find our destination in the IBI successor list, then we
+ // have undefined behavior. Replace the unconditional branch with an
+ // 'unreachable' instruction.
+ if (TheOnlyDest) {
+ BB->getTerminator()->eraseFromParent();
+ new UnreachableInst(BB->getContext(), BB);
+ }
+
+ return true;
+ }
+ }
+
return false;
}
diff --git a/lib/Transforms/Utils/LoopSimplify.cpp b/lib/Transforms/Utils/LoopSimplify.cpp
index c22708a..cd8d952 100644
--- a/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/lib/Transforms/Utils/LoopSimplify.cpp
@@ -46,7 +46,6 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CFG.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
@@ -57,7 +56,7 @@ STATISTIC(NumInserted, "Number of pre-header or exit blocks inserted");
STATISTIC(NumNested , "Number of nested loops split out");
namespace {
- struct VISIBILITY_HIDDEN LoopSimplify : public LoopPass {
+ struct LoopSimplify : public LoopPass {
static char ID; // Pass identification, replacement for typeid
LoopSimplify() : LoopPass(&ID) {}
diff --git a/lib/Transforms/Utils/UnrollLoop.cpp b/lib/Transforms/Utils/LoopUnroll.cpp
index 4d838b5..d68427a 100644
--- a/lib/Transforms/Utils/UnrollLoop.cpp
+++ b/lib/Transforms/Utils/LoopUnroll.cpp
@@ -44,8 +44,8 @@ static inline void RemapInstruction(Instruction *I,
for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
Value *Op = I->getOperand(op);
DenseMap<const Value *, Value*>::iterator It = ValueMap.find(Op);
- if (It != ValueMap.end()) Op = It->second;
- I->setOperand(op, Op);
+ if (It != ValueMap.end())
+ I->setOperand(op, It->second);
}
}
diff --git a/lib/Transforms/Utils/LowerInvoke.cpp b/lib/Transforms/Utils/LowerInvoke.cpp
index 9a3de26..6e6e8d2 100644
--- a/lib/Transforms/Utils/LowerInvoke.cpp
+++ b/lib/Transforms/Utils/LowerInvoke.cpp
@@ -47,7 +47,6 @@
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetLowering.h"
#include <csetjmp>
#include <set>
@@ -61,7 +60,7 @@ static cl::opt<bool> ExpensiveEHSupport("enable-correct-eh-support",
cl::desc("Make the -lowerinvoke pass insert expensive, but correct, EH code"));
namespace {
- class VISIBILITY_HIDDEN LowerInvoke : public FunctionPass {
+ class LowerInvoke : public FunctionPass {
// Used for both models.
Constant *WriteFn;
Constant *AbortFn;
@@ -87,7 +86,6 @@ namespace {
// This is a cluster of orthogonal Transforms
AU.addPreservedID(PromoteMemoryToRegisterID);
AU.addPreservedID(LowerSwitchID);
- AU.addPreservedID(LowerAllocationsID);
}
private:
diff --git a/lib/Transforms/Utils/LowerSwitch.cpp b/lib/Transforms/Utils/LowerSwitch.cpp
index 764f098..8c18b59 100644
--- a/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/lib/Transforms/Utils/LowerSwitch.cpp
@@ -21,8 +21,8 @@
#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;
@@ -31,7 +31,7 @@ namespace {
/// LowerSwitch Pass - Replace all SwitchInst instructions with chained branch
/// instructions. Note that this cannot be a BasicBlock pass because it
/// modifies the CFG!
- class VISIBILITY_HIDDEN LowerSwitch : public FunctionPass {
+ class LowerSwitch : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
LowerSwitch() : FunctionPass(&ID) {}
@@ -43,7 +43,6 @@ namespace {
AU.addPreserved<UnifyFunctionExitNodes>();
AU.addPreservedID(PromoteMemoryToRegisterID);
AU.addPreservedID(LowerInvokePassID);
- AU.addPreservedID(LowerAllocationsID);
}
struct CaseRange {
diff --git a/lib/Transforms/Utils/Mem2Reg.cpp b/lib/Transforms/Utils/Mem2Reg.cpp
index 5df0832..9416604 100644
--- a/lib/Transforms/Utils/Mem2Reg.cpp
+++ b/lib/Transforms/Utils/Mem2Reg.cpp
@@ -20,13 +20,12 @@
#include "llvm/Instructions.h"
#include "llvm/Function.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/Compiler.h"
using namespace llvm;
STATISTIC(NumPromoted, "Number of alloca's promoted");
namespace {
- struct VISIBILITY_HIDDEN PromotePass : public FunctionPass {
+ struct PromotePass : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
PromotePass() : FunctionPass(&ID) {}
@@ -45,7 +44,6 @@ namespace {
AU.addPreserved<UnifyFunctionExitNodes>();
AU.addPreservedID(LowerSwitchID);
AU.addPreservedID(LowerInvokePassID);
- AU.addPreservedID(LowerAllocationsID);
}
};
} // end of anonymous namespace
diff --git a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index 9ca06bd..de6ad1d 100644
--- a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -32,7 +32,6 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CFG.h"
-#include "llvm/Support/Compiler.h"
#include <algorithm>
using namespace llvm;
@@ -100,7 +99,7 @@ namespace {
struct AllocaInfo;
// Data package used by RenamePass()
- class VISIBILITY_HIDDEN RenamePassData {
+ class RenamePassData {
public:
typedef std::vector<Value *> ValVector;
@@ -123,7 +122,7 @@ namespace {
///
/// This functionality is important because it avoids scanning large basic
/// blocks multiple times when promoting many allocas in the same block.
- class VISIBILITY_HIDDEN LargeBlockInfo {
+ class LargeBlockInfo {
/// InstNumbers - For each instruction that we track, keep the index of the
/// instruction. The index starts out as the number of the instruction from
/// the start of the block.
@@ -170,7 +169,7 @@ namespace {
}
};
- struct VISIBILITY_HIDDEN PromoteMem2Reg {
+ struct PromoteMem2Reg {
/// Allocas - The alloca instructions being promoted.
///
std::vector<AllocaInst*> Allocas;
@@ -750,7 +749,12 @@ void PromoteMem2Reg::RewriteSingleStoreAlloca(AllocaInst *AI,
}
// Otherwise, we *can* safely rewrite this load.
- LI->replaceAllUsesWith(OnlyStore->getOperand(0));
+ Value *ReplVal = OnlyStore->getOperand(0);
+ // If the replacement value is the load, this must occur in unreachable
+ // code.
+ if (ReplVal == LI)
+ ReplVal = UndefValue::get(LI->getType());
+ LI->replaceAllUsesWith(ReplVal);
if (AST && isa<PointerType>(LI->getType()))
AST->deleteValue(LI);
LI->eraseFromParent();
diff --git a/lib/Transforms/Utils/SSI.cpp b/lib/Transforms/Utils/SSI.cpp
index 3bb2e8e..1c4afff 100644
--- a/lib/Transforms/Utils/SSI.cpp
+++ b/lib/Transforms/Utils/SSI.cpp
@@ -396,7 +396,7 @@ static RegisterPass<SSI> X("ssi", "Static Single Information Construction");
/// SSIEverything - A pass that runs createSSI on every non-void variable,
/// intended for debugging.
namespace {
- struct VISIBILITY_HIDDEN SSIEverything : public FunctionPass {
+ struct SSIEverything : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
SSIEverything() : FunctionPass(&ID) {}
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index 6fd7d7b..8e1fb98 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -24,6 +24,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
@@ -1748,6 +1749,68 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
return true;
}
+/// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
+/// nodes in this block. This doesn't try to be clever about PHI nodes
+/// which differ only in the order of the incoming values, but instcombine
+/// orders them so it usually won't matter.
+///
+static bool EliminateDuplicatePHINodes(BasicBlock *BB) {
+ bool Changed = false;
+
+ // This implementation doesn't currently consider undef operands
+  // specially. Theoretically, two phis which are identical except for
+ // one having an undef where the other doesn't could be collapsed.
+
+ // Map from PHI hash values to PHI nodes. If multiple PHIs have
+ // the same hash value, the element is the first PHI in the
+ // linked list in CollisionMap.
+ DenseMap<uintptr_t, PHINode *> HashMap;
+
+ // Maintain linked lists of PHI nodes with common hash values.
+ DenseMap<PHINode *, PHINode *> CollisionMap;
+
+ // Examine each PHI.
+ for (BasicBlock::iterator I = BB->begin();
+ PHINode *PN = dyn_cast<PHINode>(I++); ) {
+ // Compute a hash value on the operands. Instcombine will likely have sorted
+ // them, which helps expose duplicates, but we have to check all the
+ // operands to be safe in case instcombine hasn't run.
+ uintptr_t Hash = 0;
+ for (User::op_iterator I = PN->op_begin(), E = PN->op_end(); I != E; ++I) {
+ // This hash algorithm is quite weak as hash functions go, but it seems
+ // to do a good enough job for this particular purpose, and is very quick.
+ Hash ^= reinterpret_cast<uintptr_t>(static_cast<Value *>(*I));
+ Hash = (Hash << 7) | (Hash >> (sizeof(uintptr_t) * CHAR_BIT - 7));
+ }
+ // If we've never seen this hash value before, it's a unique PHI.
+ std::pair<DenseMap<uintptr_t, PHINode *>::iterator, bool> Pair =
+ HashMap.insert(std::make_pair(Hash, PN));
+ if (Pair.second) continue;
+ // Otherwise it's either a duplicate or a hash collision.
+ for (PHINode *OtherPN = Pair.first->second; ; ) {
+ if (OtherPN->isIdenticalTo(PN)) {
+ // A duplicate. Replace this PHI with its duplicate.
+ PN->replaceAllUsesWith(OtherPN);
+ PN->eraseFromParent();
+ Changed = true;
+ break;
+ }
+ // A non-duplicate hash collision.
+ DenseMap<PHINode *, PHINode *>::iterator I = CollisionMap.find(OtherPN);
+ if (I == CollisionMap.end()) {
+ // Set this PHI to be the head of the linked list of colliding PHIs.
+ PHINode *Old = Pair.first->second;
+ Pair.first->second = PN;
+ CollisionMap[PN] = Old;
+ break;
+ }
+      // Proceed to the next PHI in the list.
+ OtherPN = I->second;
+ }
+ }
+
+ return Changed;
+}
/// SimplifyCFG - This function is used to do simplification of a CFG. For
/// example, it adjusts branches to branches to eliminate the extra hop, it
@@ -1777,6 +1840,9 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
// away...
Changed |= ConstantFoldTerminator(BB);
+ // Check for and eliminate duplicate PHI nodes in this block.
+ Changed |= EliminateDuplicatePHINodes(BB);
+
// If there is a trivial two-entry PHI node in this basic block, and we can
// eliminate it, do so now.
if (PHINode *PN = dyn_cast<PHINode>(BB->begin()))
diff --git a/lib/Transforms/Utils/ValueMapper.cpp b/lib/Transforms/Utils/ValueMapper.cpp
index 2d8332f..39331d7 100644
--- a/lib/Transforms/Utils/ValueMapper.cpp
+++ b/lib/Transforms/Utils/ValueMapper.cpp
@@ -13,18 +13,15 @@
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/ValueMapper.h"
-#include "llvm/BasicBlock.h"
#include "llvm/DerivedTypes.h" // For getNullValue(Type::Int32Ty)
#include "llvm/Constants.h"
-#include "llvm/GlobalValue.h"
-#include "llvm/Instruction.h"
-#include "llvm/LLVMContext.h"
+#include "llvm/Function.h"
#include "llvm/Metadata.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
-Value *llvm::MapValue(const Value *V, ValueMapTy &VM, LLVMContext &Context) {
+Value *llvm::MapValue(const Value *V, ValueMapTy &VM) {
Value *&VMSlot = VM[V];
if (VMSlot) return VMSlot; // Does it exist in the map yet?
@@ -36,80 +33,91 @@ Value *llvm::MapValue(const Value *V, ValueMapTy &VM, LLVMContext &Context) {
if (isa<GlobalValue>(V) || isa<InlineAsm>(V) || isa<MetadataBase>(V))
return VMSlot = const_cast<Value*>(V);
- if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
- if (isa<ConstantInt>(C) || isa<ConstantFP>(C) ||
- isa<ConstantPointerNull>(C) || isa<ConstantAggregateZero>(C) ||
- isa<UndefValue>(C) || isa<MDString>(C))
- return VMSlot = C; // Primitive constants map directly
- else if (ConstantArray *CA = dyn_cast<ConstantArray>(C)) {
- for (User::op_iterator b = CA->op_begin(), i = b, e = CA->op_end();
- i != e; ++i) {
- Value *MV = MapValue(*i, VM, Context);
- if (MV != *i) {
- // This array must contain a reference to a global, make a new array
- // and return it.
- //
- std::vector<Constant*> Values;
- Values.reserve(CA->getNumOperands());
- for (User::op_iterator j = b; j != i; ++j)
- Values.push_back(cast<Constant>(*j));
- Values.push_back(cast<Constant>(MV));
- for (++i; i != e; ++i)
- Values.push_back(cast<Constant>(MapValue(*i, VM, Context)));
- return VM[V] = ConstantArray::get(CA->getType(), Values);
- }
+ Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V));
+ if (C == 0) return 0;
+
+ if (isa<ConstantInt>(C) || isa<ConstantFP>(C) ||
+ isa<ConstantPointerNull>(C) || isa<ConstantAggregateZero>(C) ||
+ isa<UndefValue>(C) || isa<MDString>(C))
+ return VMSlot = C; // Primitive constants map directly
+
+ if (ConstantArray *CA = dyn_cast<ConstantArray>(C)) {
+ for (User::op_iterator b = CA->op_begin(), i = b, e = CA->op_end();
+ i != e; ++i) {
+ Value *MV = MapValue(*i, VM);
+ if (MV != *i) {
+ // This array must contain a reference to a global, make a new array
+ // and return it.
+ //
+ std::vector<Constant*> Values;
+ Values.reserve(CA->getNumOperands());
+ for (User::op_iterator j = b; j != i; ++j)
+ Values.push_back(cast<Constant>(*j));
+ Values.push_back(cast<Constant>(MV));
+ for (++i; i != e; ++i)
+ Values.push_back(cast<Constant>(MapValue(*i, VM)));
+ return VM[V] = ConstantArray::get(CA->getType(), Values);
}
- return VM[V] = C;
-
- } else if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
- for (User::op_iterator b = CS->op_begin(), i = b, e = CS->op_end();
- i != e; ++i) {
- Value *MV = MapValue(*i, VM, Context);
- if (MV != *i) {
- // This struct must contain a reference to a global, make a new struct
- // and return it.
- //
- std::vector<Constant*> Values;
- Values.reserve(CS->getNumOperands());
- for (User::op_iterator j = b; j != i; ++j)
- Values.push_back(cast<Constant>(*j));
- Values.push_back(cast<Constant>(MV));
- for (++i; i != e; ++i)
- Values.push_back(cast<Constant>(MapValue(*i, VM, Context)));
- return VM[V] = ConstantStruct::get(CS->getType(), Values);
- }
+ }
+ return VM[V] = C;
+ }
+
+ if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
+ for (User::op_iterator b = CS->op_begin(), i = b, e = CS->op_end();
+ i != e; ++i) {
+ Value *MV = MapValue(*i, VM);
+ if (MV != *i) {
+ // This struct must contain a reference to a global, make a new struct
+ // and return it.
+ //
+ std::vector<Constant*> Values;
+ Values.reserve(CS->getNumOperands());
+ for (User::op_iterator j = b; j != i; ++j)
+ Values.push_back(cast<Constant>(*j));
+ Values.push_back(cast<Constant>(MV));
+ for (++i; i != e; ++i)
+ Values.push_back(cast<Constant>(MapValue(*i, VM)));
+ return VM[V] = ConstantStruct::get(CS->getType(), Values);
}
- return VM[V] = C;
-
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
- std::vector<Constant*> Ops;
- for (User::op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i)
- Ops.push_back(cast<Constant>(MapValue(*i, VM, Context)));
- return VM[V] = CE->getWithOperands(Ops);
- } else if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
- for (User::op_iterator b = CP->op_begin(), i = b, e = CP->op_end();
- i != e; ++i) {
- Value *MV = MapValue(*i, VM, Context);
- if (MV != *i) {
- // This vector value must contain a reference to a global, make a new
- // vector constant and return it.
- //
- std::vector<Constant*> Values;
- Values.reserve(CP->getNumOperands());
- for (User::op_iterator j = b; j != i; ++j)
- Values.push_back(cast<Constant>(*j));
- Values.push_back(cast<Constant>(MV));
- for (++i; i != e; ++i)
- Values.push_back(cast<Constant>(MapValue(*i, VM, Context)));
- return VM[V] = ConstantVector::get(Values);
- }
+ }
+ return VM[V] = C;
+ }
+
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ std::vector<Constant*> Ops;
+ for (User::op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i)
+ Ops.push_back(cast<Constant>(MapValue(*i, VM)));
+ return VM[V] = CE->getWithOperands(Ops);
+ }
+
+ if (ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
+ for (User::op_iterator b = CV->op_begin(), i = b, e = CV->op_end();
+ i != e; ++i) {
+ Value *MV = MapValue(*i, VM);
+ if (MV != *i) {
+ // This vector value must contain a reference to a global, make a new
+ // vector constant and return it.
+ //
+ std::vector<Constant*> Values;
+ Values.reserve(CV->getNumOperands());
+ for (User::op_iterator j = b; j != i; ++j)
+ Values.push_back(cast<Constant>(*j));
+ Values.push_back(cast<Constant>(MV));
+ for (++i; i != e; ++i)
+ Values.push_back(cast<Constant>(MapValue(*i, VM)));
+ return VM[V] = ConstantVector::get(Values);
}
- return VM[V] = C;
-
- } else {
- llvm_unreachable("Unknown type of constant!");
}
+ return VM[V] = C;
}
+
+ if (BlockAddress *BA = dyn_cast<BlockAddress>(C)) {
+ Function *F = cast<Function>(MapValue(BA->getFunction(), VM));
+ BasicBlock *BB = cast_or_null<BasicBlock>(MapValue(BA->getBasicBlock(),VM));
+ return VM[V] = BlockAddress::get(F, BB ? BB : BA->getBasicBlock());
+ }
+
+ llvm_unreachable("Unknown type of constant!");
return 0;
}
@@ -118,7 +126,7 @@ Value *llvm::MapValue(const Value *V, ValueMapTy &VM, LLVMContext &Context) {
///
void llvm::RemapInstruction(Instruction *I, ValueMapTy &ValueMap) {
for (User::op_iterator op = I->op_begin(), E = I->op_end(); op != E; ++op) {
- Value *V = MapValue(*op, ValueMap, I->getParent()->getContext());
+ Value *V = MapValue(*op, ValueMap);
assert(V && "Referenced value not in value map!");
*op = V;
}
OpenPOWER on IntegriCloud