Diffstat (limited to 'contrib/llvm/lib/Transforms/IPO')
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp       |  43
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp           |  49
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp |  12
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp           |   4
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp               | 125
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp   |   2
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/IPO.cpp                     |  15
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp            |  15
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp            |  26
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/Inliner.cpp                 |   4
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp           |  73
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/LowerSetJmp.cpp             | 547
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp          |  70
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp      | 343
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/PruneEH.cpp                 |   5
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp            |   2
16 files changed, 609 insertions, 726 deletions
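Nearly all of the churn below is mechanical, following three LLVM 3.0 API migrations that repeat across every file: the `const` qualifier is dropped from `Type*` (the rewritten type system made types mutable), the iterator-pair overloads of `GetElementPtrInst::Create` and `ConstantExpr::getGetElementPtr` give way to `ArrayRef`-taking ones, and bare `isVolatile()` checks become `!isSimple()` so the newly introduced atomic loads and stores are rejected along with volatile ones. A minimal sketch of the new idioms, assuming LLVM 3.0-era headers; the helper names are hypothetical and not part of the diff:

```cpp
#include "llvm/Instructions.h"  // LLVM 3.0 header layout (pre llvm/IR)
#include "llvm/ADT/ArrayRef.h"

using namespace llvm;

// Hypothetical helper: build &Ptr[0].Field with the ArrayRef-based API that
// replaces the old Create(Ptr, Idxs, Idxs+2, ...) iterator-pair overload.
static Value *emitFieldGEP(Value *Ptr, Value *Zero, Value *FieldIdx,
                           Instruction *InsertBefore) {
  Value *Idxs[] = { Zero, FieldIdx };
  return GetElementPtrInst::Create(Ptr, Idxs, "field", InsertBefore);
}

// Hypothetical helper: the predicate the diff substitutes for isVolatile().
// isSimple() is false for volatile *and* atomic accesses, keeping passes
// written before the atomics work conservative.
static bool safeToTransform(LoadInst *LI) {
  return LI->isSimple();
}
```

`StoreInst` grew the same `isSimple()` predicate, which is why the volatile checks in GlobalOpt's static evaluator below change in matched load/store pairs.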
diff --git a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index fa007cf..e160f63 100644
--- a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -155,12 +155,12 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
   for (unsigned i = 0; i != PointerArgs.size(); ++i) {
     bool isByVal = F->paramHasAttr(PointerArgs[i].second+1, Attribute::ByVal);
     Argument *PtrArg = PointerArgs[i].first;
-    const Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();
+    Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();
 
     // If this is a byval argument, and if the aggregate type is small, just
     // pass the elements, which is always safe.
     if (isByVal) {
-      if (const StructType *STy = dyn_cast<StructType>(AgTy)) {
+      if (StructType *STy = dyn_cast<StructType>(AgTy)) {
         if (maxElements > 0 && STy->getNumElements() > maxElements) {
           DEBUG(dbgs() << "argpromotion disable promoting argument '"
                 << PtrArg->getName() << "' because it would require adding more"
@@ -190,7 +190,7 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
       // If the argument is a recursive type and we're in a recursive
       // function, we could end up infinitely peeling the function argument.
       if (isSelfRecursive) {
-        if (const StructType *STy = dyn_cast<StructType>(AgTy)) {
+        if (StructType *STy = dyn_cast<StructType>(AgTy)) {
           bool RecursiveType = false;
           for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
             if (STy->getElementType(i) == PtrArg->getType()) {
@@ -382,7 +382,8 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
     User *U = *UI;
     Operands.clear();
     if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
-      if (LI->isVolatile()) return false;  // Don't hack volatile loads
+      // Don't hack volatile/atomic loads
+      if (!LI->isSimple()) return false;
       Loads.push_back(LI);
       // Direct loads are equivalent to a GEP with a zero index and then a load.
       Operands.push_back(0);
@@ -410,7 +411,8 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
       for (Value::use_iterator UI = GEP->use_begin(), E = GEP->use_end();
            UI != E; ++UI)
         if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
-          if (LI->isVolatile()) return false;  // Don't hack volatile loads
+          // Don't hack volatile/atomic loads
+          if (!LI->isSimple()) return false;
           Loads.push_back(LI);
         } else {
           // Other uses than load?
@@ -492,7 +494,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
 
   // Start by computing a new prototype for the function, which is the same as
   // the old function, but has modified arguments.
-  const FunctionType *FTy = F->getFunctionType();
+  FunctionType *FTy = F->getFunctionType();
   std::vector<Type*> Params;
 
   typedef std::set<IndicesVector> ScalarizeTable;
@@ -527,8 +529,8 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
        ++I, ++ArgIndex) {
     if (ByValArgsToTransform.count(I)) {
       // Simple byval argument? Just add all the struct element types.
-      const Type *AgTy = cast<PointerType>(I->getType())->getElementType();
-      const StructType *STy = cast<StructType>(AgTy);
+      Type *AgTy = cast<PointerType>(I->getType())->getElementType();
+      StructType *STy = cast<StructType>(AgTy);
       for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
         Params.push_back(STy->getElementType(i));
       ++NumByValArgsPromoted;
@@ -576,9 +578,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
       for (ScalarizeTable::iterator SI = ArgIndices.begin(),
              E = ArgIndices.end(); SI != E; ++SI) {
         // not allowed to dereference ->begin() if size() is 0
-        Params.push_back(GetElementPtrInst::getIndexedType(I->getType(),
-                                                           SI->begin(),
-                                                           SI->end()));
+        Params.push_back(GetElementPtrInst::getIndexedType(I->getType(), *SI));
         assert(Params.back());
       }
 
@@ -593,7 +593,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
   if (Attributes attrs = PAL.getFnAttributes())
     AttributesVec.push_back(AttributeWithIndex::get(~0, attrs));
 
-  const Type *RetTy = FTy->getReturnType();
+  Type *RetTy = FTy->getReturnType();
 
   // Work around LLVM bug PR56: the CWriter cannot emit varargs functions which
   // have zero fixed arguments.
@@ -662,13 +662,13 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
       } else if (ByValArgsToTransform.count(I)) {
         // Emit a GEP and load for each element of the struct.
-        const Type *AgTy = cast<PointerType>(I->getType())->getElementType();
-        const StructType *STy = cast<StructType>(AgTy);
+        Type *AgTy = cast<PointerType>(I->getType())->getElementType();
+        StructType *STy = cast<StructType>(AgTy);
         Value *Idxs[2] = {
               ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), 0 };
         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
           Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
-          Value *Idx = GetElementPtrInst::Create(*AI, Idxs, Idxs+2,
+          Value *Idx = GetElementPtrInst::Create(*AI, Idxs,
                                                  (*AI)->getName()+"."+utostr(i),
                                                  Call);
           // TODO: Tell AA about the new values?
@@ -686,12 +686,12 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
           LoadInst *OrigLoad = OriginalLoads[*SI];
           if (!SI->empty()) {
             Ops.reserve(SI->size());
-            const Type *ElTy = V->getType();
+            Type *ElTy = V->getType();
             for (IndicesVector::const_iterator II = SI->begin(),
                  IE = SI->end(); II != IE; ++II) {
               // Use i32 to index structs, and i64 for others (pointers/arrays).
               // This satisfies GEP constraints.
-              const Type *IdxTy = (ElTy->isStructTy() ?
+              Type *IdxTy = (ElTy->isStructTy() ?
                     Type::getInt32Ty(F->getContext()) :
                     Type::getInt64Ty(F->getContext()));
               Ops.push_back(ConstantInt::get(IdxTy, *II));
@@ -699,8 +699,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
               ElTy = cast<CompositeType>(ElTy)->getTypeAtIndex(*II);
             }
             // And create a GEP to extract those indices.
-            V = GetElementPtrInst::Create(V, Ops.begin(), Ops.end(),
-                                          V->getName()+".idx", Call);
+            V = GetElementPtrInst::Create(V, Ops, V->getName()+".idx", Call);
             Ops.clear();
             AA.copyValue(OrigLoad->getOperand(0), V);
           }
@@ -792,16 +791,16 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
       Instruction *InsertPt = NF->begin()->begin();
 
       // Just add all the struct element types.
-      const Type *AgTy = cast<PointerType>(I->getType())->getElementType();
+      Type *AgTy = cast<PointerType>(I->getType())->getElementType();
       Value *TheAlloca = new AllocaInst(AgTy, 0, "", InsertPt);
-      const StructType *STy = cast<StructType>(AgTy);
+      StructType *STy = cast<StructType>(AgTy);
       Value *Idxs[2] = {
             ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), 0 };
 
       for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
         Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
         Value *Idx =
-          GetElementPtrInst::Create(TheAlloca, Idxs, Idxs+2,
+          GetElementPtrInst::Create(TheAlloca, Idxs,
                                     TheAlloca->getName()+"."+Twine(i),
                                     InsertPt);
         I2->setName(I->getName()+"."+Twine(i));
diff --git a/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp b/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
index a21efce..c3ecb7a 100644
--- a/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
@@ -23,7 +23,9 @@
 #include "llvm/DerivedTypes.h"
 #include "llvm/Module.h"
 #include "llvm/Pass.h"
+#include "llvm/Target/TargetData.h"
 #include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/Statistic.h"
 using namespace llvm;
@@ -37,10 +39,18 @@ namespace {
       initializeConstantMergePass(*PassRegistry::getPassRegistry());
     }
 
-    // run - For this pass, process all of the globals in the module,
-    // eliminating duplicate constants.
-    //
+    // For this pass, process all of the globals in the module, eliminating
+    // duplicate constants.
     bool runOnModule(Module &M);
+
+    // Return true iff we can determine the alignment of this global variable.
+    bool hasKnownAlignment(GlobalVariable *GV) const;
+
+    // Return the alignment of the global, including converting the default
+    // alignment to a concrete value.
+    unsigned getAlignment(GlobalVariable *GV) const;
+
+    const TargetData *TD;
   };
 }
 
@@ -77,15 +87,28 @@ static bool IsBetterCannonical(const GlobalVariable &A,
   return A.hasUnnamedAddr();
 }
 
+bool ConstantMerge::hasKnownAlignment(GlobalVariable *GV) const {
+  return TD || GV->getAlignment() != 0;
+}
+
+unsigned ConstantMerge::getAlignment(GlobalVariable *GV) const {
+  if (TD)
+    return TD->getPreferredAlignment(GV);
+  return GV->getAlignment();
+}
+
 bool ConstantMerge::runOnModule(Module &M) {
+  TD = getAnalysisIfAvailable<TargetData>();
+
   // Find all the globals that are marked "used".  These cannot be merged.
   SmallPtrSet<const GlobalValue*, 8> UsedGlobals;
   FindUsedValues(M.getGlobalVariable("llvm.used"), UsedGlobals);
   FindUsedValues(M.getGlobalVariable("llvm.compiler.used"), UsedGlobals);
 
-  // Map unique constant/section pairs to globals.  We don't want to merge
-  // globals in different sections.
-  DenseMap<Constant*, GlobalVariable*> CMap;
+  // Map unique <constants, has-unknown-alignment> pairs to globals.  We don't
+  // want to merge globals of unknown alignment with those of explicit
+  // alignment.  If we have TargetData, we always know the alignment.
+  DenseMap<PointerIntPair<Constant*, 1, bool>, GlobalVariable*> CMap;
 
   // Replacements - This vector contains a list of replacements to perform.
   SmallVector<std::pair<GlobalVariable*, GlobalVariable*>, 32> Replacements;
@@ -120,7 +143,8 @@ bool ConstantMerge::runOnModule(Module &M) {
       Constant *Init = GV->getInitializer();
 
       // Check to see if the initializer is already known.
-      GlobalVariable *&Slot = CMap[Init];
+      PointerIntPair<Constant*, 1, bool> Pair(Init, hasKnownAlignment(GV));
+      GlobalVariable *&Slot = CMap[Pair];
 
       // If this is the first constant we find or if the old one is local,
       // replace with the current one. If the current is externally visible
@@ -152,7 +176,8 @@ bool ConstantMerge::runOnModule(Module &M) {
       Constant *Init = GV->getInitializer();
 
       // Check to see if the initializer is already known.
-      GlobalVariable *Slot = CMap[Init];
+      PointerIntPair<Constant*, 1, bool> Pair(Init, hasKnownAlignment(GV));
+      GlobalVariable *Slot = CMap[Pair];
 
       if (!Slot || Slot == GV)
         continue;
@@ -175,6 +200,14 @@ bool ConstantMerge::runOnModule(Module &M) {
   // now.  This avoid invalidating the pointers in CMap, which are unneeded
   // now.
   for (unsigned i = 0, e = Replacements.size(); i != e; ++i) {
+    // Bump the alignment if necessary.
+    if (Replacements[i].first->getAlignment() ||
+        Replacements[i].second->getAlignment()) {
+      Replacements[i].second->setAlignment(std::max(
+          Replacements[i].first->getAlignment(),
+          Replacements[i].second->getAlignment()));
+    }
+
     // Eliminate any uses of the dead global.
     Replacements[i].first->replaceAllUsesWith(Replacements[i].second);
diff --git a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 1517765..4bb6f7a 100644
--- a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -206,7 +206,7 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
 
   // Start by computing a new prototype for the function, which is the same as
   // the old function, but doesn't have isVarArg set.
-  const FunctionType *FTy = Fn.getFunctionType();
+  FunctionType *FTy = Fn.getFunctionType();
 
   std::vector<Type*> Params(FTy->param_begin(), FTy->param_end());
   FunctionType *NFTy = FunctionType::get(FTy->getReturnType(),
@@ -344,7 +344,7 @@ bool DAE::RemoveDeadArgumentsFromCallers(Function &Fn)
 static unsigned NumRetVals(const Function *F) {
   if (F->getReturnType()->isVoidTy())
     return 0;
-  else if (const StructType *STy = dyn_cast<StructType>(F->getReturnType()))
+  else if (StructType *STy = dyn_cast<StructType>(F->getReturnType()))
     return STy->getNumElements();
   else
     return 1;
@@ -491,7 +491,7 @@ void DAE::SurveyFunction(const Function &F) {
   // Keep track of the number of live retvals, so we can skip checks once all
   // of them turn out to be live.
   unsigned NumLiveRetVals = 0;
-  const Type *STy = dyn_cast<StructType>(F.getReturnType());
+  Type *STy = dyn_cast<StructType>(F.getReturnType());
   // Loop all uses of the function.
   for (Value::const_use_iterator I = F.use_begin(), E = F.use_end();
        I != E; ++I) {
@@ -646,7 +646,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
 
   // Start by computing a new prototype for the function, which is the same as
   // the old function, but has fewer arguments and a different return type.
-  const FunctionType *FTy = F->getFunctionType();
+  FunctionType *FTy = F->getFunctionType();
   std::vector<Type*> Params;
 
   // Set up to build a new list of parameter attributes.
@@ -660,7 +660,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
 
   // Find out the new return value.
   Type *RetTy = FTy->getReturnType();
-  const Type *NRetTy = NULL;
+  Type *NRetTy = NULL;
   unsigned RetCount = NumRetVals(F);
 
   // -1 means unused, other numbers are the new index
@@ -669,7 +669,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
   if (RetTy->isVoidTy()) {
     NRetTy = RetTy;
   } else {
-    const StructType *STy = dyn_cast<StructType>(RetTy);
+    StructType *STy = dyn_cast<StructType>(RetTy);
     if (STy)
       // Look at each of the original return values individually.
       for (unsigned i = 0; i != RetCount; ++i) {
diff --git a/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 95decec..0edf342 100644
--- a/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -163,14 +163,14 @@ bool FunctionAttrs::AddReadAttrs(const CallGraphSCC &SCC) {
         ReadsMemory = true;
         continue;
       } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
-        // Ignore non-volatile loads from local memory.
+        // Ignore non-volatile loads from local memory. (Atomic is okay here.)
         if (!LI->isVolatile()) {
           AliasAnalysis::Location Loc = AA->getLocation(LI);
           if (AA->pointsToConstantMemory(Loc, /*OrLocal=*/true))
             continue;
         }
       } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
-        // Ignore non-volatile stores to local memory.
+        // Ignore non-volatile stores to local memory. (Atomic is okay here.)
         if (!SI->isVolatile()) {
           AliasAnalysis::Location Loc = AA->getLocation(SI);
           if (AA->pointsToConstantMemory(Loc, /*OrLocal=*/true))
diff --git a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 4ac721d..3552d03 100644
--- a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -195,12 +195,14 @@ static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
       }
       if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
         GS.isLoaded = true;
-        if (LI->isVolatile()) return true;  // Don't hack on volatile loads.
+        // Don't hack on volatile/atomic loads.
+        if (!LI->isSimple()) return true;
       } else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
         // Don't allow a store OF the address, only stores TO the address.
         if (SI->getOperand(0) == V) return true;
 
-        if (SI->isVolatile()) return true;  // Don't hack on volatile stores.
+        // Don't hack on volatile/atomic stores.
+        if (!SI->isSimple()) return true;
 
         // If this is a direct store to the global (i.e., the global is a scalar
         // value, not an aggregate), keep more specific information about
@@ -281,18 +283,18 @@ static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx) {
   } else if (ConstantVector *CP = dyn_cast<ConstantVector>(Agg)) {
     if (IdxV < CP->getNumOperands()) return CP->getOperand(IdxV);
   } else if (isa<ConstantAggregateZero>(Agg)) {
-    if (const StructType *STy = dyn_cast<StructType>(Agg->getType())) {
+    if (StructType *STy = dyn_cast<StructType>(Agg->getType())) {
       if (IdxV < STy->getNumElements())
         return Constant::getNullValue(STy->getElementType(IdxV));
-    } else if (const SequentialType *STy =
+    } else if (SequentialType *STy =
                dyn_cast<SequentialType>(Agg->getType())) {
       return Constant::getNullValue(STy->getElementType());
     }
   } else if (isa<UndefValue>(Agg)) {
-    if (const StructType *STy = dyn_cast<StructType>(Agg->getType())) {
+    if (StructType *STy = dyn_cast<StructType>(Agg->getType())) {
       if (IdxV < STy->getNumElements())
         return UndefValue::get(STy->getElementType(IdxV));
-    } else if (const SequentialType *STy =
+    } else if (SequentialType *STy =
                dyn_cast<SequentialType>(Agg->getType())) {
       return UndefValue::get(STy->getElementType());
     }
@@ -430,7 +432,7 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
     ++GEPI;  // Skip over the pointer index.
 
     // If this is a use of an array allocation, do a bit more checking for sanity.
-    if (const ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
+    if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
       uint64_t NumElements = AT->getNumElements();
       ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));
 
@@ -451,9 +453,9 @@
          GEPI != E;
          ++GEPI) {
       uint64_t NumElements;
-      if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
+      if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
         NumElements = SubArrayTy->getNumElements();
-      else if (const VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
+      else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
         NumElements = SubVectorTy->getNumElements();
       else {
         assert((*GEPI)->isStructTy() &&
@@ -498,7 +500,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
   assert(GV->hasLocalLinkage() && !GV->isConstant());
   Constant *Init = GV->getInitializer();
-  const Type *Ty = Init->getType();
+  Type *Ty = Init->getType();
 
   std::vector<GlobalVariable*> NewGlobals;
   Module::GlobalListType &Globals = GV->getParent()->getGlobalList();
@@ -508,7 +510,7 @@
   if (StartAlignment == 0)
     StartAlignment = TD.getABITypeAlignment(GV->getType());
 
-  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+  if (StructType *STy = dyn_cast<StructType>(Ty)) {
     NewGlobals.reserve(STy->getNumElements());
     const StructLayout &Layout = *TD.getStructLayout(STy);
     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
@@ -531,9 +533,9 @@
       if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i)))
         NGV->setAlignment(NewAlign);
     }
-  } else if (const SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
+  } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
     unsigned NumElements = 0;
-    if (const ArrayType *ATy = dyn_cast<ArrayType>(STy))
+    if (ArrayType *ATy = dyn_cast<ArrayType>(STy))
       NumElements = ATy->getNumElements();
     else
       NumElements = cast<VectorType>(STy)->getNumElements();
@@ -596,15 +598,14 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
       Idxs.push_back(NullInt);
       for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
         Idxs.push_back(CE->getOperand(i));
-      NewPtr = ConstantExpr::getGetElementPtr(cast<Constant>(NewPtr),
-                                              &Idxs[0], Idxs.size());
+      NewPtr = ConstantExpr::getGetElementPtr(cast<Constant>(NewPtr), Idxs);
     } else {
       GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
       SmallVector<Value*, 8> Idxs;
       Idxs.push_back(NullInt);
       for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
         Idxs.push_back(GEPI->getOperand(i));
-      NewPtr = GetElementPtrInst::Create(NewPtr, Idxs.begin(), Idxs.end(),
+      NewPtr = GetElementPtrInst::Create(NewPtr, Idxs,
                                          GEPI->getName()+"."+Twine(Val),GEPI);
     }
   }
@@ -753,8 +754,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
         break;
       if (Idxs.size() == GEPI->getNumOperands()-1)
         Changed |= OptimizeAwayTrappingUsesOfValue(GEPI,
-                          ConstantExpr::getGetElementPtr(NewV, &Idxs[0],
-                                                         Idxs.size()));
+                          ConstantExpr::getGetElementPtr(NewV, Idxs));
       if (GEPI->use_empty()) {
         Changed = true;
         GEPI->eraseFromParent();
@@ -846,12 +846,12 @@ static void ConstantPropUsersOf(Value *V) {
 /// malloc into a global, and any loads of GV as uses of the new global.
 static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
                                                      CallInst *CI,
-                                                     const Type *AllocTy,
+                                                     Type *AllocTy,
                                                      ConstantInt *NElements,
                                                      TargetData* TD) {
   DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI << '\n');
 
-  const Type *GlobalType;
+  Type *GlobalType;
   if (NElements->getZExtValue() == 1)
     GlobalType = AllocTy;
   else
@@ -1192,7 +1192,7 @@ static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
   } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
     // PN's type is pointer to struct.  Make a new PHI of pointer to struct
     // field.
-    const StructType *ST =
+    StructType *ST =
       cast<StructType>(cast<PointerType>(PN->getType())->getElementType());
 
     PHINode *NewPN =
@@ -1245,8 +1245,7 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
     GEPIdx.push_back(GEPI->getOperand(1));
     GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());
 
-    Value *NGEPI = GetElementPtrInst::Create(NewPtr,
-                                             GEPIdx.begin(), GEPIdx.end(),
+    Value *NGEPI = GetElementPtrInst::Create(NewPtr, GEPIdx,
                                              GEPI->getName(), GEPI);
     GEPI->replaceAllUsesWith(NGEPI);
     GEPI->eraseFromParent();
@@ -1260,11 +1259,9 @@
   // already been seen first by another load, so its uses have already been
   // processed.
   PHINode *PN = cast<PHINode>(LoadUser);
-  bool Inserted;
-  DenseMap<Value*, std::vector<Value*> >::iterator InsertPos;
-  tie(InsertPos, Inserted) =
-    InsertedScalarizedValues.insert(std::make_pair(PN, std::vector<Value*>()));
-  if (!Inserted) return;
+  if (!InsertedScalarizedValues.insert(std::make_pair(PN,
+                                              std::vector<Value*>())).second)
+    return;
 
   // If this is the first time we've seen this PHI, recursively process all
   // users.
@@ -1298,8 +1295,8 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
 static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
                                             Value* NElems, TargetData *TD) {
   DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << "  MALLOC = " << *CI << '\n');
-  const Type* MAT = getMallocAllocatedType(CI);
-  const StructType *STy = cast<StructType>(MAT);
+  Type* MAT = getMallocAllocatedType(CI);
+  StructType *STy = cast<StructType>(MAT);
 
   // There is guaranteed to be at least one use of the malloc (storing
   // it into GV).  If there are other uses, change them to be uses of
@@ -1313,8 +1310,8 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
   std::vector<Value*> FieldMallocs;
 
   for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
-    const Type *FieldTy = STy->getElementType(FieldNo);
-    const PointerType *PFieldTy = PointerType::getUnqual(FieldTy);
+    Type *FieldTy = STy->getElementType(FieldNo);
+    PointerType *PFieldTy = PointerType::getUnqual(FieldTy);
 
     GlobalVariable *NGV =
       new GlobalVariable(*GV->getParent(),
@@ -1325,9 +1322,9 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
     FieldGlobals.push_back(NGV);
 
     unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
-    if (const StructType *ST = dyn_cast<StructType>(FieldTy))
+    if (StructType *ST = dyn_cast<StructType>(FieldTy))
       TypeSize = TD->getStructLayout(ST)->getSizeInBytes();
-    const Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
+    Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
     Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                         ConstantInt::get(IntPtrTy, TypeSize),
                                         NElems, 0,
@@ -1379,8 +1376,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
   for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
     Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
     Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
-                              Constant::getNullValue(GVVal->getType()),
-                              "tmp");
+                              Constant::getNullValue(GVVal->getType()));
     BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                                OrigBB->getParent());
     BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
@@ -1428,7 +1424,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
 
   // Insert a store of null into each global.
   for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
-    const PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
+    PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
     Constant *Null = Constant::getNullValue(PT->getElementType());
     new StoreInst(Null, FieldGlobals[i], SI);
   }
@@ -1485,7 +1481,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
 /// cast of malloc.
 static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
                                                CallInst *CI,
-                                               const Type *AllocTy,
+                                               Type *AllocTy,
                                                Module::global_iterator &GVI,
                                                TargetData *TD) {
   if (!TD)
@@ -1538,10 +1534,10 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
   // If this is an allocation of a fixed size array of structs, analyze as a
   // variable size array.  malloc [100 x struct],1 -> malloc struct, 100
   if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
-    if (const ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
+    if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
       AllocTy = AT->getElementType();
 
-  const StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
+  StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
   if (!AllocSTy)
     return false;
 
@@ -1552,8 +1548,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
     // If this is a fixed size array, transform the Malloc to be an alloc of
     // structs.  malloc [100 x struct],1 -> malloc struct, 100
-    if (const ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI))) {
-      const Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
+    if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI))) {
+      Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
       unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
       Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
       Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
@@ -1596,7 +1592,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
     if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC))
       return true;
   } else if (CallInst *CI = extractMallocCall(StoredOnceVal)) {
-    const Type* MallocType = getMallocAllocatedType(CI);
+    Type* MallocType = getMallocAllocatedType(CI);
     if (MallocType && TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
                                                          GVI, TD))
       return true;
@@ -1611,7 +1607,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
 /// can shrink the global into a boolean and select between the two values
 /// whenever it is used.  This exposes the values to other scalar optimizations.
 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
-  const Type *GVElType = GV->getType()->getElementType();
+  Type *GVElType = GV->getType()->getElementType();
 
   // If GVElType is already i1, it is already shrunk.  If the type of the GV is
   // an FP value, pointer or vector, don't do this optimization because a select
@@ -1761,7 +1757,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
     DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV);
     Instruction& FirstI = const_cast<Instruction&>(*GS.AccessingFunction
                                                    ->getEntryBlock().begin());
-    const Type* ElemTy = GV->getType()->getElementType();
+    Type* ElemTy = GV->getType()->getElementType();
     // FIXME: Pass Global's alignment when globals have alignment
     AllocaInst* Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
     if (!isa<UndefValue>(GV->getInitializer()))
@@ -2003,7 +1999,7 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
   CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 65535);
   CSVals[1] = 0;
 
-  const StructType *StructTy =
+  StructType *StructTy =
     cast <StructType>(
     cast<ArrayType>(GCL->getType()->getElementType())->getElementType());
 
@@ -2013,9 +2009,9 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
     if (Ctors[i]) {
       CSVals[1] = Ctors[i];
     } else {
-      const Type *FTy = FunctionType::get(Type::getVoidTy(GCL->getContext()),
+      Type *FTy = FunctionType::get(Type::getVoidTy(GCL->getContext()),
                                           false);
-      const PointerType *PFTy = PointerType::getUnqual(FTy);
+      PointerType *PFTy = PointerType::getUnqual(FTy);
       CSVals[1] = Constant::getNullValue(PFTy);
       CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()),
                                    0x7fffffff);
@@ -2196,7 +2192,7 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
   }
 
   std::vector<Constant*> Elts;
-  if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
+  if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
 
     // Break up the constant into its elements.
     if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
@@ -2224,10 +2220,10 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
   }
 
   ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
-  const SequentialType *InitTy = cast<SequentialType>(Init->getType());
+  SequentialType *InitTy = cast<SequentialType>(Init->getType());
 
   uint64_t NumElts;
-  if (const ArrayType *ATy = dyn_cast<ArrayType>(InitTy))
+  if (ArrayType *ATy = dyn_cast<ArrayType>(InitTy))
    NumElts = ATy->getNumElements();
   else
     NumElts = cast<VectorType>(InitTy)->getNumElements();
@@ -2338,7 +2334,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
     Constant *InstResult = 0;
     if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
-      if (SI->isVolatile()) return false;  // no volatile accesses.
+      if (!SI->isSimple()) return false;  // no volatile/atomic accesses.
      Constant *Ptr = getVal(Values, SI->getOperand(1));
       if (!isSimpleEnoughPointerToCommit(Ptr))
         // If this is too complex for us to commit, reject it.
@@ -2358,7 +2354,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
           // stored value.
           Ptr = CE->getOperand(0);
 
-          const Type *NewTy=cast<PointerType>(Ptr->getType())->getElementType();
+          Type *NewTy=cast<PointerType>(Ptr->getType())->getElementType();
 
           // In order to push the bitcast onto the stored value, a bitcast
           // from NewTy to Val's type must be legal.  If it's not, we can try
@@ -2367,14 +2363,14 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
           // If NewTy is a struct, we can convert the pointer to the struct
           // into a pointer to its first member.
           // FIXME: This could be extended to support arrays as well.
-          if (const StructType *STy = dyn_cast<StructType>(NewTy)) {
+          if (StructType *STy = dyn_cast<StructType>(NewTy)) {
             NewTy = STy->getTypeAtIndex(0U);
 
-            const IntegerType *IdxTy =IntegerType::get(NewTy->getContext(), 32);
+            IntegerType *IdxTy =IntegerType::get(NewTy->getContext(), 32);
             Constant *IdxZero = ConstantInt::get(IdxTy, 0, false);
             Constant * const IdxList[] = {IdxZero, IdxZero};
 
-            Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList, 2);
+            Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList);
 
             // If we can't improve the situation by introspecting NewTy,
             // we have to give up.
@@ -2411,17 +2407,17 @@
       for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
            i != e; ++i)
         GEPOps.push_back(getVal(Values, *i));
-      InstResult = cast<GEPOperator>(GEP)->isInBounds() ?
-        ConstantExpr::getInBoundsGetElementPtr(P, &GEPOps[0], GEPOps.size()) :
-        ConstantExpr::getGetElementPtr(P, &GEPOps[0], GEPOps.size());
+      InstResult =
+        ConstantExpr::getGetElementPtr(P, GEPOps,
+                                       cast<GEPOperator>(GEP)->isInBounds());
     } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
-      if (LI->isVolatile()) return false;  // no volatile accesses.
+      if (!LI->isSimple()) return false;  // no volatile/atomic accesses.
       InstResult = ComputeLoadResult(getVal(Values, LI->getOperand(0)),
                                      MutatedMemory);
       if (InstResult == 0) return false; // Could not evaluate load.
     } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
       if (AI->isArrayAllocation()) return false;  // Cannot handle array allocs.
-      const Type *Ty = AI->getType()->getElementType();
+      Type *Ty = AI->getType()->getElementType();
       AllocaTmps.push_back(new GlobalVariable(Ty, false,
                                               GlobalValue::InternalLinkage,
                                               UndefValue::get(Ty),
@@ -2465,8 +2461,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
 
       if (Callee->isDeclaration()) {
         // If this is a function we can constant fold, do it.
-        if (Constant *C = ConstantFoldCall(Callee, Formals.data(),
-                                           Formals.size())) {
+        if (Constant *C = ConstantFoldCall(Callee, Formals)) {
          InstResult = C;
         } else {
           return false;
@@ -2512,7 +2507,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
         CallStack.pop_back();  // return from fn.
         return true;  // We succeeded at evaluating this ctor!
       } else {
-        // invoke, unwind, unreachable.
+        // invoke, unwind, resume, unreachable.
         return false;  // Cannot handle this terminator.
       }
 
@@ -2711,7 +2706,7 @@ static Function *FindCXAAtExit(Module &M) {
   if (!Fn)
     return 0;
 
-  const FunctionType *FTy = Fn->getFunctionType();
+  FunctionType *FTy = Fn->getFunctionType();
 
   // Checking that the function has the right return type, the right number of
   // parameters and that they all have pointer types should be enough.
diff --git a/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp b/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
index 25c0134..d757e1f 100644
--- a/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
@@ -167,7 +167,7 @@ bool IPCP::PropagateConstantReturn(Function &F) {
 
   // Check to see if this function returns a constant.
   SmallVector<Value *,4> RetVals;
-  const StructType *STy = dyn_cast<StructType>(F.getReturnType());
+  StructType *STy = dyn_cast<StructType>(F.getReturnType());
   if (STy)
     for (unsigned i = 0, e = STy->getNumElements(); i < e; ++i)
       RetVals.push_back(UndefValue::get(STy->getElementType(i)));
diff --git a/contrib/llvm/lib/Transforms/IPO/IPO.cpp b/contrib/llvm/lib/Transforms/IPO/IPO.cpp
index 31ce95f..6233922 100644
--- a/contrib/llvm/lib/Transforms/IPO/IPO.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/IPO.cpp
@@ -13,6 +13,7 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "llvm-c/Initialization.h"
 #include "llvm-c/Transforms/IPO.h"
 #include "llvm/InitializePasses.h"
 #include "llvm/PassManager.h"
@@ -35,7 +36,6 @@ void llvm::initializeIPO(PassRegistry &Registry) {
   initializeLoopExtractorPass(Registry);
   initializeBlockExtractorPassPass(Registry);
   initializeSingleLoopExtractorPass(Registry);
-  initializeLowerSetJmpPass(Registry);
   initializeMergeFunctionsPass(Registry);
   initializePartialInlinerPass(Registry);
   initializePruneEHPass(Registry);
@@ -70,6 +70,10 @@ void LLVMAddFunctionInliningPass(LLVMPassManagerRef PM) {
   unwrap(PM)->add(createFunctionInliningPass());
 }
 
+void LLVMAddAlwaysInlinerPass(LLVMPassManagerRef PM) {
+  unwrap(PM)->add(llvm::createAlwaysInlinerPass());
+}
+
 void LLVMAddGlobalDCEPass(LLVMPassManagerRef PM) {
   unwrap(PM)->add(createGlobalDCEPass());
 }
@@ -82,10 +86,6 @@ void LLVMAddIPConstantPropagationPass(LLVMPassManagerRef PM) {
   unwrap(PM)->add(createIPConstantPropagationPass());
 }
 
-void LLVMAddLowerSetJmpPass(LLVMPassManagerRef PM) {
-  unwrap(PM)->add(createLowerSetJmpPass());
-}
-
 void LLVMAddPruneEHPass(LLVMPassManagerRef PM) {
   unwrap(PM)->add(createPruneEHPass());
 }
@@ -98,11 +98,6 @@ void LLVMAddInternalizePass(LLVMPassManagerRef PM, unsigned AllButMain) {
   unwrap(PM)->add(createInternalizePass(AllButMain != 0));
 }
 
-
-void LLVMAddRaiseAllocationsPass(LLVMPassManagerRef PM) {
-  // FIXME: Remove in LLVM 3.0.
-}
-
 void LLVMAddStripDeadPrototypesPass(LLVMPassManagerRef PM) {
   unwrap(PM)->add(createStripDeadPrototypesPass());
 }
diff --git a/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp b/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
index ce795b7..c0426da 100644
--- a/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
@@ -23,6 +23,7 @@
 #include "llvm/Support/CallSite.h"
 #include "llvm/Transforms/IPO.h"
 #include "llvm/Transforms/IPO/InlinerPass.h"
+#include "llvm/Target/TargetData.h"
 #include "llvm/ADT/SmallPtrSet.h"
 
 using namespace llvm;
@@ -32,10 +33,10 @@ namespace {
   // AlwaysInliner only inlines functions that are mark as "always inline".
   class AlwaysInliner : public Inliner {
     // Functions that are never inlined
-    SmallPtrSet<const Function*, 16> NeverInline; 
+    SmallPtrSet<const Function*, 16> NeverInline;
     InlineCostAnalyzer CA;
   public:
-    // Use extremely low threshold. 
+    // Use extremely low threshold.
     AlwaysInliner() : Inliner(ID, -2000000000) {
       initializeAlwaysInlinerPass(*PassRegistry::getPassRegistry());
     }
@@ -52,8 +53,8 @@ namespace {
     void growCachedCostInfo(Function* Caller, Function* Callee) {
       CA.growCachedCostInfo(Caller, Callee);
     }
-    virtual bool doFinalization(CallGraph &CG) { 
-      return removeDeadFunctions(CG, &NeverInline); 
+    virtual bool doFinalization(CallGraph &CG) {
+      return removeDeadFunctions(CG, &NeverInline);
     }
     virtual bool doInitialization(CallGraph &CG);
     void releaseMemory() {
@@ -71,11 +72,13 @@ INITIALIZE_PASS_END(AlwaysInliner, "always-inline",
 
 Pass *llvm::createAlwaysInlinerPass() { return new AlwaysInliner(); }
 
-// doInitialization - Initializes the vector of functions that have not 
+// doInitialization - Initializes the vector of functions that have not
 // been annotated with the "always inline" attribute.
 bool AlwaysInliner::doInitialization(CallGraph &CG) {
+  CA.setTargetData(getAnalysisIfAvailable<TargetData>());
+
   Module &M = CG.getModule();
-  
+
   for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
     if (!I->isDeclaration() && !I->hasFnAttr(Attribute::AlwaysInline))
diff --git a/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp b/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
index 0c5b3be..84dd4fd 100644
--- a/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
@@ -22,6 +22,7 @@
 #include "llvm/Support/CallSite.h"
 #include "llvm/Transforms/IPO.h"
 #include "llvm/Transforms/IPO/InlinerPass.h"
+#include "llvm/Target/TargetData.h"
 #include "llvm/ADT/SmallPtrSet.h"
 
 using namespace llvm;
@@ -30,7 +31,7 @@ namespace {
 
   class SimpleInliner : public Inliner {
     // Functions that are never inlined
-    SmallPtrSet<const Function*, 16> NeverInline; 
+    SmallPtrSet<const Function*, 16> NeverInline;
     InlineCostAnalyzer CA;
   public:
     SimpleInliner() : Inliner(ID) {
@@ -68,16 +69,17 @@ INITIALIZE_PASS_END(SimpleInliner, "inline",
 
 Pass *llvm::createFunctionInliningPass() { return new SimpleInliner(); }
 
-Pass *llvm::createFunctionInliningPass(int Threshold) { 
+Pass *llvm::createFunctionInliningPass(int Threshold) {
   return new SimpleInliner(Threshold);
 }
 
 // doInitialization - Initializes the vector of functions that have been
 // annotated with the noinline attribute.
 bool SimpleInliner::doInitialization(CallGraph &CG) {
-  
+  CA.setTargetData(getAnalysisIfAvailable<TargetData>());
+
   Module &M = CG.getModule();
-  
+
   for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
     if (!I->isDeclaration() && I->hasFnAttr(Attribute::NoInline))
@@ -85,34 +87,34 @@ bool SimpleInliner::doInitialization(CallGraph &CG) {
 
   // Get llvm.noinline
   GlobalVariable *GV = M.getNamedGlobal("llvm.noinline");
-  
+
   if (GV == 0)
     return false;
 
   // Don't crash on invalid code
   if (!GV->hasDefinitiveInitializer())
     return false;
-  
+
   const ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
-  
+
   if (InitList == 0)
     return false;
 
   // Iterate over each element and add to the NeverInline set
   for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
-        
+
     // Get Source
     const Constant *Elt = InitList->getOperand(i);
-        
+
     if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Elt))
-      if (CE->getOpcode() == Instruction::BitCast) 
+      if (CE->getOpcode() == Instruction::BitCast)
         Elt = CE->getOperand(0);
-    
+
     // Insert into set of functions to never inline
     if (const Function *F = dyn_cast<Function>(Elt))
       NeverInline.insert(F);
-  }
-  
+  }
+
   return false;
 }
diff --git a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
index 57f3e77..f00935b 100644
--- a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -62,7 +62,7 @@ void Inliner::getAnalysisUsage(AnalysisUsage &Info) const {
 }
 
 
-typedef DenseMap<const ArrayType*, std::vector<AllocaInst*> >
+typedef DenseMap<ArrayType*, std::vector<AllocaInst*> >
 InlinedArrayAllocasTy;
 
 /// InlineCallIfPossible - If it is possible to inline the specified call site,
@@ -139,7 +139,7 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
     // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
     // type is not itself an array (because we're afraid of pessimizing SRoA).
-    const ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
+    ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
     if (ATy == 0 || AI->isArrayAllocation())
       continue;
diff --git a/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp b/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp
index 848944d..4f96afe4 100644
--- a/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp
@@ -23,6 +23,7 @@
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/FunctionUtils.h"
 #include "llvm/ADT/Statistic.h"
 #include <fstream>
@@ -53,12 +54,12 @@ namespace {
 char LoopExtractor::ID = 0;
 INITIALIZE_PASS_BEGIN(LoopExtractor, "loop-extract",
-                      "Extract loops into new functions", false, false) 
+                      "Extract loops into new functions", false, false)
 INITIALIZE_PASS_DEPENDENCY(BreakCriticalEdges)
 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
 INITIALIZE_PASS_DEPENDENCY(DominatorTree)
 INITIALIZE_PASS_END(LoopExtractor, "loop-extract",
-                    "Extract loops into new functions", false, false) 
+                    "Extract loops into new functions", false, false)
 
 namespace {
   /// SingleLoopExtractor - For bugpoint.
@@ -100,9 +101,9 @@ bool LoopExtractor::runOnLoop(Loop *L, LPPassManager &LPM) {
     L->getHeader()->getParent()->getEntryBlock().getTerminator();
   if (!isa<BranchInst>(EntryTI) ||
       !cast<BranchInst>(EntryTI)->isUnconditional() ||
-      EntryTI->getSuccessor(0) != L->getHeader())
+      EntryTI->getSuccessor(0) != L->getHeader()) {
     ShouldExtractLoop = true;
-  else {
+  } else {
     // Check to see if any exits from the loop are more than just return
     // blocks.
     SmallVector<BasicBlock*, 8> ExitBlocks;
@@ -113,6 +114,21 @@
         break;
       }
   }
+
+  if (ShouldExtractLoop) {
+    // We must omit landing pads. Landing pads must accompany the invoke
+    // instruction. But this would result in a loop in the extracted
+    // function. An infinite cycle occurs when it tries to extract that loop as
+    // well.
+    SmallVector<BasicBlock*, 8> ExitBlocks;
+    L->getExitBlocks(ExitBlocks);
+    for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
+      if (ExitBlocks[i]->isLandingPad()) {
+        ShouldExtractLoop = false;
+        break;
+      }
+  }
+
   if (ShouldExtractLoop) {
     if (NumLoops == 0) return Changed;
     --NumLoops;
@@ -149,6 +165,7 @@ namespace {
   /// BlocksToNotExtract list.
   class BlockExtractorPass : public ModulePass {
     void LoadFile(const char *Filename);
+    void SplitLandingPadPreds(Function *F);
 
     std::vector<BasicBlock*> BlocksToNotExtract;
     std::vector<std::pair<std::string, std::string> > BlocksToNotExtractByName;
@@ -171,8 +188,7 @@ INITIALIZE_PASS(BlockExtractorPass, "extract-blocks",
 // createBlockExtractorPass - This pass extracts all blocks (except those
 // specified in the argument list) from the functions in the module.
 //
-ModulePass *llvm::createBlockExtractorPass()
-{
+ModulePass *llvm::createBlockExtractorPass() {
   return new BlockExtractorPass();
 }
 
@@ -194,6 +210,37 @@ void BlockExtractorPass::LoadFile(const char *Filename) {
   }
 }
 
+/// SplitLandingPadPreds - The landing pad needs to be extracted with the invoke
+/// instruction. The critical edge breaker will refuse to break critical edges
+/// to a landing pad. So do them here. After this method runs, all landing pads
+/// should have only one predecessor.
+void BlockExtractorPass::SplitLandingPadPreds(Function *F) {
+  for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
+    InvokeInst *II = dyn_cast<InvokeInst>(I);
+    if (!II) continue;
+    BasicBlock *Parent = II->getParent();
+    BasicBlock *LPad = II->getUnwindDest();
+
+    // Look through the landing pad's predecessors. If one of them ends in an
+    // 'invoke', then we want to split the landing pad.
+    bool Split = false;
+    for (pred_iterator
+           PI = pred_begin(LPad), PE = pred_end(LPad); PI != PE; ++PI) {
+      BasicBlock *BB = *PI;
+      if (BB->isLandingPad() && BB != Parent &&
+          isa<InvokeInst>(Parent->getTerminator())) {
+        Split = true;
+        break;
+      }
+    }
+
+    if (!Split) continue;
+
+    SmallVector<BasicBlock*, 2> NewBBs;
+    SplitLandingPadPredecessors(LPad, Parent, ".1", ".2", 0, NewBBs);
+  }
+}
+
 bool BlockExtractorPass::runOnModule(Module &M) {
   std::set<BasicBlock*> TranslatedBlocksToNotExtract;
   for (unsigned i = 0, e = BlocksToNotExtract.size(); i != e; ++i) {
@@ -236,13 +283,21 @@ bool BlockExtractorPass::runOnModule(Module &M) {
   // Now that we know which blocks to not extract, figure out which ones we
   // WANT to extract.
   std::vector<BasicBlock*> BlocksToExtract;
-  for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F)
+  for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
+    SplitLandingPadPreds(&*F);
     for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
       if (!TranslatedBlocksToNotExtract.count(BB))
         BlocksToExtract.push_back(BB);
+  }
 
-  for (unsigned i = 0, e = BlocksToExtract.size(); i != e; ++i)
-    ExtractBasicBlock(BlocksToExtract[i]);
+  for (unsigned i = 0, e = BlocksToExtract.size(); i != e; ++i) {
+    SmallVector<BasicBlock*, 2> BlocksToExtractVec;
+    BlocksToExtractVec.push_back(BlocksToExtract[i]);
+    if (const InvokeInst *II =
+        dyn_cast<InvokeInst>(BlocksToExtract[i]->getTerminator()))
+      BlocksToExtractVec.push_back(II->getUnwindDest());
+    ExtractBasicBlock(BlocksToExtractVec);
+  }
 
   return !BlocksToExtract.empty();
 }
diff --git a/contrib/llvm/lib/Transforms/IPO/LowerSetJmp.cpp b/contrib/llvm/lib/Transforms/IPO/LowerSetJmp.cpp
deleted file mode 100644
index 659476b..0000000
--- a/contrib/llvm/lib/Transforms/IPO/LowerSetJmp.cpp
+++ /dev/null
@@ -1,547 +0,0 @@
-//===- LowerSetJmp.cpp - Code pertaining to lowering set/long jumps -------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the lowering of setjmp and longjmp to use the
-// LLVM invoke and unwind instructions as necessary.
-//
-// Lowering of longjmp is fairly trivial. We replace the call with a
-// call to the LLVM library function "__llvm_sjljeh_throw_longjmp()".
-// This unwinds the stack for us calling all of the destructors for
-// objects allocated on the stack.
-//
-// At a setjmp call, the basic block is split and the setjmp removed.
-// The calls in a function that have a setjmp are converted to invoke
-// where the except part checks to see if it's a longjmp exception and,
-// if so, if it's handled in the function. If it is, then it gets the
-// value returned by the longjmp and goes to where the basic block was
-// split. Invoke instructions are handled in a similar fashion with the
-// original except block being executed if it isn't a longjmp except
-// that is handled by that function.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// FIXME: This pass doesn't deal with PHI statements just yet. That is,
-// we expect this to occur before SSAification is done. This would seem
-// to make sense, but in general, it might be a good idea to make this
-// pass invokable via the "opt" command at will.
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "lowersetjmp"
-#include "llvm/Transforms/IPO.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Instructions.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/Support/CFG.h"
-#include "llvm/Support/InstVisitor.h"
-#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/Statistic.h"
-#include <map>
-using namespace llvm;
-
-STATISTIC(LongJmpsTransformed, "Number of longjmps transformed");
-STATISTIC(SetJmpsTransformed , "Number of setjmps transformed");
-STATISTIC(CallsTransformed   , "Number of calls invokified");
-STATISTIC(InvokesTransformed , "Number of invokes modified");
-
-namespace {
-  //===--------------------------------------------------------------------===//
-  // LowerSetJmp pass implementation.
-  class LowerSetJmp : public ModulePass, public InstVisitor<LowerSetJmp> {
-    // LLVM library functions...
-    Constant *InitSJMap;        // __llvm_sjljeh_init_setjmpmap
-    Constant *DestroySJMap;     // __llvm_sjljeh_destroy_setjmpmap
-    Constant *AddSJToMap;       // __llvm_sjljeh_add_setjmp_to_map
-    Constant *ThrowLongJmp;     // __llvm_sjljeh_throw_longjmp
-    Constant *TryCatchLJ;       // __llvm_sjljeh_try_catching_longjmp_exception
-    Constant *IsLJException;    // __llvm_sjljeh_is_longjmp_exception
-    Constant *GetLJValue;       // __llvm_sjljeh_get_longjmp_value
-
-    typedef std::pair<SwitchInst*, CallInst*> SwitchValuePair;
-
-    // Keep track of those basic blocks reachable via a depth-first search of
-    // the CFG from a setjmp call. We only need to transform those "call" and
-    // "invoke" instructions that are reachable from the setjmp call site.
-    std::set<BasicBlock*> DFSBlocks;
-
-    // The setjmp map is going to hold information about which setjmps
-    // were called (each setjmp gets its own number) and with which
-    // buffer it was called.
-    std::map<Function*, AllocaInst*> SJMap;
-
-    // The rethrow basic block map holds the basic block to branch to if
-    // the exception isn't handled in the current function and needs to
-    // be rethrown.
-    std::map<const Function*, BasicBlock*> RethrowBBMap;
-
-    // The preliminary basic block map holds a basic block that grabs the
-    // exception and determines if it's handled by the current function.
-    std::map<const Function*, BasicBlock*> PrelimBBMap;
-
-    // The switch/value map holds a switch inst/call inst pair. The
-    // switch inst controls which handler (if any) gets called and the
-    // value is the value returned to that handler by the call to
-    // __llvm_sjljeh_get_longjmp_value.
-    std::map<const Function*, SwitchValuePair> SwitchValMap;
-
-    // A map of which setjmps we've seen so far in a function.
-    std::map<const Function*, unsigned> SetJmpIDMap;
-
-    AllocaInst* GetSetJmpMap(Function* Func);
-    BasicBlock* GetRethrowBB(Function* Func);
-    SwitchValuePair GetSJSwitch(Function* Func, BasicBlock* Rethrow);
-
-    void TransformLongJmpCall(CallInst* Inst);
-    void TransformSetJmpCall(CallInst* Inst);
-
-    bool IsTransformableFunction(StringRef Name);
-  public:
-    static char ID; // Pass identification, replacement for typeid
-    LowerSetJmp() : ModulePass(ID) {
-      initializeLowerSetJmpPass(*PassRegistry::getPassRegistry());
-    }
-
-    void visitCallInst(CallInst& CI);
-    void visitInvokeInst(InvokeInst& II);
-    void visitReturnInst(ReturnInst& RI);
-    void visitUnwindInst(UnwindInst& UI);
-
-    bool runOnModule(Module& M);
-    bool doInitialization(Module& M);
-  };
-} // end anonymous namespace
-
-char LowerSetJmp::ID = 0;
-INITIALIZE_PASS(LowerSetJmp, "lowersetjmp", "Lower Set Jump", false, false)
-
-// run - Run the transformation on the program. We grab the function
-// prototypes for longjmp and setjmp. If they are used in the program,
-// then we can go directly to the places they're at and transform them.
-bool LowerSetJmp::runOnModule(Module& M) {
-  bool Changed = false;
-
-  // These are what the functions are called.
-  Function* SetJmp = M.getFunction("llvm.setjmp");
-  Function* LongJmp = M.getFunction("llvm.longjmp");
-
-  // This program doesn't have longjmp and setjmp calls.
-  if ((!LongJmp || LongJmp->use_empty()) &&
-      (!SetJmp || SetJmp->use_empty())) return false;
-
-  // Initialize some values and functions we'll need to transform the
-  // setjmp/longjmp functions.
-  doInitialization(M);
-
-  if (SetJmp) {
-    for (Value::use_iterator B = SetJmp->use_begin(), E = SetJmp->use_end();
-         B != E; ++B) {
-      BasicBlock* BB = cast<Instruction>(*B)->getParent();
-      for (df_ext_iterator<BasicBlock*> I = df_ext_begin(BB, DFSBlocks),
-             E = df_ext_end(BB, DFSBlocks); I != E; ++I)
-        /* empty */;
-    }
-
-    while (!SetJmp->use_empty()) {
-      assert(isa<CallInst>(SetJmp->use_back()) &&
-             "User of setjmp intrinsic not a call?");
-      TransformSetJmpCall(cast<CallInst>(SetJmp->use_back()));
-      Changed = true;
-    }
-  }
-
-  if (LongJmp)
-    while (!LongJmp->use_empty()) {
-      assert(isa<CallInst>(LongJmp->use_back()) &&
-             "User of longjmp intrinsic not a call?");
-      TransformLongJmpCall(cast<CallInst>(LongJmp->use_back()));
-      Changed = true;
-    }
-
-  // Now go through the affected functions and convert calls and invokes
-  // to new invokes...
-  for (std::map<Function*, AllocaInst*>::iterator
-         B = SJMap.begin(), E = SJMap.end(); B != E; ++B) {
-    Function* F = B->first;
-    for (Function::iterator BB = F->begin(), BE = F->end(); BB != BE; ++BB)
-      for (BasicBlock::iterator IB = BB->begin(), IE = BB->end(); IB != IE; ) {
-        visit(*IB++);
-        if (IB != BB->end() && IB->getParent() != BB)
-          break;  // The next instruction got moved to a different block!
-      }
-  }
-
-  DFSBlocks.clear();
-  SJMap.clear();
-  RethrowBBMap.clear();
-  PrelimBBMap.clear();
-  SwitchValMap.clear();
-  SetJmpIDMap.clear();
-
-  return Changed;
-}
-
-// doInitialization - For the lower long/setjmp pass, this ensures that a
-// module contains a declaration for the intrisic functions we are going
-// to call to convert longjmp and setjmp calls.
-//
-// This function is always successful, unless it isn't.
-bool LowerSetJmp::doInitialization(Module& M)
-{
-  const Type *SBPTy = Type::getInt8PtrTy(M.getContext());
-  const Type *SBPPTy = PointerType::getUnqual(SBPTy);
-
-  // N.B. See llvm/runtime/GCCLibraries/libexception/SJLJ-Exception.h for
-  // a description of the following library functions.
-
-  // void __llvm_sjljeh_init_setjmpmap(void**)
-  InitSJMap = M.getOrInsertFunction("__llvm_sjljeh_init_setjmpmap",
-                                    Type::getVoidTy(M.getContext()),
-                                    SBPPTy, (Type *)0);
-  // void __llvm_sjljeh_destroy_setjmpmap(void**)
-  DestroySJMap = M.getOrInsertFunction("__llvm_sjljeh_destroy_setjmpmap",
-                                       Type::getVoidTy(M.getContext()),
-                                       SBPPTy, (Type *)0);
-
-  // void __llvm_sjljeh_add_setjmp_to_map(void**, void*, unsigned)
-  AddSJToMap = M.getOrInsertFunction("__llvm_sjljeh_add_setjmp_to_map",
-                                     Type::getVoidTy(M.getContext()),
-                                     SBPPTy, SBPTy,
-                                     Type::getInt32Ty(M.getContext()),
-                                     (Type *)0);
-
-  // void __llvm_sjljeh_throw_longjmp(int*, int)
-  ThrowLongJmp = M.getOrInsertFunction("__llvm_sjljeh_throw_longjmp",
-                                       Type::getVoidTy(M.getContext()), SBPTy,
-                                       Type::getInt32Ty(M.getContext()),
-                                       (Type *)0);
-
-  // unsigned __llvm_sjljeh_try_catching_longjmp_exception(void **)
-  TryCatchLJ =
-    M.getOrInsertFunction("__llvm_sjljeh_try_catching_longjmp_exception",
-                          Type::getInt32Ty(M.getContext()), SBPPTy, (Type *)0);
-
-  // bool __llvm_sjljeh_is_longjmp_exception()
-  IsLJException = M.getOrInsertFunction("__llvm_sjljeh_is_longjmp_exception",
-                                        Type::getInt1Ty(M.getContext()),
-                                        (Type *)0);
-
-  // int __llvm_sjljeh_get_longjmp_value()
-  GetLJValue = M.getOrInsertFunction("__llvm_sjljeh_get_longjmp_value",
-                                     Type::getInt32Ty(M.getContext()),
-                                     (Type *)0);
-  return true;
-}
-
-// IsTransformableFunction - Return true if the function name isn't one
-// of the ones we don't want transformed. Currently, don't transform any
-// "llvm.{setjmp,longjmp}" functions and none of the setjmp/longjmp error
-// handling functions (beginning with __llvm_sjljeh_...they don't throw
-// exceptions).
-bool LowerSetJmp::IsTransformableFunction(StringRef Name) {
-  return !Name.startswith("__llvm_sjljeh_");
-}
-
-// TransformLongJmpCall - Transform a longjmp call into a call to the
-// internal __llvm_sjljeh_throw_longjmp function. It then takes care of
-// throwing the exception for us.
-void LowerSetJmp::TransformLongJmpCall(CallInst* Inst)
-{
-  const Type* SBPTy = Type::getInt8PtrTy(Inst->getContext());
-
-  // Create the call to "__llvm_sjljeh_throw_longjmp". This takes the
-  // same parameters as "longjmp", except that the buffer is cast to a
-  // char*. It returns "void", so it doesn't need to replace any of
-  // Inst's uses and doesn't get a name.
-  CastInst* CI =
-    new BitCastInst(Inst->getArgOperand(0), SBPTy, "LJBuf", Inst);
-  Value *Args[] = { CI, Inst->getArgOperand(1) };
-  CallInst::Create(ThrowLongJmp, Args, "", Inst);
-
-  SwitchValuePair& SVP = SwitchValMap[Inst->getParent()->getParent()];
-
-  // If the function has a setjmp call in it (they are transformed first)
-  // we should branch to the basic block that determines if this longjmp
-  // is applicable here. Otherwise, issue an unwind.
-  if (SVP.first)
-    BranchInst::Create(SVP.first->getParent(), Inst);
-  else
-    new UnwindInst(Inst->getContext(), Inst);
-
-  // Remove all insts after the branch/unwind inst.  Go from back to front to
-  // avoid replaceAllUsesWith if possible.
-  BasicBlock *BB = Inst->getParent();
-  Instruction *Removed;
-  do {
-    Removed = &BB->back();
-    // If the removed instructions have any users, replace them now.
-    if (!Removed->use_empty())
-      Removed->replaceAllUsesWith(UndefValue::get(Removed->getType()));
-    Removed->eraseFromParent();
-  } while (Removed != Inst);
-
-  ++LongJmpsTransformed;
-}
-
-// GetSetJmpMap - Retrieve (create and initialize, if necessary) the
-// setjmp map. This map is going to hold information about which setjmps
-// were called (each setjmp gets its own number) and with which buffer it
-// was called. There can be only one!
-AllocaInst* LowerSetJmp::GetSetJmpMap(Function* Func)
-{
-  if (SJMap[Func]) return SJMap[Func];
-
-  // Insert the setjmp map initialization before the first instruction in
-  // the function.
-  Instruction* Inst = Func->getEntryBlock().begin();
-  assert(Inst && "Couldn't find even ONE instruction in entry block!");
-
-  // Fill in the alloca and call to initialize the SJ map.
-  const Type *SBPTy =
-        Type::getInt8PtrTy(Func->getContext());
-  AllocaInst* Map = new AllocaInst(SBPTy, 0, "SJMap", Inst);
-  CallInst::Create(InitSJMap, Map, "", Inst);
-  return SJMap[Func] = Map;
-}
-
-// GetRethrowBB - Only one rethrow basic block is needed per function.
-// If this is a longjmp exception but not handled in this block, this BB
-// performs the rethrow.
-BasicBlock* LowerSetJmp::GetRethrowBB(Function* Func)
-{
-  if (RethrowBBMap[Func]) return RethrowBBMap[Func];
-
-  // The basic block we're going to jump to if we need to rethrow the
-  // exception.
-  BasicBlock* Rethrow =
-        BasicBlock::Create(Func->getContext(), "RethrowExcept", Func);
-
-  // Fill in the "Rethrow" BB with a call to rethrow the exception. This
-  // is the last instruction in the BB since at this point the runtime
-  // should exit this function and go to the next function.
-  new UnwindInst(Func->getContext(), Rethrow);
-  return RethrowBBMap[Func] = Rethrow;
-}
-
-// GetSJSwitch - Return the switch statement that controls which handler
-// (if any) gets called and the value returned to that handler.
-LowerSetJmp::SwitchValuePair LowerSetJmp::GetSJSwitch(Function* Func,
-                                                      BasicBlock* Rethrow)
-{
-  if (SwitchValMap[Func].first) return SwitchValMap[Func];
-
-  BasicBlock* LongJmpPre =
-    BasicBlock::Create(Func->getContext(), "LongJmpBlkPre", Func);
-
-  // Keep track of the preliminary basic block for some of the other
-  // transformations.
-  PrelimBBMap[Func] = LongJmpPre;
-
-  // Grab the exception.
-  CallInst* Cond = CallInst::Create(IsLJException, "IsLJExcept", LongJmpPre);
-
-  // The "decision basic block" gets the number associated with the
-  // setjmp call returning to switch on and the value returned by
-  // longjmp.
-  BasicBlock* DecisionBB =
-    BasicBlock::Create(Func->getContext(), "LJDecisionBB", Func);
-
-  BranchInst::Create(DecisionBB, Rethrow, Cond, LongJmpPre);
-
-  // Fill in the "decision" basic block.
-  CallInst* LJVal = CallInst::Create(GetLJValue, "LJVal", DecisionBB);
-  CallInst* SJNum = CallInst::Create(TryCatchLJ, GetSetJmpMap(Func), "SJNum",
-                                     DecisionBB);
-
-  SwitchInst* SI = SwitchInst::Create(SJNum, Rethrow, 0, DecisionBB);
-  return SwitchValMap[Func] = SwitchValuePair(SI, LJVal);
-}
-
-// TransformSetJmpCall - The setjmp call is a bit trickier to transform.
-// We're going to convert all setjmp calls to nops. Then all "call" and
-// "invoke" instructions in the function are converted to "invoke" where
-// the "except" branch is used when returning from a longjmp call.
-void LowerSetJmp::TransformSetJmpCall(CallInst* Inst)
-{
-  BasicBlock* ABlock = Inst->getParent();
-  Function* Func = ABlock->getParent();
-
-  // Add this setjmp to the setjmp map.
-  const Type* SBPTy =
-        Type::getInt8PtrTy(Inst->getContext());
-  CastInst* BufPtr =
-    new BitCastInst(Inst->getArgOperand(0), SBPTy, "SBJmpBuf", Inst);
-  Value *Args[] = {
-    GetSetJmpMap(Func), BufPtr,
-    ConstantInt::get(Type::getInt32Ty(Inst->getContext()), SetJmpIDMap[Func]++)
-  };
-  CallInst::Create(AddSJToMap, Args, "", Inst);
-
-  // We are guaranteed that there are no values live across basic blocks
-  // (because we are "not in SSA form" yet), but there can still be values live
-  // in basic blocks. Because of this, splitting the setjmp block can cause
-  // values above the setjmp to not dominate uses which are after the setjmp
-  // call. For all of these occasions, we must spill the value to the stack.
-  //
-  std::set<Instruction*> InstrsAfterCall;
-
-  // The call is probably very close to the end of the basic block, for the
-  // common usage pattern of: 'if (setjmp(...))', so keep track of the
-  // instructions after the call.
-  for (BasicBlock::iterator I = ++BasicBlock::iterator(Inst), E = ABlock->end();
-       I != E; ++I)
-    InstrsAfterCall.insert(I);
-
-  for (BasicBlock::iterator II = ABlock->begin();
-       II != BasicBlock::iterator(Inst); ++II)
-    // Loop over all of the uses of instruction.  If any of them are after the
-    // call, "spill" the value to the stack.
-    for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
-         UI != E; ++UI) {
-      User *U = *UI;
-      if (cast<Instruction>(U)->getParent() != ABlock ||
-          InstrsAfterCall.count(cast<Instruction>(U))) {
-        DemoteRegToStack(*II);
-        break;
-      }
-    }
-  InstrsAfterCall.clear();
-
-  // Change the setjmp call into a branch statement. We'll remove the
-  // setjmp call in a little bit. No worries.
-  BasicBlock* SetJmpContBlock = ABlock->splitBasicBlock(Inst);
-  assert(SetJmpContBlock && "Couldn't split setjmp BB!!");
-
-  SetJmpContBlock->setName(ABlock->getName()+"SetJmpCont");
-
-  // Add the SetJmpContBlock to the set of blocks reachable from a setjmp.
-  DFSBlocks.insert(SetJmpContBlock);
-
-  // This PHI node will be in the new block created from the
-  // splitBasicBlock call.
-  PHINode* PHI = PHINode::Create(Type::getInt32Ty(Inst->getContext()), 2,
-                                 "SetJmpReturn", Inst);
-
-  // Coming from a call to setjmp, the return is 0.
-  PHI->addIncoming(Constant::getNullValue(Type::getInt32Ty(Inst->getContext())),
-                   ABlock);
-
-  // Add the case for this setjmp's number...
-  SwitchValuePair SVP = GetSJSwitch(Func, GetRethrowBB(Func));
-  SVP.first->addCase(ConstantInt::get(Type::getInt32Ty(Inst->getContext()),
-                                      SetJmpIDMap[Func] - 1),
-                     SetJmpContBlock);
-
-  // Value coming from the handling of the exception.
-  PHI->addIncoming(SVP.second, SVP.second->getParent());
-
-  // Replace all uses of this instruction with the PHI node created by
-  // the eradication of setjmp.
-  Inst->replaceAllUsesWith(PHI);
-  Inst->eraseFromParent();
-
-  ++SetJmpsTransformed;
-}
-
-// visitCallInst - This converts all LLVM call instructions into invoke
-// instructions. The except part of the invoke goes to the "LongJmpBlkPre"
-// that grabs the exception and proceeds to determine if it's a longjmp
-// exception or not.
-void LowerSetJmp::visitCallInst(CallInst& CI)
-{
-  if (CI.getCalledFunction())
-    if (!IsTransformableFunction(CI.getCalledFunction()->getName()) ||
-        CI.getCalledFunction()->isIntrinsic()) return;
-
-  BasicBlock* OldBB = CI.getParent();
-
-  // If not reachable from a setjmp call, don't transform.
- if (!DFSBlocks.count(OldBB)) return; - - BasicBlock* NewBB = OldBB->splitBasicBlock(CI); - assert(NewBB && "Couldn't split BB of \"call\" instruction!!"); - DFSBlocks.insert(NewBB); - NewBB->setName("Call2Invoke"); - - Function* Func = OldBB->getParent(); - - // Construct the new "invoke" instruction. - TerminatorInst* Term = OldBB->getTerminator(); - CallSite CS(&CI); - std::vector<Value*> Params(CS.arg_begin(), CS.arg_end()); - InvokeInst* II = - InvokeInst::Create(CI.getCalledValue(), NewBB, PrelimBBMap[Func], - Params, CI.getName(), Term); - II->setCallingConv(CI.getCallingConv()); - II->setAttributes(CI.getAttributes()); - - // Replace the old call inst with the invoke inst and remove the call. - CI.replaceAllUsesWith(II); - CI.eraseFromParent(); - - // The old terminator is useless now that we have the invoke inst. - Term->eraseFromParent(); - ++CallsTransformed; -} - -// visitInvokeInst - Converting the "invoke" instruction is fairly -// straight-forward. The old exception part is replaced by a query asking -// if this is a longjmp exception. If it is, then it goes to the longjmp -// exception blocks. Otherwise, control is passed the old exception. -void LowerSetJmp::visitInvokeInst(InvokeInst& II) -{ - if (II.getCalledFunction()) - if (!IsTransformableFunction(II.getCalledFunction()->getName()) || - II.getCalledFunction()->isIntrinsic()) return; - - BasicBlock* BB = II.getParent(); - - // If not reachable from a setjmp call, don't transform. - if (!DFSBlocks.count(BB)) return; - - BasicBlock* ExceptBB = II.getUnwindDest(); - - Function* Func = BB->getParent(); - BasicBlock* NewExceptBB = BasicBlock::Create(II.getContext(), - "InvokeExcept", Func); - - // If this is a longjmp exception, then branch to the preliminary BB of - // the longjmp exception handling. Otherwise, go to the old exception. - CallInst* IsLJExcept = CallInst::Create(IsLJException, "IsLJExcept", - NewExceptBB); - - BranchInst::Create(PrelimBBMap[Func], ExceptBB, IsLJExcept, NewExceptBB); - - II.setUnwindDest(NewExceptBB); - ++InvokesTransformed; -} - -// visitReturnInst - We want to destroy the setjmp map upon exit from the -// function. -void LowerSetJmp::visitReturnInst(ReturnInst &RI) { - Function* Func = RI.getParent()->getParent(); - CallInst::Create(DestroySJMap, GetSetJmpMap(Func), "", &RI); -} - -// visitUnwindInst - We want to destroy the setjmp map upon exit from the -// function. -void LowerSetJmp::visitUnwindInst(UnwindInst &UI) { - Function* Func = UI.getParent()->getParent(); - CallInst::Create(DestroySJMap, GetSetJmpMap(Func), "", &UI); -} - -ModulePass *llvm::createLowerSetJmpPass() { - return new LowerSetJmp(); -} - diff --git a/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp b/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp index 7796d05..0b01c38 100644 --- a/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp +++ b/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp @@ -76,7 +76,7 @@ STATISTIC(NumDoubleWeak, "Number of new functions created"); /// functions that will compare equal, without looking at the instructions /// inside the function. static unsigned profileFunction(const Function *F) { - const FunctionType *FTy = F->getFunctionType(); + FunctionType *FTy = F->getFunctionType(); FoldingSetNodeID ID; ID.AddInteger(F->size()); @@ -185,7 +185,7 @@ private: } /// Compare two Types, treating all pointer types as equal. - bool isEquivalentType(const Type *Ty1, const Type *Ty2) const; + bool isEquivalentType(Type *Ty1, Type *Ty2) const; // The two functions undergoing comparison. 
const Function *F1, *F2; @@ -200,8 +200,8 @@ private: // Any two pointers in the same address space are equivalent, intptr_t and // pointers are equivalent. Otherwise, standard type equivalence rules apply. -bool FunctionComparator::isEquivalentType(const Type *Ty1, - const Type *Ty2) const { +bool FunctionComparator::isEquivalentType(Type *Ty1, + Type *Ty2) const { if (Ty1 == Ty2) return true; if (Ty1->getTypeID() != Ty2->getTypeID()) { @@ -233,14 +233,14 @@ bool FunctionComparator::isEquivalentType(const Type *Ty1, return true; case Type::PointerTyID: { - const PointerType *PTy1 = cast<PointerType>(Ty1); - const PointerType *PTy2 = cast<PointerType>(Ty2); + PointerType *PTy1 = cast<PointerType>(Ty1); + PointerType *PTy2 = cast<PointerType>(Ty2); return PTy1->getAddressSpace() == PTy2->getAddressSpace(); } case Type::StructTyID: { - const StructType *STy1 = cast<StructType>(Ty1); - const StructType *STy2 = cast<StructType>(Ty2); + StructType *STy1 = cast<StructType>(Ty1); + StructType *STy2 = cast<StructType>(Ty2); if (STy1->getNumElements() != STy2->getNumElements()) return false; @@ -255,8 +255,8 @@ bool FunctionComparator::isEquivalentType(const Type *Ty1, } case Type::FunctionTyID: { - const FunctionType *FTy1 = cast<FunctionType>(Ty1); - const FunctionType *FTy2 = cast<FunctionType>(Ty2); + FunctionType *FTy1 = cast<FunctionType>(Ty1); + FunctionType *FTy2 = cast<FunctionType>(Ty2); if (FTy1->getNumParams() != FTy2->getNumParams() || FTy1->isVarArg() != FTy2->isVarArg()) return false; @@ -272,8 +272,8 @@ bool FunctionComparator::isEquivalentType(const Type *Ty1, } case Type::ArrayTyID: { - const ArrayType *ATy1 = cast<ArrayType>(Ty1); - const ArrayType *ATy2 = cast<ArrayType>(Ty2); + ArrayType *ATy1 = cast<ArrayType>(Ty1); + ArrayType *ATy2 = cast<ArrayType>(Ty2); return ATy1->getNumElements() == ATy2->getNumElements() && isEquivalentType(ATy1->getElementType(), ATy2->getElementType()); } @@ -305,10 +305,14 @@ bool FunctionComparator::isEquivalentOperation(const Instruction *I1, // Check special state that is a part of some instructions. 
if (const LoadInst *LI = dyn_cast<LoadInst>(I1)) return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() && - LI->getAlignment() == cast<LoadInst>(I2)->getAlignment(); + LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() && + LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() && + LI->getSynchScope() == cast<LoadInst>(I2)->getSynchScope(); if (const StoreInst *SI = dyn_cast<StoreInst>(I1)) return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() && - SI->getAlignment() == cast<StoreInst>(I2)->getAlignment(); + SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() && + SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() && + SI->getSynchScope() == cast<StoreInst>(I2)->getSynchScope(); if (const CmpInst *CI = dyn_cast<CmpInst>(I1)) return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate(); if (const CallInst *CI = dyn_cast<CallInst>(I1)) @@ -317,22 +321,22 @@ bool FunctionComparator::isEquivalentOperation(const Instruction *I1, if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1)) return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() && CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes(); - if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1)) { - if (IVI->getNumIndices() != cast<InsertValueInst>(I2)->getNumIndices()) - return false; - for (unsigned i = 0, e = IVI->getNumIndices(); i != e; ++i) - if (IVI->idx_begin()[i] != cast<InsertValueInst>(I2)->idx_begin()[i]) - return false; - return true; - } - if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1)) { - if (EVI->getNumIndices() != cast<ExtractValueInst>(I2)->getNumIndices()) - return false; - for (unsigned i = 0, e = EVI->getNumIndices(); i != e; ++i) - if (EVI->idx_begin()[i] != cast<ExtractValueInst>(I2)->idx_begin()[i]) - return false; - return true; - } + if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1)) + return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices(); + if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1)) + return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices(); + if (const FenceInst *FI = dyn_cast<FenceInst>(I1)) + return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() && + FI->getSynchScope() == cast<FenceInst>(I2)->getSynchScope(); + if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1)) + return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() && + CXI->getOrdering() == cast<AtomicCmpXchgInst>(I2)->getOrdering() && + CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I2)->getSynchScope(); + if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1)) + return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() && + RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() && + RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() && + RMWI->getSynchScope() == cast<AtomicRMWInst>(I2)->getSynchScope(); return true; } @@ -346,9 +350,9 @@ bool FunctionComparator::isEquivalentGEP(const GEPOperator *GEP1, SmallVector<Value *, 8> Indices1(GEP1->idx_begin(), GEP1->idx_end()); SmallVector<Value *, 8> Indices2(GEP2->idx_begin(), GEP2->idx_end()); uint64_t Offset1 = TD->getIndexedOffset(GEP1->getPointerOperandType(), - Indices1.data(), Indices1.size()); + Indices1); uint64_t Offset2 = TD->getIndexedOffset(GEP2->getPointerOperandType(), - Indices2.data(), Indices2.size()); + Indices2); return Offset1 == Offset2; } @@ -725,7 +729,7 @@ void MergeFunctions::writeThunk(Function *F, Function *G) { SmallVector<Value *, 16> Args; unsigned i = 0; - const 
FunctionType *FFTy = F->getFunctionType(); + FunctionType *FFTy = F->getFunctionType(); for (Function::arg_iterator AI = NewG->arg_begin(), AE = NewG->arg_end(); AI != AE; ++AI) { Args.push_back(Builder.CreateBitCast(AI, FFTy->getParamType(i))); diff --git a/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp b/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp new file mode 100644 index 0000000..8fdfd72 --- /dev/null +++ b/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp @@ -0,0 +1,343 @@ +//===- PassManagerBuilder.cpp - Build Standard Pass -----------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the PassManagerBuilder class, which is used to set up a +// "standard" optimization sequence suitable for languages like C and C++. +// +//===----------------------------------------------------------------------===// + + +#include "llvm/Transforms/IPO/PassManagerBuilder.h" + +#include "llvm-c/Transforms/PassManagerBuilder.h" + +#include "llvm/PassManager.h" +#include "llvm/DefaultPasses.h" +#include "llvm/PassManager.h" +#include "llvm/Analysis/Passes.h" +#include "llvm/Analysis/Verifier.h" +#include "llvm/Target/TargetLibraryInfo.h" +#include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/IPO.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Support/ManagedStatic.h" + +using namespace llvm; + +PassManagerBuilder::PassManagerBuilder() { + OptLevel = 2; + SizeLevel = 0; + LibraryInfo = 0; + Inliner = 0; + DisableSimplifyLibCalls = false; + DisableUnitAtATime = false; + DisableUnrollLoops = false; +} + +PassManagerBuilder::~PassManagerBuilder() { + delete LibraryInfo; + delete Inliner; +} + +/// Set of global extensions, automatically added as part of the standard set. +static ManagedStatic<SmallVector<std::pair<PassManagerBuilder::ExtensionPointTy, + PassManagerBuilder::ExtensionFn>, 8> > GlobalExtensions; + +void PassManagerBuilder::addGlobalExtension( + PassManagerBuilder::ExtensionPointTy Ty, + PassManagerBuilder::ExtensionFn Fn) { + GlobalExtensions->push_back(std::make_pair(Ty, Fn)); +} + +void PassManagerBuilder::addExtension(ExtensionPointTy Ty, ExtensionFn Fn) { + Extensions.push_back(std::make_pair(Ty, Fn)); +} + +void PassManagerBuilder::addExtensionsToPM(ExtensionPointTy ETy, + PassManagerBase &PM) const { + for (unsigned i = 0, e = GlobalExtensions->size(); i != e; ++i) + if ((*GlobalExtensions)[i].first == ETy) + (*GlobalExtensions)[i].second(*this, PM); + for (unsigned i = 0, e = Extensions.size(); i != e; ++i) + if (Extensions[i].first == ETy) + Extensions[i].second(*this, PM); +} + +void +PassManagerBuilder::addInitialAliasAnalysisPasses(PassManagerBase &PM) const { + // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that + // BasicAliasAnalysis wins if they disagree. This is intended to help + // support "obvious" type-punning idioms. + PM.add(createTypeBasedAliasAnalysisPass()); + PM.add(createBasicAliasAnalysisPass()); +} + +void PassManagerBuilder::populateFunctionPassManager(FunctionPassManager &FPM) { + addExtensionsToPM(EP_EarlyAsPossible, FPM); + + // Add LibraryInfo if we have some. 
+ if (LibraryInfo) FPM.add(new TargetLibraryInfo(*LibraryInfo)); + + if (OptLevel == 0) return; + + addInitialAliasAnalysisPasses(FPM); + + FPM.add(createCFGSimplificationPass()); + FPM.add(createScalarReplAggregatesPass()); + FPM.add(createEarlyCSEPass()); + FPM.add(createLowerExpectIntrinsicPass()); +} + +void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) { + // If all optimizations are disabled, just run the always-inline pass. + if (OptLevel == 0) { + if (Inliner) { + MPM.add(Inliner); + Inliner = 0; + } + return; + } + + // Add LibraryInfo if we have some. + if (LibraryInfo) MPM.add(new TargetLibraryInfo(*LibraryInfo)); + + addInitialAliasAnalysisPasses(MPM); + + if (!DisableUnitAtATime) { + MPM.add(createGlobalOptimizerPass()); // Optimize out global vars + + MPM.add(createIPSCCPPass()); // IP SCCP + MPM.add(createDeadArgEliminationPass()); // Dead argument elimination + + MPM.add(createInstructionCombiningPass());// Clean up after IPCP & DAE + MPM.add(createCFGSimplificationPass()); // Clean up after IPCP & DAE + } + + // Start of CallGraph SCC passes. + if (!DisableUnitAtATime) + MPM.add(createPruneEHPass()); // Remove dead EH info + if (Inliner) { + MPM.add(Inliner); + Inliner = 0; + } + if (!DisableUnitAtATime) + MPM.add(createFunctionAttrsPass()); // Set readonly/readnone attrs + if (OptLevel > 2) + MPM.add(createArgumentPromotionPass()); // Scalarize uninlined fn args + + // Start of function pass. + // Break up aggregate allocas, using SSAUpdater. + MPM.add(createScalarReplAggregatesPass(-1, false)); + MPM.add(createEarlyCSEPass()); // Catch trivial redundancies + if (!DisableSimplifyLibCalls) + MPM.add(createSimplifyLibCallsPass()); // Library Call Optimizations + MPM.add(createJumpThreadingPass()); // Thread jumps. + MPM.add(createCorrelatedValuePropagationPass()); // Propagate conditionals + MPM.add(createCFGSimplificationPass()); // Merge & remove BBs + MPM.add(createInstructionCombiningPass()); // Combine silly seq's + + MPM.add(createTailCallEliminationPass()); // Eliminate tail calls + MPM.add(createCFGSimplificationPass()); // Merge & remove BBs + MPM.add(createReassociatePass()); // Reassociate expressions + MPM.add(createLoopRotatePass()); // Rotate Loop + MPM.add(createLICMPass()); // Hoist loop invariants + MPM.add(createLoopUnswitchPass(SizeLevel || OptLevel < 3)); + MPM.add(createInstructionCombiningPass()); + MPM.add(createIndVarSimplifyPass()); // Canonicalize indvars + MPM.add(createLoopIdiomPass()); // Recognize idioms like memset. + MPM.add(createLoopDeletionPass()); // Delete dead loops + if (!DisableUnrollLoops) + MPM.add(createLoopUnrollPass()); // Unroll small loops + addExtensionsToPM(EP_LoopOptimizerEnd, MPM); + + if (OptLevel > 1) + MPM.add(createGVNPass()); // Remove redundancies + MPM.add(createMemCpyOptPass()); // Remove memcpy / form memset + MPM.add(createSCCPPass()); // Constant prop with SCCP + + // Run instcombine after redundancy elimination to exploit opportunities + // opened up by them. + MPM.add(createInstructionCombiningPass()); + MPM.add(createJumpThreadingPass()); // Thread jumps + MPM.add(createCorrelatedValuePropagationPass()); + MPM.add(createDeadStoreEliminationPass()); // Delete dead stores + + addExtensionsToPM(EP_ScalarOptimizerLate, MPM); + + MPM.add(createAggressiveDCEPass()); // Delete dead instructions + MPM.add(createCFGSimplificationPass()); // Merge & remove BBs + MPM.add(createInstructionCombiningPass()); // Clean up after everything. 
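As a sketch of how a frontend hooks the extension points this function fires — addExtension and the callback shape are taken from the code in this file; the callback name is hypothetical, and the pass it registers is just one of the passes shown above:

static void addLateCleanups(const PassManagerBuilder &Builder,
                            PassManagerBase &PM) {
  // Fires at EP_ScalarOptimizerLate, i.e. before the final ADCE /
  // CFG-simplify / instcombine cleanups added above.
  PM.add(createDeadStoreEliminationPass());
}

// Registered once on the builder, the callback runs in every pipeline
// this builder populates:
//   PMB.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
//                    addLateCleanups);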
+ + if (!DisableUnitAtATime) { + // FIXME: We shouldn't bother with this anymore. + MPM.add(createStripDeadPrototypesPass()); // Get rid of dead prototypes + + // GlobalOpt already deletes dead functions and globals, at -O3 try a + // late pass of GlobalDCE. It is capable of deleting dead cycles. + if (OptLevel > 2) + MPM.add(createGlobalDCEPass()); // Remove dead fns and globals. + + if (OptLevel > 1) + MPM.add(createConstantMergePass()); // Merge dup global constants + } +} + +void PassManagerBuilder::populateLTOPassManager(PassManagerBase &PM, + bool Internalize, + bool RunInliner) { + // Provide AliasAnalysis services for optimizations. + addInitialAliasAnalysisPasses(PM); + + // Now that composite has been compiled, scan through the module, looking + // for a main function. If main is defined, mark all other functions + // internal. + if (Internalize) + PM.add(createInternalizePass(true)); + + // Propagate constants at call sites into the functions they call. This + // opens opportunities for globalopt (and inlining) by substituting function + // pointers passed as arguments to direct uses of functions. + PM.add(createIPSCCPPass()); + + // Now that we internalized some globals, see if we can hack on them! + PM.add(createGlobalOptimizerPass()); + + // Linking modules together can lead to duplicated global constants, only + // keep one copy of each constant. + PM.add(createConstantMergePass()); + + // Remove unused arguments from functions. + PM.add(createDeadArgEliminationPass()); + + // Reduce the code after globalopt and ipsccp. Both can open up significant + // simplification opportunities, and both can propagate functions through + // function pointers. When this happens, we often have to resolve varargs + // calls, etc, so let instcombine do this. + PM.add(createInstructionCombiningPass()); + + // Inline small functions + if (RunInliner) + PM.add(createFunctionInliningPass()); + + PM.add(createPruneEHPass()); // Remove dead EH info. + + // Optimize globals again if we ran the inliner. + if (RunInliner) + PM.add(createGlobalOptimizerPass()); + PM.add(createGlobalDCEPass()); // Remove dead functions. + + // If we didn't decide to inline a function, check to see if we can + // transform it to pass arguments by value instead of by reference. + PM.add(createArgumentPromotionPass()); + + // The IPO passes may leave cruft around. Clean up after them. + PM.add(createInstructionCombiningPass()); + PM.add(createJumpThreadingPass()); + // Break up allocas + PM.add(createScalarReplAggregatesPass()); + + // Run a few AA driven optimizations here and now, to cleanup the code. + PM.add(createFunctionAttrsPass()); // Add nocapture. + PM.add(createGlobalsModRefPass()); // IP alias analysis. + + PM.add(createLICMPass()); // Hoist loop invariants. + PM.add(createGVNPass()); // Remove redundancies. + PM.add(createMemCpyOptPass()); // Remove dead memcpys. + // Nuke dead stores. + PM.add(createDeadStoreEliminationPass()); + + // Cleanup and simplify the code after the scalar optimizations. + PM.add(createInstructionCombiningPass()); + + PM.add(createJumpThreadingPass()); + + // Delete basic blocks, which optimization passes may have killed. + PM.add(createCFGSimplificationPass()); + + // Now that we have optimized the program, discard unreachable functions. 
+ PM.add(createGlobalDCEPass()); +} + +LLVMPassManagerBuilderRef LLVMPassManagerBuilderCreate(void) { + PassManagerBuilder *PMB = new PassManagerBuilder(); + return wrap(PMB); +} + +void LLVMPassManagerBuilderDispose(LLVMPassManagerBuilderRef PMB) { + PassManagerBuilder *Builder = unwrap(PMB); + delete Builder; +} + +void +LLVMPassManagerBuilderSetOptLevel(LLVMPassManagerBuilderRef PMB, + unsigned OptLevel) { + PassManagerBuilder *Builder = unwrap(PMB); + Builder->OptLevel = OptLevel; +} + +void +LLVMPassManagerBuilderSetSizeLevel(LLVMPassManagerBuilderRef PMB, + unsigned SizeLevel) { + PassManagerBuilder *Builder = unwrap(PMB); + Builder->SizeLevel = SizeLevel; +} + +void +LLVMPassManagerBuilderSetDisableUnitAtATime(LLVMPassManagerBuilderRef PMB, + LLVMBool Value) { + PassManagerBuilder *Builder = unwrap(PMB); + Builder->DisableUnitAtATime = Value; +} + +void +LLVMPassManagerBuilderSetDisableUnrollLoops(LLVMPassManagerBuilderRef PMB, + LLVMBool Value) { + PassManagerBuilder *Builder = unwrap(PMB); + Builder->DisableUnrollLoops = Value; +} + +void +LLVMPassManagerBuilderSetDisableSimplifyLibCalls(LLVMPassManagerBuilderRef PMB, + LLVMBool Value) { + PassManagerBuilder *Builder = unwrap(PMB); + Builder->DisableSimplifyLibCalls = Value; +} + +void +LLVMPassManagerBuilderUseInlinerWithThreshold(LLVMPassManagerBuilderRef PMB, + unsigned Threshold) { + PassManagerBuilder *Builder = unwrap(PMB); + Builder->Inliner = createFunctionInliningPass(Threshold); +} + +void +LLVMPassManagerBuilderPopulateFunctionPassManager(LLVMPassManagerBuilderRef PMB, + LLVMPassManagerRef PM) { + PassManagerBuilder *Builder = unwrap(PMB); + FunctionPassManager *FPM = unwrap<FunctionPassManager>(PM); + Builder->populateFunctionPassManager(*FPM); +} + +void +LLVMPassManagerBuilderPopulateModulePassManager(LLVMPassManagerBuilderRef PMB, + LLVMPassManagerRef PM) { + PassManagerBuilder *Builder = unwrap(PMB); + PassManagerBase *MPM = unwrap(PM); + Builder->populateModulePassManager(*MPM); +} + +void LLVMPassManagerBuilderPopulateLTOPassManager(LLVMPassManagerBuilderRef PMB, + LLVMPassManagerRef PM, + bool Internalize, + bool RunInliner) { + PassManagerBuilder *Builder = unwrap(PMB); + PassManagerBase *LPM = unwrap(PM); + Builder->populateLTOPassManager(*LPM, Internalize, RunInliner); +} + diff --git a/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp b/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp index b7e63dc..cbb80f0 100644 --- a/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp +++ b/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp @@ -101,8 +101,9 @@ bool PruneEH::runOnSCC(CallGraphSCC &SCC) { // Check to see if this function performs an unwind or calls an // unwinding function. for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) { - if (CheckUnwind && isa<UnwindInst>(BB->getTerminator())) { - // Uses unwind! + if (CheckUnwind && (isa<UnwindInst>(BB->getTerminator()) || + isa<ResumeInst>(BB->getTerminator()))) { + // Uses unwind / resume! 
      SCCMightUnwind = true;
     } else if (CheckReturn && isa<ReturnInst>(BB->getTerminator())) {
       SCCMightReturn = true;
diff --git a/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp b/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
index 0fbaff1..b5caa9a 100644
--- a/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
@@ -180,7 +180,7 @@ static void StripTypeNames(Module &M, bool PreserveDbgInfo) {
 
   for (unsigned i = 0, e = StructTypes.size(); i != e; ++i) {
     StructType *STy = StructTypes[i];
-    if (STy->isAnonymous() || STy->getName().empty()) continue;
+    if (STy->isLiteral() || STy->getName().empty()) continue;
 
     if (PreserveDbgInfo && STy->getName().startswith("llvm.dbg"))
       continue;
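To close, a minimal sketch of driving the new PassManagerBuilder from a frontend, assuming the legacy PassManager/FunctionPassManager interfaces of the same era. The fields and functions used (OptLevel, SizeLevel, Inliner, createFunctionInliningPass, populateFunctionPassManager, populateModulePassManager) all appear in the diff above; the inliner threshold value is illustrative, not a recommendation. The C bindings above (LLVMPassManagerBuilderCreate and friends) wrap this same flow.

#include "llvm/Module.h"
#include "llvm/PassManager.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"

using namespace llvm;

// Build and run a -O2-style pipeline over a module.
static void optimizeModule(Module &M) {
  PassManagerBuilder PMB;
  PMB.OptLevel = 2;                               // roughly -O2
  PMB.SizeLevel = 0;
  PMB.Inliner = createFunctionInliningPass(225);  // threshold is illustrative;
                                                  // the builder takes ownership

  FunctionPassManager FPM(&M);                    // per-function early cleanups
  PMB.populateFunctionPassManager(FPM);

  PassManager MPM;                                // module-level pipeline
  PMB.populateModulePassManager(MPM);             // consumes PMB.Inliner

  FPM.doInitialization();
  for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F)
    FPM.run(*F);
  FPM.doFinalization();

  MPM.run(M);
}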