Diffstat (limited to 'contrib/llvm/tools/clang/lib/Analysis')
13 files changed, 2139 insertions, 927 deletions
diff --git a/contrib/llvm/tools/clang/lib/Analysis/AnalysisContext.cpp b/contrib/llvm/tools/clang/lib/Analysis/AnalysisContext.cpp index 678f02f..3dd194b 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/AnalysisContext.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/AnalysisContext.cpp @@ -24,25 +24,44 @@ #include "clang/Analysis/CFG.h" #include "clang/Analysis/CFGStmtMap.h" #include "clang/Analysis/Support/BumpVector.h" +#include "clang/Analysis/Support/SaveAndRestore.h" #include "llvm/ADT/SmallSet.h" #include "llvm/Support/ErrorHandling.h" using namespace clang; +typedef llvm::DenseMap<const void *, ManagedAnalysis *> ManagedAnalysisMap; + AnalysisContext::AnalysisContext(const Decl *d, idx::TranslationUnit *tu, - bool useUnoptimizedCFG, - bool addehedges, - bool addImplicitDtors, - bool addInitializers) + const CFG::BuildOptions &buildOptions) : D(d), TU(tu), + cfgBuildOptions(buildOptions), forcedBlkExprs(0), - builtCFG(false), builtCompleteCFG(false), - useUnoptimizedCFG(useUnoptimizedCFG), - ReferencedBlockVars(0) -{ + builtCFG(false), + builtCompleteCFG(false), + ReferencedBlockVars(0), + ManagedAnalyses(0) +{ + cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs; +} + +AnalysisContext::AnalysisContext(const Decl *d, + idx::TranslationUnit *tu) +: D(d), TU(tu), + forcedBlkExprs(0), + builtCFG(false), + builtCompleteCFG(false), + ReferencedBlockVars(0), + ManagedAnalyses(0) +{ cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs; - cfgBuildOptions.AddEHEdges = addehedges; +} + +AnalysisContextManager::AnalysisContextManager(bool useUnoptimizedCFG, + bool addImplicitDtors, + bool addInitializers) { + cfgBuildOptions.PruneTriviallyFalseEdges = !useUnoptimizedCFG; cfgBuildOptions.AddImplicitDtors = addImplicitDtors; cfgBuildOptions.AddInitializers = addInitializers; } @@ -53,7 +72,7 @@ void AnalysisContextManager::clear() { Contexts.clear(); } -Stmt *AnalysisContext::getBody() { +Stmt *AnalysisContext::getBody() const { if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) return FD->getBody(); else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) @@ -95,7 +114,7 @@ AnalysisContext::getBlockForRegisteredExpression(const Stmt *stmt) { } CFG *AnalysisContext::getCFG() { - if (useUnoptimizedCFG) + if (!cfgBuildOptions.PruneTriviallyFalseEdges) return getUnoptimizedCFG(); if (!builtCFG) { @@ -110,9 +129,10 @@ CFG *AnalysisContext::getCFG() { CFG *AnalysisContext::getUnoptimizedCFG() { if (!builtCompleteCFG) { - CFG::BuildOptions B = cfgBuildOptions; - B.PruneTriviallyFalseEdges = false; - completeCFG.reset(CFG::buildCFG(D, getBody(), &D->getASTContext(), B)); + SaveAndRestore<bool> NotPrune(cfgBuildOptions.PruneTriviallyFalseEdges, + false); + completeCFG.reset(CFG::buildCFG(D, getBody(), &D->getASTContext(), + cfgBuildOptions)); // Even when the cfg is not successfully built, we don't // want to try building it again. 
builtCompleteCFG = true; @@ -160,36 +180,11 @@ PseudoConstantAnalysis *AnalysisContext::getPseudoConstantAnalysis() { return PCA.get(); } -LiveVariables *AnalysisContext::getLiveVariables() { - if (!liveness) { - if (CFG *c = getCFG()) { - liveness.reset(new LiveVariables(*this)); - liveness->runOnCFG(*c); - liveness->runOnAllBlocks(*c, 0, true); - } - } - - return liveness.get(); -} - -LiveVariables *AnalysisContext::getRelaxedLiveVariables() { - if (!relaxedLiveness) - if (CFG *c = getCFG()) { - relaxedLiveness.reset(new LiveVariables(*this, false)); - relaxedLiveness->runOnCFG(*c); - relaxedLiveness->runOnAllBlocks(*c, 0, true); - } - - return relaxedLiveness.get(); -} - AnalysisContext *AnalysisContextManager::getContext(const Decl *D, idx::TranslationUnit *TU) { AnalysisContext *&AC = Contexts[D]; if (!AC) - AC = new AnalysisContext(D, TU, UseUnoptimizedCFG, false, - AddImplicitDtors, AddInitializers); - + AC = new AnalysisContext(D, TU, cfgBuildOptions); return AC; } @@ -201,7 +196,7 @@ void LocationContext::ProfileCommon(llvm::FoldingSetNodeID &ID, ContextKind ck, AnalysisContext *ctx, const LocationContext *parent, - const void* data) { + const void *data) { ID.AddInteger(ck); ID.AddPointer(ctx); ID.AddPointer(parent); @@ -392,13 +387,29 @@ AnalysisContext::getReferencedBlockVars(const BlockDecl *BD) { return std::make_pair(V->begin(), V->end()); } +ManagedAnalysis *&AnalysisContext::getAnalysisImpl(const void *tag) { + if (!ManagedAnalyses) + ManagedAnalyses = new ManagedAnalysisMap(); + ManagedAnalysisMap *M = (ManagedAnalysisMap*) ManagedAnalyses; + return (*M)[tag]; +} + //===----------------------------------------------------------------------===// // Cleanup. //===----------------------------------------------------------------------===// +ManagedAnalysis::~ManagedAnalysis() {} + AnalysisContext::~AnalysisContext() { delete forcedBlkExprs; delete ReferencedBlockVars; + // Release the managed analyses. 
+ if (ManagedAnalyses) { + ManagedAnalysisMap *M = (ManagedAnalysisMap*) ManagedAnalyses; + for (ManagedAnalysisMap::iterator I = M->begin(), E = M->end(); I!=E; ++I) + delete I->second; + delete M; + } } AnalysisContextManager::~AnalysisContextManager() { diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp index f231c14..83c7384 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp @@ -29,9 +29,9 @@ using namespace clang; namespace { -static SourceLocation GetEndLoc(Decl* D) { - if (VarDecl* VD = dyn_cast<VarDecl>(D)) - if (Expr* Ex = VD->getInit()) +static SourceLocation GetEndLoc(Decl *D) { + if (VarDecl *VD = dyn_cast<VarDecl>(D)) + if (Expr *Ex = VD->getInit()) return Ex->getSourceRange().getEnd(); return D->getLocation(); } @@ -121,16 +121,16 @@ public: *this = Scope->Prev; } - VarDecl* const* operator->() const { + VarDecl *const* operator->() const { assert (Scope && "Dereferencing invalid iterator is not allowed"); assert (VarIter != 0 && "Iterator has invalid value of VarIter member"); return &Scope->Vars[VarIter - 1]; } - VarDecl* operator*() const { + VarDecl *operator*() const { return *this->operator->(); } - const_iterator& operator++() { + const_iterator &operator++() { if (!Scope) return *this; @@ -146,10 +146,10 @@ public: return P; } - bool operator==(const const_iterator& rhs) const { + bool operator==(const const_iterator &rhs) const { return Scope == rhs.Scope && VarIter == rhs.VarIter; } - bool operator!=(const const_iterator& rhs) const { + bool operator!=(const const_iterator &rhs) const { return !(*this == rhs); } @@ -179,7 +179,7 @@ public: /// Begin of scope in direction of CFG building (backwards). const_iterator begin() const { return const_iterator(*this, Vars.size()); } - void addVar(VarDecl* VD) { + void addVar(VarDecl *VD) { Vars.push_back(VD, ctx); } }; @@ -205,7 +205,7 @@ int LocalScope::const_iterator::distance(LocalScope::const_iterator L) { /// and LocalScope::const_iterator that specifies position in LocalScope graph. struct BlockScopePosPair { BlockScopePosPair() : block(0) {} - BlockScopePosPair(CFGBlock* b, LocalScope::const_iterator scopePos) + BlockScopePosPair(CFGBlock *b, LocalScope::const_iterator scopePos) : block(b), scopePosition(scopePos) {} CFGBlock *block; @@ -252,13 +252,13 @@ class CFGBuilder { ASTContext *Context; llvm::OwningPtr<CFG> cfg; - CFGBlock* Block; - CFGBlock* Succ; + CFGBlock *Block; + CFGBlock *Succ; JumpTarget ContinueJumpTarget; JumpTarget BreakJumpTarget; - CFGBlock* SwitchTerminatedBlock; - CFGBlock* DefaultCaseBlock; - CFGBlock* TryTerminatedBlock; + CFGBlock *SwitchTerminatedBlock; + CFGBlock *DefaultCaseBlock; + CFGBlock *TryTerminatedBlock; // Current position in local scope. LocalScope::const_iterator ScopePos; @@ -305,7 +305,7 @@ private: // Visitors to walk an AST and construct the CFG. 
CFGBlock *VisitAddrLabelExpr(AddrLabelExpr *A, AddStmtChoice asc); CFGBlock *VisitBinaryOperator(BinaryOperator *B, AddStmtChoice asc); - CFGBlock *VisitBlockExpr(BlockExpr* E, AddStmtChoice asc); + CFGBlock *VisitBlockExpr(BlockExpr *E, AddStmtChoice asc); CFGBlock *VisitBreakStmt(BreakStmt *B); CFGBlock *VisitCXXCatchStmt(CXXCatchStmt *S); CFGBlock *VisitExprWithCleanups(ExprWithCleanups *E, @@ -328,11 +328,11 @@ private: AddStmtChoice asc); CFGBlock *VisitContinueStmt(ContinueStmt *C); CFGBlock *VisitDeclStmt(DeclStmt *DS); - CFGBlock *VisitDeclSubExpr(DeclStmt* DS); + CFGBlock *VisitDeclSubExpr(DeclStmt *DS); CFGBlock *VisitDefaultStmt(DefaultStmt *D); CFGBlock *VisitDoStmt(DoStmt *D); CFGBlock *VisitForStmt(ForStmt *F); - CFGBlock *VisitGotoStmt(GotoStmt* G); + CFGBlock *VisitGotoStmt(GotoStmt *G); CFGBlock *VisitIfStmt(IfStmt *I); CFGBlock *VisitImplicitCastExpr(ImplicitCastExpr *E, AddStmtChoice asc); CFGBlock *VisitIndirectGotoStmt(IndirectGotoStmt *I); @@ -343,7 +343,7 @@ private: CFGBlock *VisitObjCAtThrowStmt(ObjCAtThrowStmt *S); CFGBlock *VisitObjCAtTryStmt(ObjCAtTryStmt *S); CFGBlock *VisitObjCForCollectionStmt(ObjCForCollectionStmt *S); - CFGBlock *VisitReturnStmt(ReturnStmt* R); + CFGBlock *VisitReturnStmt(ReturnStmt *R); CFGBlock *VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E, AddStmtChoice asc); CFGBlock *VisitStmtExpr(StmtExpr *S, AddStmtChoice asc); @@ -353,7 +353,7 @@ private: CFGBlock *Visit(Stmt *S, AddStmtChoice asc = AddStmtChoice::NotAlwaysAdd); CFGBlock *VisitStmt(Stmt *S, AddStmtChoice asc); - CFGBlock *VisitChildren(Stmt* S); + CFGBlock *VisitChildren(Stmt *S); // Visitors to walk an AST and generate destructors of temporaries in // full expression. @@ -367,34 +367,35 @@ private: bool BindToTemporary); // NYS == Not Yet Supported - CFGBlock* NYS() { + CFGBlock *NYS() { badCFG = true; return Block; } void autoCreateBlock() { if (!Block) Block = createBlock(); } CFGBlock *createBlock(bool add_successor = true); + CFGBlock *createNoReturnBlock(); CFGBlock *addStmt(Stmt *S) { return Visit(S, AddStmtChoice::AlwaysAdd); } CFGBlock *addInitializer(CXXCtorInitializer *I); void addAutomaticObjDtors(LocalScope::const_iterator B, - LocalScope::const_iterator E, Stmt* S); + LocalScope::const_iterator E, Stmt *S); void addImplicitDtorsForDestructor(const CXXDestructorDecl *DD); // Local scopes creation. LocalScope* createOrReuseLocalScope(LocalScope* Scope); - void addLocalScopeForStmt(Stmt* S); - LocalScope* addLocalScopeForDeclStmt(DeclStmt* DS, LocalScope* Scope = NULL); - LocalScope* addLocalScopeForVarDecl(VarDecl* VD, LocalScope* Scope = NULL); + void addLocalScopeForStmt(Stmt *S); + LocalScope* addLocalScopeForDeclStmt(DeclStmt *DS, LocalScope* Scope = NULL); + LocalScope* addLocalScopeForVarDecl(VarDecl *VD, LocalScope* Scope = NULL); - void addLocalScopeAndDtors(Stmt* S); + void addLocalScopeAndDtors(Stmt *S); // Interface to CFGBlock - adding CFGElements. void appendStmt(CFGBlock *B, const Stmt *S) { - if (alwaysAdd(S)) + if (alwaysAdd(S) && cachedEntry) cachedEntry->second = B; // All block-level expressions should have already been IgnoreParens()ed. 
@@ -413,12 +414,11 @@ private: void appendTemporaryDtor(CFGBlock *B, CXXBindTemporaryExpr *E) { B->appendTemporaryDtor(E, cfg->getBumpVectorContext()); } + void appendAutomaticObjDtor(CFGBlock *B, VarDecl *VD, Stmt *S) { + B->appendAutomaticObjDtor(VD, S, cfg->getBumpVectorContext()); + } - void insertAutomaticObjDtors(CFGBlock* Blk, CFGBlock::iterator I, - LocalScope::const_iterator B, LocalScope::const_iterator E, Stmt* S); - void appendAutomaticObjDtors(CFGBlock* Blk, LocalScope::const_iterator B, - LocalScope::const_iterator E, Stmt* S); - void prependAutomaticObjDtorsWithTerminator(CFGBlock* Blk, + void prependAutomaticObjDtorsWithTerminator(CFGBlock *Blk, LocalScope::const_iterator B, LocalScope::const_iterator E); void addSuccessor(CFGBlock *B, CFGBlock *S) { @@ -437,20 +437,12 @@ private: /// tryEvaluateBool - Try and evaluate the Stmt and return 0 or 1 /// if we can evaluate to a known value, otherwise return -1. TryResult tryEvaluateBool(Expr *S) { - Expr::EvalResult Result; - if (!tryEvaluate(S, Result)) + bool Result; + if (!BuildOpts.PruneTriviallyFalseEdges || + S->isTypeDependent() || S->isValueDependent() || + !S->EvaluateAsBooleanCondition(Result, *Context)) return TryResult(); - - if (Result.Val.isInt()) - return Result.Val.getInt().getBoolValue(); - - if (Result.Val.isLValue()) { - const Expr *e = Result.Val.getLValueBase(); - const CharUnits &c = Result.Val.getLValueOffset(); - if (!e && c.isZero()) - return false; - } - return TryResult(); + return Result; } }; @@ -461,15 +453,17 @@ inline bool AddStmtChoice::alwaysAdd(CFGBuilder &builder, } bool CFGBuilder::alwaysAdd(const Stmt *stmt) { + bool shouldAdd = BuildOpts.alwaysAdd(stmt); + if (!BuildOpts.forcedBlkExprs) - return false; + return shouldAdd; if (lastLookup == stmt) { if (cachedEntry) { assert(cachedEntry->first == stmt); return true; } - return false; + return shouldAdd; } lastLookup = stmt; @@ -480,13 +474,13 @@ bool CFGBuilder::alwaysAdd(const Stmt *stmt) { if (!fb) { // No need to update 'cachedEntry', since it will always be null. assert(cachedEntry == 0); - return false; + return shouldAdd; } CFG::BuildOptions::ForcedBlkExprs::iterator itr = fb->find(stmt); if (itr == fb->end()) { cachedEntry = 0; - return false; + return shouldAdd; } cachedEntry = &*itr; @@ -512,7 +506,7 @@ static const VariableArrayType *FindVA(const Type *t) { /// body (compound statement). The ownership of the returned CFG is /// transferred to the caller. If CFG construction fails, this method returns /// NULL. -CFG* CFGBuilder::buildCFG(const Decl *D, Stmt* Statement) { +CFG* CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) { assert(cfg.get()); if (!Statement) return NULL; @@ -552,8 +546,8 @@ CFG* CFGBuilder::buildCFG(const Decl *D, Stmt* Statement) { for (BackpatchBlocksTy::iterator I = BackpatchBlocks.begin(), E = BackpatchBlocks.end(); I != E; ++I ) { - CFGBlock* B = I->block; - GotoStmt* G = cast<GotoStmt>(B->getTerminator()); + CFGBlock *B = I->block; + GotoStmt *G = cast<GotoStmt>(B->getTerminator()); LabelMapTy::iterator LI = LabelMap.find(G->getLabel()); // If there is no target for the goto, then we are looking at an @@ -567,7 +561,7 @@ CFG* CFGBuilder::buildCFG(const Decl *D, Stmt* Statement) { } // Add successors to the Indirect Goto Dispatch block (if we have one). 
- if (CFGBlock* B = cfg->getIndirectGotoBlock()) + if (CFGBlock *B = cfg->getIndirectGotoBlock()) for (LabelSetTy::iterator I = AddressTakenLabels.begin(), E = AddressTakenLabels.end(); I != E; ++I ) { @@ -589,13 +583,23 @@ CFG* CFGBuilder::buildCFG(const Decl *D, Stmt* Statement) { /// createBlock - Used to lazily create blocks that are connected /// to the current (global) succcessor. -CFGBlock* CFGBuilder::createBlock(bool add_successor) { - CFGBlock* B = cfg->createBlock(); +CFGBlock *CFGBuilder::createBlock(bool add_successor) { + CFGBlock *B = cfg->createBlock(); if (add_successor && Succ) addSuccessor(B, Succ); return B; } +/// createNoReturnBlock - Used to create a block is a 'noreturn' point in the +/// CFG. It is *not* connected to the current (global) successor, and instead +/// directly tied to the exit block in order to be reachable. +CFGBlock *CFGBuilder::createNoReturnBlock() { + CFGBlock *B = createBlock(false); + B->setHasNoReturnElement(); + addSuccessor(B, &cfg->getExit()); + return B; +} + /// addInitializer - Add C++ base or member initializer element to CFG. CFGBlock *CFGBuilder::addInitializer(CXXCtorInitializer *I) { if (!BuildOpts.AddInitializers) @@ -638,15 +642,41 @@ CFGBlock *CFGBuilder::addInitializer(CXXCtorInitializer *I) { /// for objects in range of local scope positions. Use S as trigger statement /// for destructors. void CFGBuilder::addAutomaticObjDtors(LocalScope::const_iterator B, - LocalScope::const_iterator E, Stmt* S) { + LocalScope::const_iterator E, Stmt *S) { if (!BuildOpts.AddImplicitDtors) return; if (B == E) return; - autoCreateBlock(); - appendAutomaticObjDtors(Block, B, E, S); + CFGBlock::iterator InsertPos; + + // We need to append the destructors in reverse order, but any one of them + // may be a no-return destructor which changes the CFG. As a result, buffer + // this sequence up and replay them in reverse order when appending onto the + // CFGBlock(s). + SmallVector<VarDecl*, 10> Decls; + Decls.reserve(B.distance(E)); + for (LocalScope::const_iterator I = B; I != E; ++I) + Decls.push_back(*I); + + for (SmallVectorImpl<VarDecl*>::reverse_iterator I = Decls.rbegin(), + E = Decls.rend(); + I != E; ++I) { + // If this destructor is marked as a no-return destructor, we need to + // create a new block for the destructor which does not have as a successor + // anything built thus far: control won't flow out of this block. + QualType Ty = (*I)->getType().getNonReferenceType(); + if (const ArrayType *AT = Context->getAsArrayType(Ty)) + Ty = AT->getElementType(); + const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); + if (cast<FunctionType>(Dtor->getType())->getNoReturnAttr()) + Block = createNoReturnBlock(); + else + autoCreateBlock(); + + appendAutomaticObjDtor(Block, *I, S); + } } /// addImplicitDtorsForDestructor - Add implicit destructors generated for @@ -711,7 +741,7 @@ LocalScope* CFGBuilder::createOrReuseLocalScope(LocalScope* Scope) { /// addLocalScopeForStmt - Add LocalScope to local scopes tree for statement /// that should create implicit scope (e.g. if/else substatements). 
-void CFGBuilder::addLocalScopeForStmt(Stmt* S) { +void CFGBuilder::addLocalScopeForStmt(Stmt *S) { if (!BuildOpts.AddImplicitDtors) return; @@ -721,9 +751,7 @@ void CFGBuilder::addLocalScopeForStmt(Stmt* S) { if (CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) { for (CompoundStmt::body_iterator BI = CS->body_begin(), BE = CS->body_end() ; BI != BE; ++BI) { - Stmt *SI = *BI; - if (LabelStmt *LS = dyn_cast<LabelStmt>(SI)) - SI = LS->getSubStmt(); + Stmt *SI = (*BI)->stripLabelLikeStatements(); if (DeclStmt *DS = dyn_cast<DeclStmt>(SI)) Scope = addLocalScopeForDeclStmt(DS, Scope); } @@ -732,22 +760,20 @@ void CFGBuilder::addLocalScopeForStmt(Stmt* S) { // For any other statement scope will be implicit and as such will be // interesting only for DeclStmt. - if (LabelStmt *LS = dyn_cast<LabelStmt>(S)) - S = LS->getSubStmt(); - if (DeclStmt *DS = dyn_cast<DeclStmt>(S)) + if (DeclStmt *DS = dyn_cast<DeclStmt>(S->stripLabelLikeStatements())) addLocalScopeForDeclStmt(DS); } /// addLocalScopeForDeclStmt - Add LocalScope for declaration statement. Will /// reuse Scope if not NULL. -LocalScope* CFGBuilder::addLocalScopeForDeclStmt(DeclStmt* DS, +LocalScope* CFGBuilder::addLocalScopeForDeclStmt(DeclStmt *DS, LocalScope* Scope) { if (!BuildOpts.AddImplicitDtors) return Scope; for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end() ; DI != DE; ++DI) { - if (VarDecl* VD = dyn_cast<VarDecl>(*DI)) + if (VarDecl *VD = dyn_cast<VarDecl>(*DI)) Scope = addLocalScopeForVarDecl(VD, Scope); } return Scope; @@ -756,7 +782,7 @@ LocalScope* CFGBuilder::addLocalScopeForDeclStmt(DeclStmt* DS, /// addLocalScopeForVarDecl - Add LocalScope for variable declaration. It will /// create add scope for automatic objects and temporary objects bound to /// const reference. Will reuse Scope if not NULL. -LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl* VD, +LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl *VD, LocalScope* Scope) { if (!BuildOpts.AddImplicitDtors) return Scope; @@ -788,7 +814,7 @@ LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl* VD, } // Check if type is a C++ class with non-trivial destructor. - if (const CXXRecordDecl* CD = QT->getAsCXXRecordDecl()) + if (const CXXRecordDecl *CD = QT->getAsCXXRecordDecl()) if (!CD->hasTrivialDestructor()) { // Add the variable to scope Scope = createOrReuseLocalScope(Scope); @@ -800,7 +826,7 @@ LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl* VD, /// addLocalScopeAndDtors - For given statement add local scope for it and /// add destructors that will cleanup the scope. Will reuse Scope if not NULL. -void CFGBuilder::addLocalScopeAndDtors(Stmt* S) { +void CFGBuilder::addLocalScopeAndDtors(Stmt *S) { if (!BuildOpts.AddImplicitDtors) return; @@ -809,40 +835,27 @@ void CFGBuilder::addLocalScopeAndDtors(Stmt* S) { addAutomaticObjDtors(ScopePos, scopeBeginPos, S); } -/// insertAutomaticObjDtors - Insert destructor CFGElements for variables with -/// automatic storage duration to CFGBlock's elements vector. Insertion will be -/// performed in place specified with iterator. 
-void CFGBuilder::insertAutomaticObjDtors(CFGBlock* Blk, CFGBlock::iterator I, - LocalScope::const_iterator B, LocalScope::const_iterator E, Stmt* S) { - BumpVectorContext& C = cfg->getBumpVectorContext(); - I = Blk->beginAutomaticObjDtorsInsert(I, B.distance(E), C); - while (B != E) - I = Blk->insertAutomaticObjDtor(I, *B++, S); -} - -/// appendAutomaticObjDtors - Append destructor CFGElements for variables with -/// automatic storage duration to CFGBlock's elements vector. Elements will be -/// appended to physical end of the vector which happens to be logical -/// beginning. -void CFGBuilder::appendAutomaticObjDtors(CFGBlock* Blk, - LocalScope::const_iterator B, LocalScope::const_iterator E, Stmt* S) { - insertAutomaticObjDtors(Blk, Blk->begin(), B, E, S); -} - /// prependAutomaticObjDtorsWithTerminator - Prepend destructor CFGElements for /// variables with automatic storage duration to CFGBlock's elements vector. /// Elements will be prepended to physical beginning of the vector which /// happens to be logical end. Use blocks terminator as statement that specifies /// destructors call site. -void CFGBuilder::prependAutomaticObjDtorsWithTerminator(CFGBlock* Blk, +/// FIXME: This mechanism for adding automatic destructors doesn't handle +/// no-return destructors properly. +void CFGBuilder::prependAutomaticObjDtorsWithTerminator(CFGBlock *Blk, LocalScope::const_iterator B, LocalScope::const_iterator E) { - insertAutomaticObjDtors(Blk, Blk->end(), B, E, Blk->getTerminator()); + BumpVectorContext &C = cfg->getBumpVectorContext(); + CFGBlock::iterator InsertPos + = Blk->beginAutomaticObjDtorsInsert(Blk->end(), B.distance(E), C); + for (LocalScope::const_iterator I = B; I != E; ++I) + InsertPos = Blk->insertAutomaticObjDtor(InsertPos, *I, + Blk->getTerminator()); } /// Visit - Walk the subtree of a statement and add extra /// blocks for ternary operators, &&, and ||. We also process "," and /// DeclStmts (which may contain nested control-flow). -CFGBlock* CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) { +CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) { if (!S) { badCFG = true; return 0; @@ -996,7 +1009,7 @@ CFGBlock *CFGBuilder::VisitStmt(Stmt *S, AddStmtChoice asc) { } /// VisitChildren - Visit the children of a Stmt. -CFGBlock *CFGBuilder::VisitChildren(Stmt* Terminator) { +CFGBlock *CFGBuilder::VisitChildren(Stmt *Terminator) { CFGBlock *lastBlock = Block; for (Stmt::child_range I = Terminator->children(); I; ++I) if (Stmt *child = *I) @@ -1031,20 +1044,20 @@ CFGBlock *CFGBuilder::VisitUnaryOperator(UnaryOperator *U, CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B, AddStmtChoice asc) { if (B->isLogicalOp()) { // && or || - CFGBlock* ConfluenceBlock = Block ? Block : createBlock(); + CFGBlock *ConfluenceBlock = Block ? Block : createBlock(); appendStmt(ConfluenceBlock, B); if (badCFG) return 0; // create the block evaluating the LHS - CFGBlock* LHSBlock = createBlock(false); + CFGBlock *LHSBlock = createBlock(false); LHSBlock->setTerminator(B); // create the block evaluating the RHS Succ = ConfluenceBlock; Block = NULL; - CFGBlock* RHSBlock = addStmt(B->getRHS()); + CFGBlock *RHSBlock = addStmt(B->getRHS()); if (RHSBlock) { if (badCFG) @@ -1191,13 +1204,13 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) { return 0; } - Block = createBlock(!NoReturn); + if (NoReturn) + Block = createNoReturnBlock(); + else + Block = createBlock(); + appendStmt(Block, C); - if (NoReturn) { - // Wire this to the exit block directly. 
- addSuccessor(Block, &cfg->getExit()); - } if (AddEHEdge) { // Add exceptional edges. if (TryTerminatedBlock) @@ -1211,7 +1224,7 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) { CFGBlock *CFGBuilder::VisitChooseExpr(ChooseExpr *C, AddStmtChoice asc) { - CFGBlock* ConfluenceBlock = Block ? Block : createBlock(); + CFGBlock *ConfluenceBlock = Block ? Block : createBlock(); appendStmt(ConfluenceBlock, C); if (badCFG) return 0; @@ -1219,13 +1232,13 @@ CFGBlock *CFGBuilder::VisitChooseExpr(ChooseExpr *C, AddStmtChoice alwaysAdd = asc.withAlwaysAdd(true); Succ = ConfluenceBlock; Block = NULL; - CFGBlock* LHSBlock = Visit(C->getLHS(), alwaysAdd); + CFGBlock *LHSBlock = Visit(C->getLHS(), alwaysAdd); if (badCFG) return 0; Succ = ConfluenceBlock; Block = NULL; - CFGBlock* RHSBlock = Visit(C->getRHS(), alwaysAdd); + CFGBlock *RHSBlock = Visit(C->getRHS(), alwaysAdd); if (badCFG) return 0; @@ -1239,9 +1252,9 @@ CFGBlock *CFGBuilder::VisitChooseExpr(ChooseExpr *C, } -CFGBlock* CFGBuilder::VisitCompoundStmt(CompoundStmt* C) { +CFGBlock *CFGBuilder::VisitCompoundStmt(CompoundStmt *C) { addLocalScopeAndDtors(C); - CFGBlock* LastBlock = Block; + CFGBlock *LastBlock = Block; for (CompoundStmt::reverse_body_iterator I=C->body_rbegin(), E=C->body_rend(); I != E; ++I ) { @@ -1264,7 +1277,7 @@ CFGBlock *CFGBuilder::VisitConditionalOperator(AbstractConditionalOperator *C, // Create the confluence block that will "merge" the results of the ternary // expression. - CFGBlock* ConfluenceBlock = Block ? Block : createBlock(); + CFGBlock *ConfluenceBlock = Block ? Block : createBlock(); appendStmt(ConfluenceBlock, C); if (badCFG) return 0; @@ -1277,7 +1290,7 @@ CFGBlock *CFGBuilder::VisitConditionalOperator(AbstractConditionalOperator *C, // e.g: x ?: y is shorthand for: x ? x : y; Succ = ConfluenceBlock; Block = NULL; - CFGBlock* LHSBlock = 0; + CFGBlock *LHSBlock = 0; const Expr *trueExpr = C->getTrueExpr(); if (trueExpr != opaqueValue) { LHSBlock = Visit(C->getTrueExpr(), alwaysAdd); @@ -1290,7 +1303,7 @@ CFGBlock *CFGBuilder::VisitConditionalOperator(AbstractConditionalOperator *C, // Create the block for the RHS expression. Succ = ConfluenceBlock; - CFGBlock* RHSBlock = Visit(C->getFalseExpr(), alwaysAdd); + CFGBlock *RHSBlock = Visit(C->getFalseExpr(), alwaysAdd); if (badCFG) return 0; @@ -1331,7 +1344,7 @@ CFGBlock *CFGBuilder::VisitDeclStmt(DeclStmt *DS) { CFGBlock *B = 0; // FIXME: Add a reverse iterator for DeclStmt to avoid this extra copy. - typedef llvm::SmallVector<Decl*,10> BufTy; + typedef SmallVector<Decl*,10> BufTy; BufTy Buf(DS->decl_begin(), DS->decl_end()); for (BufTy::reverse_iterator I = Buf.rbegin(), E = Buf.rend(); I != E; ++I) { @@ -1355,7 +1368,7 @@ CFGBlock *CFGBuilder::VisitDeclStmt(DeclStmt *DS) { /// VisitDeclSubExpr - Utility method to add block-level expressions for /// DeclStmts and initializers in them. -CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt* DS) { +CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) { assert(DS->isSingleDecl() && "Can handle single declarations only."); Decl *D = DS->getSingleDecl(); @@ -1414,7 +1427,7 @@ CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt* DS) { return Block; } -CFGBlock* CFGBuilder::VisitIfStmt(IfStmt* I) { +CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) { // We may see an if statement in the middle of a basic block, or it may be the // first statement we are processing. In either case, we create a new basic // block. 
First, we create the blocks for the then...else statements, and @@ -1428,7 +1441,7 @@ CFGBlock* CFGBuilder::VisitIfStmt(IfStmt* I) { // Create local scope for possible condition variable. // Store scope position. Add implicit destructor. - if (VarDecl* VD = I->getConditionVariable()) { + if (VarDecl *VD = I->getConditionVariable()) { LocalScope::const_iterator BeginScopePos = ScopePos; addLocalScopeForVarDecl(VD); addAutomaticObjDtors(ScopePos, BeginScopePos, I); @@ -1443,9 +1456,9 @@ CFGBlock* CFGBuilder::VisitIfStmt(IfStmt* I) { } // Process the false branch. - CFGBlock* ElseBlock = Succ; + CFGBlock *ElseBlock = Succ; - if (Stmt* Else = I->getElse()) { + if (Stmt *Else = I->getElse()) { SaveAndRestore<CFGBlock*> sv(Succ); // NULL out Block so that the recursive call to Visit will @@ -1468,9 +1481,9 @@ CFGBlock* CFGBuilder::VisitIfStmt(IfStmt* I) { } // Process the true branch. - CFGBlock* ThenBlock; + CFGBlock *ThenBlock; { - Stmt* Then = I->getThen(); + Stmt *Then = I->getThen(); assert(Then); SaveAndRestore<CFGBlock*> sv(Succ); Block = NULL; @@ -1526,7 +1539,7 @@ CFGBlock* CFGBuilder::VisitIfStmt(IfStmt* I) { } -CFGBlock* CFGBuilder::VisitReturnStmt(ReturnStmt* R) { +CFGBlock *CFGBuilder::VisitReturnStmt(ReturnStmt *R) { // If we were in the middle of a block we stop processing that block. // // NOTE: If a "return" appears in the middle of a block, this means that the @@ -1546,7 +1559,7 @@ CFGBlock* CFGBuilder::VisitReturnStmt(ReturnStmt* R) { return VisitStmt(R, AddStmtChoice::AlwaysAdd); } -CFGBlock* CFGBuilder::VisitLabelStmt(LabelStmt *L) { +CFGBlock *CFGBuilder::VisitLabelStmt(LabelStmt *L) { // Get the block of the labeled statement. Add it to our map. addStmt(L->getSubStmt()); CFGBlock *LabelBlock = Block; @@ -1575,7 +1588,7 @@ CFGBlock* CFGBuilder::VisitLabelStmt(LabelStmt *L) { return LabelBlock; } -CFGBlock* CFGBuilder::VisitGotoStmt(GotoStmt* G) { +CFGBlock *CFGBuilder::VisitGotoStmt(GotoStmt *G) { // Goto is a control-flow statement. Thus we stop processing the current // block and create a new one. @@ -1597,8 +1610,8 @@ CFGBlock* CFGBuilder::VisitGotoStmt(GotoStmt* G) { return Block; } -CFGBlock* CFGBuilder::VisitForStmt(ForStmt* F) { - CFGBlock* LoopSuccessor = NULL; +CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) { + CFGBlock *LoopSuccessor = NULL; // Save local scope position because in case of condition variable ScopePos // won't be restored when traversing AST. @@ -1607,11 +1620,11 @@ CFGBlock* CFGBuilder::VisitForStmt(ForStmt* F) { // Create local scope for init statement and possible condition variable. // Add destructor for init statement and condition variable. // Store scope position for continue statement. - if (Stmt* Init = F->getInit()) + if (Stmt *Init = F->getInit()) addLocalScopeForStmt(Init); LocalScope::const_iterator LoopBeginScopePos = ScopePos; - if (VarDecl* VD = F->getConditionVariable()) + if (VarDecl *VD = F->getConditionVariable()) addLocalScopeForVarDecl(VD); LocalScope::const_iterator ContinueScopePos = ScopePos; @@ -1634,15 +1647,15 @@ CFGBlock* CFGBuilder::VisitForStmt(ForStmt* F) { // Because of short-circuit evaluation, the condition of the loop can span // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that // evaluate the condition. - CFGBlock* ExitConditionBlock = createBlock(false); - CFGBlock* EntryConditionBlock = ExitConditionBlock; + CFGBlock *ExitConditionBlock = createBlock(false); + CFGBlock *EntryConditionBlock = ExitConditionBlock; // Set the terminator for the "exit" condition block. 
ExitConditionBlock->setTerminator(F); // Now add the actual condition to the condition block. Because the condition // itself may contain control-flow, new blocks may be created. - if (Stmt* C = F->getCond()) { + if (Stmt *C = F->getCond()) { Block = ExitConditionBlock; EntryConditionBlock = addStmt(C); if (badCFG) @@ -1691,7 +1704,7 @@ CFGBlock* CFGBuilder::VisitForStmt(ForStmt* F) { // Loop body should end with destructor of Condition variable (if any). addAutomaticObjDtors(ScopePos, LoopBeginScopePos, F); - if (Stmt* I = F->getInc()) { + if (Stmt *I = F->getInc()) { // Generate increment code in its own basic block. This is the target of // continue statements. Succ = addStmt(I); @@ -1723,7 +1736,7 @@ CFGBlock* CFGBuilder::VisitForStmt(ForStmt* F) { // Now populate the body block, and in the process create new blocks as we // walk the body of the loop. - CFGBlock* BodyBlock = addStmt(F->getBody()); + CFGBlock *BodyBlock = addStmt(F->getBody()); if (!BodyBlock) BodyBlock = ContinueJumpTarget.block;//can happen for "for (...;...;...);" @@ -1740,7 +1753,7 @@ CFGBlock* CFGBuilder::VisitForStmt(ForStmt* F) { // If the loop contains initialization, create a new block for those // statements. This block can also contain statements that precede the loop. - if (Stmt* I = F->getInit()) { + if (Stmt *I = F->getInit()) { Block = createBlock(); return addStmt(I); } @@ -1760,7 +1773,7 @@ CFGBlock *CFGBuilder::VisitMemberExpr(MemberExpr *M, AddStmtChoice asc) { return Visit(M->getBase()); } -CFGBlock* CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) { +CFGBlock *CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) { // Objective-C fast enumeration 'for' statements: // http://developer.apple.com/documentation/Cocoa/Conceptual/ObjectiveC // @@ -1793,7 +1806,7 @@ CFGBlock* CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) { // a DeclStmt and the other returns a DeclRefExpr. // - CFGBlock* LoopSuccessor = 0; + CFGBlock *LoopSuccessor = 0; if (Block) { if (badCFG) @@ -1804,8 +1817,7 @@ CFGBlock* CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) { LoopSuccessor = Succ; // Build the condition blocks. - CFGBlock* ExitConditionBlock = createBlock(false); - CFGBlock* EntryConditionBlock = ExitConditionBlock; + CFGBlock *ExitConditionBlock = createBlock(false); // Set the terminator for the "exit" condition block. ExitConditionBlock->setTerminator(S); @@ -1819,7 +1831,8 @@ CFGBlock* CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) { // Walk the 'element' expression to see if there are any side-effects. We // generate new blocks as necessary. We DON'T add the statement by default to // the CFG unless it contains control-flow. 
- EntryConditionBlock = Visit(S->getElement(), AddStmtChoice::NotAlwaysAdd); + CFGBlock *EntryConditionBlock = Visit(S->getElement(), + AddStmtChoice::NotAlwaysAdd); if (Block) { if (badCFG) return 0; @@ -1840,7 +1853,7 @@ CFGBlock* CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) { BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos); ContinueJumpTarget = JumpTarget(EntryConditionBlock, ScopePos); - CFGBlock* BodyBlock = addStmt(S->getBody()); + CFGBlock *BodyBlock = addStmt(S->getBody()); if (!BodyBlock) BodyBlock = EntryConditionBlock; // can happen for "for (X in Y) ;" @@ -1862,7 +1875,7 @@ CFGBlock* CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) { return addStmt(S->getCollection()); } -CFGBlock* CFGBuilder::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt* S) { +CFGBlock *CFGBuilder::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) { // FIXME: Add locking 'primitives' to CFG for @synchronized. // Inline the body. @@ -1886,13 +1899,13 @@ CFGBlock* CFGBuilder::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt* S) { return addStmt(S->getSynchExpr()); } -CFGBlock* CFGBuilder::VisitObjCAtTryStmt(ObjCAtTryStmt* S) { +CFGBlock *CFGBuilder::VisitObjCAtTryStmt(ObjCAtTryStmt *S) { // FIXME return NYS(); } -CFGBlock* CFGBuilder::VisitWhileStmt(WhileStmt* W) { - CFGBlock* LoopSuccessor = NULL; +CFGBlock *CFGBuilder::VisitWhileStmt(WhileStmt *W) { + CFGBlock *LoopSuccessor = NULL; // Save local scope position because in case of condition variable ScopePos // won't be restored when traversing AST. @@ -1901,7 +1914,7 @@ CFGBlock* CFGBuilder::VisitWhileStmt(WhileStmt* W) { // Create local scope for possible condition variable. // Store scope position for continue statement. LocalScope::const_iterator LoopBeginScopePos = ScopePos; - if (VarDecl* VD = W->getConditionVariable()) { + if (VarDecl *VD = W->getConditionVariable()) { addLocalScopeForVarDecl(VD); addAutomaticObjDtors(ScopePos, LoopBeginScopePos, W); } @@ -1919,8 +1932,8 @@ CFGBlock* CFGBuilder::VisitWhileStmt(WhileStmt* W) { // Because of short-circuit evaluation, the condition of the loop can span // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that // evaluate the condition. - CFGBlock* ExitConditionBlock = createBlock(false); - CFGBlock* EntryConditionBlock = ExitConditionBlock; + CFGBlock *ExitConditionBlock = createBlock(false); + CFGBlock *EntryConditionBlock = ExitConditionBlock; // Set the terminator for the "exit" condition block. ExitConditionBlock->setTerminator(W); @@ -1928,7 +1941,7 @@ CFGBlock* CFGBuilder::VisitWhileStmt(WhileStmt* W) { // Now add the actual condition to the condition block. Because the condition // itself may contain control-flow, new blocks may be created. Thus we update // "Succ" after adding the condition. - if (Stmt* C = W->getCond()) { + if (Stmt *C = W->getCond()) { Block = ExitConditionBlock; EntryConditionBlock = addStmt(C); // The condition might finish the current 'Block'. @@ -1990,7 +2003,7 @@ CFGBlock* CFGBuilder::VisitWhileStmt(WhileStmt* W) { addLocalScopeAndDtors(W->getBody()); // Create the body. The returned block is the entry to the loop body. - CFGBlock* BodyBlock = addStmt(W->getBody()); + CFGBlock *BodyBlock = addStmt(W->getBody()); if (!BodyBlock) BodyBlock = ContinueJumpTarget.block; // can happen for "while(...) 
;" @@ -2017,13 +2030,13 @@ CFGBlock* CFGBuilder::VisitWhileStmt(WhileStmt* W) { } -CFGBlock *CFGBuilder::VisitObjCAtCatchStmt(ObjCAtCatchStmt* S) { +CFGBlock *CFGBuilder::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) { // FIXME: For now we pretend that @catch and the code it contains does not // exit. return Block; } -CFGBlock* CFGBuilder::VisitObjCAtThrowStmt(ObjCAtThrowStmt* S) { +CFGBlock *CFGBuilder::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) { // FIXME: This isn't complete. We basically treat @throw like a return // statement. @@ -2042,7 +2055,7 @@ CFGBlock* CFGBuilder::VisitObjCAtThrowStmt(ObjCAtThrowStmt* S) { return VisitStmt(S, AddStmtChoice::AlwaysAdd); } -CFGBlock* CFGBuilder::VisitCXXThrowExpr(CXXThrowExpr* T) { +CFGBlock *CFGBuilder::VisitCXXThrowExpr(CXXThrowExpr *T) { // If we were in the middle of a block we stop processing that block. if (badCFG) return 0; @@ -2062,8 +2075,8 @@ CFGBlock* CFGBuilder::VisitCXXThrowExpr(CXXThrowExpr* T) { return VisitStmt(T, AddStmtChoice::AlwaysAdd); } -CFGBlock *CFGBuilder::VisitDoStmt(DoStmt* D) { - CFGBlock* LoopSuccessor = NULL; +CFGBlock *CFGBuilder::VisitDoStmt(DoStmt *D) { + CFGBlock *LoopSuccessor = NULL; // "do...while" is a control-flow statement. Thus we stop processing the // current block. @@ -2077,15 +2090,15 @@ CFGBlock *CFGBuilder::VisitDoStmt(DoStmt* D) { // Because of short-circuit evaluation, the condition of the loop can span // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that // evaluate the condition. - CFGBlock* ExitConditionBlock = createBlock(false); - CFGBlock* EntryConditionBlock = ExitConditionBlock; + CFGBlock *ExitConditionBlock = createBlock(false); + CFGBlock *EntryConditionBlock = ExitConditionBlock; // Set the terminator for the "exit" condition block. ExitConditionBlock->setTerminator(D); // Now add the actual condition to the condition block. Because the condition // itself may contain control-flow, new blocks may be created. - if (Stmt* C = D->getCond()) { + if (Stmt *C = D->getCond()) { Block = ExitConditionBlock; EntryConditionBlock = addStmt(C); if (Block) { @@ -2101,7 +2114,7 @@ CFGBlock *CFGBuilder::VisitDoStmt(DoStmt* D) { const TryResult &KnownVal = tryEvaluateBool(D->getCond()); // Process the loop body. - CFGBlock* BodyBlock = NULL; + CFGBlock *BodyBlock = NULL; { assert(D->getBody()); @@ -2165,7 +2178,7 @@ CFGBlock *CFGBuilder::VisitDoStmt(DoStmt* D) { return BodyBlock; } -CFGBlock* CFGBuilder::VisitContinueStmt(ContinueStmt* C) { +CFGBlock *CFGBuilder::VisitContinueStmt(ContinueStmt *C) { // "continue" is a control-flow statement. Thus we stop processing the // current block. if (badCFG) @@ -2202,13 +2215,12 @@ CFGBlock *CFGBuilder::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E, VA != 0; VA = FindVA(VA->getElementType().getTypePtr())) lastBlock = addStmt(VA->getSizeExpr()); } - return lastBlock; } /// VisitStmtExpr - Utility method to handle (nested) statement /// expressions (a GCC extension). -CFGBlock* CFGBuilder::VisitStmtExpr(StmtExpr *SE, AddStmtChoice asc) { +CFGBlock *CFGBuilder::VisitStmtExpr(StmtExpr *SE, AddStmtChoice asc) { if (asc.alwaysAdd(*this, SE)) { autoCreateBlock(); appendStmt(Block, SE); @@ -2216,10 +2228,10 @@ CFGBlock* CFGBuilder::VisitStmtExpr(StmtExpr *SE, AddStmtChoice asc) { return VisitCompoundStmt(SE->getSubStmt()); } -CFGBlock* CFGBuilder::VisitSwitchStmt(SwitchStmt* Terminator) { +CFGBlock *CFGBuilder::VisitSwitchStmt(SwitchStmt *Terminator) { // "switch" is a control-flow statement. Thus we stop processing the current // block. 
- CFGBlock* SwitchSuccessor = NULL; + CFGBlock *SwitchSuccessor = NULL; // Save local scope position because in case of condition variable ScopePos // won't be restored when traversing AST. @@ -2227,7 +2239,7 @@ CFGBlock* CFGBuilder::VisitSwitchStmt(SwitchStmt* Terminator) { // Create local scope for possible condition variable. // Store scope position. Add implicit destructor. - if (VarDecl* VD = Terminator->getConditionVariable()) { + if (VarDecl *VD = Terminator->getConditionVariable()) { LocalScope::const_iterator SwitchBeginScopePos = ScopePos; addLocalScopeForVarDecl(VD); addAutomaticObjDtors(ScopePos, SwitchBeginScopePos, Terminator); @@ -2323,11 +2335,8 @@ static bool shouldAddCase(bool &switchExclusivelyCovered, if (!switchExclusivelyCovered) { if (switchCond->Val.isInt()) { // Evaluate the LHS of the case value. - Expr::EvalResult V1; - CS->getLHS()->Evaluate(V1, Ctx); - assert(V1.Val.isInt()); + const llvm::APSInt &lhsInt = CS->getLHS()->EvaluateKnownConstInt(Ctx); const llvm::APSInt &condInt = switchCond->Val.getInt(); - const llvm::APSInt &lhsInt = V1.Val.getInt(); if (condInt == lhsInt) { addCase = true; @@ -2336,10 +2345,8 @@ static bool shouldAddCase(bool &switchExclusivelyCovered, else if (condInt < lhsInt) { if (const Expr *RHS = CS->getRHS()) { // Evaluate the RHS of the case value. - Expr::EvalResult V2; - RHS->Evaluate(V2, Ctx); - assert(V2.Val.isInt()); - if (V2.Val.getInt() <= condInt) { + const llvm::APSInt &V2 = RHS->EvaluateKnownConstInt(Ctx); + if (V2 <= condInt) { addCase = true; switchExclusivelyCovered = true; } @@ -2352,7 +2359,7 @@ static bool shouldAddCase(bool &switchExclusivelyCovered, return addCase; } -CFGBlock* CFGBuilder::VisitCaseStmt(CaseStmt* CS) { +CFGBlock *CFGBuilder::VisitCaseStmt(CaseStmt *CS) { // CaseStmts are essentially labels, so they are the first statement in a // block. CFGBlock *TopBlock = 0, *LastBlock = 0; @@ -2383,7 +2390,7 @@ CFGBlock* CFGBuilder::VisitCaseStmt(CaseStmt* CS) { addStmt(Sub); } - CFGBlock* CaseBlock = Block; + CFGBlock *CaseBlock = Block; if (!CaseBlock) CaseBlock = createBlock(); @@ -2416,7 +2423,7 @@ CFGBlock* CFGBuilder::VisitCaseStmt(CaseStmt* CS) { return Succ; } -CFGBlock* CFGBuilder::VisitDefaultStmt(DefaultStmt* Terminator) { +CFGBlock *CFGBuilder::VisitDefaultStmt(DefaultStmt *Terminator) { if (Terminator->getSubStmt()) addStmt(Terminator->getSubStmt()); @@ -2450,7 +2457,7 @@ CFGBlock* CFGBuilder::VisitDefaultStmt(DefaultStmt* Terminator) { CFGBlock *CFGBuilder::VisitCXXTryStmt(CXXTryStmt *Terminator) { // "try"/"catch" is a control-flow statement. Thus we stop processing the // current block. - CFGBlock* TrySuccessor = NULL; + CFGBlock *TrySuccessor = NULL; if (Block) { if (badCFG) @@ -2492,8 +2499,8 @@ CFGBlock *CFGBuilder::VisitCXXTryStmt(CXXTryStmt *Terminator) { Succ = TrySuccessor; // Save the current "try" context. - SaveAndRestore<CFGBlock*> save_try(TryTerminatedBlock); - TryTerminatedBlock = NewTryTerminatedBlock; + SaveAndRestore<CFGBlock*> save_try(TryTerminatedBlock, NewTryTerminatedBlock); + cfg->addTryDispatchBlock(TryTerminatedBlock); assert(Terminator->getTryBlock() && "try must contain a non-NULL body"); Block = NULL; @@ -2501,7 +2508,7 @@ CFGBlock *CFGBuilder::VisitCXXTryStmt(CXXTryStmt *Terminator) { return Block; } -CFGBlock* CFGBuilder::VisitCXXCatchStmt(CXXCatchStmt* CS) { +CFGBlock *CFGBuilder::VisitCXXCatchStmt(CXXCatchStmt *CS) { // CXXCatchStmt are treated like labels, so they are the first statement in a // block. 
@@ -2511,7 +2518,7 @@ CFGBlock* CFGBuilder::VisitCXXCatchStmt(CXXCatchStmt* CS) { // Create local scope for possible exception variable. // Store scope position. Add implicit destructor. - if (VarDecl* VD = CS->getExceptionDecl()) { + if (VarDecl *VD = CS->getExceptionDecl()) { LocalScope::const_iterator BeginScopePos = ScopePos; addLocalScopeForVarDecl(VD); addAutomaticObjDtors(ScopePos, BeginScopePos, CS); @@ -2520,7 +2527,7 @@ CFGBlock* CFGBuilder::VisitCXXCatchStmt(CXXCatchStmt* CS) { if (CS->getHandlerBlock()) addStmt(CS->getHandlerBlock()); - CFGBlock* CatchBlock = Block; + CFGBlock *CatchBlock = Block; if (!CatchBlock) CatchBlock = createBlock(); @@ -2535,7 +2542,7 @@ CFGBlock* CFGBuilder::VisitCXXCatchStmt(CXXCatchStmt* CS) { return CatchBlock; } -CFGBlock* CFGBuilder::VisitCXXForRangeStmt(CXXForRangeStmt* S) { +CFGBlock *CFGBuilder::VisitCXXForRangeStmt(CXXForRangeStmt *S) { // C++0x for-range statements are specified as [stmt.ranged]: // // { @@ -2563,7 +2570,7 @@ CFGBlock* CFGBuilder::VisitCXXForRangeStmt(CXXForRangeStmt* S) { // "for" is a control-flow statement. Thus we stop processing the current // block. - CFGBlock* LoopSuccessor = NULL; + CFGBlock *LoopSuccessor = NULL; if (Block) { if (badCFG) return 0; @@ -2577,7 +2584,7 @@ CFGBlock* CFGBuilder::VisitCXXForRangeStmt(CXXForRangeStmt* S) { BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos); // The block for the __begin != __end expression. - CFGBlock* ConditionBlock = createBlock(false); + CFGBlock *ConditionBlock = createBlock(false); ConditionBlock->setTerminator(S); // Now add the actual condition to the condition block. @@ -2713,9 +2720,9 @@ CFGBlock *CFGBuilder::VisitImplicitCastExpr(ImplicitCastExpr *E, return Visit(E->getSubExpr(), AddStmtChoice()); } -CFGBlock* CFGBuilder::VisitIndirectGotoStmt(IndirectGotoStmt* I) { +CFGBlock *CFGBuilder::VisitIndirectGotoStmt(IndirectGotoStmt *I) { // Lazily create the indirect-goto dispatch block if there isn't one already. - CFGBlock* IBlock = cfg->getIndirectGotoBlock(); + CFGBlock *IBlock = cfg->getIndirectGotoBlock(); if (!IBlock) { IBlock = createBlock(false); @@ -2774,7 +2781,7 @@ CFGBlock *CFGBuilder::VisitChildrenForTemporaryDtors(Stmt *E) { // When visiting children for destructors we want to visit them in reverse // order. Because there's no reverse iterator for children must to reverse // them in helper vector. - typedef llvm::SmallVector<Stmt *, 4> ChildrenVect; + typedef SmallVector<Stmt *, 4> ChildrenVect; ChildrenVect ChildrenRev; for (Stmt::child_range I = E->children(); I; ++I) { if (*I) ChildrenRev.push_back(*I); @@ -2864,7 +2871,16 @@ CFGBlock *CFGBuilder::VisitCXXBindTemporaryExprForTemporaryDtors( if (!BindToTemporary) { // If lifetime of temporary is not prolonged (by assigning to constant // reference) add destructor for it. - autoCreateBlock(); + + // If the destructor is marked as a no-return destructor, we need to create + // a new block for the destructor which does not have as a successor + // anything built thus far. Control won't flow out of this block. + const CXXDestructorDecl *Dtor = E->getTemporary()->getDestructor(); + if (cast<FunctionType>(Dtor->getType())->getNoReturnAttr()) + Block = createNoReturnBlock(); + else + autoCreateBlock(); + appendTemporaryDtor(Block, E); B = Block; } @@ -2937,7 +2953,7 @@ CFGBlock *CFGBuilder::VisitConditionalOperatorForTemporaryDtors( /// createBlock - Constructs and adds a new CFGBlock to the CFG. The block has /// no successors or predecessors. 
If this is the first block created in the /// CFG, it is automatically set to be the Entry and Exit of the CFG. -CFGBlock* CFG::createBlock() { +CFGBlock *CFG::createBlock() { bool first_block = begin() == end(); // Create the block. @@ -2955,7 +2971,7 @@ CFGBlock* CFG::createBlock() { /// buildCFG - Constructs a CFG from an AST. Ownership of the returned /// CFG is returned to the caller. -CFG* CFG::buildCFG(const Decl *D, Stmt* Statement, ASTContext *C, +CFG* CFG::buildCFG(const Decl *D, Stmt *Statement, ASTContext *C, const BuildOptions &BO) { CFGBuilder Builder(C, BO); return Builder.buildCFG(D, Statement); @@ -3013,17 +3029,17 @@ namespace { typedef llvm::DenseMap<const Stmt*,unsigned> BlkExprMapTy; } -static void FindSubExprAssignments(Stmt *S, - llvm::SmallPtrSet<Expr*,50>& Set) { +static void FindSubExprAssignments(const Stmt *S, + llvm::SmallPtrSet<const Expr*,50>& Set) { if (!S) return; - for (Stmt::child_range I = S->children(); I; ++I) { - Stmt *child = *I; + for (Stmt::const_child_range I = S->children(); I; ++I) { + const Stmt *child = *I; if (!child) continue; - if (BinaryOperator* B = dyn_cast<BinaryOperator>(child)) + if (const BinaryOperator* B = dyn_cast<BinaryOperator>(child)) if (B->isAssignmentOp()) Set.insert(B); FindSubExprAssignments(child, Set); @@ -3037,7 +3053,7 @@ static BlkExprMapTy* PopulateBlkExprMap(CFG& cfg) { // assignments that we want to *possibly* register as a block-level // expression. Basically, if an assignment occurs both in a subexpression and // at the block-level, it is a block-level expression. - llvm::SmallPtrSet<Expr*,50> SubExprAssignments; + llvm::SmallPtrSet<const Expr*,50> SubExprAssignments; for (CFG::iterator I=cfg.begin(), E=cfg.end(); I != E; ++I) for (CFGBlock::iterator BI=(*I)->begin(), EI=(*I)->end(); BI != EI; ++BI) @@ -3053,19 +3069,19 @@ static BlkExprMapTy* PopulateBlkExprMap(CFG& cfg) { const CFGStmt *CS = BI->getAs<CFGStmt>(); if (!CS) continue; - if (Expr* Exp = dyn_cast<Expr>(CS->getStmt())) { + if (const Expr *Exp = dyn_cast<Expr>(CS->getStmt())) { assert((Exp->IgnoreParens() == Exp) && "No parens on block-level exps"); - if (BinaryOperator* B = dyn_cast<BinaryOperator>(Exp)) { + if (const BinaryOperator* B = dyn_cast<BinaryOperator>(Exp)) { // Assignment expressions that are not nested within another // expression are really "statements" whose value is never used by // another expression. if (B->isAssignmentOp() && !SubExprAssignments.count(Exp)) continue; - } else if (const StmtExpr* SE = dyn_cast<StmtExpr>(Exp)) { + } else if (const StmtExpr *SE = dyn_cast<StmtExpr>(Exp)) { // Special handling for statement expressions. The last statement in // the statement expression is also a block-level expr. - const CompoundStmt* C = SE->getSubStmt(); + const CompoundStmt *C = SE->getSubStmt(); if (!C->body_empty()) { const Stmt *Last = C->body_back(); if (const Expr *LastEx = dyn_cast<Expr>(Last)) @@ -3082,7 +3098,7 @@ static BlkExprMapTy* PopulateBlkExprMap(CFG& cfg) { // Look at terminators. The condition is a block-level expression. 
- Stmt* S = (*I)->getTerminatorCondition(); + Stmt *S = (*I)->getTerminatorCondition(); if (S && M->find(S) == M->end()) { unsigned x = M->size(); @@ -3093,7 +3109,7 @@ static BlkExprMapTy* PopulateBlkExprMap(CFG& cfg) { return M; } -CFG::BlkExprNumTy CFG::getBlkExprNum(const Stmt* S) { +CFG::BlkExprNumTy CFG::getBlkExprNum(const Stmt *S) { assert(S != NULL); if (!BlkExprMap) { BlkExprMap = (void*) PopulateBlkExprMap(*this); } @@ -3223,7 +3239,7 @@ public: void setBlockID(signed i) { currentBlock = i; } void setStmtID(unsigned i) { currentStmt = i; } - virtual bool handledStmt(Stmt* S, llvm::raw_ostream& OS) { + virtual bool handledStmt(Stmt *S, raw_ostream &OS) { StmtMapTy::iterator I = StmtMap.find(S); if (I == StmtMap.end()) @@ -3238,7 +3254,7 @@ public: return true; } - bool handleDecl(const Decl* D, llvm::raw_ostream& OS) { + bool handleDecl(const Decl *D, raw_ostream &OS) { DeclMapTy::iterator I = DeclMap.find(D); if (I == DeclMap.end()) @@ -3260,30 +3276,30 @@ namespace { class CFGBlockTerminatorPrint : public StmtVisitor<CFGBlockTerminatorPrint,void> { - llvm::raw_ostream& OS; + raw_ostream &OS; StmtPrinterHelper* Helper; PrintingPolicy Policy; public: - CFGBlockTerminatorPrint(llvm::raw_ostream& os, StmtPrinterHelper* helper, + CFGBlockTerminatorPrint(raw_ostream &os, StmtPrinterHelper* helper, const PrintingPolicy &Policy) : OS(os), Helper(helper), Policy(Policy) {} - void VisitIfStmt(IfStmt* I) { + void VisitIfStmt(IfStmt *I) { OS << "if "; I->getCond()->printPretty(OS,Helper,Policy); } // Default case. - void VisitStmt(Stmt* Terminator) { + void VisitStmt(Stmt *Terminator) { Terminator->printPretty(OS, Helper, Policy); } - void VisitForStmt(ForStmt* F) { + void VisitForStmt(ForStmt *F) { OS << "for (" ; if (F->getInit()) OS << "..."; OS << "; "; - if (Stmt* C = F->getCond()) + if (Stmt *C = F->getCond()) C->printPretty(OS, Helper, Policy); OS << "; "; if (F->getInc()) @@ -3291,24 +3307,24 @@ public: OS << ")"; } - void VisitWhileStmt(WhileStmt* W) { + void VisitWhileStmt(WhileStmt *W) { OS << "while " ; - if (Stmt* C = W->getCond()) + if (Stmt *C = W->getCond()) C->printPretty(OS, Helper, Policy); } - void VisitDoStmt(DoStmt* D) { + void VisitDoStmt(DoStmt *D) { OS << "do ... while "; - if (Stmt* C = D->getCond()) + if (Stmt *C = D->getCond()) C->printPretty(OS, Helper, Policy); } - void VisitSwitchStmt(SwitchStmt* Terminator) { + void VisitSwitchStmt(SwitchStmt *Terminator) { OS << "switch "; Terminator->getCond()->printPretty(OS, Helper, Policy); } - void VisitCXXTryStmt(CXXTryStmt* CS) { + void VisitCXXTryStmt(CXXTryStmt *CS) { OS << "try ..."; } @@ -3317,13 +3333,13 @@ public: OS << " ? ... 
: ..."; } - void VisitChooseExpr(ChooseExpr* C) { + void VisitChooseExpr(ChooseExpr *C) { OS << "__builtin_choose_expr( "; C->getCond()->printPretty(OS, Helper, Policy); OS << " )"; } - void VisitIndirectGotoStmt(IndirectGotoStmt* I) { + void VisitIndirectGotoStmt(IndirectGotoStmt *I) { OS << "goto *"; I->getTarget()->printPretty(OS, Helper, Policy); } @@ -3344,26 +3360,26 @@ public: OS << " && ..."; return; default: - assert(false && "Invalid logical operator."); + llvm_unreachable("Invalid logical operator."); } } - void VisitExpr(Expr* E) { + void VisitExpr(Expr *E) { E->printPretty(OS, Helper, Policy); } }; } // end anonymous namespace -static void print_elem(llvm::raw_ostream &OS, StmtPrinterHelper* Helper, +static void print_elem(raw_ostream &OS, StmtPrinterHelper* Helper, const CFGElement &E) { if (const CFGStmt *CS = E.getAs<CFGStmt>()) { - Stmt *S = CS->getStmt(); + const Stmt *S = CS->getStmt(); if (Helper) { // special printing for statement-expressions. - if (StmtExpr* SE = dyn_cast<StmtExpr>(S)) { - CompoundStmt* Sub = SE->getSubStmt(); + if (const StmtExpr *SE = dyn_cast<StmtExpr>(S)) { + const CompoundStmt *Sub = SE->getSubStmt(); if (Sub->children()) { OS << "({ ... ; "; @@ -3373,7 +3389,7 @@ static void print_elem(llvm::raw_ostream &OS, StmtPrinterHelper* Helper, } } // special printing for comma expressions. - if (BinaryOperator* B = dyn_cast<BinaryOperator>(S)) { + if (const BinaryOperator* B = dyn_cast<BinaryOperator>(S)) { if (B->getOpcode() == BO_Comma) { OS << "... , "; Helper->handledStmt(B->getRHS(),OS); @@ -3401,7 +3417,7 @@ static void print_elem(llvm::raw_ostream &OS, StmtPrinterHelper* Helper, else OS << I->getAnyMember()->getName(); OS << "("; - if (Expr* IE = I->getInit()) + if (Expr *IE = I->getInit()) IE->printPretty(OS, Helper, PrintingPolicy(Helper->getLangOpts())); OS << ")"; @@ -3410,7 +3426,7 @@ static void print_elem(llvm::raw_ostream &OS, StmtPrinterHelper* Helper, else OS << " (Member initializer)\n"; } else if (const CFGAutomaticObjDtor *DE = E.getAs<CFGAutomaticObjDtor>()){ - const VarDecl* VD = DE->getVarDecl(); + const VarDecl *VD = DE->getVarDecl(); Helper->handleDecl(VD, OS); const Type* T = VD->getType().getTypePtr(); @@ -3445,8 +3461,8 @@ static void print_elem(llvm::raw_ostream &OS, StmtPrinterHelper* Helper, } } -static void print_block(llvm::raw_ostream& OS, const CFG* cfg, - const CFGBlock& B, +static void print_block(raw_ostream &OS, const CFG* cfg, + const CFGBlock &B, StmtPrinterHelper* Helper, bool print_edges) { if (Helper) Helper->setBlockID(B.getBlockID()); @@ -3464,14 +3480,14 @@ static void print_block(llvm::raw_ostream& OS, const CFG* cfg, OS << " ]\n"; // Print the label of this block. 
- if (Stmt* Label = const_cast<Stmt*>(B.getLabel())) { + if (Stmt *Label = const_cast<Stmt*>(B.getLabel())) { if (print_edges) OS << " "; - if (LabelStmt* L = dyn_cast<LabelStmt>(Label)) + if (LabelStmt *L = dyn_cast<LabelStmt>(Label)) OS << L->getName(); - else if (CaseStmt* C = dyn_cast<CaseStmt>(Label)) { + else if (CaseStmt *C = dyn_cast<CaseStmt>(Label)) { OS << "case "; C->getLHS()->printPretty(OS, Helper, PrintingPolicy(Helper->getLangOpts())); @@ -3492,7 +3508,7 @@ static void print_block(llvm::raw_ostream& OS, const CFG* cfg, OS << ")"; } else - assert(false && "Invalid label statement in CFGBlock."); + llvm_unreachable("Invalid label statement in CFGBlock."); OS << ":\n"; } @@ -3571,7 +3587,7 @@ static void print_block(llvm::raw_ostream& OS, const CFG* cfg, void CFG::dump(const LangOptions &LO) const { print(llvm::errs(), LO); } /// print - A simple pretty printer of a CFG that outputs to an ostream. -void CFG::print(llvm::raw_ostream &OS, const LangOptions &LO) const { +void CFG::print(raw_ostream &OS, const LangOptions &LO) const { StmtPrinterHelper Helper(this, LO); // Print the entry block. @@ -3598,25 +3614,25 @@ void CFGBlock::dump(const CFG* cfg, const LangOptions &LO) const { /// print - A simple pretty printer of a CFGBlock that outputs to an ostream. /// Generally this will only be called from CFG::print. -void CFGBlock::print(llvm::raw_ostream& OS, const CFG* cfg, +void CFGBlock::print(raw_ostream &OS, const CFG* cfg, const LangOptions &LO) const { StmtPrinterHelper Helper(cfg, LO); print_block(OS, cfg, *this, &Helper, true); } /// printTerminator - A simple pretty printer of the terminator of a CFGBlock. -void CFGBlock::printTerminator(llvm::raw_ostream &OS, +void CFGBlock::printTerminator(raw_ostream &OS, const LangOptions &LO) const { CFGBlockTerminatorPrint TPrinter(OS, NULL, PrintingPolicy(LO)); TPrinter.Visit(const_cast<Stmt*>(getTerminator().getStmt())); } -Stmt* CFGBlock::getTerminatorCondition() { +Stmt *CFGBlock::getTerminatorCondition() { Stmt *Terminator = this->Terminator; if (!Terminator) return NULL; - Expr* E = NULL; + Expr *E = NULL; switch (Terminator->getStmtClass()) { default: @@ -3693,7 +3709,7 @@ struct DOTGraphTraits<const CFG*> : public DefaultDOTGraphTraits { DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {} - static std::string getNodeLabel(const CFGBlock* Node, const CFG* Graph) { + static std::string getNodeLabel(const CFGBlock *Node, const CFG* Graph) { #ifndef NDEBUG std::string OutSStr; diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFGReachabilityAnalysis.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFGReachabilityAnalysis.cpp index 65cd089..e77e72f 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/CFGReachabilityAnalysis.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/CFGReachabilityAnalysis.cpp @@ -40,7 +40,7 @@ bool CFGReverseBlockReachabilityAnalysis::isReachable(const CFGBlock *Src, // Maps reachability to a common node by walking the predecessors of the // destination node. 
void CFGReverseBlockReachabilityAnalysis::mapReachability(const CFGBlock *Dst) { - llvm::SmallVector<const CFGBlock *, 11> worklist; + SmallVector<const CFGBlock *, 11> worklist; llvm::BitVector visited(analyzed.size()); ReachableSet &DstReachability = reachable[Dst->getBlockID()]; diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp index 1fd5eed..16df676 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp @@ -19,7 +19,7 @@ using namespace clang; -typedef llvm::DenseMap<Stmt*,CFGBlock*> SMap; +typedef llvm::DenseMap<const Stmt*, CFGBlock*> SMap; static SMap *AsMap(void *m) { return (SMap*) m; } CFGStmtMap::~CFGStmtMap() { delete AsMap(M); } diff --git a/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp b/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp index 90f7092..8acd189 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp @@ -17,12 +17,9 @@ #include "clang/AST/DeclObjC.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Support/ErrorHandling.h" - using namespace clang; using namespace ento; -using llvm::StringRef; - // The "fundamental rule" for naming conventions of methods: // (url broken into two lines) // http://developer.apple.com/documentation/Cocoa/Conceptual/ @@ -43,6 +40,7 @@ cocoa::NamingConvention cocoa::deriveNamingConvention(Selector S, case OMF_None: case OMF_autorelease: case OMF_dealloc: + case OMF_finalize: case OMF_release: case OMF_retain: case OMF_retainCount: @@ -63,11 +61,11 @@ cocoa::NamingConvention cocoa::deriveNamingConvention(Selector S, return NoConvention; } -bool cocoa::isRefType(QualType RetTy, llvm::StringRef Prefix, - llvm::StringRef Name) { +bool cocoa::isRefType(QualType RetTy, StringRef Prefix, + StringRef Name) { // Recursively walk the typedef stack, allowing typedefs of reference types. while (const TypedefType *TD = dyn_cast<TypedefType>(RetTy.getTypePtr())) { - llvm::StringRef TDName = TD->getDecl()->getIdentifier()->getName(); + StringRef TDName = TD->getDecl()->getIdentifier()->getName(); if (TDName.startswith(Prefix) && TDName.endswith("Ref")) return true; @@ -127,10 +125,16 @@ bool cocoa::isCocoaObjectRef(QualType Ty) { return false; } -bool coreFoundation::followsCreateRule(llvm::StringRef functionName) { - llvm::StringRef::iterator it = functionName.begin(); - llvm::StringRef::iterator start = it; - llvm::StringRef::iterator endI = functionName.end(); +bool coreFoundation::followsCreateRule(const FunctionDecl *fn) { + // For now, *just* base this on the function name, not on anything else. + + const IdentifierInfo *ident = fn->getIdentifier(); + if (!ident) return false; + StringRef functionName = ident->getName(); + + StringRef::iterator it = functionName.begin(); + StringRef::iterator start = it; + StringRef::iterator endI = functionName.end(); while (true) { // Scan for the start of 'create' or 'copy'. @@ -138,6 +142,10 @@ bool coreFoundation::followsCreateRule(llvm::StringRef functionName) { // Search for the first character. It can either be 'C' or 'c'. char ch = *it; if (ch == 'C' || ch == 'c') { + // Make sure this isn't something like 'recreate' or 'Scopy'. 
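The comment above is about rejecting matches buried inside longer words such as "recreate"; the guard that implements it follows next in the diff. As a rough, standalone sketch of the whole create/copy naming heuristic, assuming only the standard library (plain std::string rather than StringRef, and the helper name followsCreateRuleName is invented for illustration):

#include <cctype>
#include <cstddef>
#include <iostream>
#include <string>

// Rough sketch of the Core Foundation create/copy naming heuristic: look for
// "create"/"Create" or "copy"/"Copy" that is a word of its own, not a chunk
// of a longer lowercase identifier such as "recreate" or "Scopy".
static bool followsCreateRuleName(const std::string &name) {
  for (std::size_t i = 0; i < name.size(); ++i) {
    char ch = name[i];
    if (ch != 'C' && ch != 'c')
      continue;
    // A lowercase 'c' preceded by another letter is inside a longer word.
    if (ch == 'c' && i != 0 &&
        std::isalpha(static_cast<unsigned char>(name[i - 1])))
      continue;
    std::size_t next;
    if (name.compare(i + 1, 5, "reate") == 0)
      next = i + 6;
    else if (name.compare(i + 1, 3, "opy") == 0)
      next = i + 4;
    else
      continue;
    // The match must not be followed by a lowercase letter ("createdObject"
    // does not count); otherwise keep scanning.
    if (next >= name.size() ||
        !std::islower(static_cast<unsigned char>(name[next])))
      return true;
  }
  return false;
}

int main() {
  std::cout << followsCreateRuleName("CFArrayCreateCopy") << "\n";  // 1
  std::cout << followsCreateRuleName("recreateWidget") << "\n";     // 0
  std::cout << followsCreateRuleName("MyCopyThing") << "\n";        // 1
}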
+ if (ch == 'c' && it != start && isalpha(*(it - 1))) + continue; + ++it; break; } @@ -149,14 +157,13 @@ bool coreFoundation::followsCreateRule(llvm::StringRef functionName) { // Scan for *lowercase* 'reate' or 'opy', followed by no lowercase // character. - llvm::StringRef suffix = functionName.substr(it - start); + StringRef suffix = functionName.substr(it - start); if (suffix.startswith("reate")) { it += 5; } else if (suffix.startswith("opy")) { it += 3; - } - else { + } else { // Keep scanning. continue; } diff --git a/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp index 7c1e453..a26f0ad 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp @@ -209,8 +209,7 @@ clang::analyze_format_string::ParseLengthModifier(FormatSpecifier &FS, bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const { switch (K) { case InvalidTy: - assert(false && "ArgTypeResult must be valid"); - return true; + llvm_unreachable("ArgTypeResult must be valid"); case UnknownTy: return true; @@ -312,8 +311,7 @@ bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const { QualType ArgTypeResult::getRepresentativeType(ASTContext &C) const { switch (K) { case InvalidTy: - assert(false && "No representative type for Invalid ArgTypeResult"); - // Fall-through. + llvm_unreachable("No representative type for Invalid ArgTypeResult"); case UnknownTy: return QualType(); case SpecificTy: @@ -379,7 +377,7 @@ analyze_format_string::LengthModifier::toString() const { // Methods on OptionalAmount. //===----------------------------------------------------------------------===// -void OptionalAmount::toString(llvm::raw_ostream &os) const { +void OptionalAmount::toString(raw_ostream &os) const { switch (hs) { case Invalid: case NotSpecified: diff --git a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp index 7b36f85..62c5455 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp @@ -1,392 +1,674 @@ -//=- LiveVariables.cpp - Live Variable Analysis for Source CFGs -*- C++ --*-==// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements Live Variables analysis for source-level CFGs. -// -//===----------------------------------------------------------------------===// - #include "clang/Analysis/Analyses/LiveVariables.h" -#include "clang/Basic/SourceManager.h" -#include "clang/AST/ASTContext.h" -#include "clang/AST/Expr.h" +#include "clang/AST/Stmt.h" #include "clang/Analysis/CFG.h" -#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h" -#include "clang/Analysis/FlowSensitive/DataflowSolver.h" -#include "clang/Analysis/Support/SaveAndRestore.h" #include "clang/Analysis/AnalysisContext.h" -#include "llvm/ADT/SmallPtrSet.h" -#include "llvm/ADT/SmallVector.h" -#include "llvm/Support/raw_ostream.h" +#include "clang/AST/StmtVisitor.h" -using namespace clang; - -//===----------------------------------------------------------------------===// -// Useful constants. 
-//===----------------------------------------------------------------------===// +#include "llvm/ADT/PostOrderIterator.h" +#include "llvm/ADT/DenseMap.h" -static const bool Alive = true; -static const bool Dead = false; +#include <deque> +#include <algorithm> +#include <vector> -//===----------------------------------------------------------------------===// -// Dataflow initialization logic. -//===----------------------------------------------------------------------===// +using namespace clang; namespace { -class RegisterDecls - : public CFGRecStmtDeclVisitor<RegisterDecls> { - - LiveVariables::AnalysisDataTy& AD; - typedef llvm::SmallVector<VarDecl*, 20> AlwaysLiveTy; - AlwaysLiveTy AlwaysLive; +// FIXME: This is copy-pasted from ThreadSafety.c. I wanted a patch that +// contained working code before refactoring the implementation of both +// files. +class CFGBlockSet { + llvm::BitVector VisitedBlockIDs; + +public: + // po_iterator requires this iterator, but the only interface needed is the + // value_type typedef. + struct iterator { + typedef const CFGBlock *value_type; + }; + + CFGBlockSet() {} + CFGBlockSet(const CFG *G) : VisitedBlockIDs(G->getNumBlockIDs(), false) {} + + /// \brief Set the bit associated with a particular CFGBlock. + /// This is the important method for the SetType template parameter. + bool insert(const CFGBlock *Block) { + // Note that insert() is called by po_iterator, which doesn't check to make + // sure that Block is non-null. Moreover, the CFGBlock iterator will + // occasionally hand out null pointers for pruned edges, so we catch those + // here. + if (Block == 0) + return false; // if an edge is trivially false. + if (VisitedBlockIDs.test(Block->getBlockID())) + return false; + VisitedBlockIDs.set(Block->getBlockID()); + return true; + } + + /// \brief Check if the bit for a CFGBlock has been already set. + /// This method is for tracking visited blocks in the main threadsafety loop. + /// Block must not be null. + bool alreadySet(const CFGBlock *Block) { + return VisitedBlockIDs.test(Block->getBlockID()); + } +}; +/// \brief We create a helper class which we use to iterate through CFGBlocks in +/// the topological order. +class TopologicallySortedCFG { + typedef llvm::po_iterator<const CFG*, CFGBlockSet, true> po_iterator; + + std::vector<const CFGBlock*> Blocks; + + typedef llvm::DenseMap<const CFGBlock *, unsigned> BlockOrderTy; + BlockOrderTy BlockOrder; + + +public: + typedef std::vector<const CFGBlock*>::reverse_iterator iterator; + + TopologicallySortedCFG(const CFG *CFGraph) { + Blocks.reserve(CFGraph->getNumBlockIDs()); + CFGBlockSet BSet(CFGraph); + + for (po_iterator I = po_iterator::begin(CFGraph, BSet), + E = po_iterator::end(CFGraph, BSet); I != E; ++I) { + BlockOrder[*I] = Blocks.size() + 1; + Blocks.push_back(*I); + } + } + + iterator begin() { + return Blocks.rbegin(); + } + + iterator end() { + return Blocks.rend(); + } + + bool empty() { + return begin() == end(); + } + + struct BlockOrderCompare; + friend struct BlockOrderCompare; + + struct BlockOrderCompare { + const TopologicallySortedCFG &TSC; + public: + BlockOrderCompare(const TopologicallySortedCFG &tsc) : TSC(tsc) {} + + bool operator()(const CFGBlock *b1, const CFGBlock *b2) const { + TopologicallySortedCFG::BlockOrderTy::const_iterator b1It = TSC.BlockOrder.find(b1); + TopologicallySortedCFG::BlockOrderTy::const_iterator b2It = TSC.BlockOrder.find(b2); + + unsigned b1V = (b1It == TSC.BlockOrder.end()) ? 0 : b1It->second; + unsigned b2V = (b2It == TSC.BlockOrder.end()) ? 
0 : b2It->second; + return b1V > b2V; + } + }; + + BlockOrderCompare getComparator() const { + return BlockOrderCompare(*this); + } +}; +class DataflowWorklist { + SmallVector<const CFGBlock *, 20> worklist; + llvm::BitVector enqueuedBlocks; + TopologicallySortedCFG TSC; public: - RegisterDecls(LiveVariables::AnalysisDataTy& ad) : AD(ad) {} + DataflowWorklist(const CFG &cfg) + : enqueuedBlocks(cfg.getNumBlockIDs()), + TSC(&cfg) {} + + void enqueueBlock(const CFGBlock *block); + void enqueueSuccessors(const CFGBlock *block); + void enqueuePredecessors(const CFGBlock *block); - ~RegisterDecls() { + const CFGBlock *dequeue(); - AD.AlwaysLive.resetValues(AD); + void sortWorklist(); +}; - for (AlwaysLiveTy::iterator I = AlwaysLive.begin(), E = AlwaysLive.end(); - I != E; ++ I) - AD.AlwaysLive(*I, AD) = Alive; - } +} - void VisitImplicitParamDecl(ImplicitParamDecl* IPD) { - // Register the VarDecl for tracking. - AD.Register(IPD); +void DataflowWorklist::enqueueBlock(const clang::CFGBlock *block) { + if (block && !enqueuedBlocks[block->getBlockID()]) { + enqueuedBlocks[block->getBlockID()] = true; + worklist.push_back(block); + } +} + +void DataflowWorklist::enqueueSuccessors(const clang::CFGBlock *block) { + const unsigned OldWorklistSize = worklist.size(); + for (CFGBlock::const_succ_iterator I = block->succ_begin(), + E = block->succ_end(); I != E; ++I) { + enqueueBlock(*I); } - void VisitVarDecl(VarDecl* VD) { - // Register the VarDecl for tracking. - AD.Register(VD); + if (OldWorklistSize == 0 || OldWorklistSize == worklist.size()) + return; - // Does the variable have global storage? If so, it is always live. - if (VD->hasGlobalStorage()) - AlwaysLive.push_back(VD); + sortWorklist(); +} + +void DataflowWorklist::enqueuePredecessors(const clang::CFGBlock *block) { + const unsigned OldWorklistSize = worklist.size(); + for (CFGBlock::const_pred_iterator I = block->pred_begin(), + E = block->pred_end(); I != E; ++I) { + enqueueBlock(*I); } + + if (OldWorklistSize == 0 || OldWorklistSize == worklist.size()) + return; - CFG& getCFG() { return AD.getCFG(); } -}; -} // end anonymous namespace + sortWorklist(); +} + +void DataflowWorklist::sortWorklist() { + std::sort(worklist.begin(), worklist.end(), TSC.getComparator()); +} -LiveVariables::LiveVariables(AnalysisContext &AC, bool killAtAssign) { - // Register all referenced VarDecls. - CFG &cfg = *AC.getCFG(); - getAnalysisData().setCFG(cfg); - getAnalysisData().setContext(AC.getASTContext()); - getAnalysisData().AC = &AC; - getAnalysisData().killAtAssign = killAtAssign; - RegisterDecls R(getAnalysisData()); - cfg.VisitBlockStmts(R); +const CFGBlock *DataflowWorklist::dequeue() { + if (worklist.empty()) + return 0; + const CFGBlock *b = worklist.back(); + worklist.pop_back(); + enqueuedBlocks[b->getBlockID()] = false; + return b; +} - // Register all parameters even if they didn't occur in the function body. 
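The DataflowWorklist above deduplicates blocks with a BitVector and keeps pending blocks ordered by their position in a post-order walk of the CFG, so a backward analysis tends to visit a block only after its successors. A minimal standalone sketch of that enqueue/sort/dequeue discipline over plain integer nodes (the Worklist type and the priority vector are invented for the example; the real code derives the ordering from po_iterator):

#include <algorithm>
#include <iostream>
#include <vector>

// Toy worklist: deduplicates with a 'queued' flag and keeps entries sorted by
// a precomputed priority so the highest-priority node is dequeued first.
struct Worklist {
  std::vector<int> items;
  std::vector<bool> queued;
  const std::vector<int> &priority;  // e.g. position in a post-order walk

  explicit Worklist(const std::vector<int> &prio)
      : queued(prio.size(), false), priority(prio) {}

  void enqueue(int n) {
    if (queued[n]) return;
    queued[n] = true;
    items.push_back(n);
    std::sort(items.begin(), items.end(),
              [&](int a, int b) { return priority[a] < priority[b]; });
  }

  int dequeue() {              // returns -1 when empty
    if (items.empty()) return -1;
    int n = items.back();
    items.pop_back();
    queued[n] = false;
    return n;
  }
};

int main() {
  // Node i has priority prio[i]; duplicates are ignored on enqueue.
  std::vector<int> prio = {0, 3, 1, 2};
  Worklist wl(prio);
  wl.enqueue(2); wl.enqueue(1); wl.enqueue(3); wl.enqueue(1);
  for (int n = wl.dequeue(); n != -1; n = wl.dequeue())
    std::cout << n << " ";     // prints: 1 3 2
  std::cout << "\n";
}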
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(AC.getDecl())) - for (FunctionDecl::param_const_iterator PI = FD->param_begin(), - PE = FD->param_end(); PI != PE; ++PI) - getAnalysisData().Register(*PI); +namespace { +class LiveVariablesImpl { +public: + AnalysisContext &analysisContext; + std::vector<LiveVariables::LivenessValues> cfgBlockValues; + llvm::ImmutableSet<const Stmt *>::Factory SSetFact; + llvm::ImmutableSet<const VarDecl *>::Factory DSetFact; + llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksEndToLiveness; + llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksBeginToLiveness; + llvm::DenseMap<const Stmt *, LiveVariables::LivenessValues> stmtsToLiveness; + llvm::DenseMap<const DeclRefExpr *, unsigned> inAssignment; + const bool killAtAssign; + + LiveVariables::LivenessValues + merge(LiveVariables::LivenessValues valsA, + LiveVariables::LivenessValues valsB); + + LiveVariables::LivenessValues runOnBlock(const CFGBlock *block, + LiveVariables::LivenessValues val, + LiveVariables::Observer *obs = 0); + + void dumpBlockLiveness(const SourceManager& M); + + LiveVariablesImpl(AnalysisContext &ac, bool KillAtAssign) + : analysisContext(ac), + SSetFact(false), // Do not canonicalize ImmutableSets by default. + DSetFact(false), // This is a *major* performance win. + killAtAssign(KillAtAssign) {} +}; +} + +static LiveVariablesImpl &getImpl(void *x) { + return *((LiveVariablesImpl *) x); } //===----------------------------------------------------------------------===// -// Transfer functions. +// Operations and queries on LivenessValues. //===----------------------------------------------------------------------===// +bool LiveVariables::LivenessValues::isLive(const Stmt *S) const { + return liveStmts.contains(S); +} + +bool LiveVariables::LivenessValues::isLive(const VarDecl *D) const { + return liveDecls.contains(D); +} + namespace { + template <typename SET> + SET mergeSets(SET A, SET B) { + if (A.isEmpty()) + return B; + + for (typename SET::iterator it = B.begin(), ei = B.end(); it != ei; ++it) { + A = A.add(*it); + } + return A; + } +} -class TransferFuncs : public CFGRecStmtVisitor<TransferFuncs>{ - LiveVariables::AnalysisDataTy& AD; - LiveVariables::ValTy LiveState; - const CFGBlock *currentBlock; -public: - TransferFuncs(LiveVariables::AnalysisDataTy& ad) : AD(ad), currentBlock(0) {} - - LiveVariables::ValTy& getVal() { return LiveState; } - CFG& getCFG() { return AD.getCFG(); } - - void VisitDeclRefExpr(DeclRefExpr* DR); - void VisitBinaryOperator(BinaryOperator* B); - void VisitBlockExpr(BlockExpr *B); - void VisitAssign(BinaryOperator* B); - void VisitDeclStmt(DeclStmt* DS); - void BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S); - void VisitUnaryOperator(UnaryOperator* U); - void Visit(Stmt *S); - void VisitTerminator(CFGBlock* B); +LiveVariables::LivenessValues +LiveVariablesImpl::merge(LiveVariables::LivenessValues valsA, + LiveVariables::LivenessValues valsB) { + + llvm::ImmutableSetRef<const Stmt *> + SSetRefA(valsA.liveStmts.getRootWithoutRetain(), SSetFact.getTreeFactory()), + SSetRefB(valsB.liveStmts.getRootWithoutRetain(), SSetFact.getTreeFactory()); + + + llvm::ImmutableSetRef<const VarDecl *> + DSetRefA(valsA.liveDecls.getRootWithoutRetain(), DSetFact.getTreeFactory()), + DSetRefB(valsB.liveDecls.getRootWithoutRetain(), DSetFact.getTreeFactory()); - /// VisitConditionVariableInit - Handle the initialization of condition - /// variables at branches. 
Valid statements include IfStmt, ForStmt, - /// WhileStmt, and SwitchStmt. - void VisitConditionVariableInit(Stmt *S); - void SetTopValue(LiveVariables::ValTy& V) { - V = AD.AlwaysLive; - } + SSetRefA = mergeSets(SSetRefA, SSetRefB); + DSetRefA = mergeSets(DSetRefA, DSetRefB); - void setCurrentBlock(const CFGBlock *block) { - currentBlock = block; - } -}; + // asImmutableSet() canonicalizes the tree, allowing us to do an easy + // comparison afterwards. + return LiveVariables::LivenessValues(SSetRefA.asImmutableSet(), + DSetRefA.asImmutableSet()); +} -void TransferFuncs::Visit(Stmt *S) { +bool LiveVariables::LivenessValues::equals(const LivenessValues &V) const { + return liveStmts == V.liveStmts && liveDecls == V.liveDecls; +} - if (S == getCurrentBlkStmt()) { +//===----------------------------------------------------------------------===// +// Query methods. +//===----------------------------------------------------------------------===// - if (AD.Observer) - AD.Observer->ObserveStmt(S, currentBlock, AD, LiveState); +static bool isAlwaysAlive(const VarDecl *D) { + return D->hasGlobalStorage(); +} - if (getCFG().isBlkExpr(S)) - LiveState(S, AD) = Dead; +bool LiveVariables::isLive(const CFGBlock *B, const VarDecl *D) { + return isAlwaysAlive(D) || getImpl(impl).blocksEndToLiveness[B].isLive(D); +} - StmtVisitor<TransferFuncs,void>::Visit(S); - } - else if (!getCFG().isBlkExpr(S)) { +bool LiveVariables::isLive(const Stmt *S, const VarDecl *D) { + return isAlwaysAlive(D) || getImpl(impl).stmtsToLiveness[S].isLive(D); +} - if (AD.Observer) - AD.Observer->ObserveStmt(S, currentBlock, AD, LiveState); +bool LiveVariables::isLive(const Stmt *Loc, const Stmt *S) { + return getImpl(impl).stmtsToLiveness[Loc].isLive(S); +} - StmtVisitor<TransferFuncs,void>::Visit(S); +//===----------------------------------------------------------------------===// +// Dataflow computation. +//===----------------------------------------------------------------------===// - } - else { - // For block-level expressions, mark that they are live. 
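The merge above is simply a union of the successor blocks' liveness facts, carried out on immutable sets so that an unchanged result can be detected cheaply afterwards. A rough standalone equivalent with std::set, ignoring the immutability trick (the LiveSet name and the variable names are invented):

#include <iostream>
#include <set>
#include <string>

using LiveSet = std::set<std::string>;

// Backward-dataflow merge: a variable is live at the end of a block if it is
// live at the start of any successor.
static LiveSet mergeLive(const LiveSet &a, const LiveSet &b) {
  LiveSet result = a;
  result.insert(b.begin(), b.end());
  return result;
}

int main() {
  LiveSet thenSucc = {"x", "y"};  // live at the start of the 'then' successor
  LiveSet elseSucc = {"y", "z"};  // live at the start of the 'else' successor
  LiveSet atBranch = mergeLive(thenSucc, elseSucc);
  for (const std::string &v : atBranch)
    std::cout << v << " ";        // prints: x y z
  std::cout << "\n";
}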
- LiveState(S, AD) = Alive; - } +namespace { +class TransferFunctions : public StmtVisitor<TransferFunctions> { + LiveVariablesImpl &LV; + LiveVariables::LivenessValues &val; + LiveVariables::Observer *observer; + const CFGBlock *currentBlock; +public: + TransferFunctions(LiveVariablesImpl &im, + LiveVariables::LivenessValues &Val, + LiveVariables::Observer *Observer, + const CFGBlock *CurrentBlock) + : LV(im), val(Val), observer(Observer), currentBlock(CurrentBlock) {} + + void VisitBinaryOperator(BinaryOperator *BO); + void VisitBlockExpr(BlockExpr *BE); + void VisitDeclRefExpr(DeclRefExpr *DR); + void VisitDeclStmt(DeclStmt *DS); + void VisitObjCForCollectionStmt(ObjCForCollectionStmt *OS); + void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *UE); + void VisitUnaryOperator(UnaryOperator *UO); + void Visit(Stmt *S); +}; } + +static const VariableArrayType *FindVA(QualType Ty) { + const Type *ty = Ty.getTypePtr(); + while (const ArrayType *VT = dyn_cast<ArrayType>(ty)) { + if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(VT)) + if (VAT->getSizeExpr()) + return VAT; + + ty = VT->getElementType().getTypePtr(); + } -void TransferFuncs::VisitConditionVariableInit(Stmt *S) { - assert(!getCFG().isBlkExpr(S)); - CFGRecStmtVisitor<TransferFuncs>::VisitConditionVariableInit(S); + return 0; } -void TransferFuncs::VisitTerminator(CFGBlock* B) { - - const Stmt* E = B->getTerminatorCondition(); - - if (!E) - return; +void TransferFunctions::Visit(Stmt *S) { + if (observer) + observer->observeStmt(S, currentBlock, val); + + StmtVisitor<TransferFunctions>::Visit(S); + + if (isa<Expr>(S)) { + val.liveStmts = LV.SSetFact.remove(val.liveStmts, S); + } - assert (getCFG().isBlkExpr(E)); - LiveState(E, AD) = Alive; + // Mark all children expressions live. + + switch (S->getStmtClass()) { + default: + break; + case Stmt::StmtExprClass: { + // For statement expressions, look through the compound statement. + S = cast<StmtExpr>(S)->getSubStmt(); + break; + } + case Stmt::CXXMemberCallExprClass: { + // Include the implicit "this" pointer as being live. + CXXMemberCallExpr *CE = cast<CXXMemberCallExpr>(S); + if (Expr *ImplicitObj = CE->getImplicitObjectArgument()) { + ImplicitObj = ImplicitObj->IgnoreParens(); + val.liveStmts = LV.SSetFact.add(val.liveStmts, ImplicitObj); + } + break; + } + case Stmt::DeclStmtClass: { + const DeclStmt *DS = cast<DeclStmt>(S); + if (const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl())) { + for (const VariableArrayType* VA = FindVA(VD->getType()); + VA != 0; VA = FindVA(VA->getElementType())) { + val.liveStmts = LV.SSetFact.add(val.liveStmts, + VA->getSizeExpr()->IgnoreParens()); + } + } + break; + } + // FIXME: These cases eventually shouldn't be needed. + case Stmt::ExprWithCleanupsClass: { + S = cast<ExprWithCleanups>(S)->getSubExpr(); + break; + } + case Stmt::CXXBindTemporaryExprClass: { + S = cast<CXXBindTemporaryExpr>(S)->getSubExpr(); + break; + } + case Stmt::UnaryExprOrTypeTraitExprClass: { + // No need to unconditionally visit subexpressions. 
+ return; + } + } + + for (Stmt::child_iterator it = S->child_begin(), ei = S->child_end(); + it != ei; ++it) { + if (Stmt *child = *it) { + if (Expr *Ex = dyn_cast<Expr>(child)) + child = Ex->IgnoreParens(); + + val.liveStmts = LV.SSetFact.add(val.liveStmts, child); + } + } } -void TransferFuncs::VisitDeclRefExpr(DeclRefExpr* DR) { - if (VarDecl* V = dyn_cast<VarDecl>(DR->getDecl())) - LiveState(V, AD) = Alive; +void TransferFunctions::VisitBinaryOperator(BinaryOperator *B) { + if (B->isAssignmentOp()) { + if (!LV.killAtAssign) + return; + + // Assigning to a variable? + Expr *LHS = B->getLHS()->IgnoreParens(); + + if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(LHS)) + if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { + // Assignments to references don't kill the ref's address + if (VD->getType()->isReferenceType()) + return; + + if (!isAlwaysAlive(VD)) { + // The variable is now dead. + val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD); + } + + if (observer) + observer->observerKill(DR); + } + } } - -void TransferFuncs::VisitBlockExpr(BlockExpr *BE) { + +void TransferFunctions::VisitBlockExpr(BlockExpr *BE) { AnalysisContext::referenced_decls_iterator I, E; - llvm::tie(I, E) = AD.AC->getReferencedBlockVars(BE->getBlockDecl()); + llvm::tie(I, E) = + LV.analysisContext.getReferencedBlockVars(BE->getBlockDecl()); for ( ; I != E ; ++I) { - DeclBitVector_Types::Idx i = AD.getIdx(*I); - if (i.isValid()) - LiveState.getBit(i) = Alive; + const VarDecl *VD = *I; + if (isAlwaysAlive(VD)) + continue; + val.liveDecls = LV.DSetFact.add(val.liveDecls, VD); } } -void TransferFuncs::VisitBinaryOperator(BinaryOperator* B) { - if (B->isAssignmentOp()) VisitAssign(B); - else VisitStmt(B); +void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *DR) { + if (const VarDecl *D = dyn_cast<VarDecl>(DR->getDecl())) + if (!isAlwaysAlive(D) && LV.inAssignment.find(DR) == LV.inAssignment.end()) + val.liveDecls = LV.DSetFact.add(val.liveDecls, D); } -void -TransferFuncs::BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) { - - // This is a block-level expression. Its value is 'dead' before this point. - LiveState(S, AD) = Dead; - - // This represents a 'use' of the collection. - Visit(S->getCollection()); +void TransferFunctions::VisitDeclStmt(DeclStmt *DS) { + for (DeclStmt::decl_iterator DI=DS->decl_begin(), DE = DS->decl_end(); + DI != DE; ++DI) + if (VarDecl *VD = dyn_cast<VarDecl>(*DI)) { + if (!isAlwaysAlive(VD)) + val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD); + } +} - // This represents a 'kill' for the variable. - Stmt* Element = S->getElement(); - DeclRefExpr* DR = 0; - VarDecl* VD = 0; +void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *OS) { + // Kill the iteration variable. 
+ DeclRefExpr *DR = 0; + const VarDecl *VD = 0; - if (DeclStmt* DS = dyn_cast<DeclStmt>(Element)) + Stmt *element = OS->getElement(); + if (DeclStmt *DS = dyn_cast<DeclStmt>(element)) { VD = cast<VarDecl>(DS->getSingleDecl()); - else { - Expr* ElemExpr = cast<Expr>(Element)->IgnoreParens(); - if ((DR = dyn_cast<DeclRefExpr>(ElemExpr))) - VD = cast<VarDecl>(DR->getDecl()); - else { - Visit(ElemExpr); - return; - } } - + else if ((DR = dyn_cast<DeclRefExpr>(cast<Expr>(element)->IgnoreParens()))) { + VD = cast<VarDecl>(DR->getDecl()); + } + if (VD) { - LiveState(VD, AD) = Dead; - if (AD.Observer && DR) { AD.Observer->ObserverKill(DR); } + val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD); + if (observer && DR) + observer->observerKill(DR); } } +void TransferFunctions:: +VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *UE) +{ + // While sizeof(var) doesn't technically extend the liveness of 'var', it + // does extent the liveness of metadata if 'var' is a VariableArrayType. + // We handle that special case here. + if (UE->getKind() != UETT_SizeOf || UE->isArgumentType()) + return; -void TransferFuncs::VisitUnaryOperator(UnaryOperator* U) { - Expr *E = U->getSubExpr(); + const Expr *subEx = UE->getArgumentExpr(); + if (subEx->getType()->isVariableArrayType()) { + assert(subEx->isLValue()); + val.liveStmts = LV.SSetFact.add(val.liveStmts, subEx->IgnoreParens()); + } +} - switch (U->getOpcode()) { +void TransferFunctions::VisitUnaryOperator(UnaryOperator *UO) { + // Treat ++/-- as a kill. + // Note we don't actually have to do anything if we don't have an observer, + // since a ++/-- acts as both a kill and a "use". + if (!observer) + return; + + switch (UO->getOpcode()) { + default: + return; case UO_PostInc: - case UO_PostDec: + case UO_PostDec: case UO_PreInc: case UO_PreDec: - // Walk through the subexpressions, blasting through ParenExprs - // until we either find a DeclRefExpr or some non-DeclRefExpr - // expression. - if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(E->IgnoreParens())) - if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl())) { - // Treat the --/++ operator as a kill. - if (AD.Observer) { AD.Observer->ObserverKill(DR); } - LiveState(VD, AD) = Alive; - return VisitDeclRefExpr(DR); - } - - // Fall-through. - - default: - return Visit(E); + break; } + + if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(UO->getSubExpr()->IgnoreParens())) + if (isa<VarDecl>(DR->getDecl())) { + // Treat ++/-- as a kill. + observer->observerKill(DR); + } } -void TransferFuncs::VisitAssign(BinaryOperator* B) { - Expr* LHS = B->getLHS(); +LiveVariables::LivenessValues +LiveVariablesImpl::runOnBlock(const CFGBlock *block, + LiveVariables::LivenessValues val, + LiveVariables::Observer *obs) { - // Assigning to a variable? - if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(LHS->IgnoreParens())) { - // Assignments to references don't kill the ref's address - if (DR->getDecl()->getType()->isReferenceType()) { - VisitDeclRefExpr(DR); - } else { - if (AD.killAtAssign) { - // Update liveness inforamtion. - unsigned bit = AD.getIdx(DR->getDecl()); - LiveState.getDeclBit(bit) = Dead | AD.AlwaysLive.getDeclBit(bit); - - if (AD.Observer) { AD.Observer->ObserverKill(DR); } - } - // Handle things like +=, etc., which also generate "uses" - // of a variable. Do this just by visiting the subexpression. - if (B->getOpcode() != BO_Assign) - VisitDeclRefExpr(DR); - } + TransferFunctions TF(*this, val, obs, block); + + // Visit the terminator (if any). 
+ if (const Stmt *term = block->getTerminator()) + TF.Visit(const_cast<Stmt*>(term)); + + // Apply the transfer function for all Stmts in the block. + for (CFGBlock::const_reverse_iterator it = block->rbegin(), + ei = block->rend(); it != ei; ++it) { + const CFGElement &elem = *it; + if (!isa<CFGStmt>(elem)) + continue; + + const Stmt *S = cast<CFGStmt>(elem).getStmt(); + TF.Visit(const_cast<Stmt*>(S)); + stmtsToLiveness[S] = val; } - else // Not assigning to a variable. Process LHS as usual. - Visit(LHS); - - Visit(B->getRHS()); + return val; } -void TransferFuncs::VisitDeclStmt(DeclStmt* DS) { - // Declarations effectively "kill" a variable since they cannot - // possibly be live before they are declared. - for (DeclStmt::decl_iterator DI=DS->decl_begin(), DE = DS->decl_end(); - DI != DE; ++DI) - if (VarDecl* VD = dyn_cast<VarDecl>(*DI)) { - // Update liveness information by killing the VarDecl. - unsigned bit = AD.getIdx(VD); - LiveState.getDeclBit(bit) = Dead | AD.AlwaysLive.getDeclBit(bit); - - // The initializer is evaluated after the variable comes into scope, but - // before the DeclStmt (which binds the value to the variable). - // Since this is a reverse dataflow analysis, we must evaluate the - // transfer function for this expression after the DeclStmt. If the - // initializer references the variable (which is bad) then we extend - // its liveness. - if (Expr* Init = VD->getInit()) - Visit(Init); - - if (const VariableArrayType* VT = - AD.getContext().getAsVariableArrayType(VD->getType())) { - StmtIterator I(const_cast<VariableArrayType*>(VT)); - StmtIterator E; - for (; I != E; ++I) Visit(*I); - } - } +void LiveVariables::runOnAllBlocks(LiveVariables::Observer &obs) { + const CFG *cfg = getImpl(impl).analysisContext.getCFG(); + for (CFG::const_iterator it = cfg->begin(), ei = cfg->end(); it != ei; ++it) + getImpl(impl).runOnBlock(*it, getImpl(impl).blocksEndToLiveness[*it], &obs); } -} // end anonymous namespace +LiveVariables::LiveVariables(void *im) : impl(im) {} -//===----------------------------------------------------------------------===// -// Merge operator: if something is live on any successor block, it is live -// in the current block (a set union). -//===----------------------------------------------------------------------===// - -namespace { - typedef StmtDeclBitVector_Types::Union Merge; - typedef DataflowSolver<LiveVariables, TransferFuncs, Merge> Solver; -} // end anonymous namespace - -//===----------------------------------------------------------------------===// -// External interface to run Liveness analysis. -//===----------------------------------------------------------------------===// - -void LiveVariables::runOnCFG(CFG& cfg) { - Solver S(*this); - S.runOnCFG(cfg); -} - -void LiveVariables::runOnAllBlocks(const CFG& cfg, - LiveVariables::ObserverTy* Obs, - bool recordStmtValues) { - Solver S(*this); - SaveAndRestore<LiveVariables::ObserverTy*> SRObs(getAnalysisData().Observer, - Obs); - S.runOnAllBlocks(cfg, recordStmtValues); +LiveVariables::~LiveVariables() { + delete (LiveVariablesImpl*) impl; } -//===----------------------------------------------------------------------===// -// liveness queries -// - -bool LiveVariables::isLive(const CFGBlock* B, const VarDecl* D) const { - DeclBitVector_Types::Idx i = getAnalysisData().getIdx(D); - return i.isValid() ? getBlockData(B).getBit(i) : false; +LiveVariables * +LiveVariables::computeLiveness(AnalysisContext &AC, + bool killAtAssign) { + + // No CFG? Bail out. 
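runOnBlock above applies the transfer function to a block's statements in reverse, and the loop that follows in computeLiveness merges successor values, re-runs the block, and enqueues predecessors until nothing changes. The same backward fixed-point scheme can be shown self-contained on a hand-built three-block graph, using textbook use/def sets instead of clang's CFG elements (the Block struct and the example blocks are invented for illustration):

#include <cstddef>
#include <iostream>
#include <set>
#include <string>
#include <vector>

using VarSet = std::set<std::string>;

struct Block {
  VarSet use;              // variables read before any write in the block
  VarSet def;              // variables written in the block
  std::vector<int> succs;  // successor block indices
};

// live-in(B) = use(B) U (live-out(B) \ def(B)); live-out(B) = U live-in(succ)
int main() {
  //  B0: x = 1        -> B1
  //  B1: if (x) ...   -> B2, B0   (a loop back edge for illustration)
  //  B2: return y
  std::vector<Block> cfg = {
    {{}, {"x"}, {1}},
    {{"x"}, {}, {2, 0}},
    {{"y"}, {}, {}},
  };

  std::vector<VarSet> liveIn(cfg.size()), liveOut(cfg.size());
  bool changed = true;
  while (changed) {                          // iterate to a fixed point
    changed = false;
    for (int b = static_cast<int>(cfg.size()) - 1; b >= 0; --b) {
      VarSet out;
      for (int s : cfg[b].succs)             // merge successors
        out.insert(liveIn[s].begin(), liveIn[s].end());
      VarSet in = cfg[b].use;                // transfer function
      for (const std::string &v : out)
        if (!cfg[b].def.count(v))
          in.insert(v);
      if (out != liveOut[b] || in != liveIn[b]) {
        liveOut[b] = out;
        liveIn[b] = in;
        changed = true;
      }
    }
  }

  for (std::size_t b = 0; b < cfg.size(); ++b) {
    std::cout << "B" << b << " live-out:";
    for (const std::string &v : liveOut[b]) std::cout << " " << v;
    std::cout << "\n";
  }
}

The diff's version differs mainly in that the facts are produced per statement by the TransferFunctions visitor rather than by precomputed use/def sets, and blocks are revisited through the sorted worklist instead of a full sweep.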
+ CFG *cfg = AC.getCFG(); + if (!cfg) + return 0; + + LiveVariablesImpl *LV = new LiveVariablesImpl(AC, killAtAssign); + + // Construct the dataflow worklist. Enqueue the exit block as the + // start of the analysis. + DataflowWorklist worklist(*cfg); + llvm::BitVector everAnalyzedBlock(cfg->getNumBlockIDs()); + + // FIXME: we should enqueue using post order. + for (CFG::const_iterator it = cfg->begin(), ei = cfg->end(); it != ei; ++it) { + const CFGBlock *block = *it; + worklist.enqueueBlock(block); + + // FIXME: Scan for DeclRefExprs using in the LHS of an assignment. + // We need to do this because we lack context in the reverse analysis + // to determine if a DeclRefExpr appears in such a context, and thus + // doesn't constitute a "use". + if (killAtAssign) + for (CFGBlock::const_iterator bi = block->begin(), be = block->end(); + bi != be; ++bi) { + if (const CFGStmt *cs = bi->getAs<CFGStmt>()) { + if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(cs->getStmt())) { + if (BO->getOpcode() == BO_Assign) { + if (const DeclRefExpr *DR = + dyn_cast<DeclRefExpr>(BO->getLHS()->IgnoreParens())) { + LV->inAssignment[DR] = 1; + } + } + } + } + } + } + + worklist.sortWorklist(); + + while (const CFGBlock *block = worklist.dequeue()) { + // Determine if the block's end value has changed. If not, we + // have nothing left to do for this block. + LivenessValues &prevVal = LV->blocksEndToLiveness[block]; + + // Merge the values of all successor blocks. + LivenessValues val; + for (CFGBlock::const_succ_iterator it = block->succ_begin(), + ei = block->succ_end(); it != ei; ++it) { + if (const CFGBlock *succ = *it) { + val = LV->merge(val, LV->blocksBeginToLiveness[succ]); + } + } + + if (!everAnalyzedBlock[block->getBlockID()]) + everAnalyzedBlock[block->getBlockID()] = true; + else if (prevVal.equals(val)) + continue; + + prevVal = val; + + // Update the dataflow value for the start of this block. + LV->blocksBeginToLiveness[block] = LV->runOnBlock(block, val); + + // Enqueue the value to the predecessors. + worklist.enqueuePredecessors(block); + } + + return new LiveVariables(LV); } -bool LiveVariables::isLive(const ValTy& Live, const VarDecl* D) const { - DeclBitVector_Types::Idx i = getAnalysisData().getIdx(D); - return i.isValid() ? 
Live.getBit(i) : false; +static bool compare_entries(const CFGBlock *A, const CFGBlock *B) { + return A->getBlockID() < B->getBlockID(); } -bool LiveVariables::isLive(const Stmt* Loc, const Stmt* StmtVal) const { - return getStmtData(Loc)(StmtVal,getAnalysisData()); +static bool compare_vd_entries(const Decl *A, const Decl *B) { + SourceLocation ALoc = A->getLocStart(); + SourceLocation BLoc = B->getLocStart(); + return ALoc.getRawEncoding() < BLoc.getRawEncoding(); } -bool LiveVariables::isLive(const Stmt* Loc, const VarDecl* D) const { - return getStmtData(Loc)(D,getAnalysisData()); +void LiveVariables::dumpBlockLiveness(const SourceManager &M) { + getImpl(impl).dumpBlockLiveness(M); } -//===----------------------------------------------------------------------===// -// printing liveness state for debugging -// +void LiveVariablesImpl::dumpBlockLiveness(const SourceManager &M) { + std::vector<const CFGBlock *> vec; + for (llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues>::iterator + it = blocksEndToLiveness.begin(), ei = blocksEndToLiveness.end(); + it != ei; ++it) { + vec.push_back(it->first); + } + std::sort(vec.begin(), vec.end(), compare_entries); -void LiveVariables::dumpLiveness(const ValTy& V, const SourceManager& SM) const { - const AnalysisDataTy& AD = getAnalysisData(); + std::vector<const VarDecl*> declVec; - for (AnalysisDataTy::decl_iterator I = AD.begin_decl(), - E = AD.end_decl(); I!=E; ++I) - if (V.getDeclBit(I->second)) { - llvm::errs() << " " << I->first->getIdentifier()->getName() << " <"; - I->first->getLocation().dump(SM); + for (std::vector<const CFGBlock *>::iterator + it = vec.begin(), ei = vec.end(); it != ei; ++it) { + llvm::errs() << "\n[ B" << (*it)->getBlockID() + << " (live variables at block exit) ]\n"; + + LiveVariables::LivenessValues vals = blocksEndToLiveness[*it]; + declVec.clear(); + + for (llvm::ImmutableSet<const VarDecl *>::iterator si = + vals.liveDecls.begin(), + se = vals.liveDecls.end(); si != se; ++si) { + declVec.push_back(*si); + } + + std::sort(declVec.begin(), declVec.end(), compare_vd_entries); + + for (std::vector<const VarDecl*>::iterator di = declVec.begin(), + de = declVec.end(); di != de; ++di) { + llvm::errs() << " " << (*di)->getDeclName().getAsString() + << " <"; + (*di)->getLocation().dump(M); llvm::errs() << ">\n"; } -} - -void LiveVariables::dumpBlockLiveness(const SourceManager& M) const { - for (BlockDataMapTy::const_iterator I = getBlockDataMap().begin(), - E = getBlockDataMap().end(); I!=E; ++I) { - llvm::errs() << "\n[ B" << I->first->getBlockID() - << " (live variables at block exit) ]\n"; - dumpLiveness(I->second,M); } - - llvm::errs() << "\n"; + llvm::errs() << "\n"; } + +const void *LiveVariables::getTag() { static int x; return &x; } +const void *RelaxedLiveVariables::getTag() { static int x; return &x; } diff --git a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp index ce2690f..2bb39cf 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp @@ -38,8 +38,7 @@ static bool ParsePrecision(FormatStringHandler &H, PrintfSpecifier &FS, unsigned *argIndex) { if (argIndex) { FS.setPrecision(ParseNonPositionAmount(Beg, E, *argIndex)); - } - else { + } else { const OptionalAmount Amt = ParsePositionAmount(H, Start, Beg, E, analyze_format_string::PrecisionPos); if (Amt.isInvalid()) @@ -404,6 +403,7 @@ bool PrintfSpecifier::fixType(QualType QT) { case 
BuiltinType::Char32: case BuiltinType::UInt128: case BuiltinType::Int128: + case BuiltinType::Half: // Integral types which are non-trivial to correct. return false; @@ -477,15 +477,14 @@ bool PrintfSpecifier::fixType(QualType QT) { CS.setKind(ConversionSpecifier::uArg); HasAlternativeForm = 0; HasPlusPrefix = 0; - } - else { - assert(0 && "Unexpected type"); + } else { + llvm_unreachable("Unexpected type"); } return true; } -void PrintfSpecifier::toString(llvm::raw_ostream &os) const { +void PrintfSpecifier::toString(raw_ostream &os) const { // Whilst some features have no defined order, we are using the order // appearing in the C99 standard (ISO/IEC 9899:1999 (E) 7.19.6.1) os << "%"; diff --git a/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp b/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp new file mode 100644 index 0000000..3a0bbd5 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp @@ -0,0 +1,51 @@ +//==- ProgramPoint.cpp - Program Points for Path-Sensitive Analysis -*- C++ -*-/ +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the interface ProgramPoint, which identifies a +// distinct location in a function. +// +//===----------------------------------------------------------------------===// + +#include "clang/Analysis/ProgramPoint.h" + +using namespace clang; + +ProgramPointTag::~ProgramPointTag() {} + +ProgramPoint ProgramPoint::getProgramPoint(const Stmt *S, ProgramPoint::Kind K, + const LocationContext *LC, + const ProgramPointTag *tag){ + switch (K) { + default: + llvm_unreachable("Unhandled ProgramPoint kind"); + case ProgramPoint::PreStmtKind: + return PreStmt(S, LC, tag); + case ProgramPoint::PostStmtKind: + return PostStmt(S, LC, tag); + case ProgramPoint::PreLoadKind: + return PreLoad(S, LC, tag); + case ProgramPoint::PostLoadKind: + return PostLoad(S, LC, tag); + case ProgramPoint::PreStoreKind: + return PreStore(S, LC, tag); + case ProgramPoint::PostStoreKind: + return PostStore(S, LC, tag); + case ProgramPoint::PostLValueKind: + return PostLValue(S, LC, tag); + case ProgramPoint::PostPurgeDeadSymbolsKind: + return PostPurgeDeadSymbols(S, LC, tag); + } +} + +SimpleProgramPointTag::SimpleProgramPointTag(StringRef description) + : desc(description) {} + +StringRef SimpleProgramPointTag::getTagDescription() const { + return desc; +} diff --git a/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp b/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp index ff96eb4..8f24c43 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp @@ -83,7 +83,7 @@ void PseudoConstantAnalysis::RunAnalysis() { WorkList.push_back(DeclBody); while (!WorkList.empty()) { - const Stmt* Head = WorkList.front(); + const Stmt *Head = WorkList.front(); WorkList.pop_front(); if (const Expr *Ex = dyn_cast<Expr>(Head)) diff --git a/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp b/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp index c5b17fc..4931771 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp @@ -25,22 +25,163 @@ using namespace clang; -static SourceLocation GetUnreachableLoc(const CFGBlock &b, SourceRange &R1, - SourceRange &R2) 
{ - const Stmt *S = 0; - unsigned sn = 0; - R1 = R2 = SourceRange(); +namespace { +class DeadCodeScan { + llvm::BitVector Visited; + llvm::BitVector &Reachable; + llvm::SmallVector<const CFGBlock *, 10> WorkList; + + typedef llvm::SmallVector<std::pair<const CFGBlock *, const Stmt *>, 12> + DeferredLocsTy; + + DeferredLocsTy DeferredLocs; + +public: + DeadCodeScan(llvm::BitVector &reachable) + : Visited(reachable.size()), + Reachable(reachable) {} + + void enqueue(const CFGBlock *block); + unsigned scanBackwards(const CFGBlock *Start, + clang::reachable_code::Callback &CB); + + bool isDeadCodeRoot(const CFGBlock *Block); + + const Stmt *findDeadCode(const CFGBlock *Block); + + void reportDeadCode(const Stmt *S, + clang::reachable_code::Callback &CB); +}; +} + +void DeadCodeScan::enqueue(const CFGBlock *block) { + unsigned blockID = block->getBlockID(); + if (Reachable[blockID] || Visited[blockID]) + return; + Visited[blockID] = true; + WorkList.push_back(block); +} + +bool DeadCodeScan::isDeadCodeRoot(const clang::CFGBlock *Block) { + bool isDeadRoot = true; + + for (CFGBlock::const_pred_iterator I = Block->pred_begin(), + E = Block->pred_end(); I != E; ++I) { + if (const CFGBlock *PredBlock = *I) { + unsigned blockID = PredBlock->getBlockID(); + if (Visited[blockID]) { + isDeadRoot = false; + continue; + } + if (!Reachable[blockID]) { + isDeadRoot = false; + Visited[blockID] = true; + WorkList.push_back(PredBlock); + continue; + } + } + } + + return isDeadRoot; +} + +static bool isValidDeadStmt(const Stmt *S) { + if (S->getLocStart().isInvalid()) + return false; + if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) + return BO->getOpcode() != BO_Comma; + return true; +} + +const Stmt *DeadCodeScan::findDeadCode(const clang::CFGBlock *Block) { + for (CFGBlock::const_iterator I = Block->begin(), E = Block->end(); I!=E; ++I) + if (const CFGStmt *CS = I->getAs<CFGStmt>()) { + const Stmt *S = CS->getStmt(); + if (isValidDeadStmt(S)) + return S; + } + + if (CFGTerminator T = Block->getTerminator()) { + const Stmt *S = T.getStmt(); + if (isValidDeadStmt(S)) + return S; + } + + return 0; +} + +static int SrcCmp(const void *p1, const void *p2) { + return + ((std::pair<const CFGBlock *, const Stmt *>*) p2)->second->getLocStart() < + ((std::pair<const CFGBlock *, const Stmt *>*) p1)->second->getLocStart(); +} + +unsigned DeadCodeScan::scanBackwards(const clang::CFGBlock *Start, + clang::reachable_code::Callback &CB) { + + unsigned count = 0; + enqueue(Start); + + while (!WorkList.empty()) { + const CFGBlock *Block = WorkList.pop_back_val(); + + // It is possible that this block has been marked reachable after + // it was enqueued. + if (Reachable[Block->getBlockID()]) + continue; + + // Look for any dead code within the block. + const Stmt *S = findDeadCode(Block); + + if (!S) { + // No dead code. Possibly an empty block. Look at dead predecessors. + for (CFGBlock::const_pred_iterator I = Block->pred_begin(), + E = Block->pred_end(); I != E; ++I) { + if (const CFGBlock *predBlock = *I) + enqueue(predBlock); + } + continue; + } + + // Specially handle macro-expanded code. + if (S->getLocStart().isMacroID()) { + count += clang::reachable_code::ScanReachableFromBlock(Block, Reachable); + continue; + } + + if (isDeadCodeRoot(Block)) { + reportDeadCode(S, CB); + count += clang::reachable_code::ScanReachableFromBlock(Block, Reachable); + } + else { + // Record this statement as the possibly best location in a + // strongly-connected component of dead code for emitting a + // warning. 
+ DeferredLocs.push_back(std::make_pair(Block, S)); + } + } - if (sn < b.size()) { - const CFGStmt *CS = b[sn].getAs<CFGStmt>(); - if (!CS) - return SourceLocation(); + // If we didn't find a dead root, then report the dead code with the + // earliest location. + if (!DeferredLocs.empty()) { + llvm::array_pod_sort(DeferredLocs.begin(), DeferredLocs.end(), SrcCmp); + for (DeferredLocsTy::iterator I = DeferredLocs.begin(), + E = DeferredLocs.end(); I != E; ++I) { + const CFGBlock *block = I->first; + if (Reachable[block->getBlockID()]) + continue; + reportDeadCode(I->second, CB); + count += clang::reachable_code::ScanReachableFromBlock(block, Reachable); + } + } + + return count; +} - S = CS->getStmt(); - } else if (b.getTerminator()) - S = b.getTerminator(); - else - return SourceLocation(); +static SourceLocation GetUnreachableLoc(const Stmt *S, + SourceRange &R1, + SourceRange &R2) { + R1 = R2 = SourceRange(); if (const Expr *Ex = dyn_cast<Expr>(S)) S = Ex->IgnoreParenImpCasts(); @@ -48,24 +189,6 @@ static SourceLocation GetUnreachableLoc(const CFGBlock &b, SourceRange &R1, switch (S->getStmtClass()) { case Expr::BinaryOperatorClass: { const BinaryOperator *BO = cast<BinaryOperator>(S); - if (BO->getOpcode() == BO_Comma) { - if (sn+1 < b.size()) - return b[sn+1].getAs<CFGStmt>()->getStmt()->getLocStart(); - const CFGBlock *n = &b; - while (1) { - if (n->getTerminator()) - return n->getTerminator()->getLocStart(); - if (n->succ_size() != 1) - return SourceLocation(); - n = n[0].succ_begin()[0]; - if (n->pred_size() != 1) - return SourceLocation(); - if (!n->empty()) - return n[0][0].getAs<CFGStmt>()->getStmt()->getLocStart(); - } - } - R1 = BO->getLHS()->getSourceRange(); - R2 = BO->getRHS()->getSourceRange(); return BO->getOperatorLoc(); } case Expr::UnaryOperatorClass: { @@ -120,177 +243,87 @@ static SourceLocation GetUnreachableLoc(const CFGBlock &b, SourceRange &R1, return S->getLocStart(); } -static SourceLocation MarkLiveTop(const CFGBlock *Start, - llvm::BitVector &reachable, - SourceManager &SM) { - - // Prep work worklist. 
- llvm::SmallVector<const CFGBlock*, 32> WL; - WL.push_back(Start); - +void DeadCodeScan::reportDeadCode(const Stmt *S, + clang::reachable_code::Callback &CB) { SourceRange R1, R2; - SourceLocation top = GetUnreachableLoc(*Start, R1, R2); - - bool FromMainFile = false; - bool FromSystemHeader = false; - bool TopValid = false; - - if (top.isValid()) { - FromMainFile = SM.isFromMainFile(top); - FromSystemHeader = SM.isInSystemHeader(top); - TopValid = true; - } - - // Solve - CFGBlock::FilterOptions FO; - FO.IgnoreDefaultsWithCoveredEnums = 1; - - while (!WL.empty()) { - const CFGBlock *item = WL.back(); - WL.pop_back(); - - SourceLocation c = GetUnreachableLoc(*item, R1, R2); - if (c.isValid() - && (!TopValid - || (SM.isFromMainFile(c) && !FromMainFile) - || (FromSystemHeader && !SM.isInSystemHeader(c)) - || SM.isBeforeInTranslationUnit(c, top))) { - top = c; - FromMainFile = SM.isFromMainFile(top); - FromSystemHeader = SM.isInSystemHeader(top); - } - - reachable.set(item->getBlockID()); - for (CFGBlock::filtered_succ_iterator I = - item->filtered_succ_start_end(FO); I.hasMore(); ++I) - if (const CFGBlock *B = *I) { - unsigned blockID = B->getBlockID(); - if (!reachable[blockID]) { - reachable.set(blockID); - WL.push_back(B); - } - } - } - - return top; + SourceLocation Loc = GetUnreachableLoc(S, R1, R2); + CB.HandleUnreachable(Loc, R1, R2); } -static int LineCmp(const void *p1, const void *p2) { - SourceLocation *Line1 = (SourceLocation *)p1; - SourceLocation *Line2 = (SourceLocation *)p2; - return !(*Line1 < *Line2); -} - -namespace { -struct ErrLoc { - SourceLocation Loc; - SourceRange R1; - SourceRange R2; - ErrLoc(SourceLocation l, SourceRange r1, SourceRange r2) - : Loc(l), R1(r1), R2(r2) { } -}; -} namespace clang { namespace reachable_code { - -/// ScanReachableFromBlock - Mark all blocks reachable from Start. -/// Returns the total number of blocks that were marked reachable. -unsigned ScanReachableFromBlock(const CFGBlock &Start, + +unsigned ScanReachableFromBlock(const CFGBlock *Start, llvm::BitVector &Reachable) { unsigned count = 0; - llvm::SmallVector<const CFGBlock*, 32> WL; - + // Prep work queue - Reachable.set(Start.getBlockID()); - ++count; - WL.push_back(&Start); - + SmallVector<const CFGBlock*, 32> WL; + + // The entry block may have already been marked reachable + // by the caller. + if (!Reachable[Start->getBlockID()]) { + ++count; + Reachable[Start->getBlockID()] = true; + } + + WL.push_back(Start); + // Find the reachable blocks from 'Start'. - CFGBlock::FilterOptions FO; - FO.IgnoreDefaultsWithCoveredEnums = 1; - while (!WL.empty()) { - const CFGBlock *item = WL.back(); - WL.pop_back(); - - // Look at the successors and mark then reachable. - for (CFGBlock::filtered_succ_iterator I= item->filtered_succ_start_end(FO); - I.hasMore(); ++I) + const CFGBlock *item = WL.pop_back_val(); + + // Look at the successors and mark then reachable. + for (CFGBlock::const_succ_iterator I = item->succ_begin(), + E = item->succ_end(); I != E; ++I) if (const CFGBlock *B = *I) { unsigned blockID = B->getBlockID(); if (!Reachable[blockID]) { Reachable.set(blockID); - ++count; WL.push_back(B); + ++count; } } } return count; } - + void FindUnreachableCode(AnalysisContext &AC, Callback &CB) { CFG *cfg = AC.getCFG(); if (!cfg) return; - // Scan for reachable blocks. + // Scan for reachable blocks from the entrance of the CFG. + // If there are no unreachable blocks, we're done. 
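ScanReachableFromBlock above is a straightforward worklist flood fill along successor edges, with a BitVector recording which blocks have already been counted. A standalone sketch of that marking step on an adjacency-list graph (markReachable and the toy graph are invented for the example):

#include <iostream>
#include <vector>

// Mark every node reachable from 'start'; returns how many new nodes were
// marked. 'reachable' plays the role of the BitVector in the diff.
static unsigned markReachable(const std::vector<std::vector<int>> &succs,
                              int start, std::vector<bool> &reachable) {
  unsigned count = 0;
  std::vector<int> worklist;
  if (!reachable[start]) {       // the caller may already have marked it
    reachable[start] = true;
    ++count;
  }
  worklist.push_back(start);
  while (!worklist.empty()) {
    int n = worklist.back();
    worklist.pop_back();
    for (int s : succs[n]) {
      if (!reachable[s]) {
        reachable[s] = true;
        worklist.push_back(s);
        ++count;
      }
    }
  }
  return count;
}

int main() {
  // 0 -> 1 -> 2; node 3 is not reachable from 0.
  std::vector<std::vector<int>> succs = {{1}, {2}, {}, {2}};
  std::vector<bool> reachable(succs.size(), false);
  unsigned n = markReachable(succs, 0, reachable);
  std::cout << n << " reachable, node 3 reachable? "
            << reachable[3] << "\n";   // 3 reachable, node 3 reachable? 0
}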
llvm::BitVector reachable(cfg->getNumBlockIDs()); - unsigned numReachable = ScanReachableFromBlock(cfg->getEntry(), reachable); - - // If there are no unreachable blocks, we're done. + unsigned numReachable = ScanReachableFromBlock(&cfg->getEntry(), reachable); if (numReachable == cfg->getNumBlockIDs()) return; - - SourceRange R1, R2; - - llvm::SmallVector<ErrLoc, 24> lines; - bool AddEHEdges = AC.getAddEHEdges(); - - // First, give warnings for blocks with no predecessors, as they - // can't be part of a loop. - for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) { - CFGBlock &b = **I; - if (!reachable[b.getBlockID()]) { - if (b.pred_empty()) { - if (!AddEHEdges - && dyn_cast_or_null<CXXTryStmt>(b.getTerminator().getStmt())) { - // When not adding EH edges from calls, catch clauses - // can otherwise seem dead. Avoid noting them as dead. - numReachable += ScanReachableFromBlock(b, reachable); - continue; - } - SourceLocation c = GetUnreachableLoc(b, R1, R2); - if (!c.isValid()) { - // Blocks without a location can't produce a warning, so don't mark - // reachable blocks from here as live. - reachable.set(b.getBlockID()); - ++numReachable; - continue; - } - lines.push_back(ErrLoc(c, R1, R2)); - // Avoid excessive errors by marking everything reachable from here - numReachable += ScanReachableFromBlock(b, reachable); - } + + // If there aren't explicit EH edges, we should include the 'try' dispatch + // blocks as roots. + if (!AC.getCFGBuildOptions().AddEHEdges) { + for (CFG::try_block_iterator I = cfg->try_blocks_begin(), + E = cfg->try_blocks_end() ; I != E; ++I) { + numReachable += ScanReachableFromBlock(*I, reachable); } + if (numReachable == cfg->getNumBlockIDs()) + return; } - if (numReachable < cfg->getNumBlockIDs()) { - // And then give warnings for the tops of loops. - for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) { - CFGBlock &b = **I; - if (!reachable[b.getBlockID()]) - // Avoid excessive errors by marking everything reachable from here - lines.push_back(ErrLoc(MarkLiveTop(&b, reachable, - AC.getASTContext().getSourceManager()), - SourceRange(), SourceRange())); - } + // There are some unreachable blocks. We need to find the root blocks that + // contain code that should be considered unreachable. + for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) { + const CFGBlock *block = *I; + // A block may have been marked reachable during this loop. + if (reachable[block->getBlockID()]) + continue; + + DeadCodeScan DS(reachable); + numReachable += DS.scanBackwards(block, CB); + + if (numReachable == cfg->getNumBlockIDs()) + return; } - - llvm::array_pod_sort(lines.begin(), lines.end(), LineCmp); - - for (llvm::SmallVectorImpl<ErrLoc>::iterator I=lines.begin(), E=lines.end(); - I != E; ++I) - if (I->Loc.isValid()) - CB.HandleUnreachable(I->Loc, I->R1, I->R2); } }} // end namespace clang::reachable_code diff --git a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp new file mode 100644 index 0000000..5a12913 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp @@ -0,0 +1,799 @@ +//===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// A intra-procedural analysis for thread safety (e.g. 
deadlocks and race +// conditions), based off of an annotation system. +// +// See http://clang.llvm.org/docs/LanguageExtensions.html#threadsafety for more +// information. +// +//===----------------------------------------------------------------------===// + +#include "clang/Analysis/Analyses/ThreadSafety.h" +#include "clang/Analysis/AnalysisContext.h" +#include "clang/Analysis/CFG.h" +#include "clang/Analysis/CFGStmtMap.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/StmtCXX.h" +#include "clang/AST/StmtVisitor.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/SourceLocation.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/ADT/FoldingSet.h" +#include "llvm/ADT/ImmutableMap.h" +#include "llvm/ADT/PostOrderIterator.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include <algorithm> +#include <vector> + +using namespace clang; +using namespace thread_safety; + +// Key method definition +ThreadSafetyHandler::~ThreadSafetyHandler() {} + +// Helper function +static Expr *getParent(Expr *Exp) { + if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) + return ME->getBase(); + if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Exp)) + return CE->getImplicitObjectArgument(); + return 0; +} + +namespace { +/// \brief Implements a set of CFGBlocks using a BitVector. +/// +/// This class contains a minimal interface, primarily dictated by the SetType +/// template parameter of the llvm::po_iterator template, as used with external +/// storage. We also use this set to keep track of which CFGBlocks we visit +/// during the analysis. +class CFGBlockSet { + llvm::BitVector VisitedBlockIDs; + +public: + // po_iterator requires this iterator, but the only interface needed is the + // value_type typedef. + struct iterator { + typedef const CFGBlock *value_type; + }; + + CFGBlockSet() {} + CFGBlockSet(const CFG *G) : VisitedBlockIDs(G->getNumBlockIDs(), false) {} + + /// \brief Set the bit associated with a particular CFGBlock. + /// This is the important method for the SetType template parameter. + bool insert(const CFGBlock *Block) { + // Note that insert() is called by po_iterator, which doesn't check to make + // sure that Block is non-null. Moreover, the CFGBlock iterator will + // occasionally hand out null pointers for pruned edges, so we catch those + // here. + if (Block == 0) + return false; // if an edge is trivially false. + if (VisitedBlockIDs.test(Block->getBlockID())) + return false; + VisitedBlockIDs.set(Block->getBlockID()); + return true; + } + + /// \brief Check if the bit for a CFGBlock has been already set. + /// This method is for tracking visited blocks in the main threadsafety loop. + /// Block must not be null. + bool alreadySet(const CFGBlock *Block) { + return VisitedBlockIDs.test(Block->getBlockID()); + } +}; + +/// \brief We create a helper class which we use to iterate through CFGBlocks in +/// the topological order. 
+class TopologicallySortedCFG { + typedef llvm::po_iterator<const CFG*, CFGBlockSet, true> po_iterator; + + std::vector<const CFGBlock*> Blocks; + +public: + typedef std::vector<const CFGBlock*>::reverse_iterator iterator; + + TopologicallySortedCFG(const CFG *CFGraph) { + Blocks.reserve(CFGraph->getNumBlockIDs()); + CFGBlockSet BSet(CFGraph); + + for (po_iterator I = po_iterator::begin(CFGraph, BSet), + E = po_iterator::end(CFGraph, BSet); I != E; ++I) { + Blocks.push_back(*I); + } + } + + iterator begin() { + return Blocks.rbegin(); + } + + iterator end() { + return Blocks.rend(); + } + + bool empty() { + return begin() == end(); + } +}; + +/// \brief A MutexID object uniquely identifies a particular mutex, and +/// is built from an Expr* (i.e. calling a lock function). +/// +/// Thread-safety analysis works by comparing lock expressions. Within the +/// body of a function, an expression such as "x->foo->bar.mu" will resolve to +/// a particular mutex object at run-time. Subsequent occurrences of the same +/// expression (where "same" means syntactic equality) will refer to the same +/// run-time object if three conditions hold: +/// (1) Local variables in the expression, such as "x" have not changed. +/// (2) Values on the heap that affect the expression have not changed. +/// (3) The expression involves only pure function calls. +/// The current implementation assumes, but does not verify, that multiple uses +/// of the same lock expression satisfies these criteria. +/// +/// Clang introduces an additional wrinkle, which is that it is difficult to +/// derive canonical expressions, or compare expressions directly for equality. +/// Thus, we identify a mutex not by an Expr, but by the set of named +/// declarations that are referenced by the Expr. In other words, +/// x->foo->bar.mu will be a four element vector with the Decls for +/// mu, bar, and foo, and x. The vector will uniquely identify the expression +/// for all practical purposes. +/// +/// Note we will need to perform substitution on "this" and function parameter +/// names when constructing a lock expression. +/// +/// For example: +/// class C { Mutex Mu; void lock() EXCLUSIVE_LOCK_FUNCTION(this->Mu); }; +/// void myFunc(C *X) { ... X->lock() ... } +/// The original expression for the mutex acquired by myFunc is "this->Mu", but +/// "X" is substituted for "this" so we get X->Mu(); +/// +/// For another example: +/// foo(MyList *L) EXCLUSIVE_LOCKS_REQUIRED(L->Mu) { ... } +/// MyList *MyL; +/// foo(MyL); // requires lock MyL->Mu to be held +class MutexID { + SmallVector<NamedDecl*, 2> DeclSeq; + + /// Build a Decl sequence representing the lock from the given expression. + /// Recursive function that bottoms out when the final DeclRefExpr is reached. + // FIXME: Lock expressions that involve array indices or function calls. + // FIXME: Deal with LockReturned attribute. 
+ void buildMutexID(Expr *Exp, Expr *Parent) { + if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) { + NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl()); + DeclSeq.push_back(ND); + } else if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) { + NamedDecl *ND = ME->getMemberDecl(); + DeclSeq.push_back(ND); + buildMutexID(ME->getBase(), Parent); + } else if (isa<CXXThisExpr>(Exp)) { + if (Parent) + buildMutexID(Parent, 0); + else + return; // mutexID is still valid in this case + } else if (CastExpr *CE = dyn_cast<CastExpr>(Exp)) + buildMutexID(CE->getSubExpr(), Parent); + else + DeclSeq.clear(); // invalid lock expression + } + +public: + MutexID(Expr *LExpr, Expr *ParentExpr) { + buildMutexID(LExpr, ParentExpr); + } + + /// If we encounter part of a lock expression we cannot parse + bool isValid() const { + return !DeclSeq.empty(); + } + + bool operator==(const MutexID &other) const { + return DeclSeq == other.DeclSeq; + } + + bool operator!=(const MutexID &other) const { + return !(*this == other); + } + + // SmallVector overloads operator< to do lexicographic ordering. Note that + // we use pointer equality (and <) to compare NamedDecls. This means the order + // of MutexIDs in a lockset is nondeterministic. In order to output + // diagnostics in a deterministic ordering, we must order all diagnostics to + // output by SourceLocation when iterating through this lockset. + bool operator<(const MutexID &other) const { + return DeclSeq < other.DeclSeq; + } + + /// \brief Returns the name of the first Decl in the list for a given MutexID; + /// e.g. the lock expression foo.bar() has name "bar". + /// The caret will point unambiguously to the lock expression, so using this + /// name in diagnostics is a way to get simple, and consistent, mutex names. + /// We do not want to output the entire expression text for security reasons. + StringRef getName() const { + assert(isValid()); + return DeclSeq.front()->getName(); + } + + void Profile(llvm::FoldingSetNodeID &ID) const { + for (SmallVectorImpl<NamedDecl*>::const_iterator I = DeclSeq.begin(), + E = DeclSeq.end(); I != E; ++I) { + ID.AddPointer(*I); + } + } +}; + +/// \brief This is a helper class that stores info about the most recent +/// acquire of a Lock. +/// +/// The main body of the analysis maps MutexIDs to LockDatas. +struct LockData { + SourceLocation AcquireLoc; + + /// \brief LKind stores whether a lock is held shared or exclusively. + /// Note that this analysis does not currently support either re-entrant + /// locking or lock "upgrading" and "downgrading" between exclusive and + /// shared. + /// + /// FIXME: add support for re-entrant locking and lock up/downgrading + LockKind LKind; + + LockData(SourceLocation AcquireLoc, LockKind LKind) + : AcquireLoc(AcquireLoc), LKind(LKind) {} + + bool operator==(const LockData &other) const { + return AcquireLoc == other.AcquireLoc && LKind == other.LKind; + } + + bool operator!=(const LockData &other) const { + return !(*this == other); + } + + void Profile(llvm::FoldingSetNodeID &ID) const { + ID.AddInteger(AcquireLoc.getRawEncoding()); + ID.AddInteger(LKind); + } +}; + +/// A Lockset maps each MutexID (defined above) to information about how it has +/// been locked. +typedef llvm::ImmutableMap<MutexID, LockData> Lockset; + +/// \brief We use this class to visit different types of expressions in +/// CFGBlocks, and build up the lockset. +/// An expression may cause us to add or remove locks from the lockset, or else +/// output error messages related to missing locks. 
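Because Lockset is an llvm::ImmutableMap, every add or remove produces a new map and leaves the old one untouched, which is what lets each CFG block keep its own lockset cheaply. A small sketch of that functional style with plain int keys, mirroring only the Factory, lookup and contains calls already used in this file:

  #include "llvm/ADT/ImmutableMap.h"

  void immutableMapSketch() {
    typedef llvm::ImmutableMap<int, int> Map;
    Map::Factory F;

    Map Empty = F.getEmptyMap();
    Map One   = F.add(Empty, 1, 10);  // Empty is left unchanged
    Map None  = F.remove(One, 1);     // One is left unchanged

    const int *V = One.lookup(1);     // non-null: key 1 is present
    bool Held = None.contains(1);     // false
    (void)V; (void)Held;
  }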
+/// FIXME: In future, we may be able to not inherit from a visitor. +class BuildLockset : public StmtVisitor<BuildLockset> { + ThreadSafetyHandler &Handler; + Lockset LSet; + Lockset::Factory &LocksetFactory; + + // Helper functions + void removeLock(SourceLocation UnlockLoc, Expr *LockExp, Expr *Parent); + void addLock(SourceLocation LockLoc, Expr *LockExp, Expr *Parent, + LockKind LK); + const ValueDecl *getValueDecl(Expr *Exp); + void warnIfMutexNotHeld (const NamedDecl *D, Expr *Exp, AccessKind AK, + Expr *MutexExp, ProtectedOperationKind POK); + void checkAccess(Expr *Exp, AccessKind AK); + void checkDereference(Expr *Exp, AccessKind AK); + + template <class AttrType> + void addLocksToSet(LockKind LK, Attr *Attr, CXXMemberCallExpr *Exp); + + /// \brief Returns true if the lockset contains a lock, regardless of whether + /// the lock is held exclusively or shared. + bool locksetContains(MutexID Lock) const { + return LSet.lookup(Lock); + } + + /// \brief Returns true if the lockset contains a lock with the passed in + /// locktype. + bool locksetContains(MutexID Lock, LockKind KindRequested) const { + const LockData *LockHeld = LSet.lookup(Lock); + return (LockHeld && KindRequested == LockHeld->LKind); + } + + /// \brief Returns true if the lockset contains a lock with at least the + /// passed in locktype. So for example, if we pass in LK_Shared, this function + /// returns true if the lock is held LK_Shared or LK_Exclusive. If we pass in + /// LK_Exclusive, this function returns true if the lock is held LK_Exclusive. + bool locksetContainsAtLeast(MutexID Lock, LockKind KindRequested) const { + switch (KindRequested) { + case LK_Shared: + return locksetContains(Lock); + case LK_Exclusive: + return locksetContains(Lock, KindRequested); + } + llvm_unreachable("Unknown LockKind"); + } + +public: + BuildLockset(ThreadSafetyHandler &Handler, Lockset LS, Lockset::Factory &F) + : StmtVisitor<BuildLockset>(), Handler(Handler), LSet(LS), + LocksetFactory(F) {} + + Lockset getLockset() { + return LSet; + } + + void VisitUnaryOperator(UnaryOperator *UO); + void VisitBinaryOperator(BinaryOperator *BO); + void VisitCastExpr(CastExpr *CE); + void VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp); +}; + +/// \brief Add a new lock to the lockset, warning if the lock is already there. +/// \param LockLoc The source location of the acquire +/// \param LockExp The lock expression corresponding to the lock to be added +void BuildLockset::addLock(SourceLocation LockLoc, Expr *LockExp, Expr *Parent, + LockKind LK) { + // FIXME: deal with acquired before/after annotations. We can write a first + // pass that does the transitive lookup lazily, and refine afterwards. + MutexID Mutex(LockExp, Parent); + if (!Mutex.isValid()) { + Handler.handleInvalidLockExp(LockExp->getExprLoc()); + return; + } + + LockData NewLock(LockLoc, LK); + + // FIXME: Don't always warn when we have support for reentrant locks. + if (locksetContains(Mutex)) + Handler.handleDoubleLock(Mutex.getName(), LockLoc); + LSet = LocksetFactory.add(LSet, Mutex, NewLock); +} + +/// \brief Remove a lock from the lockset, warning if the lock is not there. 
+/// \param LockExp The lock expression corresponding to the lock to be removed +/// \param UnlockLoc The source location of the unlock (only used in error msg) +void BuildLockset::removeLock(SourceLocation UnlockLoc, Expr *LockExp, + Expr *Parent) { + MutexID Mutex(LockExp, Parent); + if (!Mutex.isValid()) { + Handler.handleInvalidLockExp(LockExp->getExprLoc()); + return; + } + + Lockset NewLSet = LocksetFactory.remove(LSet, Mutex); + if(NewLSet == LSet) + Handler.handleUnmatchedUnlock(Mutex.getName(), UnlockLoc); + + LSet = NewLSet; +} + +/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs +const ValueDecl *BuildLockset::getValueDecl(Expr *Exp) { + if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Exp)) + return DR->getDecl(); + + if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) + return ME->getMemberDecl(); + + return 0; +} + +/// \brief Warn if the LSet does not contain a lock sufficient to protect access +/// of at least the passed in AccessType. +void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp, + AccessKind AK, Expr *MutexExp, + ProtectedOperationKind POK) { + LockKind LK = getLockKindFromAccessKind(AK); + Expr *Parent = getParent(Exp); + MutexID Mutex(MutexExp, Parent); + if (!Mutex.isValid()) + Handler.handleInvalidLockExp(MutexExp->getExprLoc()); + else if (!locksetContainsAtLeast(Mutex, LK)) + Handler.handleMutexNotHeld(D, POK, Mutex.getName(), LK, Exp->getExprLoc()); +} + + +/// \brief This method identifies variable dereferences and checks pt_guarded_by +/// and pt_guarded_var annotations. Note that we only check these annotations +/// at the time a pointer is dereferenced. +/// FIXME: We need to check for other types of pointer dereferences +/// (e.g. [], ->) and deal with them here. +/// \param Exp An expression that has been read or written. +void BuildLockset::checkDereference(Expr *Exp, AccessKind AK) { + UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp); + if (!UO || UO->getOpcode() != clang::UO_Deref) + return; + Exp = UO->getSubExpr()->IgnoreParenCasts(); + + const ValueDecl *D = getValueDecl(Exp); + if(!D || !D->hasAttrs()) + return; + + if (D->getAttr<PtGuardedVarAttr>() && LSet.isEmpty()) + Handler.handleNoMutexHeld(D, POK_VarDereference, AK, Exp->getExprLoc()); + + const AttrVec &ArgAttrs = D->getAttrs(); + for(unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i) + if (PtGuardedByAttr *PGBAttr = dyn_cast<PtGuardedByAttr>(ArgAttrs[i])) + warnIfMutexNotHeld(D, Exp, AK, PGBAttr->getArg(), POK_VarDereference); +} + +/// \brief Checks guarded_by and guarded_var attributes. +/// Whenever we identify an access (read or write) of a DeclRefExpr or +/// MemberExpr, we need to check whether there are any guarded_by or +/// guarded_var attributes, and make sure we hold the appropriate mutexes. +void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) { + const ValueDecl *D = getValueDecl(Exp); + if(!D || !D->hasAttrs()) + return; + + if (D->getAttr<GuardedVarAttr>() && LSet.isEmpty()) + Handler.handleNoMutexHeld(D, POK_VarAccess, AK, Exp->getExprLoc()); + + const AttrVec &ArgAttrs = D->getAttrs(); + for(unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i) + if (GuardedByAttr *GBAttr = dyn_cast<GuardedByAttr>(ArgAttrs[i])) + warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarAccess); +} + +/// \brief For unary operations which read and write a variable, we need to +/// check whether we hold any required mutexes. Reads are checked in +/// VisitCastExpr. 
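Before the visitor methods, a hypothetical example (invented names, Mutex as sketched earlier) of the distinction checkAccess and checkDereference enforce: guarded_by constrains uses of the variable itself, while pt_guarded_by only constrains dereferences of the pointer:

  Mutex Mu;
  int Len  __attribute__((guarded_by(Mu)));     // Len itself requires Mu
  int *Buf __attribute__((pt_guarded_by(Mu)));  // *Buf requires Mu

  void update(int *Fresh) {
    Buf = Fresh;  // fine: assigning the pointer is not a dereference
    *Buf = 0;     // expected warning: dereferencing Buf requires holding Mu
    Len = 1;      // expected warning: writing Len requires holding Mu
  }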
+void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) { + switch (UO->getOpcode()) { + case clang::UO_PostDec: + case clang::UO_PostInc: + case clang::UO_PreDec: + case clang::UO_PreInc: { + Expr *SubExp = UO->getSubExpr()->IgnoreParenCasts(); + checkAccess(SubExp, AK_Written); + checkDereference(SubExp, AK_Written); + break; + } + default: + break; + } +} + +/// For binary operations which assign to a variable (writes), we need to check +/// whether we hold any required mutexes. +/// FIXME: Deal with non-primitive types. +void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) { + if (!BO->isAssignmentOp()) + return; + Expr *LHSExp = BO->getLHS()->IgnoreParenCasts(); + checkAccess(LHSExp, AK_Written); + checkDereference(LHSExp, AK_Written); +} + +/// Whenever we do an LValue to Rvalue cast, we are reading a variable and +/// need to ensure we hold any required mutexes. +/// FIXME: Deal with non-primitive types. +void BuildLockset::VisitCastExpr(CastExpr *CE) { + if (CE->getCastKind() != CK_LValueToRValue) + return; + Expr *SubExp = CE->getSubExpr()->IgnoreParenCasts(); + checkAccess(SubExp, AK_Read); + checkDereference(SubExp, AK_Read); +} + +/// \brief This function, parameterized by an attribute type, is used to add a +/// set of locks specified as attribute arguments to the lockset. +template <typename AttrType> +void BuildLockset::addLocksToSet(LockKind LK, Attr *Attr, + CXXMemberCallExpr *Exp) { + typedef typename AttrType::args_iterator iterator_type; + SourceLocation ExpLocation = Exp->getExprLoc(); + Expr *Parent = Exp->getImplicitObjectArgument(); + AttrType *SpecificAttr = cast<AttrType>(Attr); + + if (SpecificAttr->args_size() == 0) { + // The mutex held is the "this" object. + addLock(ExpLocation, Parent, 0, LK); + return; + } + + for (iterator_type I = SpecificAttr->args_begin(), + E = SpecificAttr->args_end(); I != E; ++I) + addLock(ExpLocation, *I, Parent, LK); +} + +/// \brief When visiting CXXMemberCallExprs we need to examine the attributes on +/// the method that is being called and add, remove or check locks in the +/// lockset accordingly. +/// +/// FIXME: For classes annotated with one of the guarded annotations, we need +/// to treat const method calls as reads and non-const method calls as writes, +/// and check that the appropriate locks are held. Non-const method calls with +/// the same signature as const method calls can be also treated as reads. +/// +/// FIXME: We need to also visit CallExprs to catch/check global functions. +/// +/// FIXME: Do not flag an error for member variables accessed in constructors/ +/// destructors +void BuildLockset::VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp) { + NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl()); + + SourceLocation ExpLocation = Exp->getExprLoc(); + Expr *Parent = Exp->getImplicitObjectArgument(); + + if(!D || !D->hasAttrs()) + return; + + AttrVec &ArgAttrs = D->getAttrs(); + for(unsigned i = 0; i < ArgAttrs.size(); ++i) { + Attr *Attr = ArgAttrs[i]; + switch (Attr->getKind()) { + // When we encounter an exclusive lock function, we need to add the lock + // to our lockset with kind exclusive. + case attr::ExclusiveLockFunction: + addLocksToSet<ExclusiveLockFunctionAttr>(LK_Exclusive, Attr, Exp); + break; + + // When we encounter a shared lock function, we need to add the lock + // to our lockset with kind shared. 
+ case attr::SharedLockFunction: + addLocksToSet<SharedLockFunctionAttr>(LK_Shared, Attr, Exp); + break; + + // When we encounter an unlock function, we need to remove unlocked + // mutexes from the lockset, and flag a warning if they are not there. + case attr::UnlockFunction: { + UnlockFunctionAttr *UFAttr = cast<UnlockFunctionAttr>(Attr); + + if (UFAttr->args_size() == 0) { // The lock held is the "this" object. + removeLock(ExpLocation, Parent, 0); + break; + } + + for (UnlockFunctionAttr::args_iterator I = UFAttr->args_begin(), + E = UFAttr->args_end(); I != E; ++I) + removeLock(ExpLocation, *I, Parent); + break; + } + + case attr::ExclusiveLocksRequired: { + ExclusiveLocksRequiredAttr *ELRAttr = + cast<ExclusiveLocksRequiredAttr>(Attr); + + for (ExclusiveLocksRequiredAttr::args_iterator + I = ELRAttr->args_begin(), E = ELRAttr->args_end(); I != E; ++I) + warnIfMutexNotHeld(D, Exp, AK_Written, *I, POK_FunctionCall); + break; + } + + case attr::SharedLocksRequired: { + SharedLocksRequiredAttr *SLRAttr = cast<SharedLocksRequiredAttr>(Attr); + + for (SharedLocksRequiredAttr::args_iterator I = SLRAttr->args_begin(), + E = SLRAttr->args_end(); I != E; ++I) + warnIfMutexNotHeld(D, Exp, AK_Read, *I, POK_FunctionCall); + break; + } + + case attr::LocksExcluded: { + LocksExcludedAttr *LEAttr = cast<LocksExcludedAttr>(Attr); + for (LocksExcludedAttr::args_iterator I = LEAttr->args_begin(), + E = LEAttr->args_end(); I != E; ++I) { + MutexID Mutex(*I, Parent); + if (!Mutex.isValid()) + Handler.handleInvalidLockExp((*I)->getExprLoc()); + else if (locksetContains(Mutex)) + Handler.handleFunExcludesLock(D->getName(), Mutex.getName(), + ExpLocation); + } + break; + } + + // Ignore other (non thread-safety) attributes + default: + break; + } + } +} + +} // end anonymous namespace + +/// \brief Compute the intersection of two locksets and issue warnings for any +/// locks in the symmetric difference. +/// +/// This function is used at a merge point in the CFG when comparing the lockset +/// of each branch being merged. For example, given the following sequence: +/// A; if () then B; else C; D; we need to check that the lockset after B and C +/// are the same. In the event of a difference, we use the intersection of these +/// two locksets at the start of D. 
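A hypothetical example of the merge described above (invented names, Mutex as sketched earlier): one branch releases the mutex and the other does not, so the exit locksets of the two predecessors disagree at the join; the analysis warns and continues with their intersection. The same routine is reused below for loop back edges and for the end-of-function check.

  Mutex Mu;

  void maybeUnlock(bool Flag) {
    Mu.Lock();
    if (Flag)
      Mu.Unlock();  // exit lockset of this branch: { }
    // exit lockset of the fall-through branch: { Mu }
    // Expected warning at the join: Mu is held on only some predecessors;
    // the analysis then proceeds with the intersection, i.e. the empty set.
  }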
+static Lockset intersectAndWarn(ThreadSafetyHandler &Handler, + const Lockset LSet1, const Lockset LSet2, + Lockset::Factory &Fact, LockErrorKind LEK) { + Lockset Intersection = LSet1; + for (Lockset::iterator I = LSet2.begin(), E = LSet2.end(); I != E; ++I) { + const MutexID &LSet2Mutex = I.getKey(); + const LockData &LSet2LockData = I.getData(); + if (const LockData *LD = LSet1.lookup(LSet2Mutex)) { + if (LD->LKind != LSet2LockData.LKind) { + Handler.handleExclusiveAndShared(LSet2Mutex.getName(), + LSet2LockData.AcquireLoc, + LD->AcquireLoc); + if (LD->LKind != LK_Exclusive) + Intersection = Fact.add(Intersection, LSet2Mutex, LSet2LockData); + } + } else { + Handler.handleMutexHeldEndOfScope(LSet2Mutex.getName(), + LSet2LockData.AcquireLoc, LEK); + } + } + + for (Lockset::iterator I = LSet1.begin(), E = LSet1.end(); I != E; ++I) { + if (!LSet2.contains(I.getKey())) { + const MutexID &Mutex = I.getKey(); + const LockData &MissingLock = I.getData(); + Handler.handleMutexHeldEndOfScope(Mutex.getName(), + MissingLock.AcquireLoc, LEK); + Intersection = Fact.remove(Intersection, Mutex); + } + } + return Intersection; +} + +static Lockset addLock(ThreadSafetyHandler &Handler, + Lockset::Factory &LocksetFactory, + Lockset &LSet, Expr *LockExp, LockKind LK, + SourceLocation Loc) { + MutexID Mutex(LockExp, 0); + if (!Mutex.isValid()) { + Handler.handleInvalidLockExp(LockExp->getExprLoc()); + return LSet; + } + LockData NewLock(Loc, LK); + return LocksetFactory.add(LSet, Mutex, NewLock); +} + +namespace clang { +namespace thread_safety { +/// \brief Check a function's CFG for thread-safety violations. +/// +/// We traverse the blocks in the CFG, compute the set of mutexes that are held +/// at the end of each block, and issue warnings for thread safety violations. +/// Each block in the CFG is traversed exactly once. +void runThreadSafetyAnalysis(AnalysisContext &AC, + ThreadSafetyHandler &Handler) { + CFG *CFGraph = AC.getCFG(); + if (!CFGraph) return; + const Decl *D = AC.getDecl(); + if (D && D->getAttr<NoThreadSafetyAnalysisAttr>()) return; + + Lockset::Factory LocksetFactory; + + // FIXME: Switch to SmallVector? Otherwise improve performance impact? + std::vector<Lockset> EntryLocksets(CFGraph->getNumBlockIDs(), + LocksetFactory.getEmptyMap()); + std::vector<Lockset> ExitLocksets(CFGraph->getNumBlockIDs(), + LocksetFactory.getEmptyMap()); + + // We need to explore the CFG via a "topological" ordering. + // That way, we will be guaranteed to have information about required + // predecessor locksets when exploring a new block. 
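Just below, the entry lockset of the first block is seeded from any *_locks_required attributes on the function being analyzed. A hypothetical sketch (invented names, Mutex as sketched earlier) of the calling pattern that this seeding supports:

  class Table {
    Mutex Mu;
    int Data __attribute__((guarded_by(Mu)));

  public:
    // Callers promise to hold Mu, so the analysis pre-populates the entry
    // lockset of bump() with Mu and the body may touch Data directly.
    void bump() __attribute__((exclusive_locks_required(Mu))) { ++Data; }

    void caller() {
      bump();    // expected warning: calling bump() requires holding Mu
      Mu.Lock();
      bump();    // fine
      Mu.Unlock();
    }
  };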
+ TopologicallySortedCFG SortedGraph(CFGraph); + CFGBlockSet VisitedBlocks(CFGraph); + + if (!SortedGraph.empty() && D->hasAttrs()) { + const CFGBlock *FirstBlock = *SortedGraph.begin(); + Lockset &InitialLockset = EntryLocksets[FirstBlock->getBlockID()]; + const AttrVec &ArgAttrs = D->getAttrs(); + for(unsigned i = 0; i < ArgAttrs.size(); ++i) { + Attr *Attr = ArgAttrs[i]; + SourceLocation AttrLoc = Attr->getLocation(); + if (SharedLocksRequiredAttr *SLRAttr + = dyn_cast<SharedLocksRequiredAttr>(Attr)) { + for (SharedLocksRequiredAttr::args_iterator + SLRIter = SLRAttr->args_begin(), + SLREnd = SLRAttr->args_end(); SLRIter != SLREnd; ++SLRIter) + InitialLockset = addLock(Handler, LocksetFactory, InitialLockset, + *SLRIter, LK_Shared, + AttrLoc); + } else if (ExclusiveLocksRequiredAttr *ELRAttr + = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) { + for (ExclusiveLocksRequiredAttr::args_iterator + ELRIter = ELRAttr->args_begin(), + ELREnd = ELRAttr->args_end(); ELRIter != ELREnd; ++ELRIter) + InitialLockset = addLock(Handler, LocksetFactory, InitialLockset, + *ELRIter, LK_Exclusive, + AttrLoc); + } + } + } + + for (TopologicallySortedCFG::iterator I = SortedGraph.begin(), + E = SortedGraph.end(); I!= E; ++I) { + const CFGBlock *CurrBlock = *I; + int CurrBlockID = CurrBlock->getBlockID(); + + VisitedBlocks.insert(CurrBlock); + + // Use the default initial lockset in case there are no predecessors. + Lockset &Entryset = EntryLocksets[CurrBlockID]; + Lockset &Exitset = ExitLocksets[CurrBlockID]; + + // Iterate through the predecessor blocks and warn if the lockset for all + // predecessors is not the same. We take the entry lockset of the current + // block to be the intersection of all previous locksets. + // FIXME: By keeping the intersection, we may output more errors in future + // for a lock which is not in the intersection, but was in the union. We + // may want to also keep the union in future. As an example, let's say + // the intersection contains Mutex L, and the union contains L and M. + // Later we unlock M. At this point, we would output an error because we + // never locked M; although the real error is probably that we forgot to + // lock M on all code paths. Conversely, let's say that later we lock M. + // In this case, we should compare against the intersection instead of the + // union because the real error is probably that we forgot to unlock M on + // all code paths. + bool LocksetInitialized = false; + for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(), + PE = CurrBlock->pred_end(); PI != PE; ++PI) { + + // if *PI -> CurrBlock is a back edge + if (*PI == 0 || !VisitedBlocks.alreadySet(*PI)) + continue; + + int PrevBlockID = (*PI)->getBlockID(); + if (!LocksetInitialized) { + Entryset = ExitLocksets[PrevBlockID]; + LocksetInitialized = true; + } else { + Entryset = intersectAndWarn(Handler, Entryset, + ExitLocksets[PrevBlockID], LocksetFactory, + LEK_LockedSomePredecessors); + } + } + + BuildLockset LocksetBuilder(Handler, Entryset, LocksetFactory); + for (CFGBlock::const_iterator BI = CurrBlock->begin(), + BE = CurrBlock->end(); BI != BE; ++BI) { + if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&*BI)) + LocksetBuilder.Visit(const_cast<Stmt*>(CfgStmt->getStmt())); + } + Exitset = LocksetBuilder.getLockset(); + + // For every back edge from CurrBlock (the end of the loop) to another block + // (FirstLoopBlock) we need to check that the Lockset of Block is equal to + // the one held at the beginning of FirstLoopBlock. 
We can look up the + // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map. + for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(), + SE = CurrBlock->succ_end(); SI != SE; ++SI) { + + // if CurrBlock -> *SI is *not* a back edge + if (*SI == 0 || !VisitedBlocks.alreadySet(*SI)) + continue; + + CFGBlock *FirstLoopBlock = *SI; + Lockset PreLoop = EntryLocksets[FirstLoopBlock->getBlockID()]; + Lockset LoopEnd = ExitLocksets[CurrBlockID]; + intersectAndWarn(Handler, LoopEnd, PreLoop, LocksetFactory, + LEK_LockedSomeLoopIterations); + } + } + + Lockset InitialLockset = EntryLocksets[CFGraph->getEntry().getBlockID()]; + Lockset FinalLockset = ExitLocksets[CFGraph->getExit().getBlockID()]; + + // FIXME: Should we call this function for all blocks which exit the function? + intersectAndWarn(Handler, InitialLockset, FinalLockset, LocksetFactory, + LEK_LockedAtEndOfFunction); +} + +/// \brief Helper function that returns a LockKind required for the given level +/// of access. +LockKind getLockKindFromAccessKind(AccessKind AK) { + switch (AK) { + case AK_Read : + return LK_Shared; + case AK_Written : + return LK_Exclusive; + } + llvm_unreachable("Unknown AccessKind"); +} +}} // end namespace clang::thread_safety diff --git a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp index 1d6959d..9e98560 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp @@ -123,13 +123,7 @@ public: bool hasNoDeclarations() const { return declToIndex.size() == 0; } - - bool hasEntry(const VarDecl *vd) const { - return declToIndex.getValueIndex(vd).hasValue(); - } - - bool hasValues(const CFGBlock *block); - + void resetScratch(); ValueVector &getScratch() { return scratch; } @@ -170,7 +164,7 @@ ValueVector &CFGBlockValues::lazyCreate(ValueVector *&bv) { /// This function pattern matches for a '&&' or '||' that appears at /// the beginning of a CFGBlock that also (1) has a terminator and /// (2) has no other elements. If such an expression is found, it is returned. 
-static BinaryOperator *getLogicalOperatorInChain(const CFGBlock *block) { +static const BinaryOperator *getLogicalOperatorInChain(const CFGBlock *block) { if (block->empty()) return 0; @@ -178,7 +172,7 @@ static BinaryOperator *getLogicalOperatorInChain(const CFGBlock *block) { if (!cstmt) return 0; - BinaryOperator *b = llvm::dyn_cast_or_null<BinaryOperator>(cstmt->getStmt()); + const BinaryOperator *b = dyn_cast_or_null<BinaryOperator>(cstmt->getStmt()); if (!b || !b->isLogicalOp()) return 0; @@ -209,11 +203,6 @@ ValueVector &CFGBlockValues::getValueVector(const CFGBlock *block, return lazyCreate(vals[idx].first); } -bool CFGBlockValues::hasValues(const CFGBlock *block) { - unsigned idx = block->getBlockID(); - return vals[idx].second != 0; -} - BVPair &CFGBlockValues::getValueVectors(const clang::CFGBlock *block, bool shouldLazyCreate) { unsigned idx = block->getBlockID(); @@ -223,13 +212,6 @@ BVPair &CFGBlockValues::getValueVectors(const clang::CFGBlock *block, return vals[idx]; } -void CFGBlockValues::mergeIntoScratch(ValueVector const &source, - bool isFirst) { - if (isFirst) - scratch = source; - else - scratch |= source; -} #if 0 static void printVector(const CFGBlock *block, ValueVector &bv, unsigned num) { @@ -240,8 +222,24 @@ static void printVector(const CFGBlock *block, ValueVector &bv, } llvm::errs() << " : " << num << '\n'; } + +static void printVector(const char *name, ValueVector const &bv) { + llvm::errs() << name << " : "; + for (unsigned i = 0; i < bv.size(); ++i) { + llvm::errs() << ' ' << bv[i]; + } + llvm::errs() << "\n"; +} #endif +void CFGBlockValues::mergeIntoScratch(ValueVector const &source, + bool isFirst) { + if (isFirst) + scratch = source; + else + scratch |= source; +} + bool CFGBlockValues::updateValueVectorWithScratch(const CFGBlock *block) { ValueVector &dst = getValueVector(block, 0); bool changed = (dst != scratch); @@ -283,7 +281,7 @@ ValueVector::reference CFGBlockValues::operator[](const VarDecl *vd) { namespace { class DataflowWorklist { - llvm::SmallVector<const CFGBlock *, 20> worklist; + SmallVector<const CFGBlock *, 20> worklist; llvm::BitVector enqueuedBlocks; public: DataflowWorklist(const CFG &cfg) : enqueuedBlocks(cfg.getNumBlockIDs()) {} @@ -336,23 +334,34 @@ public: const VarDecl *getDecl() const { return vd; } }; -class TransferFunctions : public CFGRecStmtVisitor<TransferFunctions> { +class TransferFunctions : public StmtVisitor<TransferFunctions> { CFGBlockValues &vals; const CFG &cfg; AnalysisContext &ac; UninitVariablesHandler *handler; - const DeclRefExpr *currentDR; - const Expr *currentVoidCast; - const bool flagBlockUses; + + /// The last DeclRefExpr seen when analyzing a block. Used to + /// cheat when detecting cases when the address of a variable is taken. + DeclRefExpr *lastDR; + + /// The last lvalue-to-rvalue conversion of a variable whose value + /// was uninitialized. Normally this results in a warning, but it is + /// possible to either silence the warning in some cases, or we + /// propagate the uninitialized value. + CastExpr *lastLoad; + + /// For some expressions, we want to ignore any post-processing after + /// visitation. 
+ bool skipProcessUses; + public: TransferFunctions(CFGBlockValues &vals, const CFG &cfg, AnalysisContext &ac, - UninitVariablesHandler *handler, - bool flagBlockUses) - : vals(vals), cfg(cfg), ac(ac), handler(handler), currentDR(0), - currentVoidCast(0), flagBlockUses(flagBlockUses) {} + UninitVariablesHandler *handler) + : vals(vals), cfg(cfg), ac(ac), handler(handler), + lastDR(0), lastLoad(0), + skipProcessUses(false) {} - const CFG &getCFG() { return cfg; } void reportUninit(const DeclRefExpr *ex, const VarDecl *vd, bool isAlwaysUninit); @@ -362,53 +371,59 @@ public: void VisitUnaryOperator(UnaryOperator *uo); void VisitBinaryOperator(BinaryOperator *bo); void VisitCastExpr(CastExpr *ce); - void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *se); - void VisitCXXTypeidExpr(CXXTypeidExpr *E); - void BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt *fs); + void VisitObjCForCollectionStmt(ObjCForCollectionStmt *fs); + void Visit(Stmt *s); bool isTrackedVar(const VarDecl *vd) { return ::isTrackedVar(vd, cast<DeclContext>(ac.getDecl())); } FindVarResult findBlockVarDecl(Expr *ex); + + void ProcessUses(Stmt *s = 0); }; } +static const Expr *stripCasts(ASTContext &C, const Expr *Ex) { + while (Ex) { + Ex = Ex->IgnoreParenNoopCasts(C); + if (const CastExpr *CE = dyn_cast<CastExpr>(Ex)) { + if (CE->getCastKind() == CK_LValueBitCast) { + Ex = CE->getSubExpr(); + continue; + } + } + break; + } + return Ex; +} + void TransferFunctions::reportUninit(const DeclRefExpr *ex, const VarDecl *vd, bool isAlwaysUnit) { if (handler) handler->handleUseOfUninitVariable(ex, vd, isAlwaysUnit); } -FindVarResult TransferFunctions::findBlockVarDecl(Expr* ex) { - if (DeclRefExpr* dr = dyn_cast<DeclRefExpr>(ex->IgnoreParenCasts())) +FindVarResult TransferFunctions::findBlockVarDecl(Expr *ex) { + if (DeclRefExpr *dr = dyn_cast<DeclRefExpr>(ex->IgnoreParenCasts())) if (VarDecl *vd = dyn_cast<VarDecl>(dr->getDecl())) if (isTrackedVar(vd)) return FindVarResult(vd, dr); return FindVarResult(0, 0); } -void TransferFunctions::BlockStmt_VisitObjCForCollectionStmt( - ObjCForCollectionStmt *fs) { - - Visit(fs->getCollection()); - +void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *fs) { // This represents an initialization of the 'element' value. Stmt *element = fs->getElement(); - const VarDecl* vd = 0; + const VarDecl *vd = 0; - if (DeclStmt* ds = dyn_cast<DeclStmt>(element)) { + if (DeclStmt *ds = dyn_cast<DeclStmt>(element)) { vd = cast<VarDecl>(ds->getSingleDecl()); if (!isTrackedVar(vd)) vd = 0; - } - else { + } else { // Initialize the value of the reference variable. 
const FindVarResult &res = findBlockVarDecl(cast<Expr>(element)); vd = res.getDecl(); - if (!vd) { - Visit(element); - return; - } } if (vd) @@ -416,14 +431,10 @@ void TransferFunctions::BlockStmt_VisitObjCForCollectionStmt( } void TransferFunctions::VisitBlockExpr(BlockExpr *be) { - if (!flagBlockUses || !handler) - return; const BlockDecl *bd = be->getBlockDecl(); for (BlockDecl::capture_const_iterator i = bd->capture_begin(), e = bd->capture_end() ; i != e; ++i) { const VarDecl *vd = i->getVariable(); - if (!vd->hasLocalStorage()) - continue; if (!isTrackedVar(vd)) continue; if (i->isByRef()) { @@ -431,19 +442,27 @@ void TransferFunctions::VisitBlockExpr(BlockExpr *be) { continue; } Value v = vals[vd]; - if (isUninitialized(v)) + if (handler && isUninitialized(v)) handler->handleUseOfUninitVariable(be, vd, isAlwaysUninit(v)); } } +void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *dr) { + // Record the last DeclRefExpr seen. This is an lvalue computation. + // We use this value to later detect if a variable "escapes" the analysis. + if (const VarDecl *vd = dyn_cast<VarDecl>(dr->getDecl())) + if (isTrackedVar(vd)) { + ProcessUses(); + lastDR = dr; + } +} + void TransferFunctions::VisitDeclStmt(DeclStmt *ds) { for (DeclStmt::decl_iterator DI = ds->decl_begin(), DE = ds->decl_end(); DI != DE; ++DI) { if (VarDecl *vd = dyn_cast<VarDecl>(*DI)) { if (isTrackedVar(vd)) { if (Expr *init = vd->getInit()) { - Visit(init); - // If the initializer consists solely of a reference to itself, we // explicitly mark the variable as uninitialized. This allows code // like the following: @@ -454,56 +473,48 @@ void TransferFunctions::VisitDeclStmt(DeclStmt *ds) { // clients can detect this pattern and adjust their reporting // appropriately, but we need to continue to analyze subsequent uses // of the variable. - DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(init->IgnoreParenImpCasts()); - vals[vd] = (DRE && DRE->getDecl() == vd) ? Uninitialized - : Initialized; + if (init == lastLoad) { + const DeclRefExpr *DR + = cast<DeclRefExpr>(stripCasts(ac.getASTContext(), + lastLoad->getSubExpr())); + if (DR->getDecl() == vd) { + // int x = x; + // Propagate uninitialized value, but don't immediately report + // a problem. + vals[vd] = Uninitialized; + lastLoad = 0; + lastDR = 0; + if (handler) + handler->handleSelfInit(vd); + return; + } + } + + // All other cases: treat the new variable as initialized. + // This is a minor optimization to reduce the propagation + // of the analysis, since we will have already reported + // the use of the uninitialized value (which visiting the + // initializer). + vals[vd] = Initialized; } - } else if (Stmt *init = vd->getInit()) { - Visit(init); } } } } -void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *dr) { - // We assume that DeclRefExprs wrapped in an lvalue-to-rvalue cast - // cannot be block-level expressions. Therefore, we determine if - // a DeclRefExpr is involved in a "load" by comparing it to the current - // DeclRefExpr found when analyzing the last lvalue-to-rvalue CastExpr. - // If a DeclRefExpr is not involved in a load, we are essentially computing - // its address, either for assignment to a reference or via the '&' operator. - // In such cases, treat the variable as being initialized, since this - // analysis isn't powerful enough to do alias tracking. 
- if (dr != currentDR) - if (const VarDecl *vd = dyn_cast<VarDecl>(dr->getDecl())) - if (isTrackedVar(vd)) - vals[vd] = Initialized; -} - void TransferFunctions::VisitBinaryOperator(clang::BinaryOperator *bo) { if (bo->isAssignmentOp()) { const FindVarResult &res = findBlockVarDecl(bo->getLHS()); - if (const VarDecl* vd = res.getDecl()) { - // We assume that DeclRefExprs wrapped in a BinaryOperator "assignment" - // cannot be block-level expressions. Therefore, we determine if - // a DeclRefExpr is involved in a "load" by comparing it to the current - // DeclRefExpr found when analyzing the last lvalue-to-rvalue CastExpr. - SaveAndRestore<const DeclRefExpr*> lastDR(currentDR, - res.getDeclRefExpr()); - Visit(bo->getRHS()); - Visit(bo->getLHS()); - + if (const VarDecl *vd = res.getDecl()) { ValueVector::reference val = vals[vd]; if (isUninitialized(val)) { if (bo->getOpcode() != BO_Assign) reportUninit(res.getDeclRefExpr(), vd, isAlwaysUninit(val)); - val = Initialized; + else + val = Initialized; } - return; } } - Visit(bo->getRHS()); - Visit(bo->getLHS()); } void TransferFunctions::VisitUnaryOperator(clang::UnaryOperator *uo) { @@ -514,86 +525,88 @@ void TransferFunctions::VisitUnaryOperator(clang::UnaryOperator *uo) { case clang::UO_PreInc: { const FindVarResult &res = findBlockVarDecl(uo->getSubExpr()); if (const VarDecl *vd = res.getDecl()) { - // We assume that DeclRefExprs wrapped in a unary operator ++/-- - // cannot be block-level expressions. Therefore, we determine if - // a DeclRefExpr is involved in a "load" by comparing it to the current - // DeclRefExpr found when analyzing the last lvalue-to-rvalue CastExpr. - SaveAndRestore<const DeclRefExpr*> lastDR(currentDR, - res.getDeclRefExpr()); - Visit(uo->getSubExpr()); + assert(res.getDeclRefExpr() == lastDR); + // We null out lastDR to indicate we have fully processed it + // and we don't want the auto-value setting in Visit(). + lastDR = 0; ValueVector::reference val = vals[vd]; - if (isUninitialized(val)) { + if (isUninitialized(val)) reportUninit(res.getDeclRefExpr(), vd, isAlwaysUninit(val)); - // Don't cascade warnings. - val = Initialized; - } - return; } break; } default: break; } - Visit(uo->getSubExpr()); } void TransferFunctions::VisitCastExpr(clang::CastExpr *ce) { if (ce->getCastKind() == CK_LValueToRValue) { const FindVarResult &res = findBlockVarDecl(ce->getSubExpr()); - if (const VarDecl *vd = res.getDecl()) { - // We assume that DeclRefExprs wrapped in an lvalue-to-rvalue cast - // cannot be block-level expressions. Therefore, we determine if - // a DeclRefExpr is involved in a "load" by comparing it to the current - // DeclRefExpr found when analyzing the last lvalue-to-rvalue CastExpr. - // Here we update 'currentDR' to be the one associated with this - // lvalue-to-rvalue cast. Then, when we analyze the DeclRefExpr, we - // will know that we are not computing its lvalue for other purposes - // than to perform a load. - SaveAndRestore<const DeclRefExpr*> lastDR(currentDR, - res.getDeclRefExpr()); - Visit(ce->getSubExpr()); - if (currentVoidCast != ce) { - Value val = vals[vd]; - if (isUninitialized(val)) { - reportUninit(res.getDeclRefExpr(), vd, isAlwaysUninit(val)); - // Don't cascade warnings. 
- vals[vd] = Initialized; - } - } - return; + if (res.getDecl()) { + assert(res.getDeclRefExpr() == lastDR); + lastLoad = ce; } } + else if (ce->getCastKind() == CK_NoOp || + ce->getCastKind() == CK_LValueBitCast) { + skipProcessUses = true; + } else if (CStyleCastExpr *cse = dyn_cast<CStyleCastExpr>(ce)) { if (cse->getType()->isVoidType()) { // e.g. (void) x; - SaveAndRestore<const Expr *> - lastVoidCast(currentVoidCast, cse->getSubExpr()->IgnoreParens()); - Visit(cse->getSubExpr()); - return; + if (lastLoad == cse->getSubExpr()) { + // Squelch any detected load of an uninitialized value if + // we cast it to void. + lastLoad = 0; + lastDR = 0; + } } } - Visit(ce->getSubExpr()); } -void TransferFunctions::VisitUnaryExprOrTypeTraitExpr( - UnaryExprOrTypeTraitExpr *se) { - if (se->getKind() == UETT_SizeOf) { - if (se->getType()->isConstantSizeType()) +void TransferFunctions::Visit(clang::Stmt *s) { + skipProcessUses = false; + StmtVisitor<TransferFunctions>::Visit(s); + if (!skipProcessUses) + ProcessUses(s); +} + +void TransferFunctions::ProcessUses(Stmt *s) { + // This method is typically called after visiting a CFGElement statement + // in the CFG. We delay processing of reporting many loads of uninitialized + // values until here. + if (lastLoad) { + // If we just visited the lvalue-to-rvalue cast, there is nothing + // left to do. + if (lastLoad == s) + return; + + const DeclRefExpr *DR = + cast<DeclRefExpr>(stripCasts(ac.getASTContext(), + lastLoad->getSubExpr())); + const VarDecl *VD = cast<VarDecl>(DR->getDecl()); + + // If we reach here, we may have seen a load of an uninitialized value + // and it hasn't been casted to void or otherwise handled. In this + // situation, report the incident. + if (isUninitialized(vals[VD])) + reportUninit(DR, VD, isAlwaysUninit(vals[VD])); + + lastLoad = 0; + + if (DR == lastDR) { + lastDR = 0; return; - // Handle VLAs. - Visit(se->getArgumentExpr()); + } } -} -void TransferFunctions::VisitCXXTypeidExpr(CXXTypeidExpr *E) { - // typeid(expression) is potentially evaluated when the argument is - // a glvalue of polymorphic type. (C++ 5.2.8p2-3) - if (!E->isTypeOperand() && E->Classify(ac.getASTContext()).isGLValue()) { - QualType SubExprTy = E->getExprOperand()->getType(); - if (const RecordType *Record = SubExprTy->getAs<RecordType>()) - if (cast<CXXRecordDecl>(Record->getDecl())->isPolymorphic()) - Visit(E->getExprOperand()); + // Any other uses of 'lastDR' involve taking an lvalue of variable. + // In this case, it "escapes" the analysis. + if (lastDR && lastDR != s) { + vals[cast<VarDecl>(lastDR->getDecl())] = Initialized; + lastDR = 0; } } @@ -604,8 +617,7 @@ void TransferFunctions::VisitCXXTypeidExpr(CXXTypeidExpr *E) { static bool runOnBlock(const CFGBlock *block, const CFG &cfg, AnalysisContext &ac, CFGBlockValues &vals, llvm::BitVector &wasAnalyzed, - UninitVariablesHandler *handler = 0, - bool flagBlockUses = false) { + UninitVariablesHandler *handler = 0) { wasAnalyzed[block->getBlockID()] = true; @@ -623,8 +635,7 @@ static bool runOnBlock(const CFGBlock *block, const CFG &cfg, vals.mergeIntoScratch(*(vB.second ? vB.second : vB.first), false); valsAB.first = vA.first; valsAB.second = &vals.getScratch(); - } - else { + } else { // Merge the 'T' bits from the first and second. 
assert(b->getOpcode() == BO_LOr); vals.mergeIntoScratch(*vA.first, true); @@ -640,17 +651,21 @@ static bool runOnBlock(const CFGBlock *block, const CFG &cfg, bool isFirst = true; for (CFGBlock::const_pred_iterator I = block->pred_begin(), E = block->pred_end(); I != E; ++I) { - vals.mergeIntoScratch(vals.getValueVector(*I, block), isFirst); - isFirst = false; + const CFGBlock *pred = *I; + if (wasAnalyzed[pred->getBlockID()]) { + vals.mergeIntoScratch(vals.getValueVector(pred, block), isFirst); + isFirst = false; + } } // Apply the transfer function. - TransferFunctions tf(vals, cfg, ac, handler, flagBlockUses); + TransferFunctions tf(vals, cfg, ac, handler); for (CFGBlock::const_iterator I = block->begin(), E = block->end(); I != E; ++I) { if (const CFGStmt *cs = dyn_cast<CFGStmt>(&*I)) { - tf.BlockStmt_Visit(cs->getStmt()); + tf.Visit(const_cast<Stmt*>(cs->getStmt())); } } + tf.ProcessUses(); return vals.updateValueVectorWithScratch(block); } @@ -685,6 +700,7 @@ void clang::runUninitializedVariablesAnalysis( llvm::BitVector previouslyVisited(cfg.getNumBlockIDs()); worklist.enqueueSuccessors(&cfg.getEntry()); llvm::BitVector wasAnalyzed(cfg.getNumBlockIDs(), false); + wasAnalyzed[cfg.getEntry().getBlockID()] = true; while (const CFGBlock *block = worklist.dequeue()) { // Did the block change? @@ -697,9 +713,9 @@ void clang::runUninitializedVariablesAnalysis( // Run through the blocks one more time, and report uninitialized variables. for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) { - if (wasAnalyzed[(*BI)->getBlockID()]) { - runOnBlock(*BI, cfg, ac, vals, wasAnalyzed, &handler, - /* flagBlockUses */ true); + const CFGBlock *block = *BI; + if (wasAnalyzed[block->getBlockID()]) { + runOnBlock(block, cfg, ac, vals, wasAnalyzed, &handler); ++stats.NumBlockVisits; } }
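Finally, for the UninitializedValues changes above, a hypothetical sketch (invented names; the comments describe the expected behavior of the reworked lastDR/lastLoad handling, not exact diagnostic text) of the patterns it distinguishes:

  void sink(int);

  void demo() {
    int a;
    sink(a);      // use of the uninitialized 'a'; the pending load is reported
                  // from ProcessUses once the statement has been visited

    int b = b;    // self-initialization: handleSelfInit(b) fires and the
                  // uninitialized value is propagated rather than reported here

    int c;
    (void)c;      // the (void) cast squelches the would-be warning for 'c'

    int d;
    int *p = &d;  // taking the address makes 'd' escape the analysis;
                  // it is treated as initialized from here on
    sink(d);      // no warning, by design
    (void)p;
  }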