Diffstat (limited to 'contrib/llvm/tools/clang/lib/Analysis')
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp    |   63
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/CFG.cpp                    |  500
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp              |    7
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp       |    2
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp           |  138
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp          |   19
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp     |  122
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp           |    6
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp |    1
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp      |  193
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp           | 1641
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp    |  793
12 files changed, 2180 insertions, 1305 deletions
diff --git a/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp b/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp
index 659cc6d..7de7f39 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp
@@ -12,6 +12,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
@@ -34,11 +35,9 @@ typedef llvm::DenseMap<const void *, ManagedAnalysis *> ManagedAnalysisMap;
AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
const Decl *d,
- idx::TranslationUnit *tu,
const CFG::BuildOptions &buildOptions)
: Manager(Mgr),
D(d),
- TU(tu),
cfgBuildOptions(buildOptions),
forcedBlkExprs(0),
builtCFG(false),
@@ -50,11 +49,9 @@ AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
}
AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
- const Decl *d,
- idx::TranslationUnit *tu)
+ const Decl *d)
: Manager(Mgr),
D(d),
- TU(tu),
forcedBlkExprs(0),
builtCFG(false),
builtCompleteCFG(false),
@@ -184,8 +181,16 @@ void AnalysisDeclContext::dumpCFG(bool ShowColors) {
}
ParentMap &AnalysisDeclContext::getParentMap() {
- if (!PM)
+ if (!PM) {
PM.reset(new ParentMap(getBody()));
+ if (const CXXConstructorDecl *C = dyn_cast<CXXConstructorDecl>(getDecl())) {
+ for (CXXConstructorDecl::init_const_iterator I = C->init_begin(),
+ E = C->init_end();
+ I != E; ++I) {
+ PM->addStmt((*I)->getInit());
+ }
+ }
+ }
return *PM;
}
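Editor's sketch (not part of the patch): with the extra loop above, the expressions inside a constructor's member initializers are registered in the ParentMap. A minimal example of the kind of code whose initializer expressions gain parents; the names are hypothetical:

    // init(x) and a + 1 are the CXXCtorInitializer init expressions that
    // PM->addStmt() now records.
    struct Widget {
      static int init(int v) { return v * 2; }
      int a;
      int b;
      Widget(int x) : a(init(x)), b(a + 1) {}
    };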
@@ -195,11 +200,10 @@ PseudoConstantAnalysis *AnalysisDeclContext::getPseudoConstantAnalysis() {
return PCA.get();
}
-AnalysisDeclContext *AnalysisDeclContextManager::getContext(const Decl *D,
- idx::TranslationUnit *TU) {
+AnalysisDeclContext *AnalysisDeclContextManager::getContext(const Decl *D) {
AnalysisDeclContext *&AC = Contexts[D];
if (!AC)
- AC = new AnalysisDeclContext(this, D, TU, cfgBuildOptions);
+ AC = new AnalysisDeclContext(this, D, cfgBuildOptions);
return AC;
}
@@ -209,6 +213,14 @@ AnalysisDeclContext::getStackFrame(LocationContext const *Parent, const Stmt *S,
return getLocationContextManager().getStackFrame(this, Parent, S, Blk, Idx);
}
+const BlockInvocationContext *
+AnalysisDeclContext::getBlockInvocationContext(const LocationContext *parent,
+ const clang::BlockDecl *BD,
+ const void *ContextData) {
+ return getLocationContextManager().getBlockInvocationContext(this, parent,
+ BD, ContextData);
+}
+
LocationContextManager & AnalysisDeclContext::getLocationContextManager() {
assert(Manager &&
"Cannot create LocationContexts without an AnalysisDeclContextManager!");
@@ -239,7 +251,7 @@ void ScopeContext::Profile(llvm::FoldingSetNodeID &ID) {
}
void BlockInvocationContext::Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getAnalysisDeclContext(), getParent(), BD);
+ Profile(ID, getAnalysisDeclContext(), getParent(), BD, ContextData);
}
//===----------------------------------------------------------------------===//
@@ -288,6 +300,24 @@ LocationContextManager::getScope(AnalysisDeclContext *ctx,
return getLocationContext<ScopeContext, Stmt>(ctx, parent, s);
}
+const BlockInvocationContext *
+LocationContextManager::getBlockInvocationContext(AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const BlockDecl *BD,
+ const void *ContextData) {
+ llvm::FoldingSetNodeID ID;
+ BlockInvocationContext::Profile(ID, ctx, parent, BD, ContextData);
+ void *InsertPos;
+ BlockInvocationContext *L =
+ cast_or_null<BlockInvocationContext>(Contexts.FindNodeOrInsertPos(ID,
+ InsertPos));
+ if (!L) {
+ L = new BlockInvocationContext(ctx, parent, BD, ContextData);
+ Contexts.InsertNode(L, InsertPos);
+ }
+ return L;
+}
+
//===----------------------------------------------------------------------===//
// LocationContext methods.
//===----------------------------------------------------------------------===//
@@ -302,19 +332,6 @@ const StackFrameContext *LocationContext::getCurrentStackFrame() const {
return NULL;
}
-const StackFrameContext *
-LocationContext::getStackFrameForDeclContext(const DeclContext *DC) const {
- const LocationContext *LC = this;
- while (LC) {
- if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LC)) {
- if (cast<DeclContext>(SFC->getDecl()) == DC)
- return SFC;
- }
- LC = LC->getParent();
- }
- return NULL;
-}
-
bool LocationContext::isParentOf(const LocationContext *LC) const {
do {
const LocationContext *Parent = LC->getParent();
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
index 2f1f1cb..05c5385 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
@@ -1,4 +1,4 @@
-//===--- CFG.cpp - Classes for representing and building CFGs----*- C++ -*-===//
+ //===--- CFG.cpp - Classes for representing and building CFGs----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,6 +14,7 @@
#include "llvm/Support/SaveAndRestore.h"
#include "clang/Analysis/CFG.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/PrettyPrinter.h"
@@ -312,19 +313,6 @@ private:
CFGBlock *VisitAddrLabelExpr(AddrLabelExpr *A, AddStmtChoice asc);
CFGBlock *VisitBinaryOperator(BinaryOperator *B, AddStmtChoice asc);
CFGBlock *VisitBreakStmt(BreakStmt *B);
- CFGBlock *VisitCXXCatchStmt(CXXCatchStmt *S);
- CFGBlock *VisitExprWithCleanups(ExprWithCleanups *E,
- AddStmtChoice asc);
- CFGBlock *VisitCXXThrowExpr(CXXThrowExpr *T);
- CFGBlock *VisitCXXTryStmt(CXXTryStmt *S);
- CFGBlock *VisitCXXForRangeStmt(CXXForRangeStmt *S);
- CFGBlock *VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
- AddStmtChoice asc);
- CFGBlock *VisitCXXConstructExpr(CXXConstructExpr *C, AddStmtChoice asc);
- CFGBlock *VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E,
- AddStmtChoice asc);
- CFGBlock *VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *C,
- AddStmtChoice asc);
CFGBlock *VisitCallExpr(CallExpr *C, AddStmtChoice asc);
CFGBlock *VisitCaseStmt(CaseStmt *C);
CFGBlock *VisitChooseExpr(ChooseExpr *C, AddStmtChoice asc);
@@ -332,31 +320,47 @@ private:
CFGBlock *VisitConditionalOperator(AbstractConditionalOperator *C,
AddStmtChoice asc);
CFGBlock *VisitContinueStmt(ContinueStmt *C);
+ CFGBlock *VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
+ AddStmtChoice asc);
+ CFGBlock *VisitCXXCatchStmt(CXXCatchStmt *S);
+ CFGBlock *VisitCXXConstructExpr(CXXConstructExpr *C, AddStmtChoice asc);
+ CFGBlock *VisitCXXForRangeStmt(CXXForRangeStmt *S);
+ CFGBlock *VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E,
+ AddStmtChoice asc);
+ CFGBlock *VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *C,
+ AddStmtChoice asc);
+ CFGBlock *VisitCXXThrowExpr(CXXThrowExpr *T);
+ CFGBlock *VisitCXXTryStmt(CXXTryStmt *S);
CFGBlock *VisitDeclStmt(DeclStmt *DS);
CFGBlock *VisitDeclSubExpr(DeclStmt *DS);
CFGBlock *VisitDefaultStmt(DefaultStmt *D);
CFGBlock *VisitDoStmt(DoStmt *D);
- CFGBlock *VisitLambdaExpr(LambdaExpr *E, AddStmtChoice asc);
+ CFGBlock *VisitExprWithCleanups(ExprWithCleanups *E, AddStmtChoice asc);
CFGBlock *VisitForStmt(ForStmt *F);
CFGBlock *VisitGotoStmt(GotoStmt *G);
CFGBlock *VisitIfStmt(IfStmt *I);
CFGBlock *VisitImplicitCastExpr(ImplicitCastExpr *E, AddStmtChoice asc);
CFGBlock *VisitIndirectGotoStmt(IndirectGotoStmt *I);
CFGBlock *VisitLabelStmt(LabelStmt *L);
- CFGBlock *VisitLambdaExpr(LambdaExpr *L);
+ CFGBlock *VisitLambdaExpr(LambdaExpr *E, AddStmtChoice asc);
+ CFGBlock *VisitLogicalOperator(BinaryOperator *B);
+ std::pair<CFGBlock *, CFGBlock *> VisitLogicalOperator(BinaryOperator *B,
+ Stmt *Term,
+ CFGBlock *TrueBlock,
+ CFGBlock *FalseBlock);
CFGBlock *VisitMemberExpr(MemberExpr *M, AddStmtChoice asc);
CFGBlock *VisitObjCAtCatchStmt(ObjCAtCatchStmt *S);
- CFGBlock *VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
CFGBlock *VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S);
CFGBlock *VisitObjCAtThrowStmt(ObjCAtThrowStmt *S);
CFGBlock *VisitObjCAtTryStmt(ObjCAtTryStmt *S);
+ CFGBlock *VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
CFGBlock *VisitObjCForCollectionStmt(ObjCForCollectionStmt *S);
- CFGBlock *VisitReturnStmt(ReturnStmt *R);
CFGBlock *VisitPseudoObjectExpr(PseudoObjectExpr *E);
- CFGBlock *VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E,
- AddStmtChoice asc);
+ CFGBlock *VisitReturnStmt(ReturnStmt *R);
CFGBlock *VisitStmtExpr(StmtExpr *S, AddStmtChoice asc);
CFGBlock *VisitSwitchStmt(SwitchStmt *S);
+ CFGBlock *VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E,
+ AddStmtChoice asc);
CFGBlock *VisitUnaryOperator(UnaryOperator *U, AddStmtChoice asc);
CFGBlock *VisitWhileStmt(WhileStmt *W);
@@ -772,13 +776,12 @@ void CFGBuilder::addAutomaticObjDtors(LocalScope::const_iterator B,
// If this destructor is marked as a no-return destructor, we need to
// create a new block for the destructor which does not have as a successor
// anything built thus far: control won't flow out of this block.
- QualType Ty;
- if ((*I)->getType()->isReferenceType()) {
+ QualType Ty = (*I)->getType();
+ if (Ty->isReferenceType()) {
Ty = getReferenceInitTemporaryType(*Context, (*I)->getInit());
- } else {
- Ty = Context->getBaseElementType((*I)->getType());
}
-
+ Ty = Context->getBaseElementType(Ty);
+
const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
if (cast<FunctionType>(Dtor->getType())->getNoReturnAttr())
Block = createNoReturnBlock();
@@ -1070,9 +1073,6 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
case Stmt::LambdaExprClass:
return VisitLambdaExpr(cast<LambdaExpr>(S), asc);
- case Stmt::AttributedStmtClass:
- return Visit(cast<AttributedStmt>(S)->getSubStmt(), asc);
-
case Stmt::MemberExprClass:
return VisitMemberExpr(cast<MemberExpr>(S), asc);
@@ -1166,55 +1166,111 @@ CFGBlock *CFGBuilder::VisitUnaryOperator(UnaryOperator *U,
return Visit(U->getSubExpr(), AddStmtChoice());
}
-CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B,
- AddStmtChoice asc) {
- if (B->isLogicalOp()) { // && or ||
- CFGBlock *ConfluenceBlock = Block ? Block : createBlock();
- appendStmt(ConfluenceBlock, B);
+CFGBlock *CFGBuilder::VisitLogicalOperator(BinaryOperator *B) {
+ CFGBlock *ConfluenceBlock = Block ? Block : createBlock();
+ appendStmt(ConfluenceBlock, B);
- if (badCFG)
- return 0;
+ if (badCFG)
+ return 0;
- // create the block evaluating the LHS
- CFGBlock *LHSBlock = createBlock(false);
- LHSBlock->setTerminator(B);
+ return VisitLogicalOperator(B, 0, ConfluenceBlock, ConfluenceBlock).first;
+}
- // create the block evaluating the RHS
- Succ = ConfluenceBlock;
- Block = NULL;
- CFGBlock *RHSBlock = addStmt(B->getRHS());
+std::pair<CFGBlock*, CFGBlock*>
+CFGBuilder::VisitLogicalOperator(BinaryOperator *B,
+ Stmt *Term,
+ CFGBlock *TrueBlock,
+ CFGBlock *FalseBlock) {
- if (RHSBlock) {
- if (badCFG)
- return 0;
- } else {
- // Create an empty block for cases where the RHS doesn't require
- // any explicit statements in the CFG.
- RHSBlock = createBlock();
+ // Introspect the RHS. If it is a nested logical operation, we recursively
+ // build the CFG using this function. Otherwise, resort to default
+ // CFG construction behavior.
+ Expr *RHS = B->getRHS()->IgnoreParens();
+ CFGBlock *RHSBlock, *ExitBlock;
+
+ do {
+ if (BinaryOperator *B_RHS = dyn_cast<BinaryOperator>(RHS))
+ if (B_RHS->isLogicalOp()) {
+ llvm::tie(RHSBlock, ExitBlock) =
+ VisitLogicalOperator(B_RHS, Term, TrueBlock, FalseBlock);
+ break;
+ }
+
+ // The RHS is not a nested logical operation. Don't push the terminator
+ // down further, but instead visit RHS and construct the respective
+ // pieces of the CFG, and link up the RHSBlock with the terminator
+ // we have been provided.
+ ExitBlock = RHSBlock = createBlock(false);
+
+ if (!Term) {
+ assert(TrueBlock == FalseBlock);
+ addSuccessor(RHSBlock, TrueBlock);
+ }
+ else {
+ RHSBlock->setTerminator(Term);
+ TryResult KnownVal = tryEvaluateBool(RHS);
+ addSuccessor(RHSBlock, KnownVal.isFalse() ? NULL : TrueBlock);
+ addSuccessor(RHSBlock, KnownVal.isTrue() ? NULL : FalseBlock);
}
- // Generate the blocks for evaluating the LHS.
- Block = LHSBlock;
- CFGBlock *EntryLHSBlock = addStmt(B->getLHS());
+ Block = RHSBlock;
+ RHSBlock = addStmt(RHS);
+ }
+ while (false);
- // See if this is a known constant.
- TryResult KnownVal = tryEvaluateBool(B->getLHS());
- if (KnownVal.isKnown() && (B->getOpcode() == BO_LOr))
- KnownVal.negate();
+ if (badCFG)
+ return std::make_pair((CFGBlock*)0, (CFGBlock*)0);
+
+ // Generate the blocks for evaluating the LHS.
+ Expr *LHS = B->getLHS()->IgnoreParens();
+
+ if (BinaryOperator *B_LHS = dyn_cast<BinaryOperator>(LHS))
+ if (B_LHS->isLogicalOp()) {
+ if (B->getOpcode() == BO_LOr)
+ FalseBlock = RHSBlock;
+ else
+ TrueBlock = RHSBlock;
- // Now link the LHSBlock with RHSBlock.
- if (B->getOpcode() == BO_LOr) {
- addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
- addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
- } else {
- assert(B->getOpcode() == BO_LAnd);
- addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
- addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
+ // For the LHS, treat 'B' as the terminator that we want to sink
+ // into the nested branch. The RHS always gets the top-most
+ // terminator.
+ return VisitLogicalOperator(B_LHS, B, TrueBlock, FalseBlock);
}
- return EntryLHSBlock;
+ // Create the block evaluating the LHS.
+ // This contains the '&&' or '||' as the terminator.
+ CFGBlock *LHSBlock = createBlock(false);
+ LHSBlock->setTerminator(B);
+
+ Block = LHSBlock;
+ CFGBlock *EntryLHSBlock = addStmt(LHS);
+
+ if (badCFG)
+ return std::make_pair((CFGBlock*)0, (CFGBlock*)0);
+
+ // See if this is a known constant.
+ TryResult KnownVal = tryEvaluateBool(LHS);
+
+ // Now link the LHSBlock with RHSBlock.
+ if (B->getOpcode() == BO_LOr) {
+ addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : TrueBlock);
+ addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : RHSBlock);
+ } else {
+ assert(B->getOpcode() == BO_LAnd);
+ addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
+ addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : FalseBlock);
}
+ return std::make_pair(EntryLHSBlock, ExitBlock);
+}
+
+
+CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B,
+ AddStmtChoice asc) {
+ // && or ||
+ if (B->isLogicalOp())
+ return VisitLogicalOperator(B);
+
if (B->getOpcode() == BO_Comma) { // ,
autoCreateBlock();
appendStmt(Block, B);
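For illustration (not from the patch): a minimal sketch of the CFG shape the new VisitLogicalOperator is meant to produce for a short-circuit condition; the block layout in the comment is only indicative:

    int f(bool a, bool b) {
      // Rough CFG after this change:
      //   [a?] --false--> [return 0]
      //      \---true---> [b?] --false--> [return 0]
      //                      \---true---> [return 1]
      // Each operand carries its own branch terminator; there is no extra
      // confluence block between the condition and the then/else blocks.
      if (a && b)
        return 1;
      return 0;
    }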
@@ -1284,7 +1340,7 @@ static bool CanThrow(Expr *E, ASTContext &Ctx) {
const FunctionType *FT = Ty->getAs<FunctionType>();
if (FT) {
if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FT))
- if (Proto->getExceptionSpecType() != EST_Uninstantiated &&
+ if (!isUnresolvedExceptionSpec(Proto->getExceptionSpecType()) &&
Proto->isNothrow(Ctx))
return false;
}
@@ -1435,6 +1491,12 @@ CFGBlock *CFGBuilder::VisitConditionalOperator(AbstractConditionalOperator *C,
if (badCFG)
return 0;
+ // If the condition is a logical '&&' or '||', build a more accurate CFG.
+ if (BinaryOperator *Cond =
+ dyn_cast<BinaryOperator>(C->getCond()->IgnoreParens()))
+ if (Cond->isLogicalOp())
+ return VisitLogicalOperator(Cond, C, LHSBlock, RHSBlock).first;
+
// Create the block that will contain the condition.
Block = createBlock(false);
@@ -1471,11 +1533,10 @@ CFGBlock *CFGBuilder::VisitDeclStmt(DeclStmt *DS) {
CFGBlock *B = 0;
- // FIXME: Add a reverse iterator for DeclStmt to avoid this extra copy.
- typedef SmallVector<Decl*,10> BufTy;
- BufTy Buf(DS->decl_begin(), DS->decl_end());
-
- for (BufTy::reverse_iterator I = Buf.rbegin(), E = Buf.rend(); I != E; ++I) {
+ // Build an individual DeclStmt for each decl.
+ for (DeclStmt::reverse_decl_iterator I = DS->decl_rbegin(),
+ E = DS->decl_rend();
+ I != E; ++I) {
// Get the alignment of the new DeclStmt, padding out to >=8 bytes.
unsigned A = llvm::AlignOf<DeclStmt>::Alignment < 8
? 8 : llvm::AlignOf<DeclStmt>::Alignment;
@@ -1645,6 +1706,19 @@ CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) {
}
}
+ // Specially handle "if (expr1 || ...)" and "if (expr1 && ...)" by
+ // having these handle the actual control-flow jump. Note that
+ // if we introduce a condition variable, e.g. "if (int x = exp1 || exp2)"
+ // we resort to the old control-flow behavior. This special handling
+ // removes infeasible paths from the control-flow graph by having the
+ // control-flow transfer of '&&' or '||' go directly into the then/else
+ // blocks directly.
+ if (!I->getConditionVariable())
+ if (BinaryOperator *Cond =
+ dyn_cast<BinaryOperator>(I->getCond()->IgnoreParens()))
+ if (Cond->isLogicalOp())
+ return VisitLogicalOperator(Cond, I, ThenBlock, ElseBlock).first;
+
// Now create a new block containing the if statement.
Block = createBlock(false);
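Editor's sketch (not part of the patch) of the two cases the comment above distinguishes; the variable 'c' is hypothetical:

    void g(bool a, bool b) {
      if (a || b) {          // no condition variable: the '||' branches are
      }                      // wired directly into the then/else successors
      if (bool c = a || b) { // condition variable present: the builder falls
        (void)c;             // back to the older construction for the condition
      }
    }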
@@ -1795,75 +1869,26 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
SaveAndRestore<JumpTarget> save_break(BreakJumpTarget);
BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
- // Because of short-circuit evaluation, the condition of the loop can span
- // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
- // evaluate the condition.
- CFGBlock *ExitConditionBlock = createBlock(false);
- CFGBlock *EntryConditionBlock = ExitConditionBlock;
-
- // Set the terminator for the "exit" condition block.
- ExitConditionBlock->setTerminator(F);
-
- // Now add the actual condition to the condition block. Because the condition
- // itself may contain control-flow, new blocks may be created.
- if (Stmt *C = F->getCond()) {
- Block = ExitConditionBlock;
- EntryConditionBlock = addStmt(C);
- if (badCFG)
- return 0;
- assert(Block == EntryConditionBlock ||
- (Block == 0 && EntryConditionBlock == Succ));
-
- // If this block contains a condition variable, add both the condition
- // variable and initializer to the CFG.
- if (VarDecl *VD = F->getConditionVariable()) {
- if (Expr *Init = VD->getInit()) {
- autoCreateBlock();
- appendStmt(Block, F->getConditionVariableDeclStmt());
- EntryConditionBlock = addStmt(Init);
- assert(Block == EntryConditionBlock);
- }
- }
-
- if (Block) {
- if (badCFG)
- return 0;
- }
- }
-
- // The condition block is the implicit successor for the loop body as well as
- // any code above the loop.
- Succ = EntryConditionBlock;
-
- // See if this is a known constant.
- TryResult KnownVal(true);
-
- if (F->getCond())
- KnownVal = tryEvaluateBool(F->getCond());
+ CFGBlock *BodyBlock = 0, *TransitionBlock = 0;
// Now create the loop body.
{
assert(F->getBody());
- // Save the current values for Block, Succ, and continue targets.
- SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
- SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget);
+ // Save the current values for Block, Succ, continue and break targets.
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
+ SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget);
- // Create a new block to contain the (bottom) of the loop body.
- Block = NULL;
-
- // Loop body should end with destructor of Condition variable (if any).
- addAutomaticObjDtors(ScopePos, LoopBeginScopePos, F);
+ // Create an empty block to represent the transition block for looping back
+ // to the head of the loop. If we have increment code, it will
+ // go in this block as well.
+ Block = Succ = TransitionBlock = createBlock(false);
+ TransitionBlock->setLoopTarget(F);
if (Stmt *I = F->getInc()) {
// Generate increment code in its own basic block. This is the target of
// continue statements.
Succ = addStmt(I);
- } else {
- // No increment code. Create a special, empty, block that is used as the
- // target block for "looping back" to the start of the loop.
- assert(Succ == EntryConditionBlock);
- Succ = Block ? Block : createBlock();
}
// Finish up the increment (or empty) block if it hasn't been already.
@@ -1874,11 +1899,13 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
Block = 0;
}
- ContinueJumpTarget = JumpTarget(Succ, ContinueScopePos);
+ // The starting block for the loop increment is the block that should
+ // represent the 'loop target' for looping back to the start of the loop.
+ ContinueJumpTarget = JumpTarget(Succ, ContinueScopePos);
+ ContinueJumpTarget.block->setLoopTarget(F);
- // The starting block for the loop increment is the block that should
- // represent the 'loop target' for looping back to the start of the loop.
- ContinueJumpTarget.block->setLoopTarget(F);
+ // Loop body should end with destructor of Condition variable (if any).
+ addAutomaticObjDtors(ScopePos, LoopBeginScopePos, F);
// If body is not a compound statement create implicit scope
// and add destructors.
@@ -1887,20 +1914,79 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
// Now populate the body block, and in the process create new blocks as we
// walk the body of the loop.
- CFGBlock *BodyBlock = addStmt(F->getBody());
+ BodyBlock = addStmt(F->getBody());
- if (!BodyBlock)
- BodyBlock = ContinueJumpTarget.block;//can happen for "for (...;...;...);"
+ if (!BodyBlock) {
+ // In the case of "for (...;...;...);" we can have a null BodyBlock.
+ // Use the continue jump target as the proxy for the body.
+ BodyBlock = ContinueJumpTarget.block;
+ }
else if (badCFG)
return 0;
+ }
+
+ // Because of short-circuit evaluation, the condition of the loop can span
+ // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
+ // evaluate the condition.
+ CFGBlock *EntryConditionBlock = 0, *ExitConditionBlock = 0;
+
+ do {
+ Expr *C = F->getCond();
+
+ // Specially handle logical operators, which have a slightly
+ // more optimal CFG representation.
+ if (BinaryOperator *Cond =
+ dyn_cast_or_null<BinaryOperator>(C ? C->IgnoreParens() : 0))
+ if (Cond->isLogicalOp()) {
+ llvm::tie(EntryConditionBlock, ExitConditionBlock) =
+ VisitLogicalOperator(Cond, F, BodyBlock, LoopSuccessor);
+ break;
+ }
- // This new body block is a successor to our "exit" condition block.
+ // The default case when not handling logical operators.
+ EntryConditionBlock = ExitConditionBlock = createBlock(false);
+ ExitConditionBlock->setTerminator(F);
+
+ // See if this is a known constant.
+ TryResult KnownVal(true);
+
+ if (C) {
+ // Now add the actual condition to the condition block.
+ // Because the condition itself may contain control-flow, new blocks may
+ // be created. Thus we update "Succ" after adding the condition.
+ Block = ExitConditionBlock;
+ EntryConditionBlock = addStmt(C);
+
+ // If this block contains a condition variable, add both the condition
+ // variable and initializer to the CFG.
+ if (VarDecl *VD = F->getConditionVariable()) {
+ if (Expr *Init = VD->getInit()) {
+ autoCreateBlock();
+ appendStmt(Block, F->getConditionVariableDeclStmt());
+ EntryConditionBlock = addStmt(Init);
+ assert(Block == EntryConditionBlock);
+ }
+ }
+
+ if (Block && badCFG)
+ return 0;
+
+ KnownVal = tryEvaluateBool(C);
+ }
+
+ // Add the loop body entry as a successor to the condition.
addSuccessor(ExitConditionBlock, KnownVal.isFalse() ? NULL : BodyBlock);
- }
+ // Link up the condition block with the code that follows the loop. (the
+ // false branch).
+ addSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
- // Link up the condition block with the code that follows the loop. (the
- // false branch).
- addSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
+ } while (false);
+
+ // Link up the loop-back block to the entry condition block.
+ addSuccessor(TransitionBlock, EntryConditionBlock);
+
+ // The condition block is the implicit successor for any code above the loop.
+ Succ = EntryConditionBlock;
// If the loop contains initialization, create a new block for those
// statements. This block can also contain statements that precede the loop.
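For illustration (not from the patch): a hedged sketch of a loop whose short-circuit condition now branches directly to the body and to the loop exit; the same pattern is applied to while loops in the hunks below:

    void h(int n, bool done) {
      // The '&&' below spans two condition blocks; with this change their
      // branches point directly at the loop body and at the code after the
      // loop, rather than funnelling through a single condition exit block.
      for (int i = 0; i != n && !done; ++i) {
        // loop body
      }
    }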
@@ -2108,74 +2194,30 @@ CFGBlock *CFGBuilder::VisitWhileStmt(WhileStmt *W) {
return 0;
LoopSuccessor = Block;
Block = 0;
- } else
+ } else {
LoopSuccessor = Succ;
-
- // Because of short-circuit evaluation, the condition of the loop can span
- // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
- // evaluate the condition.
- CFGBlock *ExitConditionBlock = createBlock(false);
- CFGBlock *EntryConditionBlock = ExitConditionBlock;
-
- // Set the terminator for the "exit" condition block.
- ExitConditionBlock->setTerminator(W);
-
- // Now add the actual condition to the condition block. Because the condition
- // itself may contain control-flow, new blocks may be created. Thus we update
- // "Succ" after adding the condition.
- if (Stmt *C = W->getCond()) {
- Block = ExitConditionBlock;
- EntryConditionBlock = addStmt(C);
- // The condition might finish the current 'Block'.
- Block = EntryConditionBlock;
-
- // If this block contains a condition variable, add both the condition
- // variable and initializer to the CFG.
- if (VarDecl *VD = W->getConditionVariable()) {
- if (Expr *Init = VD->getInit()) {
- autoCreateBlock();
- appendStmt(Block, W->getConditionVariableDeclStmt());
- EntryConditionBlock = addStmt(Init);
- assert(Block == EntryConditionBlock);
- }
- }
-
- if (Block) {
- if (badCFG)
- return 0;
- }
}
- // The condition block is the implicit successor for the loop body as well as
- // any code above the loop.
- Succ = EntryConditionBlock;
-
- // See if this is a known constant.
- const TryResult& KnownVal = tryEvaluateBool(W->getCond());
+ CFGBlock *BodyBlock = 0, *TransitionBlock = 0;
// Process the loop body.
{
assert(W->getBody());
- // Save the current values for Block, Succ, and continue and break targets
+ // Save the current values for Block, Succ, continue and break targets.
SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
- save_break(BreakJumpTarget);
+ save_break(BreakJumpTarget);
// Create an empty block to represent the transition block for looping back
// to the head of the loop.
- Block = 0;
- assert(Succ == EntryConditionBlock);
- Succ = createBlock();
- Succ->setLoopTarget(W);
+ Succ = TransitionBlock = createBlock(false);
+ TransitionBlock->setLoopTarget(W);
ContinueJumpTarget = JumpTarget(Succ, LoopBeginScopePos);
// All breaks should go to the code following the loop.
BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
- // NULL out Block to force lazy instantiation of blocks for the body.
- Block = NULL;
-
// Loop body should end with destructor of Condition variable (if any).
addAutomaticObjDtors(ScopePos, LoopBeginScopePos, W);
@@ -2185,22 +2227,69 @@ CFGBlock *CFGBuilder::VisitWhileStmt(WhileStmt *W) {
addLocalScopeAndDtors(W->getBody());
// Create the body. The returned block is the entry to the loop body.
- CFGBlock *BodyBlock = addStmt(W->getBody());
+ BodyBlock = addStmt(W->getBody());
if (!BodyBlock)
BodyBlock = ContinueJumpTarget.block; // can happen for "while(...) ;"
- else if (Block) {
- if (badCFG)
- return 0;
+ else if (Block && badCFG)
+ return 0;
+ }
+
+ // Because of short-circuit evaluation, the condition of the loop can span
+ // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
+ // evaluate the condition.
+ CFGBlock *EntryConditionBlock = 0, *ExitConditionBlock = 0;
+
+ do {
+ Expr *C = W->getCond();
+
+ // Specially handle logical operators, which have a slightly
+ // more optimal CFG representation.
+ if (BinaryOperator *Cond = dyn_cast<BinaryOperator>(C->IgnoreParens()))
+ if (Cond->isLogicalOp()) {
+ llvm::tie(EntryConditionBlock, ExitConditionBlock) =
+ VisitLogicalOperator(Cond, W, BodyBlock,
+ LoopSuccessor);
+ break;
+ }
+
+ // The default case when not handling logical operators.
+ EntryConditionBlock = ExitConditionBlock = createBlock(false);
+ ExitConditionBlock->setTerminator(W);
+
+ // Now add the actual condition to the condition block.
+ // Because the condition itself may contain control-flow, new blocks may
+ // be created. Thus we update "Succ" after adding the condition.
+ Block = ExitConditionBlock;
+ Block = EntryConditionBlock = addStmt(C);
+
+ // If this block contains a condition variable, add both the condition
+ // variable and initializer to the CFG.
+ if (VarDecl *VD = W->getConditionVariable()) {
+ if (Expr *Init = VD->getInit()) {
+ autoCreateBlock();
+ appendStmt(Block, W->getConditionVariableDeclStmt());
+ EntryConditionBlock = addStmt(Init);
+ assert(Block == EntryConditionBlock);
+ }
}
+ if (Block && badCFG)
+ return 0;
+
+ // See if this is a known constant.
+ const TryResult& KnownVal = tryEvaluateBool(C);
+
// Add the loop body entry as a successor to the condition.
addSuccessor(ExitConditionBlock, KnownVal.isFalse() ? NULL : BodyBlock);
- }
+ // Link up the condition block with the code that follows the loop. (the
+ // false branch).
+ addSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
- // Link up the condition block with the code that follows the loop. (the
- // false branch).
- addSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
+ } while(false);
+
+ // Link up the loop-back block to the entry condition block.
+ addSuccessor(TransitionBlock, EntryConditionBlock);
// There can be no more statements in the condition block since we loop back
// to this block. NULL out Block to force lazy creation of another block.
@@ -3203,8 +3292,8 @@ CFGImplicitDtor::getDestructorDecl(ASTContext &astContext) const {
}
bool CFGImplicitDtor::isNoReturn(ASTContext &astContext) const {
- if (const CXXDestructorDecl *cdecl = getDestructorDecl(astContext)) {
- QualType ty = cdecl->getType();
+ if (const CXXDestructorDecl *decl = getDestructorDecl(astContext)) {
+ QualType ty = decl->getType();
return cast<FunctionType>(ty)->getNoReturnAttr();
}
return false;
@@ -3631,8 +3720,7 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper* Helper,
const Type* T = VD->getType().getTypePtr();
if (const ReferenceType* RT = T->getAs<ReferenceType>())
T = RT->getPointeeType().getTypePtr();
- else if (const Type *ET = T->getArrayElementTypeNoTypeQual())
- T = ET;
+ T = T->getBaseElementTypeUnsafe();
OS << ".~" << T->getAsCXXRecordDecl()->getName().str() << "()";
OS << " (Implicit destructor)\n";
@@ -3644,11 +3732,7 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper* Helper,
} else if (const CFGMemberDtor *ME = E.getAs<CFGMemberDtor>()) {
const FieldDecl *FD = ME->getFieldDecl();
-
- const Type *T = FD->getType().getTypePtr();
- if (const Type *ET = T->getArrayElementTypeNoTypeQual())
- T = ET;
-
+ const Type *T = FD->getType()->getBaseElementTypeUnsafe();
OS << "this->" << FD->getName();
OS << ".~" << T->getAsCXXRecordDecl()->getName() << "()";
OS << " (Member object destructor)\n";
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp b/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp
index 96a16c3..6b75956 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp
@@ -25,12 +25,11 @@ namespace {
/// given function body.
class CGBuilder : public StmtVisitor<CGBuilder> {
CallGraph *G;
- const Decl *FD;
CallGraphNode *CallerNode;
public:
- CGBuilder(CallGraph *g, const Decl *D, CallGraphNode *N)
- : G(g), FD(D), CallerNode(N) {}
+ CGBuilder(CallGraph *g, CallGraphNode *N)
+ : G(g), CallerNode(N) {}
void VisitStmt(Stmt *S) { VisitChildren(S); }
@@ -99,7 +98,7 @@ void CallGraph::addNodeForDecl(Decl* D, bool IsGlobal) {
Root->addCallee(Node, this);
// Process all the calls by this function as well.
- CGBuilder builder(this, D, Node);
+ CGBuilder builder(this, Node);
if (Stmt *Body = D->getBody())
builder.Visit(Body);
}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp b/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp
index 7e9e38f..ce973af 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp
@@ -17,6 +17,8 @@
#include "clang/AST/DeclObjC.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
+#include <cctype>
+
using namespace clang;
using namespace ento;
diff --git a/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
index 51fac49..ff2f777 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
@@ -15,7 +15,7 @@
#include "FormatStringParsing.h"
#include "clang/Basic/LangOptions.h"
-using clang::analyze_format_string::ArgTypeResult;
+using clang::analyze_format_string::ArgType;
using clang::analyze_format_string::FormatStringHandler;
using clang::analyze_format_string::FormatSpecifier;
using clang::analyze_format_string::LengthModifier;
@@ -229,18 +229,34 @@ clang::analyze_format_string::ParseLengthModifier(FormatSpecifier &FS,
}
//===----------------------------------------------------------------------===//
-// Methods on ArgTypeResult.
+// Methods on ArgType.
//===----------------------------------------------------------------------===//
-bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
+bool ArgType::matchesType(ASTContext &C, QualType argTy) const {
+ if (Ptr) {
+ // It has to be a pointer.
+ const PointerType *PT = argTy->getAs<PointerType>();
+ if (!PT)
+ return false;
+
+ // We cannot write through a const qualified pointer.
+ if (PT->getPointeeType().isConstQualified())
+ return false;
+
+ argTy = PT->getPointeeType();
+ }
+
switch (K) {
case InvalidTy:
- llvm_unreachable("ArgTypeResult must be valid");
+ llvm_unreachable("ArgType must be valid");
case UnknownTy:
return true;
case AnyCharTy: {
+ if (const EnumType *ETy = argTy->getAs<EnumType>())
+ argTy = ETy->getDecl()->getIntegerType();
+
if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
switch (BT->getKind()) {
default:
@@ -255,7 +271,10 @@ bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
}
case SpecificTy: {
+ if (const EnumType *ETy = argTy->getAs<EnumType>())
+ argTy = ETy->getDecl()->getIntegerType();
argTy = C.getCanonicalType(argTy).getUnqualifiedType();
+
if (T == argTy)
return true;
// Check for "compatible types".
@@ -265,10 +284,9 @@ bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
break;
case BuiltinType::Char_S:
case BuiltinType::SChar:
- return T == C.UnsignedCharTy;
case BuiltinType::Char_U:
case BuiltinType::UChar:
- return T == C.SignedCharTy;
+ return T == C.UnsignedCharTy || T == C.SignedCharTy;
case BuiltinType::Short:
return T == C.UnsignedShortTy;
case BuiltinType::UShort:
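A small example (not from the patch) of the char-compatibility relaxation above; exact diagnostics depend on the target and warning flags:

    #include <cstdio>

    void chars(char c, unsigned char uc) {
      // %hhd expects 'signed char'; with this change plain char and unsigned
      // char arguments are treated as compatible char variants, so the
      // signedness difference alone should no longer make matchesType() fail.
      std::printf("%hhd %hhd\n", c, uc);
    }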
@@ -319,20 +337,21 @@ bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
}
case WIntTy: {
- // Instead of doing a lookup for the definition of 'wint_t' (which
- // is defined by the system headers) instead see if wchar_t and
- // the argument type promote to the same type.
- QualType PromoWChar =
- C.getWCharType()->isPromotableIntegerType()
- ? C.getPromotedIntegerType(C.getWCharType()) : C.getWCharType();
+
QualType PromoArg =
argTy->isPromotableIntegerType()
? C.getPromotedIntegerType(argTy) : argTy;
- PromoWChar = C.getCanonicalType(PromoWChar).getUnqualifiedType();
+ QualType WInt = C.getCanonicalType(C.getWIntType()).getUnqualifiedType();
PromoArg = C.getCanonicalType(PromoArg).getUnqualifiedType();
- return PromoWChar == PromoArg;
+ // If the promoted argument is the corresponding signed type of the
+ // wint_t type, then it should match.
+ if (PromoArg->hasSignedIntegerRepresentation() &&
+ C.getCorrespondingUnsignedType(PromoArg) == WInt)
+ return true;
+
+ return WInt == PromoArg;
}
case CPointerTy:
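A sketch (not from the patch) of the wint_t case, assuming a target where wint_t is 'unsigned int' and wchar_t promotes to 'int':

    #include <cstdio>

    void wide(wchar_t wc) {
      // %lc takes a wint_t argument. When the promoted argument is the signed
      // counterpart of wint_t, the new getCorrespondingUnsignedType() check
      // accepts the combination instead of rejecting it.
      std::printf("%lc\n", wc);
    }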
@@ -358,40 +377,63 @@ bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
}
}
- llvm_unreachable("Invalid ArgTypeResult Kind!");
+ llvm_unreachable("Invalid ArgType Kind!");
}
-QualType ArgTypeResult::getRepresentativeType(ASTContext &C) const {
+QualType ArgType::getRepresentativeType(ASTContext &C) const {
+ QualType Res;
switch (K) {
case InvalidTy:
- llvm_unreachable("No representative type for Invalid ArgTypeResult");
+ llvm_unreachable("No representative type for Invalid ArgType");
case UnknownTy:
- return QualType();
+ llvm_unreachable("No representative type for Unknown ArgType");
case AnyCharTy:
- return C.CharTy;
+ Res = C.CharTy;
+ break;
case SpecificTy:
- return T;
+ Res = T;
+ break;
case CStrTy:
- return C.getPointerType(C.CharTy);
+ Res = C.getPointerType(C.CharTy);
+ break;
case WCStrTy:
- return C.getPointerType(C.getWCharType());
+ Res = C.getPointerType(C.getWCharType());
+ break;
case ObjCPointerTy:
- return C.ObjCBuiltinIdTy;
+ Res = C.ObjCBuiltinIdTy;
+ break;
case CPointerTy:
- return C.VoidPtrTy;
+ Res = C.VoidPtrTy;
+ break;
case WIntTy: {
- QualType WC = C.getWCharType();
- return WC->isPromotableIntegerType() ? C.getPromotedIntegerType(WC) : WC;
+ Res = C.getWIntType();
+ break;
}
}
- llvm_unreachable("Invalid ArgTypeResult Kind!");
+ if (Ptr)
+ Res = C.getPointerType(Res);
+ return Res;
}
-std::string ArgTypeResult::getRepresentativeTypeName(ASTContext &C) const {
+std::string ArgType::getRepresentativeTypeName(ASTContext &C) const {
std::string S = getRepresentativeType(C).getAsString();
- if (Name && S != Name)
- return std::string("'") + Name + "' (aka '" + S + "')";
+
+ std::string Alias;
+ if (Name) {
+ // Use a specific name for this type, e.g. "size_t".
+ Alias = Name;
+ if (Ptr) {
+ // If ArgType is actually a pointer to T, append an asterisk.
+ Alias += (Alias[Alias.size()-1] == '*') ? "*" : " *";
+ }
+ // If Alias is the same as the underlying type, e.g. wchar_t, then drop it.
+ if (S == Alias)
+ Alias.clear();
+ }
+
+ if (!Alias.empty())
+ return std::string("'") + Alias + "' (aka '" + S + "')";
return std::string("'") + S + "'";
}
@@ -400,7 +442,7 @@ std::string ArgTypeResult::getRepresentativeTypeName(ASTContext &C) const {
// Methods on OptionalAmount.
//===----------------------------------------------------------------------===//
-ArgTypeResult
+ArgType
analyze_format_string::OptionalAmount::getArgType(ASTContext &Ctx) const {
return Ctx.IntTy;
}
@@ -686,3 +728,37 @@ bool FormatSpecifier::hasStandardLengthConversionCombination() const {
}
return true;
}
+
+bool FormatSpecifier::namedTypeToLengthModifier(QualType QT,
+ LengthModifier &LM) {
+ assert(isa<TypedefType>(QT) && "Expected a TypedefType");
+ const TypedefNameDecl *Typedef = cast<TypedefType>(QT)->getDecl();
+
+ for (;;) {
+ const IdentifierInfo *Identifier = Typedef->getIdentifier();
+ if (Identifier->getName() == "size_t") {
+ LM.setKind(LengthModifier::AsSizeT);
+ return true;
+ } else if (Identifier->getName() == "ssize_t") {
+ // Not C99, but common in Unix.
+ LM.setKind(LengthModifier::AsSizeT);
+ return true;
+ } else if (Identifier->getName() == "intmax_t") {
+ LM.setKind(LengthModifier::AsIntMax);
+ return true;
+ } else if (Identifier->getName() == "uintmax_t") {
+ LM.setKind(LengthModifier::AsIntMax);
+ return true;
+ } else if (Identifier->getName() == "ptrdiff_t") {
+ LM.setKind(LengthModifier::AsPtrDiff);
+ return true;
+ }
+
+ QualType T = Typedef->getUnderlyingType();
+ if (!isa<TypedefType>(T))
+ break;
+
+ Typedef = cast<TypedefType>(T)->getDecl();
+ }
+ return false;
+}
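For illustration (not from the patch): the fixit scenario the new namedTypeToLengthModifier helper serves, shared by the printf and scanf checkers:

    #include <cstdio>
    #include <cstddef>

    void sizes(std::size_t n, std::ptrdiff_t d) {
      // Passing size_t / ptrdiff_t through %d is the pattern the fixit
      // targets: the helper walks the typedef chain of the argument type,
      // recognizes "size_t" / "ptrdiff_t", and switches the length modifier
      // so the suggested formats become %zu and %td.
      std::printf("%d %d\n", n, d);
    }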
diff --git a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
index ff6607d..38f8199 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
@@ -284,6 +284,14 @@ void TransferFunctions::Visit(Stmt *S) {
}
break;
}
+ case Stmt::ObjCMessageExprClass: {
+ // In calls to super, include the implicit "self" pointer as being live.
+ ObjCMessageExpr *CE = cast<ObjCMessageExpr>(S);
+ if (CE->getReceiverKind() == ObjCMessageExpr::SuperInstance)
+ val.liveDecls = LV.DSetFact.add(val.liveDecls,
+ LV.analysisContext.getSelfDecl());
+ break;
+ }
case Stmt::DeclStmtClass: {
const DeclStmt *DS = cast<DeclStmt>(S);
if (const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl())) {
@@ -455,6 +463,12 @@ LiveVariablesImpl::runOnBlock(const CFGBlock *block,
for (CFGBlock::const_reverse_iterator it = block->rbegin(),
ei = block->rend(); it != ei; ++it) {
const CFGElement &elem = *it;
+
+ if (const CFGAutomaticObjDtor *Dtor = dyn_cast<CFGAutomaticObjDtor>(&elem)){
+ val.liveDecls = DSetFact.add(val.liveDecls, Dtor->getVarDecl());
+ continue;
+ }
+
if (!isa<CFGStmt>(elem))
continue;
@@ -486,6 +500,11 @@ LiveVariables::computeLiveness(AnalysisDeclContext &AC,
if (!cfg)
return 0;
+ // The analysis currently has scalability issues for very large CFGs.
+ // Bail out if it looks too large.
+ if (cfg->getNumBlockIDs() > 300000)
+ return 0;
+
LiveVariablesImpl *LV = new LiveVariablesImpl(AC, killAtAssign);
// Construct the dataflow worklist. Enqueue the exit block as the
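Editor's sketch (not part of the patch) of the destructor-liveness change above, assuming the CFG is built with implicit destructors; the 300000-block check is a plain size cutoff and is not shown:

    struct Logger {
      ~Logger() {}      // user-provided destructor: with implicit dtors enabled,
    };                  // the CFG gets a CFGAutomaticObjDtor element for 'log'

    void use(int x) {
      Logger log;       // 'log' is never read again in the source, but the
      if (x > 0)        // destructor element at the end of the scope now adds
        return;         // it back to the live set during the reverse walk
    }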
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
index 4b2a19e..2b350ce 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
@@ -15,7 +15,7 @@
#include "clang/Analysis/Analyses/FormatString.h"
#include "FormatStringParsing.h"
-using clang::analyze_format_string::ArgTypeResult;
+using clang::analyze_format_string::ArgType;
using clang::analyze_format_string::FormatStringHandler;
using clang::analyze_format_string::LengthModifier;
using clang::analyze_format_string::OptionalAmount;
@@ -249,20 +249,20 @@ bool clang::analyze_format_string::ParsePrintfString(FormatStringHandler &H,
// Methods on PrintfSpecifier.
//===----------------------------------------------------------------------===//
-ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx,
- bool IsObjCLiteral) const {
+ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
+ bool IsObjCLiteral) const {
const PrintfConversionSpecifier &CS = getConversionSpecifier();
if (!CS.consumesDataArgument())
- return ArgTypeResult::Invalid();
+ return ArgType::Invalid();
if (CS.getKind() == ConversionSpecifier::cArg)
switch (LM.getKind()) {
case LengthModifier::None: return Ctx.IntTy;
case LengthModifier::AsLong:
- return ArgTypeResult(ArgTypeResult::WIntTy, "wint_t");
+ return ArgType(ArgType::WIntTy, "wint_t");
default:
- return ArgTypeResult::Invalid();
+ return ArgType::Invalid();
}
if (CS.isIntArg())
@@ -271,22 +271,22 @@ ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx,
// GNU extension.
return Ctx.LongLongTy;
case LengthModifier::None: return Ctx.IntTy;
- case LengthModifier::AsChar: return ArgTypeResult::AnyCharTy;
+ case LengthModifier::AsChar: return ArgType::AnyCharTy;
case LengthModifier::AsShort: return Ctx.ShortTy;
case LengthModifier::AsLong: return Ctx.LongTy;
case LengthModifier::AsLongLong:
case LengthModifier::AsQuad:
return Ctx.LongLongTy;
case LengthModifier::AsIntMax:
- return ArgTypeResult(Ctx.getIntMaxType(), "intmax_t");
+ return ArgType(Ctx.getIntMaxType(), "intmax_t");
case LengthModifier::AsSizeT:
// FIXME: How to get the corresponding signed version of size_t?
- return ArgTypeResult();
+ return ArgType();
case LengthModifier::AsPtrDiff:
- return ArgTypeResult(Ctx.getPointerDiffType(), "ptrdiff_t");
+ return ArgType(Ctx.getPointerDiffType(), "ptrdiff_t");
case LengthModifier::AsAllocate:
case LengthModifier::AsMAllocate:
- return ArgTypeResult::Invalid();
+ return ArgType::Invalid();
}
if (CS.isUIntArg())
@@ -302,16 +302,16 @@ ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx,
case LengthModifier::AsQuad:
return Ctx.UnsignedLongLongTy;
case LengthModifier::AsIntMax:
- return ArgTypeResult(Ctx.getUIntMaxType(), "uintmax_t");
+ return ArgType(Ctx.getUIntMaxType(), "uintmax_t");
case LengthModifier::AsSizeT:
- return ArgTypeResult(Ctx.getSizeType(), "size_t");
+ return ArgType(Ctx.getSizeType(), "size_t");
case LengthModifier::AsPtrDiff:
// FIXME: How to get the corresponding unsigned
// version of ptrdiff_t?
- return ArgTypeResult();
+ return ArgType();
case LengthModifier::AsAllocate:
case LengthModifier::AsMAllocate:
- return ArgTypeResult::Invalid();
+ return ArgType::Invalid();
}
if (CS.isDoubleArg()) {
@@ -320,37 +320,90 @@ ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx,
return Ctx.DoubleTy;
}
+ if (CS.getKind() == ConversionSpecifier::nArg) {
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return ArgType::PtrTo(Ctx.IntTy);
+ case LengthModifier::AsChar:
+ return ArgType::PtrTo(Ctx.SignedCharTy);
+ case LengthModifier::AsShort:
+ return ArgType::PtrTo(Ctx.ShortTy);
+ case LengthModifier::AsLong:
+ return ArgType::PtrTo(Ctx.LongTy);
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return ArgType::PtrTo(Ctx.LongLongTy);
+ case LengthModifier::AsIntMax:
+ return ArgType::PtrTo(ArgType(Ctx.getIntMaxType(), "intmax_t"));
+ case LengthModifier::AsSizeT:
+ return ArgType(); // FIXME: ssize_t
+ case LengthModifier::AsPtrDiff:
+ return ArgType::PtrTo(ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"));
+ case LengthModifier::AsLongDouble:
+ return ArgType(); // FIXME: Is this a known extension?
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ return ArgType::Invalid();
+ }
+ }
+
switch (CS.getKind()) {
case ConversionSpecifier::sArg:
if (LM.getKind() == LengthModifier::AsWideChar) {
if (IsObjCLiteral)
return Ctx.getPointerType(Ctx.UnsignedShortTy.withConst());
- return ArgTypeResult(ArgTypeResult::WCStrTy, "wchar_t *");
+ return ArgType(ArgType::WCStrTy, "wchar_t *");
}
- return ArgTypeResult::CStrTy;
+ return ArgType::CStrTy;
case ConversionSpecifier::SArg:
if (IsObjCLiteral)
return Ctx.getPointerType(Ctx.UnsignedShortTy.withConst());
- return ArgTypeResult(ArgTypeResult::WCStrTy, "wchar_t *");
+ return ArgType(ArgType::WCStrTy, "wchar_t *");
case ConversionSpecifier::CArg:
if (IsObjCLiteral)
return Ctx.UnsignedShortTy;
- return ArgTypeResult(Ctx.WCharTy, "wchar_t");
+ return ArgType(Ctx.WCharTy, "wchar_t");
case ConversionSpecifier::pArg:
- return ArgTypeResult::CPointerTy;
+ return ArgType::CPointerTy;
case ConversionSpecifier::ObjCObjArg:
- return ArgTypeResult::ObjCPointerTy;
+ return ArgType::ObjCPointerTy;
default:
break;
}
// FIXME: Handle other cases.
- return ArgTypeResult();
+ return ArgType();
}
bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
ASTContext &Ctx, bool IsObjCLiteral) {
- // Handle strings first (char *, wchar_t *)
+ // %n is different from other conversion specifiers; don't try to fix it.
+ if (CS.getKind() == ConversionSpecifier::nArg)
+ return false;
+
+ // Handle Objective-C objects first. Note that while the '%@' specifier will
+ // not warn for structure pointer or void pointer arguments (because that's
+ // how CoreFoundation objects are implemented), we only show a fixit for '%@'
+ // if we know it's an object (block, id, class, or __attribute__((NSObject))).
+ if (QT->isObjCRetainableType()) {
+ if (!IsObjCLiteral)
+ return false;
+
+ CS.setKind(ConversionSpecifier::ObjCObjArg);
+
+ // Disable irrelevant flags
+ HasThousandsGrouping = false;
+ HasPlusPrefix = false;
+ HasSpacePrefix = false;
+ HasAlternativeForm = false;
+ HasLeadingZeroes = false;
+ Precision.setHowSpecified(OptionalAmount::NotSpecified);
+ LM.setKind(LengthModifier::None);
+
+ return true;
+ }
+
+ // Handle strings next (char *, wchar_t *)
if (QT->isPointerType() && (QT->getPointeeType()->isAnyCharacterType())) {
CS.setKind(ConversionSpecifier::sArg);
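For illustration (not from the patch): the write-back ('%n') argument types introduced above, plus the fixit behavior; ordinary C stdio used from C++:

    #include <cstdio>

    void counts(long value) {
      int n = 0;
      long ln = 0;
      std::printf("%ld%n\n", value, &n);    // %n  expects int *
      std::printf("%ld%ln\n", value, &ln);  // %ln expects long *
      // fixType() now returns false for %n instead of inventing a rewrite,
      // and '%@' fixits are only offered for Objective-C object literals.
    }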
@@ -367,6 +420,10 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
return true;
}
+ // If it's an enum, get its underlying type.
+ if (const EnumType *ETy = QT->getAs<EnumType>())
+ QT = ETy->getDecl()->getIntegerType();
+
// We can only work with builtin types.
const BuiltinType *BT = QT->getAs<BuiltinType>();
if (!BT)
@@ -429,24 +486,11 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
}
// Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99.
- if (isa<TypedefType>(QT) && (LangOpt.C99 || LangOpt.CPlusPlus0x)) {
- const IdentifierInfo *Identifier = QT.getBaseTypeIdentifier();
- if (Identifier->getName() == "size_t") {
- LM.setKind(LengthModifier::AsSizeT);
- } else if (Identifier->getName() == "ssize_t") {
- // Not C99, but common in Unix.
- LM.setKind(LengthModifier::AsSizeT);
- } else if (Identifier->getName() == "intmax_t") {
- LM.setKind(LengthModifier::AsIntMax);
- } else if (Identifier->getName() == "uintmax_t") {
- LM.setKind(LengthModifier::AsIntMax);
- } else if (Identifier->getName() == "ptrdiff_t") {
- LM.setKind(LengthModifier::AsPtrDiff);
- }
- }
+ if (isa<TypedefType>(QT) && (LangOpt.C99 || LangOpt.CPlusPlus0x))
+ namedTypeToLengthModifier(QT, LM);
// If fixing the length modifier was enough, we are done.
- const analyze_printf::ArgTypeResult &ATR = getArgType(Ctx, IsObjCLiteral);
+ const analyze_printf::ArgType &ATR = getArgType(Ctx, IsObjCLiteral);
if (hasValidLengthModifier() && ATR.isValid() && ATR.matchesType(Ctx, QT))
return true;
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp b/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp
index 3f711b4..7d67e8a 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp
@@ -36,8 +36,10 @@ ProgramPoint ProgramPoint::getProgramPoint(const Stmt *S, ProgramPoint::Kind K,
return PreStore(S, LC, tag);
case ProgramPoint::PostLValueKind:
return PostLValue(S, LC, tag);
- case ProgramPoint::PostPurgeDeadSymbolsKind:
- return PostPurgeDeadSymbols(S, LC, tag);
+ case ProgramPoint::PostStmtPurgeDeadSymbolsKind:
+ return PostStmtPurgeDeadSymbols(S, LC, tag);
+ case ProgramPoint::PreStmtPurgeDeadSymbolsKind:
+ return PreStmtPurgeDeadSymbols(S, LC, tag);
}
}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp b/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp
index c8b491a..5d659ce 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include <deque>
using namespace clang;
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp
index 6bc4adb..2942400 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp
@@ -15,12 +15,11 @@
#include "clang/Analysis/Analyses/FormatString.h"
#include "FormatStringParsing.h"
-using clang::analyze_format_string::ArgTypeResult;
+using clang::analyze_format_string::ArgType;
using clang::analyze_format_string::FormatStringHandler;
using clang::analyze_format_string::LengthModifier;
using clang::analyze_format_string::OptionalAmount;
using clang::analyze_format_string::ConversionSpecifier;
-using clang::analyze_scanf::ScanfArgTypeResult;
using clang::analyze_scanf::ScanfConversionSpecifier;
using clang::analyze_scanf::ScanfSpecifier;
using clang::UpdateOnReturn;
@@ -194,37 +193,42 @@ static ScanfSpecifierResult ParseScanfSpecifier(FormatStringHandler &H,
return ScanfSpecifierResult(Start, FS);
}
-ScanfArgTypeResult ScanfSpecifier::getArgType(ASTContext &Ctx) const {
+ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
const ScanfConversionSpecifier &CS = getConversionSpecifier();
if (!CS.consumesDataArgument())
- return ScanfArgTypeResult::Invalid();
+ return ArgType::Invalid();
switch(CS.getKind()) {
// Signed int.
case ConversionSpecifier::dArg:
case ConversionSpecifier::iArg:
switch (LM.getKind()) {
- case LengthModifier::None: return ArgTypeResult(Ctx.IntTy);
+ case LengthModifier::None:
+ return ArgType::PtrTo(Ctx.IntTy);
case LengthModifier::AsChar:
- return ArgTypeResult(ArgTypeResult::AnyCharTy);
- case LengthModifier::AsShort: return ArgTypeResult(Ctx.ShortTy);
- case LengthModifier::AsLong: return ArgTypeResult(Ctx.LongTy);
+ return ArgType::PtrTo(ArgType::AnyCharTy);
+ case LengthModifier::AsShort:
+ return ArgType::PtrTo(Ctx.ShortTy);
+ case LengthModifier::AsLong:
+ return ArgType::PtrTo(Ctx.LongTy);
case LengthModifier::AsLongLong:
case LengthModifier::AsQuad:
- return ArgTypeResult(Ctx.LongLongTy);
+ return ArgType::PtrTo(Ctx.LongLongTy);
case LengthModifier::AsIntMax:
- return ScanfArgTypeResult(Ctx.getIntMaxType(), "intmax_t *");
+ return ArgType::PtrTo(ArgType(Ctx.getIntMaxType(), "intmax_t"));
case LengthModifier::AsSizeT:
// FIXME: ssize_t.
- return ScanfArgTypeResult();
+ return ArgType();
case LengthModifier::AsPtrDiff:
- return ScanfArgTypeResult(Ctx.getPointerDiffType(), "ptrdiff_t *");
+ return ArgType::PtrTo(ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"));
case LengthModifier::AsLongDouble:
// GNU extension.
- return ArgTypeResult(Ctx.LongLongTy);
- case LengthModifier::AsAllocate: return ScanfArgTypeResult::Invalid();
- case LengthModifier::AsMAllocate: return ScanfArgTypeResult::Invalid();
+ return ArgType::PtrTo(Ctx.LongLongTy);
+ case LengthModifier::AsAllocate:
+ return ArgType::Invalid();
+ case LengthModifier::AsMAllocate:
+ return ArgType::Invalid();
}
// Unsigned int.
@@ -233,25 +237,31 @@ ScanfArgTypeResult ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case ConversionSpecifier::xArg:
case ConversionSpecifier::XArg:
switch (LM.getKind()) {
- case LengthModifier::None: return ArgTypeResult(Ctx.UnsignedIntTy);
- case LengthModifier::AsChar: return ArgTypeResult(Ctx.UnsignedCharTy);
- case LengthModifier::AsShort: return ArgTypeResult(Ctx.UnsignedShortTy);
- case LengthModifier::AsLong: return ArgTypeResult(Ctx.UnsignedLongTy);
+ case LengthModifier::None:
+ return ArgType::PtrTo(Ctx.UnsignedIntTy);
+ case LengthModifier::AsChar:
+ return ArgType::PtrTo(Ctx.UnsignedCharTy);
+ case LengthModifier::AsShort:
+ return ArgType::PtrTo(Ctx.UnsignedShortTy);
+ case LengthModifier::AsLong:
+ return ArgType::PtrTo(Ctx.UnsignedLongTy);
case LengthModifier::AsLongLong:
case LengthModifier::AsQuad:
- return ArgTypeResult(Ctx.UnsignedLongLongTy);
+ return ArgType::PtrTo(Ctx.UnsignedLongLongTy);
case LengthModifier::AsIntMax:
- return ScanfArgTypeResult(Ctx.getUIntMaxType(), "uintmax_t *");
+ return ArgType::PtrTo(ArgType(Ctx.getUIntMaxType(), "uintmax_t"));
case LengthModifier::AsSizeT:
- return ScanfArgTypeResult(Ctx.getSizeType(), "size_t *");
+ return ArgType::PtrTo(ArgType(Ctx.getSizeType(), "size_t"));
case LengthModifier::AsPtrDiff:
// FIXME: Unsigned version of ptrdiff_t?
- return ScanfArgTypeResult();
+ return ArgType();
case LengthModifier::AsLongDouble:
// GNU extension.
- return ArgTypeResult(Ctx.UnsignedLongLongTy);
- case LengthModifier::AsAllocate: return ScanfArgTypeResult::Invalid();
- case LengthModifier::AsMAllocate: return ScanfArgTypeResult::Invalid();
+ return ArgType::PtrTo(Ctx.UnsignedLongLongTy);
+ case LengthModifier::AsAllocate:
+ return ArgType::Invalid();
+ case LengthModifier::AsMAllocate:
+ return ArgType::Invalid();
}
// Float.
@@ -264,12 +274,14 @@ ScanfArgTypeResult ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case ConversionSpecifier::gArg:
case ConversionSpecifier::GArg:
switch (LM.getKind()) {
- case LengthModifier::None: return ArgTypeResult(Ctx.FloatTy);
- case LengthModifier::AsLong: return ArgTypeResult(Ctx.DoubleTy);
+ case LengthModifier::None:
+ return ArgType::PtrTo(Ctx.FloatTy);
+ case LengthModifier::AsLong:
+ return ArgType::PtrTo(Ctx.DoubleTy);
case LengthModifier::AsLongDouble:
- return ArgTypeResult(Ctx.LongDoubleTy);
+ return ArgType::PtrTo(Ctx.LongDoubleTy);
default:
- return ScanfArgTypeResult::Invalid();
+ return ArgType::Invalid();
}
// Char, string and scanlist.
@@ -277,37 +289,65 @@ ScanfArgTypeResult ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case ConversionSpecifier::sArg:
case ConversionSpecifier::ScanListArg:
switch (LM.getKind()) {
- case LengthModifier::None: return ScanfArgTypeResult::CStrTy;
+ case LengthModifier::None:
+ return ArgType::PtrTo(ArgType::AnyCharTy);
case LengthModifier::AsLong:
- return ScanfArgTypeResult(ScanfArgTypeResult::WCStrTy, "wchar_t *");
+ return ArgType::PtrTo(ArgType(Ctx.getWCharType(), "wchar_t"));
case LengthModifier::AsAllocate:
case LengthModifier::AsMAllocate:
- return ScanfArgTypeResult(ArgTypeResult::CStrTy);
+ return ArgType::PtrTo(ArgType::CStrTy);
default:
- return ScanfArgTypeResult::Invalid();
+ return ArgType::Invalid();
}
case ConversionSpecifier::CArg:
case ConversionSpecifier::SArg:
// FIXME: Mac OS X specific?
switch (LM.getKind()) {
case LengthModifier::None:
- return ScanfArgTypeResult(ScanfArgTypeResult::WCStrTy, "wchar_t *");
+ return ArgType::PtrTo(ArgType(Ctx.getWCharType(), "wchar_t"));
case LengthModifier::AsAllocate:
case LengthModifier::AsMAllocate:
- return ScanfArgTypeResult(ArgTypeResult::WCStrTy, "wchar_t **");
+ return ArgType::PtrTo(ArgType(ArgType::WCStrTy, "wchar_t *"));
default:
- return ScanfArgTypeResult::Invalid();
+ return ArgType::Invalid();
}
// Pointer.
case ConversionSpecifier::pArg:
- return ScanfArgTypeResult(ArgTypeResult(ArgTypeResult::CPointerTy));
+ return ArgType::PtrTo(ArgType::CPointerTy);
+
+ // Write-back.
+ case ConversionSpecifier::nArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return ArgType::PtrTo(Ctx.IntTy);
+ case LengthModifier::AsChar:
+ return ArgType::PtrTo(Ctx.SignedCharTy);
+ case LengthModifier::AsShort:
+ return ArgType::PtrTo(Ctx.ShortTy);
+ case LengthModifier::AsLong:
+ return ArgType::PtrTo(Ctx.LongTy);
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return ArgType::PtrTo(Ctx.LongLongTy);
+ case LengthModifier::AsIntMax:
+ return ArgType::PtrTo(ArgType(Ctx.getIntMaxType(), "intmax_t"));
+ case LengthModifier::AsSizeT:
+ return ArgType(); // FIXME: ssize_t
+ case LengthModifier::AsPtrDiff:
+ return ArgType::PtrTo(ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"));
+ case LengthModifier::AsLongDouble:
+ return ArgType(); // FIXME: Is this a known extension?
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ return ArgType::Invalid();
+ }
default:
break;
}
- return ScanfArgTypeResult();
+ return ArgType();
}
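
The pointer wrapping above mirrors the scanf calling convention: every conversion writes through a pointer to the converted type. A minimal sketch with illustrative variable names only:

    #include <cstdio>
    #include <cstdint>

    void readFields(const char *buf) {
      long long ll;         // "%lld" -> PtrTo(LongLongTy)
      std::intmax_t im;     // "%jd"  -> PtrTo(intmax_t)
      unsigned long ul;     // "%lx"  -> PtrTo(UnsignedLongTy)
      float f;              // "%f"   -> PtrTo(FloatTy)
      std::sscanf(buf, "%lld %jd %lx %f", &ll, &im, &ul, &f);
    }
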
bool ScanfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
@@ -315,7 +355,16 @@ bool ScanfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
if (!QT->isPointerType())
return false;
+ // %n is different from other conversion specifiers; don't try to fix it.
+ if (CS.getKind() == ConversionSpecifier::nArg)
+ return false;
+
QualType PT = QT->getPointeeType();
+
+ // If it's an enum, get its underlying type.
+  if (const EnumType *ETy = PT->getAs<EnumType>())
+    PT = ETy->getDecl()->getIntegerType();
+
const BuiltinType *BT = PT->getAs<BuiltinType>();
if (!BT)
return false;
@@ -377,25 +426,12 @@ bool ScanfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
}
// Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99.
- if (isa<TypedefType>(PT) && (LangOpt.C99 || LangOpt.CPlusPlus0x)) {
- const IdentifierInfo *Identifier = QT.getBaseTypeIdentifier();
- if (Identifier->getName() == "size_t") {
- LM.setKind(LengthModifier::AsSizeT);
- } else if (Identifier->getName() == "ssize_t") {
- // Not C99, but common in Unix.
- LM.setKind(LengthModifier::AsSizeT);
- } else if (Identifier->getName() == "intmax_t") {
- LM.setKind(LengthModifier::AsIntMax);
- } else if (Identifier->getName() == "uintmax_t") {
- LM.setKind(LengthModifier::AsIntMax);
- } else if (Identifier->getName() == "ptrdiff_t") {
- LM.setKind(LengthModifier::AsPtrDiff);
- }
- }
+ if (isa<TypedefType>(PT) && (LangOpt.C99 || LangOpt.CPlusPlus0x))
+ namedTypeToLengthModifier(PT, LM);
// If fixing the length modifier was enough, we are done.
- const analyze_scanf::ScanfArgTypeResult &ATR = getArgType(Ctx);
- if (hasValidLengthModifier() && ATR.isValid() && ATR.matchesType(Ctx, QT))
+ const analyze_scanf::ArgType &AT = getArgType(Ctx);
+ if (hasValidLengthModifier() && AT.isValid() && AT.matchesType(Ctx, QT))
return true;
// Figure out the conversion specifier.
@@ -452,48 +488,3 @@ bool clang::analyze_format_string::ParseScanfString(FormatStringHandler &H,
assert(I == E && "Format string not exhausted");
return false;
}
-
-bool ScanfArgTypeResult::matchesType(ASTContext& C, QualType argTy) const {
- switch (K) {
- case InvalidTy:
- llvm_unreachable("ArgTypeResult must be valid");
- case UnknownTy:
- return true;
- case CStrTy:
- return ArgTypeResult(ArgTypeResult::CStrTy).matchesType(C, argTy);
- case WCStrTy:
- return ArgTypeResult(ArgTypeResult::WCStrTy).matchesType(C, argTy);
- case PtrToArgTypeResultTy: {
- const PointerType *PT = argTy->getAs<PointerType>();
- if (!PT)
- return false;
- return A.matchesType(C, PT->getPointeeType());
- }
- }
-
- llvm_unreachable("Invalid ScanfArgTypeResult Kind!");
-}
-
-QualType ScanfArgTypeResult::getRepresentativeType(ASTContext &C) const {
- switch (K) {
- case InvalidTy:
- llvm_unreachable("No representative type for Invalid ArgTypeResult");
- case UnknownTy:
- return QualType();
- case CStrTy:
- return C.getPointerType(C.CharTy);
- case WCStrTy:
- return C.getPointerType(C.getWCharType());
- case PtrToArgTypeResultTy:
- return C.getPointerType(A.getRepresentativeType(C));
- }
-
- llvm_unreachable("Invalid ScanfArgTypeResult Kind!");
-}
-
-std::string ScanfArgTypeResult::getRepresentativeTypeName(ASTContext& C) const {
- std::string S = getRepresentativeType(C).getAsString();
- if (!Name)
- return std::string("'") + S + "'";
- return std::string("'") + Name + "' (aka '" + S + "')";
-}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp
index 2f7e794..5954682 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp
@@ -26,6 +26,7 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/OperatorKinds.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
@@ -45,8 +46,15 @@ ThreadSafetyHandler::~ThreadSafetyHandler() {}
namespace {
-/// \brief A MutexID object uniquely identifies a particular mutex, and
-/// is built from an Expr* (i.e. calling a lock function).
+/// SExpr implements a simple expression language that is used to store,
+/// compare, and pretty-print C++ expressions. Unlike a clang Expr, a SExpr
+/// does not capture surface syntax, and it does not distinguish between
+/// C++ concepts, like pointers and references, that have no real semantic
+/// differences. This simplicity allows SExprs to be meaningfully compared,
+/// e.g.
+/// (x) = x
+/// (*this).foo = this->foo
+/// *&a = a
///
/// Thread-safety analysis works by comparing lock expressions. Within the
/// body of a function, an expression such as "x->foo->bar.mu" will resolve to
@@ -59,41 +67,194 @@ namespace {
///
/// The current implementation assumes, but does not verify, that multiple uses
/// of the same lock expression satisfies these criteria.
-///
-/// Clang introduces an additional wrinkle, which is that it is difficult to
-/// derive canonical expressions, or compare expressions directly for equality.
-/// Thus, we identify a mutex not by an Expr, but by the list of named
-/// declarations that are referenced by the Expr. In other words,
-/// x->foo->bar.mu will be a four element vector with the Decls for
-/// mu, bar, and foo, and x. The vector will uniquely identify the expression
-/// for all practical purposes. Null is used to denote 'this'.
-///
-/// Note we will need to perform substitution on "this" and function parameter
-/// names when constructing a lock expression.
-///
-/// For example:
-/// class C { Mutex Mu; void lock() EXCLUSIVE_LOCK_FUNCTION(this->Mu); };
-/// void myFunc(C *X) { ... X->lock() ... }
-/// The original expression for the mutex acquired by myFunc is "this->Mu", but
-/// "X" is substituted for "this" so we get X->Mu();
-///
-/// For another example:
-/// foo(MyList *L) EXCLUSIVE_LOCKS_REQUIRED(L->Mu) { ... }
-/// MyList *MyL;
-/// foo(MyL); // requires lock MyL->Mu to be held
-class MutexID {
- SmallVector<NamedDecl*, 2> DeclSeq;
-
- /// Build a Decl sequence representing the lock from the given expression.
+class SExpr {
+private:
+ enum ExprOp {
+ EOP_Nop, //< No-op
+ EOP_Wildcard, //< Matches anything.
+ EOP_This, //< This keyword.
+ EOP_NVar, //< Named variable.
+ EOP_LVar, //< Local variable.
+ EOP_Dot, //< Field access
+ EOP_Call, //< Function call
+ EOP_MCall, //< Method call
+ EOP_Index, //< Array index
+ EOP_Unary, //< Unary operation
+ EOP_Binary, //< Binary operation
+ EOP_Unknown //< Catchall for everything else
+ };
+
+
+ class SExprNode {
+ private:
+ unsigned char Op; //< Opcode of the root node
+ unsigned char Flags; //< Additional opcode-specific data
+ unsigned short Sz; //< Number of child nodes
+ const void* Data; //< Additional opcode-specific data
+
+ public:
+ SExprNode(ExprOp O, unsigned F, const void* D)
+ : Op(static_cast<unsigned char>(O)),
+ Flags(static_cast<unsigned char>(F)), Sz(1), Data(D)
+ { }
+
+ unsigned size() const { return Sz; }
+ void setSize(unsigned S) { Sz = S; }
+
+ ExprOp kind() const { return static_cast<ExprOp>(Op); }
+
+ const NamedDecl* getNamedDecl() const {
+ assert(Op == EOP_NVar || Op == EOP_LVar || Op == EOP_Dot);
+ return reinterpret_cast<const NamedDecl*>(Data);
+ }
+
+ const NamedDecl* getFunctionDecl() const {
+ assert(Op == EOP_Call || Op == EOP_MCall);
+ return reinterpret_cast<const NamedDecl*>(Data);
+ }
+
+ bool isArrow() const { return Op == EOP_Dot && Flags == 1; }
+ void setArrow(bool A) { Flags = A ? 1 : 0; }
+
+ unsigned arity() const {
+ switch (Op) {
+ case EOP_Nop: return 0;
+ case EOP_Wildcard: return 0;
+ case EOP_NVar: return 0;
+ case EOP_LVar: return 0;
+ case EOP_This: return 0;
+ case EOP_Dot: return 1;
+ case EOP_Call: return Flags+1; // First arg is function.
+ case EOP_MCall: return Flags+1; // First arg is implicit obj.
+ case EOP_Index: return 2;
+ case EOP_Unary: return 1;
+ case EOP_Binary: return 2;
+ case EOP_Unknown: return Flags;
+ }
+ return 0;
+ }
+
+ bool operator==(const SExprNode& Other) const {
+ // Ignore flags and size -- they don't matter.
+ return (Op == Other.Op &&
+ Data == Other.Data);
+ }
+
+ bool operator!=(const SExprNode& Other) const {
+ return !(*this == Other);
+ }
+
+ bool matches(const SExprNode& Other) const {
+ return (*this == Other) ||
+ (Op == EOP_Wildcard) ||
+ (Other.Op == EOP_Wildcard);
+ }
+ };
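
For intuition, the lock expression x->foo->bar.mu from the comment above would be laid out in prefix order roughly as follows (a sketch of the encoding, not literal output):

    // index:   0              1               2               3
    // node:    Dot(mu, Sz=4)  Dot(bar, Sz=3)  Dot(foo, Sz=2)  NVar(x, Sz=1)
    //
    // Each node's Sz spans its whole subtree, so the next sibling of the
    // node at index i is simply the node at index i + Sz.
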
+
+
+ /// \brief Encapsulates the lexical context of a function call. The lexical
+ /// context includes the arguments to the call, including the implicit object
+ /// argument. When an attribute containing a mutex expression is attached to
+ /// a method, the expression may refer to formal parameters of the method.
+ /// Actual arguments must be substituted for formal parameters to derive
+ /// the appropriate mutex expression in the lexical context where the function
+ /// is called. PrevCtx holds the context in which the arguments themselves
+ /// should be evaluated; multiple calling contexts can be chained together
+ /// by the lock_returned attribute.
+ struct CallingContext {
+ const NamedDecl* AttrDecl; // The decl to which the attribute is attached.
+ Expr* SelfArg; // Implicit object argument -- e.g. 'this'
+ bool SelfArrow; // is Self referred to with -> or .?
+ unsigned NumArgs; // Number of funArgs
+ Expr** FunArgs; // Function arguments
+ CallingContext* PrevCtx; // The previous context; or 0 if none.
+
+ CallingContext(const NamedDecl *D = 0, Expr *S = 0,
+ unsigned N = 0, Expr **A = 0, CallingContext *P = 0)
+ : AttrDecl(D), SelfArg(S), SelfArrow(false),
+ NumArgs(N), FunArgs(A), PrevCtx(P)
+ { }
+ };
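
A sketch of the substitution a CallingContext enables, using the legacy GNU attribute spellings; the Mutex type and the foo/bar functions are illustrative assumptions, not part of this code:

    struct __attribute__((lockable)) Mutex {
      void Lock()   __attribute__((exclusive_lock_function));
      void Unlock() __attribute__((unlock_function));
    };

    struct MyList { Mutex Mu; };

    void foo(MyList *L) __attribute__((exclusive_locks_required(L->Mu)));

    void bar(MyList *MyL) {
      foo(MyL);  // The attribute expression L->Mu is rebuilt with
                 // FunArgs[0] == MyL, so the analysis asks whether MyL->Mu
                 // is held at this call (and warns here, since it is not).
    }
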
+
+ typedef SmallVector<SExprNode, 4> NodeVector;
+
+private:
+ // A SExpr is a list of SExprNodes in prefix order. The Size field allows
+ // the list to be traversed as a tree.
+ NodeVector NodeVec;
+
+private:
+ unsigned makeNop() {
+ NodeVec.push_back(SExprNode(EOP_Nop, 0, 0));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeWildcard() {
+ NodeVec.push_back(SExprNode(EOP_Wildcard, 0, 0));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeNamedVar(const NamedDecl *D) {
+ NodeVec.push_back(SExprNode(EOP_NVar, 0, D));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeLocalVar(const NamedDecl *D) {
+ NodeVec.push_back(SExprNode(EOP_LVar, 0, D));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeThis() {
+ NodeVec.push_back(SExprNode(EOP_This, 0, 0));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeDot(const NamedDecl *D, bool Arrow) {
+ NodeVec.push_back(SExprNode(EOP_Dot, Arrow ? 1 : 0, D));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeCall(unsigned NumArgs, const NamedDecl *D) {
+ NodeVec.push_back(SExprNode(EOP_Call, NumArgs, D));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeMCall(unsigned NumArgs, const NamedDecl *D) {
+ NodeVec.push_back(SExprNode(EOP_MCall, NumArgs, D));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeIndex() {
+ NodeVec.push_back(SExprNode(EOP_Index, 0, 0));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeUnary() {
+ NodeVec.push_back(SExprNode(EOP_Unary, 0, 0));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeBinary() {
+ NodeVec.push_back(SExprNode(EOP_Binary, 0, 0));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeUnknown(unsigned Arity) {
+ NodeVec.push_back(SExprNode(EOP_Unknown, Arity, 0));
+ return NodeVec.size()-1;
+ }
+
+ /// Build an SExpr from the given C++ expression.
/// Recursive function that terminates on DeclRefExpr.
- /// Note: this function merely creates a MutexID; it does not check to
+ /// Note: this function merely creates a SExpr; it does not check to
/// ensure that the original expression is a valid mutex expression.
- void buildMutexID(Expr *Exp, const NamedDecl *D, Expr *Parent,
- unsigned NumArgs, Expr **FunArgs) {
- if (!Exp) {
- DeclSeq.clear();
- return;
- }
+ ///
+  /// NDeref returns the number of Dereference and AddressOf operations
+  /// preceding the Expr; this is used to decide whether to pretty-print
+ /// SExprs with . or ->.
+ unsigned buildSExpr(Expr *Exp, CallingContext* CallCtx, int* NDeref = 0) {
+ if (!Exp)
+ return 0;
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
@@ -103,144 +264,246 @@ class MutexID {
cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
unsigned i = PV->getFunctionScopeIndex();
- if (FunArgs && FD == D->getCanonicalDecl()) {
+ if (CallCtx && CallCtx->FunArgs &&
+ FD == CallCtx->AttrDecl->getCanonicalDecl()) {
// Substitute call arguments for references to function parameters
- assert(i < NumArgs);
- buildMutexID(FunArgs[i], D, 0, 0, 0);
- return;
+ assert(i < CallCtx->NumArgs);
+ return buildSExpr(CallCtx->FunArgs[i], CallCtx->PrevCtx, NDeref);
}
// Map the param back to the param of the original function declaration.
- DeclSeq.push_back(FD->getParamDecl(i));
- return;
+ makeNamedVar(FD->getParamDecl(i));
+ return 1;
}
// Not a function parameter -- just store the reference.
- DeclSeq.push_back(ND);
- } else if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
- NamedDecl *ND = ME->getMemberDecl();
- DeclSeq.push_back(ND);
- buildMutexID(ME->getBase(), D, Parent, NumArgs, FunArgs);
+ makeNamedVar(ND);
+ return 1;
} else if (isa<CXXThisExpr>(Exp)) {
- if (Parent)
- buildMutexID(Parent, D, 0, 0, 0);
+ // Substitute parent for 'this'
+ if (CallCtx && CallCtx->SelfArg) {
+ if (!CallCtx->SelfArrow && NDeref)
+ // 'this' is a pointer, but self is not, so need to take address.
+ --(*NDeref);
+ return buildSExpr(CallCtx->SelfArg, CallCtx->PrevCtx, NDeref);
+ }
else {
- DeclSeq.push_back(0); // Use 0 to represent 'this'.
- return; // mutexID is still valid in this case
+ makeThis();
+ return 1;
}
+ } else if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
+ NamedDecl *ND = ME->getMemberDecl();
+ int ImplicitDeref = ME->isArrow() ? 1 : 0;
+ unsigned Root = makeDot(ND, false);
+ unsigned Sz = buildSExpr(ME->getBase(), CallCtx, &ImplicitDeref);
+ NodeVec[Root].setArrow(ImplicitDeref > 0);
+ NodeVec[Root].setSize(Sz + 1);
+ return Sz + 1;
} else if (CXXMemberCallExpr *CMCE = dyn_cast<CXXMemberCallExpr>(Exp)) {
- DeclSeq.push_back(CMCE->getMethodDecl()->getCanonicalDecl());
- buildMutexID(CMCE->getImplicitObjectArgument(),
- D, Parent, NumArgs, FunArgs);
+ // When calling a function with a lock_returned attribute, replace
+ // the function call with the expression in lock_returned.
+ if (LockReturnedAttr* At =
+ CMCE->getMethodDecl()->getAttr<LockReturnedAttr>()) {
+ CallingContext LRCallCtx(CMCE->getMethodDecl());
+ LRCallCtx.SelfArg = CMCE->getImplicitObjectArgument();
+ LRCallCtx.SelfArrow =
+ dyn_cast<MemberExpr>(CMCE->getCallee())->isArrow();
+ LRCallCtx.NumArgs = CMCE->getNumArgs();
+ LRCallCtx.FunArgs = CMCE->getArgs();
+ LRCallCtx.PrevCtx = CallCtx;
+ return buildSExpr(At->getArg(), &LRCallCtx);
+ }
+ // Hack to treat smart pointers and iterators as pointers;
+ // ignore any method named get().
+ if (CMCE->getMethodDecl()->getNameAsString() == "get" &&
+ CMCE->getNumArgs() == 0) {
+ if (NDeref && dyn_cast<MemberExpr>(CMCE->getCallee())->isArrow())
+ ++(*NDeref);
+ return buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx, NDeref);
+ }
unsigned NumCallArgs = CMCE->getNumArgs();
+ unsigned Root =
+ makeMCall(NumCallArgs, CMCE->getMethodDecl()->getCanonicalDecl());
+ unsigned Sz = buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx);
Expr** CallArgs = CMCE->getArgs();
for (unsigned i = 0; i < NumCallArgs; ++i) {
- buildMutexID(CallArgs[i], D, Parent, NumArgs, FunArgs);
+ Sz += buildSExpr(CallArgs[i], CallCtx);
}
+ NodeVec[Root].setSize(Sz + 1);
+ return Sz + 1;
} else if (CallExpr *CE = dyn_cast<CallExpr>(Exp)) {
- buildMutexID(CE->getCallee(), D, Parent, NumArgs, FunArgs);
+ if (LockReturnedAttr* At =
+ CE->getDirectCallee()->getAttr<LockReturnedAttr>()) {
+ CallingContext LRCallCtx(CE->getDirectCallee());
+ LRCallCtx.NumArgs = CE->getNumArgs();
+ LRCallCtx.FunArgs = CE->getArgs();
+ LRCallCtx.PrevCtx = CallCtx;
+ return buildSExpr(At->getArg(), &LRCallCtx);
+ }
+ // Treat smart pointers and iterators as pointers;
+ // ignore the * and -> operators.
+ if (CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(CE)) {
+ OverloadedOperatorKind k = OE->getOperator();
+ if (k == OO_Star) {
+ if (NDeref) ++(*NDeref);
+ return buildSExpr(OE->getArg(0), CallCtx, NDeref);
+ }
+ else if (k == OO_Arrow) {
+ return buildSExpr(OE->getArg(0), CallCtx, NDeref);
+ }
+ }
unsigned NumCallArgs = CE->getNumArgs();
+ unsigned Root = makeCall(NumCallArgs, 0);
+ unsigned Sz = buildSExpr(CE->getCallee(), CallCtx);
Expr** CallArgs = CE->getArgs();
for (unsigned i = 0; i < NumCallArgs; ++i) {
- buildMutexID(CallArgs[i], D, Parent, NumArgs, FunArgs);
+ Sz += buildSExpr(CallArgs[i], CallCtx);
}
+ NodeVec[Root].setSize(Sz+1);
+ return Sz+1;
} else if (BinaryOperator *BOE = dyn_cast<BinaryOperator>(Exp)) {
- buildMutexID(BOE->getLHS(), D, Parent, NumArgs, FunArgs);
- buildMutexID(BOE->getRHS(), D, Parent, NumArgs, FunArgs);
+ unsigned Root = makeBinary();
+ unsigned Sz = buildSExpr(BOE->getLHS(), CallCtx);
+ Sz += buildSExpr(BOE->getRHS(), CallCtx);
+ NodeVec[Root].setSize(Sz);
+ return Sz;
} else if (UnaryOperator *UOE = dyn_cast<UnaryOperator>(Exp)) {
- buildMutexID(UOE->getSubExpr(), D, Parent, NumArgs, FunArgs);
+ // Ignore & and * operators -- they're no-ops.
+ // However, we try to figure out whether the expression is a pointer,
+ // so we can use . and -> appropriately in error messages.
+ if (UOE->getOpcode() == UO_Deref) {
+ if (NDeref) ++(*NDeref);
+ return buildSExpr(UOE->getSubExpr(), CallCtx, NDeref);
+ }
+ if (UOE->getOpcode() == UO_AddrOf) {
+ if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(UOE->getSubExpr())) {
+ if (DRE->getDecl()->isCXXInstanceMember()) {
+ // This is a pointer-to-member expression, e.g. &MyClass::mu_.
+ // We interpret this syntax specially, as a wildcard.
+ unsigned Root = makeDot(DRE->getDecl(), false);
+ makeWildcard();
+ NodeVec[Root].setSize(2);
+ return 2;
+ }
+ }
+ if (NDeref) --(*NDeref);
+ return buildSExpr(UOE->getSubExpr(), CallCtx, NDeref);
+ }
+ unsigned Root = makeUnary();
+ unsigned Sz = buildSExpr(UOE->getSubExpr(), CallCtx);
+ NodeVec[Root].setSize(Sz);
+ return Sz;
} else if (ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(Exp)) {
- buildMutexID(ASE->getBase(), D, Parent, NumArgs, FunArgs);
- buildMutexID(ASE->getIdx(), D, Parent, NumArgs, FunArgs);
+ unsigned Root = makeIndex();
+ unsigned Sz = buildSExpr(ASE->getBase(), CallCtx);
+ Sz += buildSExpr(ASE->getIdx(), CallCtx);
+ NodeVec[Root].setSize(Sz);
+ return Sz;
} else if (AbstractConditionalOperator *CE =
- dyn_cast<AbstractConditionalOperator>(Exp)) {
- buildMutexID(CE->getCond(), D, Parent, NumArgs, FunArgs);
- buildMutexID(CE->getTrueExpr(), D, Parent, NumArgs, FunArgs);
- buildMutexID(CE->getFalseExpr(), D, Parent, NumArgs, FunArgs);
+ dyn_cast<AbstractConditionalOperator>(Exp)) {
+ unsigned Root = makeUnknown(3);
+ unsigned Sz = buildSExpr(CE->getCond(), CallCtx);
+ Sz += buildSExpr(CE->getTrueExpr(), CallCtx);
+ Sz += buildSExpr(CE->getFalseExpr(), CallCtx);
+ NodeVec[Root].setSize(Sz);
+ return Sz;
} else if (ChooseExpr *CE = dyn_cast<ChooseExpr>(Exp)) {
- buildMutexID(CE->getCond(), D, Parent, NumArgs, FunArgs);
- buildMutexID(CE->getLHS(), D, Parent, NumArgs, FunArgs);
- buildMutexID(CE->getRHS(), D, Parent, NumArgs, FunArgs);
+ unsigned Root = makeUnknown(3);
+ unsigned Sz = buildSExpr(CE->getCond(), CallCtx);
+ Sz += buildSExpr(CE->getLHS(), CallCtx);
+ Sz += buildSExpr(CE->getRHS(), CallCtx);
+ NodeVec[Root].setSize(Sz);
+ return Sz;
} else if (CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
- buildMutexID(CE->getSubExpr(), D, Parent, NumArgs, FunArgs);
+ return buildSExpr(CE->getSubExpr(), CallCtx, NDeref);
} else if (ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
- buildMutexID(PE->getSubExpr(), D, Parent, NumArgs, FunArgs);
+ return buildSExpr(PE->getSubExpr(), CallCtx, NDeref);
+ } else if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Exp)) {
+ return buildSExpr(EWC->getSubExpr(), CallCtx, NDeref);
+ } else if (CXXBindTemporaryExpr *E = dyn_cast<CXXBindTemporaryExpr>(Exp)) {
+ return buildSExpr(E->getSubExpr(), CallCtx, NDeref);
} else if (isa<CharacterLiteral>(Exp) ||
- isa<CXXNullPtrLiteralExpr>(Exp) ||
- isa<GNUNullExpr>(Exp) ||
- isa<CXXBoolLiteralExpr>(Exp) ||
- isa<FloatingLiteral>(Exp) ||
- isa<ImaginaryLiteral>(Exp) ||
- isa<IntegerLiteral>(Exp) ||
- isa<StringLiteral>(Exp) ||
- isa<ObjCStringLiteral>(Exp)) {
- return; // FIXME: Ignore literals for now
+ isa<CXXNullPtrLiteralExpr>(Exp) ||
+ isa<GNUNullExpr>(Exp) ||
+ isa<CXXBoolLiteralExpr>(Exp) ||
+ isa<FloatingLiteral>(Exp) ||
+ isa<ImaginaryLiteral>(Exp) ||
+ isa<IntegerLiteral>(Exp) ||
+ isa<StringLiteral>(Exp) ||
+ isa<ObjCStringLiteral>(Exp)) {
+ makeNop();
+ return 1; // FIXME: Ignore literals for now
} else {
- // Ignore. FIXME: mark as invalid expression?
+ makeNop();
+ return 1; // Ignore. FIXME: mark as invalid expression?
}
}
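
A sketch of the lock_returned rewriting handled above; the Cache class and its getter are assumed for illustration, and the attribute spellings are the legacy ones:

    class __attribute__((lockable)) Mutex {
    public:
      void Lock()   __attribute__((exclusive_lock_function));
      void Unlock() __attribute__((unlock_function));
    };

    class Cache {
      Mutex Mu;
    public:
      Mutex &getMu() __attribute__((lock_returned(Mu))) { return Mu; }
      int Data __attribute__((guarded_by(Mu)));
    };

    void update(Cache *C) {
      C->getMu().Lock();   // buildSExpr replaces the getMu() call with the
      C->Data = 1;         // SExpr for C->Mu, so this access is seen as guarded.
      C->getMu().Unlock();
    }
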
- /// \brief Construct a MutexID from an expression.
+ /// \brief Construct a SExpr from an expression.
/// \param MutexExp The original mutex expression within an attribute
/// \param DeclExp An expression involving the Decl on which the attribute
/// occurs.
/// \param D The declaration to which the lock/unlock attribute is attached.
- void buildMutexIDFromExp(Expr *MutexExp, Expr *DeclExp, const NamedDecl *D) {
- Expr *Parent = 0;
- unsigned NumArgs = 0;
- Expr **FunArgs = 0;
+ void buildSExprFromExpr(Expr *MutexExp, Expr *DeclExp, const NamedDecl *D) {
+ CallingContext CallCtx(D);
// If we are processing a raw attribute expression, with no substitutions.
if (DeclExp == 0) {
- buildMutexID(MutexExp, D, 0, 0, 0);
+ buildSExpr(MutexExp, 0);
return;
}
- // Examine DeclExp to find Parent and FunArgs, which are used to substitute
+ // Examine DeclExp to find SelfArg and FunArgs, which are used to substitute
// for formal parameters when we call buildSExpr later.
if (MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
- Parent = ME->getBase();
+ CallCtx.SelfArg = ME->getBase();
+ CallCtx.SelfArrow = ME->isArrow();
} else if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(DeclExp)) {
- Parent = CE->getImplicitObjectArgument();
- NumArgs = CE->getNumArgs();
- FunArgs = CE->getArgs();
+ CallCtx.SelfArg = CE->getImplicitObjectArgument();
+ CallCtx.SelfArrow = dyn_cast<MemberExpr>(CE->getCallee())->isArrow();
+ CallCtx.NumArgs = CE->getNumArgs();
+ CallCtx.FunArgs = CE->getArgs();
} else if (CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
- NumArgs = CE->getNumArgs();
- FunArgs = CE->getArgs();
+ CallCtx.NumArgs = CE->getNumArgs();
+ CallCtx.FunArgs = CE->getArgs();
} else if (CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(DeclExp)) {
- Parent = 0; // FIXME -- get the parent from DeclStmt
- NumArgs = CE->getNumArgs();
- FunArgs = CE->getArgs();
+ CallCtx.SelfArg = 0; // FIXME -- get the parent from DeclStmt
+ CallCtx.NumArgs = CE->getNumArgs();
+ CallCtx.FunArgs = CE->getArgs();
} else if (D && isa<CXXDestructorDecl>(D)) {
// There's no such thing as a "destructor call" in the AST.
- Parent = DeclExp;
+ CallCtx.SelfArg = DeclExp;
}
// If the attribute has no arguments, then assume the argument is "this".
if (MutexExp == 0) {
- buildMutexID(Parent, D, 0, 0, 0);
+ buildSExpr(CallCtx.SelfArg, 0);
return;
}
- buildMutexID(MutexExp, D, Parent, NumArgs, FunArgs);
+ // For most attributes.
+ buildSExpr(MutexExp, &CallCtx);
}
-public:
- explicit MutexID(clang::Decl::EmptyShell e) {
- DeclSeq.clear();
+ /// \brief Get index of next sibling of node i.
+ unsigned getNextSibling(unsigned i) const {
+ return i + NodeVec[i].size();
}
+public:
+ explicit SExpr(clang::Decl::EmptyShell e) { NodeVec.clear(); }
+
/// \param MutexExp The original mutex expression within an attribute
/// \param DeclExp An expression involving the Decl on which the attribute
/// occurs.
/// \param D The declaration to which the lock/unlock attribute is attached.
/// Caller must check isValid() after construction.
- MutexID(Expr* MutexExp, Expr *DeclExp, const NamedDecl* D) {
- buildMutexIDFromExp(MutexExp, DeclExp, D);
+ SExpr(Expr* MutexExp, Expr *DeclExp, const NamedDecl* D) {
+ buildSExprFromExpr(MutexExp, DeclExp, D);
}
/// Return true if this is a valid decl sequence.
/// Caller must call this by hand after construction to handle errors.
bool isValid() const {
- return !DeclSeq.empty();
+ return !NodeVec.empty();
}
/// Issue a warning about an invalid lock expression
@@ -255,44 +518,144 @@ public:
Handler.handleInvalidLockExp(Loc);
}
- bool operator==(const MutexID &other) const {
- return DeclSeq == other.DeclSeq;
+ bool operator==(const SExpr &other) const {
+ return NodeVec == other.NodeVec;
}
- bool operator!=(const MutexID &other) const {
+ bool operator!=(const SExpr &other) const {
return !(*this == other);
}
- // SmallVector overloads Operator< to do lexicographic ordering. Note that
- // we use pointer equality (and <) to compare NamedDecls. This means the order
- // of MutexIDs in a lockset is nondeterministic. In order to output
- // diagnostics in a deterministic ordering, we must order all diagnostics to
- // output by SourceLocation when iterating through this lockset.
- bool operator<(const MutexID &other) const {
- return DeclSeq < other.DeclSeq;
+ bool matches(const SExpr &Other, unsigned i = 0, unsigned j = 0) const {
+ if (NodeVec[i].matches(Other.NodeVec[j])) {
+ unsigned n = NodeVec[i].arity();
+ bool Result = true;
+ unsigned ci = i+1; // first child of i
+ unsigned cj = j+1; // first child of j
+ for (unsigned k = 0; k < n;
+ ++k, ci=getNextSibling(ci), cj = Other.getNextSibling(cj)) {
+ Result = Result && matches(Other, ci, cj);
+ }
+ return Result;
+ }
+ return false;
}
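
For example, the wildcard produced for a pointer-to-member expression (see the UO_AddrOf case in buildSExpr) lets a generic annotation match any concrete object, roughly:

    //   SExpr for &MyClass::mu_ :  [ Dot(mu_, Sz=2), Wildcard  ]
    //   SExpr for obj.mu_       :  [ Dot(mu_, Sz=2), NVar(obj) ]
    //
    //   matches(): the Dot nodes compare equal (same member decl), and the
    //   Wildcard child matches NVar(obj), so the two expressions match.
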
- /// \brief Returns the name of the first Decl in the list for a given MutexID;
- /// e.g. the lock expression foo.bar() has name "bar".
- /// The caret will point unambiguously to the lock expression, so using this
- /// name in diagnostics is a way to get simple, and consistent, mutex names.
- /// We do not want to output the entire expression text for security reasons.
- std::string getName() const {
+ /// \brief Pretty print a lock expression for use in error messages.
+ std::string toString(unsigned i = 0) const {
assert(isValid());
- if (!DeclSeq.front())
- return "this"; // Use 0 to represent 'this'.
- return DeclSeq.front()->getNameAsString();
+ if (i >= NodeVec.size())
+ return "";
+
+ const SExprNode* N = &NodeVec[i];
+ switch (N->kind()) {
+ case EOP_Nop:
+ return "_";
+ case EOP_Wildcard:
+ return "(?)";
+ case EOP_This:
+ return "this";
+ case EOP_NVar:
+ case EOP_LVar: {
+ return N->getNamedDecl()->getNameAsString();
+ }
+ case EOP_Dot: {
+ if (NodeVec[i+1].kind() == EOP_Wildcard) {
+ std::string S = "&";
+ S += N->getNamedDecl()->getQualifiedNameAsString();
+ return S;
+ }
+ std::string FieldName = N->getNamedDecl()->getNameAsString();
+ if (NodeVec[i+1].kind() == EOP_This)
+ return FieldName;
+
+ std::string S = toString(i+1);
+ if (N->isArrow())
+ return S + "->" + FieldName;
+ else
+ return S + "." + FieldName;
+ }
+ case EOP_Call: {
+ std::string S = toString(i+1) + "(";
+ unsigned NumArgs = N->arity()-1;
+ unsigned ci = getNextSibling(i+1);
+ for (unsigned k=0; k<NumArgs; ++k, ci = getNextSibling(ci)) {
+ S += toString(ci);
+ if (k+1 < NumArgs) S += ",";
+ }
+ S += ")";
+ return S;
+ }
+ case EOP_MCall: {
+ std::string S = "";
+ if (NodeVec[i+1].kind() != EOP_This)
+ S = toString(i+1) + ".";
+ if (const NamedDecl *D = N->getFunctionDecl())
+ S += D->getNameAsString() + "(";
+ else
+ S += "#(";
+ unsigned NumArgs = N->arity()-1;
+ unsigned ci = getNextSibling(i+1);
+ for (unsigned k=0; k<NumArgs; ++k, ci = getNextSibling(ci)) {
+ S += toString(ci);
+ if (k+1 < NumArgs) S += ",";
+ }
+ S += ")";
+ return S;
+ }
+ case EOP_Index: {
+ std::string S1 = toString(i+1);
+ std::string S2 = toString(i+1 + NodeVec[i+1].size());
+ return S1 + "[" + S2 + "]";
+ }
+ case EOP_Unary: {
+ std::string S = toString(i+1);
+ return "#" + S;
+ }
+ case EOP_Binary: {
+ std::string S1 = toString(i+1);
+ std::string S2 = toString(i+1 + NodeVec[i+1].size());
+ return "(" + S1 + "#" + S2 + ")";
+ }
+ case EOP_Unknown: {
+ unsigned NumChildren = N->arity();
+ if (NumChildren == 0)
+ return "(...)";
+ std::string S = "(";
+ unsigned ci = i+1;
+ for (unsigned j = 0; j < NumChildren; ++j, ci = getNextSibling(ci)) {
+ S += toString(ci);
+ if (j+1 < NumChildren) S += "#";
+ }
+ S += ")";
+ return S;
+ }
+ }
+ return "";
}
+};
- void Profile(llvm::FoldingSetNodeID &ID) const {
- for (SmallVectorImpl<NamedDecl*>::const_iterator I = DeclSeq.begin(),
- E = DeclSeq.end(); I != E; ++I) {
- ID.AddPointer(*I);
- }
+
+
+/// \brief A short list of SExprs
+class MutexIDList : public SmallVector<SExpr, 3> {
+public:
+ /// \brief Return true if the list contains the specified SExpr
+ /// Performs a linear search, because these lists are almost always very small.
+ bool contains(const SExpr& M) {
+ for (iterator I=begin(),E=end(); I != E; ++I)
+ if ((*I) == M) return true;
+ return false;
+ }
+
+  /// \brief Push M onto the list, but discard duplicates.
+ void push_back_nodup(const SExpr& M) {
+ if (!contains(M)) push_back(M);
}
};
+
/// \brief This is a helper class that stores info about the most recent
/// acquire of a Lock.
///
@@ -307,14 +670,18 @@ struct LockData {
///
/// FIXME: add support for re-entrant locking and lock up/downgrading
LockKind LKind;
- MutexID UnderlyingMutex; // for ScopedLockable objects
+ bool Managed; // for ScopedLockable objects
+ SExpr UnderlyingMutex; // for ScopedLockable objects
- LockData(SourceLocation AcquireLoc, LockKind LKind)
- : AcquireLoc(AcquireLoc), LKind(LKind), UnderlyingMutex(Decl::EmptyShell())
+ LockData(SourceLocation AcquireLoc, LockKind LKind, bool M = false)
+ : AcquireLoc(AcquireLoc), LKind(LKind), Managed(M),
+ UnderlyingMutex(Decl::EmptyShell())
{}
- LockData(SourceLocation AcquireLoc, LockKind LKind, const MutexID &Mu)
- : AcquireLoc(AcquireLoc), LKind(LKind), UnderlyingMutex(Mu) {}
+ LockData(SourceLocation AcquireLoc, LockKind LKind, const SExpr &Mu)
+ : AcquireLoc(AcquireLoc), LKind(LKind), Managed(false),
+ UnderlyingMutex(Mu)
+ {}
bool operator==(const LockData &other) const {
return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
@@ -331,10 +698,102 @@ struct LockData {
};
-/// A Lockset maps each MutexID (defined above) to information about how it has
+/// \brief A FactEntry stores a single fact that is known at a particular point
+/// in the program execution. Currently, this is information regarding a lock
+/// that is held at that point.
+struct FactEntry {
+ SExpr MutID;
+ LockData LDat;
+
+ FactEntry(const SExpr& M, const LockData& L)
+ : MutID(M), LDat(L)
+ { }
+};
+
+
+typedef unsigned short FactID;
+
+/// \brief FactManager manages the memory for all facts that are created during
+/// the analysis of a single routine.
+class FactManager {
+private:
+ std::vector<FactEntry> Facts;
+
+public:
+ FactID newLock(const SExpr& M, const LockData& L) {
+ Facts.push_back(FactEntry(M,L));
+ return static_cast<unsigned short>(Facts.size() - 1);
+ }
+
+ const FactEntry& operator[](FactID F) const { return Facts[F]; }
+ FactEntry& operator[](FactID F) { return Facts[F]; }
+};
+
+
+/// \brief A FactSet is the set of facts that are known to be true at a
+/// particular program point. FactSets must be small, because they are
+/// frequently copied, and are thus implemented as a set of indices into a
+/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
+/// locks, so we can get away with doing a linear search for lookup. Note
+/// that a hashtable or map is inappropriate in this case, because lookups
+/// may involve partial pattern matches, rather than exact matches.
+class FactSet {
+private:
+ typedef SmallVector<FactID, 4> FactVec;
+
+ FactVec FactIDs;
+
+public:
+ typedef FactVec::iterator iterator;
+ typedef FactVec::const_iterator const_iterator;
+
+ iterator begin() { return FactIDs.begin(); }
+ const_iterator begin() const { return FactIDs.begin(); }
+
+ iterator end() { return FactIDs.end(); }
+ const_iterator end() const { return FactIDs.end(); }
+
+ bool isEmpty() const { return FactIDs.size() == 0; }
+
+ FactID addLock(FactManager& FM, const SExpr& M, const LockData& L) {
+ FactID F = FM.newLock(M, L);
+ FactIDs.push_back(F);
+ return F;
+ }
+
+ bool removeLock(FactManager& FM, const SExpr& M) {
+ unsigned n = FactIDs.size();
+ if (n == 0)
+ return false;
+
+ for (unsigned i = 0; i < n-1; ++i) {
+ if (FM[FactIDs[i]].MutID.matches(M)) {
+ FactIDs[i] = FactIDs[n-1];
+ FactIDs.pop_back();
+ return true;
+ }
+ }
+ if (FM[FactIDs[n-1]].MutID.matches(M)) {
+ FactIDs.pop_back();
+ return true;
+ }
+ return false;
+ }
+
+ LockData* findLock(FactManager& FM, const SExpr& M) const {
+ for (const_iterator I=begin(), E=end(); I != E; ++I) {
+ if (FM[*I].MutID.matches(M)) return &FM[*I].LDat;
+ }
+ return 0;
+ }
+};
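
A brief sketch of the intended usage pattern, mirroring the addLock/removeLock helpers on ThreadSafetyAnalyzer below:

    //   FactManager FM;
    //   FactSet     FSet;
    //   FSet.addLock(FM, Mu, LockData(Loc, LK_Exclusive));     // acquire
    //   if (LockData *LDat = FSet.findLock(FM, Mu)) { ... }    // query
    //   FSet.removeLock(FM, Mu);                               // release
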
+
+
+
+/// A Lockset maps each SExpr (defined above) to information about how it has
/// been locked.
-typedef llvm::ImmutableMap<MutexID, LockData> Lockset;
-typedef llvm::ImmutableMap<NamedDecl*, unsigned> LocalVarContext;
+typedef llvm::ImmutableMap<SExpr, LockData> Lockset;
+typedef llvm::ImmutableMap<const NamedDecl*, unsigned> LocalVarContext;
class LocalVariableMap;
@@ -345,15 +804,15 @@ enum CFGBlockSide { CBS_Entry, CBS_Exit };
/// maintained for each block in the CFG. See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
- Lockset EntrySet; // Lockset held at entry to block
- Lockset ExitSet; // Lockset held at exit from block
+ FactSet EntrySet; // Lockset held at entry to block
+ FactSet ExitSet; // Lockset held at exit from block
LocalVarContext EntryContext; // Context held at entry to block
LocalVarContext ExitContext; // Context held at exit from block
SourceLocation EntryLoc; // Location of first statement in block
SourceLocation ExitLoc; // Location of last statement in block.
unsigned EntryIndex; // Used to replay contexts later
- const Lockset &getSet(CFGBlockSide Side) const {
+ const FactSet &getSet(CFGBlockSide Side) const {
return Side == CBS_Entry ? EntrySet : ExitSet;
}
SourceLocation getLocation(CFGBlockSide Side) const {
@@ -361,14 +820,12 @@ struct CFGBlockInfo {
}
private:
- CFGBlockInfo(Lockset EmptySet, LocalVarContext EmptyCtx)
- : EntrySet(EmptySet), ExitSet(EmptySet),
- EntryContext(EmptyCtx), ExitContext(EmptyCtx)
+ CFGBlockInfo(LocalVarContext EmptyCtx)
+ : EntryContext(EmptyCtx), ExitContext(EmptyCtx)
{ }
public:
- static CFGBlockInfo getEmptyBlockInfo(Lockset::Factory &F,
- LocalVariableMap &M);
+ static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};
@@ -398,21 +855,21 @@ public:
public:
friend class LocalVariableMap;
- NamedDecl *Dec; // The original declaration for this variable.
- Expr *Exp; // The expression for this variable, OR
- unsigned Ref; // Reference to another VarDefinition
- Context Ctx; // The map with which Exp should be interpreted.
+ const NamedDecl *Dec; // The original declaration for this variable.
+ const Expr *Exp; // The expression for this variable, OR
+ unsigned Ref; // Reference to another VarDefinition
+ Context Ctx; // The map with which Exp should be interpreted.
bool isReference() { return !Exp; }
private:
// Create ordinary variable definition
- VarDefinition(NamedDecl *D, Expr *E, Context C)
+ VarDefinition(const NamedDecl *D, const Expr *E, Context C)
: Dec(D), Exp(E), Ref(0), Ctx(C)
{ }
// Create reference to previous definition
- VarDefinition(NamedDecl *D, unsigned R, Context C)
+ VarDefinition(const NamedDecl *D, unsigned R, Context C)
: Dec(D), Exp(0), Ref(R), Ctx(C)
{ }
};
@@ -430,7 +887,7 @@ public:
}
/// Look up a definition, within the given context.
- const VarDefinition* lookup(NamedDecl *D, Context Ctx) {
+ const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
const unsigned *i = Ctx.lookup(D);
if (!i)
return 0;
@@ -441,7 +898,7 @@ public:
/// Look up the definition for D within the given context. Returns
/// NULL if the expression is not statically known. If successful, also
/// modifies Ctx to hold the context of the return Expr.
- Expr* lookupExpr(NamedDecl *D, Context &Ctx) {
+ const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
const unsigned *P = Ctx.lookup(D);
if (!P)
return 0;
@@ -476,7 +933,7 @@ public:
llvm::errs() << "Undefined";
return;
}
- NamedDecl *Dec = VarDefinitions[i].Dec;
+ const NamedDecl *Dec = VarDefinitions[i].Dec;
if (!Dec) {
llvm::errs() << "<<NULL>>";
return;
@@ -488,7 +945,7 @@ public:
/// Dumps an ASCII representation of the variable map to llvm::errs()
void dump() {
for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
- Expr *Exp = VarDefinitions[i].Exp;
+ const Expr *Exp = VarDefinitions[i].Exp;
unsigned Ref = VarDefinitions[i].Ref;
dumpVarDefinitionName(i);
@@ -504,7 +961,7 @@ public:
/// Dumps an ASCII representation of a Context to llvm::errs()
void dumpContext(Context C) {
for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
- NamedDecl *D = I.getKey();
+ const NamedDecl *D = I.getKey();
D->printName(llvm::errs());
const unsigned *i = C.lookup(D);
llvm::errs() << " -> ";
@@ -528,7 +985,7 @@ protected:
// Adds a new definition to the given context, and returns a new context.
// This method should be called when declaring a new variable.
- Context addDefinition(NamedDecl *D, Expr *Exp, Context Ctx) {
+ Context addDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
assert(!Ctx.contains(D));
unsigned newID = VarDefinitions.size();
Context NewCtx = ContextFactory.add(Ctx, D, newID);
@@ -537,7 +994,7 @@ protected:
}
// Add a new reference to an existing definition.
- Context addReference(NamedDecl *D, unsigned i, Context Ctx) {
+ Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
unsigned newID = VarDefinitions.size();
Context NewCtx = ContextFactory.add(Ctx, D, newID);
VarDefinitions.push_back(VarDefinition(D, i, Ctx));
@@ -546,7 +1003,7 @@ protected:
// Updates a definition only if that definition is already in the map.
// This method should be called when assigning to an existing variable.
- Context updateDefinition(NamedDecl *D, Expr *Exp, Context Ctx) {
+ Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
if (Ctx.contains(D)) {
unsigned newID = VarDefinitions.size();
Context NewCtx = ContextFactory.remove(Ctx, D);
@@ -559,7 +1016,7 @@ protected:
// Removes a definition from the context, but keeps the variable name
// as a valid variable. The index 0 is a placeholder for cleared definitions.
- Context clearDefinition(NamedDecl *D, Context Ctx) {
+ Context clearDefinition(const NamedDecl *D, Context Ctx) {
Context NewCtx = Ctx;
if (NewCtx.contains(D)) {
NewCtx = ContextFactory.remove(NewCtx, D);
@@ -569,7 +1026,7 @@ protected:
}
// Remove a definition entirely from the context.
- Context removeDefinition(NamedDecl *D, Context Ctx) {
+ Context removeDefinition(const NamedDecl *D, Context Ctx) {
Context NewCtx = Ctx;
if (NewCtx.contains(D)) {
NewCtx = ContextFactory.remove(NewCtx, D);
@@ -586,9 +1043,8 @@ protected:
// This has to be defined after LocalVariableMap.
-CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(Lockset::Factory &F,
- LocalVariableMap &M) {
- return CFGBlockInfo(F.getEmptyMap(), M.getEmptyContext());
+CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
+ return CFGBlockInfo(M.getEmptyContext());
}
@@ -655,7 +1111,7 @@ LocalVariableMap::Context
LocalVariableMap::intersectContexts(Context C1, Context C2) {
Context Result = C1;
for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) {
- NamedDecl *Dec = I.getKey();
+ const NamedDecl *Dec = I.getKey();
unsigned i1 = I.getData();
const unsigned *i2 = C2.lookup(Dec);
if (!i2) // variable doesn't exist on second path
@@ -672,7 +1128,7 @@ LocalVariableMap::intersectContexts(Context C1, Context C2) {
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
Context Result = getEmptyContext();
for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
- NamedDecl *Dec = I.getKey();
+ const NamedDecl *Dec = I.getKey();
unsigned i = I.getData();
Result = addReference(Dec, i, Result);
}
@@ -684,7 +1140,7 @@ LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
// createReferenceContext.
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) {
- NamedDecl *Dec = I.getKey();
+ const NamedDecl *Dec = I.getKey();
unsigned i1 = I.getData();
VarDefinition *VDef = &VarDefinitions[i1];
assert(VDef->isReference());
@@ -725,7 +1181,7 @@ void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals. (These correspond to places where SSA
// might have to insert a phi node.) On the second pass, these definitions are
-// set to NULL if the the variable has changed on the back-edge (i.e. a phi
+// set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.) E.g.
//
// { Context | VarDefinitions }
@@ -869,24 +1325,294 @@ static void findBlockLocations(CFG *CFGraph,
class ThreadSafetyAnalyzer {
friend class BuildLockset;
- ThreadSafetyHandler &Handler;
- Lockset::Factory LocksetFactory;
- LocalVariableMap LocalVarMap;
+ ThreadSafetyHandler &Handler;
+ LocalVariableMap LocalVarMap;
+ FactManager FactMan;
+ std::vector<CFGBlockInfo> BlockInfo;
public:
ThreadSafetyAnalyzer(ThreadSafetyHandler &H) : Handler(H) {}
- Lockset intersectAndWarn(const CFGBlockInfo &Block1, CFGBlockSide Side1,
- const CFGBlockInfo &Block2, CFGBlockSide Side2,
- LockErrorKind LEK);
+ void addLock(FactSet &FSet, const SExpr &Mutex, const LockData &LDat);
+ void removeLock(FactSet &FSet, const SExpr &Mutex,
+ SourceLocation UnlockLoc, bool FullyRemove=false);
+
+ template <typename AttrType>
+ void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
+ const NamedDecl *D);
- Lockset addLock(Lockset &LSet, Expr *MutexExp, const NamedDecl *D,
- LockKind LK, SourceLocation Loc);
+ template <class AttrType>
+ void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
+ const NamedDecl *D,
+ const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
+ Expr *BrE, bool Neg);
+
+ const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
+ bool &Negate);
+
+ void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
+ const CFGBlock* PredBlock,
+ const CFGBlock *CurrBlock);
+
+ void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
+ SourceLocation JoinLoc,
+ LockErrorKind LEK1, LockErrorKind LEK2,
+ bool Modify=true);
+
+ void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
+ SourceLocation JoinLoc, LockErrorKind LEK1,
+ bool Modify=true) {
+ intersectAndWarn(FSet1, FSet2, JoinLoc, LEK1, LEK1, Modify);
+ }
void runAnalysis(AnalysisDeclContext &AC);
};
+/// \brief Add a new lock to the lockset, warning if the lock is already there.
+/// \param Mutex -- the Mutex expression for the lock
+/// \param LDat -- the LockData for the lock
+void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const SExpr &Mutex,
+ const LockData &LDat) {
+ // FIXME: deal with acquired before/after annotations.
+ // FIXME: Don't always warn when we have support for reentrant locks.
+ if (FSet.findLock(FactMan, Mutex)) {
+ Handler.handleDoubleLock(Mutex.toString(), LDat.AcquireLoc);
+ } else {
+ FSet.addLock(FactMan, Mutex, LDat);
+ }
+}
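
Roughly the user-level situation this warns about, again with the legacy attribute spellings and an illustrative Mutex type:

    struct __attribute__((lockable)) Mutex {
      void Lock()   __attribute__((exclusive_lock_function));
      void Unlock() __attribute__((unlock_function));
    };

    Mutex mu;

    void doubleLock() {
      mu.Lock();
      mu.Lock();     // 'mu' is already in the FactSet, so handleDoubleLock fires.
      mu.Unlock();
    }
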
+
+
+/// \brief Remove a lock from the lockset, warning if the lock is not there.
+/// \param Mutex The lock expression corresponding to the lock to be removed
+/// \param UnlockLoc The source location of the unlock (only used in error msg)
+void ThreadSafetyAnalyzer::removeLock(FactSet &FSet,
+ const SExpr &Mutex,
+ SourceLocation UnlockLoc,
+ bool FullyRemove) {
+ const LockData *LDat = FSet.findLock(FactMan, Mutex);
+ if (!LDat) {
+ Handler.handleUnmatchedUnlock(Mutex.toString(), UnlockLoc);
+ return;
+ }
+
+ if (LDat->UnderlyingMutex.isValid()) {
+    // This is a scoped lockable object, which manages the real mutex.
+ if (FullyRemove) {
+ // We're destroying the managing object.
+ // Remove the underlying mutex if it exists; but don't warn.
+ if (FSet.findLock(FactMan, LDat->UnderlyingMutex))
+ FSet.removeLock(FactMan, LDat->UnderlyingMutex);
+ } else {
+ // We're releasing the underlying mutex, but not destroying the
+ // managing object. Warn on dual release.
+ if (!FSet.findLock(FactMan, LDat->UnderlyingMutex)) {
+ Handler.handleUnmatchedUnlock(LDat->UnderlyingMutex.toString(),
+ UnlockLoc);
+ }
+ FSet.removeLock(FactMan, LDat->UnderlyingMutex);
+ return;
+ }
+ }
+ FSet.removeLock(FactMan, Mutex);
+}
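
A sketch of the scoped-lockable case handled above; the MutexLock guard is an illustrative assumption and reuses the annotated Mutex type from the previous sketch:

    struct __attribute__((scoped_lockable)) MutexLock {
      MutexLock(Mutex *mu) __attribute__((exclusive_lock_function(mu)));
      ~MutexLock() __attribute__((unlock_function));
    };

    Mutex mu;
    int data __attribute__((guarded_by(mu)));

    void useScope() {
      MutexLock guard(&mu);   // the guard's fact records UnderlyingMutex == mu
      data = 1;
      // Destruction of 'guard' is the implicit unlock: removeLock runs with
      // FullyRemove == true and silently drops the underlying 'mu' as well.
    }
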
+
+
+/// \brief Extract the list of mutexIDs from the attribute on an expression,
+/// and push them onto Mtxs, discarding any duplicates.
+template <typename AttrType>
+void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
+ Expr *Exp, const NamedDecl *D) {
+ typedef typename AttrType::args_iterator iterator_type;
+
+ if (Attr->args_size() == 0) {
+ // The mutex held is the "this" object.
+ SExpr Mu(0, Exp, D);
+ if (!Mu.isValid())
+ SExpr::warnInvalidLock(Handler, 0, Exp, D);
+ else
+ Mtxs.push_back_nodup(Mu);
+ return;
+ }
+
+ for (iterator_type I=Attr->args_begin(), E=Attr->args_end(); I != E; ++I) {
+ SExpr Mu(*I, Exp, D);
+ if (!Mu.isValid())
+ SExpr::warnInvalidLock(Handler, *I, Exp, D);
+ else
+ Mtxs.push_back_nodup(Mu);
+ }
+}
+
+
+/// \brief Extract the list of mutexIDs from a trylock attribute. If the
+/// trylock applies to the given edge, then push them onto Mtxs, discarding
+/// any duplicates.
+template <class AttrType>
+void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
+ Expr *Exp, const NamedDecl *D,
+ const CFGBlock *PredBlock,
+ const CFGBlock *CurrBlock,
+ Expr *BrE, bool Neg) {
+ // Find out which branch has the lock
+  bool branch = false;
+ if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE)) {
+ branch = BLE->getValue();
+ }
+ else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE)) {
+ branch = ILE->getValue().getBoolValue();
+ }
+ int branchnum = branch ? 0 : 1;
+ if (Neg) branchnum = !branchnum;
+
+ // If we've taken the trylock branch, then add the lock
+ int i = 0;
+ for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
+ SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
+ if (*SI == CurrBlock && i == branchnum) {
+ getMutexIDs(Mtxs, Attr, Exp, D);
+ }
+ }
+}
+
+
+bool getStaticBooleanValue(Expr* E, bool& TCond) {
+ if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
+ TCond = false;
+ return true;
+ } else if (CXXBoolLiteralExpr *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
+ TCond = BLE->getValue();
+ return true;
+ } else if (IntegerLiteral *ILE = dyn_cast<IntegerLiteral>(E)) {
+ TCond = ILE->getValue().getBoolValue();
+ return true;
+ } else if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) {
+ return getStaticBooleanValue(CE->getSubExpr(), TCond);
+ }
+ return false;
+}
+
+
+// If Cond can be traced back to a function call, return the call expression.
+// The negate variable should be passed in as false, and will be set to true
+// if the function call is negated, e.g. if (!mu.tryLock(...))
+const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
+ LocalVarContext C,
+ bool &Negate) {
+ if (!Cond)
+ return 0;
+
+ if (const CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
+ return CallExp;
+ }
+ else if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond)) {
+ return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
+ }
+ else if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
+ return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
+ }
+ else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
+ const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
+ return getTrylockCallExpr(E, C, Negate);
+ }
+ else if (const UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
+ if (UOP->getOpcode() == UO_LNot) {
+ Negate = !Negate;
+ return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
+ }
+ return 0;
+ }
+ else if (const BinaryOperator *BOP = dyn_cast<BinaryOperator>(Cond)) {
+ if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
+ if (BOP->getOpcode() == BO_NE)
+ Negate = !Negate;
+
+ bool TCond = false;
+ if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
+ if (!TCond) Negate = !Negate;
+ return getTrylockCallExpr(BOP->getLHS(), C, Negate);
+ }
+ else if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
+ if (!TCond) Negate = !Negate;
+ return getTrylockCallExpr(BOP->getRHS(), C, Negate);
+ }
+ return 0;
+ }
+ return 0;
+ }
+ // FIXME -- handle && and || as well.
+ return 0;
+}
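
The kind of source pattern this function walks, assuming a trylock method whose success value is true:

    struct __attribute__((lockable)) Mutex {
      bool TryLock() __attribute__((exclusive_trylock_function(true)));
      void Unlock()  __attribute__((unlock_function));
    };

    Mutex mu;
    int data __attribute__((guarded_by(mu)));

    void tryPattern() {
      if (!mu.TryLock())    // the '!' flips Negate, so the lock is only added
        return;             // on the fall-through edge where TryLock succeeded
      data = 1;
      mu.Unlock();
    }
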
+
+
+/// \brief Find the lockset that holds on the edge between PredBlock
+/// and CurrBlock. The edge set is the exit set of PredBlock (passed
+/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
+void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
+ const FactSet &ExitSet,
+ const CFGBlock *PredBlock,
+ const CFGBlock *CurrBlock) {
+ Result = ExitSet;
+
+ if (!PredBlock->getTerminatorCondition())
+ return;
+
+ bool Negate = false;
+ const Stmt *Cond = PredBlock->getTerminatorCondition();
+ const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
+ const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
+
+ CallExpr *Exp =
+ const_cast<CallExpr*>(getTrylockCallExpr(Cond, LVarCtx, Negate));
+ if (!Exp)
+ return;
+
+ NamedDecl *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
+  if (!FunDecl || !FunDecl->hasAttrs())
+    return;
+
+ MutexIDList ExclusiveLocksToAdd;
+ MutexIDList SharedLocksToAdd;
+
+ // If the condition is a call to a Trylock function, then grab the attributes
+ AttrVec &ArgAttrs = FunDecl->getAttrs();
+ for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
+ Attr *Attr = ArgAttrs[i];
+ switch (Attr->getKind()) {
+ case attr::ExclusiveTrylockFunction: {
+ ExclusiveTrylockFunctionAttr *A =
+ cast<ExclusiveTrylockFunctionAttr>(Attr);
+ getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
+ PredBlock, CurrBlock, A->getSuccessValue(), Negate);
+ break;
+ }
+ case attr::SharedTrylockFunction: {
+ SharedTrylockFunctionAttr *A =
+ cast<SharedTrylockFunctionAttr>(Attr);
+      getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
+ PredBlock, CurrBlock, A->getSuccessValue(), Negate);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ // Add and remove locks.
+ SourceLocation Loc = Exp->getExprLoc();
+ for (unsigned i=0,n=ExclusiveLocksToAdd.size(); i<n; ++i) {
+ addLock(Result, ExclusiveLocksToAdd[i],
+ LockData(Loc, LK_Exclusive));
+ }
+ for (unsigned i=0,n=SharedLocksToAdd.size(); i<n; ++i) {
+ addLock(Result, SharedLocksToAdd[i],
+ LockData(Loc, LK_Shared));
+ }
+}
+
+
/// \brief We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
@@ -895,50 +1621,31 @@ public:
class BuildLockset : public StmtVisitor<BuildLockset> {
friend class ThreadSafetyAnalyzer;
- ThreadSafetyHandler &Handler;
- Lockset::Factory &LocksetFactory;
- LocalVariableMap &LocalVarMap;
-
- Lockset LSet;
+ ThreadSafetyAnalyzer *Analyzer;
+ FactSet FSet;
LocalVariableMap::Context LVarCtx;
unsigned CtxIndex;
// Helper functions
- void addLock(const MutexID &Mutex, const LockData &LDat);
- void removeLock(const MutexID &Mutex, SourceLocation UnlockLoc);
+ const ValueDecl *getValueDecl(Expr *Exp);
- template <class AttrType>
- void addLocksToSet(LockKind LK, AttrType *Attr,
- Expr *Exp, NamedDecl *D, VarDecl *VD = 0);
- void removeLocksFromSet(UnlockFunctionAttr *Attr,
- Expr *Exp, NamedDecl* FunDecl);
+ void warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp, AccessKind AK,
+ Expr *MutexExp, ProtectedOperationKind POK);
- const ValueDecl *getValueDecl(Expr *Exp);
- void warnIfMutexNotHeld (const NamedDecl *D, Expr *Exp, AccessKind AK,
- Expr *MutexExp, ProtectedOperationKind POK);
void checkAccess(Expr *Exp, AccessKind AK);
void checkDereference(Expr *Exp, AccessKind AK);
- void handleCall(Expr *Exp, NamedDecl *D, VarDecl *VD = 0);
-
- template <class AttrType>
- void addTrylock(LockKind LK, AttrType *Attr, Expr *Exp, NamedDecl *FunDecl,
- const CFGBlock* PredBlock, const CFGBlock *CurrBlock,
- Expr *BrE, bool Neg);
- CallExpr* getTrylockCallExpr(Stmt *Cond, LocalVariableMap::Context C,
- bool &Negate);
- void handleTrylock(Stmt *Cond, const CFGBlock* PredBlock,
- const CFGBlock *CurrBlock);
+ void handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD = 0);
/// \brief Returns true if the lockset contains a lock, regardless of whether
/// the lock is held exclusively or shared.
- bool locksetContains(const MutexID &Lock) const {
- return LSet.lookup(Lock);
+ bool locksetContains(const SExpr &Mu) const {
+ return FSet.findLock(Analyzer->FactMan, Mu);
}
/// \brief Returns true if the lockset contains a lock with the passed in
/// locktype.
- bool locksetContains(const MutexID &Lock, LockKind KindRequested) const {
- const LockData *LockHeld = LSet.lookup(Lock);
+ bool locksetContains(const SExpr &Mu, LockKind KindRequested) const {
+ const LockData *LockHeld = FSet.findLock(Analyzer->FactMan, Mu);
return (LockHeld && KindRequested == LockHeld->LKind);
}
@@ -946,7 +1653,7 @@ class BuildLockset : public StmtVisitor<BuildLockset> {
/// passed in locktype. So for example, if we pass in LK_Shared, this function
/// returns true if the lock is held LK_Shared or LK_Exclusive. If we pass in
/// LK_Exclusive, this function returns true if the lock is held LK_Exclusive.
- bool locksetContainsAtLeast(const MutexID &Lock,
+ bool locksetContainsAtLeast(const SExpr &Lock,
LockKind KindRequested) const {
switch (KindRequested) {
case LK_Shared:
@@ -958,12 +1665,10 @@ class BuildLockset : public StmtVisitor<BuildLockset> {
}
public:
- BuildLockset(ThreadSafetyAnalyzer *analyzer, CFGBlockInfo &Info)
+ BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
: StmtVisitor<BuildLockset>(),
- Handler(analyzer->Handler),
- LocksetFactory(analyzer->LocksetFactory),
- LocalVarMap(analyzer->LocalVarMap),
- LSet(Info.EntrySet),
+ Analyzer(Anlzr),
+ FSet(Info.EntrySet),
LVarCtx(Info.EntryContext),
CtxIndex(Info.EntryIndex)
{}
@@ -976,104 +1681,6 @@ public:
void VisitDeclStmt(DeclStmt *S);
};
-/// \brief Add a new lock to the lockset, warning if the lock is already there.
-/// \param Mutex -- the Mutex expression for the lock
-/// \param LDat -- the LockData for the lock
-void BuildLockset::addLock(const MutexID &Mutex, const LockData& LDat) {
- // FIXME: deal with acquired before/after annotations.
- // FIXME: Don't always warn when we have support for reentrant locks.
- if (locksetContains(Mutex))
- Handler.handleDoubleLock(Mutex.getName(), LDat.AcquireLoc);
- else
- LSet = LocksetFactory.add(LSet, Mutex, LDat);
-}
-
-/// \brief Remove a lock from the lockset, warning if the lock is not there.
-/// \param LockExp The lock expression corresponding to the lock to be removed
-/// \param UnlockLoc The source location of the unlock (only used in error msg)
-void BuildLockset::removeLock(const MutexID &Mutex, SourceLocation UnlockLoc) {
- const LockData *LDat = LSet.lookup(Mutex);
- if (!LDat)
- Handler.handleUnmatchedUnlock(Mutex.getName(), UnlockLoc);
- else {
- // For scoped-lockable vars, remove the mutex associated with this var.
- if (LDat->UnderlyingMutex.isValid())
- removeLock(LDat->UnderlyingMutex, UnlockLoc);
- LSet = LocksetFactory.remove(LSet, Mutex);
- }
-}
-
-/// \brief This function, parameterized by an attribute type, is used to add a
-/// set of locks specified as attribute arguments to the lockset.
-template <typename AttrType>
-void BuildLockset::addLocksToSet(LockKind LK, AttrType *Attr,
- Expr *Exp, NamedDecl* FunDecl, VarDecl *VD) {
- typedef typename AttrType::args_iterator iterator_type;
-
- SourceLocation ExpLocation = Exp->getExprLoc();
-
- // Figure out if we're calling the constructor of scoped lockable class
- bool isScopedVar = false;
- if (VD) {
- if (CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FunDecl)) {
- CXXRecordDecl* PD = CD->getParent();
- if (PD && PD->getAttr<ScopedLockableAttr>())
- isScopedVar = true;
- }
- }
-
- if (Attr->args_size() == 0) {
- // The mutex held is the "this" object.
- MutexID Mutex(0, Exp, FunDecl);
- if (!Mutex.isValid())
- MutexID::warnInvalidLock(Handler, 0, Exp, FunDecl);
- else
- addLock(Mutex, LockData(ExpLocation, LK));
- return;
- }
-
- for (iterator_type I=Attr->args_begin(), E=Attr->args_end(); I != E; ++I) {
- MutexID Mutex(*I, Exp, FunDecl);
- if (!Mutex.isValid())
- MutexID::warnInvalidLock(Handler, *I, Exp, FunDecl);
- else {
- addLock(Mutex, LockData(ExpLocation, LK));
- if (isScopedVar) {
- // For scoped lockable vars, map this var to its underlying mutex.
- DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
- MutexID SMutex(&DRE, 0, 0);
- addLock(SMutex, LockData(VD->getLocation(), LK, Mutex));
- }
- }
- }
-}
-
-/// \brief This function removes a set of locks specified as attribute
-/// arguments from the lockset.
-void BuildLockset::removeLocksFromSet(UnlockFunctionAttr *Attr,
- Expr *Exp, NamedDecl* FunDecl) {
- SourceLocation ExpLocation;
- if (Exp) ExpLocation = Exp->getExprLoc();
-
- if (Attr->args_size() == 0) {
- // The mutex held is the "this" object.
- MutexID Mu(0, Exp, FunDecl);
- if (!Mu.isValid())
- MutexID::warnInvalidLock(Handler, 0, Exp, FunDecl);
- else
- removeLock(Mu, ExpLocation);
- return;
- }
-
- for (UnlockFunctionAttr::args_iterator I = Attr->args_begin(),
- E = Attr->args_end(); I != E; ++I) {
- MutexID Mutex(*I, Exp, FunDecl);
- if (!Mutex.isValid())
- MutexID::warnInvalidLock(Handler, *I, Exp, FunDecl);
- else
- removeLock(Mutex, ExpLocation);
- }
-}
/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs
const ValueDecl *BuildLockset::getValueDecl(Expr *Exp) {
@@ -1093,11 +1700,12 @@ void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp,
ProtectedOperationKind POK) {
LockKind LK = getLockKindFromAccessKind(AK);
- MutexID Mutex(MutexExp, Exp, D);
+ SExpr Mutex(MutexExp, Exp, D);
if (!Mutex.isValid())
- MutexID::warnInvalidLock(Handler, MutexExp, Exp, D);
+ SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D);
else if (!locksetContainsAtLeast(Mutex, LK))
- Handler.handleMutexNotHeld(D, POK, Mutex.getName(), LK, Exp->getExprLoc());
+ Analyzer->Handler.handleMutexNotHeld(D, POK, Mutex.toString(), LK,
+ Exp->getExprLoc());
}
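
For reference, the kind of annotated client code these checks operate on looks roughly like the sketch below. The Mutex class, the raw attribute spellings, and the names mu, data, deposit and client are illustrative assumptions for this sketch, not part of the patch.

class __attribute__((lockable)) Mutex {
public:
  void Lock()    __attribute__((exclusive_lock_function));
  bool TryLock() __attribute__((exclusive_trylock_function(true)));
  void Unlock()  __attribute__((unlock_function));
};

Mutex mu;
int data __attribute__((guarded_by(mu)));

void deposit(int amount) __attribute__((exclusive_locks_required(mu)));

void client() {
  data = 1;     // flagged: 'mu' is not in the lockset when a guarded_by variable is written
  deposit(10);  // flagged: the exclusive_locks_required mutex is not held
  mu.Lock();
  data = 2;     // fine: 'mu' is held exclusively
  deposit(20);  // fine
  mu.Unlock();
}
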
/// \brief This method identifies variable dereferences and checks pt_guarded_by
@@ -1116,8 +1724,9 @@ void BuildLockset::checkDereference(Expr *Exp, AccessKind AK) {
if(!D || !D->hasAttrs())
return;
- if (D->getAttr<PtGuardedVarAttr>() && LSet.isEmpty())
- Handler.handleNoMutexHeld(D, POK_VarDereference, AK, Exp->getExprLoc());
+ if (D->getAttr<PtGuardedVarAttr>() && FSet.isEmpty())
+ Analyzer->Handler.handleNoMutexHeld(D, POK_VarDereference, AK,
+ Exp->getExprLoc());
const AttrVec &ArgAttrs = D->getAttrs();
for(unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
@@ -1134,8 +1743,9 @@ void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) {
if(!D || !D->hasAttrs())
return;
- if (D->getAttr<GuardedVarAttr>() && LSet.isEmpty())
- Handler.handleNoMutexHeld(D, POK_VarAccess, AK, Exp->getExprLoc());
+ if (D->getAttr<GuardedVarAttr>() && FSet.isEmpty())
+ Analyzer->Handler.handleNoMutexHeld(D, POK_VarAccess, AK,
+ Exp->getExprLoc());
const AttrVec &ArgAttrs = D->getAttrs();
for(unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
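
checkAccess and checkDereference also cover the argument-less guarded_var and pt_guarded_var forms, which only demand that some lock be held. A minimal sketch, with invented names:

int counter __attribute__((guarded_var));
int *cell   __attribute__((pt_guarded_var));

void touch() {
  counter = 1;  // handleNoMutexHeld: guarded variable written with an empty lockset
  *cell = 2;    // handleNoMutexHeld: pt_guarded_var dereferenced with an empty lockset
}
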
@@ -1153,68 +1763,68 @@ void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) {
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can be also treated as reads.
///
-/// FIXME: We need to also visit CallExprs to catch/check global functions.
-///
-/// FIXME: Do not flag an error for member variables accessed in constructors/
-/// destructors
-void BuildLockset::handleCall(Expr *Exp, NamedDecl *D, VarDecl *VD) {
- AttrVec &ArgAttrs = D->getAttrs();
+void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
+ const AttrVec &ArgAttrs = D->getAttrs();
+ MutexIDList ExclusiveLocksToAdd;
+ MutexIDList SharedLocksToAdd;
+ MutexIDList LocksToRemove;
+
for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
- Attr *Attr = ArgAttrs[i];
- switch (Attr->getKind()) {
+ Attr *At = const_cast<Attr*>(ArgAttrs[i]);
+ switch (At->getKind()) {
// When we encounter an exclusive lock function, we need to add the lock
// to our lockset with kind exclusive.
case attr::ExclusiveLockFunction: {
- ExclusiveLockFunctionAttr *A = cast<ExclusiveLockFunctionAttr>(Attr);
- addLocksToSet(LK_Exclusive, A, Exp, D, VD);
+ ExclusiveLockFunctionAttr *A = cast<ExclusiveLockFunctionAttr>(At);
+ Analyzer->getMutexIDs(ExclusiveLocksToAdd, A, Exp, D);
break;
}
// When we encounter a shared lock function, we need to add the lock
// to our lockset with kind shared.
case attr::SharedLockFunction: {
- SharedLockFunctionAttr *A = cast<SharedLockFunctionAttr>(Attr);
- addLocksToSet(LK_Shared, A, Exp, D, VD);
+ SharedLockFunctionAttr *A = cast<SharedLockFunctionAttr>(At);
+ Analyzer->getMutexIDs(SharedLocksToAdd, A, Exp, D);
break;
}
// When we encounter an unlock function, we need to remove unlocked
// mutexes from the lockset, and flag a warning if they are not there.
case attr::UnlockFunction: {
- UnlockFunctionAttr *UFAttr = cast<UnlockFunctionAttr>(Attr);
- removeLocksFromSet(UFAttr, Exp, D);
+ UnlockFunctionAttr *A = cast<UnlockFunctionAttr>(At);
+ Analyzer->getMutexIDs(LocksToRemove, A, Exp, D);
break;
}
case attr::ExclusiveLocksRequired: {
- ExclusiveLocksRequiredAttr *ELRAttr =
- cast<ExclusiveLocksRequiredAttr>(Attr);
+ ExclusiveLocksRequiredAttr *A = cast<ExclusiveLocksRequiredAttr>(At);
for (ExclusiveLocksRequiredAttr::args_iterator
- I = ELRAttr->args_begin(), E = ELRAttr->args_end(); I != E; ++I)
+ I = A->args_begin(), E = A->args_end(); I != E; ++I)
warnIfMutexNotHeld(D, Exp, AK_Written, *I, POK_FunctionCall);
break;
}
case attr::SharedLocksRequired: {
- SharedLocksRequiredAttr *SLRAttr = cast<SharedLocksRequiredAttr>(Attr);
+ SharedLocksRequiredAttr *A = cast<SharedLocksRequiredAttr>(At);
- for (SharedLocksRequiredAttr::args_iterator I = SLRAttr->args_begin(),
- E = SLRAttr->args_end(); I != E; ++I)
+ for (SharedLocksRequiredAttr::args_iterator I = A->args_begin(),
+ E = A->args_end(); I != E; ++I)
warnIfMutexNotHeld(D, Exp, AK_Read, *I, POK_FunctionCall);
break;
}
case attr::LocksExcluded: {
- LocksExcludedAttr *LEAttr = cast<LocksExcludedAttr>(Attr);
- for (LocksExcludedAttr::args_iterator I = LEAttr->args_begin(),
- E = LEAttr->args_end(); I != E; ++I) {
- MutexID Mutex(*I, Exp, D);
+ LocksExcludedAttr *A = cast<LocksExcludedAttr>(At);
+ for (LocksExcludedAttr::args_iterator I = A->args_begin(),
+ E = A->args_end(); I != E; ++I) {
+ SExpr Mutex(*I, Exp, D);
if (!Mutex.isValid())
- MutexID::warnInvalidLock(Handler, *I, Exp, D);
+ SExpr::warnInvalidLock(Analyzer->Handler, *I, Exp, D);
else if (locksetContains(Mutex))
- Handler.handleFunExcludesLock(D->getName(), Mutex.getName(),
- Exp->getExprLoc());
+ Analyzer->Handler.handleFunExcludesLock(D->getName(),
+ Mutex.toString(),
+ Exp->getExprLoc());
}
break;
}
@@ -1224,102 +1834,50 @@ void BuildLockset::handleCall(Expr *Exp, NamedDecl *D, VarDecl *VD) {
break;
}
}
-}
-
-
-/// \brief Add lock to set, if the current block is in the taken branch of a
-/// trylock.
-template <class AttrType>
-void BuildLockset::addTrylock(LockKind LK, AttrType *Attr, Expr *Exp,
- NamedDecl *FunDecl, const CFGBlock *PredBlock,
- const CFGBlock *CurrBlock, Expr *BrE, bool Neg) {
- // Find out which branch has the lock
- bool branch = 0;
- if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE)) {
- branch = BLE->getValue();
- }
- else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE)) {
- branch = ILE->getValue().getBoolValue();
- }
- int branchnum = branch ? 0 : 1;
- if (Neg) branchnum = !branchnum;
- // If we've taken the trylock branch, then add the lock
- int i = 0;
- for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
- SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
- if (*SI == CurrBlock && i == branchnum) {
- addLocksToSet(LK, Attr, Exp, FunDecl, 0);
+ // Figure out if we're calling the constructor of scoped lockable class
+ bool isScopedVar = false;
+ if (VD) {
+ if (const CXXConstructorDecl *CD = dyn_cast<const CXXConstructorDecl>(D)) {
+ const CXXRecordDecl* PD = CD->getParent();
+ if (PD && PD->getAttr<ScopedLockableAttr>())
+ isScopedVar = true;
}
}
-}
-
-// If Cond can be traced back to a function call, return the call expression.
-// The negate variable should be called with false, and will be set to true
-// if the function call is negated, e.g. if (!mu.tryLock(...))
-CallExpr* BuildLockset::getTrylockCallExpr(Stmt *Cond,
- LocalVariableMap::Context C,
- bool &Negate) {
- if (!Cond)
- return 0;
-
- if (CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
- return CallExp;
- }
- else if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
- return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
+ // Add locks.
+ SourceLocation Loc = Exp->getExprLoc();
+ for (unsigned i=0,n=ExclusiveLocksToAdd.size(); i<n; ++i) {
+ Analyzer->addLock(FSet, ExclusiveLocksToAdd[i],
+ LockData(Loc, LK_Exclusive, isScopedVar));
}
- else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
- Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
- return getTrylockCallExpr(E, C, Negate);
- }
- else if (UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
- if (UOP->getOpcode() == UO_LNot) {
- Negate = !Negate;
- return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
- }
+ for (unsigned i=0,n=SharedLocksToAdd.size(); i<n; ++i) {
+ Analyzer->addLock(FSet, SharedLocksToAdd[i],
+ LockData(Loc, LK_Shared, isScopedVar));
}
- // FIXME -- handle && and || as well.
- return NULL;
-}
-
-
-/// \brief Process a conditional branch from a previous block to the current
-/// block, looking for trylock calls.
-void BuildLockset::handleTrylock(Stmt *Cond, const CFGBlock *PredBlock,
- const CFGBlock *CurrBlock) {
- bool Negate = false;
- CallExpr *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
- if (!Exp)
- return;
- NamedDecl *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
- if(!FunDecl || !FunDecl->hasAttrs())
- return;
+ // Add the managing object as a dummy mutex, mapped to the underlying mutex.
+ // FIXME -- this doesn't work if we acquire multiple locks.
+ if (isScopedVar) {
+ SourceLocation MLoc = VD->getLocation();
+ DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
+ SExpr SMutex(&DRE, 0, 0);
- // If the condition is a call to a Trylock function, then grab the attributes
- AttrVec &ArgAttrs = FunDecl->getAttrs();
- for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
- Attr *Attr = ArgAttrs[i];
- switch (Attr->getKind()) {
- case attr::ExclusiveTrylockFunction: {
- ExclusiveTrylockFunctionAttr *A =
- cast<ExclusiveTrylockFunctionAttr>(Attr);
- addTrylock(LK_Exclusive, A, Exp, FunDecl, PredBlock, CurrBlock,
- A->getSuccessValue(), Negate);
- break;
- }
- case attr::SharedTrylockFunction: {
- SharedTrylockFunctionAttr *A =
- cast<SharedTrylockFunctionAttr>(Attr);
- addTrylock(LK_Shared, A, Exp, FunDecl, PredBlock, CurrBlock,
- A->getSuccessValue(), Negate);
- break;
- }
- default:
- break;
+ for (unsigned i=0,n=ExclusiveLocksToAdd.size(); i<n; ++i) {
+ Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Exclusive,
+ ExclusiveLocksToAdd[i]));
}
+ for (unsigned i=0,n=SharedLocksToAdd.size(); i<n; ++i) {
+ Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Shared,
+ SharedLocksToAdd[i]));
+ }
+ }
+
+ // Remove locks.
+ // FIXME -- should only fully remove if the attribute refers to 'this'.
+ bool Dtor = isa<CXXDestructorDecl>(D);
+ for (unsigned i=0,n=LocksToRemove.size(); i<n; ++i) {
+ Analyzer->removeLock(FSet, LocksToRemove[i], Loc, Dtor);
}
}
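
The scoped-lockable handling is easiest to see with a RAII guard. Reusing the illustrative Mutex, mu and data declarations from the sketch after warnIfMutexNotHeld, and assuming a hypothetical MutexLock wrapper:

class __attribute__((scoped_lockable)) MutexLock {
public:
  MutexLock(Mutex *m) __attribute__((exclusive_lock_function(m)));
  ~MutexLock()        __attribute__((unlock_function));
};

void scoped_client() {
  MutexLock guard(&mu);  // handleCall adds 'mu', plus 'guard' itself as a dummy
                         // mutex whose LockData names 'mu' as the underlying mutex
  data = 3;              // fine: 'mu' is in the lockset
}                        // the destructor's unlock_function removes 'guard' and,
                         // through the mapping, 'mu' as well
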
@@ -1351,7 +1909,7 @@ void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
return;
// adjust the context
- LVarCtx = LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
+ LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
checkAccess(LHSExp, AK_Written);
@@ -1383,13 +1941,17 @@ void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) {
void BuildLockset::VisitDeclStmt(DeclStmt *S) {
// adjust the context
- LVarCtx = LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
+ LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
DeclGroupRef DGrp = S->getDeclGroup();
for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
Decl *D = *I;
if (VarDecl *VD = dyn_cast_or_null<VarDecl>(D)) {
Expr *E = VD->getInit();
+ // handle constructors that involve temporaries
+ if (ExprWithCleanups *EWC = dyn_cast_or_null<ExprWithCleanups>(E))
+ E = EWC->getSubExpr();
+
if (CXXConstructExpr *CE = dyn_cast_or_null<CXXConstructExpr>(E)) {
NamedDecl *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
if (!CtorD || !CtorD->hasAttrs())
@@ -1401,6 +1963,7 @@ void BuildLockset::VisitDeclStmt(DeclStmt *S) {
}
+
/// \brief Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
@@ -1409,58 +1972,80 @@ void BuildLockset::VisitDeclStmt(DeclStmt *S) {
/// A; if () then B; else C; D; we need to check that the lockset after B and C
/// are the same. In the event of a difference, we use the intersection of these
/// two locksets at the start of D.
-Lockset ThreadSafetyAnalyzer::intersectAndWarn(const CFGBlockInfo &Block1,
- CFGBlockSide Side1,
- const CFGBlockInfo &Block2,
- CFGBlockSide Side2,
- LockErrorKind LEK) {
- Lockset LSet1 = Block1.getSet(Side1);
- Lockset LSet2 = Block2.getSet(Side2);
-
- Lockset Intersection = LSet1;
- for (Lockset::iterator I = LSet2.begin(), E = LSet2.end(); I != E; ++I) {
- const MutexID &LSet2Mutex = I.getKey();
- const LockData &LSet2LockData = I.getData();
- if (const LockData *LD = LSet1.lookup(LSet2Mutex)) {
- if (LD->LKind != LSet2LockData.LKind) {
- Handler.handleExclusiveAndShared(LSet2Mutex.getName(),
- LSet2LockData.AcquireLoc,
- LD->AcquireLoc);
- if (LD->LKind != LK_Exclusive)
- Intersection = LocksetFactory.add(Intersection, LSet2Mutex,
- LSet2LockData);
+///
+/// \param LSet1 The first lockset.
+/// \param LSet2 The second lockset.
+/// \param JoinLoc The location of the join point for error reporting
+/// \param LEK1 The error message to report if a mutex is missing from LSet1
+/// \param LEK2 The error message to report if a mutex is missing from Lset2
+void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
+ const FactSet &FSet2,
+ SourceLocation JoinLoc,
+ LockErrorKind LEK1,
+ LockErrorKind LEK2,
+ bool Modify) {
+ FactSet FSet1Orig = FSet1;
+
+ for (FactSet::const_iterator I = FSet2.begin(), E = FSet2.end();
+ I != E; ++I) {
+ const SExpr &FSet2Mutex = FactMan[*I].MutID;
+ const LockData &LDat2 = FactMan[*I].LDat;
+
+ if (const LockData *LDat1 = FSet1.findLock(FactMan, FSet2Mutex)) {
+ if (LDat1->LKind != LDat2.LKind) {
+ Handler.handleExclusiveAndShared(FSet2Mutex.toString(),
+ LDat2.AcquireLoc,
+ LDat1->AcquireLoc);
+ if (Modify && LDat1->LKind != LK_Exclusive) {
+ FSet1.removeLock(FactMan, FSet2Mutex);
+ FSet1.addLock(FactMan, FSet2Mutex, LDat2);
+ }
}
} else {
- Handler.handleMutexHeldEndOfScope(LSet2Mutex.getName(),
- LSet2LockData.AcquireLoc,
- Block1.getLocation(Side1), LEK);
+ if (LDat2.UnderlyingMutex.isValid()) {
+ if (FSet2.findLock(FactMan, LDat2.UnderlyingMutex)) {
+ // If this is a scoped lock that manages another mutex, and if the
+ // underlying mutex is still held, then warn about the underlying
+ // mutex.
+ Handler.handleMutexHeldEndOfScope(LDat2.UnderlyingMutex.toString(),
+ LDat2.AcquireLoc,
+ JoinLoc, LEK1);
+ }
+ }
+ else if (!LDat2.Managed)
+ Handler.handleMutexHeldEndOfScope(FSet2Mutex.toString(),
+ LDat2.AcquireLoc,
+ JoinLoc, LEK1);
}
}
- for (Lockset::iterator I = LSet1.begin(), E = LSet1.end(); I != E; ++I) {
- if (!LSet2.contains(I.getKey())) {
- const MutexID &Mutex = I.getKey();
- const LockData &MissingLock = I.getData();
- Handler.handleMutexHeldEndOfScope(Mutex.getName(),
- MissingLock.AcquireLoc,
- Block2.getLocation(Side2), LEK);
- Intersection = LocksetFactory.remove(Intersection, Mutex);
+ for (FactSet::const_iterator I = FSet1.begin(), E = FSet1.end();
+ I != E; ++I) {
+ const SExpr &FSet1Mutex = FactMan[*I].MutID;
+ const LockData &LDat1 = FactMan[*I].LDat;
+
+ if (!FSet2.findLock(FactMan, FSet1Mutex)) {
+ if (LDat1.UnderlyingMutex.isValid()) {
+ if (FSet1Orig.findLock(FactMan, LDat1.UnderlyingMutex)) {
+ // If this is a scoped lock that manages another mutex, and if the
+ // underlying mutex is still held, then warn about the underlying
+ // mutex.
+ Handler.handleMutexHeldEndOfScope(LDat1.UnderlyingMutex.toString(),
+ LDat1.AcquireLoc,
+ JoinLoc, LEK1);
+ }
+ }
+ else if (!LDat1.Managed)
+ Handler.handleMutexHeldEndOfScope(FSet1Mutex.toString(),
+ LDat1.AcquireLoc,
+ JoinLoc, LEK2);
+ if (Modify)
+ FSet1.removeLock(FactMan, FSet1Mutex);
}
}
- return Intersection;
}
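
A small example of the join this function implements, reusing the mu and data declarations from the earlier sketch; the error kind named in the comment is the one the block-merging code below passes in:

void join_demo(bool b) {
  if (b)
    mu.Lock();
  // Join point: 'mu' is held on only one incoming edge, so intersectAndWarn
  // reports LEK_LockedSomePredecessors and drops 'mu' from the intersection.
  data = 4;      // flagged as well, since 'mu' is no longer in the entry lockset
  if (b)
    mu.Unlock(); // and this unlock will typically be reported as unmatched
}
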
-Lockset ThreadSafetyAnalyzer::addLock(Lockset &LSet, Expr *MutexExp,
- const NamedDecl *D,
- LockKind LK, SourceLocation Loc) {
- MutexID Mutex(MutexExp, 0, D);
- if (!Mutex.isValid()) {
- MutexID::warnInvalidLock(Handler, MutexExp, 0, D);
- return LSet;
- }
- LockData NewLock(Loc, LK);
- return LocksetFactory.add(LSet, Mutex, NewLock);
-}
+
/// \brief Check a function's CFG for thread-safety violations.
///
@@ -1472,6 +2057,8 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
if (!CFGraph) return;
const NamedDecl *D = dyn_cast_or_null<NamedDecl>(AC.getDecl());
+ // AC.dumpCFG(true);
+
if (!D)
return; // Ignore anonymous functions for now.
if (D->getAttr<NoThreadSafetyAnalysisAttr>())
@@ -1485,8 +2072,8 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
if (isa<CXXDestructorDecl>(D))
return; // Don't check inside destructors.
- std::vector<CFGBlockInfo> BlockInfo(CFGraph->getNumBlockIDs(),
- CFGBlockInfo::getEmptyBlockInfo(LocksetFactory, LocalVarMap));
+ BlockInfo.resize(CFGraph->getNumBlockIDs(),
+ CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));
// We need to explore the CFG via a "topological" ordering.
// That way, we will be guaranteed to have information about required
@@ -1505,27 +2092,22 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
// FIXME: is there a more intelligent way to check lock/unlock functions?
if (!SortedGraph->empty() && D->hasAttrs()) {
const CFGBlock *FirstBlock = *SortedGraph->begin();
- Lockset &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
+ FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
const AttrVec &ArgAttrs = D->getAttrs();
+
+ MutexIDList ExclusiveLocksToAdd;
+ MutexIDList SharedLocksToAdd;
+
+ SourceLocation Loc = D->getLocation();
for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
Attr *Attr = ArgAttrs[i];
- SourceLocation AttrLoc = Attr->getLocation();
- if (SharedLocksRequiredAttr *SLRAttr
- = dyn_cast<SharedLocksRequiredAttr>(Attr)) {
- for (SharedLocksRequiredAttr::args_iterator
- SLRIter = SLRAttr->args_begin(),
- SLREnd = SLRAttr->args_end(); SLRIter != SLREnd; ++SLRIter)
- InitialLockset = addLock(InitialLockset,
- *SLRIter, D, LK_Shared,
- AttrLoc);
- } else if (ExclusiveLocksRequiredAttr *ELRAttr
- = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
- for (ExclusiveLocksRequiredAttr::args_iterator
- ELRIter = ELRAttr->args_begin(),
- ELREnd = ELRAttr->args_end(); ELRIter != ELREnd; ++ELRIter)
- InitialLockset = addLock(InitialLockset,
- *ELRIter, D, LK_Exclusive,
- AttrLoc);
+ Loc = Attr->getLocation();
+ if (ExclusiveLocksRequiredAttr *A
+ = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
+ getMutexIDs(ExclusiveLocksToAdd, A, (Expr*) 0, D);
+ } else if (SharedLocksRequiredAttr *A
+ = dyn_cast<SharedLocksRequiredAttr>(Attr)) {
+ getMutexIDs(SharedLocksToAdd, A, (Expr*) 0, D);
} else if (isa<UnlockFunctionAttr>(Attr)) {
// Don't try to check unlock functions for now
return;
@@ -1535,8 +2117,24 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
} else if (isa<SharedLockFunctionAttr>(Attr)) {
// Don't try to check lock functions for now
return;
+ } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
+ // Don't try to check trylock functions for now
+ return;
+ } else if (isa<SharedTrylockFunctionAttr>(Attr)) {
+ // Don't try to check trylock functions for now
+ return;
}
}
+
+ // FIXME -- Loc can be wrong here.
+ for (unsigned i=0,n=ExclusiveLocksToAdd.size(); i<n; ++i) {
+ addLock(InitialLockset, ExclusiveLocksToAdd[i],
+ LockData(Loc, LK_Exclusive));
+ }
+ for (unsigned i=0,n=SharedLocksToAdd.size(); i<n; ++i) {
+ addLock(InitialLockset, SharedLocksToAdd[i],
+ LockData(Loc, LK_Shared));
+ }
}
for (PostOrderCFGView::iterator I = SortedGraph->begin(),
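
In other words, a function's own ..._locks_required attributes seed the entry lockset of its first block, so its body may touch the protected state directly; the caller is what gets checked. A hypothetical example reusing the earlier declarations:

void adjust() __attribute__((exclusive_locks_required(mu)));

void adjust() {
  data = 5;  // no warning: 'mu' was placed in InitialLockset from the attribute
}
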
@@ -1587,15 +2185,16 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
int PrevBlockID = (*PI)->getBlockID();
CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
+ FactSet PrevLockset;
+ getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);
if (!LocksetInitialized) {
- CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
+ CurrBlockInfo->EntrySet = PrevLockset;
LocksetInitialized = true;
} else {
- CurrBlockInfo->EntrySet =
- intersectAndWarn(*CurrBlockInfo, CBS_Entry,
- *PrevBlockInfo, CBS_Exit,
- LEK_LockedSomePredecessors);
+ intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
+ CurrBlockInfo->EntryLoc,
+ LEK_LockedSomePredecessors);
}
}
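
The edge-lockset computation is what makes trylock idioms work: the mutex is added only on the successor edge whose branch value matches the trylock attribute's success value. Reusing the illustrative Mutex from the first sketch:

void try_demo() {
  if (mu.TryLock()) {  // success edge: 'mu' enters the lockset for this branch only
    data = 6;          // fine here
    mu.Unlock();
  } else {
    data = 7;          // flagged: 'mu' is not held on the failure edge
  }
}
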
@@ -1619,23 +2218,20 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
const Stmt *Terminator = PrevBlock->getTerminator();
bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);
+ FactSet PrevLockset;
+ getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet,
+ PrevBlock, CurrBlock);
+
// Do not update EntrySet.
- intersectAndWarn(*CurrBlockInfo, CBS_Entry, *PrevBlockInfo, CBS_Exit,
+ intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
+ PrevBlockInfo->ExitLoc,
IsLoop ? LEK_LockedSomeLoopIterations
- : LEK_LockedSomePredecessors);
+ : LEK_LockedSomePredecessors,
+ false);
}
}
BuildLockset LocksetBuilder(this, *CurrBlockInfo);
- CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
- PE = CurrBlock->pred_end();
- if (PI != PE) {
- // If the predecessor ended in a branch, then process any trylocks.
- // FIXME -- check to make sure there's only one predecessor.
- if (Stmt *TCE = (*PI)->getTerminatorCondition()) {
- LocksetBuilder.handleTrylock(TCE, *PI, CurrBlock);
- }
- }
// Visit all the statements in the basic block.
for (CFGBlock::const_iterator BI = CurrBlock->begin(),
@@ -1665,7 +2261,7 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
break;
}
}
- CurrBlockInfo->ExitSet = LocksetBuilder.LSet;
+ CurrBlockInfo->ExitSet = LocksetBuilder.FSet;
// For every back edge from CurrBlock (the end of the loop) to another block
// (FirstLoopBlock) we need to check that the Lockset of Block is equal to
@@ -1679,19 +2275,24 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
continue;
CFGBlock *FirstLoopBlock = *SI;
- CFGBlockInfo &PreLoop = BlockInfo[FirstLoopBlock->getBlockID()];
- CFGBlockInfo &LoopEnd = BlockInfo[CurrBlockID];
- intersectAndWarn(LoopEnd, CBS_Exit, PreLoop, CBS_Entry,
- LEK_LockedSomeLoopIterations);
+ CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
+ CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
+ intersectAndWarn(LoopEnd->ExitSet, PreLoop->EntrySet,
+ PreLoop->EntryLoc,
+ LEK_LockedSomeLoopIterations,
+ false);
}
}
- CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
- CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];
+ CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()];
+ CFGBlockInfo *Final = &BlockInfo[CFGraph->getExit().getBlockID()];
// FIXME: Should we call this function for all blocks which exit the function?
- intersectAndWarn(Initial, CBS_Entry, Final, CBS_Exit,
- LEK_LockedAtEndOfFunction);
+ intersectAndWarn(Initial->EntrySet, Final->ExitSet,
+ Final->ExitLoc,
+ LEK_LockedAtEndOfFunction,
+ LEK_NotLockedAtEndOfFunction,
+ false);
}
} // end anonymous namespace
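
The final intersectAndWarn call compares the function's entry lockset against its exit lockset, which is what catches locks that escape their scope. Reusing mu from the first sketch:

void leaky(bool b) {
  mu.Lock();
  if (b)
    return;    // 'mu' is still held when the function exits along this path
  mu.Unlock();
}              // flagged at the final join: held at the end of the function but not on entry
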
diff --git a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
index 1c7e6b6..858be45 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
@@ -16,6 +16,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/PackedVector.h"
#include "llvm/ADT/DenseMap.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/AnalysisContext.h"
@@ -25,6 +26,8 @@
using namespace clang;
+#define DEBUG_LOGGING 0
+
static bool isTrackedVar(const VarDecl *vd, const DeclContext *dc) {
if (vd->isLocalVarDecl() && !vd->hasGlobalStorage() &&
!vd->isExceptionVariable() &&
@@ -95,143 +98,79 @@ static bool isAlwaysUninit(const Value v) {
namespace {
typedef llvm::PackedVector<Value, 2> ValueVector;
-typedef std::pair<ValueVector *, ValueVector *> BVPair;
class CFGBlockValues {
const CFG &cfg;
- BVPair *vals;
+ std::vector<ValueVector*> vals;
ValueVector scratch;
DeclToIndex declToIndex;
-
- ValueVector &lazyCreate(ValueVector *&bv);
public:
CFGBlockValues(const CFG &cfg);
~CFGBlockValues();
-
+
unsigned getNumEntries() const { return declToIndex.size(); }
void computeSetOfDeclarations(const DeclContext &dc);
- ValueVector &getValueVector(const CFGBlock *block,
- const CFGBlock *dstBlock);
-
- BVPair &getValueVectors(const CFGBlock *block, bool shouldLazyCreate);
+ ValueVector &getValueVector(const CFGBlock *block) {
+ return *vals[block->getBlockID()];
+ }
+ void setAllScratchValues(Value V);
void mergeIntoScratch(ValueVector const &source, bool isFirst);
bool updateValueVectorWithScratch(const CFGBlock *block);
- bool updateValueVectors(const CFGBlock *block, const BVPair &newVals);
bool hasNoDeclarations() const {
return declToIndex.size() == 0;
}
void resetScratch();
- ValueVector &getScratch() { return scratch; }
ValueVector::reference operator[](const VarDecl *vd);
+
+ Value getValue(const CFGBlock *block, const CFGBlock *dstBlock,
+ const VarDecl *vd) {
+ const llvm::Optional<unsigned> &idx = declToIndex.getValueIndex(vd);
+ assert(idx.hasValue());
+ return getValueVector(block)[idx.getValue()];
+ }
};
} // end anonymous namespace
-CFGBlockValues::CFGBlockValues(const CFG &c) : cfg(c), vals(0) {
- unsigned n = cfg.getNumBlockIDs();
- if (!n)
- return;
- vals = new std::pair<ValueVector*, ValueVector*>[n];
- memset((void*)vals, 0, sizeof(*vals) * n);
-}
+CFGBlockValues::CFGBlockValues(const CFG &c) : cfg(c), vals(0) {}
CFGBlockValues::~CFGBlockValues() {
- unsigned n = cfg.getNumBlockIDs();
- if (n == 0)
- return;
- for (unsigned i = 0; i < n; ++i) {
- delete vals[i].first;
- delete vals[i].second;
- }
- delete [] vals;
+ for (std::vector<ValueVector*>::iterator I = vals.begin(), E = vals.end();
+ I != E; ++I)
+ delete *I;
}
void CFGBlockValues::computeSetOfDeclarations(const DeclContext &dc) {
declToIndex.computeMap(dc);
- scratch.resize(declToIndex.size());
-}
-
-ValueVector &CFGBlockValues::lazyCreate(ValueVector *&bv) {
- if (!bv)
- bv = new ValueVector(declToIndex.size());
- return *bv;
-}
-
-/// This function pattern matches for a '&&' or '||' that appears at
-/// the beginning of a CFGBlock that also (1) has a terminator and
-/// (2) has no other elements. If such an expression is found, it is returned.
-static const BinaryOperator *getLogicalOperatorInChain(const CFGBlock *block) {
- if (block->empty())
- return 0;
-
- CFGElement front = block->front();
- const CFGStmt *cstmt = front.getAs<CFGStmt>();
- if (!cstmt)
- return 0;
-
- const BinaryOperator *b = dyn_cast_or_null<BinaryOperator>(cstmt->getStmt());
-
- if (!b || !b->isLogicalOp())
- return 0;
-
- if (block->pred_size() == 2) {
- if (block->getTerminatorCondition() == b) {
- if (block->succ_size() == 2)
- return b;
- }
- else if (block->size() == 1)
- return b;
- }
-
- return 0;
-}
-
-ValueVector &CFGBlockValues::getValueVector(const CFGBlock *block,
- const CFGBlock *dstBlock) {
- unsigned idx = block->getBlockID();
- if (dstBlock && getLogicalOperatorInChain(block)) {
- if (*block->succ_begin() == dstBlock)
- return lazyCreate(vals[idx].first);
- assert(*(block->succ_begin()+1) == dstBlock);
- return lazyCreate(vals[idx].second);
- }
-
- assert(vals[idx].second == 0);
- return lazyCreate(vals[idx].first);
-}
-
-BVPair &CFGBlockValues::getValueVectors(const clang::CFGBlock *block,
- bool shouldLazyCreate) {
- unsigned idx = block->getBlockID();
- lazyCreate(vals[idx].first);
- if (shouldLazyCreate)
- lazyCreate(vals[idx].second);
- return vals[idx];
+ unsigned decls = declToIndex.size();
+ scratch.resize(decls);
+ unsigned n = cfg.getNumBlockIDs();
+ if (!n)
+ return;
+ vals.resize(n);
+ for (unsigned i = 0; i < n; ++i)
+ vals[i] = new ValueVector(decls);
}
-#if 0
+#if DEBUG_LOGGING
static void printVector(const CFGBlock *block, ValueVector &bv,
unsigned num) {
-
llvm::errs() << block->getBlockID() << " :";
for (unsigned i = 0; i < bv.size(); ++i) {
llvm::errs() << ' ' << bv[i];
}
llvm::errs() << " : " << num << '\n';
}
+#endif
-static void printVector(const char *name, ValueVector const &bv) {
- llvm::errs() << name << " : ";
- for (unsigned i = 0; i < bv.size(); ++i) {
- llvm::errs() << ' ' << bv[i];
- }
- llvm::errs() << "\n";
+void CFGBlockValues::setAllScratchValues(Value V) {
+ for (unsigned I = 0, E = scratch.size(); I != E; ++I)
+ scratch[I] = V;
}
-#endif
void CFGBlockValues::mergeIntoScratch(ValueVector const &source,
bool isFirst) {
@@ -242,30 +181,16 @@ void CFGBlockValues::mergeIntoScratch(ValueVector const &source,
}
bool CFGBlockValues::updateValueVectorWithScratch(const CFGBlock *block) {
- ValueVector &dst = getValueVector(block, 0);
+ ValueVector &dst = getValueVector(block);
bool changed = (dst != scratch);
if (changed)
dst = scratch;
-#if 0
+#if DEBUG_LOGGING
printVector(block, scratch, 0);
#endif
return changed;
}
-bool CFGBlockValues::updateValueVectors(const CFGBlock *block,
- const BVPair &newVals) {
- BVPair &vals = getValueVectors(block, true);
- bool changed = *newVals.first != *vals.first ||
- *newVals.second != *vals.second;
- *vals.first = *newVals.first;
- *vals.second = *newVals.second;
-#if 0
- printVector(block, *vals.first, 1);
- printVector(block, *vals.second, 2);
-#endif
- return changed;
-}
-
void CFGBlockValues::resetScratch() {
scratch.reset();
}
@@ -321,7 +246,7 @@ const CFGBlock *DataflowWorklist::dequeue() {
}
//------------------------------------------------------------------------====//
-// Transfer function for uninitialized values analysis.
+// Classification of DeclRefExprs as use or initialization.
//====------------------------------------------------------------------------//
namespace {
@@ -329,106 +254,339 @@ class FindVarResult {
const VarDecl *vd;
const DeclRefExpr *dr;
public:
- FindVarResult(VarDecl *vd, DeclRefExpr *dr) : vd(vd), dr(dr) {}
-
+ FindVarResult(const VarDecl *vd, const DeclRefExpr *dr) : vd(vd), dr(dr) {}
+
const DeclRefExpr *getDeclRefExpr() const { return dr; }
const VarDecl *getDecl() const { return vd; }
};
-
+
+static const Expr *stripCasts(ASTContext &C, const Expr *Ex) {
+ while (Ex) {
+ Ex = Ex->IgnoreParenNoopCasts(C);
+ if (const CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
+ if (CE->getCastKind() == CK_LValueBitCast) {
+ Ex = CE->getSubExpr();
+ continue;
+ }
+ }
+ break;
+ }
+ return Ex;
+}
+
+/// If E is an expression comprising a reference to a single variable, find that
+/// variable.
+static FindVarResult findVar(const Expr *E, const DeclContext *DC) {
+ if (const DeclRefExpr *DRE =
+ dyn_cast<DeclRefExpr>(stripCasts(DC->getParentASTContext(), E)))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (isTrackedVar(VD, DC))
+ return FindVarResult(VD, DRE);
+ return FindVarResult(0, 0);
+}
+
+/// \brief Classify each DeclRefExpr as an initialization or a use. Any
+/// DeclRefExpr which isn't explicitly classified will be assumed to have
+/// escaped the analysis and will be treated as an initialization.
+class ClassifyRefs : public StmtVisitor<ClassifyRefs> {
+public:
+ enum Class {
+ Init,
+ Use,
+ SelfInit,
+ Ignore
+ };
+
+private:
+ const DeclContext *DC;
+ llvm::DenseMap<const DeclRefExpr*, Class> Classification;
+
+ bool isTrackedVar(const VarDecl *VD) const {
+ return ::isTrackedVar(VD, DC);
+ }
+
+ void classify(const Expr *E, Class C);
+
+public:
+ ClassifyRefs(AnalysisDeclContext &AC) : DC(cast<DeclContext>(AC.getDecl())) {}
+
+ void VisitDeclStmt(DeclStmt *DS);
+ void VisitUnaryOperator(UnaryOperator *UO);
+ void VisitBinaryOperator(BinaryOperator *BO);
+ void VisitCallExpr(CallExpr *CE);
+ void VisitCastExpr(CastExpr *CE);
+
+ void operator()(Stmt *S) { Visit(S); }
+
+ Class get(const DeclRefExpr *DRE) const {
+ llvm::DenseMap<const DeclRefExpr*, Class>::const_iterator I
+ = Classification.find(DRE);
+ if (I != Classification.end())
+ return I->second;
+
+ const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ if (!VD || !isTrackedVar(VD))
+ return Ignore;
+
+ return Init;
+ }
+};
+}
+
+static const DeclRefExpr *getSelfInitExpr(VarDecl *VD) {
+ if (Expr *Init = VD->getInit()) {
+ const DeclRefExpr *DRE
+ = dyn_cast<DeclRefExpr>(stripCasts(VD->getASTContext(), Init));
+ if (DRE && DRE->getDecl() == VD)
+ return DRE;
+ }
+ return 0;
+}
+
+void ClassifyRefs::classify(const Expr *E, Class C) {
+ FindVarResult Var = findVar(E, DC);
+ if (const DeclRefExpr *DRE = Var.getDeclRefExpr())
+ Classification[DRE] = std::max(Classification[DRE], C);
+}
+
+void ClassifyRefs::VisitDeclStmt(DeclStmt *DS) {
+ for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end();
+ DI != DE; ++DI) {
+ VarDecl *VD = dyn_cast<VarDecl>(*DI);
+ if (VD && isTrackedVar(VD))
+ if (const DeclRefExpr *DRE = getSelfInitExpr(VD))
+ Classification[DRE] = SelfInit;
+ }
+}
+
+void ClassifyRefs::VisitBinaryOperator(BinaryOperator *BO) {
+ // Ignore the evaluation of a DeclRefExpr on the LHS of an assignment. If this
+ // is not a compound-assignment, we will treat it as initializing the variable
+ // when TransferFunctions visits it. A compound-assignment does not affect
+ // whether a variable is uninitialized, and there's no point counting it as a
+ // use.
+ if (BO->isCompoundAssignmentOp())
+ classify(BO->getLHS(), Use);
+ else if (BO->getOpcode() == BO_Assign)
+ classify(BO->getLHS(), Ignore);
+}
+
+void ClassifyRefs::VisitUnaryOperator(UnaryOperator *UO) {
+ // Increment and decrement are uses despite there being no lvalue-to-rvalue
+ // conversion.
+ if (UO->isIncrementDecrementOp())
+ classify(UO->getSubExpr(), Use);
+}
+
+void ClassifyRefs::VisitCallExpr(CallExpr *CE) {
+ // If a value is passed by const reference to a function, we should not assume
+ // that it is initialized by the call, and we conservatively do not assume
+ // that it is used.
+ for (CallExpr::arg_iterator I = CE->arg_begin(), E = CE->arg_end();
+ I != E; ++I)
+ if ((*I)->getType().isConstQualified() && (*I)->isGLValue())
+ classify(*I, Ignore);
+}
+
+void ClassifyRefs::VisitCastExpr(CastExpr *CE) {
+ if (CE->getCastKind() == CK_LValueToRValue)
+ classify(CE->getSubExpr(), Use);
+ else if (CStyleCastExpr *CSE = dyn_cast<CStyleCastExpr>(CE)) {
+ if (CSE->getType()->isVoidType()) {
+ // Squelch any detected load of an uninitialized value if
+ // we cast it to void.
+ // e.g. (void) x;
+ classify(CSE->getSubExpr(), Ignore);
+ }
+ }
+}
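
To summarize the classification rules, the sketch below shows how the individual references in a small function would be classified; the helper declarations are invented for illustration:

void use(int);
void peek(const int &);

void classify_demo() {
  int x;
  x = 1;      // DeclRefExpr on the LHS of a plain '=': Ignore (the assignment
              // itself marks 'x' initialized when TransferFunctions visits it)
  x += 1;     // compound assignment: Use
  ++x;        // increment: Use
  use(x);     // lvalue-to-rvalue load of the argument: Use
  peek(x);    // passed by const reference: Ignore (neither a use nor an init)
  (void)x;    // C-style cast to void: Ignore, squelching the load
  int y = y;  // initializer refers to the variable itself: SelfInit
  use(y);
}
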
+
+//------------------------------------------------------------------------====//
+// Transfer function for uninitialized values analysis.
+//====------------------------------------------------------------------------//
+
+namespace {
class TransferFunctions : public StmtVisitor<TransferFunctions> {
CFGBlockValues &vals;
const CFG &cfg;
+ const CFGBlock *block;
AnalysisDeclContext &ac;
+ const ClassifyRefs &classification;
UninitVariablesHandler *handler;
-
- /// The last DeclRefExpr seen when analyzing a block. Used to
- /// cheat when detecting cases when the address of a variable is taken.
- DeclRefExpr *lastDR;
-
- /// The last lvalue-to-rvalue conversion of a variable whose value
- /// was uninitialized. Normally this results in a warning, but it is
- /// possible to either silence the warning in some cases, or we
- /// propagate the uninitialized value.
- CastExpr *lastLoad;
-
- /// For some expressions, we want to ignore any post-processing after
- /// visitation.
- bool skipProcessUses;
-
+
public:
TransferFunctions(CFGBlockValues &vals, const CFG &cfg,
- AnalysisDeclContext &ac,
+ const CFGBlock *block, AnalysisDeclContext &ac,
+ const ClassifyRefs &classification,
UninitVariablesHandler *handler)
- : vals(vals), cfg(cfg), ac(ac), handler(handler),
- lastDR(0), lastLoad(0),
- skipProcessUses(false) {}
-
- void reportUninit(const DeclRefExpr *ex, const VarDecl *vd,
- bool isAlwaysUninit);
+ : vals(vals), cfg(cfg), block(block), ac(ac),
+ classification(classification), handler(handler) {}
+ void reportUse(const Expr *ex, const VarDecl *vd);
+
+ void VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS);
void VisitBlockExpr(BlockExpr *be);
+ void VisitCallExpr(CallExpr *ce);
void VisitDeclStmt(DeclStmt *ds);
void VisitDeclRefExpr(DeclRefExpr *dr);
- void VisitUnaryOperator(UnaryOperator *uo);
void VisitBinaryOperator(BinaryOperator *bo);
- void VisitCastExpr(CastExpr *ce);
- void VisitObjCForCollectionStmt(ObjCForCollectionStmt *fs);
- void Visit(Stmt *s);
-
+
bool isTrackedVar(const VarDecl *vd) {
return ::isTrackedVar(vd, cast<DeclContext>(ac.getDecl()));
}
-
- FindVarResult findBlockVarDecl(Expr *ex);
-
- void ProcessUses(Stmt *s = 0);
-};
-}
-static const Expr *stripCasts(ASTContext &C, const Expr *Ex) {
- while (Ex) {
- Ex = Ex->IgnoreParenNoopCasts(C);
- if (const CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
- if (CE->getCastKind() == CK_LValueBitCast) {
- Ex = CE->getSubExpr();
- continue;
+ FindVarResult findVar(const Expr *ex) {
+ return ::findVar(ex, cast<DeclContext>(ac.getDecl()));
+ }
+
+ UninitUse getUninitUse(const Expr *ex, const VarDecl *vd, Value v) {
+ UninitUse Use(ex, isAlwaysUninit(v));
+
+ assert(isUninitialized(v));
+ if (Use.getKind() == UninitUse::Always)
+ return Use;
+
+ // If an edge which leads unconditionally to this use did not initialize
+ // the variable, we can say something stronger than 'may be uninitialized':
+ // we can say 'either it's used uninitialized or you have dead code'.
+ //
+ // We track the number of successors of a node which have been visited, and
+ // visit a node once we have visited all of its successors. Only edges where
+ // the variable might still be uninitialized are followed. Since a variable
+ // can't transfer from being initialized to being uninitialized, this will
+ // trace out the subgraph which inevitably leads to the use and does not
+ // initialize the variable. We do not want to skip past loops, since their
+ // non-termination might be correlated with the initialization condition.
+ //
+ // For example:
+ //
+ // void f(bool a, bool b) {
+ // block1: int n;
+ // if (a) {
+ // block2: if (b)
+ // block3: n = 1;
+ // block4: } else if (b) {
+ // block5: while (!a) {
+ // block6: do_work(&a);
+ // n = 2;
+ // }
+ // }
+ // block7: if (a)
+ // block8: g();
+ // block9: return n;
+ // }
+ //
+ // Starting from the maybe-uninitialized use in block 9:
+ // * Block 7 is not visited because we have only visited one of its two
+ // successors.
+ // * Block 8 is visited because we've visited its only successor.
+ // From block 8:
+ // * Block 7 is visited because we've now visited both of its successors.
+ // From block 7:
+ // * Blocks 1, 2, 4, 5, and 6 are not visited because we didn't visit all
+ // of their successors (we didn't visit 4, 3, 5, 6, and 5, respectively).
+ // * Block 3 is not visited because it initializes 'n'.
+ // Now the algorithm terminates, having visited blocks 7 and 8, and having
+ // found the frontier is blocks 2, 4, and 5.
+ //
+ // 'n' is definitely uninitialized for two edges into block 7 (from blocks 2
+ // and 4), so we report that any time either of those edges is taken (in
+ // each case when 'b == false'), 'n' is used uninitialized.
+ llvm::SmallVector<const CFGBlock*, 32> Queue;
+ llvm::SmallVector<unsigned, 32> SuccsVisited(cfg.getNumBlockIDs(), 0);
+ Queue.push_back(block);
+ // Specify that we've already visited all successors of the starting block.
+ // This has the dual purpose of ensuring we never add it to the queue, and
+ // of marking it as not being a candidate element of the frontier.
+ SuccsVisited[block->getBlockID()] = block->succ_size();
+ while (!Queue.empty()) {
+ const CFGBlock *B = Queue.back();
+ Queue.pop_back();
+ for (CFGBlock::const_pred_iterator I = B->pred_begin(), E = B->pred_end();
+ I != E; ++I) {
+ const CFGBlock *Pred = *I;
+ if (vals.getValue(Pred, B, vd) == Initialized)
+ // This block initializes the variable.
+ continue;
+
+ unsigned &SV = SuccsVisited[Pred->getBlockID()];
+ if (!SV) {
+ // When visiting the first successor of a block, mark all NULL
+ // successors as having been visited.
+ for (CFGBlock::const_succ_iterator SI = Pred->succ_begin(),
+ SE = Pred->succ_end();
+ SI != SE; ++SI)
+ if (!*SI)
+ ++SV;
+ }
+
+ if (++SV == Pred->succ_size())
+ // All paths from this block lead to the use and don't initialize the
+ // variable.
+ Queue.push_back(Pred);
+ }
+ }
+
+ // Scan the frontier, looking for blocks where the variable was
+ // uninitialized.
+ for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
+ const CFGBlock *Block = *BI;
+ unsigned BlockID = Block->getBlockID();
+ const Stmt *Term = Block->getTerminator();
+ if (SuccsVisited[BlockID] && SuccsVisited[BlockID] < Block->succ_size() &&
+ Term) {
+ // This block inevitably leads to the use. If we have an edge from here
+ // to a post-dominator block, and the variable is uninitialized on that
+ // edge, we have found a bug.
+ for (CFGBlock::const_succ_iterator I = Block->succ_begin(),
+ E = Block->succ_end(); I != E; ++I) {
+ const CFGBlock *Succ = *I;
+ if (Succ && SuccsVisited[Succ->getBlockID()] >= Succ->succ_size() &&
+ vals.getValue(Block, Succ, vd) == Uninitialized) {
+ // Switch cases are a special case: report the label to the caller
+ // as the 'terminator', not the switch statement itself. Suppress
+ // situations where no label matched: we can't be sure that's
+ // possible.
+ if (isa<SwitchStmt>(Term)) {
+ const Stmt *Label = Succ->getLabel();
+ if (!Label || !isa<SwitchCase>(Label))
+ // Might not be possible.
+ continue;
+ UninitUse::Branch Branch;
+ Branch.Terminator = Label;
+ Branch.Output = 0; // Ignored.
+ Use.addUninitBranch(Branch);
+ } else {
+ UninitUse::Branch Branch;
+ Branch.Terminator = Term;
+ Branch.Output = I - Block->succ_begin();
+ Use.addUninitBranch(Branch);
+ }
+ }
+ }
}
}
- break;
- }
- return Ex;
-}
-void TransferFunctions::reportUninit(const DeclRefExpr *ex,
- const VarDecl *vd, bool isAlwaysUnit) {
- if (handler) handler->handleUseOfUninitVariable(ex, vd, isAlwaysUnit);
+ return Use;
+ }
+};
}
-FindVarResult TransferFunctions::findBlockVarDecl(Expr *ex) {
- if (DeclRefExpr *dr = dyn_cast<DeclRefExpr>(ex->IgnoreParenCasts()))
- if (VarDecl *vd = dyn_cast<VarDecl>(dr->getDecl()))
- if (isTrackedVar(vd))
- return FindVarResult(vd, dr);
- return FindVarResult(0, 0);
+void TransferFunctions::reportUse(const Expr *ex, const VarDecl *vd) {
+ if (!handler)
+ return;
+ Value v = vals[vd];
+ if (isUninitialized(v))
+ handler->handleUseOfUninitVariable(vd, getUninitUse(ex, vd, v));
}
-void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *fs) {
+void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS) {
// This represents an initialization of the 'element' value.
- Stmt *element = fs->getElement();
- const VarDecl *vd = 0;
-
- if (DeclStmt *ds = dyn_cast<DeclStmt>(element)) {
- vd = cast<VarDecl>(ds->getSingleDecl());
- if (!isTrackedVar(vd))
- vd = 0;
- } else {
- // Initialize the value of the reference variable.
- const FindVarResult &res = findBlockVarDecl(cast<Expr>(element));
- vd = res.getDecl();
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(FS->getElement())) {
+ const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
+ if (isTrackedVar(VD))
+ vals[VD] = Initialized;
}
-
- if (vd)
- vals[vd] = Initialized;
}
void TransferFunctions::VisitBlockExpr(BlockExpr *be) {
@@ -442,231 +600,112 @@ void TransferFunctions::VisitBlockExpr(BlockExpr *be) {
vals[vd] = Initialized;
continue;
}
- Value v = vals[vd];
- if (handler && isUninitialized(v))
- handler->handleUseOfUninitVariable(be, vd, isAlwaysUninit(v));
+ reportUse(be, vd);
}
}
-void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *dr) {
- // Record the last DeclRefExpr seen. This is an lvalue computation.
- // We use this value to later detect if a variable "escapes" the analysis.
- if (const VarDecl *vd = dyn_cast<VarDecl>(dr->getDecl()))
- if (isTrackedVar(vd)) {
- ProcessUses();
- lastDR = dr;
- }
-}
-
-void TransferFunctions::VisitDeclStmt(DeclStmt *ds) {
- for (DeclStmt::decl_iterator DI = ds->decl_begin(), DE = ds->decl_end();
- DI != DE; ++DI) {
- if (VarDecl *vd = dyn_cast<VarDecl>(*DI)) {
- if (isTrackedVar(vd)) {
- if (Expr *init = vd->getInit()) {
- // If the initializer consists solely of a reference to itself, we
- // explicitly mark the variable as uninitialized. This allows code
- // like the following:
- //
- // int x = x;
- //
- // to deliberately leave a variable uninitialized. Different analysis
- // clients can detect this pattern and adjust their reporting
- // appropriately, but we need to continue to analyze subsequent uses
- // of the variable.
- if (init == lastLoad) {
- const DeclRefExpr *DR
- = cast<DeclRefExpr>(stripCasts(ac.getASTContext(),
- lastLoad->getSubExpr()));
- if (DR->getDecl() == vd) {
- // int x = x;
- // Propagate uninitialized value, but don't immediately report
- // a problem.
- vals[vd] = Uninitialized;
- lastLoad = 0;
- lastDR = 0;
- if (handler)
- handler->handleSelfInit(vd);
- return;
- }
- }
-
- // All other cases: treat the new variable as initialized.
- // This is a minor optimization to reduce the propagation
- // of the analysis, since we will have already reported
- // the use of the uninitialized value (which visiting the
- // initializer).
- vals[vd] = Initialized;
- }
- }
- }
- }
+void TransferFunctions::VisitCallExpr(CallExpr *ce) {
+ // After a call to a function like setjmp or vfork, any variable which is
+ // initialized anywhere within this function may now be initialized. For now,
+ // just assume such a call initializes all variables.
+ // FIXME: Only mark variables as initialized if they have an initializer which
+ // is reachable from here.
+ Decl *Callee = ce->getCalleeDecl();
+ if (Callee && Callee->hasAttr<ReturnsTwiceAttr>())
+ vals.setAllScratchValues(Initialized);
}
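
A hypothetical example of the returns_twice handling; resumable is an invented stand-in for functions like setjmp or vfork:

int resumable() __attribute__((returns_twice));
void use(int);

void returns_twice_demo() {
  int n;
  if (resumable() == 0)
    n = 1;
  use(n);  // not reported: after the returns_twice call, every tracked variable
           // is conservatively treated as initialized
}
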
-void TransferFunctions::VisitBinaryOperator(clang::BinaryOperator *bo) {
- if (bo->isAssignmentOp()) {
- const FindVarResult &res = findBlockVarDecl(bo->getLHS());
- if (const VarDecl *vd = res.getDecl()) {
- ValueVector::reference val = vals[vd];
- if (isUninitialized(val)) {
- if (bo->getOpcode() != BO_Assign)
- reportUninit(res.getDeclRefExpr(), vd, isAlwaysUninit(val));
- else
- val = Initialized;
- }
- }
+void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *dr) {
+ switch (classification.get(dr)) {
+ case ClassifyRefs::Ignore:
+ break;
+ case ClassifyRefs::Use:
+ reportUse(dr, cast<VarDecl>(dr->getDecl()));
+ break;
+ case ClassifyRefs::Init:
+ vals[cast<VarDecl>(dr->getDecl())] = Initialized;
+ break;
+ case ClassifyRefs::SelfInit:
+ if (handler)
+ handler->handleSelfInit(cast<VarDecl>(dr->getDecl()));
+ break;
}
}
-void TransferFunctions::VisitUnaryOperator(clang::UnaryOperator *uo) {
- switch (uo->getOpcode()) {
- case clang::UO_PostDec:
- case clang::UO_PostInc:
- case clang::UO_PreDec:
- case clang::UO_PreInc: {
- const FindVarResult &res = findBlockVarDecl(uo->getSubExpr());
- if (const VarDecl *vd = res.getDecl()) {
- assert(res.getDeclRefExpr() == lastDR);
- // We null out lastDR to indicate we have fully processed it
- // and we don't want the auto-value setting in Visit().
- lastDR = 0;
-
- ValueVector::reference val = vals[vd];
- if (isUninitialized(val))
- reportUninit(res.getDeclRefExpr(), vd, isAlwaysUninit(val));
- }
- break;
- }
- default:
- break;
+void TransferFunctions::VisitBinaryOperator(BinaryOperator *BO) {
+ if (BO->getOpcode() == BO_Assign) {
+ FindVarResult Var = findVar(BO->getLHS());
+ if (const VarDecl *VD = Var.getDecl())
+ vals[VD] = Initialized;
}
}
-void TransferFunctions::VisitCastExpr(clang::CastExpr *ce) {
- if (ce->getCastKind() == CK_LValueToRValue) {
- const FindVarResult &res = findBlockVarDecl(ce->getSubExpr());
- if (res.getDecl()) {
- assert(res.getDeclRefExpr() == lastDR);
- lastLoad = ce;
- }
- }
- else if (ce->getCastKind() == CK_NoOp ||
- ce->getCastKind() == CK_LValueBitCast) {
- skipProcessUses = true;
- }
- else if (CStyleCastExpr *cse = dyn_cast<CStyleCastExpr>(ce)) {
- if (cse->getType()->isVoidType()) {
- // e.g. (void) x;
- if (lastLoad == cse->getSubExpr()) {
- // Squelch any detected load of an uninitialized value if
- // we cast it to void.
- lastLoad = 0;
- lastDR = 0;
+void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
+ for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end();
+ DI != DE; ++DI) {
+ VarDecl *VD = dyn_cast<VarDecl>(*DI);
+ if (VD && isTrackedVar(VD)) {
+ if (getSelfInitExpr(VD)) {
+ // If the initializer consists solely of a reference to itself, we
+ // explicitly mark the variable as uninitialized. This allows code
+ // like the following:
+ //
+ // int x = x;
+ //
+ // to deliberately leave a variable uninitialized. Different analysis
+ // clients can detect this pattern and adjust their reporting
+ // appropriately, but we need to continue to analyze subsequent uses
+ // of the variable.
+ vals[VD] = Uninitialized;
+ } else if (VD->getInit()) {
+ // Treat the new variable as initialized.
+ vals[VD] = Initialized;
+ } else {
+ // No initializer: the variable is now uninitialized. This matters
+ // for cases like:
+ // while (...) {
+ // int n;
+ // use(n);
+ // n = 0;
+ // }
+ // FIXME: Mark the variable as uninitialized whenever its scope is
+ // left, since its scope could be re-entered by a jump over the
+ // declaration.
+ vals[VD] = Uninitialized;
}
}
}
}
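
The two special cases in this visitor correspond to patterns like the following sketch; use is an invented helper:

void use(int);

void decl_demo(bool again) {
  int x = x;  // self-init: reported through handleSelfInit, and 'x' stays marked
              // Uninitialized so that later uses are still diagnosed
  use(x);     // reported as a use of an uninitialized value

  while (again) {
    int n;    // the DeclStmt marks 'n' Uninitialized again on each iteration
    use(n);   // so this use is reported even though 'n' is assigned below
    n = 0;
    again = false;
  }
}
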
-void TransferFunctions::Visit(clang::Stmt *s) {
- skipProcessUses = false;
- StmtVisitor<TransferFunctions>::Visit(s);
- if (!skipProcessUses)
- ProcessUses(s);
-}
-
-void TransferFunctions::ProcessUses(Stmt *s) {
- // This method is typically called after visiting a CFGElement statement
- // in the CFG. We delay processing of reporting many loads of uninitialized
- // values until here.
- if (lastLoad) {
- // If we just visited the lvalue-to-rvalue cast, there is nothing
- // left to do.
- if (lastLoad == s)
- return;
-
- const DeclRefExpr *DR =
- cast<DeclRefExpr>(stripCasts(ac.getASTContext(),
- lastLoad->getSubExpr()));
- const VarDecl *VD = cast<VarDecl>(DR->getDecl());
-
- // If we reach here, we may have seen a load of an uninitialized value
- // and it hasn't been casted to void or otherwise handled. In this
- // situation, report the incident.
- if (isUninitialized(vals[VD]))
- reportUninit(DR, VD, isAlwaysUninit(vals[VD]));
-
- lastLoad = 0;
-
- if (DR == lastDR) {
- lastDR = 0;
- return;
- }
- }
-
- // Any other uses of 'lastDR' involve taking an lvalue of variable.
- // In this case, it "escapes" the analysis.
- if (lastDR && lastDR != s) {
- vals[cast<VarDecl>(lastDR->getDecl())] = Initialized;
- lastDR = 0;
- }
-}
-
//------------------------------------------------------------------------====//
// High-level "driver" logic for uninitialized values analysis.
//====------------------------------------------------------------------------//
static bool runOnBlock(const CFGBlock *block, const CFG &cfg,
AnalysisDeclContext &ac, CFGBlockValues &vals,
+ const ClassifyRefs &classification,
llvm::BitVector &wasAnalyzed,
UninitVariablesHandler *handler = 0) {
-
wasAnalyzed[block->getBlockID()] = true;
-
- if (const BinaryOperator *b = getLogicalOperatorInChain(block)) {
- CFGBlock::const_pred_iterator itr = block->pred_begin();
- BVPair vA = vals.getValueVectors(*itr, false);
- ++itr;
- BVPair vB = vals.getValueVectors(*itr, false);
-
- BVPair valsAB;
-
- if (b->getOpcode() == BO_LAnd) {
- // Merge the 'F' bits from the first and second.
- vals.mergeIntoScratch(*(vA.second ? vA.second : vA.first), true);
- vals.mergeIntoScratch(*(vB.second ? vB.second : vB.first), false);
- valsAB.first = vA.first;
- valsAB.second = &vals.getScratch();
- } else {
- // Merge the 'T' bits from the first and second.
- assert(b->getOpcode() == BO_LOr);
- vals.mergeIntoScratch(*vA.first, true);
- vals.mergeIntoScratch(*vB.first, false);
- valsAB.first = &vals.getScratch();
- valsAB.second = vA.second ? vA.second : vA.first;
- }
- return vals.updateValueVectors(block, valsAB);
- }
-
- // Default behavior: merge in values of predecessor blocks.
vals.resetScratch();
+ // Merge in values of predecessor blocks.
bool isFirst = true;
for (CFGBlock::const_pred_iterator I = block->pred_begin(),
E = block->pred_end(); I != E; ++I) {
const CFGBlock *pred = *I;
if (wasAnalyzed[pred->getBlockID()]) {
- vals.mergeIntoScratch(vals.getValueVector(pred, block), isFirst);
+ vals.mergeIntoScratch(vals.getValueVector(pred), isFirst);
isFirst = false;
}
}
// Apply the transfer function.
- TransferFunctions tf(vals, cfg, ac, handler);
+ TransferFunctions tf(vals, cfg, block, ac, classification, handler);
for (CFGBlock::const_iterator I = block->begin(), E = block->end();
I != E; ++I) {
if (const CFGStmt *cs = dyn_cast<CFGStmt>(&*I)) {
tf.Visit(const_cast<Stmt*>(cs->getStmt()));
}
}
- tf.ProcessUses();
return vals.updateValueVectorWithScratch(block);
}
@@ -683,17 +722,16 @@ void clang::runUninitializedVariablesAnalysis(
stats.NumVariablesAnalyzed = vals.getNumEntries();
+ // Precompute which expressions are uses and which are initializations.
+ ClassifyRefs classification(ac);
+ cfg.VisitBlockStmts(classification);
+
// Mark all variables uninitialized at the entry.
const CFGBlock &entry = cfg.getEntry();
- for (CFGBlock::const_succ_iterator i = entry.succ_begin(),
- e = entry.succ_end(); i != e; ++i) {
- if (const CFGBlock *succ = *i) {
- ValueVector &vec = vals.getValueVector(&entry, succ);
- const unsigned n = vals.getNumEntries();
- for (unsigned j = 0; j < n ; ++j) {
- vec[j] = Uninitialized;
- }
- }
+ ValueVector &vec = vals.getValueVector(&entry);
+ const unsigned n = vals.getNumEntries();
+ for (unsigned j = 0; j < n ; ++j) {
+ vec[j] = Uninitialized;
}
   // Proceed with the worklist.
@@ -705,7 +743,8 @@ void clang::runUninitializedVariablesAnalysis(
while (const CFGBlock *block = worklist.dequeue()) {
// Did the block change?
- bool changed = runOnBlock(block, cfg, ac, vals, wasAnalyzed);
+ bool changed = runOnBlock(block, cfg, ac, vals,
+ classification, wasAnalyzed);
++stats.NumBlockVisits;
if (changed || !previouslyVisited[block->getBlockID()])
worklist.enqueueSuccessors(block);
@@ -716,7 +755,7 @@ void clang::runUninitializedVariablesAnalysis(
for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
const CFGBlock *block = *BI;
if (wasAnalyzed[block->getBlockID()]) {
- runOnBlock(block, cfg, ac, vals, wasAnalyzed, &handler);
+ runOnBlock(block, cfg, ac, vals, classification, wasAnalyzed, &handler);
++stats.NumBlockVisits;
}
}