Diffstat (limited to 'contrib/llvm/tools/clang/lib/Checker')
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/AdjustedReturnValueChecker.cpp | 96
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/AggExprVisitor.cpp | 55
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/ArrayBoundChecker.cpp | 91
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/AttrNonNullChecker.cpp | 112
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/BasicConstraintManager.cpp | 317
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp | 577
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.h | 41
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/BasicStore.cpp | 510
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/BasicValueFactory.cpp | 289
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/BugReporter.cpp | 1896
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/BugReporterVisitors.cpp | 423
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/BuiltinFunctionChecker.cpp | 75
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CFRefCount.cpp | 3620
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CMakeLists.txt | 74
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CallAndMessageChecker.cpp | 346
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CallInliner.cpp | 54
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CastSizeChecker.cpp | 82
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CastToStructChecker.cpp | 78
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CheckDeadStores.cpp | 289
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CheckObjCDealloc.cpp | 261
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CheckObjCInstMethSignature.cpp | 119
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CheckSecuritySyntaxOnly.cpp | 494
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CheckSizeofPointer.cpp | 71
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/Checker.cpp | 35
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CocoaConventions.cpp | 195
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/DereferenceChecker.cpp | 156
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/DivZeroChecker.cpp | 85
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/Environment.cpp | 191
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/ExplodedGraph.cpp | 281
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/FixedAddressChecker.cpp | 71
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/FlatStore.cpp | 168
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/GRBlockCounter.cpp | 85
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/GRCXXExprEngine.cpp | 246
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/GRCoreEngine.cpp | 722
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/GRExprEngine.cpp | 3481
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.cpp | 41
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h | 26
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h | 52
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/GRState.cpp | 370
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/LLVMConventionsChecker.cpp | 335
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/MacOSXAPIChecker.cpp | 141
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/Makefile | 21
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/MallocChecker.cpp | 367
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/ManagerRegistry.cpp | 20
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/MemRegion.cpp | 807
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/NSAutoreleasePoolChecker.cpp | 86
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/NSErrorChecker.cpp | 237
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/NoReturnFunctionChecker.cpp | 79
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/OSAtomicChecker.cpp | 196
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/ObjCUnusedIVarsChecker.cpp | 161
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/PathDiagnostic.cpp | 281
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/PointerArithChecker.cpp | 72
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/PointerSubChecker.cpp | 78
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/PthreadLockChecker.cpp | 141
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/RangeConstraintManager.cpp | 359
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/RegionStore.cpp | 1917
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/ReturnPointerRangeChecker.cpp | 97
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/ReturnStackAddressChecker.cpp | 125
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/ReturnUndefChecker.cpp | 67
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/SVals.cpp | 347
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/SValuator.cpp | 157
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.cpp | 249
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.h | 83
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/SimpleSValuator.cpp | 434
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/Store.cpp | 335
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/SymbolManager.cpp | 231
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/UndefBranchChecker.cpp | 118
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/UndefCapturedBlockVarChecker.cpp | 101
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/UndefResultChecker.cpp | 86
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/UndefinedArraySubscriptChecker.cpp | 56
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/UndefinedAssignmentChecker.cpp | 95
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/UnixAPIChecker.cpp | 222
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/VLASizeChecker.cpp | 96
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/ValueManager.cpp | 149
74 files changed, 24521 insertions(+), 0 deletions(-)
diff --git a/contrib/llvm/tools/clang/lib/Checker/AdjustedReturnValueChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/AdjustedReturnValueChecker.cpp
new file mode 100644
index 0000000..b92f2e7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/AdjustedReturnValueChecker.cpp
@@ -0,0 +1,96 @@
+//== AdjustedReturnValueChecker.cpp -----------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines AdjustedReturnValueChecker, a simple check to see if the
+// return value of a function call is different from the one the caller thinks
+// it is.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+
+using namespace clang;
+
+namespace {
+class AdjustedReturnValueChecker :
+ public CheckerVisitor<AdjustedReturnValueChecker> {
+public:
+ AdjustedReturnValueChecker() {}
+
+ void PostVisitCallExpr(CheckerContext &C, const CallExpr *CE);
+
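+  // The address of the function-local static below is unique to this
+  // checker class; the engine compares such tags by pointer identity only,
+  // so the int's value is never used.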
+ static void *getTag() {
+ static int x = 0; return &x;
+ }
+};
+}
+
+void clang::RegisterAdjustedReturnValueChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new AdjustedReturnValueChecker());
+}
+
+void AdjustedReturnValueChecker::PostVisitCallExpr(CheckerContext &C,
+ const CallExpr *CE) {
+
+ // Get the result type of the call.
+ QualType expectedResultTy = CE->getType();
+
+ // Fetch the signature of the called function.
+ const GRState *state = C.getState();
+
+ SVal V = state->getSVal(CE);
+
+ if (V.isUnknown())
+ return;
+
+ // Casting to void? Discard the value.
+ if (expectedResultTy->isVoidType()) {
+ C.GenerateNode(state->BindExpr(CE, UnknownVal()));
+ return;
+ }
+
+ const MemRegion *callee = state->getSVal(CE->getCallee()).getAsRegion();
+ if (!callee)
+ return;
+
+ QualType actualResultTy;
+
+ if (const FunctionTextRegion *FT = dyn_cast<FunctionTextRegion>(callee)) {
+ const FunctionDecl *FD = FT->getDecl();
+ actualResultTy = FD->getResultType();
+ }
+ else if (const BlockDataRegion *BD = dyn_cast<BlockDataRegion>(callee)) {
+ const BlockTextRegion *BR = BD->getCodeRegion();
+ const BlockPointerType *BT =
+ BR->getLocationType(C.getASTContext())->getAs<BlockPointerType>();
+ const FunctionType *FT = BT->getPointeeType()->getAs<FunctionType>();
+ actualResultTy = FT->getResultType();
+ }
+
+ // Can this happen?
+ if (actualResultTy.isNull())
+ return;
+
+ // For now, ignore references.
+ if (actualResultTy->getAs<ReferenceType>())
+ return;
+
+
+ // Are they the same?
+ if (expectedResultTy != actualResultTy) {
+    // FIXME: Do more checking and actually emit an error. At least performing
+ // the cast avoids some assertion failures elsewhere.
+ SValuator &SVator = C.getSValuator();
+ V = SVator.EvalCast(V, expectedResultTy, actualResultTy);
+ C.GenerateNode(state->BindExpr(CE, V));
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/AggExprVisitor.cpp b/contrib/llvm/tools/clang/lib/Checker/AggExprVisitor.cpp
new file mode 100644
index 0000000..343afec
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/AggExprVisitor.cpp
@@ -0,0 +1,55 @@
+//=-- AggExprVisitor.cpp - evaluating expressions of C++ class type -*- C++ -*-=
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AggExprVisitor class, which contains lots of
+// boilerplate code for evaluating expressions of C++ class type.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/AST/StmtVisitor.h"
+
+using namespace clang;
+
+namespace {
+class AggExprVisitor : public StmtVisitor<AggExprVisitor> {
+ SVal DestPtr;
+ ExplodedNode *Pred;
+ ExplodedNodeSet &DstSet;
+ GRExprEngine &Eng;
+
+public:
+ AggExprVisitor(SVal dest, ExplodedNode *N, ExplodedNodeSet &dst,
+ GRExprEngine &eng)
+ : DestPtr(dest), Pred(N), DstSet(dst), Eng(eng) {}
+
+ void VisitCastExpr(CastExpr *E);
+ void VisitCXXConstructExpr(CXXConstructExpr *E);
+};
+}
+
+void AggExprVisitor::VisitCastExpr(CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ assert(0 && "Unhandled cast kind");
+ case CastExpr::CK_NoOp:
+ case CastExpr::CK_ConstructorConversion:
+ Visit(E->getSubExpr());
+ break;
+ }
+}
+
+void AggExprVisitor::VisitCXXConstructExpr(CXXConstructExpr *E) {
+ Eng.VisitCXXConstructExpr(E, DestPtr, Pred, DstSet);
+}
+
+void GRExprEngine::VisitAggExpr(const Expr *E, SVal Dest, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ AggExprVisitor(Dest, Pred, Dst, *this).Visit(const_cast<Expr *>(E));
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/ArrayBoundChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/ArrayBoundChecker.cpp
new file mode 100644
index 0000000..746b3f9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/ArrayBoundChecker.cpp
@@ -0,0 +1,91 @@
+//== ArrayBoundChecker.cpp -------------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ArrayBoundChecker, a path-sensitive check that looks
+// for out-of-bound array element accesses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+
+using namespace clang;
+
+namespace {
+class ArrayBoundChecker :
+ public CheckerVisitor<ArrayBoundChecker> {
+ BuiltinBug *BT;
+public:
+ ArrayBoundChecker() : BT(0) {}
+ static void *getTag();
+ void VisitLocation(CheckerContext &C, const Stmt *S, SVal l);
+};
+}
+
+void clang::RegisterArrayBoundChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new ArrayBoundChecker());
+}
+
+void *ArrayBoundChecker::getTag() {
+ static int x = 0; return &x;
+}
+
+void ArrayBoundChecker::VisitLocation(CheckerContext &C, const Stmt *S, SVal l){
+ // Check for out of bound array element access.
+ const MemRegion *R = l.getAsRegion();
+ if (!R)
+ return;
+
+ R = R->StripCasts();
+
+ const ElementRegion *ER = dyn_cast<ElementRegion>(R);
+ if (!ER)
+ return;
+
+ // Get the index of the accessed element.
+ DefinedOrUnknownSVal &Idx = cast<DefinedOrUnknownSVal>(ER->getIndex());
+
+ const GRState *state = C.getState();
+
+ // Get the size of the array.
+ DefinedOrUnknownSVal NumElements
+ = C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
+ ER->getValueType(C.getASTContext()));
+
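+  // Ask the constraint manager for both branches: a state where the index
+  // is in bounds and one where it is out of bounds. Only the combination
+  // (out-of-bounds feasible, in-bounds infeasible) is a definite error;
+  // if both are feasible the index is merely unconstrained.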
+ const GRState *StInBound = state->AssumeInBound(Idx, NumElements, true);
+ const GRState *StOutBound = state->AssumeInBound(Idx, NumElements, false);
+ if (StOutBound && !StInBound) {
+ ExplodedNode *N = C.GenerateSink(StOutBound);
+ if (!N)
+ return;
+
+ if (!BT)
+ BT = new BuiltinBug("Out-of-bound array access",
+ "Access out-of-bound array element (buffer overflow)");
+
+ // FIXME: It would be nice to eventually make this diagnostic more clear,
+ // e.g., by referencing the original declaration or by saying *why* this
+ // reference is outside the range.
+
+ // Generate a report for this bug.
+ RangedBugReport *report =
+ new RangedBugReport(*BT, BT->getDescription(), N);
+
+ report->addRange(S->getSourceRange());
+ C.EmitReport(report);
+ return;
+ }
+
+  // Array bound check succeeded. From this point forward the array bound
+  // check should always succeed.
+ assert(StInBound);
+ C.addTransition(StInBound);
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/AttrNonNullChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/AttrNonNullChecker.cpp
new file mode 100644
index 0000000..309a74c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/AttrNonNullChecker.cpp
@@ -0,0 +1,112 @@
+//===--- AttrNonNullChecker.h - Undefined arguments checker ----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines AttrNonNullChecker, a builtin check in GRExprEngine that
+// performs checks on arguments declared with the 'nonnull' attribute.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+
+using namespace clang;
+
+namespace {
+class AttrNonNullChecker
+ : public CheckerVisitor<AttrNonNullChecker> {
+ BugType *BT;
+public:
+ AttrNonNullChecker() : BT(0) {}
+ static void *getTag() {
+ static int x = 0;
+ return &x;
+ }
+ void PreVisitCallExpr(CheckerContext &C, const CallExpr *CE);
+};
+} // end anonymous namespace
+
+void clang::RegisterAttrNonNullChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new AttrNonNullChecker());
+}
+
+void AttrNonNullChecker::PreVisitCallExpr(CheckerContext &C,
+ const CallExpr *CE) {
+ const GRState *state = C.getState();
+
+ // Check if the callee has a 'nonnull' attribute.
+ SVal X = state->getSVal(CE->getCallee());
+
+ const FunctionDecl* FD = X.getAsFunctionDecl();
+ if (!FD)
+ return;
+
+ const NonNullAttr* Att = FD->getAttr<NonNullAttr>();
+ if (!Att)
+ return;
+
+ // Iterate through the arguments of CE and check them for null.
+ unsigned idx = 0;
+
+ for (CallExpr::const_arg_iterator I=CE->arg_begin(), E=CE->arg_end(); I!=E;
+ ++I, ++idx) {
+
+ if (!Att->isNonNull(idx))
+ continue;
+
+ const SVal &V = state->getSVal(*I);
+ const DefinedSVal *DV = dyn_cast<DefinedSVal>(&V);
+
+ if (!DV)
+ continue;
+
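+    // Bifurcate the state on the argument value: 'stateNotNull' assumes the
+    // pointer is non-null, 'stateNull' assumes it is null; either may be
+    // NULL when that assumption is infeasible.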
+ ConstraintManager &CM = C.getConstraintManager();
+ const GRState *stateNotNull, *stateNull;
+ llvm::tie(stateNotNull, stateNull) = CM.AssumeDual(state, *DV);
+
+ if (stateNull && !stateNotNull) {
+ // Generate an error node. Check for a null node in case
+ // we cache out.
+ if (ExplodedNode *errorNode = C.GenerateSink(stateNull)) {
+
+ // Lazily allocate the BugType object if it hasn't already been
+ // created. Ownership is transferred to the BugReporter object once
+        // the BugReport is passed to 'EmitReport'.
+ if (!BT)
+ BT = new BugType("Argument with 'nonnull' attribute passed null",
+ "API");
+
+ EnhancedBugReport *R =
+ new EnhancedBugReport(*BT,
+ "Null pointer passed as an argument to a "
+ "'nonnull' parameter", errorNode);
+
+ // Highlight the range of the argument that was null.
+ const Expr *arg = *I;
+ R->addRange(arg->getSourceRange());
+ R->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, arg);
+
+ // Emit the bug report.
+ C.EmitReport(R);
+ }
+
+ // Always return. Either we cached out or we just emitted an error.
+ return;
+ }
+
+ // If a pointer value passed the check we should assume that it is
+ // indeed not null from this point forward.
+ assert(stateNotNull);
+ state = stateNotNull;
+ }
+
+  // If we reach here, all of the arguments passed the nonnull check.
+  // If 'state' has been updated, generate a new node.
+ C.addTransition(state);
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicConstraintManager.cpp b/contrib/llvm/tools/clang/lib/Checker/BasicConstraintManager.cpp
new file mode 100644
index 0000000..e89546e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/BasicConstraintManager.cpp
@@ -0,0 +1,317 @@
+//== BasicConstraintManager.cpp - Manage basic constraints.------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BasicConstraintManager, a class that tracks simple
+// equality and inequality constraints on symbolic values of GRState.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SimpleConstraintManager.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "clang/Checker/PathSensitive/GRTransferFuncs.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+
+namespace { class ConstNotEq {}; }
+namespace { class ConstEq {}; }
+
+typedef llvm::ImmutableMap<SymbolRef,GRState::IntSetTy> ConstNotEqTy;
+typedef llvm::ImmutableMap<SymbolRef,const llvm::APSInt*> ConstEqTy;
+
+static int ConstEqIndex = 0;
+static int ConstNotEqIndex = 0;
+
+namespace clang {
+template<>
+struct GRStateTrait<ConstNotEq> : public GRStatePartialTrait<ConstNotEqTy> {
+ static inline void* GDMIndex() { return &ConstNotEqIndex; }
+};
+
+template<>
+struct GRStateTrait<ConstEq> : public GRStatePartialTrait<ConstEqTy> {
+ static inline void* GDMIndex() { return &ConstEqIndex; }
+};
+}
+
+namespace {
+// BasicConstraintManager only tracks equality and inequality constraints of
+// constants and integer variables.
+class BasicConstraintManager
+ : public SimpleConstraintManager {
+ GRState::IntSetTy::Factory ISetFactory;
+public:
+ BasicConstraintManager(GRStateManager &statemgr, GRSubEngine &subengine)
+ : SimpleConstraintManager(subengine),
+ ISetFactory(statemgr.getAllocator()) {}
+
+ const GRState* AssumeSymNE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& V);
+
+ const GRState* AssumeSymEQ(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& V);
+
+ const GRState* AssumeSymLT(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& V);
+
+ const GRState* AssumeSymGT(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& V);
+
+ const GRState* AssumeSymGE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& V);
+
+ const GRState* AssumeSymLE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& V);
+
+ const GRState* AddEQ(const GRState* state, SymbolRef sym, const llvm::APSInt& V);
+
+ const GRState* AddNE(const GRState* state, SymbolRef sym, const llvm::APSInt& V);
+
+ const llvm::APSInt* getSymVal(const GRState* state, SymbolRef sym) const;
+ bool isNotEqual(const GRState* state, SymbolRef sym, const llvm::APSInt& V)
+ const;
+ bool isEqual(const GRState* state, SymbolRef sym, const llvm::APSInt& V)
+ const;
+
+ const GRState* RemoveDeadBindings(const GRState* state, SymbolReaper& SymReaper);
+
+ void print(const GRState* state, llvm::raw_ostream& Out,
+ const char* nl, const char *sep);
+};
+
+} // end anonymous namespace
+
+ConstraintManager* clang::CreateBasicConstraintManager(GRStateManager& statemgr,
+ GRSubEngine &subengine) {
+ return new BasicConstraintManager(statemgr, subengine);
+}
+
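+// Each AssumeSym* method below follows the same pattern: if the symbol is
+// already bound to a constant, the assumption reduces to a feasibility test;
+// otherwise the recorded constraint sets are consulted, and a new constraint
+// is added only when neither settles the question.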
+const GRState*
+BasicConstraintManager::AssumeSymNE(const GRState *state, SymbolRef sym,
+ const llvm::APSInt& V) {
+ // First, determine if sym == X, where X != V.
+ if (const llvm::APSInt* X = getSymVal(state, sym)) {
+ bool isFeasible = (*X != V);
+ return isFeasible ? state : NULL;
+ }
+
+ // Second, determine if sym != V.
+ if (isNotEqual(state, sym, V))
+ return state;
+
+ // If we reach here, sym is not a constant and we don't know if it is != V.
+ // Make that assumption.
+ return AddNE(state, sym, V);
+}
+
+const GRState *BasicConstraintManager::AssumeSymEQ(const GRState *state,
+ SymbolRef sym,
+ const llvm::APSInt &V) {
+ // First, determine if sym == X, where X != V.
+ if (const llvm::APSInt* X = getSymVal(state, sym)) {
+ bool isFeasible = *X == V;
+ return isFeasible ? state : NULL;
+ }
+
+ // Second, determine if sym != V.
+ if (isNotEqual(state, sym, V))
+ return NULL;
+
+ // If we reach here, sym is not a constant and we don't know if it is == V.
+ // Make that assumption.
+ return AddEQ(state, sym, V);
+}
+
+// This logic will be handled in another ConstraintManager.
+const GRState *BasicConstraintManager::AssumeSymLT(const GRState *state,
+ SymbolRef sym,
+ const llvm::APSInt& V) {
+ // Is 'V' the smallest possible value?
+ if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) {
+ // sym cannot be any value less than 'V'. This path is infeasible.
+ return NULL;
+ }
+
+  // FIXME: For now, treat assuming x < y the same as assuming sym != V.
+ return AssumeSymNE(state, sym, V);
+}
+
+const GRState *BasicConstraintManager::AssumeSymGT(const GRState *state,
+ SymbolRef sym,
+ const llvm::APSInt& V) {
+
+ // Is 'V' the largest possible value?
+ if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) {
+ // sym cannot be any value greater than 'V'. This path is infeasible.
+ return NULL;
+ }
+
+  // FIXME: For now, treat assuming x > y the same as assuming sym != V.
+ return AssumeSymNE(state, sym, V);
+}
+
+const GRState *BasicConstraintManager::AssumeSymGE(const GRState *state,
+ SymbolRef sym,
+ const llvm::APSInt &V) {
+
+ // Reject a path if the value of sym is a constant X and !(X >= V).
+ if (const llvm::APSInt *X = getSymVal(state, sym)) {
+ bool isFeasible = *X >= V;
+ return isFeasible ? state : NULL;
+ }
+
+ // Sym is not a constant, but it is worth looking to see if V is the
+ // maximum integer value.
+ if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) {
+ // If we know that sym != V, then this condition is infeasible since
+ // there is no other value greater than V.
+ bool isFeasible = !isNotEqual(state, sym, V);
+
+ // If the path is still feasible then as a consequence we know that
+ // 'sym == V' because we cannot have 'sym > V' (no larger values).
+ // Add this constraint.
+ return isFeasible ? AddEQ(state, sym, V) : NULL;
+ }
+
+ return state;
+}
+
+const GRState*
+BasicConstraintManager::AssumeSymLE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& V) {
+
+ // Reject a path if the value of sym is a constant X and !(X <= V).
+ if (const llvm::APSInt* X = getSymVal(state, sym)) {
+ bool isFeasible = *X <= V;
+ return isFeasible ? state : NULL;
+ }
+
+ // Sym is not a constant, but it is worth looking to see if V is the
+ // minimum integer value.
+ if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) {
+ // If we know that sym != V, then this condition is infeasible since
+ // there is no other value less than V.
+ bool isFeasible = !isNotEqual(state, sym, V);
+
+ // If the path is still feasible then as a consequence we know that
+ // 'sym == V' because we cannot have 'sym < V' (no smaller values).
+ // Add this constraint.
+ return isFeasible ? AddEQ(state, sym, V) : NULL;
+ }
+
+ return state;
+}
+
+const GRState* BasicConstraintManager::AddEQ(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& V) {
+ // Create a new state with the old binding replaced.
+ return state->set<ConstEq>(sym, &V);
+}
+
+const GRState* BasicConstraintManager::AddNE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& V) {
+
+ // First, retrieve the NE-set associated with the given symbol.
+ ConstNotEqTy::data_type* T = state->get<ConstNotEq>(sym);
+ GRState::IntSetTy S = T ? *T : ISetFactory.GetEmptySet();
+
+ // Now add V to the NE set.
+ S = ISetFactory.Add(S, &V);
+
+ // Create a new state with the old binding replaced.
+ return state->set<ConstNotEq>(sym, S);
+}
+
+const llvm::APSInt* BasicConstraintManager::getSymVal(const GRState* state,
+ SymbolRef sym) const {
+ const ConstEqTy::data_type* T = state->get<ConstEq>(sym);
+ return T ? *T : NULL;
+}
+
+bool BasicConstraintManager::isNotEqual(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& V) const {
+
+ // Retrieve the NE-set associated with the given symbol.
+ const ConstNotEqTy::data_type* T = state->get<ConstNotEq>(sym);
+
+ // See if V is present in the NE-set.
+ return T ? T->contains(&V) : false;
+}
+
+bool BasicConstraintManager::isEqual(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& V) const {
+ // Retrieve the EQ-set associated with the given symbol.
+ const ConstEqTy::data_type* T = state->get<ConstEq>(sym);
+ // See if V is present in the EQ-set.
+ return T ? **T == V : false;
+}
+
+/// Scan all symbols referenced by the constraints. If a symbol is no longer
+/// live according to the SymbolReaper, remove its constraints from the state.
+const GRState*
+BasicConstraintManager::RemoveDeadBindings(const GRState* state,
+ SymbolReaper& SymReaper) {
+
+ ConstEqTy CE = state->get<ConstEq>();
+ ConstEqTy::Factory& CEFactory = state->get_context<ConstEq>();
+
+ for (ConstEqTy::iterator I = CE.begin(), E = CE.end(); I!=E; ++I) {
+ SymbolRef sym = I.getKey();
+ if (SymReaper.maybeDead(sym)) CE = CEFactory.Remove(CE, sym);
+ }
+ state = state->set<ConstEq>(CE);
+
+ ConstNotEqTy CNE = state->get<ConstNotEq>();
+ ConstNotEqTy::Factory& CNEFactory = state->get_context<ConstNotEq>();
+
+ for (ConstNotEqTy::iterator I = CNE.begin(), E = CNE.end(); I != E; ++I) {
+ SymbolRef sym = I.getKey();
+ if (SymReaper.maybeDead(sym)) CNE = CNEFactory.Remove(CNE, sym);
+ }
+
+ return state->set<ConstNotEq>(CNE);
+}
+
+void BasicConstraintManager::print(const GRState* state, llvm::raw_ostream& Out,
+ const char* nl, const char *sep) {
+ // Print equality constraints.
+
+ ConstEqTy CE = state->get<ConstEq>();
+
+ if (!CE.isEmpty()) {
+ Out << nl << sep << "'==' constraints:";
+ for (ConstEqTy::iterator I = CE.begin(), E = CE.end(); I!=E; ++I)
+ Out << nl << " $" << I.getKey() << " : " << *I.getData();
+ }
+
+ // Print != constraints.
+
+ ConstNotEqTy CNE = state->get<ConstNotEq>();
+
+ if (!CNE.isEmpty()) {
+ Out << nl << sep << "'!=' constraints:";
+
+ for (ConstNotEqTy::iterator I = CNE.begin(), EI = CNE.end(); I!=EI; ++I) {
+ Out << nl << " $" << I.getKey() << " : ";
+ bool isFirst = true;
+
+ GRState::IntSetTy::iterator J = I.getData().begin(),
+ EJ = I.getData().end();
+
+ for ( ; J != EJ; ++J) {
+ if (isFirst) isFirst = false;
+ else Out << ", ";
+
+ Out << (*J)->getSExtValue(); // Hack: should print to raw_ostream.
+ }
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp b/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp
new file mode 100644
index 0000000..b852e2a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp
@@ -0,0 +1,577 @@
+//== BasicObjCFoundationChecks.cpp - Simple Apple-Foundation checks -*- C++ -*--
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BasicObjCFoundationChecks, a class that encapsulates
+// a set of simple checks to run on Objective-C code using Apple's Foundation
+// classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BasicObjCFoundationChecks.h"
+
+#include "clang/Checker/PathSensitive/ExplodedGraph.h"
+#include "clang/Checker/PathSensitive/GRSimpleAPICheck.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/MemRegion.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/Checkers/LocalCheckers.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ASTContext.h"
+
+using namespace clang;
+
+static const ObjCInterfaceType* GetReceiverType(const ObjCMessageExpr* ME) {
+ QualType T;
+ switch (ME->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ T = ME->getInstanceReceiver()->getType();
+ break;
+
+ case ObjCMessageExpr::SuperInstance:
+ T = ME->getSuperType();
+ break;
+
+ case ObjCMessageExpr::Class:
+ case ObjCMessageExpr::SuperClass:
+ return 0;
+ }
+
+ if (const ObjCObjectPointerType *PT = T->getAs<ObjCObjectPointerType>())
+ return PT->getInterfaceType();
+
+ return NULL;
+}
+
+static const char* GetReceiverNameType(const ObjCMessageExpr* ME) {
+ if (const ObjCInterfaceType *ReceiverType = GetReceiverType(ME))
+ return ReceiverType->getDecl()->getIdentifier()->getNameStart();
+ return NULL;
+}
+
+namespace {
+
+class APIMisuse : public BugType {
+public:
+ APIMisuse(const char* name) : BugType(name, "API Misuse (Apple)") {}
+};
+
+class BasicObjCFoundationChecks : public GRSimpleAPICheck {
+ APIMisuse *BT;
+ BugReporter& BR;
+ ASTContext &Ctx;
+
+  bool isNSString(const ObjCInterfaceType *T, llvm::StringRef ClassName);
+ bool AuditNSString(ExplodedNode* N, const ObjCMessageExpr* ME);
+
+ void Warn(ExplodedNode* N, const Expr* E, const std::string& s);
+ void WarnNilArg(ExplodedNode* N, const Expr* E);
+
+ bool CheckNilArg(ExplodedNode* N, unsigned Arg);
+
+public:
+ BasicObjCFoundationChecks(ASTContext& ctx, BugReporter& br)
+ : BT(0), BR(br), Ctx(ctx) {}
+
+ bool Audit(ExplodedNode* N, GRStateManager&);
+
+private:
+ void WarnNilArg(ExplodedNode* N, const ObjCMessageExpr* ME, unsigned Arg) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "Argument to '" << GetReceiverNameType(ME) << "' method '"
+ << ME->getSelector().getAsString() << "' cannot be nil.";
+
+ // Lazily create the BugType object for NilArg. This will be owned
+ // by the BugReporter object 'BR' once we call BR.EmitWarning.
+ if (!BT) BT = new APIMisuse("nil argument");
+
+ RangedBugReport *R = new RangedBugReport(*BT, os.str(), N);
+ R->addRange(ME->getArg(Arg)->getSourceRange());
+ BR.EmitReport(R);
+ }
+};
+
+} // end anonymous namespace
+
+
+GRSimpleAPICheck*
+clang::CreateBasicObjCFoundationChecks(ASTContext& Ctx, BugReporter& BR) {
+ return new BasicObjCFoundationChecks(Ctx, BR);
+}
+
+
+
+bool BasicObjCFoundationChecks::Audit(ExplodedNode* N,
+ GRStateManager&) {
+
+ const ObjCMessageExpr* ME =
+ cast<ObjCMessageExpr>(cast<PostStmt>(N->getLocation()).getStmt());
+
+ const ObjCInterfaceType *ReceiverType = GetReceiverType(ME);
+
+ if (!ReceiverType)
+ return false;
+
+ if (isNSString(ReceiverType,
+ ReceiverType->getDecl()->getIdentifier()->getName()))
+ return AuditNSString(N, ME);
+
+ return false;
+}
+
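+// Treat any concrete (constant) location as nil; in practice the only
+// constant pointer value these Foundation checks encounter is the nil
+// literal.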
+static inline bool isNil(SVal X) {
+ return isa<loc::ConcreteInt>(X);
+}
+
+//===----------------------------------------------------------------------===//
+// Error reporting.
+//===----------------------------------------------------------------------===//
+
+bool BasicObjCFoundationChecks::CheckNilArg(ExplodedNode* N, unsigned Arg) {
+ const ObjCMessageExpr* ME =
+ cast<ObjCMessageExpr>(cast<PostStmt>(N->getLocation()).getStmt());
+
+ const Expr * E = ME->getArg(Arg);
+
+ if (isNil(N->getState()->getSVal(E))) {
+ WarnNilArg(N, ME, Arg);
+ return true;
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// NSString checking.
+//===----------------------------------------------------------------------===//
+
+bool BasicObjCFoundationChecks::isNSString(const ObjCInterfaceType *T,
+ llvm::StringRef ClassName) {
+ return ClassName == "NSString" || ClassName == "NSMutableString";
+}
+
+bool BasicObjCFoundationChecks::AuditNSString(ExplodedNode* N,
+ const ObjCMessageExpr* ME) {
+
+ Selector S = ME->getSelector();
+
+ if (S.isUnarySelector())
+ return false;
+
+ // FIXME: This is going to be really slow doing these checks with
+ // lexical comparisons.
+
+ std::string NameStr = S.getAsString();
+ llvm::StringRef Name(NameStr);
+ assert(!Name.empty());
+
+ // FIXME: Checking for initWithFormat: will not work in most cases
+ // yet because [NSString alloc] returns id, not NSString*. We will
+ // need support for tracking expected-type information in the analyzer
+ // to find these errors.
+ if (Name == "caseInsensitiveCompare:" ||
+ Name == "compare:" ||
+ Name == "compare:options:" ||
+ Name == "compare:options:range:" ||
+ Name == "compare:options:range:locale:" ||
+ Name == "componentsSeparatedByCharactersInSet:" ||
+ Name == "initWithFormat:")
+ return CheckNilArg(N, 0);
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Error reporting.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class AuditCFNumberCreate : public GRSimpleAPICheck {
+ APIMisuse* BT;
+
+ // FIXME: Either this should be refactored into GRSimpleAPICheck, or
+ // it should always be passed with a call to Audit. The latter
+ // approach makes this class more stateless.
+ ASTContext& Ctx;
+ IdentifierInfo* II;
+ BugReporter& BR;
+
+public:
+ AuditCFNumberCreate(ASTContext& ctx, BugReporter& br)
+ : BT(0), Ctx(ctx), II(&Ctx.Idents.get("CFNumberCreate")), BR(br){}
+
+ ~AuditCFNumberCreate() {}
+
+ bool Audit(ExplodedNode* N, GRStateManager&);
+
+private:
+ void AddError(const TypedRegion* R, const Expr* Ex, ExplodedNode *N,
+ uint64_t SourceSize, uint64_t TargetSize, uint64_t NumberKind);
+};
+} // end anonymous namespace
+
+enum CFNumberType {
+ kCFNumberSInt8Type = 1,
+ kCFNumberSInt16Type = 2,
+ kCFNumberSInt32Type = 3,
+ kCFNumberSInt64Type = 4,
+ kCFNumberFloat32Type = 5,
+ kCFNumberFloat64Type = 6,
+ kCFNumberCharType = 7,
+ kCFNumberShortType = 8,
+ kCFNumberIntType = 9,
+ kCFNumberLongType = 10,
+ kCFNumberLongLongType = 11,
+ kCFNumberFloatType = 12,
+ kCFNumberDoubleType = 13,
+ kCFNumberCFIndexType = 14,
+ kCFNumberNSIntegerType = 15,
+ kCFNumberCGFloatType = 16
+};
+
+namespace {
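+  // A tiny local optional-value wrapper: GetCFNumberSize below returns an
+  // unknown size for the platform-dependent CFNumber kinds it cannot map to
+  // a fixed-width type.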
+ template<typename T>
+ class Optional {
+ bool IsKnown;
+ T Val;
+ public:
+ Optional() : IsKnown(false), Val(0) {}
+ Optional(const T& val) : IsKnown(true), Val(val) {}
+
+ bool isKnown() const { return IsKnown; }
+
+ const T& getValue() const {
+ assert (isKnown());
+ return Val;
+ }
+
+ operator const T&() const {
+ return getValue();
+ }
+ };
+}
+
+static Optional<uint64_t> GetCFNumberSize(ASTContext& Ctx, uint64_t i) {
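+  // The first six CFNumber kinds (kCFNumberSInt8Type..kCFNumberFloat64Type)
+  // have fixed bit widths; the enum starts at 1, hence the i-1 indexing
+  // into this table.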
+ static const unsigned char FixedSize[] = { 8, 16, 32, 64, 32, 64 };
+
+ if (i < kCFNumberCharType)
+ return FixedSize[i-1];
+
+ QualType T;
+
+ switch (i) {
+ case kCFNumberCharType: T = Ctx.CharTy; break;
+ case kCFNumberShortType: T = Ctx.ShortTy; break;
+ case kCFNumberIntType: T = Ctx.IntTy; break;
+ case kCFNumberLongType: T = Ctx.LongTy; break;
+ case kCFNumberLongLongType: T = Ctx.LongLongTy; break;
+ case kCFNumberFloatType: T = Ctx.FloatTy; break;
+ case kCFNumberDoubleType: T = Ctx.DoubleTy; break;
+ case kCFNumberCFIndexType:
+ case kCFNumberNSIntegerType:
+ case kCFNumberCGFloatType:
+ // FIXME: We need a way to map from names to Type*.
+ default:
+ return Optional<uint64_t>();
+ }
+
+ return Ctx.getTypeSize(T);
+}
+
+#if 0
+static const char* GetCFNumberTypeStr(uint64_t i) {
+ static const char* Names[] = {
+ "kCFNumberSInt8Type",
+ "kCFNumberSInt16Type",
+ "kCFNumberSInt32Type",
+ "kCFNumberSInt64Type",
+ "kCFNumberFloat32Type",
+ "kCFNumberFloat64Type",
+ "kCFNumberCharType",
+ "kCFNumberShortType",
+ "kCFNumberIntType",
+ "kCFNumberLongType",
+ "kCFNumberLongLongType",
+ "kCFNumberFloatType",
+ "kCFNumberDoubleType",
+ "kCFNumberCFIndexType",
+ "kCFNumberNSIntegerType",
+ "kCFNumberCGFloatType"
+ };
+
+ return i <= kCFNumberCGFloatType ? Names[i-1] : "Invalid CFNumberType";
+}
+#endif
+
+bool AuditCFNumberCreate::Audit(ExplodedNode* N,GRStateManager&){
+ const CallExpr* CE =
+ cast<CallExpr>(cast<PostStmt>(N->getLocation()).getStmt());
+ const Expr* Callee = CE->getCallee();
+ SVal CallV = N->getState()->getSVal(Callee);
+ const FunctionDecl* FD = CallV.getAsFunctionDecl();
+
+ if (!FD || FD->getIdentifier() != II || CE->getNumArgs()!=3)
+ return false;
+
+ // Get the value of the "theType" argument.
+ SVal TheTypeVal = N->getState()->getSVal(CE->getArg(1));
+
+ // FIXME: We really should allow ranges of valid theType values, and
+ // bifurcate the state appropriately.
+ nonloc::ConcreteInt* V = dyn_cast<nonloc::ConcreteInt>(&TheTypeVal);
+
+ if (!V)
+ return false;
+
+ uint64_t NumberKind = V->getValue().getLimitedValue();
+ Optional<uint64_t> TargetSize = GetCFNumberSize(Ctx, NumberKind);
+
+ // FIXME: In some cases we can emit an error.
+ if (!TargetSize.isKnown())
+ return false;
+
+ // Look at the value of the integer being passed by reference. Essentially
+ // we want to catch cases where the value passed in is not equal to the
+ // size of the type being created.
+ SVal TheValueExpr = N->getState()->getSVal(CE->getArg(2));
+
+ // FIXME: Eventually we should handle arbitrary locations. We can do this
+ // by having an enhanced memory model that does low-level typing.
+ loc::MemRegionVal* LV = dyn_cast<loc::MemRegionVal>(&TheValueExpr);
+
+ if (!LV)
+ return false;
+
+ const TypedRegion* R = dyn_cast<TypedRegion>(LV->StripCasts());
+
+ if (!R)
+ return false;
+
+ QualType T = Ctx.getCanonicalType(R->getValueType(Ctx));
+
+ // FIXME: If the pointee isn't an integer type, should we flag a warning?
+ // People can do weird stuff with pointers.
+
+ if (!T->isIntegerType())
+ return false;
+
+ uint64_t SourceSize = Ctx.getTypeSize(T);
+
+ // CHECK: is SourceSize == TargetSize
+
+ if (SourceSize == TargetSize)
+ return false;
+
+ AddError(R, CE->getArg(2), N, SourceSize, TargetSize, NumberKind);
+
+ // FIXME: We can actually create an abstract "CFNumber" object that has
+ // the bits initialized to the provided values.
+ return SourceSize < TargetSize;
+}
+
+void AuditCFNumberCreate::AddError(const TypedRegion* R, const Expr* Ex,
+ ExplodedNode *N,
+ uint64_t SourceSize, uint64_t TargetSize,
+ uint64_t NumberKind) {
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << (SourceSize == 8 ? "An " : "A ")
+ << SourceSize << " bit integer is used to initialize a CFNumber "
+ "object that represents "
+ << (TargetSize == 8 ? "an " : "a ")
+ << TargetSize << " bit integer. ";
+
+ if (SourceSize < TargetSize)
+ os << (TargetSize - SourceSize)
+ << " bits of the CFNumber value will be garbage." ;
+ else
+ os << (SourceSize - TargetSize)
+ << " bits of the input integer will be lost.";
+
+ // Lazily create the BugType object. This will be owned
+ // by the BugReporter object 'BR' once we call BR.EmitWarning.
+ if (!BT) BT = new APIMisuse("Bad use of CFNumberCreate");
+ RangedBugReport *report = new RangedBugReport(*BT, os.str(), N);
+ report->addRange(Ex->getSourceRange());
+ BR.EmitReport(report);
+}
+
+GRSimpleAPICheck*
+clang::CreateAuditCFNumberCreate(ASTContext& Ctx, BugReporter& BR) {
+ return new AuditCFNumberCreate(Ctx, BR);
+}
+
+//===----------------------------------------------------------------------===//
+// CFRetain/CFRelease auditing for null arguments.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class AuditCFRetainRelease : public GRSimpleAPICheck {
+ APIMisuse *BT;
+
+ // FIXME: Either this should be refactored into GRSimpleAPICheck, or
+ // it should always be passed with a call to Audit. The latter
+ // approach makes this class more stateless.
+ ASTContext& Ctx;
+ IdentifierInfo *Retain, *Release;
+ BugReporter& BR;
+
+public:
+ AuditCFRetainRelease(ASTContext& ctx, BugReporter& br)
+ : BT(0), Ctx(ctx),
+ Retain(&Ctx.Idents.get("CFRetain")), Release(&Ctx.Idents.get("CFRelease")),
+ BR(br){}
+
+ ~AuditCFRetainRelease() {}
+
+ bool Audit(ExplodedNode* N, GRStateManager&);
+};
+} // end anonymous namespace
+
+
+bool AuditCFRetainRelease::Audit(ExplodedNode* N, GRStateManager&) {
+ const CallExpr* CE = cast<CallExpr>(cast<PostStmt>(N->getLocation()).getStmt());
+
+  // If the CallExpr doesn't have exactly 1 argument, just give up checking.
+ if (CE->getNumArgs() != 1)
+ return false;
+
+ // Check if we called CFRetain/CFRelease.
+ const GRState* state = N->getState();
+ SVal X = state->getSVal(CE->getCallee());
+ const FunctionDecl* FD = X.getAsFunctionDecl();
+
+ if (!FD)
+ return false;
+
+ const IdentifierInfo *FuncII = FD->getIdentifier();
+ if (!(FuncII == Retain || FuncII == Release))
+ return false;
+
+ // Finally, check if the argument is NULL.
+ // FIXME: We should be able to bifurcate the state here, as a successful
+ // check will result in the value not being NULL afterwards.
+  // FIXME: Need a way to register visitors for the BugReporter. Would like
+ // to benefit from the same diagnostics that regular null dereference
+ // reporting has.
+ if (state->getStateManager().isEqual(state, CE->getArg(0), 0)) {
+ if (!BT)
+ BT = new APIMisuse("null passed to CFRetain/CFRelease");
+
+ const char *description = (FuncII == Retain)
+ ? "Null pointer argument in call to CFRetain"
+ : "Null pointer argument in call to CFRelease";
+
+ RangedBugReport *report = new RangedBugReport(*BT, description, N);
+ report->addRange(CE->getArg(0)->getSourceRange());
+ BR.EmitReport(report);
+ return true;
+ }
+
+ return false;
+}
+
+
+GRSimpleAPICheck*
+clang::CreateAuditCFRetainRelease(ASTContext& Ctx, BugReporter& BR) {
+ return new AuditCFRetainRelease(Ctx, BR);
+}
+
+//===----------------------------------------------------------------------===//
+// Check for sending 'retain', 'release', or 'autorelease' directly to a Class.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ClassReleaseChecker :
+ public CheckerVisitor<ClassReleaseChecker> {
+ Selector releaseS;
+ Selector retainS;
+ Selector autoreleaseS;
+ Selector drainS;
+ BugType *BT;
+public:
+ ClassReleaseChecker(ASTContext &Ctx)
+ : releaseS(GetNullarySelector("release", Ctx)),
+ retainS(GetNullarySelector("retain", Ctx)),
+ autoreleaseS(GetNullarySelector("autorelease", Ctx)),
+ drainS(GetNullarySelector("drain", Ctx)),
+ BT(0) {}
+
+ static void *getTag() { static int x = 0; return &x; }
+
+ void PreVisitObjCMessageExpr(CheckerContext &C, const ObjCMessageExpr *ME);
+};
+}
+
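+// A hypothetical example of the pattern this checker flags:
+//   [NSObject release];      // warning: 'release' sent to the class itself
+//   [someInstance release];  // fine: sent to an instance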
+void ClassReleaseChecker::PreVisitObjCMessageExpr(CheckerContext &C,
+ const ObjCMessageExpr *ME) {
+ ObjCInterfaceDecl *Class = 0;
+ switch (ME->getReceiverKind()) {
+ case ObjCMessageExpr::Class:
+ Class = ME->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ Class = ME->getSuperType()->getAs<ObjCObjectType>()->getInterface();
+ break;
+
+ case ObjCMessageExpr::Instance:
+ case ObjCMessageExpr::SuperInstance:
+ return;
+ }
+
+ Selector S = ME->getSelector();
+ if (!(S == releaseS || S == retainS || S == autoreleaseS || S == drainS))
+ return;
+
+ if (!BT)
+ BT = new APIMisuse("message incorrectly sent to class instead of class "
+ "instance");
+
+ ExplodedNode *N = C.GenerateNode();
+
+ if (!N)
+ return;
+
+ llvm::SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "The '" << S.getAsString() << "' message should be sent to instances "
+ "of class '" << Class->getName()
+ << "' and not the class directly";
+
+ RangedBugReport *report = new RangedBugReport(*BT, os.str(), N);
+ report->addRange(ME->getSourceRange());
+ C.EmitReport(report);
+}
+
+//===----------------------------------------------------------------------===//
+// Check registration.
+//===----------------------------------------------------------------------===//
+
+void clang::RegisterAppleChecks(GRExprEngine& Eng, const Decl &D) {
+ ASTContext& Ctx = Eng.getContext();
+ BugReporter &BR = Eng.getBugReporter();
+
+ Eng.AddCheck(CreateBasicObjCFoundationChecks(Ctx, BR),
+ Stmt::ObjCMessageExprClass);
+ Eng.AddCheck(CreateAuditCFNumberCreate(Ctx, BR), Stmt::CallExprClass);
+ Eng.AddCheck(CreateAuditCFRetainRelease(Ctx, BR), Stmt::CallExprClass);
+
+ RegisterNSErrorChecks(BR, Eng, D);
+ RegisterNSAutoreleasePoolChecks(Eng);
+ Eng.registerCheck(new ClassReleaseChecker(Ctx));
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.h b/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.h
new file mode 100644
index 0000000..679c6dc
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.h
@@ -0,0 +1,41 @@
+//== BasicObjCFoundationChecks.h - Simple Apple-Foundation checks -*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BasicObjCFoundationChecks, a class that encapsulates
+// a set of simple checks to run on Objective-C code using Apple's Foundation
+// classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_BASICOBJCFOUNDATIONCHECKS
+#define LLVM_CLANG_ANALYSIS_BASICOBJCFOUNDATIONCHECKS
+
+namespace clang {
+
+class ASTContext;
+class BugReporter;
+class Decl;
+class GRExprEngine;
+class GRSimpleAPICheck;
+
+GRSimpleAPICheck *CreateBasicObjCFoundationChecks(ASTContext& Ctx,
+ BugReporter& BR);
+
+GRSimpleAPICheck *CreateAuditCFNumberCreate(ASTContext& Ctx,
+ BugReporter& BR);
+
+GRSimpleAPICheck *CreateAuditCFRetainRelease(ASTContext& Ctx,
+ BugReporter& BR);
+
+void RegisterNSErrorChecks(BugReporter& BR, GRExprEngine &Eng, const Decl &D);
+void RegisterNSAutoreleasePoolChecks(GRExprEngine &Eng);
+
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicStore.cpp b/contrib/llvm/tools/clang/lib/Checker/BasicStore.cpp
new file mode 100644
index 0000000..5be5ca6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/BasicStore.cpp
@@ -0,0 +1,510 @@
+//== BasicStore.cpp - Basic map from Locations to Values --------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the BasicStore and BasicStoreManager classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ExprObjC.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "llvm/ADT/ImmutableMap.h"
+
+using namespace clang;
+
+typedef llvm::ImmutableMap<const MemRegion*,SVal> BindingsTy;
+
+namespace {
+
+class BasicStoreSubRegionMap : public SubRegionMap {
+public:
+ BasicStoreSubRegionMap() {}
+
+ bool iterSubRegions(const MemRegion* R, Visitor& V) const {
+ return true; // Do nothing. No subregions.
+ }
+};
+
+class BasicStoreManager : public StoreManager {
+ BindingsTy::Factory VBFactory;
+public:
+ BasicStoreManager(GRStateManager& mgr)
+ : StoreManager(mgr), VBFactory(mgr.getAllocator()) {}
+
+ ~BasicStoreManager() {}
+
+ SubRegionMap *getSubRegionMap(Store store) {
+ return new BasicStoreSubRegionMap();
+ }
+
+ SVal Retrieve(Store store, Loc loc, QualType T = QualType());
+
+ Store InvalidateRegion(Store store, const MemRegion *R, const Expr *E,
+ unsigned Count, InvalidatedSymbols *IS);
+
+ Store scanForIvars(Stmt *B, const Decl* SelfDecl,
+ const MemRegion *SelfRegion, Store St);
+
+ Store Bind(Store St, Loc loc, SVal V);
+ Store Remove(Store St, Loc loc);
+ Store getInitialStore(const LocationContext *InitLoc);
+
+ // FIXME: Investigate what is using this. This method should be removed.
+ virtual Loc getLoc(const VarDecl* VD, const LocationContext *LC) {
+ return ValMgr.makeLoc(MRMgr.getVarRegion(VD, LC));
+ }
+
+ Store BindCompoundLiteral(Store store, const CompoundLiteralExpr*,
+ const LocationContext*, SVal val) {
+ return store;
+ }
+
+  /// ArrayToPointer - Used by GRExprEngine::VisitCast to handle implicit
+ /// conversions between arrays and pointers.
+ SVal ArrayToPointer(Loc Array) { return Array; }
+
+ /// RemoveDeadBindings - Scans a BasicStore of 'state' for dead values.
+  /// It updates the GRState object in place with the values removed.
+ const GRState *RemoveDeadBindings(GRState &state, Stmt* Loc,
+ const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper,
+ llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
+
+ void iterBindings(Store store, BindingsHandler& f);
+
+ Store BindDecl(Store store, const VarRegion *VR, SVal InitVal) {
+ return BindDeclInternal(store, VR, &InitVal);
+ }
+
+ Store BindDeclWithNoInit(Store store, const VarRegion *VR) {
+ return BindDeclInternal(store, VR, 0);
+ }
+
+ Store BindDeclInternal(Store store, const VarRegion *VR, SVal *InitVal);
+
+ static inline BindingsTy GetBindings(Store store) {
+ return BindingsTy(static_cast<const BindingsTy::TreeTy*>(store));
+ }
+
+ void print(Store store, llvm::raw_ostream& Out, const char* nl,
+ const char *sep);
+
+private:
+ SVal LazyRetrieve(Store store, const TypedRegion *R);
+
+ ASTContext& getContext() { return StateMgr.getContext(); }
+};
+
+} // end anonymous namespace
+
+
+StoreManager* clang::CreateBasicStoreManager(GRStateManager& StMgr) {
+ return new BasicStoreManager(StMgr);
+}
+
+static bool isHigherOrderRawPtr(QualType T, ASTContext &C) {
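+  // Accept T only if it is one or more pointer levels deep and the ultimate
+  // pointee is void or a pointer-sized integer (e.g. void**, intptr_t*).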
+ bool foundPointer = false;
+ while (1) {
+ const PointerType *PT = T->getAs<PointerType>();
+ if (!PT) {
+ if (!foundPointer)
+ return false;
+
+ // intptr_t* or intptr_t**, etc?
+ if (T->isIntegerType() && C.getTypeSize(T) == C.getTypeSize(C.VoidPtrTy))
+ return true;
+
+ QualType X = C.getCanonicalType(T).getUnqualifiedType();
+ return X == C.VoidTy;
+ }
+
+ foundPointer = true;
+ T = PT->getPointeeType();
+ }
+}
+
+SVal BasicStoreManager::LazyRetrieve(Store store, const TypedRegion *R) {
+ const VarRegion *VR = dyn_cast<VarRegion>(R);
+ if (!VR)
+ return UnknownVal();
+
+ const VarDecl *VD = VR->getDecl();
+ QualType T = VD->getType();
+
+ // Only handle simple types that we can symbolicate.
+ if (!SymbolManager::canSymbolicate(T) || !T->isScalarType())
+ return UnknownVal();
+
+ // Globals and parameters start with symbolic values.
+ // Local variables initially are undefined.
+ if (VR->hasGlobalsOrParametersStorage() ||
+ isa<UnknownSpaceRegion>(VR->getMemorySpace()))
+ return ValMgr.getRegionValueSymbolVal(R);
+ return UndefinedVal();
+}
+
+SVal BasicStoreManager::Retrieve(Store store, Loc loc, QualType T) {
+ if (isa<UnknownVal>(loc))
+ return UnknownVal();
+
+ assert(!isa<UndefinedVal>(loc));
+
+ switch (loc.getSubKind()) {
+
+ case loc::MemRegionKind: {
+ const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
+
+ if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
+ return UnknownVal();
+
+ BindingsTy B = GetBindings(store);
+ BindingsTy::data_type *Val = B.lookup(R);
+ const TypedRegion *TR = cast<TypedRegion>(R);
+
+ if (Val)
+ return CastRetrievedVal(*Val, TR, T);
+
+ SVal V = LazyRetrieve(store, TR);
+ return V.isUnknownOrUndef() ? V : CastRetrievedVal(V, TR, T);
+ }
+
+ case loc::ConcreteIntKind:
+ // Some clients may call GetSVal with such an option simply because
+ // they are doing a quick scan through their Locs (potentially to
+ // invalidate their bindings). Just return Undefined.
+ return UndefinedVal();
+
+ default:
+ assert (false && "Invalid Loc.");
+ break;
+ }
+
+ return UnknownVal();
+}
+
+Store BasicStoreManager::Bind(Store store, Loc loc, SVal V) {
+ if (isa<loc::ConcreteInt>(loc))
+ return store;
+
+ const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
+ ASTContext &C = StateMgr.getContext();
+
+ // Special case: handle store of pointer values (Loc) to pointers via
+ // a cast to intXX_t*, void*, etc. This is needed to handle
+ // OSCompareAndSwap32Barrier/OSCompareAndSwap64Barrier.
+ if (isa<Loc>(V) || isa<nonloc::LocAsInteger>(V))
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ // FIXME: Should check for index 0.
+ QualType T = ER->getLocationType(C);
+
+ if (isHigherOrderRawPtr(T, C))
+ R = ER->getSuperRegion();
+ }
+
+ if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
+ return store;
+
+ const TypedRegion *TyR = cast<TypedRegion>(R);
+
+ // Do not bind to arrays. We need to explicitly check for this so that
+ // we do not encounter any weirdness of trying to load/store from arrays.
+ if (TyR->isBoundable() && TyR->getValueType(C)->isArrayType())
+ return store;
+
+ if (nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(&V)) {
+ // Only convert 'V' to a location iff the underlying region type
+ // is a location as well.
+ // FIXME: We are allowing a store of an arbitrary location to
+ // a pointer. We may wish to flag a type error here if the types
+ // are incompatible. This may also cause lots of breakage
+ // elsewhere. Food for thought.
+ if (TyR->isBoundable() && Loc::IsLocType(TyR->getValueType(C)))
+ V = X->getLoc();
+ }
+
+ BindingsTy B = GetBindings(store);
+ return V.isUnknown()
+ ? VBFactory.Remove(B, R).getRoot()
+ : VBFactory.Add(B, R, V).getRoot();
+}
+
+Store BasicStoreManager::Remove(Store store, Loc loc) {
+ switch (loc.getSubKind()) {
+ case loc::MemRegionKind: {
+ const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
+
+ if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
+ return store;
+
+ return VBFactory.Remove(GetBindings(store), R).getRoot();
+ }
+ default:
+ assert ("Remove for given Loc type not yet implemented.");
+ return store;
+ }
+}
+
+const GRState *BasicStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
+ const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper,
+ llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
+{
+ Store store = state.getStore();
+ BindingsTy B = GetBindings(store);
+ typedef SVal::symbol_iterator symbol_iterator;
+
+ // Iterate over the variable bindings.
+ for (BindingsTy::iterator I=B.begin(), E=B.end(); I!=E ; ++I) {
+ if (const VarRegion *VR = dyn_cast<VarRegion>(I.getKey())) {
+ if (SymReaper.isLive(Loc, VR))
+ RegionRoots.push_back(VR);
+ else
+ continue;
+ }
+ else if (isa<ObjCIvarRegion>(I.getKey())) {
+ RegionRoots.push_back(I.getKey());
+ }
+ else
+ continue;
+
+ // Mark the bindings in the data as live.
+ SVal X = I.getData();
+ for (symbol_iterator SI=X.symbol_begin(), SE=X.symbol_end(); SI!=SE; ++SI)
+ SymReaper.markLive(*SI);
+ }
+
+ // Scan for live variables and live symbols.
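+  // This is the mark phase of a mark-sweep pass: chase bindings outward
+  // from the roots, marking every region and symbol reachable from them.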
+ llvm::SmallPtrSet<const MemRegion*, 10> Marked;
+
+ while (!RegionRoots.empty()) {
+ const MemRegion* MR = RegionRoots.back();
+ RegionRoots.pop_back();
+
+ while (MR) {
+ if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(MR)) {
+ SymReaper.markLive(SymR->getSymbol());
+ break;
+ }
+ else if (isa<VarRegion>(MR) || isa<ObjCIvarRegion>(MR)) {
+ if (Marked.count(MR))
+ break;
+
+ Marked.insert(MR);
+ SVal X = Retrieve(store, loc::MemRegionVal(MR));
+
+ // FIXME: We need to handle symbols nested in region definitions.
+ for (symbol_iterator SI=X.symbol_begin(),SE=X.symbol_end();SI!=SE;++SI)
+ SymReaper.markLive(*SI);
+
+ if (!isa<loc::MemRegionVal>(X))
+ break;
+
+ const loc::MemRegionVal& LVD = cast<loc::MemRegionVal>(X);
+ RegionRoots.push_back(LVD.getRegion());
+ break;
+ }
+ else if (const SubRegion* R = dyn_cast<SubRegion>(MR))
+ MR = R->getSuperRegion();
+ else
+ break;
+ }
+ }
+
+ // Remove dead variable bindings.
+ for (BindingsTy::iterator I=B.begin(), E=B.end(); I!=E ; ++I) {
+ const MemRegion* R = I.getKey();
+
+ if (!Marked.count(R)) {
+ store = Remove(store, ValMgr.makeLoc(R));
+ SVal X = I.getData();
+
+ for (symbol_iterator SI=X.symbol_begin(), SE=X.symbol_end(); SI!=SE; ++SI)
+ SymReaper.maybeDead(*SI);
+ }
+ }
+
+ state.setStore(store);
+ return StateMgr.getPersistentState(state);
+}
+
+Store BasicStoreManager::scanForIvars(Stmt *B, const Decl* SelfDecl,
+ const MemRegion *SelfRegion, Store St) {
+ for (Stmt::child_iterator CI=B->child_begin(), CE=B->child_end();
+ CI != CE; ++CI) {
+
+ if (!*CI)
+ continue;
+
+ // Check if the statement is an ivar reference. We only
+ // care about self.ivar.
+ if (ObjCIvarRefExpr *IV = dyn_cast<ObjCIvarRefExpr>(*CI)) {
+ const Expr *Base = IV->getBase()->IgnoreParenCasts();
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Base)) {
+ if (DR->getDecl() == SelfDecl) {
+ const ObjCIvarRegion *IVR = MRMgr.getObjCIvarRegion(IV->getDecl(),
+ SelfRegion);
+ SVal X = ValMgr.getRegionValueSymbolVal(IVR);
+ St = Bind(St, ValMgr.makeLoc(IVR), X);
+ }
+ }
+ }
+ else
+ St = scanForIvars(*CI, SelfDecl, SelfRegion, St);
+ }
+
+ return St;
+}
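+
+// Editorial sketch (not part of the original commit): the shape of code
+// scanForIvars recognizes. Given an Objective-C method body such as
+//
+//   - (void)update {
+//     int a = self->_count;    // matched: ObjCIvarRefExpr based on 'self'
+//     int b = other->_count;   // skipped: the base is not the self decl
+//   }
+//
+// only the 'self'-based access receives a fresh symbolic binding in the
+// initial store ('_count' and 'other' are hypothetical names).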
+
+Store BasicStoreManager::getInitialStore(const LocationContext *InitLoc) {
+  // The LiveVariables information already contains a collection of all
+  // VarDecls used in the function. Iterate through this set, and
+  // "symbolicate" any VarDecl whose value originally comes from outside
+  // the function.
+ typedef LiveVariables::AnalysisDataTy LVDataTy;
+ LVDataTy& D = InitLoc->getLiveVariables()->getAnalysisData();
+ Store St = VBFactory.GetEmptyMap().getRoot();
+
+ for (LVDataTy::decl_iterator I=D.begin_decl(), E=D.end_decl(); I != E; ++I) {
+ NamedDecl* ND = const_cast<NamedDecl*>(I->first);
+
+ // Handle implicit parameters.
+ if (ImplicitParamDecl* PD = dyn_cast<ImplicitParamDecl>(ND)) {
+ const Decl& CD = *InitLoc->getDecl();
+ if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(&CD)) {
+ if (MD->getSelfDecl() == PD) {
+ // FIXME: Add type constraints (when they become available) to
+ // SelfRegion? (i.e., it implements MD->getClassInterface()).
+ const VarRegion *VR = MRMgr.getVarRegion(PD, InitLoc);
+ const MemRegion *SelfRegion =
+ ValMgr.getRegionValueSymbolVal(VR).getAsRegion();
+ assert(SelfRegion);
+ St = Bind(St, ValMgr.makeLoc(VR), loc::MemRegionVal(SelfRegion));
+ // Scan the method for ivar references. While this requires an
+ // entire AST scan, the cost should not be high in practice.
+ St = scanForIvars(MD->getBody(), PD, SelfRegion, St);
+ }
+ }
+ }
+ }
+
+ return St;
+}
+
+Store BasicStoreManager::BindDeclInternal(Store store, const VarRegion* VR,
+ SVal* InitVal) {
+
+ BasicValueFactory& BasicVals = StateMgr.getBasicVals();
+ const VarDecl *VD = VR->getDecl();
+
+ // BasicStore does not model arrays and structs.
+ if (VD->getType()->isArrayType() || VD->getType()->isStructureOrClassType())
+ return store;
+
+ if (VD->hasGlobalStorage()) {
+ // Handle variables with global storage: extern, static, PrivateExtern.
+
+    // FIXME: Static variables may have an initializer, but the second time
+    // a function is called those values may not be current. Currently, a
+    // function will not be called more than once.
+
+ // Static global variables should not be visited here.
+ assert(!(VD->getStorageClass() == VarDecl::Static &&
+ VD->isFileVarDecl()));
+
+ // Process static variables.
+ if (VD->getStorageClass() == VarDecl::Static) {
+ // C99: 6.7.8 Initialization
+ // If an object that has static storage duration is not initialized
+ // explicitly, then:
+ // —if it has pointer type, it is initialized to a null pointer;
+ // —if it has arithmetic type, it is initialized to (positive or
+ // unsigned) zero;
+ if (!InitVal) {
+ QualType T = VD->getType();
+ if (Loc::IsLocType(T))
+ store = Bind(store, loc::MemRegionVal(VR),
+ loc::ConcreteInt(BasicVals.getValue(0, T)));
+ else if (T->isIntegerType())
+ store = Bind(store, loc::MemRegionVal(VR),
+ nonloc::ConcreteInt(BasicVals.getValue(0, T)));
+ else {
+ // assert(0 && "ignore other types of variables");
+ }
+ } else {
+ store = Bind(store, loc::MemRegionVal(VR), *InitVal);
+ }
+ }
+ } else {
+ // Process local scalar variables.
+ QualType T = VD->getType();
+ if (ValMgr.getSymbolManager().canSymbolicate(T)) {
+ SVal V = InitVal ? *InitVal : UndefinedVal();
+ store = Bind(store, loc::MemRegionVal(VR), V);
+ }
+ }
+
+ return store;
+}
+
+void BasicStoreManager::print(Store store, llvm::raw_ostream& Out,
+ const char* nl, const char *sep) {
+
+ BindingsTy B = GetBindings(store);
+ Out << "Variables:" << nl;
+
+ bool isFirst = true;
+
+ for (BindingsTy::iterator I=B.begin(), E=B.end(); I != E; ++I) {
+ if (isFirst)
+ isFirst = false;
+ else
+ Out << nl;
+
+ Out << ' ' << I.getKey() << " : " << I.getData();
+ }
+}
+
+void BasicStoreManager::iterBindings(Store store, BindingsHandler& f) {
+ BindingsTy B = GetBindings(store);
+
+  for (BindingsTy::iterator I=B.begin(), E=B.end(); I != E; ++I)
+    f.HandleBinding(*this, store, I.getKey(), I.getData());
+}
+
+StoreManager::BindingsHandler::~BindingsHandler() {}
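+
+// Editorial sketch (not part of the original commit): how a client walks
+// a store. Subclass BindingsHandler and hand it to iterBindings, as the
+// notable-symbol scanners in BugReporter.cpp do; 'CountBindings' is a
+// hypothetical example.
+namespace {
+class CountBindings : public StoreManager::BindingsHandler {
+public:
+  unsigned Count;
+  CountBindings() : Count(0) {}
+  bool HandleBinding(StoreManager &SMgr, Store store, const MemRegion *R,
+                     SVal V) {
+    ++Count;      // e.g. tally every live binding in the store
+    return true;  // returning true keeps the iteration going
+  }
+};
+} // end anonymous namespace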
+
+//===----------------------------------------------------------------------===//
+// Binding invalidation.
+//===----------------------------------------------------------------------===//
+
+Store BasicStoreManager::InvalidateRegion(Store store,
+ const MemRegion *R,
+ const Expr *E,
+ unsigned Count,
+ InvalidatedSymbols *IS) {
+ R = R->StripCasts();
+
+ if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
+ return store;
+
+ if (IS) {
+ BindingsTy B = GetBindings(store);
+ if (BindingsTy::data_type *Val = B.lookup(R)) {
+ if (SymbolRef Sym = Val->getAsSymbol())
+ IS->insert(Sym);
+ }
+ }
+
+ QualType T = cast<TypedRegion>(R)->getValueType(R->getContext());
+ SVal V = ValMgr.getConjuredSymbolVal(R, E, T, Count);
+ return Bind(store, loc::MemRegionVal(R), V);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicValueFactory.cpp b/contrib/llvm/tools/clang/lib/Checker/BasicValueFactory.cpp
new file mode 100644
index 0000000..246beea
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/BasicValueFactory.cpp
@@ -0,0 +1,289 @@
+//=== BasicValueFactory.cpp - Values for path-sensitive analysis -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BasicValueFactory, a class that manages the lifetime
+// of APSInt objects and symbolic constraints used by GRExprEngine
+// and related classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/BasicValueFactory.h"
+
+using namespace clang;
+
+void CompoundValData::Profile(llvm::FoldingSetNodeID& ID, QualType T,
+ llvm::ImmutableList<SVal> L) {
+ T.Profile(ID);
+ ID.AddPointer(L.getInternalPointer());
+}
+
+void LazyCompoundValData::Profile(llvm::FoldingSetNodeID& ID,
+ const void *store,const TypedRegion *region) {
+ ID.AddPointer(store);
+ ID.AddPointer(region);
+}
+
+typedef std::pair<SVal, uintptr_t> SValData;
+typedef std::pair<SVal, SVal> SValPair;
+
+namespace llvm {
+template<> struct FoldingSetTrait<SValData> {
+ static inline void Profile(const SValData& X, llvm::FoldingSetNodeID& ID) {
+ X.first.Profile(ID);
+ ID.AddPointer( (void*) X.second);
+ }
+};
+
+template<> struct FoldingSetTrait<SValPair> {
+ static inline void Profile(const SValPair& X, llvm::FoldingSetNodeID& ID) {
+ X.first.Profile(ID);
+ X.second.Profile(ID);
+ }
+};
+}
+
+typedef llvm::FoldingSet<llvm::FoldingSetNodeWrapper<SValData> >
+ PersistentSValsTy;
+
+typedef llvm::FoldingSet<llvm::FoldingSetNodeWrapper<SValPair> >
+ PersistentSValPairsTy;
+
+BasicValueFactory::~BasicValueFactory() {
+  // Note that the destructors for the contents of APSIntSet will never be
+  // called automatically, so we iterate over the set and invoke the
+  // destructor for each APSInt by hand. This frees the auxiliary memory
+  // allocated to represent very large constants.
+ for (APSIntSetTy::iterator I=APSIntSet.begin(), E=APSIntSet.end(); I!=E; ++I)
+ I->getValue().~APSInt();
+
+ delete (PersistentSValsTy*) PersistentSVals;
+ delete (PersistentSValPairsTy*) PersistentSValPairs;
+}
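+
+// Editorial sketch (not part of the original commit): the allocation
+// idiom the destructor above compensates for. Nodes are placement-new'd
+// into a BumpPtrAllocator, which reclaims raw memory without running
+// destructors, so ~APSInt() must be invoked by hand for wide integers
+// whose digits live on the heap.
+static void allocationIdiomSketch() {
+  llvm::BumpPtrAllocator Alloc;
+  llvm::APSInt *P = (llvm::APSInt*) Alloc.Allocate<llvm::APSInt>();
+  new (P) llvm::APSInt(llvm::APInt(128, 42), true); // 128-bit: heap digits
+  P->~APSInt(); // explicit; the allocator's teardown frees only raw bytes
+}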
+
+const llvm::APSInt& BasicValueFactory::getValue(const llvm::APSInt& X) {
+ llvm::FoldingSetNodeID ID;
+ void* InsertPos;
+ typedef llvm::FoldingSetNodeWrapper<llvm::APSInt> FoldNodeTy;
+
+ X.Profile(ID);
+ FoldNodeTy* P = APSIntSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!P) {
+ P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
+ new (P) FoldNodeTy(X);
+ APSIntSet.InsertNode(P, InsertPos);
+ }
+
+ return *P;
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(const llvm::APInt& X,
+ bool isUnsigned) {
+ llvm::APSInt V(X, isUnsigned);
+ return getValue(V);
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, unsigned BitWidth,
+ bool isUnsigned) {
+ llvm::APSInt V(BitWidth, isUnsigned);
+ V = X;
+ return getValue(V);
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, QualType T) {
+
+ unsigned bits = Ctx.getTypeSize(T);
+ llvm::APSInt V(bits, T->isUnsignedIntegerType() || Loc::IsLocType(T));
+ V = X;
+ return getValue(V);
+}
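+
+// Editorial sketch (not part of the original commit): because every
+// getValue() overload funnels through the FoldingSet above, equal values
+// collapse to a single canonical APSInt, so clients may compare the
+// returned references by address.
+static bool valuesAreUniquedSketch(BasicValueFactory &BVF) {
+  const llvm::APSInt &A = BVF.getValue(42, 32, false);
+  const llvm::APSInt &B = BVF.getValue(42, 32, false);
+  return &A == &B; // same profile, same node in APSIntSet
+}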
+
+const CompoundValData*
+BasicValueFactory::getCompoundValData(QualType T,
+ llvm::ImmutableList<SVal> Vals) {
+
+ llvm::FoldingSetNodeID ID;
+ CompoundValData::Profile(ID, T, Vals);
+ void* InsertPos;
+
+ CompoundValData* D = CompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!D) {
+ D = (CompoundValData*) BPAlloc.Allocate<CompoundValData>();
+ new (D) CompoundValData(T, Vals);
+ CompoundValDataSet.InsertNode(D, InsertPos);
+ }
+
+ return D;
+}
+
+const LazyCompoundValData*
+BasicValueFactory::getLazyCompoundValData(const void *store,
+ const TypedRegion *region) {
+ llvm::FoldingSetNodeID ID;
+ LazyCompoundValData::Profile(ID, store, region);
+ void* InsertPos;
+
+ LazyCompoundValData *D =
+ LazyCompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!D) {
+ D = (LazyCompoundValData*) BPAlloc.Allocate<LazyCompoundValData>();
+ new (D) LazyCompoundValData(store, region);
+ LazyCompoundValDataSet.InsertNode(D, InsertPos);
+ }
+
+ return D;
+}
+
+const llvm::APSInt*
+BasicValueFactory::EvaluateAPSInt(BinaryOperator::Opcode Op,
+ const llvm::APSInt& V1, const llvm::APSInt& V2) {
+
+ switch (Op) {
+ default:
+ assert (false && "Invalid Opcode.");
+
+ case BinaryOperator::Mul:
+ return &getValue( V1 * V2 );
+
+ case BinaryOperator::Div:
+ return &getValue( V1 / V2 );
+
+ case BinaryOperator::Rem:
+ return &getValue( V1 % V2 );
+
+ case BinaryOperator::Add:
+ return &getValue( V1 + V2 );
+
+ case BinaryOperator::Sub:
+ return &getValue( V1 - V2 );
+
+ case BinaryOperator::Shl: {
+
+ // FIXME: This logic should probably go higher up, where we can
+ // test these conditions symbolically.
+
+ // FIXME: Expand these checks to include all undefined behavior.
+
+ if (V2.isSigned() && V2.isNegative())
+ return NULL;
+
+ uint64_t Amt = V2.getZExtValue();
+
+ if (Amt > V1.getBitWidth())
+ return NULL;
+
+ return &getValue( V1.operator<<( (unsigned) Amt ));
+ }
+
+ case BinaryOperator::Shr: {
+
+ // FIXME: This logic should probably go higher up, where we can
+ // test these conditions symbolically.
+
+ // FIXME: Expand these checks to include all undefined behavior.
+
+ if (V2.isSigned() && V2.isNegative())
+ return NULL;
+
+ uint64_t Amt = V2.getZExtValue();
+
+ if (Amt > V1.getBitWidth())
+ return NULL;
+
+ return &getValue( V1.operator>>( (unsigned) Amt ));
+ }
+
+ case BinaryOperator::LT:
+ return &getTruthValue( V1 < V2 );
+
+ case BinaryOperator::GT:
+ return &getTruthValue( V1 > V2 );
+
+ case BinaryOperator::LE:
+ return &getTruthValue( V1 <= V2 );
+
+ case BinaryOperator::GE:
+ return &getTruthValue( V1 >= V2 );
+
+ case BinaryOperator::EQ:
+ return &getTruthValue( V1 == V2 );
+
+ case BinaryOperator::NE:
+ return &getTruthValue( V1 != V2 );
+
+ // Note: LAnd, LOr, Comma are handled specially by higher-level logic.
+
+ case BinaryOperator::And:
+ return &getValue( V1 & V2 );
+
+ case BinaryOperator::Or:
+ return &getValue( V1 | V2 );
+
+ case BinaryOperator::Xor:
+ return &getValue( V1 ^ V2 );
+ }
+}
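+
+// Editorial sketch (not part of the original commit): EvaluateAPSInt
+// returns a uniqued result, or NULL when the operation would be undefined
+// (an oversized or negative shift above), so callers must check the
+// pointer before dereferencing.
+static const llvm::APSInt *foldShiftSketch(BasicValueFactory &BVF) {
+  const llvm::APSInt &V   = BVF.getValue(1, 32, false);
+  const llvm::APSInt &Amt = BVF.getValue(40, 32, false);
+  // A 40-bit shift of a 32-bit value trips the guard and yields NULL.
+  return BVF.EvaluateAPSInt(BinaryOperator::Shl, V, Amt);
+}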
+
+const std::pair<SVal, uintptr_t>&
+BasicValueFactory::getPersistentSValWithData(const SVal& V, uintptr_t Data) {
+
+ // Lazily create the folding set.
+ if (!PersistentSVals) PersistentSVals = new PersistentSValsTy();
+
+ llvm::FoldingSetNodeID ID;
+ void* InsertPos;
+ V.Profile(ID);
+ ID.AddPointer((void*) Data);
+
+ PersistentSValsTy& Map = *((PersistentSValsTy*) PersistentSVals);
+
+ typedef llvm::FoldingSetNodeWrapper<SValData> FoldNodeTy;
+ FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!P) {
+ P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
+ new (P) FoldNodeTy(std::make_pair(V, Data));
+ Map.InsertNode(P, InsertPos);
+ }
+
+ return P->getValue();
+}
+
+const std::pair<SVal, SVal>&
+BasicValueFactory::getPersistentSValPair(const SVal& V1, const SVal& V2) {
+
+ // Lazily create the folding set.
+ if (!PersistentSValPairs) PersistentSValPairs = new PersistentSValPairsTy();
+
+ llvm::FoldingSetNodeID ID;
+ void* InsertPos;
+ V1.Profile(ID);
+ V2.Profile(ID);
+
+ PersistentSValPairsTy& Map = *((PersistentSValPairsTy*) PersistentSValPairs);
+
+ typedef llvm::FoldingSetNodeWrapper<SValPair> FoldNodeTy;
+ FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!P) {
+ P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
+ new (P) FoldNodeTy(std::make_pair(V1, V2));
+ Map.InsertNode(P, InsertPos);
+ }
+
+ return P->getValue();
+}
+
+const SVal* BasicValueFactory::getPersistentSVal(SVal X) {
+ return &getPersistentSValWithData(X, 0).first;
+}
+
+
diff --git a/contrib/llvm/tools/clang/lib/Checker/BugReporter.cpp b/contrib/llvm/tools/clang/lib/Checker/BugReporter.cpp
new file mode 100644
index 0000000..3bcc03f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/BugReporter.cpp
@@ -0,0 +1,1896 @@
+//===--- BugReporter.cpp - Generate PathDiagnostics for Bugs ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BugReporter, a utility class for generating
+// PathDiagnostics.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/Checker/BugReporter/PathDiagnostic.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/OwningPtr.h"
+#include <queue>
+
+using namespace clang;
+
+BugReporterVisitor::~BugReporterVisitor() {}
+BugReporterContext::~BugReporterContext() {
+ for (visitor_iterator I = visitor_begin(), E = visitor_end(); I != E; ++I)
+ if ((*I)->isOwnedByReporterContext()) delete *I;
+}
+
+void BugReporterContext::addVisitor(BugReporterVisitor* visitor) {
+ if (!visitor)
+ return;
+
+ llvm::FoldingSetNodeID ID;
+ visitor->Profile(ID);
+ void *InsertPos;
+
+ if (CallbacksSet.FindNodeOrInsertPos(ID, InsertPos)) {
+ delete visitor;
+ return;
+ }
+
+ CallbacksSet.InsertNode(visitor, InsertPos);
+ Callbacks = F.Add(visitor, Callbacks);
+}
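+
+// Editorial sketch (not part of the original commit): addVisitor uniques
+// visitors by their Profile() hash, so registering an equivalent visitor
+// twice is a no-op and the duplicate is deleted on the spot:
+//
+//   BugReporterVisitor *V = makeVisitor(); // 'makeVisitor' is hypothetical
+//   BRC.addVisitor(V);                     // inserted into CallbacksSet
+//   BRC.addVisitor(makeVisitor());         // same profile: deleted above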
+
+//===----------------------------------------------------------------------===//
+// Helper routines for walking the ExplodedGraph and fetching statements.
+//===----------------------------------------------------------------------===//
+
+static inline const Stmt* GetStmt(const ProgramPoint &P) {
+ if (const StmtPoint* SP = dyn_cast<StmtPoint>(&P))
+ return SP->getStmt();
+ else if (const BlockEdge* BE = dyn_cast<BlockEdge>(&P))
+ return BE->getSrc()->getTerminator();
+
+ return 0;
+}
+
+static inline const ExplodedNode*
+GetPredecessorNode(const ExplodedNode* N) {
+ return N->pred_empty() ? NULL : *(N->pred_begin());
+}
+
+static inline const ExplodedNode*
+GetSuccessorNode(const ExplodedNode* N) {
+ return N->succ_empty() ? NULL : *(N->succ_begin());
+}
+
+static const Stmt* GetPreviousStmt(const ExplodedNode* N) {
+ for (N = GetPredecessorNode(N); N; N = GetPredecessorNode(N))
+ if (const Stmt *S = GetStmt(N->getLocation()))
+ return S;
+
+ return 0;
+}
+
+static const Stmt* GetNextStmt(const ExplodedNode* N) {
+ for (N = GetSuccessorNode(N); N; N = GetSuccessorNode(N))
+ if (const Stmt *S = GetStmt(N->getLocation())) {
+ // Check if the statement is '?' or '&&'/'||'. These are "merges",
+ // not actual statement points.
+ switch (S->getStmtClass()) {
+ case Stmt::ChooseExprClass:
+ case Stmt::ConditionalOperatorClass: continue;
+ case Stmt::BinaryOperatorClass: {
+ BinaryOperator::Opcode Op = cast<BinaryOperator>(S)->getOpcode();
+ if (Op == BinaryOperator::LAnd || Op == BinaryOperator::LOr)
+ continue;
+ break;
+ }
+ default:
+ break;
+ }
+
+ // Some expressions don't have locations.
+ if (S->getLocStart().isInvalid())
+ continue;
+
+ return S;
+ }
+
+ return 0;
+}
+
+static inline const Stmt*
+GetCurrentOrPreviousStmt(const ExplodedNode* N) {
+ if (const Stmt *S = GetStmt(N->getLocation()))
+ return S;
+
+ return GetPreviousStmt(N);
+}
+
+static inline const Stmt*
+GetCurrentOrNextStmt(const ExplodedNode* N) {
+ if (const Stmt *S = GetStmt(N->getLocation()))
+ return S;
+
+ return GetNextStmt(N);
+}
+
+//===----------------------------------------------------------------------===//
+// PathDiagnosticBuilder and its associated routines and helper objects.
+//===----------------------------------------------------------------------===//
+
+typedef llvm::DenseMap<const ExplodedNode*,
+                       const ExplodedNode*> NodeBackMap;
+
+namespace {
+class NodeMapClosure : public BugReport::NodeResolver {
+ NodeBackMap& M;
+public:
+ NodeMapClosure(NodeBackMap *m) : M(*m) {}
+ ~NodeMapClosure() {}
+
+ const ExplodedNode* getOriginalNode(const ExplodedNode* N) {
+ NodeBackMap::iterator I = M.find(N);
+ return I == M.end() ? 0 : I->second;
+ }
+};
+
+class PathDiagnosticBuilder : public BugReporterContext {
+ BugReport *R;
+ PathDiagnosticClient *PDC;
+ llvm::OwningPtr<ParentMap> PM;
+ NodeMapClosure NMC;
+public:
+ PathDiagnosticBuilder(GRBugReporter &br,
+ BugReport *r, NodeBackMap *Backmap,
+ PathDiagnosticClient *pdc)
+ : BugReporterContext(br),
+ R(r), PDC(pdc), NMC(Backmap) {
+ addVisitor(R);
+ }
+
+ PathDiagnosticLocation ExecutionContinues(const ExplodedNode* N);
+
+ PathDiagnosticLocation ExecutionContinues(llvm::raw_string_ostream& os,
+ const ExplodedNode* N);
+
+ Decl const &getCodeDecl() { return R->getEndNode()->getCodeDecl(); }
+
+ ParentMap& getParentMap() { return R->getEndNode()->getParentMap(); }
+
+ const Stmt *getParent(const Stmt *S) {
+ return getParentMap().getParent(S);
+ }
+
+ virtual NodeMapClosure& getNodeResolver() { return NMC; }
+ BugReport& getReport() { return *R; }
+
+ PathDiagnosticLocation getEnclosingStmtLocation(const Stmt *S);
+
+ PathDiagnosticLocation
+ getEnclosingStmtLocation(const PathDiagnosticLocation &L) {
+ if (const Stmt *S = L.asStmt())
+ return getEnclosingStmtLocation(S);
+
+ return L;
+ }
+
+ PathDiagnosticClient::PathGenerationScheme getGenerationScheme() const {
+ return PDC ? PDC->getGenerationScheme() : PathDiagnosticClient::Extensive;
+ }
+
+ bool supportsLogicalOpControlFlow() const {
+ return PDC ? PDC->supportsLogicalOpControlFlow() : true;
+ }
+};
+} // end anonymous namespace
+
+PathDiagnosticLocation
+PathDiagnosticBuilder::ExecutionContinues(const ExplodedNode* N) {
+ if (const Stmt *S = GetNextStmt(N))
+ return PathDiagnosticLocation(S, getSourceManager());
+
+ return FullSourceLoc(N->getLocationContext()->getDecl()->getBodyRBrace(),
+ getSourceManager());
+}
+
+PathDiagnosticLocation
+PathDiagnosticBuilder::ExecutionContinues(llvm::raw_string_ostream& os,
+ const ExplodedNode* N) {
+
+ // Slow, but probably doesn't matter.
+ if (os.str().empty())
+ os << ' ';
+
+ const PathDiagnosticLocation &Loc = ExecutionContinues(N);
+
+ if (Loc.asStmt())
+ os << "Execution continues on line "
+ << getSourceManager().getInstantiationLineNumber(Loc.asLocation())
+ << '.';
+ else {
+ os << "Execution jumps to the end of the ";
+ const Decl *D = N->getLocationContext()->getDecl();
+ if (isa<ObjCMethodDecl>(D))
+ os << "method";
+ else if (isa<FunctionDecl>(D))
+ os << "function";
+ else {
+ assert(isa<BlockDecl>(D));
+ os << "anonymous block";
+ }
+ os << '.';
+ }
+
+ return Loc;
+}
+
+static bool IsNested(const Stmt *S, ParentMap &PM) {
+ if (isa<Expr>(S) && PM.isConsumedExpr(cast<Expr>(S)))
+ return true;
+
+ const Stmt *Parent = PM.getParentIgnoreParens(S);
+
+ if (Parent)
+ switch (Parent->getStmtClass()) {
+ case Stmt::ForStmtClass:
+ case Stmt::DoStmtClass:
+ case Stmt::WhileStmtClass:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+PathDiagnosticLocation
+PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
+ assert(S && "Null Stmt* passed to getEnclosingStmtLocation");
+ ParentMap &P = getParentMap();
+ SourceManager &SMgr = getSourceManager();
+
+ while (IsNested(S, P)) {
+ const Stmt *Parent = P.getParentIgnoreParens(S);
+
+ if (!Parent)
+ break;
+
+ switch (Parent->getStmtClass()) {
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator *B = cast<BinaryOperator>(Parent);
+ if (B->isLogicalOp())
+ return PathDiagnosticLocation(S, SMgr);
+ break;
+ }
+ case Stmt::CompoundStmtClass:
+ case Stmt::StmtExprClass:
+ return PathDiagnosticLocation(S, SMgr);
+ case Stmt::ChooseExprClass:
+      // Similar to '?': if we are referring to the condition, just have the
+      // edge point to the entire choose expression.
+ if (cast<ChooseExpr>(Parent)->getCond() == S)
+ return PathDiagnosticLocation(Parent, SMgr);
+ else
+ return PathDiagnosticLocation(S, SMgr);
+ case Stmt::ConditionalOperatorClass:
+      // For '?', if we are referring to the condition, just have the edge
+      // point to the entire '?' expression.
+ if (cast<ConditionalOperator>(Parent)->getCond() == S)
+ return PathDiagnosticLocation(Parent, SMgr);
+ else
+ return PathDiagnosticLocation(S, SMgr);
+ case Stmt::DoStmtClass:
+ return PathDiagnosticLocation(S, SMgr);
+ case Stmt::ForStmtClass:
+ if (cast<ForStmt>(Parent)->getBody() == S)
+ return PathDiagnosticLocation(S, SMgr);
+ break;
+ case Stmt::IfStmtClass:
+ if (cast<IfStmt>(Parent)->getCond() != S)
+ return PathDiagnosticLocation(S, SMgr);
+ break;
+ case Stmt::ObjCForCollectionStmtClass:
+ if (cast<ObjCForCollectionStmt>(Parent)->getBody() == S)
+ return PathDiagnosticLocation(S, SMgr);
+ break;
+ case Stmt::WhileStmtClass:
+ if (cast<WhileStmt>(Parent)->getCond() != S)
+ return PathDiagnosticLocation(S, SMgr);
+ break;
+ default:
+ break;
+ }
+
+ S = Parent;
+ }
+
+ assert(S && "Cannot have null Stmt for PathDiagnosticLocation");
+
+ // Special case: DeclStmts can appear in for statement declarations, in which
+ // case the ForStmt is the context.
+ if (isa<DeclStmt>(S)) {
+ if (const Stmt *Parent = P.getParent(S)) {
+ switch (Parent->getStmtClass()) {
+ case Stmt::ForStmtClass:
+ case Stmt::ObjCForCollectionStmtClass:
+ return PathDiagnosticLocation(Parent, SMgr);
+ default:
+ break;
+ }
+ }
+ }
+ else if (isa<BinaryOperator>(S)) {
+    // Special case: the binary operator represents the initialization
+    // code in a for statement (this can happen when the variable being
+    // initialized is a previously declared variable).
+ if (const ForStmt *FS =
+ dyn_cast_or_null<ForStmt>(P.getParentIgnoreParens(S))) {
+ if (FS->getInit() == S)
+ return PathDiagnosticLocation(FS, SMgr);
+ }
+ }
+
+ return PathDiagnosticLocation(S, SMgr);
+}
+
+//===----------------------------------------------------------------------===//
+// ScanNotableSymbols: closure-like callback for scanning Store bindings.
+//===----------------------------------------------------------------------===//
+
+static const VarDecl*
+GetMostRecentVarDeclBinding(const ExplodedNode* N,
+ GRStateManager& VMgr, SVal X) {
+
+  for ( ; N ; N = GetPredecessorNode(N)) {
+
+ ProgramPoint P = N->getLocation();
+
+ if (!isa<PostStmt>(P))
+ continue;
+
+ const DeclRefExpr* DR = dyn_cast<DeclRefExpr>(cast<PostStmt>(P).getStmt());
+
+ if (!DR)
+ continue;
+
+ SVal Y = N->getState()->getSVal(DR);
+
+ if (X != Y)
+ continue;
+
+ const VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl());
+
+ if (!VD)
+ continue;
+
+ return VD;
+ }
+
+ return 0;
+}
+
+namespace {
+class NotableSymbolHandler : public StoreManager::BindingsHandler {
+
+ SymbolRef Sym;
+ const GRState* PrevSt;
+ const Stmt* S;
+ GRStateManager& VMgr;
+ const ExplodedNode* Pred;
+ PathDiagnostic& PD;
+ BugReporter& BR;
+
+public:
+
+ NotableSymbolHandler(SymbolRef sym, const GRState* prevst, const Stmt* s,
+ GRStateManager& vmgr, const ExplodedNode* pred,
+ PathDiagnostic& pd, BugReporter& br)
+ : Sym(sym), PrevSt(prevst), S(s), VMgr(vmgr), Pred(pred), PD(pd), BR(br) {}
+
+ bool HandleBinding(StoreManager& SMgr, Store store, const MemRegion* R,
+ SVal V) {
+
+ SymbolRef ScanSym = V.getAsSymbol();
+
+ if (ScanSym != Sym)
+ return true;
+
+ // Check if the previous state has this binding.
+ SVal X = PrevSt->getSVal(loc::MemRegionVal(R));
+
+ if (X == V) // Same binding?
+ return true;
+
+ // Different binding. Only handle assignments for now. We don't pull
+ // this check out of the loop because we will eventually handle other
+ // cases.
+
+ VarDecl *VD = 0;
+
+ if (const BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
+ if (!B->isAssignmentOp())
+ return true;
+
+ // What variable did we assign to?
+ DeclRefExpr* DR = dyn_cast<DeclRefExpr>(B->getLHS()->IgnoreParenCasts());
+
+ if (!DR)
+ return true;
+
+ VD = dyn_cast<VarDecl>(DR->getDecl());
+ }
+ else if (const DeclStmt* DS = dyn_cast<DeclStmt>(S)) {
+ // FIXME: Eventually CFGs won't have DeclStmts. Right now we
+ // assume that each DeclStmt has a single Decl. This invariant
+      // holds by construction in the CFG.
+ VD = dyn_cast<VarDecl>(*DS->decl_begin());
+ }
+
+ if (!VD)
+ return true;
+
+ // What is the most recently referenced variable with this binding?
+ const VarDecl* MostRecent = GetMostRecentVarDeclBinding(Pred, VMgr, V);
+
+ if (!MostRecent)
+ return true;
+
+ // Create the diagnostic.
+ FullSourceLoc L(S->getLocStart(), BR.getSourceManager());
+
+ if (Loc::IsLocType(VD->getType())) {
+ std::string msg = "'" + std::string(VD->getNameAsString()) +
+ "' now aliases '" + MostRecent->getNameAsString() + "'";
+
+ PD.push_front(new PathDiagnosticEventPiece(L, msg));
+ }
+
+ return true;
+ }
+};
+}
+
+static void HandleNotableSymbol(const ExplodedNode* N,
+ const Stmt* S,
+ SymbolRef Sym, BugReporter& BR,
+ PathDiagnostic& PD) {
+
+  const ExplodedNode* Pred = GetPredecessorNode(N);
+ const GRState* PrevSt = Pred ? Pred->getState() : 0;
+
+ if (!PrevSt)
+ return;
+
+ // Look at the region bindings of the current state that map to the
+ // specified symbol. Are any of them not in the previous state?
+ GRStateManager& VMgr = cast<GRBugReporter>(BR).getStateManager();
+ NotableSymbolHandler H(Sym, PrevSt, S, VMgr, Pred, PD, BR);
+ cast<GRBugReporter>(BR).getStateManager().iterBindings(N->getState(), H);
+}
+
+namespace {
+class ScanNotableSymbols : public StoreManager::BindingsHandler {
+
+ llvm::SmallSet<SymbolRef, 10> AlreadyProcessed;
+ const ExplodedNode* N;
+ const Stmt* S;
+ GRBugReporter& BR;
+ PathDiagnostic& PD;
+
+public:
+ ScanNotableSymbols(const ExplodedNode* n, const Stmt* s,
+ GRBugReporter& br, PathDiagnostic& pd)
+ : N(n), S(s), BR(br), PD(pd) {}
+
+ bool HandleBinding(StoreManager& SMgr, Store store,
+ const MemRegion* R, SVal V) {
+
+ SymbolRef ScanSym = V.getAsSymbol();
+
+ if (!ScanSym)
+ return true;
+
+ if (!BR.isNotable(ScanSym))
+ return true;
+
+ if (AlreadyProcessed.count(ScanSym))
+ return true;
+
+ AlreadyProcessed.insert(ScanSym);
+
+ HandleNotableSymbol(N, S, ScanSym, BR, PD);
+ return true;
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// "Minimal" path diagnostic generation algorithm.
+//===----------------------------------------------------------------------===//
+
+static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM);
+
+static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
+ PathDiagnosticBuilder &PDB,
+ const ExplodedNode *N) {
+
+ SourceManager& SMgr = PDB.getSourceManager();
+  const ExplodedNode* NextNode = GetPredecessorNode(N);
+ while (NextNode) {
+ N = NextNode;
+ NextNode = GetPredecessorNode(N);
+
+ ProgramPoint P = N->getLocation();
+
+ if (const BlockEdge* BE = dyn_cast<BlockEdge>(&P)) {
+ CFGBlock* Src = BE->getSrc();
+ CFGBlock* Dst = BE->getDst();
+ Stmt* T = Src->getTerminator();
+
+ if (!T)
+ continue;
+
+ FullSourceLoc Start(T->getLocStart(), SMgr);
+
+ switch (T->getStmtClass()) {
+ default:
+ break;
+
+ case Stmt::GotoStmtClass:
+ case Stmt::IndirectGotoStmtClass: {
+ const Stmt* S = GetNextStmt(N);
+
+ if (!S)
+ continue;
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ const PathDiagnosticLocation &End = PDB.getEnclosingStmtLocation(S);
+
+ os << "Control jumps to line "
+ << End.asLocation().getInstantiationLineNumber();
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ break;
+ }
+
+ case Stmt::SwitchStmtClass: {
+ // Figure out what case arm we took.
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ if (Stmt* S = Dst->getLabel()) {
+ PathDiagnosticLocation End(S, SMgr);
+
+ switch (S->getStmtClass()) {
+ default:
+ os << "No cases match in the switch statement. "
+ "Control jumps to line "
+ << End.asLocation().getInstantiationLineNumber();
+ break;
+ case Stmt::DefaultStmtClass:
+ os << "Control jumps to the 'default' case at line "
+ << End.asLocation().getInstantiationLineNumber();
+ break;
+
+ case Stmt::CaseStmtClass: {
+ os << "Control jumps to 'case ";
+ CaseStmt* Case = cast<CaseStmt>(S);
+ Expr* LHS = Case->getLHS()->IgnoreParenCasts();
+
+ // Determine if it is an enum.
+ bool GetRawInt = true;
+
+ if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(LHS)) {
+ // FIXME: Maybe this should be an assertion. Are there cases
+                // where it is not an EnumConstantDecl?
+ EnumConstantDecl* D =
+ dyn_cast<EnumConstantDecl>(DR->getDecl());
+
+ if (D) {
+ GetRawInt = false;
+                  os << D->getNameAsString();
+ }
+ }
+
+ if (GetRawInt)
+ os << LHS->EvaluateAsInt(PDB.getASTContext());
+
+ os << ":' at line "
+ << End.asLocation().getInstantiationLineNumber();
+ break;
+ }
+ }
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ else {
+ os << "'Default' branch taken. ";
+ const PathDiagnosticLocation &End = PDB.ExecutionContinues(os, N);
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+
+ break;
+ }
+
+ case Stmt::BreakStmtClass:
+ case Stmt::ContinueStmtClass: {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ break;
+ }
+
+ // Determine control-flow for ternary '?'.
+ case Stmt::ConditionalOperatorClass: {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "'?' condition is ";
+
+ if (*(Src->succ_begin()+1) == Dst)
+ os << "false";
+ else
+ os << "true";
+
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ break;
+ }
+
+ // Determine control-flow for short-circuited '&&' and '||'.
+ case Stmt::BinaryOperatorClass: {
+ if (!PDB.supportsLogicalOpControlFlow())
+ break;
+
+ BinaryOperator *B = cast<BinaryOperator>(T);
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "Left side of '";
+
+ if (B->getOpcode() == BinaryOperator::LAnd) {
+ os << "&&" << "' is ";
+
+ if (*(Src->succ_begin()+1) == Dst) {
+ os << "false";
+ PathDiagnosticLocation End(B->getLHS(), SMgr);
+ PathDiagnosticLocation Start(B->getOperatorLoc(), SMgr);
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ else {
+ os << "true";
+ PathDiagnosticLocation Start(B->getLHS(), SMgr);
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ }
+ else {
+ assert(B->getOpcode() == BinaryOperator::LOr);
+ os << "||" << "' is ";
+
+ if (*(Src->succ_begin()+1) == Dst) {
+ os << "false";
+ PathDiagnosticLocation Start(B->getLHS(), SMgr);
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ else {
+ os << "true";
+ PathDiagnosticLocation End(B->getLHS(), SMgr);
+ PathDiagnosticLocation Start(B->getOperatorLoc(), SMgr);
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ }
+
+ break;
+ }
+
+ case Stmt::DoStmtClass: {
+ if (*(Src->succ_begin()) == Dst) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "Loop condition is true. ";
+ PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ else {
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ "Loop condition is false. Exiting loop"));
+ }
+
+ break;
+ }
+
+ case Stmt::WhileStmtClass:
+ case Stmt::ForStmtClass: {
+ if (*(Src->succ_begin()+1) == Dst) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "Loop condition is false. ";
+ PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ else {
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ "Loop condition is true. Entering loop body"));
+ }
+
+ break;
+ }
+
+ case Stmt::IfStmtClass: {
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ if (*(Src->succ_begin()+1) == Dst)
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ "Taking false branch"));
+ else
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ "Taking true branch"));
+
+ break;
+ }
+ }
+ }
+
+ if (NextNode) {
+ for (BugReporterContext::visitor_iterator I = PDB.visitor_begin(),
+ E = PDB.visitor_end(); I!=E; ++I) {
+ if (PathDiagnosticPiece* p = (*I)->VisitNode(N, NextNode, PDB))
+ PD.push_front(p);
+ }
+ }
+
+ if (const PostStmt* PS = dyn_cast<PostStmt>(&P)) {
+ // Scan the region bindings, and see if a "notable" symbol has a new
+ // lval binding.
+ ScanNotableSymbols SNS(N, PS->getStmt(), PDB.getBugReporter(), PD);
+ PDB.getStateManager().iterBindings(N->getState(), SNS);
+ }
+ }
+
+ // After constructing the full PathDiagnostic, do a pass over it to compact
+ // PathDiagnosticPieces that occur within a macro.
+ CompactPathDiagnostic(PD, PDB.getSourceManager());
+}
+
+//===----------------------------------------------------------------------===//
+// "Extensive" PathDiagnostic generation.
+//===----------------------------------------------------------------------===//
+
+static bool IsControlFlowExpr(const Stmt *S) {
+ const Expr *E = dyn_cast<Expr>(S);
+
+ if (!E)
+ return false;
+
+ E = E->IgnoreParenCasts();
+
+ if (isa<ConditionalOperator>(E))
+ return true;
+
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(E))
+ if (B->isLogicalOp())
+ return true;
+
+ return false;
+}
+
+namespace {
+class ContextLocation : public PathDiagnosticLocation {
+ bool IsDead;
+public:
+ ContextLocation(const PathDiagnosticLocation &L, bool isdead = false)
+ : PathDiagnosticLocation(L), IsDead(isdead) {}
+
+ void markDead() { IsDead = true; }
+ bool isDead() const { return IsDead; }
+};
+
+class EdgeBuilder {
+ std::vector<ContextLocation> CLocs;
+ typedef std::vector<ContextLocation>::iterator iterator;
+ PathDiagnostic &PD;
+ PathDiagnosticBuilder &PDB;
+ PathDiagnosticLocation PrevLoc;
+
+ bool IsConsumedExpr(const PathDiagnosticLocation &L);
+
+ bool containsLocation(const PathDiagnosticLocation &Container,
+ const PathDiagnosticLocation &Containee);
+
+ PathDiagnosticLocation getContextLocation(const PathDiagnosticLocation &L);
+
+ PathDiagnosticLocation cleanUpLocation(PathDiagnosticLocation L,
+ bool firstCharOnly = false) {
+ if (const Stmt *S = L.asStmt()) {
+ const Stmt *Original = S;
+ while (1) {
+ // Adjust the location for some expressions that are best referenced
+ // by one of their subexpressions.
+ switch (S->getStmtClass()) {
+ default:
+ break;
+ case Stmt::ParenExprClass:
+ S = cast<ParenExpr>(S)->IgnoreParens();
+ firstCharOnly = true;
+ continue;
+ case Stmt::ConditionalOperatorClass:
+ S = cast<ConditionalOperator>(S)->getCond();
+ firstCharOnly = true;
+ continue;
+ case Stmt::ChooseExprClass:
+ S = cast<ChooseExpr>(S)->getCond();
+ firstCharOnly = true;
+ continue;
+ case Stmt::BinaryOperatorClass:
+ S = cast<BinaryOperator>(S)->getLHS();
+ firstCharOnly = true;
+ continue;
+ }
+
+ break;
+ }
+
+ if (S != Original)
+ L = PathDiagnosticLocation(S, L.getManager());
+ }
+
+ if (firstCharOnly)
+ L = PathDiagnosticLocation(L.asLocation());
+
+ return L;
+ }
+
+ void popLocation() {
+ if (!CLocs.back().isDead() && CLocs.back().asLocation().isFileID()) {
+      // For contexts, we only use the first character as the range.
+ rawAddEdge(cleanUpLocation(CLocs.back(), true));
+ }
+ CLocs.pop_back();
+ }
+
+ PathDiagnosticLocation IgnoreParens(const PathDiagnosticLocation &L);
+
+public:
+ EdgeBuilder(PathDiagnostic &pd, PathDiagnosticBuilder &pdb)
+ : PD(pd), PDB(pdb) {
+
+ // If the PathDiagnostic already has pieces, add the enclosing statement
+ // of the first piece as a context as well.
+ if (!PD.empty()) {
+ PrevLoc = PD.begin()->getLocation();
+
+ if (const Stmt *S = PrevLoc.asStmt())
+ addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
+ }
+ }
+
+ ~EdgeBuilder() {
+ while (!CLocs.empty()) popLocation();
+
+ // Finally, add an initial edge from the start location of the first
+ // statement (if it doesn't already exist).
+    // FIXME: Should handle CXXTryStmt if the analyzer starts supporting C++.
+ if (const CompoundStmt *CS =
+ PDB.getCodeDecl().getCompoundBody())
+ if (!CS->body_empty()) {
+ SourceLocation Loc = (*CS->body_begin())->getLocStart();
+ rawAddEdge(PathDiagnosticLocation(Loc, PDB.getSourceManager()));
+ }
+  }
+
+ void addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd = false);
+
+ void addEdge(const Stmt *S, bool alwaysAdd = false) {
+ addEdge(PathDiagnosticLocation(S, PDB.getSourceManager()), alwaysAdd);
+ }
+
+ void rawAddEdge(PathDiagnosticLocation NewLoc);
+
+ void addContext(const Stmt *S);
+ void addExtendedContext(const Stmt *S);
+};
+} // end anonymous namespace
+
+
+PathDiagnosticLocation
+EdgeBuilder::getContextLocation(const PathDiagnosticLocation &L) {
+ if (const Stmt *S = L.asStmt()) {
+ if (IsControlFlowExpr(S))
+ return L;
+
+ return PDB.getEnclosingStmtLocation(S);
+ }
+
+ return L;
+}
+
+bool EdgeBuilder::containsLocation(const PathDiagnosticLocation &Container,
+ const PathDiagnosticLocation &Containee) {
+
+ if (Container == Containee)
+ return true;
+
+ if (Container.asDecl())
+ return true;
+
+ if (const Stmt *S = Containee.asStmt())
+ if (const Stmt *ContainerS = Container.asStmt()) {
+ while (S) {
+ if (S == ContainerS)
+ return true;
+ S = PDB.getParent(S);
+ }
+ return false;
+ }
+
+ // Less accurate: compare using source ranges.
+ SourceRange ContainerR = Container.asRange();
+ SourceRange ContaineeR = Containee.asRange();
+
+ SourceManager &SM = PDB.getSourceManager();
+ SourceLocation ContainerRBeg = SM.getInstantiationLoc(ContainerR.getBegin());
+ SourceLocation ContainerREnd = SM.getInstantiationLoc(ContainerR.getEnd());
+ SourceLocation ContaineeRBeg = SM.getInstantiationLoc(ContaineeR.getBegin());
+ SourceLocation ContaineeREnd = SM.getInstantiationLoc(ContaineeR.getEnd());
+
+ unsigned ContainerBegLine = SM.getInstantiationLineNumber(ContainerRBeg);
+ unsigned ContainerEndLine = SM.getInstantiationLineNumber(ContainerREnd);
+ unsigned ContaineeBegLine = SM.getInstantiationLineNumber(ContaineeRBeg);
+ unsigned ContaineeEndLine = SM.getInstantiationLineNumber(ContaineeREnd);
+
+ assert(ContainerBegLine <= ContainerEndLine);
+ assert(ContaineeBegLine <= ContaineeEndLine);
+
+ return (ContainerBegLine <= ContaineeBegLine &&
+ ContainerEndLine >= ContaineeEndLine &&
+ (ContainerBegLine != ContaineeBegLine ||
+ SM.getInstantiationColumnNumber(ContainerRBeg) <=
+ SM.getInstantiationColumnNumber(ContaineeRBeg)) &&
+          (ContainerEndLine != ContaineeEndLine ||
+           SM.getInstantiationColumnNumber(ContainerREnd) >=
+           SM.getInstantiationColumnNumber(ContaineeREnd)));
+}
+
+PathDiagnosticLocation
+EdgeBuilder::IgnoreParens(const PathDiagnosticLocation &L) {
+ if (const Expr* E = dyn_cast_or_null<Expr>(L.asStmt()))
+ return PathDiagnosticLocation(E->IgnoreParenCasts(),
+ PDB.getSourceManager());
+ return L;
+}
+
+void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) {
+ if (!PrevLoc.isValid()) {
+ PrevLoc = NewLoc;
+ return;
+ }
+
+ const PathDiagnosticLocation &NewLocClean = cleanUpLocation(NewLoc);
+ const PathDiagnosticLocation &PrevLocClean = cleanUpLocation(PrevLoc);
+
+ if (NewLocClean.asLocation() == PrevLocClean.asLocation())
+ return;
+
+ // FIXME: Ignore intra-macro edges for now.
+ if (NewLocClean.asLocation().getInstantiationLoc() ==
+ PrevLocClean.asLocation().getInstantiationLoc())
+ return;
+
+ PD.push_front(new PathDiagnosticControlFlowPiece(NewLocClean, PrevLocClean));
+ PrevLoc = NewLoc;
+}
+
+void EdgeBuilder::addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd) {
+
+ if (!alwaysAdd && NewLoc.asLocation().isMacroID())
+ return;
+
+ const PathDiagnosticLocation &CLoc = getContextLocation(NewLoc);
+
+ while (!CLocs.empty()) {
+ ContextLocation &TopContextLoc = CLocs.back();
+
+ // Is the top location context the same as the one for the new location?
+ if (TopContextLoc == CLoc) {
+ if (alwaysAdd) {
+ if (IsConsumedExpr(TopContextLoc) &&
+ !IsControlFlowExpr(TopContextLoc.asStmt()))
+ TopContextLoc.markDead();
+
+ rawAddEdge(NewLoc);
+ }
+
+ return;
+ }
+
+ if (containsLocation(TopContextLoc, CLoc)) {
+ if (alwaysAdd) {
+ rawAddEdge(NewLoc);
+
+ if (IsConsumedExpr(CLoc) && !IsControlFlowExpr(CLoc.asStmt())) {
+ CLocs.push_back(ContextLocation(CLoc, true));
+ return;
+ }
+ }
+
+ CLocs.push_back(CLoc);
+ return;
+ }
+
+ // Context does not contain the location. Flush it.
+ popLocation();
+ }
+
+ // If we reach here, there is no enclosing context. Just add the edge.
+ rawAddEdge(NewLoc);
+}
+
+bool EdgeBuilder::IsConsumedExpr(const PathDiagnosticLocation &L) {
+ if (const Expr *X = dyn_cast_or_null<Expr>(L.asStmt()))
+ return PDB.getParentMap().isConsumedExpr(X) && !IsControlFlowExpr(X);
+
+ return false;
+}
+
+void EdgeBuilder::addExtendedContext(const Stmt *S) {
+ if (!S)
+ return;
+
+ const Stmt *Parent = PDB.getParent(S);
+ while (Parent) {
+ if (isa<CompoundStmt>(Parent))
+ Parent = PDB.getParent(Parent);
+ else
+ break;
+ }
+
+ if (Parent) {
+ switch (Parent->getStmtClass()) {
+ case Stmt::DoStmtClass:
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ addContext(Parent);
+ default:
+ break;
+ }
+ }
+
+ addContext(S);
+}
+
+void EdgeBuilder::addContext(const Stmt *S) {
+ if (!S)
+ return;
+
+ PathDiagnosticLocation L(S, PDB.getSourceManager());
+
+ while (!CLocs.empty()) {
+ const PathDiagnosticLocation &TopContextLoc = CLocs.back();
+
+ // Is the top location context the same as the one for the new location?
+ if (TopContextLoc == L)
+ return;
+
+ if (containsLocation(TopContextLoc, L)) {
+ CLocs.push_back(L);
+ return;
+ }
+
+ // Context does not contain the location. Flush it.
+ popLocation();
+ }
+
+ CLocs.push_back(L);
+}
+
+static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
+ PathDiagnosticBuilder &PDB,
+ const ExplodedNode *N) {
+ EdgeBuilder EB(PD, PDB);
+
+  const ExplodedNode* NextNode = GetPredecessorNode(N);
+ while (NextNode) {
+ N = NextNode;
+ NextNode = GetPredecessorNode(N);
+ ProgramPoint P = N->getLocation();
+
+ do {
+ // Block edges.
+ if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ const CFGBlock &Blk = *BE->getSrc();
+ const Stmt *Term = Blk.getTerminator();
+
+ // Are we jumping to the head of a loop? Add a special diagnostic.
+ if (const Stmt *Loop = BE->getDst()->getLoopTarget()) {
+ PathDiagnosticLocation L(Loop, PDB.getSourceManager());
+ const CompoundStmt *CS = NULL;
+
+ if (!Term) {
+ if (const ForStmt *FS = dyn_cast<ForStmt>(Loop))
+ CS = dyn_cast<CompoundStmt>(FS->getBody());
+ else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop))
+ CS = dyn_cast<CompoundStmt>(WS->getBody());
+ }
+
+ PathDiagnosticEventPiece *p =
+ new PathDiagnosticEventPiece(L,
+ "Looping back to the head of the loop");
+
+ EB.addEdge(p->getLocation(), true);
+ PD.push_front(p);
+
+ if (CS) {
+ PathDiagnosticLocation BL(CS->getRBracLoc(),
+ PDB.getSourceManager());
+ BL = PathDiagnosticLocation(BL.asLocation());
+ EB.addEdge(BL);
+ }
+ }
+
+ if (Term)
+ EB.addContext(Term);
+
+ break;
+ }
+
+ if (const BlockEntrance *BE = dyn_cast<BlockEntrance>(&P)) {
+ if (const Stmt* S = BE->getFirstStmt()) {
+ if (IsControlFlowExpr(S)) {
+ // Add the proper context for '&&', '||', and '?'.
+ EB.addContext(S);
+ }
+ else
+ EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
+ }
+
+ break;
+ }
+ } while (0);
+
+ if (!NextNode)
+ continue;
+
+ for (BugReporterContext::visitor_iterator I = PDB.visitor_begin(),
+ E = PDB.visitor_end(); I!=E; ++I) {
+ if (PathDiagnosticPiece* p = (*I)->VisitNode(N, NextNode, PDB)) {
+ const PathDiagnosticLocation &Loc = p->getLocation();
+ EB.addEdge(Loc, true);
+ PD.push_front(p);
+ if (const Stmt *S = Loc.asStmt())
+ EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
+ }
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for BugType and subclasses.
+//===----------------------------------------------------------------------===//
+BugType::~BugType() {
+ // Free up the equivalence class objects. Observe that we get a pointer to
+ // the object first before incrementing the iterator, as destroying the
+ // node before doing so means we will read from freed memory.
+ for (iterator I = begin(), E = end(); I !=E; ) {
+ BugReportEquivClass *EQ = &*I;
+ ++I;
+ delete EQ;
+ }
+}
+void BugType::FlushReports(BugReporter &BR) {}
+
+//===----------------------------------------------------------------------===//
+// Methods for BugReport and subclasses.
+//===----------------------------------------------------------------------===//
+BugReport::~BugReport() {}
+RangedBugReport::~RangedBugReport() {}
+
+const Stmt* BugReport::getStmt() const {
+ ProgramPoint ProgP = EndNode->getLocation();
+ const Stmt *S = NULL;
+
+ if (BlockEntrance* BE = dyn_cast<BlockEntrance>(&ProgP)) {
+ CFGBlock &Exit = ProgP.getLocationContext()->getCFG()->getExit();
+ if (BE->getBlock() == &Exit)
+ S = GetPreviousStmt(EndNode);
+ }
+ if (!S)
+ S = GetStmt(ProgP);
+
+ return S;
+}
+
+PathDiagnosticPiece*
+BugReport::getEndPath(BugReporterContext& BRC,
+ const ExplodedNode* EndPathNode) {
+
+ const Stmt* S = getStmt();
+
+ if (!S)
+ return NULL;
+
+ const SourceRange *Beg, *End;
+ getRanges(Beg, End);
+ PathDiagnosticLocation L(S, BRC.getSourceManager());
+
+ // Only add the statement itself as a range if we didn't specify any
+ // special ranges for this report.
+ PathDiagnosticPiece* P = new PathDiagnosticEventPiece(L, getDescription(),
+ Beg == End);
+
+ for (; Beg != End; ++Beg)
+ P->addRange(*Beg);
+
+ return P;
+}
+
+void BugReport::getRanges(const SourceRange*& beg, const SourceRange*& end) {
+ if (const Expr* E = dyn_cast_or_null<Expr>(getStmt())) {
+ R = E->getSourceRange();
+ assert(R.isValid());
+ beg = &R;
+ end = beg+1;
+ }
+ else
+ beg = end = 0;
+}
+
+SourceLocation BugReport::getLocation() const {
+ if (EndNode)
+ if (const Stmt* S = GetCurrentOrPreviousStmt(EndNode)) {
+ // For member expressions, return the location of the '.' or '->'.
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(S))
+ return ME->getMemberLoc();
+ // For binary operators, return the location of the operator.
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(S))
+ return B->getOperatorLoc();
+
+ return S->getLocStart();
+ }
+
+ return FullSourceLoc();
+}
+
+PathDiagnosticPiece* BugReport::VisitNode(const ExplodedNode* N,
+ const ExplodedNode* PrevN,
+ BugReporterContext &BRC) {
+ return NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for BugReporter and subclasses.
+//===----------------------------------------------------------------------===//
+
+BugReportEquivClass::~BugReportEquivClass() {
+ for (iterator I=begin(), E=end(); I!=E; ++I) delete *I;
+}
+
+GRBugReporter::~GRBugReporter() { }
+BugReporterData::~BugReporterData() {}
+
+ExplodedGraph &GRBugReporter::getGraph() { return Eng.getGraph(); }
+
+GRStateManager&
+GRBugReporter::getStateManager() { return Eng.getStateManager(); }
+
+BugReporter::~BugReporter() { FlushReports(); }
+
+void BugReporter::FlushReports() {
+ if (BugTypes.isEmpty())
+ return;
+
+ // First flush the warnings for each BugType. This may end up creating new
+ // warnings and new BugTypes. Because ImmutableSet is a functional data
+ // structure, we do not need to worry about the iterators being invalidated.
+ for (BugTypesTy::iterator I=BugTypes.begin(), E=BugTypes.end(); I!=E; ++I)
+ const_cast<BugType*>(*I)->FlushReports(*this);
+
+ // Iterate through BugTypes a second time. BugTypes may have been updated
+ // with new BugType objects and new warnings.
+ for (BugTypesTy::iterator I=BugTypes.begin(), E=BugTypes.end(); I!=E; ++I) {
+ BugType *BT = const_cast<BugType*>(*I);
+
+ typedef llvm::FoldingSet<BugReportEquivClass> SetTy;
+ SetTy& EQClasses = BT->EQClasses;
+
+ for (SetTy::iterator EI=EQClasses.begin(), EE=EQClasses.end(); EI!=EE;++EI){
+ BugReportEquivClass& EQ = *EI;
+ FlushReport(EQ);
+ }
+
+ // Delete the BugType object.
+ delete BT;
+ }
+
+ // Remove all references to the BugType objects.
+ BugTypes = F.GetEmptySet();
+}
+
+//===----------------------------------------------------------------------===//
+// PathDiagnostics generation.
+//===----------------------------------------------------------------------===//
+
+static std::pair<std::pair<ExplodedGraph*, NodeBackMap*>,
+ std::pair<ExplodedNode*, unsigned> >
+MakeReportGraph(const ExplodedGraph* G,
+ const ExplodedNode** NStart,
+ const ExplodedNode** NEnd) {
+
+ // Create the trimmed graph. It will contain the shortest paths from the
+ // error nodes to the root. In the new graph we should only have one
+ // error node unless there are two or more error nodes with the same minimum
+ // path length.
+ ExplodedGraph* GTrim;
+ InterExplodedGraphMap* NMap;
+
+ llvm::DenseMap<const void*, const void*> InverseMap;
+ llvm::tie(GTrim, NMap) = G->Trim(NStart, NEnd, &InverseMap);
+
+ // Create owning pointers for GTrim and NMap just to ensure that they are
+  // released when this function exits.
+ llvm::OwningPtr<ExplodedGraph> AutoReleaseGTrim(GTrim);
+ llvm::OwningPtr<InterExplodedGraphMap> AutoReleaseNMap(NMap);
+
+ // Find the (first) error node in the trimmed graph. We just need to consult
+ // the node map (NMap) which maps from nodes in the original graph to nodes
+ // in the new graph.
+
+ std::queue<const ExplodedNode*> WS;
+ typedef llvm::DenseMap<const ExplodedNode*, unsigned> IndexMapTy;
+ IndexMapTy IndexMap;
+
+ for (const ExplodedNode** I = NStart; I != NEnd; ++I)
+ if (const ExplodedNode *N = NMap->getMappedNode(*I)) {
+      unsigned NodeIndex = I - NStart; // pointer difference is already an
+                                       // element count; do not scale it
+ WS.push(N);
+ IndexMap[*I] = NodeIndex;
+ }
+
+ assert(!WS.empty() && "No error node found in the trimmed graph.");
+
+ // Create a new (third!) graph with a single path. This is the graph
+ // that will be returned to the caller.
+ ExplodedGraph *GNew = new ExplodedGraph(GTrim->getContext());
+
+ // Sometimes the trimmed graph can contain a cycle. Perform a reverse BFS
+ // to the root node, and then construct a new graph that contains only
+ // a single path.
+ llvm::DenseMap<const void*,unsigned> Visited;
+
+ unsigned cnt = 0;
+ const ExplodedNode* Root = 0;
+
+ while (!WS.empty()) {
+ const ExplodedNode* Node = WS.front();
+ WS.pop();
+
+ if (Visited.find(Node) != Visited.end())
+ continue;
+
+ Visited[Node] = cnt++;
+
+ if (Node->pred_empty()) {
+ Root = Node;
+ break;
+ }
+
+ for (ExplodedNode::const_pred_iterator I=Node->pred_begin(),
+ E=Node->pred_end(); I!=E; ++I)
+ WS.push(*I);
+ }
+
+ assert(Root);
+
+ // Now walk from the root down the BFS path, always taking the successor
+ // with the lowest number.
+ ExplodedNode *Last = 0, *First = 0;
+ NodeBackMap *BM = new NodeBackMap();
+ unsigned NodeIndex = 0;
+
+ for ( const ExplodedNode *N = Root ;;) {
+ // Lookup the number associated with the current node.
+ llvm::DenseMap<const void*,unsigned>::iterator I = Visited.find(N);
+ assert(I != Visited.end());
+
+ // Create the equivalent node in the new graph with the same state
+ // and location.
+ ExplodedNode* NewN = GNew->getNode(N->getLocation(), N->getState());
+
+ // Store the mapping to the original node.
+ llvm::DenseMap<const void*, const void*>::iterator IMitr=InverseMap.find(N);
+ assert(IMitr != InverseMap.end() && "No mapping to original node.");
+ (*BM)[NewN] = (const ExplodedNode*) IMitr->second;
+
+ // Link up the new node with the previous node.
+ if (Last)
+ NewN->addPredecessor(Last, *GNew);
+
+ Last = NewN;
+
+ // Are we at the final node?
+ IndexMapTy::iterator IMI =
+ IndexMap.find((const ExplodedNode*)(IMitr->second));
+ if (IMI != IndexMap.end()) {
+ First = NewN;
+ NodeIndex = IMI->second;
+ break;
+ }
+
+    // Find the next successor node. We choose the node that is marked
+    // with the lowest BFS number.
+ ExplodedNode::const_succ_iterator SI = N->succ_begin();
+ ExplodedNode::const_succ_iterator SE = N->succ_end();
+ N = 0;
+
+ for (unsigned MinVal = 0; SI != SE; ++SI) {
+
+ I = Visited.find(*SI);
+
+ if (I == Visited.end())
+ continue;
+
+ if (!N || I->second < MinVal) {
+ N = *SI;
+ MinVal = I->second;
+ }
+ }
+
+ assert(N);
+ }
+
+ assert(First);
+
+ return std::make_pair(std::make_pair(GNew, BM),
+ std::make_pair(First, NodeIndex));
+}
+
+/// CompactPathDiagnostic - This function postprocesses a PathDiagnostic object
+/// and collapses PathDiagosticPieces that are expanded by macros.
+static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM) {
+ typedef std::vector<std::pair<PathDiagnosticMacroPiece*, SourceLocation> >
+ MacroStackTy;
+
+ typedef std::vector<PathDiagnosticPiece*>
+ PiecesTy;
+
+ MacroStackTy MacroStack;
+ PiecesTy Pieces;
+
+ for (PathDiagnostic::iterator I = PD.begin(), E = PD.end(); I!=E; ++I) {
+ // Get the location of the PathDiagnosticPiece.
+ const FullSourceLoc Loc = I->getLocation().asLocation();
+
+    // Determine the instantiation location, which is the location by which
+    // we group related PathDiagnosticPieces.
+ SourceLocation InstantiationLoc = Loc.isMacroID() ?
+ SM.getInstantiationLoc(Loc) :
+ SourceLocation();
+
+ if (Loc.isFileID()) {
+ MacroStack.clear();
+ Pieces.push_back(&*I);
+ continue;
+ }
+
+ assert(Loc.isMacroID());
+
+ // Is the PathDiagnosticPiece within the same macro group?
+ if (!MacroStack.empty() && InstantiationLoc == MacroStack.back().second) {
+ MacroStack.back().first->push_back(&*I);
+ continue;
+ }
+
+    // We aren't in the same group. Are we descending into a new macro
+    // or are we part of an old one?
+ PathDiagnosticMacroPiece *MacroGroup = 0;
+
+ SourceLocation ParentInstantiationLoc = InstantiationLoc.isMacroID() ?
+ SM.getInstantiationLoc(Loc) :
+ SourceLocation();
+
+ // Walk the entire macro stack.
+ while (!MacroStack.empty()) {
+ if (InstantiationLoc == MacroStack.back().second) {
+ MacroGroup = MacroStack.back().first;
+ break;
+ }
+
+ if (ParentInstantiationLoc == MacroStack.back().second) {
+ MacroGroup = MacroStack.back().first;
+ break;
+ }
+
+ MacroStack.pop_back();
+ }
+
+ if (!MacroGroup || ParentInstantiationLoc == MacroStack.back().second) {
+ // Create a new macro group and add it to the stack.
+ PathDiagnosticMacroPiece *NewGroup = new PathDiagnosticMacroPiece(Loc);
+
+ if (MacroGroup)
+ MacroGroup->push_back(NewGroup);
+ else {
+ assert(InstantiationLoc.isFileID());
+ Pieces.push_back(NewGroup);
+ }
+
+ MacroGroup = NewGroup;
+ MacroStack.push_back(std::make_pair(MacroGroup, InstantiationLoc));
+ }
+
+ // Finally, add the PathDiagnosticPiece to the group.
+ MacroGroup->push_back(&*I);
+ }
+
+ // Now take the pieces and construct a new PathDiagnostic.
+ PD.resetPath(false);
+
+ for (PiecesTy::iterator I=Pieces.begin(), E=Pieces.end(); I!=E; ++I) {
+ if (PathDiagnosticMacroPiece *MP=dyn_cast<PathDiagnosticMacroPiece>(*I))
+ if (!MP->containsEvent()) {
+ delete MP;
+ continue;
+ }
+
+ PD.push_back(*I);
+ }
+}
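+
+// For example (illustrative): if the second and third pieces of a path both
+// arise from the same expansion of a macro FOO, the pass above folds them
+// into a single PathDiagnosticMacroPiece:
+//
+//   before: piece1, piece2 (in FOO), piece3 (in FOO), piece4
+//   after:  piece1, macro-piece(FOO) { piece2, piece3 }, piece4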
+
+void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
+ BugReportEquivClass& EQ) {
+
+ std::vector<const ExplodedNode*> Nodes;
+
+ for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I) {
+ const ExplodedNode* N = I->getEndNode();
+ if (N) Nodes.push_back(N);
+ }
+
+ if (Nodes.empty())
+ return;
+
+ // Construct a new graph that contains only a single path from the error
+ // node to a root.
+ const std::pair<std::pair<ExplodedGraph*, NodeBackMap*>,
+ std::pair<ExplodedNode*, unsigned> >&
+ GPair = MakeReportGraph(&getGraph(), &Nodes[0], &Nodes[0] + Nodes.size());
+
+ // Find the BugReport with the original location.
+ BugReport *R = 0;
+ unsigned i = 0;
+ for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I, ++i)
+ if (i == GPair.second.second) { R = *I; break; }
+
+ assert(R && "No original report found for sliced graph.");
+
+ llvm::OwningPtr<ExplodedGraph> ReportGraph(GPair.first.first);
+ llvm::OwningPtr<NodeBackMap> BackMap(GPair.first.second);
+ const ExplodedNode *N = GPair.second.first;
+
+ // Start building the path diagnostic...
+ PathDiagnosticBuilder PDB(*this, R, BackMap.get(), getPathDiagnosticClient());
+
+ if (PathDiagnosticPiece* Piece = R->getEndPath(PDB, N))
+ PD.push_back(Piece);
+ else
+ return;
+
+ // Register node visitors.
+ R->registerInitialVisitors(PDB, N);
+ bugreporter::registerNilReceiverVisitor(PDB);
+
+ switch (PDB.getGenerationScheme()) {
+ case PathDiagnosticClient::Extensive:
+ GenerateExtensivePathDiagnostic(PD, PDB, N);
+ break;
+ case PathDiagnosticClient::Minimal:
+ GenerateMinimalPathDiagnostic(PD, PDB, N);
+ break;
+ }
+}
+
+void BugReporter::Register(BugType *BT) {
+ BugTypes = F.Add(BugTypes, BT);
+}
+
+void BugReporter::EmitReport(BugReport* R) {
+ // Compute the bug report's hash to determine its equivalence class.
+ llvm::FoldingSetNodeID ID;
+ R->Profile(ID);
+
+  // Look up the equivalence class. If there isn't one, create it.
+ BugType& BT = R->getBugType();
+ Register(&BT);
+ void *InsertPos;
+ BugReportEquivClass* EQ = BT.EQClasses.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!EQ) {
+ EQ = new BugReportEquivClass(R);
+ BT.EQClasses.InsertNode(EQ, InsertPos);
+ }
+ else
+ EQ->AddReport(R);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Emitting reports in equivalence classes.
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct FRIEC_WLItem {
+ const ExplodedNode *N;
+ ExplodedNode::const_succ_iterator I, E;
+
+ FRIEC_WLItem(const ExplodedNode *n)
+ : N(n), I(N->succ_begin()), E(N->succ_end()) {}
+};
+}
+
+static BugReport *FindReportInEquivalenceClass(BugReportEquivClass& EQ) {
+ BugReportEquivClass::iterator I = EQ.begin(), E = EQ.end();
+ assert(I != E);
+ BugReport *R = *I;
+ BugType& BT = R->getBugType();
+
+ if (!BT.isSuppressOnSink())
+ return R;
+
+ // For bug reports that should be suppressed when all paths are post-dominated
+ // by a sink node, iterate through the reports in the equivalence class
+ // until we find one that isn't post-dominated (if one exists). We use a
+ // DFS traversal of the ExplodedGraph to find a non-sink node. We could write
+ // this as a recursive function, but we don't want to risk blowing out the
+ // stack for very long paths.
+ for (; I != E; ++I) {
+ R = *I;
+ const ExplodedNode *N = R->getEndNode();
+
+ if (!N)
+ continue;
+
+ if (N->isSink()) {
+      assert(false &&
+      "BugType::isSuppressOnSink() should not be 'true' for sink end nodes");
+ return R;
+ }
+
+ if (N->succ_empty())
+ return R;
+
+ // At this point we know that 'N' is not a sink and it has at least one
+ // successor. Use a DFS worklist to find a non-sink end-of-path node.
+ typedef FRIEC_WLItem WLItem;
+ typedef llvm::SmallVector<WLItem, 10> DFSWorkList;
+ llvm::DenseMap<const ExplodedNode *, unsigned> Visited;
+
+ DFSWorkList WL;
+ WL.push_back(N);
+ Visited[N] = 1;
+
+ while (!WL.empty()) {
+ WLItem &WI = WL.back();
+ assert(!WI.N->succ_empty());
+
+ for (; WI.I != WI.E; ++WI.I) {
+ const ExplodedNode *Succ = *WI.I;
+ // End-of-path node?
+ if (Succ->succ_empty()) {
+ // If we found an end-of-path node that is not a sink, then return
+ // this report.
+ if (!Succ->isSink())
+ return R;
+
+ // Found a sink? Continue on to the next successor.
+ continue;
+ }
+
+ // Mark the successor as visited. If it hasn't been explored,
+ // enqueue it to the DFS worklist.
+ unsigned &mark = Visited[Succ];
+ if (!mark) {
+ mark = 1;
+ WL.push_back(Succ);
+ break;
+ }
+ }
+
+ if (&WL.back() == &WI)
+ WL.pop_back();
+ }
+ }
+
+ // If we reach here, the end nodes for all reports in the equivalence
+ // class are post-dominated by a sink node.
+ return NULL;
+}
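+
+// Note the pop condition above: a worklist item is popped only if it is
+// still the top of the stack after scanning its successors, i.e. only if
+// no new item was pushed. Stripped to its shape, the iterative DFS reads
+// (illustrative sketch):
+//
+//   while (!WL.empty()) {
+//     WLItem &WI = WL.back();
+//     bool pushed = false;
+//     for (; WI.I != WI.E; ++WI.I) {
+//       const ExplodedNode *Succ = *WI.I;
+//       if (!Visited[Succ]) {          // unexplored successor: descend
+//         Visited[Succ] = 1;
+//         WL.push_back(Succ);
+//         pushed = true;
+//         break;
+//       }
+//     }
+//     if (!pushed) WL.pop_back();      // all successors explored: retreat
+//   }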
+
+
+//===----------------------------------------------------------------------===//
+// DiagnosticCache. This is a hack to cache analyzer diagnostics. It
+// uses global state, which eventually should go elsewhere.
+//===----------------------------------------------------------------------===//
+namespace {
+class DiagCacheItem : public llvm::FoldingSetNode {
+ llvm::FoldingSetNodeID ID;
+public:
+ DiagCacheItem(BugReport *R, PathDiagnostic *PD) {
+ ID.AddString(R->getBugType().getName());
+ ID.AddString(R->getBugType().getCategory());
+ ID.AddString(R->getDescription());
+ ID.AddInteger(R->getLocation().getRawEncoding());
+ PD->Profile(ID);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &id) {
+ id = ID;
+ }
+
+ llvm::FoldingSetNodeID &getID() { return ID; }
+};
+}
+
+static bool IsCachedDiagnostic(BugReport *R, PathDiagnostic *PD) {
+ // FIXME: Eventually this diagnostic cache should reside in something
+ // like AnalysisManager instead of being a static variable. This is
+ // really unsafe in the long term.
+ typedef llvm::FoldingSet<DiagCacheItem> DiagnosticCache;
+ static DiagnosticCache DC;
+
+ void *InsertPos;
+ DiagCacheItem *Item = new DiagCacheItem(R, PD);
+
+ if (DC.FindNodeOrInsertPos(Item->getID(), InsertPos)) {
+ delete Item;
+ return true;
+ }
+
+ DC.InsertNode(Item, InsertPos);
+ return false;
+}
+
+void BugReporter::FlushReport(BugReportEquivClass& EQ) {
+ BugReport *R = FindReportInEquivalenceClass(EQ);
+
+ if (!R)
+ return;
+
+ PathDiagnosticClient* PD = getPathDiagnosticClient();
+
+ // FIXME: Make sure we use the 'R' for the path that was actually used.
+ // Probably doesn't make a difference in practice.
+ BugType& BT = R->getBugType();
+
+ llvm::OwningPtr<PathDiagnostic>
+ D(new PathDiagnostic(R->getBugType().getName(),
+ !PD || PD->useVerboseDescription()
+ ? R->getDescription() : R->getShortDescription(),
+ BT.getCategory()));
+
+ GeneratePathDiagnostic(*D.get(), EQ);
+
+ if (IsCachedDiagnostic(R, D.get()))
+ return;
+
+ // Get the meta data.
+ std::pair<const char**, const char**> Meta = R->getExtraDescriptiveText();
+ for (const char** s = Meta.first; s != Meta.second; ++s)
+ D->addMeta(*s);
+
+ // Emit a summary diagnostic to the regular Diagnostics engine.
+ const SourceRange *Beg = 0, *End = 0;
+ R->getRanges(Beg, End);
+ Diagnostic& Diag = getDiagnostic();
+ FullSourceLoc L(R->getLocation(), getSourceManager());
+
+  // Search the description for '%', as that will be interpreted as a
+ // format character by FormatDiagnostics.
+ llvm::StringRef desc = R->getShortDescription();
+ unsigned ErrorDiag;
+ {
+ llvm::SmallString<512> TmpStr;
+ llvm::raw_svector_ostream Out(TmpStr);
+ for (llvm::StringRef::iterator I=desc.begin(), E=desc.end(); I!=E; ++I)
+ if (*I == '%')
+ Out << "%%";
+ else
+ Out << *I;
+
+ Out.flush();
+ ErrorDiag = Diag.getCustomDiagID(Diagnostic::Warning, TmpStr);
+ }
+
+ switch (End-Beg) {
+ default: assert(0 && "Don't handle this many ranges yet!");
+ case 0: Diag.Report(L, ErrorDiag); break;
+ case 1: Diag.Report(L, ErrorDiag) << Beg[0]; break;
+ case 2: Diag.Report(L, ErrorDiag) << Beg[0] << Beg[1]; break;
+ case 3: Diag.Report(L, ErrorDiag) << Beg[0] << Beg[1] << Beg[2]; break;
+ }
+
+ // Emit a full diagnostic for the path if we have a PathDiagnosticClient.
+ if (!PD)
+ return;
+
+ if (D->empty()) {
+ PathDiagnosticPiece* piece =
+ new PathDiagnosticEventPiece(L, R->getDescription());
+
+ for ( ; Beg != End; ++Beg) piece->addRange(*Beg);
+ D->push_back(piece);
+ }
+
+ PD->HandlePathDiagnostic(D.take());
+}
+
+void BugReporter::EmitBasicReport(llvm::StringRef name, llvm::StringRef str,
+ SourceLocation Loc,
+ SourceRange* RBeg, unsigned NumRanges) {
+ EmitBasicReport(name, "", str, Loc, RBeg, NumRanges);
+}
+
+void BugReporter::EmitBasicReport(llvm::StringRef name,
+ llvm::StringRef category,
+ llvm::StringRef str, SourceLocation Loc,
+ SourceRange* RBeg, unsigned NumRanges) {
+
+ // 'BT' will be owned by BugReporter as soon as we call 'EmitReport'.
+ BugType *BT = new BugType(name, category);
+ FullSourceLoc L = getContext().getFullLoc(Loc);
+ RangedBugReport *R = new DiagBugReport(*BT, str, L);
+ for ( ; NumRanges > 0 ; --NumRanges, ++RBeg) R->addRange(*RBeg);
+ EmitReport(R);
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/BugReporterVisitors.cpp b/contrib/llvm/tools/clang/lib/Checker/BugReporterVisitors.cpp
new file mode 100644
index 0000000..776e12b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/BugReporterVisitors.cpp
@@ -0,0 +1,423 @@
+// BugReporterVisitors.cpp - Helpers for reporting bugs -----------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of BugReporter "visitors" which can be used to
+// enhance the diagnostics reported for a bug.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/BugReporter/PathDiagnostic.h"
+#include "clang/Checker/PathSensitive/ExplodedGraph.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+const Stmt *clang::bugreporter::GetDerefExpr(const ExplodedNode *N) {
+ // Pattern match for a few useful cases (do something smarter later):
+ // a[0], p->f, *p
+ const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+
+ if (const UnaryOperator *U = dyn_cast<UnaryOperator>(S)) {
+ if (U->getOpcode() == UnaryOperator::Deref)
+ return U->getSubExpr()->IgnoreParenCasts();
+ }
+ else if (const MemberExpr *ME = dyn_cast<MemberExpr>(S)) {
+ return ME->getBase()->IgnoreParenCasts();
+ }
+ else if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(S)) {
+ // Retrieve the base for arrays since BasicStoreManager doesn't know how
+ // to reason about them.
+ return AE->getBase();
+ }
+
+ return NULL;
+}
+
+const Stmt*
+clang::bugreporter::GetDenomExpr(const ExplodedNode *N) {
+ const Stmt *S = N->getLocationAs<PreStmt>()->getStmt();
+ if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(S))
+ return BE->getRHS();
+ return NULL;
+}
+
+const Stmt*
+clang::bugreporter::GetCalleeExpr(const ExplodedNode *N) {
+ // Callee is checked as a PreVisit to the CallExpr.
+ const Stmt *S = N->getLocationAs<PreStmt>()->getStmt();
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S))
+ return CE->getCallee();
+ return NULL;
+}
+
+const Stmt*
+clang::bugreporter::GetRetValExpr(const ExplodedNode *N) {
+ const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+ if (const ReturnStmt *RS = dyn_cast<ReturnStmt>(S))
+ return RS->getRetValue();
+ return NULL;
+}
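+
+// Illustrative inputs and results for the matchers above:
+//
+//   GetDerefExpr:  *p   -> p   (sub-expression of the dereference)
+//                  p->f -> p   (base of the member access)
+//                  a[0] -> a   (base of the array subscript)
+//   GetDenomExpr:  x / y     -> y   (RHS of the binary operator)
+//   GetCalleeExpr: f(a, b)   -> f
+//   GetRetValExpr: return v; -> v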
+
+//===----------------------------------------------------------------------===//
+// Definitions for bug reporter visitors.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class FindLastStoreBRVisitor : public BugReporterVisitor {
+ const MemRegion *R;
+ SVal V;
+ bool satisfied;
+ const ExplodedNode *StoreSite;
+public:
+ FindLastStoreBRVisitor(SVal v, const MemRegion *r)
+ : R(r), V(v), satisfied(false), StoreSite(0) {}
+
+ virtual void Profile(llvm::FoldingSetNodeID &ID) const {
+ static int tag = 0;
+ ID.AddPointer(&tag);
+ ID.AddPointer(R);
+ ID.Add(V);
+ }
+
+ PathDiagnosticPiece* VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext& BRC) {
+
+ if (satisfied)
+ return NULL;
+
+ if (!StoreSite) {
+ const ExplodedNode *Node = N, *Last = NULL;
+
+ for ( ; Node ; Last = Node, Node = Node->getFirstPred()) {
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ if (const PostStmt *P = Node->getLocationAs<PostStmt>())
+ if (const DeclStmt *DS = P->getStmtAs<DeclStmt>())
+ if (DS->getSingleDecl() == VR->getDecl()) {
+ Last = Node;
+ break;
+ }
+ }
+
+ if (Node->getState()->getSVal(R) != V)
+ break;
+ }
+
+ if (!Node || !Last) {
+ satisfied = true;
+ return NULL;
+ }
+
+ StoreSite = Last;
+ }
+
+ if (StoreSite != N)
+ return NULL;
+
+ satisfied = true;
+ llvm::SmallString<256> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+
+ if (const PostStmt *PS = N->getLocationAs<PostStmt>()) {
+ if (const DeclStmt *DS = PS->getStmtAs<DeclStmt>()) {
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ os << "Variable '" << VR->getDecl() << "' ";
+ }
+ else
+ return NULL;
+
+ if (isa<loc::ConcreteInt>(V)) {
+ bool b = false;
+ ASTContext &C = BRC.getASTContext();
+ if (R->isBoundable()) {
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
+ if (TR->getValueType(C)->isObjCObjectPointerType()) {
+ os << "initialized to nil";
+ b = true;
+ }
+ }
+ }
+
+ if (!b)
+ os << "initialized to a null pointer value";
+ }
+ else if (isa<nonloc::ConcreteInt>(V)) {
+ os << "initialized to " << cast<nonloc::ConcreteInt>(V).getValue();
+ }
+ else if (V.isUndef()) {
+ if (isa<VarRegion>(R)) {
+ const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
+ if (VD->getInit())
+ os << "initialized to a garbage value";
+ else
+ os << "declared without an initial value";
+ }
+ }
+ }
+ }
+
+ if (os.str().empty()) {
+ if (isa<loc::ConcreteInt>(V)) {
+ bool b = false;
+ ASTContext &C = BRC.getASTContext();
+ if (R->isBoundable()) {
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
+ if (TR->getValueType(C)->isObjCObjectPointerType()) {
+ os << "nil object reference stored to ";
+ b = true;
+ }
+ }
+ }
+
+ if (!b)
+ os << "Null pointer value stored to ";
+ }
+ else if (V.isUndef()) {
+ os << "Uninitialized value stored to ";
+ }
+ else if (isa<nonloc::ConcreteInt>(V)) {
+ os << "The value " << cast<nonloc::ConcreteInt>(V).getValue()
+ << " is assigned to ";
+ }
+ else
+ return NULL;
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ os << '\'' << VR->getDecl() << '\'';
+ }
+ else
+ return NULL;
+ }
+
+ // FIXME: Refactor this into BugReporterContext.
+ const Stmt *S = 0;
+ ProgramPoint P = N->getLocation();
+
+ if (BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ CFGBlock *BSrc = BE->getSrc();
+ S = BSrc->getTerminatorCondition();
+ }
+ else if (PostStmt *PS = dyn_cast<PostStmt>(&P)) {
+ S = PS->getStmt();
+ }
+
+ if (!S)
+ return NULL;
+
+ // Construct a new PathDiagnosticPiece.
+ PathDiagnosticLocation L(S, BRC.getSourceManager());
+ return new PathDiagnosticEventPiece(L, os.str());
+ }
+};
+
+
+static void registerFindLastStore(BugReporterContext& BRC, const MemRegion *R,
+ SVal V) {
+ BRC.addVisitor(new FindLastStoreBRVisitor(V, R));
+}
+
+class TrackConstraintBRVisitor : public BugReporterVisitor {
+ DefinedSVal Constraint;
+ const bool Assumption;
+ bool isSatisfied;
+public:
+ TrackConstraintBRVisitor(DefinedSVal constraint, bool assumption)
+ : Constraint(constraint), Assumption(assumption), isSatisfied(false) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ static int tag = 0;
+ ID.AddPointer(&tag);
+ ID.AddBoolean(Assumption);
+ ID.Add(Constraint);
+ }
+
+ PathDiagnosticPiece* VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext& BRC) {
+ if (isSatisfied)
+ return NULL;
+
+ // Check if in the previous state it was feasible for this constraint
+ // to *not* be true.
+ if (PrevN->getState()->Assume(Constraint, !Assumption)) {
+
+ isSatisfied = true;
+
+ // As a sanity check, make sure that the negation of the constraint
+ // was infeasible in the current state. If it is feasible, we somehow
+ // missed the transition point.
+ if (N->getState()->Assume(Constraint, !Assumption))
+ return NULL;
+
+ // We found the transition point for the constraint. We now need to
+ // pretty-print the constraint. (work-in-progress)
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ if (isa<Loc>(Constraint)) {
+ os << "Assuming pointer value is ";
+ os << (Assumption ? "non-null" : "null");
+ }
+
+ if (os.str().empty())
+ return NULL;
+
+ // FIXME: Refactor this into BugReporterContext.
+ const Stmt *S = 0;
+ ProgramPoint P = N->getLocation();
+
+ if (BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ CFGBlock *BSrc = BE->getSrc();
+ S = BSrc->getTerminatorCondition();
+ }
+ else if (PostStmt *PS = dyn_cast<PostStmt>(&P)) {
+ S = PS->getStmt();
+ }
+
+ if (!S)
+ return NULL;
+
+ // Construct a new PathDiagnosticPiece.
+ PathDiagnosticLocation L(S, BRC.getSourceManager());
+ return new PathDiagnosticEventPiece(L, os.str());
+ }
+
+ return NULL;
+ }
+};
+} // end anonymous namespace
+
+static void registerTrackConstraint(BugReporterContext& BRC,
+ DefinedSVal Constraint,
+ bool Assumption) {
+ BRC.addVisitor(new TrackConstraintBRVisitor(Constraint, Assumption));
+}
+
+void clang::bugreporter::registerTrackNullOrUndefValue(BugReporterContext& BRC,
+ const void *data,
+ const ExplodedNode* N) {
+
+ const Stmt *S = static_cast<const Stmt*>(data);
+
+ if (!S)
+ return;
+
+ GRStateManager &StateMgr = BRC.getStateManager();
+ const GRState *state = N->getState();
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ const VarRegion *R =
+ StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext());
+
+ // What did we load?
+ SVal V = state->getSVal(S);
+
+ if (isa<loc::ConcreteInt>(V) || isa<nonloc::ConcreteInt>(V)
+ || V.isUndef()) {
+ ::registerFindLastStore(BRC, R, V);
+ }
+ }
+ }
+
+ SVal V = state->getSValAsScalarOrLoc(S);
+
+ // Uncomment this to find cases where we aren't properly getting the
+ // base value that was dereferenced.
+ // assert(!V.isUnknownOrUndef());
+
+ // Is it a symbolic value?
+ if (loc::MemRegionVal *L = dyn_cast<loc::MemRegionVal>(&V)) {
+ const SubRegion *R = cast<SubRegion>(L->getRegion());
+ while (R && !isa<SymbolicRegion>(R)) {
+ R = dyn_cast<SubRegion>(R->getSuperRegion());
+ }
+
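+    // E.g. for a pointer value like '&p->f' the region is a FieldRegion
+    // layered on the SymbolicRegion for 'p'; the loop above strips the
+    // non-symbolic layers until it reaches the symbol (or falls off the
+    // top of the region hierarchy).
+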
+ if (R) {
+ assert(isa<SymbolicRegion>(R));
+ registerTrackConstraint(BRC, loc::MemRegionVal(R), false);
+ }
+ }
+}
+
+void clang::bugreporter::registerFindLastStore(BugReporterContext& BRC,
+ const void *data,
+ const ExplodedNode* N) {
+
+ const MemRegion *R = static_cast<const MemRegion*>(data);
+
+ if (!R)
+ return;
+
+ const GRState *state = N->getState();
+ SVal V = state->getSVal(R);
+
+ if (V.isUnknown())
+ return;
+
+ BRC.addVisitor(new FindLastStoreBRVisitor(V, R));
+}
+
+
+namespace {
+class NilReceiverVisitor : public BugReporterVisitor {
+public:
+ NilReceiverVisitor() {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ static int x = 0;
+ ID.AddPointer(&x);
+ }
+
+ PathDiagnosticPiece* VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext& BRC) {
+
+ const PostStmt *P = N->getLocationAs<PostStmt>();
+ if (!P)
+ return 0;
+ const ObjCMessageExpr *ME = P->getStmtAs<ObjCMessageExpr>();
+ if (!ME)
+ return 0;
+ const Expr *Receiver = ME->getInstanceReceiver();
+ if (!Receiver)
+ return 0;
+ const GRState *state = N->getState();
+ const SVal &V = state->getSVal(Receiver);
+ const DefinedOrUnknownSVal *DV = dyn_cast<DefinedOrUnknownSVal>(&V);
+ if (!DV)
+ return 0;
+ state = state->Assume(*DV, true);
+ if (state)
+ return 0;
+
+ // The receiver was nil, and hence the method was skipped.
+    // Register a BugReporterVisitor to issue a message telling us how
+    // the receiver came to be nil.
+ bugreporter::registerTrackNullOrUndefValue(BRC, Receiver, N);
+ // Issue a message saying that the method was skipped.
+ PathDiagnosticLocation L(Receiver, BRC.getSourceManager());
+ return new PathDiagnosticEventPiece(L, "No method actually called "
+ "because the receiver is nil");
+ }
+};
+} // end anonymous namespace
+
+void clang::bugreporter::registerNilReceiverVisitor(BugReporterContext &BRC) {
+ BRC.addVisitor(new NilReceiverVisitor());
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/BuiltinFunctionChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/BuiltinFunctionChecker.cpp
new file mode 100644
index 0000000..9c8b516
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/BuiltinFunctionChecker.cpp
@@ -0,0 +1,75 @@
+//=== BuiltinFunctionChecker.cpp --------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker evaluates clang builtin functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/PathSensitive/Checker.h"
+#include "clang/Basic/Builtins.h"
+
+using namespace clang;
+
+namespace {
+
+class BuiltinFunctionChecker : public Checker {
+public:
+ static void *getTag() { static int tag = 0; return &tag; }
+ virtual bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
+};
+
+}
+
+void clang::RegisterBuiltinFunctionChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new BuiltinFunctionChecker());
+}
+
+bool BuiltinFunctionChecker::EvalCallExpr(CheckerContext &C,const CallExpr *CE){
+ const GRState *state = C.getState();
+ const Expr *Callee = CE->getCallee();
+ SVal L = state->getSVal(Callee);
+ const FunctionDecl *FD = L.getAsFunctionDecl();
+
+ if (!FD)
+ return false;
+
+ unsigned id = FD->getBuiltinID();
+
+ if (!id)
+ return false;
+
+ switch (id) {
+ case Builtin::BI__builtin_expect: {
+ // For __builtin_expect, just return the value of the subexpression.
+ assert (CE->arg_begin() != CE->arg_end());
+ SVal X = state->getSVal(*(CE->arg_begin()));
+ C.GenerateNode(state->BindExpr(CE, X));
+ return true;
+ }
+
+ case Builtin::BI__builtin_alloca: {
+ // FIXME: Refactor into StoreManager itself?
+ MemRegionManager& RM = C.getStoreManager().getRegionManager();
+ const MemRegion* R =
+ RM.getAllocaRegion(CE, C.getNodeBuilder().getCurrentBlockCount(),
+ C.getPredecessor()->getLocationContext());
+
+ // Set the extent of the region in bytes. This enables us to use the
+ // SVal of the argument directly. If we save the extent in bits, we
+ // cannot represent values like symbol*8.
+ SVal Extent = state->getSVal(*(CE->arg_begin()));
+ state = C.getStoreManager().setExtent(state, R, Extent);
+ C.GenerateNode(state->BindExpr(CE, loc::MemRegionVal(R)));
+ return true;
+ }
+ }
+
+ return false;
+}
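+
+// E.g. for 'if (__builtin_expect(p != 0, 1))' the call expression is bound
+// directly to the value of its first argument, so the analyzer evaluates
+// the branch condition exactly as 'p != 0'; the hint argument is ignored.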
diff --git a/contrib/llvm/tools/clang/lib/Checker/CFRefCount.cpp b/contrib/llvm/tools/clang/lib/Checker/CFRefCount.cpp
new file mode 100644
index 0000000..42e6f67
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/CFRefCount.cpp
@@ -0,0 +1,3620 @@
+// CFRefCount.cpp - Transfer functions for tracking simple values -*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the methods for CFRefCount, which implements
+// a reference count checker for Core Foundation (Mac OS X).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/BugReporter/PathDiagnostic.h"
+#include "clang/Checker/Checkers/LocalCheckers.h"
+#include "clang/Checker/DomainSpecific/CocoaConventions.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRExprEngineBuilders.h"
+#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "clang/Checker/PathSensitive/GRTransferFuncs.h"
+#include "clang/Checker/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableList.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include <stdarg.h>
+
+using namespace clang;
+using llvm::StringRef;
+using llvm::StrInStrNoCase;
+
+namespace {
+class InstanceReceiver {
+ const ObjCMessageExpr *ME;
+ const LocationContext *LC;
+public:
+ InstanceReceiver(const ObjCMessageExpr *me = 0,
+ const LocationContext *lc = 0) : ME(me), LC(lc) {}
+
+ bool isValid() const {
+ return ME && ME->isInstanceMessage();
+ }
+ operator bool() const {
+ return isValid();
+ }
+
+ SVal getSValAsScalarOrLoc(const GRState *state) {
+ assert(isValid());
+    // Do we have an expression for the receiver? If so, fetch the value
+    // of that expression.
+ if (const Expr *Ex = ME->getInstanceReceiver())
+ return state->getSValAsScalarOrLoc(Ex);
+
+ // Otherwise we are sending a message to super. In this case the
+ // object reference is the same as 'self'.
+ if (const ImplicitParamDecl *SelfDecl = LC->getSelfDecl())
+ return state->getSVal(state->getRegion(SelfDecl, LC));
+
+ return UnknownVal();
+ }
+
+ SourceRange getSourceRange() const {
+ assert(isValid());
+ if (const Expr *Ex = ME->getInstanceReceiver())
+ return Ex->getSourceRange();
+
+ // Otherwise we are sending a message to super.
+ SourceLocation L = ME->getSuperLoc();
+ assert(L.isValid());
+ return SourceRange(L, L);
+ }
+};
+}
+
+static const ObjCMethodDecl*
+ResolveToInterfaceMethodDecl(const ObjCMethodDecl *MD) {
+ ObjCInterfaceDecl *ID =
+ const_cast<ObjCInterfaceDecl*>(MD->getClassInterface());
+
+ return MD->isInstanceMethod()
+ ? ID->lookupInstanceMethod(MD->getSelector())
+ : ID->lookupClassMethod(MD->getSelector());
+}
+
+namespace {
+class GenericNodeBuilder {
+ GRStmtNodeBuilder *SNB;
+ Stmt *S;
+ const void *tag;
+ GREndPathNodeBuilder *ENB;
+public:
+ GenericNodeBuilder(GRStmtNodeBuilder &snb, Stmt *s,
+ const void *t)
+ : SNB(&snb), S(s), tag(t), ENB(0) {}
+
+ GenericNodeBuilder(GREndPathNodeBuilder &enb)
+ : SNB(0), S(0), tag(0), ENB(&enb) {}
+
+ ExplodedNode *MakeNode(const GRState *state, ExplodedNode *Pred) {
+ if (SNB)
+ return SNB->generateNode(PostStmt(S, Pred->getLocationContext(), tag),
+ state, Pred);
+
+ assert(ENB);
+ return ENB->generateNode(state, Pred);
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Primitives used for constructing summaries for function/method calls.
+//===----------------------------------------------------------------------===//
+
+/// ArgEffect is used to summarize a function/method call's effect on a
+/// particular argument.
+enum ArgEffect { Autorelease, Dealloc, DecRef, DecRefMsg, DoNothing,
+ DoNothingByRef, IncRefMsg, IncRef, MakeCollectable, MayEscape,
+ NewAutoreleasePool, SelfOwn, StopTracking };
+
+namespace llvm {
+template <> struct FoldingSetTrait<ArgEffect> {
+static inline void Profile(const ArgEffect X, FoldingSetNodeID& ID) {
+ ID.AddInteger((unsigned) X);
+}
+};
+} // end llvm namespace
+
+/// ArgEffects summarizes the effects of a function/method call on all of
+/// its arguments.
+typedef llvm::ImmutableMap<unsigned,ArgEffect> ArgEffects;
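+
+// ArgEffects is a persistent (immutable) map: adding an entry goes through
+// a factory and yields a new map value. An illustrative sketch, mirroring
+// the summary-construction code later in this file (AF is an
+// ArgEffects::Factory backed by a BumpPtrAllocator):
+//
+//   ArgEffects AE = AF.GetEmptyMap();
+//   AE = AF.Add(AE, 1, DecRef);   // argument 1 loses a reference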
+
+namespace {
+
+/// RetEffect is used to summarize a function/method call's behavior with
+/// respect to its return value.
+class RetEffect {
+public:
+ enum Kind { NoRet, Alias, OwnedSymbol, OwnedAllocatedSymbol,
+ NotOwnedSymbol, GCNotOwnedSymbol, ReceiverAlias,
+ OwnedWhenTrackedReceiver };
+
+ enum ObjKind { CF, ObjC, AnyObj };
+
+private:
+ Kind K;
+ ObjKind O;
+ unsigned index;
+
+ RetEffect(Kind k, unsigned idx = 0) : K(k), O(AnyObj), index(idx) {}
+ RetEffect(Kind k, ObjKind o) : K(k), O(o), index(0) {}
+
+public:
+ Kind getKind() const { return K; }
+
+ ObjKind getObjKind() const { return O; }
+
+ unsigned getIndex() const {
+ assert(getKind() == Alias);
+ return index;
+ }
+
+ bool isOwned() const {
+ return K == OwnedSymbol || K == OwnedAllocatedSymbol ||
+ K == OwnedWhenTrackedReceiver;
+ }
+
+ static RetEffect MakeOwnedWhenTrackedReceiver() {
+ return RetEffect(OwnedWhenTrackedReceiver, ObjC);
+ }
+
+ static RetEffect MakeAlias(unsigned Idx) {
+ return RetEffect(Alias, Idx);
+ }
+ static RetEffect MakeReceiverAlias() {
+ return RetEffect(ReceiverAlias);
+ }
+ static RetEffect MakeOwned(ObjKind o, bool isAllocated = false) {
+ return RetEffect(isAllocated ? OwnedAllocatedSymbol : OwnedSymbol, o);
+ }
+ static RetEffect MakeNotOwned(ObjKind o) {
+ return RetEffect(NotOwnedSymbol, o);
+ }
+ static RetEffect MakeGCNotOwned() {
+ return RetEffect(GCNotOwnedSymbol, ObjC);
+ }
+
+ static RetEffect MakeNoRet() {
+ return RetEffect(NoRet);
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.AddInteger((unsigned)K);
+ ID.AddInteger((unsigned)O);
+ ID.AddInteger(index);
+ }
+};
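+
+// Illustrative uses of the factory methods above, matching the summaries
+// constructed later in this file:
+//
+//   RetEffect::MakeOwned(RetEffect::CF, true)  // Create rule: caller owns
+//   RetEffect::MakeNotOwned(RetEffect::CF)     // Get rule: caller borrows
+//   RetEffect::MakeNoRet()                     // no tracked return value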
+
+//===----------------------------------------------------------------------===//
+// Reference-counting logic (typestate + counts).
+//===----------------------------------------------------------------------===//
+
+class RefVal {
+public:
+ enum Kind {
+ Owned = 0, // Owning reference.
+    NotOwned, // Reference is not owned but still valid (not freed).
+    Released, // Object has been released.
+    ReturnedOwned, // Returned object passes ownership to caller.
+    ReturnedNotOwned, // Returned object does not pass ownership to caller.
+ ERROR_START,
+ ErrorDeallocNotOwned, // -dealloc called on non-owned object.
+ ErrorDeallocGC, // Calling -dealloc with GC enabled.
+ ErrorUseAfterRelease, // Object used after released.
+ ErrorReleaseNotOwned, // Release of an object that was not owned.
+ ERROR_LEAK_START,
+ ErrorLeak, // A memory leak due to excessive reference counts.
+    ErrorLeakReturned, // A memory leak due to the returning method not
+                          // following the correct naming conventions.
+ ErrorGCLeakReturned,
+ ErrorOverAutorelease,
+ ErrorReturnedNotOwned
+ };
+
+private:
+ Kind kind;
+ RetEffect::ObjKind okind;
+ unsigned Cnt;
+ unsigned ACnt;
+ QualType T;
+
+ RefVal(Kind k, RetEffect::ObjKind o, unsigned cnt, unsigned acnt, QualType t)
+ : kind(k), okind(o), Cnt(cnt), ACnt(acnt), T(t) {}
+
+ RefVal(Kind k, unsigned cnt = 0)
+ : kind(k), okind(RetEffect::AnyObj), Cnt(cnt), ACnt(0) {}
+
+public:
+ Kind getKind() const { return kind; }
+
+ RetEffect::ObjKind getObjKind() const { return okind; }
+
+ unsigned getCount() const { return Cnt; }
+ unsigned getAutoreleaseCount() const { return ACnt; }
+ unsigned getCombinedCounts() const { return Cnt + ACnt; }
+ void clearCounts() { Cnt = 0; ACnt = 0; }
+ void setCount(unsigned i) { Cnt = i; }
+ void setAutoreleaseCount(unsigned i) { ACnt = i; }
+
+ QualType getType() const { return T; }
+
+ // Useful predicates.
+
+ static bool isError(Kind k) { return k >= ERROR_START; }
+
+ static bool isLeak(Kind k) { return k >= ERROR_LEAK_START; }
+
+ bool isOwned() const {
+ return getKind() == Owned;
+ }
+
+ bool isNotOwned() const {
+ return getKind() == NotOwned;
+ }
+
+ bool isReturnedOwned() const {
+ return getKind() == ReturnedOwned;
+ }
+
+ bool isReturnedNotOwned() const {
+ return getKind() == ReturnedNotOwned;
+ }
+
+ bool isNonLeakError() const {
+ Kind k = getKind();
+ return isError(k) && !isLeak(k);
+ }
+
+ static RefVal makeOwned(RetEffect::ObjKind o, QualType t,
+ unsigned Count = 1) {
+ return RefVal(Owned, o, Count, 0, t);
+ }
+
+ static RefVal makeNotOwned(RetEffect::ObjKind o, QualType t,
+ unsigned Count = 0) {
+ return RefVal(NotOwned, o, Count, 0, t);
+ }
+
+ // Comparison, profiling, and pretty-printing.
+
+ bool operator==(const RefVal& X) const {
+ return kind == X.kind && Cnt == X.Cnt && T == X.T && ACnt == X.ACnt;
+ }
+
+ RefVal operator-(size_t i) const {
+ return RefVal(getKind(), getObjKind(), getCount() - i,
+ getAutoreleaseCount(), getType());
+ }
+
+ RefVal operator+(size_t i) const {
+ return RefVal(getKind(), getObjKind(), getCount() + i,
+ getAutoreleaseCount(), getType());
+ }
+
+ RefVal operator^(Kind k) const {
+ return RefVal(k, getObjKind(), getCount(), getAutoreleaseCount(),
+ getType());
+ }
+
+ RefVal autorelease() const {
+ return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount()+1,
+ getType());
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.AddInteger((unsigned) kind);
+ ID.AddInteger(Cnt);
+ ID.AddInteger(ACnt);
+ ID.Add(T);
+ }
+
+ void print(llvm::raw_ostream& Out) const;
+};
+
+void RefVal::print(llvm::raw_ostream& Out) const {
+ if (!T.isNull())
+ Out << "Tracked Type:" << T.getAsString() << '\n';
+
+ switch (getKind()) {
+ default: assert(false);
+ case Owned: {
+ Out << "Owned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case NotOwned: {
+ Out << "NotOwned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case ReturnedOwned: {
+ Out << "ReturnedOwned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case ReturnedNotOwned: {
+ Out << "ReturnedNotOwned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case Released:
+ Out << "Released";
+ break;
+
+ case ErrorDeallocGC:
+ Out << "-dealloc (GC)";
+ break;
+
+ case ErrorDeallocNotOwned:
+ Out << "-dealloc (not-owned)";
+ break;
+
+ case ErrorLeak:
+ Out << "Leaked";
+ break;
+
+ case ErrorLeakReturned:
+ Out << "Leaked (Bad naming)";
+ break;
+
+ case ErrorGCLeakReturned:
+ Out << "Leaked (GC-ed at return)";
+ break;
+
+ case ErrorUseAfterRelease:
+ Out << "Use-After-Release [ERROR]";
+ break;
+
+ case ErrorReleaseNotOwned:
+ Out << "Release of Not-Owned [ERROR]";
+ break;
+
+ case RefVal::ErrorOverAutorelease:
+ Out << "Over autoreleased";
+ break;
+
+ case RefVal::ErrorReturnedNotOwned:
+ Out << "Non-owned object returned instead of owned";
+ break;
+ }
+
+ if (ACnt) {
+ Out << " [ARC +" << ACnt << ']';
+ }
+}
+} //end anonymous namespace
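+
+// The overloaded operators above let the transfer functions read as simple
+// arithmetic on typestates. An illustrative sketch (T is some QualType):
+//
+//   RefVal V = RefVal::makeOwned(RetEffect::CF, T);  // owned, count == 1
+//   V = V + 1;                    // e.g. CFRetain: count becomes 2
+//   V = V - 1;                    // e.g. CFRelease: count back to 1
+//   V = V ^ RefVal::Released;     // switch the Kind, keep both counts
+//   V = V.autorelease();          // bump the autorelease count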
+
+//===----------------------------------------------------------------------===//
+// RefBindings - State used to track object reference counts.
+//===----------------------------------------------------------------------===//
+
+typedef llvm::ImmutableMap<SymbolRef, RefVal> RefBindings;
+
+namespace clang {
+ template<>
+ struct GRStateTrait<RefBindings> : public GRStatePartialTrait<RefBindings> {
+ static void* GDMIndex() {
+ static int RefBIndex = 0;
+ return &RefBIndex;
+ }
+ };
+}
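+
+// With this trait registered, the checker reads and writes the per-symbol
+// reference counts through the state's generic data map (illustrative):
+//
+//   if (const RefVal *V = state->get<RefBindings>(Sym))         // lookup
+//     state = state->set<RefBindings>(Sym, *V ^ RefVal::Released);
+//   state = state->remove<RefBindings>(Sym);                    // untrack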
+
+//===----------------------------------------------------------------------===//
+// Summaries
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RetainSummary {
+ /// Args - an ordered vector of (index, ArgEffect) pairs, where index
+ /// specifies the argument (starting from 0). This can be sparsely
+ /// populated; arguments with no entry in Args use 'DefaultArgEffect'.
+ ArgEffects Args;
+
+ /// DefaultArgEffect - The default ArgEffect to apply to arguments that
+ /// do not have an entry in Args.
+ ArgEffect DefaultArgEffect;
+
+ /// Receiver - If this summary applies to an Objective-C message expression,
+ /// this is the effect applied to the state of the receiver.
+ ArgEffect Receiver;
+
+ /// Ret - The effect on the return value. Used to indicate if the
+ /// function/method call returns a new tracked symbol, returns an
+ /// alias of one of the arguments in the call, and so on.
+ RetEffect Ret;
+
+ /// EndPath - Indicates that execution of this method/function should
+ /// terminate the simulation of a path.
+ bool EndPath;
+
+public:
+ RetainSummary(ArgEffects A, RetEffect R, ArgEffect defaultEff,
+ ArgEffect ReceiverEff, bool endpath = false)
+ : Args(A), DefaultArgEffect(defaultEff), Receiver(ReceiverEff), Ret(R),
+ EndPath(endpath) {}
+
+ /// getArg - Return the argument effect on the argument specified by
+ /// idx (starting from 0).
+ ArgEffect getArg(unsigned idx) const {
+ if (const ArgEffect *AE = Args.lookup(idx))
+ return *AE;
+
+ return DefaultArgEffect;
+ }
+
+ /// setDefaultArgEffect - Set the default argument effect.
+ void setDefaultArgEffect(ArgEffect E) {
+ DefaultArgEffect = E;
+ }
+
+ /// setArg - Set the argument effect on the argument specified by idx.
+ void setArgEffect(ArgEffects::Factory& AF, unsigned idx, ArgEffect E) {
+ Args = AF.Add(Args, idx, E);
+ }
+
+ /// getRetEffect - Returns the effect on the return value of the call.
+ RetEffect getRetEffect() const { return Ret; }
+
+ /// setRetEffect - Set the effect of the return value of the call.
+ void setRetEffect(RetEffect E) { Ret = E; }
+
+ /// isEndPath - Returns true if executing the given method/function should
+ /// terminate the path.
+ bool isEndPath() const { return EndPath; }
+
+ /// getReceiverEffect - Returns the effect on the receiver of the call.
+ /// This is only meaningful if the summary applies to an ObjCMessageExpr*.
+ ArgEffect getReceiverEffect() const { return Receiver; }
+
+ /// setReceiverEffect - Set the effect on the receiver of the call.
+ void setReceiverEffect(ArgEffect E) { Receiver = E; }
+
+ typedef ArgEffects::iterator ExprIterator;
+
+ ExprIterator begin_args() const { return Args.begin(); }
+ ExprIterator end_args() const { return Args.end(); }
+
+ static void Profile(llvm::FoldingSetNodeID& ID, ArgEffects A,
+ RetEffect RetEff, ArgEffect DefaultEff,
+ ArgEffect ReceiverEff, bool EndPath) {
+ ID.Add(A);
+ ID.Add(RetEff);
+ ID.AddInteger((unsigned) DefaultEff);
+ ID.AddInteger((unsigned) ReceiverEff);
+ ID.AddInteger((unsigned) EndPath);
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ Profile(ID, Args, Ret, DefaultArgEffect, Receiver, EndPath);
+ }
+};
+} // end anonymous namespace
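+
+// An illustrative construction: the "default" summary built by the manager
+// below corresponds to
+//
+//   RetainSummary(AF.GetEmptyMap(),        // no per-argument effects
+//                 RetEffect::MakeNoRet(),  // no tracked return value
+//                 MayEscape,               // default argument effect
+//                 DoNothing);              // receiver effect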
+
+//===----------------------------------------------------------------------===//
+// Data structures for constructing summaries.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCSummaryKey {
+ IdentifierInfo* II;
+ Selector S;
+public:
+ ObjCSummaryKey(IdentifierInfo* ii, Selector s)
+ : II(ii), S(s) {}
+
+ ObjCSummaryKey(const ObjCInterfaceDecl* d, Selector s)
+ : II(d ? d->getIdentifier() : 0), S(s) {}
+
+ ObjCSummaryKey(const ObjCInterfaceDecl* d, IdentifierInfo *ii, Selector s)
+ : II(d ? d->getIdentifier() : ii), S(s) {}
+
+ ObjCSummaryKey(Selector s)
+ : II(0), S(s) {}
+
+ IdentifierInfo* getIdentifier() const { return II; }
+ Selector getSelector() const { return S; }
+};
+}
+
+namespace llvm {
+template <> struct DenseMapInfo<ObjCSummaryKey> {
+ static inline ObjCSummaryKey getEmptyKey() {
+ return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getEmptyKey(),
+ DenseMapInfo<Selector>::getEmptyKey());
+ }
+
+ static inline ObjCSummaryKey getTombstoneKey() {
+ return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getTombstoneKey(),
+ DenseMapInfo<Selector>::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const ObjCSummaryKey &V) {
+ return (DenseMapInfo<IdentifierInfo*>::getHashValue(V.getIdentifier())
+ & 0x88888888)
+ | (DenseMapInfo<Selector>::getHashValue(V.getSelector())
+ & 0x55555555);
+ }
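+  // The two masks above pick disjoint bit positions (0x8 = 1000b vs.
+  // 0x5 = 0101b in each nibble), so bits from both the identifier hash and
+  // the selector hash survive into every combined hash value.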
+
+ static bool isEqual(const ObjCSummaryKey& LHS, const ObjCSummaryKey& RHS) {
+ return DenseMapInfo<IdentifierInfo*>::isEqual(LHS.getIdentifier(),
+ RHS.getIdentifier()) &&
+ DenseMapInfo<Selector>::isEqual(LHS.getSelector(),
+ RHS.getSelector());
+ }
+
+};
+template <>
+struct isPodLike<ObjCSummaryKey> { static const bool value = true; };
+} // end llvm namespace
+
+namespace {
+class ObjCSummaryCache {
+ typedef llvm::DenseMap<ObjCSummaryKey, RetainSummary*> MapTy;
+ MapTy M;
+public:
+ ObjCSummaryCache() {}
+
+ RetainSummary* find(const ObjCInterfaceDecl* D, IdentifierInfo *ClsName,
+ Selector S) {
+    // Look up the method using the decl for the class @interface. If we
+    // have no decl, look up using the class name.
+ return D ? find(D, S) : find(ClsName, S);
+ }
+
+  RetainSummary* find(const ObjCInterfaceDecl* D, Selector S) {
+    // Do a lookup with the (D,S) pair. If we find a match, return the
+    // summary.
+    ObjCSummaryKey K(D, S);
+    MapTy::iterator I = M.find(K);
+
+    if (I != M.end())
+      return I->second;
+    if (!D)
+      return NULL;
+
+ // Walk the super chain. If we find a hit with a parent, we'll end
+ // up returning that summary. We actually allow that key (null,S), as
+ // we cache summaries for the null ObjCInterfaceDecl* to allow us to
+ // generate initial summaries without having to worry about NSObject
+ // being declared.
+ // FIXME: We may change this at some point.
+ for (ObjCInterfaceDecl* C=D->getSuperClass() ;; C=C->getSuperClass()) {
+ if ((I = M.find(ObjCSummaryKey(C, S))) != M.end())
+ break;
+
+ if (!C)
+ return NULL;
+ }
+
+    // Cache the summary with the original key to make the next lookup
+    // faster, and return the summary.
+ RetainSummary *Summ = I->second;
+ M[K] = Summ;
+ return Summ;
+ }
+
+
+ RetainSummary* find(Expr* Receiver, Selector S) {
+ return find(getReceiverDecl(Receiver), S);
+ }
+
+ RetainSummary* find(IdentifierInfo* II, Selector S) {
+    // FIXME: Class method lookup. Right now we don't have a good way
+    // of going between IdentifierInfo* and the class hierarchy.
+ MapTy::iterator I = M.find(ObjCSummaryKey(II, S));
+
+ if (I == M.end())
+ I = M.find(ObjCSummaryKey(S));
+
+ return I == M.end() ? NULL : I->second;
+ }
+
+ const ObjCInterfaceDecl* getReceiverDecl(Expr* E) {
+ if (const ObjCObjectPointerType* PT =
+ E->getType()->getAs<ObjCObjectPointerType>())
+ return PT->getInterfaceDecl();
+
+ return NULL;
+ }
+
+ RetainSummary*& operator[](ObjCMessageExpr* ME) {
+
+ Selector S = ME->getSelector();
+
+ const ObjCInterfaceDecl* OD = 0;
+ bool IsInstanceMessage = false;
+ switch (ME->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ OD = getReceiverDecl(ME->getInstanceReceiver());
+ IsInstanceMessage = true;
+ break;
+
+ case ObjCMessageExpr::SuperInstance:
+ IsInstanceMessage = true;
+ OD = ME->getSuperType()->getAs<ObjCObjectPointerType>()
+ ->getInterfaceDecl();
+ break;
+
+ case ObjCMessageExpr::Class:
+ OD = ME->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ OD = ME->getSuperType()->getAs<ObjCObjectType>()->getInterface();
+ break;
+ }
+
+ if (IsInstanceMessage)
+ return OD ? M[ObjCSummaryKey(OD->getIdentifier(), S)] : M[S];
+
+ return M[ObjCSummaryKey(OD->getIdentifier(), S)];
+ }
+
+ RetainSummary*& operator[](ObjCSummaryKey K) {
+ return M[K];
+ }
+
+ RetainSummary*& operator[](Selector S) {
+ return M[ ObjCSummaryKey(S) ];
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Data structures for managing collections of summaries.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RetainSummaryManager {
+
+ //==-----------------------------------------------------------------==//
+ // Typedefs.
+ //==-----------------------------------------------------------------==//
+
+ typedef llvm::DenseMap<FunctionDecl*, RetainSummary*>
+ FuncSummariesTy;
+
+ typedef ObjCSummaryCache ObjCMethodSummariesTy;
+
+ //==-----------------------------------------------------------------==//
+ // Data.
+ //==-----------------------------------------------------------------==//
+
+ /// Ctx - The ASTContext object for the analyzed ASTs.
+ ASTContext& Ctx;
+
+  /// CFDictionaryCreateII - An IdentifierInfo* representing the identifier
+  /// "CFDictionaryCreate".
+ IdentifierInfo* CFDictionaryCreateII;
+
+ /// GCEnabled - Records whether or not the analyzed code runs in GC mode.
+ const bool GCEnabled;
+
+ /// FuncSummaries - A map from FunctionDecls to summaries.
+ FuncSummariesTy FuncSummaries;
+
+  /// ObjCClassMethodSummaries - A map from selectors (for class methods)
+  /// to summaries.
+ ObjCMethodSummariesTy ObjCClassMethodSummaries;
+
+ /// ObjCMethodSummaries - A map from selectors to summaries.
+ ObjCMethodSummariesTy ObjCMethodSummaries;
+
+ /// BPAlloc - A BumpPtrAllocator used for allocating summaries, ArgEffects,
+ /// and all other data used by the checker.
+ llvm::BumpPtrAllocator BPAlloc;
+
+ /// AF - A factory for ArgEffects objects.
+ ArgEffects::Factory AF;
+
+  /// ScratchArgs - A holding buffer for constructing ArgEffects.
+ ArgEffects ScratchArgs;
+
+ /// ObjCAllocRetE - Default return effect for methods returning Objective-C
+ /// objects.
+ RetEffect ObjCAllocRetE;
+
+ /// ObjCInitRetE - Default return effect for init methods returning
+ /// Objective-C objects.
+ RetEffect ObjCInitRetE;
+
+ RetainSummary DefaultSummary;
+ RetainSummary* StopSummary;
+
+ //==-----------------------------------------------------------------==//
+ // Methods.
+ //==-----------------------------------------------------------------==//
+
+ /// getArgEffects - Returns a persistent ArgEffects object based on the
+ /// data in ScratchArgs.
+ ArgEffects getArgEffects();
+
+ enum UnaryFuncKind { cfretain, cfrelease, cfmakecollectable };
+
+public:
+ RetEffect getObjAllocRetEffect() const { return ObjCAllocRetE; }
+
+ RetainSummary *getDefaultSummary() {
+ RetainSummary *Summ = (RetainSummary*) BPAlloc.Allocate<RetainSummary>();
+ return new (Summ) RetainSummary(DefaultSummary);
+ }
+
+ RetainSummary* getUnarySummary(const FunctionType* FT, UnaryFuncKind func);
+
+ RetainSummary* getCFSummaryCreateRule(FunctionDecl* FD);
+ RetainSummary* getCFSummaryGetRule(FunctionDecl* FD);
+ RetainSummary* getCFCreateGetRuleSummary(FunctionDecl* FD, StringRef FName);
+
+ RetainSummary* getPersistentSummary(ArgEffects AE, RetEffect RetEff,
+ ArgEffect ReceiverEff = DoNothing,
+ ArgEffect DefaultEff = MayEscape,
+ bool isEndPath = false);
+
+ RetainSummary* getPersistentSummary(RetEffect RE,
+ ArgEffect ReceiverEff = DoNothing,
+ ArgEffect DefaultEff = MayEscape) {
+ return getPersistentSummary(getArgEffects(), RE, ReceiverEff, DefaultEff);
+ }
+
+ RetainSummary *getPersistentStopSummary() {
+ if (StopSummary)
+ return StopSummary;
+
+ StopSummary = getPersistentSummary(RetEffect::MakeNoRet(),
+ StopTracking, StopTracking);
+
+ return StopSummary;
+ }
+
+ RetainSummary *getInitMethodSummary(QualType RetTy);
+
+ void InitializeClassMethodSummaries();
+ void InitializeMethodSummaries();
+private:
+
+ void addClsMethSummary(IdentifierInfo* ClsII, Selector S,
+ RetainSummary* Summ) {
+ ObjCClassMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
+ }
+
+ void addNSObjectClsMethSummary(Selector S, RetainSummary *Summ) {
+ ObjCClassMethodSummaries[S] = Summ;
+ }
+
+ void addNSObjectMethSummary(Selector S, RetainSummary *Summ) {
+ ObjCMethodSummaries[S] = Summ;
+ }
+
+ void addClassMethSummary(const char* Cls, const char* nullaryName,
+ RetainSummary *Summ) {
+ IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
+ Selector S = GetNullarySelector(nullaryName, Ctx);
+ ObjCClassMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
+ }
+
+ void addInstMethSummary(const char* Cls, const char* nullaryName,
+ RetainSummary *Summ) {
+ IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
+ Selector S = GetNullarySelector(nullaryName, Ctx);
+ ObjCMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
+ }
+
+ Selector generateSelector(va_list argp) {
+ llvm::SmallVector<IdentifierInfo*, 10> II;
+
+ while (const char* s = va_arg(argp, const char*))
+ II.push_back(&Ctx.Idents.get(s));
+
+ return Ctx.Selectors.getSelector(II.size(), &II[0]);
+ }
+
+ void addMethodSummary(IdentifierInfo *ClsII, ObjCMethodSummariesTy& Summaries,
+ RetainSummary* Summ, va_list argp) {
+ Selector S = generateSelector(argp);
+ Summaries[ObjCSummaryKey(ClsII, S)] = Summ;
+ }
+
+ void addInstMethSummary(const char* Cls, RetainSummary* Summ, ...) {
+ va_list argp;
+ va_start(argp, Summ);
+ addMethodSummary(&Ctx.Idents.get(Cls), ObjCMethodSummaries, Summ, argp);
+ va_end(argp);
+ }
+
+ void addClsMethSummary(const char* Cls, RetainSummary* Summ, ...) {
+ va_list argp;
+ va_start(argp, Summ);
+ addMethodSummary(&Ctx.Idents.get(Cls),ObjCClassMethodSummaries, Summ, argp);
+ va_end(argp);
+ }
+
+ void addClsMethSummary(IdentifierInfo *II, RetainSummary* Summ, ...) {
+ va_list argp;
+ va_start(argp, Summ);
+ addMethodSummary(II, ObjCClassMethodSummaries, Summ, argp);
+ va_end(argp);
+ }
+
+ void addPanicSummary(const char* Cls, ...) {
+ RetainSummary* Summ = getPersistentSummary(AF.GetEmptyMap(),
+ RetEffect::MakeNoRet(),
+ DoNothing, DoNothing, true);
+ va_list argp;
+ va_start (argp, Cls);
+ addMethodSummary(&Ctx.Idents.get(Cls), ObjCMethodSummaries, Summ, argp);
+ va_end(argp);
+ }
+
+public:
+
+ RetainSummaryManager(ASTContext& ctx, bool gcenabled)
+ : Ctx(ctx),
+ CFDictionaryCreateII(&ctx.Idents.get("CFDictionaryCreate")),
+ GCEnabled(gcenabled), AF(BPAlloc), ScratchArgs(AF.GetEmptyMap()),
+ ObjCAllocRetE(gcenabled ? RetEffect::MakeGCNotOwned()
+ : RetEffect::MakeOwned(RetEffect::ObjC, true)),
+ ObjCInitRetE(gcenabled ? RetEffect::MakeGCNotOwned()
+ : RetEffect::MakeOwnedWhenTrackedReceiver()),
+ DefaultSummary(AF.GetEmptyMap() /* per-argument effects (none) */,
+ RetEffect::MakeNoRet() /* return effect */,
+ MayEscape, /* default argument effect */
+ DoNothing /* receiver effect */),
+ StopSummary(0) {
+
+ InitializeClassMethodSummaries();
+ InitializeMethodSummaries();
+ }
+
+ ~RetainSummaryManager();
+
+ RetainSummary* getSummary(FunctionDecl* FD);
+
+ RetainSummary *getInstanceMethodSummary(const ObjCMessageExpr *ME,
+ const GRState *state,
+ const LocationContext *LC);
+
+ RetainSummary* getInstanceMethodSummary(const ObjCMessageExpr* ME,
+ const ObjCInterfaceDecl* ID) {
+ return getInstanceMethodSummary(ME->getSelector(), 0,
+ ID, ME->getMethodDecl(), ME->getType());
+ }
+
+ RetainSummary* getInstanceMethodSummary(Selector S, IdentifierInfo *ClsName,
+ const ObjCInterfaceDecl* ID,
+ const ObjCMethodDecl *MD,
+ QualType RetTy);
+
+ RetainSummary *getClassMethodSummary(Selector S, IdentifierInfo *ClsName,
+ const ObjCInterfaceDecl *ID,
+ const ObjCMethodDecl *MD,
+ QualType RetTy);
+
+ RetainSummary *getClassMethodSummary(const ObjCMessageExpr *ME) {
+ ObjCInterfaceDecl *Class = 0;
+ switch (ME->getReceiverKind()) {
+ case ObjCMessageExpr::Class:
+ case ObjCMessageExpr::SuperClass:
+ Class = ME->getReceiverInterface();
+ break;
+
+ case ObjCMessageExpr::Instance:
+ case ObjCMessageExpr::SuperInstance:
+ break;
+ }
+
+ return getClassMethodSummary(ME->getSelector(),
+ Class? Class->getIdentifier() : 0,
+ Class,
+ ME->getMethodDecl(), ME->getType());
+ }
+
+ /// getMethodSummary - This version of getMethodSummary is used to query
+ /// the summary for the current method being analyzed.
+ RetainSummary *getMethodSummary(const ObjCMethodDecl *MD) {
+ // FIXME: Eventually this should be unneeded.
+ const ObjCInterfaceDecl *ID = MD->getClassInterface();
+ Selector S = MD->getSelector();
+ IdentifierInfo *ClsName = ID->getIdentifier();
+ QualType ResultTy = MD->getResultType();
+
+ // Resolve the method decl last.
+ if (const ObjCMethodDecl *InterfaceMD = ResolveToInterfaceMethodDecl(MD))
+ MD = InterfaceMD;
+
+ if (MD->isInstanceMethod())
+ return getInstanceMethodSummary(S, ClsName, ID, MD, ResultTy);
+ else
+ return getClassMethodSummary(S, ClsName, ID, MD, ResultTy);
+ }
+
+ RetainSummary* getCommonMethodSummary(const ObjCMethodDecl* MD,
+ Selector S, QualType RetTy);
+
+ void updateSummaryFromAnnotations(RetainSummary &Summ,
+ const ObjCMethodDecl *MD);
+
+ void updateSummaryFromAnnotations(RetainSummary &Summ,
+ const FunctionDecl *FD);
+
+ bool isGCEnabled() const { return GCEnabled; }
+
+ RetainSummary *copySummary(RetainSummary *OldSumm) {
+ RetainSummary *Summ = (RetainSummary*) BPAlloc.Allocate<RetainSummary>();
+ new (Summ) RetainSummary(*OldSumm);
+ return Summ;
+ }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Implementation of checker data structures.
+//===----------------------------------------------------------------------===//
+
+RetainSummaryManager::~RetainSummaryManager() {}
+
+ArgEffects RetainSummaryManager::getArgEffects() {
+ ArgEffects AE = ScratchArgs;
+ ScratchArgs = AF.GetEmptyMap();
+ return AE;
+}
+
+RetainSummary*
+RetainSummaryManager::getPersistentSummary(ArgEffects AE, RetEffect RetEff,
+ ArgEffect ReceiverEff,
+ ArgEffect DefaultEff,
+ bool isEndPath) {
+ // Create the summary and return it.
+ RetainSummary *Summ = (RetainSummary*) BPAlloc.Allocate<RetainSummary>();
+ new (Summ) RetainSummary(AE, RetEff, DefaultEff, ReceiverEff, isEndPath);
+ return Summ;
+}
+
+//===----------------------------------------------------------------------===//
+// Summary creation for functions (largely uses of Core Foundation).
+//===----------------------------------------------------------------------===//
+
+static bool isRetain(FunctionDecl* FD, StringRef FName) {
+ return FName.endswith("Retain");
+}
+
+static bool isRelease(FunctionDecl* FD, StringRef FName) {
+ return FName.endswith("Release");
+}
+
+RetainSummary* RetainSummaryManager::getSummary(FunctionDecl* FD) {
+ // Look up a summary in our cache of FunctionDecls -> Summaries.
+ FuncSummariesTy::iterator I = FuncSummaries.find(FD);
+ if (I != FuncSummaries.end())
+ return I->second;
+
+ // No summary? Generate one.
+ RetainSummary *S = 0;
+
+ do {
+ // We generate "stop" summaries for implicitly defined functions.
+ if (FD->isImplicit()) {
+ S = getPersistentStopSummary();
+ break;
+ }
+
+ // [PR 3337] Use 'getAs<FunctionType>' to strip away any typedefs on the
+ // function's type.
+ const FunctionType* FT = FD->getType()->getAs<FunctionType>();
+ const IdentifierInfo *II = FD->getIdentifier();
+ if (!II)
+ break;
+
+ StringRef FName = II->getName();
+
+    // Strip away preceding '_'. Doing this here will affect all the checks
+    // down below.
+ FName = FName.substr(FName.find_first_not_of('_'));
+
+ // Inspect the result type.
+ QualType RetTy = FT->getResultType();
+
+ // FIXME: This should all be refactored into a chain of "summary lookup"
+ // filters.
+ assert(ScratchArgs.isEmpty());
+
+ if (FName == "pthread_create") {
+ // Part of: <rdar://problem/7299394>. This will be addressed
+ // better with IPA.
+ S = getPersistentStopSummary();
+ } else if (FName == "NSMakeCollectable") {
+ // Handle: id NSMakeCollectable(CFTypeRef)
+ S = (RetTy->isObjCIdType())
+ ? getUnarySummary(FT, cfmakecollectable)
+ : getPersistentStopSummary();
+ } else if (FName == "IOBSDNameMatching" ||
+ FName == "IOServiceMatching" ||
+ FName == "IOServiceNameMatching" ||
+ FName == "IORegistryEntryIDMatching" ||
+ FName == "IOOpenFirmwarePathMatching") {
+ // Part of <rdar://problem/6961230>. (IOKit)
+      // This should be addressed using an API table.
+ S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
+ DoNothing, DoNothing);
+ } else if (FName == "IOServiceGetMatchingService" ||
+ FName == "IOServiceGetMatchingServices") {
+ // FIXES: <rdar://problem/6326900>
+      // This should be addressed using an API table. These string compares
+      // are also a little gross, but there is no need to super optimize here.
+ ScratchArgs = AF.Add(ScratchArgs, 1, DecRef);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName == "IOServiceAddNotification" ||
+ FName == "IOServiceAddMatchingNotification") {
+ // Part of <rdar://problem/6961230>. (IOKit)
+      // This should be addressed using an API table.
+ ScratchArgs = AF.Add(ScratchArgs, 2, DecRef);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName == "CVPixelBufferCreateWithBytes") {
+ // FIXES: <rdar://problem/7283567>
+ // Eventually this can be improved by recognizing that the pixel
+ // buffer passed to CVPixelBufferCreateWithBytes is released via
+ // a callback and doing full IPA to make sure this is done correctly.
+ // FIXME: This function has an out parameter that returns an
+ // allocated object.
+ ScratchArgs = AF.Add(ScratchArgs, 7, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName == "CGBitmapContextCreateWithData") {
+ // FIXES: <rdar://problem/7358899>
+ // Eventually this can be improved by recognizing that 'releaseInfo'
+ // passed to CGBitmapContextCreateWithData is released via
+ // a callback and doing full IPA to make sure this is done correctly.
+ ScratchArgs = AF.Add(ScratchArgs, 8, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
+ DoNothing, DoNothing);
+ } else if (FName == "CVPixelBufferCreateWithPlanarBytes") {
+ // FIXES: <rdar://problem/7283567>
+ // Eventually this can be improved by recognizing that the pixel
+ // buffer passed to CVPixelBufferCreateWithPlanarBytes is released
+ // via a callback and doing full IPA to make sure this is done
+ // correctly.
+ ScratchArgs = AF.Add(ScratchArgs, 12, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ }
+
+ // Did we get a summary?
+ if (S)
+ break;
+
+ // Enable this code once the semantics of NSDeallocateObject are resolved
+ // for GC. <rdar://problem/6619988>
+#if 0
+ // Handle: NSDeallocateObject(id anObject);
+ // This method does allow 'nil' (although we don't check it now).
+ if (strcmp(FName, "NSDeallocateObject") == 0) {
+ return RetTy == Ctx.VoidTy
+ ? getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, Dealloc)
+ : getPersistentStopSummary();
+ }
+#endif
+
+ if (RetTy->isPointerType()) {
+ // For CoreFoundation ('CF') types.
+ if (cocoa::isRefType(RetTy, "CF", FName)) {
+ if (isRetain(FD, FName))
+ S = getUnarySummary(FT, cfretain);
+ else if (FName.find("MakeCollectable") != StringRef::npos)
+ S = getUnarySummary(FT, cfmakecollectable);
+ else
+ S = getCFCreateGetRuleSummary(FD, FName);
+
+ break;
+ }
+
+ // For CoreGraphics ('CG') types.
+ if (cocoa::isRefType(RetTy, "CG", FName)) {
+ if (isRetain(FD, FName))
+ S = getUnarySummary(FT, cfretain);
+ else
+ S = getCFCreateGetRuleSummary(FD, FName);
+
+ break;
+ }
+
+ // For the Disk Arbitration API (DiskArbitration/DADisk.h)
+ if (cocoa::isRefType(RetTy, "DADisk") ||
+ cocoa::isRefType(RetTy, "DADissenter") ||
+ cocoa::isRefType(RetTy, "DASessionRef")) {
+ S = getCFCreateGetRuleSummary(FD, FName);
+ break;
+ }
+
+ break;
+ }
+
+ // Check for release functions, the only kind of functions that we care
+ // about that don't return a pointer type.
+ if (FName[0] == 'C' && (FName[1] == 'F' || FName[1] == 'G')) {
+ // Test for 'CGCF'.
+ FName = FName.substr(FName.startswith("CGCF") ? 4 : 2);
+
+ if (isRelease(FD, FName))
+ S = getUnarySummary(FT, cfrelease);
+ else {
+ assert (ScratchArgs.isEmpty());
+ // Remaining CoreFoundation and CoreGraphics functions.
+      // We used to assume that they all strictly followed the ownership
+      // idiom and that ownership cannot be transferred.  While this is
+      // technically correct, many methods allow a tracked object to escape.
+      // For example:
+ //
+ // CFMutableDictionaryRef x = CFDictionaryCreateMutable(...);
+ // CFDictionaryAddValue(y, key, x);
+ // CFRelease(x);
+ // ... it is okay to use 'x' since 'y' has a reference to it
+ //
+      // We handle this and similar cases with the following heuristic.  If
+      // the function name contains "InsertValue", "SetValue", "AddValue",
+      // "AppendValue", or "SetAttribute", then we assume that arguments may
+      // "escape."  This means that something else holds on to the object,
+      // allowing it to be used even after its local retain count drops to 0.
+ ArgEffect E = (StrInStrNoCase(FName, "InsertValue") != StringRef::npos||
+ StrInStrNoCase(FName, "AddValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "SetValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "AppendValue") != StringRef::npos||
+ StrInStrNoCase(FName, "SetAttribute") != StringRef::npos)
+ ? MayEscape : DoNothing;
+
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, E);
+ }
+ }
+ }
+ while (0);
+
+ if (!S)
+ S = getDefaultSummary();
+
+ // Annotations override defaults.
+ assert(S);
+ updateSummaryFromAnnotations(*S, FD);
+
+ FuncSummaries[FD] = S;
+ return S;
+}
+
+RetainSummary*
+RetainSummaryManager::getCFCreateGetRuleSummary(FunctionDecl* FD,
+ StringRef FName) {
+
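+  // E.g., "CFStringCreateCopy" matches the Create rule (+1/owned result),
+  // while "CFDictionaryGetValue" matches the Get rule (+0/not-owned result);
+  // any name containing "Create"/"Copy" or "Get" is treated the same way.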
+ if (FName.find("Create") != StringRef::npos ||
+ FName.find("Copy") != StringRef::npos)
+ return getCFSummaryCreateRule(FD);
+
+ if (FName.find("Get") != StringRef::npos)
+ return getCFSummaryGetRule(FD);
+
+ return getDefaultSummary();
+}
+
+RetainSummary*
+RetainSummaryManager::getUnarySummary(const FunctionType* FT,
+ UnaryFuncKind func) {
+
+  // Sanity check that this is *really* a unary function.  It might not be
+  // if people do weird things.
+ const FunctionProtoType* FTP = dyn_cast<FunctionProtoType>(FT);
+ if (!FTP || FTP->getNumArgs() != 1)
+ return getPersistentStopSummary();
+
+ assert (ScratchArgs.isEmpty());
+
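+  // Modeled effects, for hypothetical caller code:
+  //   CFRetain(x);           // arg 0: IncRef; result aliases arg 0
+  //   CFRelease(x);          // arg 0: DecRef; no interesting result
+  //   CFMakeCollectable(x);  // arg 0: MakeCollectable; result aliases arg 0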
+ switch (func) {
+ case cfretain: {
+ ScratchArgs = AF.Add(ScratchArgs, 0, IncRef);
+ return getPersistentSummary(RetEffect::MakeAlias(0),
+ DoNothing, DoNothing);
+ }
+
+ case cfrelease: {
+ ScratchArgs = AF.Add(ScratchArgs, 0, DecRef);
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ DoNothing, DoNothing);
+ }
+
+ case cfmakecollectable: {
+ ScratchArgs = AF.Add(ScratchArgs, 0, MakeCollectable);
+      return getPersistentSummary(RetEffect::MakeAlias(0), DoNothing,
+                                  DoNothing);
+ }
+
+ default:
+ assert (false && "Not a supported unary function.");
+ return getDefaultSummary();
+ }
+}
+
+RetainSummary* RetainSummaryManager::getCFSummaryCreateRule(FunctionDecl* FD) {
+ assert (ScratchArgs.isEmpty());
+
+ if (FD->getIdentifier() == CFDictionaryCreateII) {
+ ScratchArgs = AF.Add(ScratchArgs, 1, DoNothingByRef);
+ ScratchArgs = AF.Add(ScratchArgs, 2, DoNothingByRef);
+ }
+
+ return getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
+}
+
+RetainSummary* RetainSummaryManager::getCFSummaryGetRule(FunctionDecl* FD) {
+ assert (ScratchArgs.isEmpty());
+ return getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::CF),
+ DoNothing, DoNothing);
+}
+
+//===----------------------------------------------------------------------===//
+// Summary creation for Selectors.
+//===----------------------------------------------------------------------===//
+
+RetainSummary*
+RetainSummaryManager::getInitMethodSummary(QualType RetTy) {
+ assert(ScratchArgs.isEmpty());
+ // 'init' methods conceptually return a newly allocated object and claim
+ // the receiver.
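+  // E.g., a hypothetical '- (id)initWithName:(NSString*)n' gets a summary
+  // whose receiver effect is DecRefMsg and whose return effect is
+  // ObjCInitRetE.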
+ if (cocoa::isCocoaObjectRef(RetTy) || cocoa::isCFObjectRef(RetTy))
+ return getPersistentSummary(ObjCInitRetE, DecRefMsg);
+
+ return getDefaultSummary();
+}
+
+void
+RetainSummaryManager::updateSummaryFromAnnotations(RetainSummary &Summ,
+ const FunctionDecl *FD) {
+ if (!FD)
+ return;
+
+ QualType RetTy = FD->getResultType();
+
+ // Determine if there is a special return effect for this method.
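+  // E.g., a hypothetical annotated declaration such as
+  //   NSString *MyCreateString(void) __attribute__((ns_returns_retained));
+  // causes the result to be treated as owned, overriding the default.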
+ if (cocoa::isCocoaObjectRef(RetTy)) {
+ if (FD->getAttr<NSReturnsRetainedAttr>()) {
+ Summ.setRetEffect(ObjCAllocRetE);
+ }
+ else if (FD->getAttr<CFReturnsRetainedAttr>()) {
+ Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
+ }
+ else if (FD->getAttr<NSReturnsNotRetainedAttr>()) {
+ Summ.setRetEffect(RetEffect::MakeNotOwned(RetEffect::ObjC));
+ }
+ else if (FD->getAttr<CFReturnsNotRetainedAttr>()) {
+ Summ.setRetEffect(RetEffect::MakeNotOwned(RetEffect::CF));
+ }
+ }
+ else if (RetTy->getAs<PointerType>()) {
+ if (FD->getAttr<CFReturnsRetainedAttr>()) {
+ Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
+ }
+ }
+}
+
+void
+RetainSummaryManager::updateSummaryFromAnnotations(RetainSummary &Summ,
+ const ObjCMethodDecl *MD) {
+ if (!MD)
+ return;
+
+ bool isTrackedLoc = false;
+
+ // Determine if there is a special return effect for this method.
+ if (cocoa::isCocoaObjectRef(MD->getResultType())) {
+ if (MD->getAttr<NSReturnsRetainedAttr>()) {
+ Summ.setRetEffect(ObjCAllocRetE);
+ return;
+ }
+ if (MD->getAttr<NSReturnsNotRetainedAttr>()) {
+ Summ.setRetEffect(RetEffect::MakeNotOwned(RetEffect::ObjC));
+ return;
+ }
+
+ isTrackedLoc = true;
+ }
+
+ if (!isTrackedLoc)
+ isTrackedLoc = MD->getResultType()->getAs<PointerType>() != NULL;
+
+ if (isTrackedLoc) {
+ if (MD->getAttr<CFReturnsRetainedAttr>())
+ Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
+ else if (MD->getAttr<CFReturnsNotRetainedAttr>())
+ Summ.setRetEffect(RetEffect::MakeNotOwned(RetEffect::CF));
+ }
+}
+
+RetainSummary*
+RetainSummaryManager::getCommonMethodSummary(const ObjCMethodDecl* MD,
+ Selector S, QualType RetTy) {
+
+ if (MD) {
+ // Scan the method decl for 'void*' arguments. These should be treated
+ // as 'StopTracking' because they are often used with delegates.
+    // Delegates are a frequent source of false positives with the retain
+ // count checker.
+ unsigned i = 0;
+ for (ObjCMethodDecl::param_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I, ++i)
+ if (ParmVarDecl *PD = *I) {
+ QualType Ty = Ctx.getCanonicalType(PD->getType());
+ if (Ty.getLocalUnqualifiedType() == Ctx.VoidPtrTy)
+ ScratchArgs = AF.Add(ScratchArgs, i, StopTracking);
+ }
+ }
+
+ // Any special effect for the receiver?
+ ArgEffect ReceiverEff = DoNothing;
+
+ // If one of the arguments in the selector has the keyword 'delegate' we
+ // should stop tracking the reference count for the receiver. This is
+ // because the reference count is quite possibly handled by a delegate
+ // method.
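+  // E.g., a hypothetical send '[request setDelegate:handler withTimeout:t]'
+  // matches this heuristic, so the receiver's count is no longer tracked.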
+ if (S.isKeywordSelector()) {
+ const std::string &str = S.getAsString();
+ assert(!str.empty());
+ if (StrInStrNoCase(str, "delegate:") != StringRef::npos)
+ ReceiverEff = StopTracking;
+ }
+
+ // Look for methods that return an owned object.
+ if (cocoa::isCocoaObjectRef(RetTy)) {
+ // EXPERIMENTAL: Assume the Cocoa conventions for all objects returned
+ // by instance methods.
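+    // E.g., a hypothetical '-copyLayout' follows the fundamental rule and is
+    // treated as returning an owned (+1) object, while a plain accessor
+    // '-layout' is treated as returning a not-owned (+0) object.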
+ RetEffect E = cocoa::followsFundamentalRule(S)
+ ? ObjCAllocRetE : RetEffect::MakeNotOwned(RetEffect::ObjC);
+
+ return getPersistentSummary(E, ReceiverEff, MayEscape);
+ }
+
+ // Look for methods that return an owned core foundation object.
+ if (cocoa::isCFObjectRef(RetTy)) {
+ RetEffect E = cocoa::followsFundamentalRule(S)
+ ? RetEffect::MakeOwned(RetEffect::CF, true)
+ : RetEffect::MakeNotOwned(RetEffect::CF);
+
+ return getPersistentSummary(E, ReceiverEff, MayEscape);
+ }
+
+ if (ScratchArgs.isEmpty() && ReceiverEff == DoNothing)
+ return getDefaultSummary();
+
+ return getPersistentSummary(RetEffect::MakeNoRet(), ReceiverEff, MayEscape);
+}
+
+RetainSummary*
+RetainSummaryManager::getInstanceMethodSummary(const ObjCMessageExpr *ME,
+ const GRState *state,
+ const LocationContext *LC) {
+
+  // We need the type information of the tracked receiver object.
+  // Retrieve it from the state.
+ const Expr *Receiver = ME->getInstanceReceiver();
+ const ObjCInterfaceDecl* ID = 0;
+
+ // FIXME: Is this really working as expected? There are cases where
+ // we just use the 'ID' from the message expression.
+ SVal receiverV;
+
+ if (Receiver) {
+ receiverV = state->getSValAsScalarOrLoc(Receiver);
+
+ // FIXME: Eventually replace the use of state->get<RefBindings> with
+ // a generic API for reasoning about the Objective-C types of symbolic
+ // objects.
+ if (SymbolRef Sym = receiverV.getAsLocSymbol())
+ if (const RefVal *T = state->get<RefBindings>(Sym))
+ if (const ObjCObjectPointerType* PT =
+ T->getType()->getAs<ObjCObjectPointerType>())
+ ID = PT->getInterfaceDecl();
+
+ // FIXME: this is a hack. This may or may not be the actual method
+ // that is called.
+ if (!ID) {
+ if (const ObjCObjectPointerType *PT =
+ Receiver->getType()->getAs<ObjCObjectPointerType>())
+ ID = PT->getInterfaceDecl();
+ }
+ } else {
+ // FIXME: Hack for 'super'.
+ ID = ME->getReceiverInterface();
+ }
+
+ // FIXME: The receiver could be a reference to a class, meaning that
+ // we should use the class method.
+ RetainSummary *Summ = getInstanceMethodSummary(ME, ID);
+
+  // Special-case: are we sending a message to "self"?
+  //  This is a hack.  When we have full IPA this should be removed.
+ if (isa<ObjCMethodDecl>(LC->getDecl()) && Receiver) {
+ if (const loc::MemRegionVal *L = dyn_cast<loc::MemRegionVal>(&receiverV)) {
+ // Get the region associated with 'self'.
+ if (const ImplicitParamDecl *SelfDecl = LC->getSelfDecl()) {
+ SVal SelfVal = state->getSVal(state->getRegion(SelfDecl, LC));
+ if (L->StripCasts() == SelfVal.getAsRegion()) {
+ // Update the summary to make the default argument effect
+ // 'StopTracking'.
+ Summ = copySummary(Summ);
+ Summ->setDefaultArgEffect(StopTracking);
+ }
+ }
+ }
+ }
+
+ return Summ ? Summ : getDefaultSummary();
+}
+
+RetainSummary*
+RetainSummaryManager::getInstanceMethodSummary(Selector S,
+ IdentifierInfo *ClsName,
+ const ObjCInterfaceDecl* ID,
+ const ObjCMethodDecl *MD,
+ QualType RetTy) {
+
+ // Look up a summary in our summary cache.
+ RetainSummary *Summ = ObjCMethodSummaries.find(ID, ClsName, S);
+
+ if (!Summ) {
+ assert(ScratchArgs.isEmpty());
+
+ // "initXXX": pass-through for receiver.
+ if (cocoa::deriveNamingConvention(S) == cocoa::InitRule)
+ Summ = getInitMethodSummary(RetTy);
+ else
+ Summ = getCommonMethodSummary(MD, S, RetTy);
+
+ // Annotations override defaults.
+ updateSummaryFromAnnotations(*Summ, MD);
+
+ // Memoize the summary.
+ ObjCMethodSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
+ }
+
+ return Summ;
+}
+
+RetainSummary*
+RetainSummaryManager::getClassMethodSummary(Selector S, IdentifierInfo *ClsName,
+ const ObjCInterfaceDecl *ID,
+ const ObjCMethodDecl *MD,
+ QualType RetTy) {
+
+ assert(ClsName && "Class name must be specified.");
+ RetainSummary *Summ = ObjCClassMethodSummaries.find(ID, ClsName, S);
+
+ if (!Summ) {
+ Summ = getCommonMethodSummary(MD, S, RetTy);
+ // Annotations override defaults.
+ updateSummaryFromAnnotations(*Summ, MD);
+ // Memoize the summary.
+ ObjCClassMethodSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
+ }
+
+ return Summ;
+}
+
+void RetainSummaryManager::InitializeClassMethodSummaries() {
+ assert(ScratchArgs.isEmpty());
+ RetainSummary* Summ = getPersistentSummary(ObjCAllocRetE);
+
+ // Create the summaries for "alloc", "new", and "allocWithZone:" for
+ // NSObject and its derivatives.
+ addNSObjectClsMethSummary(GetNullarySelector("alloc", Ctx), Summ);
+ addNSObjectClsMethSummary(GetNullarySelector("new", Ctx), Summ);
+ addNSObjectClsMethSummary(GetUnarySelector("allocWithZone", Ctx), Summ);
+
+  // Create the [NSAssertionHandler currentHandler] summary.
+ addClassMethSummary("NSAssertionHandler", "currentHandler",
+ getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::ObjC)));
+
+ // Create the [NSAutoreleasePool addObject:] summary.
+ ScratchArgs = AF.Add(ScratchArgs, 0, Autorelease);
+ addClassMethSummary("NSAutoreleasePool", "addObject",
+ getPersistentSummary(RetEffect::MakeNoRet(),
+ DoNothing, Autorelease));
+
+ // Create a summary for [NSCursor dragCopyCursor].
+ addClassMethSummary("NSCursor", "dragCopyCursor",
+ getPersistentSummary(RetEffect::MakeNoRet(), DoNothing,
+ DoNothing));
+
+ // Create the summaries for [NSObject performSelector...]. We treat
+ // these as 'stop tracking' for the arguments because they are often
+ // used for delegates that can release the object. When we have better
+  // inter-procedural analysis we can potentially do something better.  This
+  // workaround removes false positives.
+ Summ = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, StopTracking);
+ IdentifierInfo *NSObjectII = &Ctx.Idents.get("NSObject");
+ addClsMethSummary(NSObjectII, Summ, "performSelector", "withObject",
+ "afterDelay", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelector", "withObject",
+ "afterDelay", "inModes", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelectorOnMainThread",
+ "withObject", "waitUntilDone", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelectorOnMainThread",
+ "withObject", "waitUntilDone", "modes", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelector", "onThread",
+ "withObject", "waitUntilDone", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelector", "onThread",
+ "withObject", "waitUntilDone", "modes", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelectorInBackground",
+ "withObject", NULL);
+
+ // Specially handle NSData.
+ RetainSummary *dataWithBytesNoCopySumm =
+ getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::ObjC), DoNothing,
+ DoNothing);
+ addClsMethSummary("NSData", dataWithBytesNoCopySumm,
+ "dataWithBytesNoCopy", "length", NULL);
+ addClsMethSummary("NSData", dataWithBytesNoCopySumm,
+ "dataWithBytesNoCopy", "length", "freeWhenDone", NULL);
+}
+
+void RetainSummaryManager::InitializeMethodSummaries() {
+
+ assert (ScratchArgs.isEmpty());
+
+ // Create the "init" selector. It just acts as a pass-through for the
+ // receiver.
+ RetainSummary *InitSumm = getPersistentSummary(ObjCInitRetE, DecRefMsg);
+ addNSObjectMethSummary(GetNullarySelector("init", Ctx), InitSumm);
+
+ // awakeAfterUsingCoder: behaves basically like an 'init' method. It
+ // claims the receiver and returns a retained object.
+ addNSObjectMethSummary(GetUnarySelector("awakeAfterUsingCoder", Ctx),
+ InitSumm);
+
+ // The next methods are allocators.
+ RetainSummary *AllocSumm = getPersistentSummary(ObjCAllocRetE);
+ RetainSummary *CFAllocSumm =
+ getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
+
+ // Create the "copy" selector.
+ addNSObjectMethSummary(GetNullarySelector("copy", Ctx), AllocSumm);
+
+ // Create the "mutableCopy" selector.
+ addNSObjectMethSummary(GetNullarySelector("mutableCopy", Ctx), AllocSumm);
+
+ // Create the "retain" selector.
+ RetEffect E = RetEffect::MakeReceiverAlias();
+ RetainSummary *Summ = getPersistentSummary(E, IncRefMsg);
+ addNSObjectMethSummary(GetNullarySelector("retain", Ctx), Summ);
+
+ // Create the "release" selector.
+ Summ = getPersistentSummary(E, DecRefMsg);
+ addNSObjectMethSummary(GetNullarySelector("release", Ctx), Summ);
+
+ // Create the "drain" selector.
+ Summ = getPersistentSummary(E, isGCEnabled() ? DoNothing : DecRef);
+ addNSObjectMethSummary(GetNullarySelector("drain", Ctx), Summ);
+
+ // Create the -dealloc summary.
+ Summ = getPersistentSummary(RetEffect::MakeNoRet(), Dealloc);
+ addNSObjectMethSummary(GetNullarySelector("dealloc", Ctx), Summ);
+
+ // Create the "autorelease" selector.
+ Summ = getPersistentSummary(E, Autorelease);
+ addNSObjectMethSummary(GetNullarySelector("autorelease", Ctx), Summ);
+
+ // Specially handle NSAutoreleasePool.
+ addInstMethSummary("NSAutoreleasePool", "init",
+ getPersistentSummary(RetEffect::MakeReceiverAlias(),
+ NewAutoreleasePool));
+
+ // For NSWindow, allocated objects are (initially) self-owned.
+  // FIXME: For now we opt for false negatives with NSWindow, as these objects
+  // own themselves.  However, they only do this once they are displayed.
+ // Thus, we need to track an NSWindow's display status.
+ // This is tracked in <rdar://problem/6062711>.
+ // See also http://llvm.org/bugs/show_bug.cgi?id=3714.
+ RetainSummary *NoTrackYet = getPersistentSummary(RetEffect::MakeNoRet(),
+ StopTracking,
+ StopTracking);
+
+ addClassMethSummary("NSWindow", "alloc", NoTrackYet);
+
+#if 0
+ addInstMethSummary("NSWindow", NoTrackYet, "initWithContentRect",
+ "styleMask", "backing", "defer", NULL);
+
+ addInstMethSummary("NSWindow", NoTrackYet, "initWithContentRect",
+ "styleMask", "backing", "defer", "screen", NULL);
+#endif
+
+ // For NSPanel (which subclasses NSWindow), allocated objects are not
+ // self-owned.
+  // FIXME: For now we don't track NSPanel objects for the same reason
+  //   as NSWindow objects.
+ addClassMethSummary("NSPanel", "alloc", NoTrackYet);
+
+#if 0
+ addInstMethSummary("NSPanel", NoTrackYet, "initWithContentRect",
+ "styleMask", "backing", "defer", NULL);
+
+ addInstMethSummary("NSPanel", NoTrackYet, "initWithContentRect",
+ "styleMask", "backing", "defer", "screen", NULL);
+#endif
+
+ // Don't track allocated autorelease pools yet, as it is okay to prematurely
+ // exit a method.
+ addClassMethSummary("NSAutoreleasePool", "alloc", NoTrackYet);
+
+ // Create NSAssertionHandler summaries.
+ addPanicSummary("NSAssertionHandler", "handleFailureInFunction", "file",
+ "lineNumber", "description", NULL);
+
+ addPanicSummary("NSAssertionHandler", "handleFailureInMethod", "object",
+ "file", "lineNumber", "description", NULL);
+
+  // Create summaries for QCRenderer/QCView -createSnapshotImageOfType:.
+ addInstMethSummary("QCRenderer", AllocSumm,
+ "createSnapshotImageOfType", NULL);
+ addInstMethSummary("QCView", AllocSumm,
+ "createSnapshotImageOfType", NULL);
+
+ // Create summaries for CIContext, 'createCGImage' and
+ // 'createCGLayerWithSize'. These objects are CF objects, and are not
+ // automatically garbage collected.
+ addInstMethSummary("CIContext", CFAllocSumm,
+ "createCGImage", "fromRect", NULL);
+ addInstMethSummary("CIContext", CFAllocSumm,
+ "createCGImage", "fromRect", "format", "colorSpace", NULL);
+ addInstMethSummary("CIContext", CFAllocSumm, "createCGLayerWithSize",
+ "info", NULL);
+}
+
+//===----------------------------------------------------------------------===//
+// AutoreleaseBindings - State used to track objects in autorelease pools.
+//===----------------------------------------------------------------------===//
+
+typedef llvm::ImmutableMap<SymbolRef, unsigned> ARCounts;
+typedef llvm::ImmutableMap<SymbolRef, ARCounts> ARPoolContents;
+typedef llvm::ImmutableList<SymbolRef> ARStack;
+
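+// The addresses of these statics are used as unique keys into the generic
+// data map (GDM); only their identity matters, not their values.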
+static int AutoRCIndex = 0;
+static int AutoRBIndex = 0;
+
+namespace { class AutoreleasePoolContents {}; }
+namespace { class AutoreleaseStack {}; }
+
+namespace clang {
+template<> struct GRStateTrait<AutoreleaseStack>
+ : public GRStatePartialTrait<ARStack> {
+ static inline void* GDMIndex() { return &AutoRBIndex; }
+};
+
+template<> struct GRStateTrait<AutoreleasePoolContents>
+ : public GRStatePartialTrait<ARPoolContents> {
+ static inline void* GDMIndex() { return &AutoRCIndex; }
+};
+} // end clang namespace
+
+static SymbolRef GetCurrentAutoreleasePool(const GRState* state) {
+ ARStack stack = state->get<AutoreleaseStack>();
+ return stack.isEmpty() ? SymbolRef() : stack.getHead();
+}
+
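+// E.g., autoreleasing the same symbol twice into the current pool leaves the
+// pool's ARCounts entry for that symbol at 2.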
+static const GRState * SendAutorelease(const GRState *state,
+ ARCounts::Factory &F, SymbolRef sym) {
+
+ SymbolRef pool = GetCurrentAutoreleasePool(state);
+ const ARCounts *cnts = state->get<AutoreleasePoolContents>(pool);
+ ARCounts newCnts(0);
+
+ if (cnts) {
+ const unsigned *cnt = (*cnts).lookup(sym);
+ newCnts = F.Add(*cnts, sym, cnt ? *cnt + 1 : 1);
+ }
+ else
+ newCnts = F.Add(F.GetEmptyMap(), sym, 1);
+
+ return state->set<AutoreleasePoolContents>(pool, newCnts);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer functions.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class CFRefCount : public GRTransferFuncs {
+public:
+ class BindingsPrinter : public GRState::Printer {
+ public:
+ virtual void Print(llvm::raw_ostream& Out, const GRState* state,
+ const char* nl, const char* sep);
+ };
+
+private:
+ typedef llvm::DenseMap<const ExplodedNode*, const RetainSummary*>
+ SummaryLogTy;
+
+ RetainSummaryManager Summaries;
+ SummaryLogTy SummaryLog;
+ const LangOptions& LOpts;
+ ARCounts::Factory ARCountFactory;
+
+ BugType *useAfterRelease, *releaseNotOwned;
+ BugType *deallocGC, *deallocNotOwned;
+ BugType *leakWithinFunction, *leakAtReturn;
+ BugType *overAutorelease;
+ BugType *returnNotOwnedForOwned;
+ BugReporter *BR;
+
+  const GRState *Update(const GRState *state, SymbolRef sym, RefVal V,
+                        ArgEffect E, RefVal::Kind& hasErr);
+
+ void ProcessNonLeakError(ExplodedNodeSet& Dst,
+ GRStmtNodeBuilder& Builder,
+ Expr* NodeExpr, SourceRange ErrorRange,
+ ExplodedNode* Pred,
+ const GRState* St,
+ RefVal::Kind hasErr, SymbolRef Sym);
+
+  const GRState *HandleSymbolDeath(const GRState *state, SymbolRef sid,
+                                   RefVal V,
+                                   llvm::SmallVectorImpl<SymbolRef> &Leaked);
+
+ ExplodedNode* ProcessLeaks(const GRState * state,
+ llvm::SmallVectorImpl<SymbolRef> &Leaked,
+ GenericNodeBuilder &Builder,
+ GRExprEngine &Eng,
+ ExplodedNode *Pred = 0);
+
+public:
+ CFRefCount(ASTContext& Ctx, bool gcenabled, const LangOptions& lopts)
+ : Summaries(Ctx, gcenabled),
+ LOpts(lopts), useAfterRelease(0), releaseNotOwned(0),
+ deallocGC(0), deallocNotOwned(0),
+ leakWithinFunction(0), leakAtReturn(0), overAutorelease(0),
+ returnNotOwnedForOwned(0), BR(0) {}
+
+ virtual ~CFRefCount() {}
+
+ void RegisterChecks(GRExprEngine &Eng);
+
+ virtual void RegisterPrinters(std::vector<GRState::Printer*>& Printers) {
+ Printers.push_back(new BindingsPrinter());
+ }
+
+ bool isGCEnabled() const { return Summaries.isGCEnabled(); }
+ const LangOptions& getLangOptions() const { return LOpts; }
+
+ const RetainSummary *getSummaryOfNode(const ExplodedNode *N) const {
+ SummaryLogTy::const_iterator I = SummaryLog.find(N);
+ return I == SummaryLog.end() ? 0 : I->second;
+ }
+
+ // Calls.
+
+ void EvalSummary(ExplodedNodeSet& Dst,
+ GRExprEngine& Eng,
+ GRStmtNodeBuilder& Builder,
+ Expr* Ex,
+ InstanceReceiver Receiver,
+ const RetainSummary& Summ,
+ const MemRegion *Callee,
+ ExprIterator arg_beg, ExprIterator arg_end,
+ ExplodedNode* Pred, const GRState *state);
+
+ virtual void EvalCall(ExplodedNodeSet& Dst,
+ GRExprEngine& Eng,
+ GRStmtNodeBuilder& Builder,
+ CallExpr* CE, SVal L,
+ ExplodedNode* Pred);
+
+
+ virtual void EvalObjCMessageExpr(ExplodedNodeSet& Dst,
+ GRExprEngine& Engine,
+ GRStmtNodeBuilder& Builder,
+ ObjCMessageExpr* ME,
+ ExplodedNode* Pred,
+ const GRState *state);
+
+ bool EvalObjCMessageExprAux(ExplodedNodeSet& Dst,
+ GRExprEngine& Engine,
+ GRStmtNodeBuilder& Builder,
+ ObjCMessageExpr* ME,
+ ExplodedNode* Pred);
+
+ // Stores.
+ virtual void EvalBind(GRStmtNodeBuilderRef& B, SVal location, SVal val);
+
+ // End-of-path.
+
+ virtual void EvalEndPath(GRExprEngine& Engine,
+ GREndPathNodeBuilder& Builder);
+
+ virtual void EvalDeadSymbols(ExplodedNodeSet& Dst,
+ GRExprEngine& Engine,
+ GRStmtNodeBuilder& Builder,
+ ExplodedNode* Pred,
+ Stmt* S, const GRState* state,
+ SymbolReaper& SymReaper);
+
+ std::pair<ExplodedNode*, const GRState *>
+ HandleAutoreleaseCounts(const GRState * state, GenericNodeBuilder Bd,
+ ExplodedNode* Pred, GRExprEngine &Eng,
+ SymbolRef Sym, RefVal V, bool &stop);
+ // Return statements.
+
+ virtual void EvalReturn(ExplodedNodeSet& Dst,
+ GRExprEngine& Engine,
+ GRStmtNodeBuilder& Builder,
+ ReturnStmt* S,
+ ExplodedNode* Pred);
+
+ // Assumptions.
+
+ virtual const GRState *EvalAssume(const GRState* state, SVal condition,
+ bool assumption);
+};
+
+} // end anonymous namespace
+
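+// Prints one pool as, e.g., " 42:{(sym,count)(sym,count)}"; the caller's
+// enclosing pool, whose symbol is unknown, prints as " <pool>:{...}".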
+static void PrintPool(llvm::raw_ostream &Out, SymbolRef Sym,
+ const GRState *state) {
+ Out << ' ';
+ if (Sym)
+ Out << Sym->getSymbolID();
+ else
+ Out << "<pool>";
+ Out << ":{";
+
+ // Get the contents of the pool.
+ if (const ARCounts *cnts = state->get<AutoreleasePoolContents>(Sym))
+ for (ARCounts::iterator J=cnts->begin(), EJ=cnts->end(); J != EJ; ++J)
+ Out << '(' << J.getKey() << ',' << J.getData() << ')';
+
+ Out << '}';
+}
+
+void CFRefCount::BindingsPrinter::Print(llvm::raw_ostream& Out,
+ const GRState* state,
+ const char* nl, const char* sep) {
+
+ RefBindings B = state->get<RefBindings>();
+
+ if (!B.isEmpty())
+ Out << sep << nl;
+
+ for (RefBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
+ Out << (*I).first << " : ";
+ (*I).second.print(Out);
+ Out << nl;
+ }
+
+ // Print the autorelease stack.
+ Out << sep << nl << "AR pool stack:";
+ ARStack stack = state->get<AutoreleaseStack>();
+
+ PrintPool(Out, SymbolRef(), state); // Print the caller's pool.
+ for (ARStack::iterator I=stack.begin(), E=stack.end(); I!=E; ++I)
+ PrintPool(Out, *I, state);
+
+ Out << nl;
+}
+
+//===----------------------------------------------------------------------===//
+// Error reporting.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+ //===-------------===//
+ // Bug Descriptions. //
+ //===-------------===//
+
+ class CFRefBug : public BugType {
+ protected:
+ CFRefCount& TF;
+
+ CFRefBug(CFRefCount* tf, llvm::StringRef name)
+ : BugType(name, "Memory (Core Foundation/Objective-C)"), TF(*tf) {}
+ public:
+
+ CFRefCount& getTF() { return TF; }
+ const CFRefCount& getTF() const { return TF; }
+
+ // FIXME: Eventually remove.
+ virtual const char* getDescription() const = 0;
+
+ virtual bool isLeak() const { return false; }
+ };
+
+ class UseAfterRelease : public CFRefBug {
+ public:
+ UseAfterRelease(CFRefCount* tf)
+ : CFRefBug(tf, "Use-after-release") {}
+
+ const char* getDescription() const {
+ return "Reference-counted object is used after it is released";
+ }
+ };
+
+ class BadRelease : public CFRefBug {
+ public:
+ BadRelease(CFRefCount* tf) : CFRefBug(tf, "Bad release") {}
+
+ const char* getDescription() const {
+ return "Incorrect decrement of the reference count of an object that is "
+ "not owned at this point by the caller";
+ }
+ };
+
+ class DeallocGC : public CFRefBug {
+ public:
+ DeallocGC(CFRefCount *tf)
+ : CFRefBug(tf, "-dealloc called while using garbage collection") {}
+
+ const char *getDescription() const {
+ return "-dealloc called while using garbage collection";
+ }
+ };
+
+ class DeallocNotOwned : public CFRefBug {
+ public:
+ DeallocNotOwned(CFRefCount *tf)
+ : CFRefBug(tf, "-dealloc sent to non-exclusively owned object") {}
+
+ const char *getDescription() const {
+ return "-dealloc sent to object that may be referenced elsewhere";
+ }
+ };
+
+ class OverAutorelease : public CFRefBug {
+ public:
+ OverAutorelease(CFRefCount *tf) :
+ CFRefBug(tf, "Object sent -autorelease too many times") {}
+
+ const char *getDescription() const {
+ return "Object sent -autorelease too many times";
+ }
+ };
+
+ class ReturnedNotOwnedForOwned : public CFRefBug {
+ public:
+ ReturnedNotOwnedForOwned(CFRefCount *tf) :
+ CFRefBug(tf, "Method should return an owned object") {}
+
+ const char *getDescription() const {
+ return "Object with +0 retain counts returned to caller where a +1 "
+ "(owning) retain count is expected";
+ }
+ };
+
+ class Leak : public CFRefBug {
+ const bool isReturn;
+ protected:
+ Leak(CFRefCount* tf, llvm::StringRef name, bool isRet)
+ : CFRefBug(tf, name), isReturn(isRet) {}
+ public:
+
+ const char* getDescription() const { return ""; }
+
+ bool isLeak() const { return true; }
+ };
+
+ class LeakAtReturn : public Leak {
+ public:
+ LeakAtReturn(CFRefCount* tf, llvm::StringRef name)
+ : Leak(tf, name, true) {}
+ };
+
+ class LeakWithinFunction : public Leak {
+ public:
+ LeakWithinFunction(CFRefCount* tf, llvm::StringRef name)
+ : Leak(tf, name, false) {}
+ };
+
+ //===---------===//
+ // Bug Reports. //
+ //===---------===//
+
+ class CFRefReport : public RangedBugReport {
+ protected:
+ SymbolRef Sym;
+ const CFRefCount &TF;
+ public:
+ CFRefReport(CFRefBug& D, const CFRefCount &tf,
+ ExplodedNode *n, SymbolRef sym)
+ : RangedBugReport(D, D.getDescription(), n), Sym(sym), TF(tf) {}
+
+ CFRefReport(CFRefBug& D, const CFRefCount &tf,
+ ExplodedNode *n, SymbolRef sym, llvm::StringRef endText)
+ : RangedBugReport(D, D.getDescription(), endText, n), Sym(sym), TF(tf) {}
+
+ virtual ~CFRefReport() {}
+
+ CFRefBug& getBugType() {
+ return (CFRefBug&) RangedBugReport::getBugType();
+ }
+ const CFRefBug& getBugType() const {
+ return (const CFRefBug&) RangedBugReport::getBugType();
+ }
+
+ virtual void getRanges(const SourceRange*& beg, const SourceRange*& end) {
+ if (!getBugType().isLeak())
+ RangedBugReport::getRanges(beg, end);
+ else
+ beg = end = 0;
+ }
+
+ SymbolRef getSymbol() const { return Sym; }
+
+ PathDiagnosticPiece* getEndPath(BugReporterContext& BRC,
+ const ExplodedNode* N);
+
+ std::pair<const char**,const char**> getExtraDescriptiveText();
+
+ PathDiagnosticPiece* VisitNode(const ExplodedNode* N,
+ const ExplodedNode* PrevN,
+ BugReporterContext& BRC);
+ };
+
+ class CFRefLeakReport : public CFRefReport {
+ SourceLocation AllocSite;
+ const MemRegion* AllocBinding;
+ public:
+ CFRefLeakReport(CFRefBug& D, const CFRefCount &tf,
+ ExplodedNode *n, SymbolRef sym,
+ GRExprEngine& Eng);
+
+ PathDiagnosticPiece* getEndPath(BugReporterContext& BRC,
+ const ExplodedNode* N);
+
+ SourceLocation getLocation() const { return AllocSite; }
+ };
+} // end anonymous namespace
+
+
+
+static const char* Msgs[] = {
+ // GC only
+ "Code is compiled to only use garbage collection",
+ // No GC.
+ "Code is compiled to use reference counts",
+ // Hybrid, with GC.
+ "Code is compiled to use either garbage collection (GC) or reference counts"
+ " (non-GC). The bug occurs with GC enabled",
+ // Hybrid, without GC
+ "Code is compiled to use either garbage collection (GC) or reference counts"
+ " (non-GC). The bug occurs in non-GC mode"
+};
+
+std::pair<const char**,const char**> CFRefReport::getExtraDescriptiveText() {
+ CFRefCount& TF = static_cast<CFRefBug&>(getBugType()).getTF();
+
+ switch (TF.getLangOptions().getGCMode()) {
+ default:
+ assert(false);
+
+ case LangOptions::GCOnly:
+ assert (TF.isGCEnabled());
+ return std::make_pair(&Msgs[0], &Msgs[0]+1);
+
+ case LangOptions::NonGC:
+ assert (!TF.isGCEnabled());
+ return std::make_pair(&Msgs[1], &Msgs[1]+1);
+
+ case LangOptions::HybridGC:
+ if (TF.isGCEnabled())
+ return std::make_pair(&Msgs[2], &Msgs[2]+1);
+ else
+ return std::make_pair(&Msgs[3], &Msgs[3]+1);
+ }
+}
+
+static inline bool contains(const llvm::SmallVectorImpl<ArgEffect>& V,
+ ArgEffect X) {
+ for (llvm::SmallVectorImpl<ArgEffect>::const_iterator I=V.begin(), E=V.end();
+ I!=E; ++I)
+ if (*I == X) return true;
+
+ return false;
+}
+
+PathDiagnosticPiece* CFRefReport::VisitNode(const ExplodedNode* N,
+ const ExplodedNode* PrevN,
+ BugReporterContext& BRC) {
+
+ if (!isa<PostStmt>(N->getLocation()))
+ return NULL;
+
+ // Check if the type state has changed.
+ const GRState *PrevSt = PrevN->getState();
+ const GRState *CurrSt = N->getState();
+
+ const RefVal* CurrT = CurrSt->get<RefBindings>(Sym);
+ if (!CurrT) return NULL;
+
+ const RefVal &CurrV = *CurrT;
+ const RefVal *PrevT = PrevSt->get<RefBindings>(Sym);
+
+  // Create a string buffer to contain all the useful things we want
+  // to tell the user.
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ // This is the allocation site since the previous node had no bindings
+ // for this symbol.
+ if (!PrevT) {
+ const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
+
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ // Get the name of the callee (if it is available).
+ SVal X = CurrSt->getSValAsScalarOrLoc(CE->getCallee());
+ if (const FunctionDecl* FD = X.getAsFunctionDecl())
+ os << "Call to function '" << FD << '\'';
+ else
+ os << "function call";
+ }
+ else {
+ assert (isa<ObjCMessageExpr>(S));
+ os << "Method";
+ }
+
+ if (CurrV.getObjKind() == RetEffect::CF) {
+ os << " returns a Core Foundation object with a ";
+ }
+ else {
+ assert (CurrV.getObjKind() == RetEffect::ObjC);
+ os << " returns an Objective-C object with a ";
+ }
+
+ if (CurrV.isOwned()) {
+ os << "+1 retain count (owning reference).";
+
+ if (static_cast<CFRefBug&>(getBugType()).getTF().isGCEnabled()) {
+ assert(CurrV.getObjKind() == RetEffect::CF);
+ os << " "
+ "Core Foundation objects are not automatically garbage collected.";
+ }
+ }
+ else {
+ assert (CurrV.isNotOwned());
+ os << "+0 retain count (non-owning reference).";
+ }
+
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager());
+ return new PathDiagnosticEventPiece(Pos, os.str());
+ }
+
+  // Gather up the effects that were performed on the object at this
+  // program point.
+ llvm::SmallVector<ArgEffect, 2> AEffects;
+
+ if (const RetainSummary *Summ =
+ TF.getSummaryOfNode(BRC.getNodeResolver().getOriginalNode(N))) {
+ // We only have summaries attached to nodes after evaluating CallExpr and
+ // ObjCMessageExprs.
+ const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
+
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ // Iterate through the parameter expressions and see if the symbol
+ // was ever passed as an argument.
+ unsigned i = 0;
+
+ for (CallExpr::const_arg_iterator AI=CE->arg_begin(), AE=CE->arg_end();
+ AI!=AE; ++AI, ++i) {
+
+ // Retrieve the value of the argument. Is it the symbol
+ // we are interested in?
+ if (CurrSt->getSValAsScalarOrLoc(*AI).getAsLocSymbol() != Sym)
+ continue;
+
+ // We have an argument. Get the effect!
+ AEffects.push_back(Summ->getArg(i));
+ }
+ }
+ else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
+ if (const Expr *receiver = ME->getInstanceReceiver())
+ if (CurrSt->getSValAsScalarOrLoc(receiver).getAsLocSymbol() == Sym) {
+ // The symbol we are tracking is the receiver.
+ AEffects.push_back(Summ->getReceiverEffect());
+ }
+ }
+ }
+
+ do {
+ // Get the previous type state.
+ RefVal PrevV = *PrevT;
+
+ // Specially handle -dealloc.
+ if (!TF.isGCEnabled() && contains(AEffects, Dealloc)) {
+ // Determine if the object's reference count was pushed to zero.
+ assert(!(PrevV == CurrV) && "The typestate *must* have changed.");
+ // We may not have transitioned to 'release' if we hit an error.
+ // This case is handled elsewhere.
+ if (CurrV.getKind() == RefVal::Released) {
+ assert(CurrV.getCombinedCounts() == 0);
+ os << "Object released by directly sending the '-dealloc' message";
+ break;
+ }
+ }
+
+ // Specially handle CFMakeCollectable and friends.
+ if (contains(AEffects, MakeCollectable)) {
+ // Get the name of the function.
+ const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
+ SVal X = CurrSt->getSValAsScalarOrLoc(cast<CallExpr>(S)->getCallee());
+ const FunctionDecl* FD = X.getAsFunctionDecl();
+ const std::string& FName = FD->getNameAsString();
+
+ if (TF.isGCEnabled()) {
+ // Determine if the object's reference count was pushed to zero.
+ assert(!(PrevV == CurrV) && "The typestate *must* have changed.");
+
+ os << "In GC mode a call to '" << FName
+ << "' decrements an object's retain count and registers the "
+ "object with the garbage collector. ";
+
+ if (CurrV.getKind() == RefVal::Released) {
+ assert(CurrV.getCount() == 0);
+ os << "Since it now has a 0 retain count the object can be "
+ "automatically collected by the garbage collector.";
+ }
+ else
+ os << "An object must have a 0 retain count to be garbage collected. "
+ "After this call its retain count is +" << CurrV.getCount()
+ << '.';
+ }
+ else
+ os << "When GC is not enabled a call to '" << FName
+ << "' has no effect on its argument.";
+
+ // Nothing more to say.
+ break;
+ }
+
+ // Determine if the typestate has changed.
+ if (!(PrevV == CurrV))
+ switch (CurrV.getKind()) {
+ case RefVal::Owned:
+ case RefVal::NotOwned:
+
+ if (PrevV.getCount() == CurrV.getCount()) {
+ // Did an autorelease message get sent?
+ if (PrevV.getAutoreleaseCount() == CurrV.getAutoreleaseCount())
+ return 0;
+
+ assert(PrevV.getAutoreleaseCount() < CurrV.getAutoreleaseCount());
+ os << "Object sent -autorelease message";
+ break;
+ }
+
+ if (PrevV.getCount() > CurrV.getCount())
+ os << "Reference count decremented.";
+ else
+ os << "Reference count incremented.";
+
+ if (unsigned Count = CurrV.getCount())
+ os << " The object now has a +" << Count << " retain count.";
+
+ if (PrevV.getKind() == RefVal::Released) {
+ assert(TF.isGCEnabled() && CurrV.getCount() > 0);
+ os << " The object is not eligible for garbage collection until the "
+ "retain count reaches 0 again.";
+ }
+
+ break;
+
+ case RefVal::Released:
+ os << "Object released.";
+ break;
+
+ case RefVal::ReturnedOwned:
+ os << "Object returned to caller as an owning reference (single retain "
+ "count transferred to caller).";
+ break;
+
+ case RefVal::ReturnedNotOwned:
+ os << "Object returned to caller with a +0 (non-owning) retain count.";
+ break;
+
+ default:
+ return NULL;
+ }
+
+ // Emit any remaining diagnostics for the argument effects (if any).
+ for (llvm::SmallVectorImpl<ArgEffect>::iterator I=AEffects.begin(),
+ E=AEffects.end(); I != E; ++I) {
+
+ // A bunch of things have alternate behavior under GC.
+ if (TF.isGCEnabled())
+ switch (*I) {
+ default: break;
+ case Autorelease:
+ os << "In GC mode an 'autorelease' has no effect.";
+ continue;
+ case IncRefMsg:
+ os << "In GC mode the 'retain' message has no effect.";
+ continue;
+ case DecRefMsg:
+ os << "In GC mode the 'release' message has no effect.";
+ continue;
+ }
+ }
+ } while (0);
+
+ if (os.str().empty())
+ return 0; // We have nothing to say!
+
+ const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager());
+ PathDiagnosticPiece* P = new PathDiagnosticEventPiece(Pos, os.str());
+
+ // Add the range by scanning the children of the statement for any bindings
+ // to Sym.
+ for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+ I!=E; ++I)
+ if (const Expr* Exp = dyn_cast_or_null<Expr>(*I))
+ if (CurrSt->getSValAsScalarOrLoc(Exp).getAsLocSymbol() == Sym) {
+ P->addRange(Exp->getSourceRange());
+ break;
+ }
+
+ return P;
+}
+
+namespace {
+ class FindUniqueBinding :
+ public StoreManager::BindingsHandler {
+ SymbolRef Sym;
+ const MemRegion* Binding;
+ bool First;
+
+ public:
+ FindUniqueBinding(SymbolRef sym) : Sym(sym), Binding(0), First(true) {}
+
+ bool HandleBinding(StoreManager& SMgr, Store store, const MemRegion* R,
+ SVal val) {
+
+ SymbolRef SymV = val.getAsSymbol();
+ if (!SymV || SymV != Sym)
+ return true;
+
+ if (Binding) {
+ First = false;
+ return false;
+ }
+ else
+ Binding = R;
+
+ return true;
+ }
+
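+    // True only when exactly one region was found binding the symbol.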
+ operator bool() { return First && Binding; }
+ const MemRegion* getRegion() { return Binding; }
+ };
+}
+
+static std::pair<const ExplodedNode*,const MemRegion*>
+GetAllocationSite(GRStateManager& StateMgr, const ExplodedNode* N,
+ SymbolRef Sym) {
+
+  // Find both the first node that referred to the tracked symbol and the
+  // memory location that the value was stored to.
+ const ExplodedNode* Last = N;
+ const MemRegion* FirstBinding = 0;
+
+ while (N) {
+ const GRState* St = N->getState();
+ RefBindings B = St->get<RefBindings>();
+
+ if (!B.lookup(Sym))
+ break;
+
+ FindUniqueBinding FB(Sym);
+ StateMgr.iterBindings(St, FB);
+ if (FB) FirstBinding = FB.getRegion();
+
+ Last = N;
+ N = N->pred_empty() ? NULL : *(N->pred_begin());
+ }
+
+ return std::make_pair(Last, FirstBinding);
+}
+
+PathDiagnosticPiece*
+CFRefReport::getEndPath(BugReporterContext& BRC,
+ const ExplodedNode* EndN) {
+ // Tell the BugReporterContext to report cases when the tracked symbol is
+ // assigned to different variables, etc.
+ BRC.addNotableSymbol(Sym);
+ return RangedBugReport::getEndPath(BRC, EndN);
+}
+
+PathDiagnosticPiece*
+CFRefLeakReport::getEndPath(BugReporterContext& BRC,
+ const ExplodedNode* EndN){
+
+ // Tell the BugReporterContext to report cases when the tracked symbol is
+ // assigned to different variables, etc.
+ BRC.addNotableSymbol(Sym);
+
+ // We are reporting a leak. Walk up the graph to get to the first node where
+  // the symbol appeared, and also get the first VarDecl that the tracked
+  // object is stored to.
+ const ExplodedNode* AllocNode = 0;
+ const MemRegion* FirstBinding = 0;
+
+ llvm::tie(AllocNode, FirstBinding) =
+ GetAllocationSite(BRC.getStateManager(), EndN, Sym);
+
+  // Get the allocation site.
+ assert(AllocNode);
+ const Stmt* FirstStmt = cast<PostStmt>(AllocNode->getLocation()).getStmt();
+
+ SourceManager& SMgr = BRC.getSourceManager();
+ unsigned AllocLine =SMgr.getInstantiationLineNumber(FirstStmt->getLocStart());
+
+ // Compute an actual location for the leak. Sometimes a leak doesn't
+ // occur at an actual statement (e.g., transition between blocks; end
+ // of function) so we need to walk the graph and compute a real location.
+ const ExplodedNode* LeakN = EndN;
+ PathDiagnosticLocation L;
+
+ while (LeakN) {
+ ProgramPoint P = LeakN->getLocation();
+
+ if (const PostStmt *PS = dyn_cast<PostStmt>(&P)) {
+ L = PathDiagnosticLocation(PS->getStmt()->getLocStart(), SMgr);
+ break;
+ }
+ else if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ if (const Stmt* Term = BE->getSrc()->getTerminator()) {
+ L = PathDiagnosticLocation(Term->getLocStart(), SMgr);
+ break;
+ }
+ }
+
+ LeakN = LeakN->succ_empty() ? 0 : *(LeakN->succ_begin());
+ }
+
+ if (!L.isValid()) {
+ const Decl &D = EndN->getCodeDecl();
+ L = PathDiagnosticLocation(D.getBodyRBrace(), SMgr);
+ }
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "Object allocated on line " << AllocLine;
+
+ if (FirstBinding)
+ os << " and stored into '" << FirstBinding->getString() << '\'';
+
+ // Get the retain count.
+ const RefVal* RV = EndN->getState()->get<RefBindings>(Sym);
+
+ if (RV->getKind() == RefVal::ErrorLeakReturned) {
+ // FIXME: Per comments in rdar://6320065, "create" only applies to CF
+    // objects.  Only "copy", "alloc", "retain" and "new" transfer ownership
+ // to the caller for NS objects.
+ ObjCMethodDecl& MD = cast<ObjCMethodDecl>(EndN->getCodeDecl());
+ os << " is returned from a method whose name ('"
+ << MD.getSelector().getAsString()
+ << "') does not contain 'copy' or otherwise starts with"
+ " 'new' or 'alloc'. This violates the naming convention rules given"
+ " in the Memory Management Guide for Cocoa (object leaked)";
+ }
+ else if (RV->getKind() == RefVal::ErrorGCLeakReturned) {
+ ObjCMethodDecl& MD = cast<ObjCMethodDecl>(EndN->getCodeDecl());
+ os << " and returned from method '" << MD.getSelector().getAsString()
+ << "' is potentially leaked when using garbage collection. Callers "
+ "of this method do not expect a returned object with a +1 retain "
+ "count since they expect the object to be managed by the garbage "
+ "collector";
+ }
+ else
+ os << " is no longer referenced after this point and has a retain count of"
+ " +" << RV->getCount() << " (object leaked)";
+
+ return new PathDiagnosticEventPiece(L, os.str());
+}
+
+CFRefLeakReport::CFRefLeakReport(CFRefBug& D, const CFRefCount &tf,
+ ExplodedNode *n,
+ SymbolRef sym, GRExprEngine& Eng)
+: CFRefReport(D, tf, n, sym) {
+
+  // Most bug reports are cached at the location where they occurred.
+ // With leaks, we want to unique them by the location where they were
+ // allocated, and only report a single path. To do this, we need to find
+ // the allocation site of a piece of tracked memory, which we do via a
+ // call to GetAllocationSite. This will walk the ExplodedGraph backwards.
+ // Note that this is *not* the trimmed graph; we are guaranteed, however,
+ // that all ancestor nodes that represent the allocation site have the
+ // same SourceLocation.
+ const ExplodedNode* AllocNode = 0;
+
+ llvm::tie(AllocNode, AllocBinding) = // Set AllocBinding.
+ GetAllocationSite(Eng.getStateManager(), getEndNode(), getSymbol());
+
+ // Get the SourceLocation for the allocation site.
+ ProgramPoint P = AllocNode->getLocation();
+ AllocSite = cast<PostStmt>(P).getStmt()->getLocStart();
+
+ // Fill in the description of the bug.
+ Description.clear();
+ llvm::raw_string_ostream os(Description);
+ SourceManager& SMgr = Eng.getContext().getSourceManager();
+ unsigned AllocLine = SMgr.getInstantiationLineNumber(AllocSite);
+ os << "Potential leak ";
+ if (tf.isGCEnabled()) {
+ os << "(when using garbage collection) ";
+ }
+ os << "of an object allocated on line " << AllocLine;
+
+ // FIXME: AllocBinding doesn't get populated for RegionStore yet.
+ if (AllocBinding)
+ os << " and stored into '" << AllocBinding->getString() << '\'';
+}
+
+//===----------------------------------------------------------------------===//
+// Main checker logic.
+//===----------------------------------------------------------------------===//
+
+/// GetReturnType - Used to get the return type of a message expression or
+/// function call with the intention of affixing that type to a tracked symbol.
+/// While the return type can be queried directly from RetE, when invoking
+/// class methods we augment the return type to be that of a pointer to the
+/// class (as opposed to it just being id).
+static QualType GetReturnType(const Expr* RetE, ASTContext& Ctx) {
+ QualType RetTy = RetE->getType();
+ // If RetE is not a message expression just return its type.
+  // If RetE is a message expression, return its type if it is something
+  // more specific than id.
+ if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(RetE))
+ if (const ObjCObjectPointerType *PT = RetTy->getAs<ObjCObjectPointerType>())
+ if (PT->isObjCQualifiedIdType() || PT->isObjCIdType() ||
+ PT->isObjCClassType()) {
+ // At this point we know the return type of the message expression is
+ // id, id<...>, or Class. If we have an ObjCInterfaceDecl, we know this
+ // is a call to a class method whose type we can resolve. In such
+ // cases, promote the return type to XXX* (where XXX is the class).
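+        // E.g., a hypothetical '[MyClass alloc]' has static type 'id' but
+        // is promoted here to 'MyClass *'.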
+ const ObjCInterfaceDecl *D = ME->getReceiverInterface();
+ return !D ? RetTy :
+ Ctx.getObjCObjectPointerType(Ctx.getObjCInterfaceType(D));
+ }
+
+ return RetTy;
+}
+
+void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
+ GRExprEngine& Eng,
+ GRStmtNodeBuilder& Builder,
+ Expr* Ex,
+ InstanceReceiver Receiver,
+ const RetainSummary& Summ,
+ const MemRegion *Callee,
+ ExprIterator arg_beg, ExprIterator arg_end,
+ ExplodedNode* Pred, const GRState *state) {
+
+ // Evaluate the effect of the arguments.
+ RefVal::Kind hasErr = (RefVal::Kind) 0;
+ unsigned idx = 0;
+ SourceRange ErrorRange;
+ SymbolRef ErrorSym = 0;
+
+ llvm::SmallVector<const MemRegion*, 10> RegionsToInvalidate;
+
+ for (ExprIterator I = arg_beg; I != arg_end; ++I, ++idx) {
+ SVal V = state->getSValAsScalarOrLoc(*I);
+ SymbolRef Sym = V.getAsLocSymbol();
+
+ if (Sym)
+ if (RefBindings::data_type* T = state->get<RefBindings>(Sym)) {
+ state = Update(state, Sym, *T, Summ.getArg(idx), hasErr);
+ if (hasErr) {
+ ErrorRange = (*I)->getSourceRange();
+ ErrorSym = Sym;
+ break;
+ }
+ continue;
+ }
+
+ tryAgain:
+ if (isa<Loc>(V)) {
+ if (loc::MemRegionVal* MR = dyn_cast<loc::MemRegionVal>(&V)) {
+ if (Summ.getArg(idx) == DoNothingByRef)
+ continue;
+
+ // Invalidate the value of the variable passed by reference.
+ const MemRegion *R = MR->getRegion();
+
+ // Are we dealing with an ElementRegion? If the element type is
+      // a basic integer type (e.g., char, int) and the underlying region
+ // is a variable region then strip off the ElementRegion.
+ // FIXME: We really need to think about this for the general case
+      //   as sometimes we are reasoning about arrays, and other times
+      //   (char*), etc. is just a form of passing raw bytes.
+ // e.g., void *p = alloca(); foo((char*)p);
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ // Checking for 'integral type' is probably too promiscuous, but
+ // we'll leave it in for now until we have a systematic way of
+ // handling all of these cases. Eventually we need to come up
+ // with an interface to StoreManager so that this logic can be
+        // appropriately delegated to the respective StoreManagers while
+ // still allowing us to do checker-specific logic (e.g.,
+ // invalidating reference counts), probably via callbacks.
+ if (ER->getElementType()->isIntegralType()) {
+ const MemRegion *superReg = ER->getSuperRegion();
+ if (isa<VarRegion>(superReg) || isa<FieldRegion>(superReg) ||
+ isa<ObjCIvarRegion>(superReg))
+ R = cast<TypedRegion>(superReg);
+ }
+ // FIXME: What about layers of ElementRegions?
+ }
+
+ // Mark this region for invalidation. We batch invalidate regions
+ // below for efficiency.
+ RegionsToInvalidate.push_back(R);
+ continue;
+ }
+ else {
+ // Nuke all other arguments passed by reference.
+ // FIXME: is this necessary or correct? This handles the non-Region
+ // cases. Is it ever valid to store to these?
+ state = state->unbindLoc(cast<Loc>(V));
+ }
+ }
+ else if (isa<nonloc::LocAsInteger>(V)) {
+ // If we are passing a location wrapped as an integer, unwrap it and
+ // invalidate the values referred by the location.
+ V = cast<nonloc::LocAsInteger>(V).getLoc();
+ goto tryAgain;
+ }
+ }
+
+  // Block calls result in all captured values that are passed by reference
+  // being invalidated.
+ if (const BlockDataRegion *BR = dyn_cast_or_null<BlockDataRegion>(Callee)) {
+ RegionsToInvalidate.push_back(BR);
+ }
+
+  // Invalidate the regions we designated for invalidation using the batch
+  // invalidation API.
+ if (!RegionsToInvalidate.empty()) {
+ // FIXME: We can have collisions on the conjured symbol if the
+ // expression *I also creates conjured symbols. We probably want
+ // to identify conjured symbols by an expression pair: the enclosing
+ // expression (the context) and the expression itself. This should
+ // disambiguate conjured symbols.
+ unsigned Count = Builder.getCurrentBlockCount();
+ StoreManager& StoreMgr = Eng.getStateManager().getStoreManager();
+
+
+ StoreManager::InvalidatedSymbols IS;
+ Store store = state->getStore();
+ store = StoreMgr.InvalidateRegions(store, RegionsToInvalidate.data(),
+ RegionsToInvalidate.data() +
+ RegionsToInvalidate.size(),
+ Ex, Count, &IS);
+ state = state->makeWithStore(store);
+ for (StoreManager::InvalidatedSymbols::iterator I = IS.begin(),
+ E = IS.end(); I!=E; ++I) {
+ // Remove any existing reference-count binding.
+ state = state->remove<RefBindings>(*I);
+ }
+ }
+
+ // Evaluate the effect on the message receiver.
+ if (!ErrorRange.isValid() && Receiver) {
+ SymbolRef Sym = Receiver.getSValAsScalarOrLoc(state).getAsLocSymbol();
+ if (Sym) {
+ if (const RefVal* T = state->get<RefBindings>(Sym)) {
+ state = Update(state, Sym, *T, Summ.getReceiverEffect(), hasErr);
+ if (hasErr) {
+ ErrorRange = Receiver.getSourceRange();
+ ErrorSym = Sym;
+ }
+ }
+ }
+ }
+
+ // Process any errors.
+ if (hasErr) {
+ ProcessNonLeakError(Dst, Builder, Ex, ErrorRange, Pred, state,
+ hasErr, ErrorSym);
+ return;
+ }
+
+ // Consult the summary for the return value.
+ RetEffect RE = Summ.getRetEffect();
+
+ if (RE.getKind() == RetEffect::OwnedWhenTrackedReceiver) {
+ bool found = false;
+ if (Receiver) {
+ SVal V = Receiver.getSValAsScalarOrLoc(state);
+ if (SymbolRef Sym = V.getAsLocSymbol())
+ if (state->get<RefBindings>(Sym)) {
+ found = true;
+ RE = Summaries.getObjAllocRetEffect();
+ }
+ } // FIXME: Otherwise, this is a send-to-super instance message.
+ if (!found)
+ RE = RetEffect::MakeNoRet();
+ }
+
+ switch (RE.getKind()) {
+ default:
+ assert (false && "Unhandled RetEffect."); break;
+
+ case RetEffect::NoRet: {
+ // Make up a symbol for the return value (not reference counted).
+ // FIXME: Most of this logic is not specific to the retain/release
+ // checker.
+
+ // FIXME: We eventually should handle structs and other compound types
+ // that are returned by value.
+
+ QualType T = Ex->getType();
+
+ // For CallExpr, use the result type to know if it returns a reference.
+ if (const CallExpr *CE = dyn_cast<CallExpr>(Ex)) {
+ const Expr *Callee = CE->getCallee();
+ if (const FunctionDecl *FD = state->getSVal(Callee).getAsFunctionDecl())
+ T = FD->getResultType();
+ }
+ else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(Ex)) {
+ if (const ObjCMethodDecl *MD = ME->getMethodDecl())
+ T = MD->getResultType();
+ }
+
+ if (Loc::IsLocType(T) || (T->isIntegerType() && T->isScalarType())) {
+ unsigned Count = Builder.getCurrentBlockCount();
+ ValueManager &ValMgr = Eng.getValueManager();
+ SVal X = ValMgr.getConjuredSymbolVal(NULL, Ex, T, Count);
+ state = state->BindExpr(Ex, X, false);
+ }
+
+ break;
+ }
+
+ case RetEffect::Alias: {
+ unsigned idx = RE.getIndex();
+ assert (arg_end >= arg_beg);
+ assert (idx < (unsigned) (arg_end - arg_beg));
+ SVal V = state->getSValAsScalarOrLoc(*(arg_beg+idx));
+ state = state->BindExpr(Ex, V, false);
+ break;
+ }
+
+ case RetEffect::ReceiverAlias: {
+ assert(Receiver);
+ SVal V = Receiver.getSValAsScalarOrLoc(state);
+ state = state->BindExpr(Ex, V, false);
+ break;
+ }
+
+ case RetEffect::OwnedAllocatedSymbol:
+ case RetEffect::OwnedSymbol: {
+ unsigned Count = Builder.getCurrentBlockCount();
+ ValueManager &ValMgr = Eng.getValueManager();
+ SymbolRef Sym = ValMgr.getConjuredSymbol(Ex, Count);
+ QualType RetT = GetReturnType(Ex, ValMgr.getContext());
+ state = state->set<RefBindings>(Sym, RefVal::makeOwned(RE.getObjKind(),
+ RetT));
+ state = state->BindExpr(Ex, ValMgr.makeLoc(Sym), false);
+
+      // FIXME: Add a flag to the checker where allocations are assumed to
+      // *not* fail.
+#if 0
+ if (RE.getKind() == RetEffect::OwnedAllocatedSymbol) {
+ bool isFeasible;
+ state = state.Assume(loc::SymbolVal(Sym), true, isFeasible);
+ assert(isFeasible && "Cannot assume fresh symbol is non-null.");
+ }
+#endif
+
+ break;
+ }
+
+ case RetEffect::GCNotOwnedSymbol:
+ case RetEffect::NotOwnedSymbol: {
+ unsigned Count = Builder.getCurrentBlockCount();
+ ValueManager &ValMgr = Eng.getValueManager();
+ SymbolRef Sym = ValMgr.getConjuredSymbol(Ex, Count);
+ QualType RetT = GetReturnType(Ex, ValMgr.getContext());
+ state = state->set<RefBindings>(Sym, RefVal::makeNotOwned(RE.getObjKind(),
+ RetT));
+ state = state->BindExpr(Ex, ValMgr.makeLoc(Sym), false);
+ break;
+ }
+ }
+
+ // Generate a sink node if we are at the end of a path.
+ ExplodedNode *NewNode =
+ Summ.isEndPath() ? Builder.MakeSinkNode(Dst, Ex, Pred, state)
+ : Builder.MakeNode(Dst, Ex, Pred, state);
+
+ // Annotate the edge with summary we used.
+ if (NewNode) SummaryLog[NewNode] = &Summ;
+}
+
+
+void CFRefCount::EvalCall(ExplodedNodeSet& Dst,
+ GRExprEngine& Eng,
+ GRStmtNodeBuilder& Builder,
+ CallExpr* CE, SVal L,
+ ExplodedNode* Pred) {
+
+ RetainSummary *Summ = 0;
+
+ // FIXME: Better support for blocks. For now we stop tracking anything
+ // that is passed to blocks.
+ // FIXME: Need to handle variables that are "captured" by the block.
+ if (dyn_cast_or_null<BlockDataRegion>(L.getAsRegion())) {
+ Summ = Summaries.getPersistentStopSummary();
+ }
+ else {
+ const FunctionDecl* FD = L.getAsFunctionDecl();
+ Summ = !FD ? Summaries.getDefaultSummary() :
+ Summaries.getSummary(const_cast<FunctionDecl*>(FD));
+ }
+
+ assert(Summ);
+ EvalSummary(Dst, Eng, Builder, CE, 0, *Summ, L.getAsRegion(),
+ CE->arg_begin(), CE->arg_end(), Pred, Builder.GetState(Pred));
+}
+
+void CFRefCount::EvalObjCMessageExpr(ExplodedNodeSet& Dst,
+ GRExprEngine& Eng,
+ GRStmtNodeBuilder& Builder,
+ ObjCMessageExpr* ME,
+ ExplodedNode* Pred,
+ const GRState *state) {
+ RetainSummary *Summ =
+ ME->isInstanceMessage()
+ ? Summaries.getInstanceMethodSummary(ME, state, Pred->getLocationContext())
+ : Summaries.getClassMethodSummary(ME);
+
+ assert(Summ && "RetainSummary is null");
+ EvalSummary(Dst, Eng, Builder, ME,
+ InstanceReceiver(ME, Pred->getLocationContext()), *Summ, NULL,
+ ME->arg_begin(), ME->arg_end(), Pred, state);
+}
+
+namespace {
+class StopTrackingCallback : public SymbolVisitor {
+ const GRState *state;
+public:
+ StopTrackingCallback(const GRState *st) : state(st) {}
+ const GRState *getState() const { return state; }
+
+ bool VisitSymbol(SymbolRef sym) {
+ state = state->remove<RefBindings>(sym);
+ return true;
+ }
+};
+} // end anonymous namespace
+
+
+void CFRefCount::EvalBind(GRStmtNodeBuilderRef& B, SVal location, SVal val) {
+ // Are we storing to something that causes the value to "escape"?
+ bool escapes = false;
+
+ // A value escapes in three possible cases (this may change):
+ //
+ // (1) we are binding to something that is not a memory region.
+ // (2) we are binding to a memregion that does not have stack storage
+ // (3) we are binding to a memregion with stack storage that the store
+ // does not understand.
+ const GRState *state = B.getState();
+
+ if (!isa<loc::MemRegionVal>(location))
+ escapes = true;
+ else {
+ const MemRegion* R = cast<loc::MemRegionVal>(location).getRegion();
+ escapes = !R->hasStackStorage();
+
+ if (!escapes) {
+ // To test (3), generate a new state with the binding removed. If it is
+ // the same state, then it escapes (since the store cannot represent
+ // the binding).
+ escapes = (state == (state->bindLoc(cast<Loc>(location), UnknownVal())));
+ }
+ }
+
+ // If our store can represent the binding and we aren't storing to something
+ // that doesn't have local storage then just return and have the simulation
+ // state continue as is.
+ if (!escapes)
+ return;
+
+ // Otherwise, find all symbols referenced by 'val' that we are tracking
+ // and stop tracking them.
+ B.MakeNode(state->scanReachableSymbols<StopTrackingCallback>(val).getState());
+}
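+
+// Illustrative sketch (not part of this file) of the kind of binding EvalBind
+// treats as an escape; all names below are hypothetical:
+//
+//   static id TheCache; // global, i.e. non-stack storage
+//   void stash(id obj) {
+//     TheCache = obj; // 'obj' escapes; its RefBindings entry is removed
+//   }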
+
+// Return statements.
+
+void CFRefCount::EvalReturn(ExplodedNodeSet& Dst,
+ GRExprEngine& Eng,
+ GRStmtNodeBuilder& Builder,
+ ReturnStmt* S,
+ ExplodedNode* Pred) {
+
+ Expr* RetE = S->getRetValue();
+ if (!RetE)
+ return;
+
+ const GRState *state = Builder.GetState(Pred);
+ SymbolRef Sym = state->getSValAsScalarOrLoc(RetE).getAsLocSymbol();
+
+ if (!Sym)
+ return;
+
+ // Get the reference count binding (if any).
+ const RefVal* T = state->get<RefBindings>(Sym);
+
+ if (!T)
+ return;
+
+ // Change the reference count.
+ RefVal X = *T;
+
+ switch (X.getKind()) {
+ case RefVal::Owned: {
+ unsigned cnt = X.getCount();
+ assert (cnt > 0);
+ X.setCount(cnt - 1);
+ X = X ^ RefVal::ReturnedOwned;
+ break;
+ }
+
+ case RefVal::NotOwned: {
+ unsigned cnt = X.getCount();
+ if (cnt) {
+ X.setCount(cnt - 1);
+ X = X ^ RefVal::ReturnedOwned;
+ }
+ else {
+ X = X ^ RefVal::ReturnedNotOwned;
+ }
+ break;
+ }
+
+ default:
+ return;
+ }
+
+ // Update the binding.
+ state = state->set<RefBindings>(Sym, X);
+ Pred = Builder.MakeNode(Dst, S, Pred, state);
+
+ // Did we cache out?
+ if (!Pred)
+ return;
+
+ // Update the autorelease counts.
+ static unsigned autoreleasetag = 0;
+ GenericNodeBuilder Bd(Builder, S, &autoreleasetag);
+ bool stop = false;
+ llvm::tie(Pred, state) = HandleAutoreleaseCounts(state, Bd, Pred, Eng, Sym,
+ X, stop);
+
+ // Did we cache out?
+ if (!Pred || stop)
+ return;
+
+ // Get the updated binding.
+ T = state->get<RefBindings>(Sym);
+ assert(T);
+ X = *T;
+
+ // Any leaks or other errors?
+ if (X.isReturnedOwned() && X.getCount() == 0) {
+ Decl const *CD = &Pred->getCodeDecl();
+ if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(CD)) {
+ const RetainSummary &Summ = *Summaries.getMethodSummary(MD);
+ RetEffect RE = Summ.getRetEffect();
+ bool hasError = false;
+
+ if (RE.getKind() != RetEffect::NoRet) {
+ if (isGCEnabled() && RE.getObjKind() == RetEffect::ObjC) {
+ // Things are more complicated with garbage collection. If the
+ // returned object is supposed to be an Objective-C object, we have
+ // a leak (as the caller expects a GC'ed object) because no
+ // method should return ownership unless it returns a CF object.
+ hasError = true;
+ X = X ^ RefVal::ErrorGCLeakReturned;
+ }
+ else if (!RE.isOwned()) {
+ // Either we are using GC and the returned object is a CF type
+ // or we aren't using GC. In either case, the enclosing method is
+ // expected to return ownership.
+ hasError = true;
+ X = X ^ RefVal::ErrorLeakReturned;
+ }
+ }
+
+ if (hasError) {
+ // Generate an error node.
+ static int ReturnOwnLeakTag = 0;
+ state = state->set<RefBindings>(Sym, X);
+ ExplodedNode *N =
+ Builder.generateNode(PostStmt(S, Pred->getLocationContext(),
+ &ReturnOwnLeakTag), state, Pred);
+ if (N) {
+ CFRefReport *report =
+ new CFRefLeakReport(*static_cast<CFRefBug*>(leakAtReturn), *this,
+ N, Sym, Eng);
+ BR->EmitReport(report);
+ }
+ }
+ }
+ }
+ else if (X.isReturnedNotOwned()) {
+ Decl const *CD = &Pred->getCodeDecl();
+ if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(CD)) {
+ const RetainSummary &Summ = *Summaries.getMethodSummary(MD);
+ if (Summ.getRetEffect().isOwned()) {
+ // Trying to return an object that is not owned to a caller expecting
+ // an owned object.
+
+ static int ReturnNotOwnedForOwnedTag = 0;
+ state = state->set<RefBindings>(Sym, X ^ RefVal::ErrorReturnedNotOwned);
+ if (ExplodedNode *N =
+ Builder.generateNode(PostStmt(S, Pred->getLocationContext(),
+ &ReturnNotOwnedForOwnedTag),
+ state, Pred)) {
+ CFRefReport *report =
+ new CFRefReport(*static_cast<CFRefBug*>(returnNotOwnedForOwned),
+ *this, N, Sym);
+ BR->EmitReport(report);
+ }
+ }
+ }
+ }
+}
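+
+// Hedged example (not from this file) of the leak-at-return diagnostic above.
+// The method name is hypothetical; by Cocoa naming conventions it returns +0:
+//
+//   - (NSString *)name {
+//     return [[NSString alloc] init]; // returned +1; flagged as leaked
+//   }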
+
+// Assumptions.
+
+const GRState* CFRefCount::EvalAssume(const GRState *state,
+ SVal Cond, bool Assumption) {
+
+ // FIXME: We may add to the interface of EvalAssume the list of symbols
+ // whose assumptions have changed. For now we just iterate through the
+ // bindings and check if any of the tracked symbols are NULL. This isn't
+ // too bad since the number of symbols we will track in practice is
+ // probably small and EvalAssume is only called at branches and a few
+ // other places.
+ RefBindings B = state->get<RefBindings>();
+
+ if (B.isEmpty())
+ return state;
+
+ bool changed = false;
+ RefBindings::Factory& RefBFactory = state->get_context<RefBindings>();
+
+ for (RefBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
+ // Check if the symbol is null (or equal to any constant).
+ // If this is the case, stop tracking the symbol.
+ if (state->getSymVal(I.getKey())) {
+ changed = true;
+ B = RefBFactory.Remove(B, I.getKey());
+ }
+ }
+
+ if (changed)
+ state = state->set<RefBindings>(B);
+
+ return state;
+}
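+
+// Illustrative sketch of why EvalAssume prunes bindings (hypothetical code):
+//
+//   id obj = MyCreateObject(); // tracked, assuming a +1 summary
+//   if (obj == 0)
+//     return; // here 'obj' is known to equal a constant (null), so its
+//             // binding is dropped instead of later reported as a leak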
+
+const GRState * CFRefCount::Update(const GRState * state, SymbolRef sym,
+ RefVal V, ArgEffect E,
+ RefVal::Kind& hasErr) {
+
+ // In GC mode [... release] and [... retain] do nothing.
+ switch (E) {
+ default: break;
+ case IncRefMsg: E = isGCEnabled() ? DoNothing : IncRef; break;
+ case DecRefMsg: E = isGCEnabled() ? DoNothing : DecRef; break;
+ case MakeCollectable: E = isGCEnabled() ? DecRef : DoNothing; break;
+ case NewAutoreleasePool: E = isGCEnabled() ? DoNothing :
+ NewAutoreleasePool; break;
+ }
+
+ // Handle all use-after-releases.
+ if (!isGCEnabled() && V.getKind() == RefVal::Released) {
+ V = V ^ RefVal::ErrorUseAfterRelease;
+ hasErr = V.getKind();
+ return state->set<RefBindings>(sym, V);
+ }
+
+ switch (E) {
+ default:
+ assert (false && "Unhandled CFRef transition.");
+
+ case Dealloc:
+ // Any use of -dealloc in GC is *bad*.
+ if (isGCEnabled()) {
+ V = V ^ RefVal::ErrorDeallocGC;
+ hasErr = V.getKind();
+ break;
+ }
+
+ switch (V.getKind()) {
+ default:
+ assert(false && "Invalid case.");
+ case RefVal::Owned:
+ // The object immediately transitions to the released state.
+ V = V ^ RefVal::Released;
+ V.clearCounts();
+ return state->set<RefBindings>(sym, V);
+ case RefVal::NotOwned:
+ V = V ^ RefVal::ErrorDeallocNotOwned;
+ hasErr = V.getKind();
+ break;
+ }
+ break;
+
+ case NewAutoreleasePool:
+ assert(!isGCEnabled());
+ return state->add<AutoreleaseStack>(sym);
+
+ case MayEscape:
+ if (V.getKind() == RefVal::Owned) {
+ V = V ^ RefVal::NotOwned;
+ break;
+ }
+
+ // Fall-through.
+
+ case DoNothingByRef:
+ case DoNothing:
+ return state;
+
+ case Autorelease:
+ if (isGCEnabled())
+ return state;
+
+ // Update the autorelease counts.
+ state = SendAutorelease(state, ARCountFactory, sym);
+ V = V.autorelease();
+ break;
+
+ case StopTracking:
+ return state->remove<RefBindings>(sym);
+
+ case IncRef:
+ switch (V.getKind()) {
+ default:
+ assert(false);
+
+ case RefVal::Owned:
+ case RefVal::NotOwned:
+ V = V + 1;
+ break;
+ case RefVal::Released:
+ // Non-GC cases are handled above.
+ assert(isGCEnabled());
+ V = (V ^ RefVal::Owned) + 1;
+ break;
+ }
+ break;
+
+ case SelfOwn:
+ V = V ^ RefVal::NotOwned;
+ // Fall-through.
+ case DecRef:
+ switch (V.getKind()) {
+ default:
+ // case 'RefVal::Released' handled above.
+ assert (false);
+
+ case RefVal::Owned:
+ assert(V.getCount() > 0);
+ if (V.getCount() == 1) V = V ^ RefVal::Released;
+ V = V - 1;
+ break;
+
+ case RefVal::NotOwned:
+ if (V.getCount() > 0)
+ V = V - 1;
+ else {
+ V = V ^ RefVal::ErrorReleaseNotOwned;
+ hasErr = V.getKind();
+ }
+ break;
+
+ case RefVal::Released:
+ // Non-GC cases are handled above.
+ assert(isGCEnabled());
+ V = V ^ RefVal::ErrorUseAfterRelease;
+ hasErr = V.getKind();
+ break;
+ }
+ break;
+ }
+ return state->set<RefBindings>(sym, V);
+}
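+
+// Illustrative (non-GC) transitions produced by Update():
+//
+//   id x = [[NSObject alloc] init]; // Owned(+1)
+//   [x retain];  // IncRef -> Owned(+2)
+//   [x release]; // DecRef -> Owned(+1)
+//   [x release]; // DecRef -> Released
+//   [x release]; // message to Released -> ErrorUseAfterRelease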
+
+//===----------------------------------------------------------------------===//
+// Handle dead symbols and end-of-path.
+//===----------------------------------------------------------------------===//
+
+std::pair<ExplodedNode*, const GRState *>
+CFRefCount::HandleAutoreleaseCounts(const GRState * state, GenericNodeBuilder Bd,
+ ExplodedNode* Pred,
+ GRExprEngine &Eng,
+ SymbolRef Sym, RefVal V, bool &stop) {
+
+ unsigned ACnt = V.getAutoreleaseCount();
+ stop = false;
+
+ // No autorelease counts? Nothing to be done.
+ if (!ACnt)
+ return std::make_pair(Pred, state);
+
+ assert(!isGCEnabled() && "Autorelease counts in GC mode?");
+ unsigned Cnt = V.getCount();
+
+ // FIXME: Handle sending 'autorelease' to already released object.
+
+ if (V.getKind() == RefVal::ReturnedOwned)
+ ++Cnt;
+
+ if (ACnt <= Cnt) {
+ if (ACnt == Cnt) {
+ V.clearCounts();
+ if (V.getKind() == RefVal::ReturnedOwned)
+ V = V ^ RefVal::ReturnedNotOwned;
+ else
+ V = V ^ RefVal::NotOwned;
+ }
+ else {
+ V.setCount(Cnt - ACnt);
+ V.setAutoreleaseCount(0);
+ }
+ state = state->set<RefBindings>(Sym, V);
+ ExplodedNode *N = Bd.MakeNode(state, Pred);
+ stop = (N == 0);
+ return std::make_pair(N, state);
+ }
+
+ // Whoa! More autorelease counts than retain counts left.
+ // Emit hard error.
+ stop = true;
+ V = V ^ RefVal::ErrorOverAutorelease;
+ state = state->set<RefBindings>(Sym, V);
+
+ if (ExplodedNode *N = Bd.MakeNode(state, Pred)) {
+ N->markAsSink();
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "Object over-autoreleased: object was sent -autorelease";
+ if (V.getAutoreleaseCount() > 1)
+ os << ' ' << V.getAutoreleaseCount() << " times";
+ os << " but the object has ";
+ if (V.getCount() == 0)
+ os << "zero (locally visible)";
+ else
+ os << "+" << V.getCount();
+ os << " retain counts";
+
+ CFRefReport *report =
+ new CFRefReport(*static_cast<CFRefBug*>(overAutorelease),
+ *this, N, Sym, os.str());
+ BR->EmitReport(report);
+ }
+
+ return std::make_pair((ExplodedNode*)0, state);
+}
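+
+// Hedged example of the over-autorelease path handled above:
+//
+//   id x = [[NSObject alloc] init]; // retain count +1
+//   [x autorelease];
+//   [x autorelease]; // autorelease count (2) > retain count (1): hard error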
+
+const GRState *
+CFRefCount::HandleSymbolDeath(const GRState * state, SymbolRef sid, RefVal V,
+ llvm::SmallVectorImpl<SymbolRef> &Leaked) {
+
+ bool hasLeak = V.isOwned() ||
+ ((V.isNotOwned() || V.isReturnedOwned()) && V.getCount() > 0);
+
+ if (!hasLeak)
+ return state->remove<RefBindings>(sid);
+
+ Leaked.push_back(sid);
+ return state->set<RefBindings>(sid, V ^ RefVal::ErrorLeak);
+}
+
+ExplodedNode*
+CFRefCount::ProcessLeaks(const GRState * state,
+ llvm::SmallVectorImpl<SymbolRef> &Leaked,
+ GenericNodeBuilder &Builder,
+ GRExprEngine& Eng,
+ ExplodedNode *Pred) {
+
+ if (Leaked.empty())
+ return Pred;
+
+ // Generate an intermediate node representing the leak point.
+ ExplodedNode *N = Builder.MakeNode(state, Pred);
+
+ if (N) {
+ for (llvm::SmallVectorImpl<SymbolRef>::iterator
+ I = Leaked.begin(), E = Leaked.end(); I != E; ++I) {
+
+ CFRefBug *BT = static_cast<CFRefBug*>(Pred ? leakWithinFunction
+ : leakAtReturn);
+ assert(BT && "BugType not initialized.");
+ CFRefLeakReport* report = new CFRefLeakReport(*BT, *this, N, *I, Eng);
+ BR->EmitReport(report);
+ }
+ }
+
+ return N;
+}
+
+void CFRefCount::EvalEndPath(GRExprEngine& Eng,
+ GREndPathNodeBuilder& Builder) {
+
+ const GRState *state = Builder.getState();
+ GenericNodeBuilder Bd(Builder);
+ RefBindings B = state->get<RefBindings>();
+ ExplodedNode *Pred = 0;
+
+ for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ bool stop = false;
+ llvm::tie(Pred, state) = HandleAutoreleaseCounts(state, Bd, Pred, Eng,
+ (*I).first,
+ (*I).second, stop);
+
+ if (stop)
+ return;
+ }
+
+ B = state->get<RefBindings>();
+ llvm::SmallVector<SymbolRef, 10> Leaked;
+
+ for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I)
+ state = HandleSymbolDeath(state, (*I).first, (*I).second, Leaked);
+
+ ProcessLeaks(state, Leaked, Bd, Eng, Pred);
+}
+
+void CFRefCount::EvalDeadSymbols(ExplodedNodeSet& Dst,
+ GRExprEngine& Eng,
+ GRStmtNodeBuilder& Builder,
+ ExplodedNode* Pred,
+ Stmt* S,
+ const GRState* state,
+ SymbolReaper& SymReaper) {
+
+ RefBindings B = state->get<RefBindings>();
+
+ // Update counts from autorelease pools
+ for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+ E = SymReaper.dead_end(); I != E; ++I) {
+ SymbolRef Sym = *I;
+ if (const RefVal* T = B.lookup(Sym)){
+ // Use the symbol as the tag.
+ // FIXME: This might not be as unique as we would like.
+ GenericNodeBuilder Bd(Builder, S, Sym);
+ bool stop = false;
+ llvm::tie(Pred, state) = HandleAutoreleaseCounts(state, Bd, Pred, Eng,
+ Sym, *T, stop);
+ if (stop)
+ return;
+ }
+ }
+
+ B = state->get<RefBindings>();
+ llvm::SmallVector<SymbolRef, 10> Leaked;
+
+ for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+ E = SymReaper.dead_end(); I != E; ++I) {
+ if (const RefVal* T = B.lookup(*I))
+ state = HandleSymbolDeath(state, *I, *T, Leaked);
+ }
+
+ static unsigned LeakPPTag = 0;
+ {
+ GenericNodeBuilder Bd(Builder, S, &LeakPPTag);
+ Pred = ProcessLeaks(state, Leaked, Bd, Eng, Pred);
+ }
+
+ // Did we cache out?
+ if (!Pred)
+ return;
+
+ // Now generate a new node that nukes the old bindings.
+ RefBindings::Factory& F = state->get_context<RefBindings>();
+
+ for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+ E = SymReaper.dead_end(); I!=E; ++I) B = F.Remove(B, *I);
+
+ state = state->set<RefBindings>(B);
+ Builder.MakeNode(Dst, S, Pred, state);
+}
+
+void CFRefCount::ProcessNonLeakError(ExplodedNodeSet& Dst,
+ GRStmtNodeBuilder& Builder,
+ Expr* NodeExpr, SourceRange ErrorRange,
+ ExplodedNode* Pred,
+ const GRState* St,
+ RefVal::Kind hasErr, SymbolRef Sym) {
+ Builder.BuildSinks = true;
+ ExplodedNode *N = Builder.MakeNode(Dst, NodeExpr, Pred, St);
+
+ if (!N)
+ return;
+
+ CFRefBug *BT = 0;
+
+ switch (hasErr) {
+ default:
+ assert(false && "Unhandled error.");
+ return;
+ case RefVal::ErrorUseAfterRelease:
+ BT = static_cast<CFRefBug*>(useAfterRelease);
+ break;
+ case RefVal::ErrorReleaseNotOwned:
+ BT = static_cast<CFRefBug*>(releaseNotOwned);
+ break;
+ case RefVal::ErrorDeallocGC:
+ BT = static_cast<CFRefBug*>(deallocGC);
+ break;
+ case RefVal::ErrorDeallocNotOwned:
+ BT = static_cast<CFRefBug*>(deallocNotOwned);
+ break;
+ }
+
+ CFRefReport *report = new CFRefReport(*BT, *this, N, Sym);
+ report->addRange(ErrorRange);
+ BR->EmitReport(report);
+}
+
+//===----------------------------------------------------------------------===//
+// Pieces of the retain/release checker implemented using a CheckerVisitor.
+// More pieces of the retain/release checker will be migrated to this interface
+// (ideally, all of it some day).
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RetainReleaseChecker
+ : public CheckerVisitor<RetainReleaseChecker> {
+ CFRefCount *TF;
+public:
+ RetainReleaseChecker(CFRefCount *tf) : TF(tf) {}
+ static void* getTag() { static int x = 0; return &x; }
+
+ void PostVisitBlockExpr(CheckerContext &C, const BlockExpr *BE);
+};
+} // end anonymous namespace
+
+
+void RetainReleaseChecker::PostVisitBlockExpr(CheckerContext &C,
+ const BlockExpr *BE) {
+
+ // Scan the BlockDeclRefExprs for any object the retain/release checker
+ // may be tracking.
+ if (!BE->hasBlockDeclRefExprs())
+ return;
+
+ const GRState *state = C.getState();
+ const BlockDataRegion *R =
+ cast<BlockDataRegion>(state->getSVal(BE).getAsRegion());
+
+ BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
+ E = R->referenced_vars_end();
+
+ if (I == E)
+ return;
+
+ // FIXME: For now we invalidate the tracking of all symbols passed to blocks
+ // via captured variables, even though captured variables result in a copy
+ // and in implicit increment/decrement of a retain count.
+ llvm::SmallVector<const MemRegion*, 10> Regions;
+ const LocationContext *LC = C.getPredecessor()->getLocationContext();
+ MemRegionManager &MemMgr = C.getValueManager().getRegionManager();
+
+ for ( ; I != E; ++I) {
+ const VarRegion *VR = *I;
+ if (VR->getSuperRegion() == R) {
+ VR = MemMgr.getVarRegion(VR->getDecl(), LC);
+ }
+ Regions.push_back(VR);
+ }
+
+ state =
+ state->scanReachableSymbols<StopTrackingCallback>(Regions.data(),
+ Regions.data() + Regions.size()).getState();
+ C.addTransition(state);
+}
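+
+// Illustrative capture (hypothetical code) handled by the visitor above:
+//
+//   id obj = [[NSObject alloc] init]; // tracked as Owned(+1)
+//   void (^blk)(void) = ^{ NSLog(@"%@", obj); };
+//   // 'obj' is captured by the block, so its tracking is conservatively
+//   // dropped and no spurious leak is reported for it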
+
+//===----------------------------------------------------------------------===//
+// Transfer function creation for external clients.
+//===----------------------------------------------------------------------===//
+
+void CFRefCount::RegisterChecks(GRExprEngine& Eng) {
+ BugReporter &BR = Eng.getBugReporter();
+
+ useAfterRelease = new UseAfterRelease(this);
+ BR.Register(useAfterRelease);
+
+ releaseNotOwned = new BadRelease(this);
+ BR.Register(releaseNotOwned);
+
+ deallocGC = new DeallocGC(this);
+ BR.Register(deallocGC);
+
+ deallocNotOwned = new DeallocNotOwned(this);
+ BR.Register(deallocNotOwned);
+
+ overAutorelease = new OverAutorelease(this);
+ BR.Register(overAutorelease);
+
+ returnNotOwnedForOwned = new ReturnedNotOwnedForOwned(this);
+ BR.Register(returnNotOwnedForOwned);
+
+ // First register "return" leaks.
+ const char* name = 0;
+
+ if (isGCEnabled())
+ name = "Leak of returned object when using garbage collection";
+ else if (getLangOptions().getGCMode() == LangOptions::HybridGC)
+ name = "Leak of returned object when not using garbage collection (GC) in "
+ "dual GC/non-GC code";
+ else {
+ assert(getLangOptions().getGCMode() == LangOptions::NonGC);
+ name = "Leak of returned object";
+ }
+
+ // Leaks should not be reported if they are post-dominated by a sink.
+ leakAtReturn = new LeakAtReturn(this, name);
+ leakAtReturn->setSuppressOnSink(true);
+ BR.Register(leakAtReturn);
+
+ // Second, register leaks within a function/method.
+ if (isGCEnabled())
+ name = "Leak of object when using garbage collection";
+ else if (getLangOptions().getGCMode() == LangOptions::HybridGC)
+ name = "Leak of object when not using garbage collection (GC) in "
+ "dual GC/non-GC code";
+ else {
+ assert(getLangOptions().getGCMode() == LangOptions::NonGC);
+ name = "Leak";
+ }
+
+ // Leaks should not be reported if they are post-dominated by sinks.
+ leakWithinFunction = new LeakWithinFunction(this, name);
+ leakWithinFunction->setSuppressOnSink(true);
+ BR.Register(leakWithinFunction);
+
+ // Save the reference to the BugReporter.
+ this->BR = &BR;
+
+ // Register the RetainReleaseChecker with the GRExprEngine object.
+ // Functionality in CFRefCount will be migrated to RetainReleaseChecker
+ // over time.
+ Eng.registerCheck(new RetainReleaseChecker(this));
+}
+
+GRTransferFuncs* clang::MakeCFRefCountTF(ASTContext& Ctx, bool GCEnabled,
+ const LangOptions& lopts) {
+ return new CFRefCount(Ctx, GCEnabled, lopts);
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Checker/CMakeLists.txt
new file mode 100644
index 0000000..9c6adc6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/CMakeLists.txt
@@ -0,0 +1,74 @@
+set(LLVM_NO_RTTI 1)
+
+add_clang_library(clangChecker
+ AdjustedReturnValueChecker.cpp
+ AggExprVisitor.cpp
+ ArrayBoundChecker.cpp
+ AttrNonNullChecker.cpp
+ BasicConstraintManager.cpp
+ BasicObjCFoundationChecks.cpp
+ BasicStore.cpp
+ BasicValueFactory.cpp
+ BugReporter.cpp
+ BugReporterVisitors.cpp
+ BuiltinFunctionChecker.cpp
+ CallAndMessageChecker.cpp
+ CallInliner.cpp
+ CastSizeChecker.cpp
+ CastToStructChecker.cpp
+ CFRefCount.cpp
+ CheckDeadStores.cpp
+ Checker.cpp
+ CheckObjCDealloc.cpp
+ CheckObjCInstMethSignature.cpp
+ CheckSecuritySyntaxOnly.cpp
+ CheckSizeofPointer.cpp
+ CocoaConventions.cpp
+ DereferenceChecker.cpp
+ DivZeroChecker.cpp
+ Environment.cpp
+ ExplodedGraph.cpp
+ FixedAddressChecker.cpp
+ FlatStore.cpp
+ GRBlockCounter.cpp
+ GRCoreEngine.cpp
+ GRCXXExprEngine.cpp
+ GRExprEngine.cpp
+ GRExprEngineExperimentalChecks.cpp
+ GRState.cpp
+ LLVMConventionsChecker.cpp
+ MacOSXAPIChecker.cpp
+ MallocChecker.cpp
+ ManagerRegistry.cpp
+ MemRegion.cpp
+ NoReturnFunctionChecker.cpp
+ NSAutoreleasePoolChecker.cpp
+ NSErrorChecker.cpp
+ ObjCUnusedIVarsChecker.cpp
+ OSAtomicChecker.cpp
+ PathDiagnostic.cpp
+ PointerArithChecker.cpp
+ PointerSubChecker.cpp
+ PthreadLockChecker.cpp
+ RangeConstraintManager.cpp
+ RegionStore.cpp
+ ReturnPointerRangeChecker.cpp
+ ReturnStackAddressChecker.cpp
+ ReturnUndefChecker.cpp
+ SimpleConstraintManager.cpp
+ SimpleSValuator.cpp
+ Store.cpp
+ SVals.cpp
+ SValuator.cpp
+ SymbolManager.cpp
+ UndefBranchChecker.cpp
+ UndefCapturedBlockVarChecker.cpp
+ UndefinedArraySubscriptChecker.cpp
+ UndefinedAssignmentChecker.cpp
+ UndefResultChecker.cpp
+ UnixAPIChecker.cpp
+ ValueManager.cpp
+ VLASizeChecker.cpp
+ )
+
+add_dependencies(clangChecker ClangStmtNodes)
diff --git a/contrib/llvm/tools/clang/lib/Checker/CallAndMessageChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/CallAndMessageChecker.cpp
new file mode 100644
index 0000000..c619d75
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/CallAndMessageChecker.cpp
@@ -0,0 +1,346 @@
+//===--- CallAndMessageChecker.cpp ------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines CallAndMessageChecker, a builtin checker that checks for various
+// errors in call and Objective-C message expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+
+using namespace clang;
+
+namespace {
+class CallAndMessageChecker
+ : public CheckerVisitor<CallAndMessageChecker> {
+ BugType *BT_call_null;
+ BugType *BT_call_undef;
+ BugType *BT_call_arg;
+ BugType *BT_msg_undef;
+ BugType *BT_msg_arg;
+ BugType *BT_msg_ret;
+public:
+ CallAndMessageChecker() :
+ BT_call_null(0), BT_call_undef(0), BT_call_arg(0),
+ BT_msg_undef(0), BT_msg_arg(0), BT_msg_ret(0) {}
+
+ static void *getTag() {
+ static int x = 0;
+ return &x;
+ }
+
+ void PreVisitCallExpr(CheckerContext &C, const CallExpr *CE);
+ void PreVisitObjCMessageExpr(CheckerContext &C, const ObjCMessageExpr *ME);
+ bool EvalNilReceiver(CheckerContext &C, const ObjCMessageExpr *ME);
+
+private:
+ bool PreVisitProcessArg(CheckerContext &C, const Expr *Ex,
+ const char *BT_desc, BugType *&BT);
+
+ void EmitBadCall(BugType *BT, CheckerContext &C, const CallExpr *CE);
+ void EmitNilReceiverBug(CheckerContext &C, const ObjCMessageExpr *ME,
+ ExplodedNode *N);
+
+ void HandleNilReceiver(CheckerContext &C, const GRState *state,
+ const ObjCMessageExpr *ME);
+
+ void LazyInit_BT(const char *desc, BugType *&BT) {
+ if (!BT)
+ BT = new BuiltinBug(desc);
+ }
+};
+} // end anonymous namespace
+
+void clang::RegisterCallAndMessageChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new CallAndMessageChecker());
+}
+
+void CallAndMessageChecker::EmitBadCall(BugType *BT, CheckerContext &C,
+ const CallExpr *CE) {
+ ExplodedNode *N = C.GenerateSink();
+ if (!N)
+ return;
+
+ EnhancedBugReport *R = new EnhancedBugReport(*BT, BT->getName(), N);
+ R->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue,
+ bugreporter::GetCalleeExpr(N));
+ C.EmitReport(R);
+}
+
+bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
+ const Expr *Ex,
+ const char *BT_desc,
+ BugType *&BT) {
+
+ const SVal &V = C.getState()->getSVal(Ex);
+
+ if (V.isUndef()) {
+ if (ExplodedNode *N = C.GenerateSink()) {
+ LazyInit_BT(BT_desc, BT);
+
+ // Generate a report for this bug.
+ EnhancedBugReport *R = new EnhancedBugReport(*BT, BT->getName(), N);
+ R->addRange(Ex->getSourceRange());
+ R->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, Ex);
+ C.EmitReport(R);
+ }
+ return true;
+ }
+
+ if (const nonloc::LazyCompoundVal *LV =
+ dyn_cast<nonloc::LazyCompoundVal>(&V)) {
+
+ class FindUninitializedField {
+ public:
+ llvm::SmallVector<const FieldDecl *, 10> FieldChain;
+ private:
+ ASTContext &C;
+ StoreManager &StoreMgr;
+ MemRegionManager &MrMgr;
+ Store store;
+ public:
+ FindUninitializedField(ASTContext &c, StoreManager &storeMgr,
+ MemRegionManager &mrMgr, Store s)
+ : C(c), StoreMgr(storeMgr), MrMgr(mrMgr), store(s) {}
+
+ bool Find(const TypedRegion *R) {
+ QualType T = R->getValueType(C);
+ if (const RecordType *RT = T->getAsStructureType()) {
+ const RecordDecl *RD = RT->getDecl()->getDefinition();
+ assert(RD && "Referred record has no definition");
+ for (RecordDecl::field_iterator I =
+ RD->field_begin(), E = RD->field_end(); I!=E; ++I) {
+ const FieldRegion *FR = MrMgr.getFieldRegion(*I, R);
+ FieldChain.push_back(*I);
+ T = (*I)->getType();
+ if (T->getAsStructureType()) {
+ if (Find(FR))
+ return true;
+ }
+ else {
+ const SVal &V = StoreMgr.Retrieve(store, loc::MemRegionVal(FR));
+ if (V.isUndef())
+ return true;
+ }
+ FieldChain.pop_back();
+ }
+ }
+
+ return false;
+ }
+ };
+
+ const LazyCompoundValData *D = LV->getCVData();
+ FindUninitializedField F(C.getASTContext(),
+ C.getState()->getStateManager().getStoreManager(),
+ C.getValueManager().getRegionManager(),
+ D->getStore());
+
+ if (F.Find(D->getRegion())) {
+ if (ExplodedNode *N = C.GenerateSink()) {
+ LazyInit_BT(BT_desc, BT);
+ llvm::SmallString<512> Str;
+ llvm::raw_svector_ostream os(Str);
+ os << "Passed-by-value struct argument contains uninitialized data";
+
+ if (F.FieldChain.size() == 1)
+ os << " (e.g., field: '" << F.FieldChain[0] << "')";
+ else {
+ os << " (e.g., via the field chain: '";
+ bool first = true;
+ for (llvm::SmallVectorImpl<const FieldDecl *>::iterator
+ DI = F.FieldChain.begin(), DE = F.FieldChain.end(); DI!=DE;++DI){
+ if (first)
+ first = false;
+ else
+ os << '.';
+ os << *DI;
+ }
+ os << "')";
+ }
+
+ // Generate a report for this bug.
+ EnhancedBugReport *R = new EnhancedBugReport(*BT, os.str(), N);
+ R->addRange(Ex->getSourceRange());
+
+ // FIXME: enhance tracking back of uninitialized values for arbitrary
+ // memregions.
+ C.EmitReport(R);
+ }
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void CallAndMessageChecker::PreVisitCallExpr(CheckerContext &C,
+ const CallExpr *CE){
+
+ const Expr *Callee = CE->getCallee()->IgnoreParens();
+ SVal L = C.getState()->getSVal(Callee);
+
+ if (L.isUndef()) {
+ if (!BT_call_undef)
+ BT_call_undef =
+ new BuiltinBug("Called function pointer is an undefined pointer value");
+ EmitBadCall(BT_call_undef, C, CE);
+ return;
+ }
+
+ if (isa<loc::ConcreteInt>(L)) {
+ if (!BT_call_null)
+ BT_call_null =
+ new BuiltinBug("Called function pointer is null (null dereference)");
+ EmitBadCall(BT_call_null, C, CE);
+ }
+
+ for (CallExpr::const_arg_iterator I = CE->arg_begin(), E = CE->arg_end();
+ I != E; ++I)
+ if (PreVisitProcessArg(C, *I,
+ "Pass-by-value argument in function call is"
+ " undefined", BT_call_arg))
+ return;
+}
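+
+// Hedged examples of calls flagged above (all names hypothetical):
+//
+//   void (*fp)(void); // never initialized
+//   fp();             // "Called function pointer is an undefined ..."
+//
+//   void (*gp)(void) = 0;
+//   gp();             // "Called function pointer is null ..."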
+
+void CallAndMessageChecker::PreVisitObjCMessageExpr(CheckerContext &C,
+ const ObjCMessageExpr *ME) {
+
+ const GRState *state = C.getState();
+
+ // FIXME: Handle 'super'?
+ if (const Expr *receiver = ME->getInstanceReceiver())
+ if (state->getSVal(receiver).isUndef()) {
+ if (ExplodedNode *N = C.GenerateSink()) {
+ if (!BT_msg_undef)
+ BT_msg_undef =
+ new BuiltinBug("Receiver in message expression is a garbage value");
+ EnhancedBugReport *R =
+ new EnhancedBugReport(*BT_msg_undef, BT_msg_undef->getName(), N);
+ R->addRange(receiver->getSourceRange());
+ R->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue,
+ receiver);
+ C.EmitReport(R);
+ }
+ return;
+ }
+
+ // Check for any arguments that are uninitialized/undefined.
+ for (ObjCMessageExpr::const_arg_iterator I = ME->arg_begin(),
+ E = ME->arg_end(); I != E; ++I)
+ if (PreVisitProcessArg(C, *I,
+ "Pass-by-value argument in message expression "
+ "is undefined", BT_msg_arg))
+ return;
+}
+
+bool CallAndMessageChecker::EvalNilReceiver(CheckerContext &C,
+ const ObjCMessageExpr *ME) {
+ HandleNilReceiver(C, C.getState(), ME);
+ return true; // Nil receiver is not handled elsewhere.
+}
+
+void CallAndMessageChecker::EmitNilReceiverBug(CheckerContext &C,
+ const ObjCMessageExpr *ME,
+ ExplodedNode *N) {
+
+ if (!BT_msg_ret)
+ BT_msg_ret =
+ new BuiltinBug("Receiver in message expression is "
+ "'nil' and returns a garbage value");
+
+ llvm::SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << "The receiver of message '" << ME->getSelector().getAsString()
+ << "' is nil and returns a value of type '"
+ << ME->getType().getAsString() << "' that will be garbage";
+
+ EnhancedBugReport *report = new EnhancedBugReport(*BT_msg_ret, os.str(), N);
+ if (const Expr *receiver = ME->getInstanceReceiver()) {
+ report->addRange(receiver->getSourceRange());
+ report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue,
+ receiver);
+ }
+ C.EmitReport(report);
+}
+
+static bool SupportsNilWithFloatRet(const llvm::Triple &triple) {
+ return triple.getVendor() == llvm::Triple::Apple &&
+ triple.getDarwinMajorNumber() >= 9;
+}
+
+void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
+ const GRState *state,
+ const ObjCMessageExpr *ME) {
+
+ // Check the return type of the message expression. A message to nil will
+ // return different values depending on the return type and the architecture.
+ QualType RetTy = ME->getType();
+
+ ASTContext &Ctx = C.getASTContext();
+ CanQualType CanRetTy = Ctx.getCanonicalType(RetTy);
+
+ if (CanRetTy->isStructureOrClassType()) {
+ // FIXME: At some point we shouldn't rely on isConsumedExpr(), but instead
+ // have the "use of undefined value" be smarter about where the
+ // undefined value came from.
+ if (C.getPredecessor()->getParentMap().isConsumedExpr(ME)) {
+ if (ExplodedNode* N = C.GenerateSink(state))
+ EmitNilReceiverBug(C, ME, N);
+ return;
+ }
+
+ // The result is not consumed by a surrounding expression. Just propagate
+ // the current state.
+ C.addTransition(state);
+ return;
+ }
+
+ // Other cases: check if the return type is smaller than void*.
+ if (CanRetTy != Ctx.VoidTy &&
+ C.getPredecessor()->getParentMap().isConsumedExpr(ME)) {
+ // Compute: sizeof(void *) and sizeof(return type)
+ const uint64_t voidPtrSize = Ctx.getTypeSize(Ctx.VoidPtrTy);
+ const uint64_t returnTypeSize = Ctx.getTypeSize(CanRetTy);
+
+ if (voidPtrSize < returnTypeSize &&
+ !(SupportsNilWithFloatRet(Ctx.Target.getTriple()) &&
+ (Ctx.FloatTy == CanRetTy ||
+ Ctx.DoubleTy == CanRetTy ||
+ Ctx.LongDoubleTy == CanRetTy ||
+ Ctx.LongLongTy == CanRetTy))) {
+ if (ExplodedNode* N = C.GenerateSink(state))
+ EmitNilReceiverBug(C, ME, N);
+ return;
+ }
+
+ // Handle the safe cases where the return value is 0 if the
+ // receiver is nil.
+ //
+ // FIXME: For now take the conservative approach that we only
+ // return null values if we *know* that the receiver is nil.
+ // This is because we can have surprises like:
+ //
+ // ... = [[NSScreens screens] objectAtIndex:0];
+ //
+ // What can happen is that [... screens] could return nil, but
+ // it most likely isn't nil. We should assume the semantics
+ // of this case unless we have *a lot* more knowledge.
+ //
+ SVal V = C.getValueManager().makeZeroVal(ME->getType());
+ C.GenerateNode(state->BindExpr(ME, V));
+ return;
+ }
+
+ C.addTransition(state);
+}
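+
+// Illustrative nil-receiver case (hypothetical code): on a 32-bit target
+// where sizeof(long long) > sizeof(void*), the result below is garbage
+// unless the target guarantees otherwise (see SupportsNilWithFloatRet):
+//
+//   NSNumber *n = nil;
+//   long long v = [n longLongValue]; // flagged: garbage return value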
diff --git a/contrib/llvm/tools/clang/lib/Checker/CallInliner.cpp b/contrib/llvm/tools/clang/lib/Checker/CallInliner.cpp
new file mode 100644
index 0000000..88e1a05
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/CallInliner.cpp
@@ -0,0 +1,54 @@
+//===--- CallInliner.cpp - Transfer function that inlines callee ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the callee inlining transfer function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/Checker/Checkers/LocalCheckers.h"
+
+using namespace clang;
+
+namespace {
+class CallInliner : public Checker {
+public:
+ static void *getTag() {
+ static int x;
+ return &x;
+ }
+
+ virtual bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
+};
+}
+
+void clang::RegisterCallInliner(GRExprEngine &Eng) {
+ Eng.registerCheck(new CallInliner());
+}
+
+bool CallInliner::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ const Expr *Callee = CE->getCallee();
+ SVal L = state->getSVal(Callee);
+
+ const FunctionDecl *FD = L.getAsFunctionDecl();
+ if (!FD)
+ return false;
+
+ if (!FD->getBody(FD))
+ return false;
+
+ // Now we have the definition of the callee, create a CallEnter node.
+ CallEnter Loc(CE, FD, C.getPredecessor()->getLocationContext());
+ C.addTransition(state, Loc);
+
+ return true;
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Checker/CastSizeChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/CastSizeChecker.cpp
new file mode 100644
index 0000000..754d775
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/CastSizeChecker.cpp
@@ -0,0 +1,82 @@
+//=== CastSizeChecker.cpp ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// CastSizeChecker checks whether, when a malloc'ed symbolic region is cast
+// to type T, the size of the symbolic region is a multiple of the size of T.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/CharUnits.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "GRExprEngineInternalChecks.h"
+
+using namespace clang;
+
+namespace {
+class CastSizeChecker : public CheckerVisitor<CastSizeChecker> {
+ BuiltinBug *BT;
+public:
+ CastSizeChecker() : BT(0) {}
+ static void *getTag();
+ void PreVisitCastExpr(CheckerContext &C, const CastExpr *B);
+};
+}
+
+void *CastSizeChecker::getTag() {
+ static int x;
+ return &x;
+}
+
+void CastSizeChecker::PreVisitCastExpr(CheckerContext &C, const CastExpr *CE) {
+ const Expr *E = CE->getSubExpr();
+ ASTContext &Ctx = C.getASTContext();
+ QualType ToTy = Ctx.getCanonicalType(CE->getType());
+ PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());
+
+ if (!ToPTy)
+ return;
+
+ QualType ToPointeeTy = ToPTy->getPointeeType();
+
+ const MemRegion *R = C.getState()->getSVal(E).getAsRegion();
+ if (R == 0)
+ return;
+
+ const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R);
+ if (SR == 0)
+ return;
+
+ llvm::Optional<SVal> V =
+ C.getEngine().getStoreManager().getExtent(C.getState(), SR);
+ if (!V)
+ return;
+
+ const nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(V);
+ if (!CI)
+ return;
+
+ CharUnits RegionSize = CharUnits::fromQuantity(CI->getValue().getSExtValue());
+ CharUnits TypeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy);
+ if (RegionSize % TypeSize != 0) {
+ if (ExplodedNode *N = C.GenerateSink()) {
+ if (!BT)
+ BT = new BuiltinBug("Cast region with wrong size.",
+ "Cast a region whose size is not a multiple of the"
+ " destination type size.");
+ RangedBugReport *R = new RangedBugReport(*BT, BT->getDescription(), N);
+ R->addRange(CE->getSourceRange());
+ C.EmitReport(R);
+ }
+ }
+}
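+
+// Hedged example of the cast-size bug reported above (hypothetical code):
+//
+//   struct S { int x, y; };
+//   struct S *p = (struct S *)malloc(sizeof(struct S) + 1);
+//   // the region's extent is not a multiple of sizeof(struct S)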
+
+
+void clang::RegisterCastSizeChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new CastSizeChecker());
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/CastToStructChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/CastToStructChecker.cpp
new file mode 100644
index 0000000..eeaed97
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/CastToStructChecker.cpp
@@ -0,0 +1,78 @@
+//=== CastToStructChecker.cpp - Cast to struct checker --------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CastToStructChecker, a builtin checker that checks for
+// cast from non-struct pointer to struct pointer.
+// This check corresponds to CWE-588.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "GRExprEngineInternalChecks.h"
+
+using namespace clang;
+
+namespace {
+class CastToStructChecker
+ : public CheckerVisitor<CastToStructChecker> {
+ BuiltinBug *BT;
+public:
+ CastToStructChecker() : BT(0) {}
+ static void *getTag();
+ void PreVisitCastExpr(CheckerContext &C, const CastExpr *B);
+};
+}
+
+void *CastToStructChecker::getTag() {
+ static int x;
+ return &x;
+}
+
+void CastToStructChecker::PreVisitCastExpr(CheckerContext &C,
+ const CastExpr *CE) {
+ const Expr *E = CE->getSubExpr();
+ ASTContext &Ctx = C.getASTContext();
+ QualType OrigTy = Ctx.getCanonicalType(E->getType());
+ QualType ToTy = Ctx.getCanonicalType(CE->getType());
+
+ PointerType *OrigPTy = dyn_cast<PointerType>(OrigTy.getTypePtr());
+ PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());
+
+ if (!ToPTy || !OrigPTy)
+ return;
+
+ QualType OrigPointeeTy = OrigPTy->getPointeeType();
+ QualType ToPointeeTy = ToPTy->getPointeeType();
+
+ if (!ToPointeeTy->isStructureOrClassType())
+ return;
+
+ // We allow cast from void*.
+ if (OrigPointeeTy->isVoidType())
+ return;
+
+ // Now the cast-to-type is struct pointer, the original type is not void*.
+ if (!OrigPointeeTy->isRecordType()) {
+ if (ExplodedNode *N = C.GenerateNode()) {
+ if (!BT)
+ BT = new BuiltinBug("Cast from non-struct type to struct type",
+ "Casting a non-structure type to a structure type "
+ "and accessing a field can lead to memory access "
+ "errors or data corruption.");
+ RangedBugReport *R = new RangedBugReport(*BT, BT->getDescription(), N);
+ R->addRange(CE->getSourceRange());
+ C.EmitReport(R);
+ }
+ }
+}
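+
+// Illustrative CWE-588 case reported above (hypothetical code):
+//
+//   int n = 0;
+//   struct Record { int a, b; };
+//   struct Record *r = (struct Record *)&n; // non-struct -> struct pointer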
+
+void clang::RegisterCastToStructChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new CastToStructChecker());
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/CheckDeadStores.cpp b/contrib/llvm/tools/clang/lib/Checker/CheckDeadStores.cpp
new file mode 100644
index 0000000..d6ea187
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/CheckDeadStores.cpp
@@ -0,0 +1,289 @@
+//==- DeadStores.cpp - Check for stores to dead variables --------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines DeadStores, a flow-sensitive checker that looks for
+// stores to variables that are no longer live.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/Checkers/LocalCheckers.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/Visitors/CFGRecStmtVisitor.h"
+#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ParentMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+
+using namespace clang;
+
+namespace {
+
+class DeadStoreObs : public LiveVariables::ObserverTy {
+ ASTContext &Ctx;
+ BugReporter& BR;
+ ParentMap& Parents;
+ llvm::SmallPtrSet<VarDecl*, 20> Escaped;
+
+ enum DeadStoreKind { Standard, Enclosing, DeadIncrement, DeadInit };
+
+public:
+ DeadStoreObs(ASTContext &ctx, BugReporter& br, ParentMap& parents,
+ llvm::SmallPtrSet<VarDecl*, 20> &escaped)
+ : Ctx(ctx), BR(br), Parents(parents), Escaped(escaped) {}
+
+ virtual ~DeadStoreObs() {}
+
+ void Report(VarDecl* V, DeadStoreKind dsk, SourceLocation L, SourceRange R) {
+ if (Escaped.count(V))
+ return;
+
+ std::string name = V->getNameAsString();
+
+ const char* BugType = 0;
+ std::string msg;
+
+ switch (dsk) {
+ default:
+ assert(false && "Impossible dead store type.");
+
+ case DeadInit:
+ BugType = "Dead initialization";
+ msg = "Value stored to '" + name +
+ "' during its initialization is never read";
+ break;
+
+ case DeadIncrement:
+ BugType = "Dead increment";
+ case Standard:
+ if (!BugType) BugType = "Dead assignment";
+ msg = "Value stored to '" + name + "' is never read";
+ break;
+
+ case Enclosing:
+ BugType = "Dead nested assignment";
+ msg = "Although the value stored to '" + name +
+ "' is used in the enclosing expression, the value is never actually"
+ " read from '" + name + "'";
+ break;
+ }
+
+ BR.EmitBasicReport(BugType, "Dead store", msg, L, R);
+ }
+
+ void CheckVarDecl(VarDecl* VD, Expr* Ex, Expr* Val,
+ DeadStoreKind dsk,
+ const LiveVariables::AnalysisDataTy& AD,
+ const LiveVariables::ValTy& Live) {
+
+ if (!VD->hasLocalStorage())
+ return;
+ // Reference types confuse the dead stores checker. Skip them
+ // for now.
+ if (VD->getType()->getAs<ReferenceType>())
+ return;
+
+ if (!Live(VD, AD) &&
+ !(VD->getAttr<UnusedAttr>() || VD->getAttr<BlocksAttr>()))
+ Report(VD, dsk, Ex->getSourceRange().getBegin(),
+ Val->getSourceRange());
+ }
+
+ void CheckDeclRef(DeclRefExpr* DR, Expr* Val, DeadStoreKind dsk,
+ const LiveVariables::AnalysisDataTy& AD,
+ const LiveVariables::ValTy& Live) {
+ if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl()))
+ CheckVarDecl(VD, DR, Val, dsk, AD, Live);
+ }
+
+ bool isIncrement(VarDecl* VD, BinaryOperator* B) {
+ if (B->isCompoundAssignmentOp())
+ return true;
+
+ Expr* RHS = B->getRHS()->IgnoreParenCasts();
+ BinaryOperator* BRHS = dyn_cast<BinaryOperator>(RHS);
+
+ if (!BRHS)
+ return false;
+
+ DeclRefExpr *DR;
+
+ if ((DR = dyn_cast<DeclRefExpr>(BRHS->getLHS()->IgnoreParenCasts())))
+ if (DR->getDecl() == VD)
+ return true;
+
+ if ((DR = dyn_cast<DeclRefExpr>(BRHS->getRHS()->IgnoreParenCasts())))
+ if (DR->getDecl() == VD)
+ return true;
+
+ return false;
+ }
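+
+ // Illustrative only: forms isIncrement() accepts as increments of 'x':
+ //
+ //   x += 1;    // compound assignment
+ //   x = x + 1; // 'x' on the left of the RHS expression
+ //   x = 1 + x; // 'x' on the right of the RHS expression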
+
+ virtual void ObserveStmt(Stmt* S,
+ const LiveVariables::AnalysisDataTy& AD,
+ const LiveVariables::ValTy& Live) {
+
+ // Skip statements in macros.
+ if (S->getLocStart().isMacroID())
+ return;
+
+ if (BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
+ if (!B->isAssignmentOp()) return; // Skip non-assignments.
+
+ if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(B->getLHS()))
+ if (VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ // Special case: check for assigning null to a pointer.
+ // This is a common form of defensive programming.
+ QualType T = VD->getType();
+ if (T->isPointerType() || T->isObjCObjectPointerType()) {
+ if (B->getRHS()->isNullPointerConstant(Ctx,
+ Expr::NPC_ValueDependentIsNull))
+ return;
+ }
+
+ Expr* RHS = B->getRHS()->IgnoreParenCasts();
+ // Special case: self-assignments. These are often used to shut up
+ // "unused variable" compiler warnings.
+ if (DeclRefExpr* RhsDR = dyn_cast<DeclRefExpr>(RHS))
+ if (VD == dyn_cast<VarDecl>(RhsDR->getDecl()))
+ return;
+
+ // Otherwise, issue a warning.
+ DeadStoreKind dsk = Parents.isConsumedExpr(B)
+ ? Enclosing
+ : (isIncrement(VD,B) ? DeadIncrement : Standard);
+
+ CheckVarDecl(VD, DR, B->getRHS(), dsk, AD, Live);
+ }
+ }
+ else if (UnaryOperator* U = dyn_cast<UnaryOperator>(S)) {
+ if (!U->isIncrementOp())
+ return;
+
+ // Handle: ++x within a subexpression. The solution is to not warn
+ // about preincrements of dead variables when the preincrement occurs
+ // as a subexpression. This can lead to false negatives, e.g. "(++x);"
+ // A generalized dead code checker should find such issues.
+ if (U->isPrefix() && Parents.isConsumedExpr(U))
+ return;
+
+ Expr *Ex = U->getSubExpr()->IgnoreParenCasts();
+
+ if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(Ex))
+ CheckDeclRef(DR, U, DeadIncrement, AD, Live);
+ }
+ else if (DeclStmt* DS = dyn_cast<DeclStmt>(S))
+ // Iterate through the decls. Warn if any initializers are complex
+ // expressions that are not live (never used).
+ for (DeclStmt::decl_iterator DI=DS->decl_begin(), DE=DS->decl_end();
+ DI != DE; ++DI) {
+
+ VarDecl* V = dyn_cast<VarDecl>(*DI);
+
+ if (!V)
+ continue;
+
+ if (V->hasLocalStorage()) {
+ // Reference types confuse the dead stores checker. Skip them
+ // for now.
+ if (V->getType()->getAs<ReferenceType>())
+ return;
+
+ if (Expr* E = V->getInit()) {
+ // Don't warn on C++ objects (yet) until we can show that their
+ // constructors/destructors don't have side effects.
+ if (isa<CXXConstructExpr>(E))
+ return;
+
+ if (isa<CXXExprWithTemporaries>(E))
+ return;
+
+ // A dead initialization is a variable that is dead after it
+ // is initialized. We don't flag warnings for those variables
+ // marked 'unused'.
+ if (!Live(V, AD) && V->getAttr<UnusedAttr>() == 0) {
+ // Special case: check for initializations with constants.
+ //
+ // e.g. : int x = 0;
+ //
+ // If x is EVER assigned a new value later, don't issue
+ // a warning. This is because such initialization can be
+ // due to defensive programming.
+ if (E->isConstantInitializer(Ctx))
+ return;
+
+ if (DeclRefExpr *DRE=dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
+ if (VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ // Special case: check for initialization from constant
+ // variables.
+ //
+ // e.g. extern const int MyConstant;
+ // int x = MyConstant;
+ //
+ if (VD->hasGlobalStorage() &&
+ VD->getType().isConstQualified())
+ return;
+ // Special case: check for initialization from scalar
+ // parameters. This is often a form of defensive
+ // programming. Non-scalars are still an error because
+ // they more likely represent an actual algorithmic bug.
+ if (isa<ParmVarDecl>(VD) && VD->getType()->isScalarType())
+ return;
+ }
+
+ Report(V, DeadInit, V->getLocation(), E->getSourceRange());
+ }
+ }
+ }
+ }
+ }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Driver function to invoke the Dead-Stores checker on a CFG.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class FindEscaped : public CFGRecStmtDeclVisitor<FindEscaped>{
+ CFG *cfg;
+public:
+ FindEscaped(CFG *c) : cfg(c) {}
+
+ CFG& getCFG() { return *cfg; }
+
+ llvm::SmallPtrSet<VarDecl*, 20> Escaped;
+
+ void VisitUnaryOperator(UnaryOperator* U) {
+ // Check for '&'. Any VarDecl whose value has its address-taken we
+ // treat as escaped.
+ Expr* E = U->getSubExpr()->IgnoreParenCasts();
+ if (U->getOpcode() == UnaryOperator::AddrOf)
+ if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(E))
+ if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ Escaped.insert(VD);
+ return;
+ }
+ Visit(E);
+ }
+};
+} // end anonymous namespace
+
+
+void clang::CheckDeadStores(CFG &cfg, LiveVariables &L, ParentMap &pmap,
+ BugReporter& BR) {
+ FindEscaped FS(&cfg);
+ FS.getCFG().VisitBlockStmts(FS);
+ DeadStoreObs A(BR.getContext(), BR, pmap, FS.Escaped);
+ L.runOnAllBlocks(cfg, &A);
+}
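+
+// Illustrative dead stores this analysis reports (hypothetical code):
+//
+//   int f(int n) {
+//     int x = n + 1; // dead initialization: 'x' is overwritten before
+//                    // it is ever read
+//     x = n + 2;     // dead assignment: this value is never read
+//     return n;
+//   }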
diff --git a/contrib/llvm/tools/clang/lib/Checker/CheckObjCDealloc.cpp b/contrib/llvm/tools/clang/lib/Checker/CheckObjCDealloc.cpp
new file mode 100644
index 0000000..11ddaca
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/CheckObjCDealloc.cpp
@@ -0,0 +1,261 @@
+//==- CheckObjCDealloc.cpp - Check ObjC -dealloc implementation --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckObjCDealloc, a checker that
+// analyzes an Objective-C class's implementation to determine if it
+// correctly implements -dealloc.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/Checkers/LocalCheckers.h"
+#include "clang/Checker/BugReporter/PathDiagnostic.h"
+#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+static bool scan_dealloc(Stmt* S, Selector Dealloc) {
+
+ if (ObjCMessageExpr* ME = dyn_cast<ObjCMessageExpr>(S))
+ if (ME->getSelector() == Dealloc) {
+ switch (ME->getReceiverKind()) {
+ case ObjCMessageExpr::Instance: return false;
+ case ObjCMessageExpr::SuperInstance: return true;
+ case ObjCMessageExpr::Class: break;
+ case ObjCMessageExpr::SuperClass: break;
+ }
+ }
+
+ // Recurse to children.
+
+ for (Stmt::child_iterator I = S->child_begin(), E= S->child_end(); I!=E; ++I)
+ if (*I && scan_dealloc(*I, Dealloc))
+ return true;
+
+ return false;
+}
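+
+// Hedged sketch of what scan_dealloc accepts (hypothetical code):
+//
+//   - (void)dealloc {
+//     [mIvar release];
+//     [super dealloc]; // the send-to-super this scan must find
+//   }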
+
+static bool scan_ivar_release(Stmt* S, ObjCIvarDecl* ID,
+ const ObjCPropertyDecl* PD,
+ Selector Release,
+ IdentifierInfo* SelfII,
+ ASTContext& Ctx) {
+
+ // [mMyIvar release]
+ if (ObjCMessageExpr* ME = dyn_cast<ObjCMessageExpr>(S))
+ if (ME->getSelector() == Release)
+ if (ME->getInstanceReceiver())
+ if (Expr* Receiver = ME->getInstanceReceiver()->IgnoreParenCasts())
+ if (ObjCIvarRefExpr* E = dyn_cast<ObjCIvarRefExpr>(Receiver))
+ if (E->getDecl() == ID)
+ return true;
+
+ // [self setMyIvar:nil];
+ if (ObjCMessageExpr* ME = dyn_cast<ObjCMessageExpr>(S))
+ if (ME->getInstanceReceiver())
+ if (Expr* Receiver = ME->getInstanceReceiver()->IgnoreParenCasts())
+ if (DeclRefExpr* E = dyn_cast<DeclRefExpr>(Receiver))
+ if (E->getDecl()->getIdentifier() == SelfII)
+ if (ME->getMethodDecl() == PD->getSetterMethodDecl() &&
+ ME->getNumArgs() == 1 &&
+ ME->getArg(0)->isNullPointerConstant(Ctx,
+ Expr::NPC_ValueDependentIsNull))
+ return true;
+
+ // self.myIvar = nil;
+ if (BinaryOperator* BO = dyn_cast<BinaryOperator>(S))
+ if (BO->isAssignmentOp())
+ if (ObjCPropertyRefExpr* PRE =
+ dyn_cast<ObjCPropertyRefExpr>(BO->getLHS()->IgnoreParenCasts()))
+ if (PRE->getProperty() == PD)
+ if (BO->getRHS()->isNullPointerConstant(Ctx,
+ Expr::NPC_ValueDependentIsNull)) {
+ // This is only a 'release' if the property kind is not
+ // 'assign'.
+ return PD->getSetterKind() != ObjCPropertyDecl::Assign;
+ }
+
+ // Recurse to children.
+ for (Stmt::child_iterator I = S->child_begin(), E= S->child_end(); I!=E; ++I)
+ if (*I && scan_ivar_release(*I, ID, PD, Release, SelfII, Ctx))
+ return true;
+
+ return false;
+}
+
+void clang::CheckObjCDealloc(const ObjCImplementationDecl* D,
+ const LangOptions& LOpts, BugReporter& BR) {
+
+ assert (LOpts.getGCMode() != LangOptions::GCOnly);
+
+ ASTContext& Ctx = BR.getContext();
+ const ObjCInterfaceDecl* ID = D->getClassInterface();
+
+ // Does the class contain any ivars that are pointers (or id<...>)?
+ // If not, skip the check entirely.
+ // NOTE: This is motivated by PR 2517:
+ // http://llvm.org/bugs/show_bug.cgi?id=2517
+
+ bool containsPointerIvar = false;
+
+ for (ObjCInterfaceDecl::ivar_iterator I=ID->ivar_begin(), E=ID->ivar_end();
+ I!=E; ++I) {
+
+ ObjCIvarDecl* ID = *I;
+ QualType T = ID->getType();
+
+ if (!T->isObjCObjectPointerType() ||
+ ID->getAttr<IBOutletAttr>() || // Skip IBOutlets.
+ ID->getAttr<IBOutletCollectionAttr>()) // Skip IBOutletCollections.
+ continue;
+
+ containsPointerIvar = true;
+ break;
+ }
+
+ if (!containsPointerIvar)
+ return;
+
+ // Determine if the class subclasses NSObject.
+ IdentifierInfo* NSObjectII = &Ctx.Idents.get("NSObject");
+ IdentifierInfo* SenTestCaseII = &Ctx.Idents.get("SenTestCase");
+
+
+ for ( ; ID ; ID = ID->getSuperClass()) {
+ IdentifierInfo *II = ID->getIdentifier();
+
+ if (II == NSObjectII)
+ break;
+
+ // FIXME: For now, ignore classes that subclass SenTestCase, as these don't
+ // need to implement -dealloc. They implement tear down in another way,
+ // which we should try and catch later.
+ // http://llvm.org/bugs/show_bug.cgi?id=3187
+ if (II == SenTestCaseII)
+ return;
+ }
+
+ if (!ID)
+ return;
+
+ // Get the "dealloc" selector.
+ IdentifierInfo* II = &Ctx.Idents.get("dealloc");
+ Selector S = Ctx.Selectors.getSelector(0, &II);
+ ObjCMethodDecl* MD = 0;
+
+ // Scan the instance methods for "dealloc".
+ for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(),
+ E = D->instmeth_end(); I!=E; ++I) {
+
+ if ((*I)->getSelector() == S) {
+ MD = *I;
+ break;
+ }
+ }
+
+ if (!MD) { // No dealloc found.
+
+ const char* name = LOpts.getGCMode() == LangOptions::NonGC
+ ? "missing -dealloc"
+ : "missing -dealloc (Hybrid MM, non-GC)";
+
+ std::string buf;
+ llvm::raw_string_ostream os(buf);
+ os << "Objective-C class '" << D << "' lacks a 'dealloc' instance method";
+
+ BR.EmitBasicReport(name, os.str(), D->getLocStart());
+ return;
+ }
+
+ // dealloc found. Scan for missing [super dealloc].
+ if (MD->getBody() && !scan_dealloc(MD->getBody(), S)) {
+
+ const char* name = LOpts.getGCMode() == LangOptions::NonGC
+ ? "missing [super dealloc]"
+ : "missing [super dealloc] (Hybrid MM, non-GC)";
+
+ std::string buf;
+ llvm::raw_string_ostream os(buf);
+ os << "The 'dealloc' instance method in Objective-C class '" << D
+ << "' does not send a 'dealloc' message to its super class"
+ " (missing [super dealloc])";
+
+ BR.EmitBasicReport(name, os.str(), D->getLocStart());
+ return;
+ }
+
+ // Get the "release" selector.
+ IdentifierInfo* RII = &Ctx.Idents.get("release");
+ Selector RS = Ctx.Selectors.getSelector(0, &RII);
+
+ // Get the "self" identifier
+ IdentifierInfo* SelfII = &Ctx.Idents.get("self");
+
+ // Scan for missing and extra releases of ivars used by implementations
+ // of synthesized properties
+ for (ObjCImplementationDecl::propimpl_iterator I = D->propimpl_begin(),
+ E = D->propimpl_end(); I!=E; ++I) {
+
+ // We can only check the synthesized properties
+ if ((*I)->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
+ continue;
+
+ ObjCIvarDecl* ID = (*I)->getPropertyIvarDecl();
+ if (!ID)
+ continue;
+
+ QualType T = ID->getType();
+ if (!T->isObjCObjectPointerType()) // Skip non-pointer ivars
+ continue;
+
+ const ObjCPropertyDecl* PD = (*I)->getPropertyDecl();
+ if (!PD)
+ continue;
+
+ // ivars cannot be set via read-only properties, so we'll skip them
+ if (PD->isReadOnly())
+ continue;
+
+ // ivar must be released if and only if the kind of setter was not 'assign'
+ bool requiresRelease = PD->getSetterKind() != ObjCPropertyDecl::Assign;
+ if (scan_ivar_release(MD->getBody(), ID, PD, RS, SelfII, Ctx)
+ != requiresRelease) {
+ const char *name;
+ const char* category = "Memory (Core Foundation/Objective-C)";
+
+ std::string buf;
+ llvm::raw_string_ostream os(buf);
+
+ if (requiresRelease) {
+ name = LOpts.getGCMode() == LangOptions::NonGC
+ ? "missing ivar release (leak)"
+ : "missing ivar release (Hybrid MM, non-GC)";
+
+ os << "The '" << ID
+ << "' instance variable was retained by a synthesized property but "
+ "wasn't released in 'dealloc'";
+ } else {
+ name = LOpts.getGCMode() == LangOptions::NonGC
+ ? "extra ivar release (use-after-release)"
+ : "extra ivar release (Hybrid MM, non-GC)";
+
+ os << "The '" << ID
+ << "' instance variable was not retained by a synthesized property "
+ "but was released in 'dealloc'";
+ }
+
+ BR.EmitBasicReport(name, category, os.str(), (*I)->getLocation());
+ }
+ }
+}
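
// Editorial note (not part of the patch): the three reports built above fire
// on Objective-C patterns like the following (described in comments only,
// since the examples would be Objective-C rather than C++):
//   - a class with retained, synthesized pointer properties but no -dealloc
//     instance method at all ("missing -dealloc");
//   - a -dealloc body that never sends [super dealloc];
//   - a 'retain'/'copy' property whose ivar is never released in -dealloc
//     (leak), or an 'assign' property whose ivar is released there
//     (use-after-release).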
+
diff --git a/contrib/llvm/tools/clang/lib/Checker/CheckObjCInstMethSignature.cpp b/contrib/llvm/tools/clang/lib/Checker/CheckObjCInstMethSignature.cpp
new file mode 100644
index 0000000..76a0923
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/CheckObjCInstMethSignature.cpp
@@ -0,0 +1,119 @@
+//=- CheckObjCInstMethSignature.cpp - Check ObjC method signatures -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckObjCInstMethSignature, a flow-insensitive check
+// that determines if an Objective-C class interface incorrectly redefines
+// the method signature in a subclass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/Checkers/LocalCheckers.h"
+#include "clang/Checker/BugReporter/PathDiagnostic.h"
+#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/ASTContext.h"
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+static bool AreTypesCompatible(QualType Derived, QualType Ancestor,
+ ASTContext& C) {
+
+ // Right now don't compare the compatibility of pointers. That involves
+ // looking at subtyping relationships. FIXME: Future patch.
+ if (Derived->isAnyPointerType() && Ancestor->isAnyPointerType())
+ return true;
+
+ return C.typesAreCompatible(Derived, Ancestor);
+}
+
+static void CompareReturnTypes(const ObjCMethodDecl *MethDerived,
+ const ObjCMethodDecl *MethAncestor,
+ BugReporter &BR, ASTContext &Ctx,
+ const ObjCImplementationDecl *ID) {
+
+ QualType ResDerived = MethDerived->getResultType();
+ QualType ResAncestor = MethAncestor->getResultType();
+
+ if (!AreTypesCompatible(ResDerived, ResAncestor, Ctx)) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "The Objective-C class '"
+ << MethDerived->getClassInterface()
+ << "', which is derived from class '"
+ << MethAncestor->getClassInterface()
+ << "', defines the instance method '"
+ << MethDerived->getSelector().getAsString()
+ << "' whose return type is '"
+ << ResDerived.getAsString()
+ << "'. A method with the same name (same selector) is also defined in "
+ "class '"
+ << MethAncestor->getClassInterface()
+ << "' and has a return type of '"
+ << ResAncestor.getAsString()
+ << "'. These two types are incompatible, and may result in undefined "
+ "behavior for clients of these classes.";
+
+ BR.EmitBasicReport("Incompatible instance method return type",
+ os.str(), MethDerived->getLocStart());
+ }
+}
+
+void clang::CheckObjCInstMethSignature(const ObjCImplementationDecl* ID,
+ BugReporter& BR) {
+
+ const ObjCInterfaceDecl* D = ID->getClassInterface();
+ const ObjCInterfaceDecl* C = D->getSuperClass();
+
+ if (!C)
+ return;
+
+ ASTContext& Ctx = BR.getContext();
+
+ // Build a DenseMap of the methods for quick querying.
+ typedef llvm::DenseMap<Selector,ObjCMethodDecl*> MapTy;
+ MapTy IMeths;
+ unsigned NumMethods = 0;
+
+ for (ObjCImplementationDecl::instmeth_iterator I=ID->instmeth_begin(),
+ E=ID->instmeth_end(); I!=E; ++I) {
+
+ ObjCMethodDecl* M = *I;
+ IMeths[M->getSelector()] = M;
+ ++NumMethods;
+ }
+
+ // Now recurse the class hierarchy chain looking for methods with the
+ // same signatures.
+ while (C && NumMethods) {
+ for (ObjCInterfaceDecl::instmeth_iterator I=C->instmeth_begin(),
+ E=C->instmeth_end(); I!=E; ++I) {
+
+ ObjCMethodDecl* M = *I;
+ Selector S = M->getSelector();
+
+ MapTy::iterator MI = IMeths.find(S);
+
+ if (MI == IMeths.end() || MI->second == 0)
+ continue;
+
+ --NumMethods;
+ ObjCMethodDecl* MethDerived = MI->second;
+ MI->second = 0;
+
+ CompareReturnTypes(MethDerived, M, BR, Ctx, ID);
+ }
+
+ C = C->getSuperClass();
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/CheckSecuritySyntaxOnly.cpp b/contrib/llvm/tools/clang/lib/Checker/CheckSecuritySyntaxOnly.cpp
new file mode 100644
index 0000000..74e12b1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/CheckSecuritySyntaxOnly.cpp
@@ -0,0 +1,494 @@
+//==- CheckSecuritySyntaxOnly.cpp - Basic security checks --------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of flow-insensitive security checks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/Checkers/LocalCheckers.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+static bool isArc4RandomAvailable(const ASTContext &Ctx) {
+ const llvm::Triple &T = Ctx.Target.getTriple();
+ return T.getVendor() == llvm::Triple::Apple ||
+ T.getOS() == llvm::Triple::FreeBSD;
+}
+
+namespace {
+class WalkAST : public StmtVisitor<WalkAST> {
+ BugReporter &BR;
+ IdentifierInfo *II_gets;
+ IdentifierInfo *II_getpw;
+ IdentifierInfo *II_mktemp;
+ enum { num_rands = 9 };
+ IdentifierInfo *II_rand[num_rands];
+ IdentifierInfo *II_random;
+ enum { num_setids = 6 };
+ IdentifierInfo *II_setid[num_setids];
+
+ const bool CheckRand;
+
+public:
+ WalkAST(BugReporter &br) : BR(br),
+ II_gets(0), II_getpw(0), II_mktemp(0),
+ II_rand(), II_random(0), II_setid(),
+ CheckRand(isArc4RandomAvailable(BR.getContext())) {}
+
+ // Statement visitor methods.
+ void VisitCallExpr(CallExpr *CE);
+ void VisitForStmt(ForStmt *S);
+ void VisitCompoundStmt (CompoundStmt *S);
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+
+ void VisitChildren(Stmt *S);
+
+ // Helpers.
+ IdentifierInfo *GetIdentifier(IdentifierInfo *& II, const char *str);
+
+ // Checker-specific methods.
+ void CheckLoopConditionForFloat(const ForStmt *FS);
+ void CheckCall_gets(const CallExpr *CE, const FunctionDecl *FD);
+ void CheckCall_getpw(const CallExpr *CE, const FunctionDecl *FD);
+ void CheckCall_mktemp(const CallExpr *CE, const FunctionDecl *FD);
+ void CheckCall_rand(const CallExpr *CE, const FunctionDecl *FD);
+ void CheckCall_random(const CallExpr *CE, const FunctionDecl *FD);
+ void CheckUncheckedReturnValue(CallExpr *CE);
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Helper methods.
+//===----------------------------------------------------------------------===//
+
+IdentifierInfo *WalkAST::GetIdentifier(IdentifierInfo *& II, const char *str) {
+ if (!II)
+ II = &BR.getContext().Idents.get(str);
+
+ return II;
+}
+
+//===----------------------------------------------------------------------===//
+// AST walking.
+//===----------------------------------------------------------------------===//
+
+void WalkAST::VisitChildren(Stmt *S) {
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+ if (Stmt *child = *I)
+ Visit(child);
+}
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+ if (const FunctionDecl *FD = CE->getDirectCallee()) {
+ CheckCall_gets(CE, FD);
+ CheckCall_getpw(CE, FD);
+ CheckCall_mktemp(CE, FD);
+ if (CheckRand) {
+ CheckCall_rand(CE, FD);
+ CheckCall_random(CE, FD);
+ }
+ }
+
+ // Recurse and check children.
+ VisitChildren(CE);
+}
+
+void WalkAST::VisitCompoundStmt(CompoundStmt *S) {
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+ if (Stmt *child = *I) {
+ if (CallExpr *CE = dyn_cast<CallExpr>(child))
+ CheckUncheckedReturnValue(CE);
+ Visit(child);
+ }
+}
+
+void WalkAST::VisitForStmt(ForStmt *FS) {
+ CheckLoopConditionForFloat(FS);
+
+ // Recurse and check children.
+ VisitChildren(FS);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: floating point variable used as loop counter.
+// Originally: <rdar://problem/6336718>
+// Implements: CERT security coding advisory FLP-30.
+//===----------------------------------------------------------------------===//
+
+static const DeclRefExpr*
+GetIncrementedVar(const Expr *expr, const VarDecl *x, const VarDecl *y) {
+ expr = expr->IgnoreParenCasts();
+
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(expr)) {
+ if (!(B->isAssignmentOp() || B->isCompoundAssignmentOp() ||
+ B->getOpcode() == BinaryOperator::Comma))
+ return NULL;
+
+ if (const DeclRefExpr *lhs = GetIncrementedVar(B->getLHS(), x, y))
+ return lhs;
+
+ if (const DeclRefExpr *rhs = GetIncrementedVar(B->getRHS(), x, y))
+ return rhs;
+
+ return NULL;
+ }
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(expr)) {
+ const NamedDecl *ND = DR->getDecl();
+ return ND == x || ND == y ? DR : NULL;
+ }
+
+ if (const UnaryOperator *U = dyn_cast<UnaryOperator>(expr))
+ return U->isIncrementDecrementOp()
+ ? GetIncrementedVar(U->getSubExpr(), x, y) : NULL;
+
+ return NULL;
+}
+
+/// CheckLoopConditionForFloat - This check looks for 'for' statements that
+/// use a floating point variable as a loop counter.
+/// CERT: FLP30-C, FLP30-CPP.
+///
+void WalkAST::CheckLoopConditionForFloat(const ForStmt *FS) {
+ // Does the loop have a condition?
+ const Expr *condition = FS->getCond();
+
+ if (!condition)
+ return;
+
+ // Does the loop have an increment?
+ const Expr *increment = FS->getInc();
+
+ if (!increment)
+ return;
+
+ // Strip away '()' and casts.
+ condition = condition->IgnoreParenCasts();
+ increment = increment->IgnoreParenCasts();
+
+ // Is the loop condition a comparison?
+ const BinaryOperator *B = dyn_cast<BinaryOperator>(condition);
+
+ if (!B)
+ return;
+
+ // Is this a comparison?
+ if (!(B->isRelationalOp() || B->isEqualityOp()))
+ return;
+
+ // Are we comparing variables?
+ const DeclRefExpr *drLHS = dyn_cast<DeclRefExpr>(B->getLHS()->IgnoreParens());
+ const DeclRefExpr *drRHS = dyn_cast<DeclRefExpr>(B->getRHS()->IgnoreParens());
+
+ // Does at least one of the variables have a floating point type?
+ drLHS = drLHS && drLHS->getType()->isFloatingType() ? drLHS : NULL;
+ drRHS = drRHS && drRHS->getType()->isFloatingType() ? drRHS : NULL;
+
+ if (!drLHS && !drRHS)
+ return;
+
+ const VarDecl *vdLHS = drLHS ? dyn_cast<VarDecl>(drLHS->getDecl()) : NULL;
+ const VarDecl *vdRHS = drRHS ? dyn_cast<VarDecl>(drRHS->getDecl()) : NULL;
+
+ if (!vdLHS && !vdRHS)
+ return;
+
+ // Does either variable appear in increment?
+ const DeclRefExpr *drInc = GetIncrementedVar(increment, vdLHS, vdRHS);
+
+ if (!drInc)
+ return;
+
+ // Emit the error. First figure out which DeclRefExpr in the condition
+ // referenced the compared variable.
+ const DeclRefExpr *drCond = vdLHS == drInc->getDecl() ? drLHS : drRHS;
+
+ llvm::SmallVector<SourceRange, 2> ranges;
+ llvm::SmallString<256> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+
+ os << "Variable '" << drCond->getDecl()->getNameAsCString()
+ << "' with floating point type '" << drCond->getType().getAsString()
+ << "' should not be used as a loop counter";
+
+ ranges.push_back(drCond->getSourceRange());
+ ranges.push_back(drInc->getSourceRange());
+
+ const char *bugType = "Floating point variable used as loop counter";
+ BR.EmitBasicReport(bugType, "Security", os.str(),
+ FS->getLocStart(), ranges.data(), ranges.size());
+}
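
// Editorial example (not part of the patch): a minimal sketch of the code
// this check flags. 0.1 has no exact binary representation, so accumulated
// rounding error can make an equality-based exit condition never fire
// (CERT FLP30-C).
static void flp30_example(void) {
  for (float x = 0.0f; x != 1.0f; x += 0.1f) { // flagged: float loop counter
    /* loop body */
  }
  // Safer: drive the loop with an integer and derive the float from it.
  for (int i = 0; i <= 10; ++i) {
    float x = i / 10.0f;
    (void)x;
  }
}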
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'gets' is insecure.
+// Originally: <rdar://problem/6335715>
+// Implements (part of): 300-BSI (buildsecurityin.us-cert.gov)
+// CWE-242: Use of Inherently Dangerous Function
+//===----------------------------------------------------------------------===//
+
+void WalkAST::CheckCall_gets(const CallExpr *CE, const FunctionDecl *FD) {
+ if (FD->getIdentifier() != GetIdentifier(II_gets, "gets"))
+ return;
+
+ const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FD->getType());
+ if (!FPT)
+ return;
+
+ // Verify that the function takes a single argument.
+ if (FPT->getNumArgs() != 1)
+ return;
+
+ // Is the argument a 'char*'?
+ const PointerType *PT = dyn_cast<PointerType>(FPT->getArgType(0));
+ if (!PT)
+ return;
+
+ if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
+ return;
+
+ // Issue a warning.
+ SourceRange R = CE->getCallee()->getSourceRange();
+ BR.EmitBasicReport("Potential buffer overflow in call to 'gets'",
+ "Security",
+ "Call to function 'gets' is extremely insecure as it can "
+ "always result in a buffer overflow",
+ CE->getLocStart(), &R, 1);
+}
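
// Editorial example (not part of the patch): 'gets' has no way to bound its
// write, so any sufficiently long input line overflows the buffer; 'fgets'
// is the bounded replacement.
#include <stdio.h>
static void gets_example(void) {
  char buf[64];
  /* gets(buf); */                    // flagged: unbounded write into 'buf'
  if (fgets(buf, sizeof(buf), stdin)) // bounded alternative
    fputs(buf, stdout);
}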
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'getpwd' is insecure.
+// CWE-477: Use of Obsolete Functions
+//===----------------------------------------------------------------------===//
+
+void WalkAST::CheckCall_getpw(const CallExpr *CE, const FunctionDecl *FD) {
+ if (FD->getIdentifier() != GetIdentifier(II_getpw, "getpw"))
+ return;
+
+ const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FD->getType());
+ if (!FPT)
+ return;
+
+ // Verify that the function takes two arguments.
+ if (FPT->getNumArgs() != 2)
+ return;
+
+ // Verify the first argument type is integer.
+ if (!FPT->getArgType(0)->isIntegerType())
+ return;
+
+ // Verify the second argument type is char*.
+ const PointerType *PT = dyn_cast<PointerType>(FPT->getArgType(1));
+ if (!PT)
+ return;
+
+ if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
+ return;
+
+ // Issue a warning.
+ SourceRange R = CE->getCallee()->getSourceRange();
+ BR.EmitBasicReport("Potential buffer overflow in call to 'getpw'",
+ "Security",
+ "The getpw() function is dangerous as it may overflow the "
+ "provided buffer. It is obsoleted by getpwuid().",
+ CE->getLocStart(), &R, 1);
+}
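
// Editorial example (not part of the patch): getpw() copies an entire
// /etc/passwd entry into a caller-supplied buffer of unknown size, which is
// why it is flagged; getpwuid() returns a parsed struct instead.
#include <pwd.h>
#include <sys/types.h>
static void getpw_example(uid_t uid) {
  /* char buf[128]; getpw(uid, buf); */ // flagged: possible buffer overflow
  struct passwd *pw = getpwuid(uid);    // obsoleting replacement
  (void)pw;
}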
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'mktemp' is insecure. It is obsoleted by mkstemp().
+// CWE-377: Insecure Temporary File
+//===----------------------------------------------------------------------===//
+
+void WalkAST::CheckCall_mktemp(const CallExpr *CE, const FunctionDecl *FD) {
+ if (FD->getIdentifier() != GetIdentifier(II_mktemp, "mktemp"))
+ return;
+
+ const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FD->getType());
+ if (!FPT)
+ return;
+
+ // Verify that the function takes a single argument.
+ if (FPT->getNumArgs() != 1)
+ return;
+
+ // Verify that the argument is Pointer Type.
+ const PointerType *PT = dyn_cast<PointerType>(FPT->getArgType(0));
+ if (!PT)
+ return;
+
+ // Verify that the argument is a 'char*'.
+ if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
+ return;
+
+ // Issue a warning.
+ SourceRange R = CE->getCallee()->getSourceRange();
+ BR.EmitBasicReport("Potential insecure temporary file in call 'mktemp'",
+ "Security",
+ "Call to function 'mktemp' is insecure as it always "
+ "creates or uses insecure temporary file. Use 'mkstemp' instead",
+ CE->getLocStart(), &R, 1);
+}
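
// Editorial example (not part of the patch): mktemp() only generates a
// name, leaving a race window in which another process can create the file
// first; mkstemp() creates and opens the file atomically.
#include <stdlib.h>
#include <unistd.h>
static void mktemp_example(void) {
  char tmpl[] = "/tmp/exampleXXXXXX";
  /* char *name = mktemp(tmpl); */ // flagged: the name can be raced
  int fd = mkstemp(tmpl);          // atomic create-and-open
  if (fd != -1)
    close(fd);
}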
+
+//===----------------------------------------------------------------------===//
+// Check: Linear congruential random number generators should not be used
+// Originally: <rdar://problem/63371000>
+// CWE-338: Use of Cryptographically Weak PRNG
+//===----------------------------------------------------------------------===//
+
+void WalkAST::CheckCall_rand(const CallExpr *CE, const FunctionDecl *FD) {
+ if (II_rand[0] == NULL) {
+ // This check applies to these functions
+ static const char * const identifiers[num_rands] = {
+ "drand48", "erand48", "jrand48", "lrand48", "mrand48", "nrand48",
+ "lcong48",
+ "rand", "rand_r"
+ };
+
+ for (size_t i = 0; i < num_rands; i++)
+ II_rand[i] = &BR.getContext().Idents.get(identifiers[i]);
+ }
+
+ const IdentifierInfo *id = FD->getIdentifier();
+ size_t identifierid;
+
+ for (identifierid = 0; identifierid < num_rands; identifierid++)
+ if (id == II_rand[identifierid])
+ break;
+
+ if (identifierid >= num_rands)
+ return;
+
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FD->getType());
+ if (!FTP)
+ return;
+
+ if (FTP->getNumArgs() == 1) {
+ // Is the argument an 'unsigned short *'?
+ // (Actually any integer type is allowed.)
+ const PointerType *PT = dyn_cast<PointerType>(FTP->getArgType(0));
+ if (!PT)
+ return;
+
+ if (! PT->getPointeeType()->isIntegerType())
+ return;
+ }
+ else if (FTP->getNumArgs() != 0)
+ return;
+
+ // Issue a warning.
+ llvm::SmallString<256> buf1;
+ llvm::raw_svector_ostream os1(buf1);
+ os1 << '\'' << FD << "' is a poor random number generator";
+
+ llvm::SmallString<256> buf2;
+ llvm::raw_svector_ostream os2(buf2);
+ os2 << "Function '" << FD
+ << "' is obsolete because it implements a poor random number generator."
+ << " Use 'arc4random' instead";
+
+ SourceRange R = CE->getCallee()->getSourceRange();
+ BR.EmitBasicReport(os1.str(), "Security", os2.str(),CE->getLocStart(), &R, 1);
+}
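
// Editorial example (not part of the patch): the checker accepts both the
// zero-argument form (rand, lrand48, ...) and the one-argument form taking
// an integer pointer (rand_r, nrand48, ...). The suggested replacement is
// only available where isArc4RandomAvailable() returned true above.
#include <stdlib.h>
static unsigned prng_example(void) {
  /* return (unsigned)rand(); */ // flagged: poor random number generator
  return arc4random();           // suggested on Apple/FreeBSD targets
}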
+
+//===----------------------------------------------------------------------===//
+// Check: 'random' should not be used
+// Originally: <rdar://problem/63371000>
+//===----------------------------------------------------------------------===//
+
+void WalkAST::CheckCall_random(const CallExpr *CE, const FunctionDecl *FD) {
+ if (FD->getIdentifier() != GetIdentifier(II_random, "random"))
+ return;
+
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FD->getType());
+ if (!FTP)
+ return;
+
+ // Verify that the function takes no arguments.
+ if (FTP->getNumArgs() != 0)
+ return;
+
+ // Issue a warning.
+ SourceRange R = CE->getCallee()->getSourceRange();
+ BR.EmitBasicReport("'random' is not a secure random number generator",
+ "Security",
+ "The 'random' function produces a sequence of values that "
+ "an adversary may be able to predict. Use 'arc4random' "
+ "instead", CE->getLocStart(), &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: The return values of calls that drop privileges should be checked.
+// Originally: <rdar://problem/6337132>
+//===----------------------------------------------------------------------===//
+
+void WalkAST::CheckUncheckedReturnValue(CallExpr *CE) {
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD)
+ return;
+
+ if (II_setid[0] == NULL) {
+ static const char * const identifiers[num_setids] = {
+ "setuid", "setgid", "seteuid", "setegid",
+ "setreuid", "setregid"
+ };
+
+ for (size_t i = 0; i < num_setids; i++)
+ II_setid[i] = &BR.getContext().Idents.get(identifiers[i]);
+ }
+
+ const IdentifierInfo *id = FD->getIdentifier();
+ size_t identifierid;
+
+ for (identifierid = 0; identifierid < num_setids; identifierid++)
+ if (id == II_setid[identifierid])
+ break;
+
+ if (identifierid >= num_setids)
+ return;
+
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FD->getType());
+ if (!FTP)
+ return;
+
+ // Verify that the function takes one or two arguments (depending on
+ // the function).
+ if (FTP->getNumArgs() != (identifierid < 4 ? 1 : 2))
+ return;
+
+ // The arguments must be integers.
+ for (unsigned i = 0; i < FTP->getNumArgs(); i++)
+ if (! FTP->getArgType(i)->isIntegerType())
+ return;
+
+ // Issue a warning.
+ llvm::SmallString<256> buf1;
+ llvm::raw_svector_ostream os1(buf1);
+ os1 << "Return value is not checked in call to '" << FD << '\'';
+
+ llvm::SmallString<256> buf2;
+ llvm::raw_svector_ostream os2(buf2);
+ os2 << "The return value from the call to '" << FD
+ << "' is not checked. If an error occurs in '" << FD
+ << "', the following code may execute with unexpected privileges";
+
+ SourceRange R = CE->getCallee()->getSourceRange();
+ BR.EmitBasicReport(os1.str(), "Security", os2.str(),CE->getLocStart(), &R, 1);
+}
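
// Editorial example (not part of the patch): dropping privileges can fail,
// and code that keeps running after an unchecked failure executes with more
// privilege than intended.
#include <stdlib.h>
#include <unistd.h>
static void drop_privs_example(uid_t uid) {
  /* setuid(uid); */    // flagged: return value not checked
  if (setuid(uid) != 0) // checked: handle the failure explicitly
    abort();
}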
+
+//===----------------------------------------------------------------------===//
+// Entry point for check.
+//===----------------------------------------------------------------------===//
+
+void clang::CheckSecuritySyntaxOnly(const Decl *D, BugReporter &BR) {
+ WalkAST walker(BR);
+ walker.Visit(D->getBody());
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/CheckSizeofPointer.cpp b/contrib/llvm/tools/clang/lib/Checker/CheckSizeofPointer.cpp
new file mode 100644
index 0000000..bbe494c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/CheckSizeofPointer.cpp
@@ -0,0 +1,71 @@
+//==- CheckSizeofPointer.cpp - Check for sizeof on pointers ------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a check for unintended use of sizeof() on pointer
+// expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Checker/Checkers/LocalCheckers.h"
+
+using namespace clang;
+
+namespace {
+class WalkAST : public StmtVisitor<WalkAST> {
+ BugReporter &BR;
+
+public:
+ WalkAST(BugReporter &br) : BR(br) {}
+ void VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E);
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+ void VisitChildren(Stmt *S);
+};
+}
+
+void WalkAST::VisitChildren(Stmt *S) {
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+ if (Stmt *child = *I)
+ Visit(child);
+}
+
+// CWE-467: Use of sizeof() on a Pointer Type
+void WalkAST::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) {
+ if (!E->isSizeOf())
+ return;
+
+ // If an explicit type is used in the code, the coder usually knows what
+ // they are doing.
+ if (E->isArgumentType())
+ return;
+
+ QualType T = E->getTypeOfArgument();
+ if (T->isPointerType()) {
+
+ // Many false positives have the form 'sizeof *p'. This is reasonable
+ // because people know what they are doing when they intentionally
+ // dereference the pointer.
+ Expr *ArgEx = E->getArgumentExpr();
+ if (!isa<DeclRefExpr>(ArgEx->IgnoreParens()))
+ return;
+
+ SourceRange R = ArgEx->getSourceRange();
+ BR.EmitBasicReport("Potential unintended use of sizeof() on pointer type",
+ "Logic",
+ "The code calls sizeof() on a pointer type. "
+ "This can produce an unexpected result.",
+ E->getLocStart(), &R, 1);
+ }
+}
+
+void clang::CheckSizeofPointer(const Decl *D, BugReporter &BR) {
+ WalkAST walker(BR);
+ walker.Visit(D->getBody());
+}
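
// Editorial example (not part of the patch): the classic mistake this check
// targets. 'len' is a hypothetical buffer length known to the caller.
#include <string.h>
static void sizeof_example(char *dst, size_t len) {
  memset(dst, 0, sizeof(dst)); // flagged: size of the pointer (4 or 8 bytes)
  memset(dst, 0, len);         // intended: size of the buffer
}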
diff --git a/contrib/llvm/tools/clang/lib/Checker/Checker.cpp b/contrib/llvm/tools/clang/lib/Checker/Checker.cpp
new file mode 100644
index 0000000..36323b9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/Checker.cpp
@@ -0,0 +1,35 @@
+//== Checker.cpp - Abstract interface for checkers ---------------*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines Checker and CheckerVisitor, classes used for creating
+// domain-specific checks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/Checker.h"
+using namespace clang;
+
+Checker::~Checker() {}
+
+CheckerContext::~CheckerContext() {
+ // Do we need to autotransition? 'Dst' can get populated in a variety of
+ // ways, including 'addTransition()' adding the predecessor node to Dst
+ // without actually generating a new node. We also shouldn't autotransition
+ // if we are building sinks or we generated a node and decided to not
+ // add it as a transition.
+ if (Dst.size() == size && !B.BuildSinks && !B.HasGeneratedNode) {
+ if (ST && ST != B.GetState(Pred)) {
+ static int autoTransitionTag = 0;
+ B.Tag = &autoTransitionTag;
+ addTransition(ST);
+ }
+ else
+ Dst.Add(Pred);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/CocoaConventions.cpp b/contrib/llvm/tools/clang/lib/Checker/CocoaConventions.cpp
new file mode 100644
index 0000000..3ba887c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/CocoaConventions.cpp
@@ -0,0 +1,195 @@
+//===- CocoaConventions.cpp - Special handling of Cocoa conventions -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines utilities for reasoning about Cocoa method and type
+// naming conventions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/DomainSpecific/CocoaConventions.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/ADT/StringExtras.h"
+
+using namespace clang;
+
+using llvm::StringRef;
+
+// The "fundamental rule" for naming conventions of methods:
+// (url broken into two lines)
+// http://developer.apple.com/documentation/Cocoa/Conceptual/
+// MemoryMgmt/Tasks/MemoryManagementRules.html
+//
+// "You take ownership of an object if you create it using a method whose name
+// begins with "alloc" or "new" or contains "copy" (for example, alloc,
+// newObject, or mutableCopy), or if you send it a retain message. You are
+// responsible for relinquishing ownership of objects you own using release
+// or autorelease. Any other time you receive an object, you must
+// not release it."
+//
+
+static bool isWordEnd(char ch, char prev, char next) {
+ return ch == '\0'
+ || (islower(prev) && isupper(ch)) // xxxC
+ || (isupper(prev) && isupper(ch) && islower(next)) // XXCreate
+ || !isalpha(ch);
+}
+
+static const char* parseWord(const char* s) {
+ char ch = *s, prev = '\0';
+ assert(ch != '\0');
+ char next = *(s+1);
+ while (!isWordEnd(ch, prev, next)) {
+ prev = ch;
+ ch = next;
+ next = *((++s)+1);
+ }
+ return s;
+}
+
+cocoa::NamingConvention cocoa::deriveNamingConvention(Selector S) {
+ IdentifierInfo *II = S.getIdentifierInfoForSlot(0);
+
+ if (!II)
+ return NoConvention;
+
+ const char *s = II->getNameStart();
+
+ // A method/function name may contain a prefix. We don't know whether it
+ // is there, however, until we encounter the first '_'.
+ bool InPossiblePrefix = true;
+ bool AtBeginning = true;
+ NamingConvention C = NoConvention;
+
+ while (*s != '\0') {
+ // Skip '_'.
+ if (*s == '_') {
+ if (InPossiblePrefix) {
+ // If we already have a convention, return it. Otherwise, skip
+ // the prefix as if it wasn't there.
+ if (C != NoConvention)
+ break;
+
+ InPossiblePrefix = false;
+ AtBeginning = true;
+ assert(C == NoConvention);
+ }
+ ++s;
+ continue;
+ }
+
+ // Skip numbers, ':', etc.
+ if (!isalpha(*s)) {
+ ++s;
+ continue;
+ }
+
+ const char *wordEnd = parseWord(s);
+ assert(wordEnd > s);
+ unsigned len = wordEnd - s;
+
+ switch (len) {
+ default:
+ break;
+ case 3:
+ // Methods starting with 'new' follow the create rule.
+ if (AtBeginning && StringRef(s, len).equals_lower("new"))
+ C = CreateRule;
+ break;
+ case 4:
+ // Methods whose name contains 'copy' follow the create rule.
+ if (C == NoConvention && StringRef(s, len).equals_lower("copy"))
+ C = CreateRule;
+ else // Methods starting with 'init' follow the init rule.
+ if (AtBeginning && StringRef(s, len).equals_lower("init"))
+ C = InitRule;
+ break;
+ case 5:
+ if (AtBeginning && StringRef(s, len).equals_lower("alloc"))
+ C = CreateRule;
+ break;
+ }
+
+ // If we aren't in the prefix and have a derived convention then just
+ // return it now.
+ if (!InPossiblePrefix && C != NoConvention)
+ return C;
+
+ AtBeginning = false;
+ s = wordEnd;
+ }
+
+ // We will get here if there wasn't more than one word
+ // after the prefix.
+ return C;
+}
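
// Editorial note (not part of the patch): how the word-scanning loop above
// classifies a few representative selectors:
//   "newDocument"          -> CreateRule   ("new" as the first word)
//   "mutableCopyWithZone:" -> CreateRule   ("copy" appears as a word)
//   "initWithFrame:"       -> InitRule     ("init" as the first word)
//   "_newDocument"         -> CreateRule   (leading '_' treated as a prefix)
//   "viewDidLoad"          -> NoConvention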
+
+bool cocoa::isRefType(QualType RetTy, llvm::StringRef Prefix,
+ llvm::StringRef Name) {
+ // Recursively walk the typedef stack, allowing typedefs of reference types.
+ while (TypedefType* TD = dyn_cast<TypedefType>(RetTy.getTypePtr())) {
+ llvm::StringRef TDName = TD->getDecl()->getIdentifier()->getName();
+ if (TDName.startswith(Prefix) && TDName.endswith("Ref"))
+ return true;
+
+ RetTy = TD->getDecl()->getUnderlyingType();
+ }
+
+ if (Name.empty())
+ return false;
+
+ // Is the type void*?
+ const PointerType* PT = RetTy->getAs<PointerType>();
+ if (!(PT->getPointeeType().getUnqualifiedType()->isVoidType()))
+ return false;
+
+ // Does the name start with the prefix?
+ return Name.startswith(Prefix);
+}
+
+bool cocoa::isCFObjectRef(QualType T) {
+ return isRefType(T, "CF") || // Core Foundation.
+ isRefType(T, "CG") || // Core Graphics.
+ isRefType(T, "DADisk") || // Disk Arbitration API.
+ isRefType(T, "DADissenter") ||
+ isRefType(T, "DASessionRef");
+}
+
+
+bool cocoa::isCocoaObjectRef(QualType Ty) {
+ if (!Ty->isObjCObjectPointerType())
+ return false;
+
+ const ObjCObjectPointerType *PT = Ty->getAs<ObjCObjectPointerType>();
+
+ // Can be true for objects with the 'NSObject' attribute.
+ if (!PT)
+ return true;
+
+ // We assume that id<..>, id, and "Class" all represent tracked objects.
+ if (PT->isObjCIdType() || PT->isObjCQualifiedIdType() ||
+ PT->isObjCClassType())
+ return true;
+
+ // Does the interface subclass NSObject?
+ // FIXME: We can memoize here if this gets too expensive.
+ const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
+
+ // Assume that anything declared with a forward declaration and no
+ // @interface subclasses NSObject.
+ if (ID->isForwardDecl())
+ return true;
+
+ for ( ; ID ; ID = ID->getSuperClass())
+ if (ID->getIdentifier()->getName() == "NSObject")
+ return true;
+
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/DereferenceChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/DereferenceChecker.cpp
new file mode 100644
index 0000000..af74c79
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/DereferenceChecker.cpp
@@ -0,0 +1,156 @@
+//== DereferenceChecker.cpp - Null dereference checker ----------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines DereferenceChecker, a builtin check in GRExprEngine that
+// performs checks for null and undefined pointers at loads and stores.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/Checkers/DereferenceChecker.h"
+#include "clang/Checker/PathSensitive/Checker.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+
+using namespace clang;
+
+namespace {
+class DereferenceChecker : public Checker {
+ BuiltinBug *BT_null;
+ BuiltinBug *BT_undef;
+ llvm::SmallVector<ExplodedNode*, 2> ImplicitNullDerefNodes;
+public:
+ DereferenceChecker() : BT_null(0), BT_undef(0) {}
+ static void *getTag() { static int tag = 0; return &tag; }
+ void VisitLocation(CheckerContext &C, const Stmt *S, SVal location);
+
+ std::pair<ExplodedNode * const*, ExplodedNode * const*>
+ getImplicitNodes() const {
+ return std::make_pair(ImplicitNullDerefNodes.data(),
+ ImplicitNullDerefNodes.data() +
+ ImplicitNullDerefNodes.size());
+ }
+};
+} // end anonymous namespace
+
+void clang::RegisterDereferenceChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new DereferenceChecker());
+}
+
+std::pair<ExplodedNode * const *, ExplodedNode * const *>
+clang::GetImplicitNullDereferences(GRExprEngine &Eng) {
+ DereferenceChecker *checker = Eng.getChecker<DereferenceChecker>();
+ if (!checker)
+ return std::make_pair((ExplodedNode * const *) 0,
+ (ExplodedNode * const *) 0);
+ return checker->getImplicitNodes();
+}
+
+void DereferenceChecker::VisitLocation(CheckerContext &C, const Stmt *S,
+ SVal l) {
+ // Check for dereference of an undefined value.
+ if (l.isUndef()) {
+ if (ExplodedNode *N = C.GenerateSink()) {
+ if (!BT_undef)
+ BT_undef = new BuiltinBug("Dereference of undefined pointer value");
+
+ EnhancedBugReport *report =
+ new EnhancedBugReport(*BT_undef, BT_undef->getDescription(), N);
+ report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue,
+ bugreporter::GetDerefExpr(N));
+ C.EmitReport(report);
+ }
+ return;
+ }
+
+ DefinedOrUnknownSVal location = cast<DefinedOrUnknownSVal>(l);
+
+ // Check for null dereferences.
+ if (!isa<Loc>(location))
+ return;
+
+ const GRState *state = C.getState();
+ const GRState *notNullState, *nullState;
+ llvm::tie(notNullState, nullState) = state->Assume(location);
+
+ // The explicit NULL case.
+ if (nullState) {
+ if (!notNullState) {
+ // Generate an error node.
+ ExplodedNode *N = C.GenerateSink(nullState);
+ if (!N)
+ return;
+
+ // We know that 'location' can only be null. This is what
+ // we call an "explicit" null dereference.
+ if (!BT_null)
+ BT_null = new BuiltinBug("Dereference of null pointer");
+
+ llvm::SmallString<100> buf;
+ llvm::SmallVector<SourceRange, 2> Ranges;
+
+ switch (S->getStmtClass()) {
+ case Stmt::UnaryOperatorClass: {
+ const UnaryOperator *U = cast<UnaryOperator>(S);
+ const Expr *SU = U->getSubExpr()->IgnoreParens();
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(SU)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ llvm::raw_svector_ostream os(buf);
+ os << "Dereference of null pointer (loaded from variable '"
+ << VD->getName() << "')";
+ Ranges.push_back(DR->getSourceRange());
+ }
+ }
+ break;
+ }
+ case Stmt::MemberExprClass: {
+ const MemberExpr *M = cast<MemberExpr>(S);
+ if (M->isArrow())
+ if (DeclRefExpr *DR =
+ dyn_cast<DeclRefExpr>(M->getBase()->IgnoreParenCasts())) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ llvm::raw_svector_ostream os(buf);
+ os << "Field access results in a dereference of a null pointer "
+ "(loaded from variable '" << VD->getName() << "')";
+ Ranges.push_back(M->getBase()->getSourceRange());
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ EnhancedBugReport *report =
+ new EnhancedBugReport(*BT_null,
+ buf.empty() ? BT_null->getDescription():buf.str(),
+ N);
+
+ report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue,
+ bugreporter::GetDerefExpr(N));
+
+ for (llvm::SmallVectorImpl<SourceRange>::iterator
+ I = Ranges.begin(), E = Ranges.end(); I!=E; ++I)
+ report->addRange(*I);
+
+ C.EmitReport(report);
+ return;
+ }
+ else {
+ // Otherwise, we have the case where the location could either be
+ // null or not-null. Record the error node as an "implicit" null
+ // dereference.
+ if (ExplodedNode *N = C.GenerateSink(nullState))
+ ImplicitNullDerefNodes.push_back(N);
+ }
+ }
+
+ // From this point forward, we know that the location is not null.
+ C.addTransition(notNullState);
+}
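
// Editorial example (not part of the patch): a path on which the checker
// emits the "loaded from variable" flavour of the report built above.
static int deref_example(int *p, int flag) {
  if (flag)
    p = 0;   // on this path 'p' is known to be null
  return *p; // "Dereference of null pointer (loaded from variable 'p')"
}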
diff --git a/contrib/llvm/tools/clang/lib/Checker/DivZeroChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/DivZeroChecker.cpp
new file mode 100644
index 0000000..e09a871
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/DivZeroChecker.cpp
@@ -0,0 +1,85 @@
+//== DivZeroChecker.cpp - Division by zero checker --------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines DivZeroChecker, a builtin check in GRExprEngine that performs
+// checks for division by zero.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+
+using namespace clang;
+
+namespace {
+class DivZeroChecker : public CheckerVisitor<DivZeroChecker> {
+ BuiltinBug *BT;
+public:
+ DivZeroChecker() : BT(0) {}
+ static void *getTag();
+ void PreVisitBinaryOperator(CheckerContext &C, const BinaryOperator *B);
+};
+} // end anonymous namespace
+
+void clang::RegisterDivZeroChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new DivZeroChecker());
+}
+
+void *DivZeroChecker::getTag() {
+ static int x;
+ return &x;
+}
+
+void DivZeroChecker::PreVisitBinaryOperator(CheckerContext &C,
+ const BinaryOperator *B) {
+ BinaryOperator::Opcode Op = B->getOpcode();
+ if (Op != BinaryOperator::Div &&
+ Op != BinaryOperator::Rem &&
+ Op != BinaryOperator::DivAssign &&
+ Op != BinaryOperator::RemAssign)
+ return;
+
+ if (!B->getRHS()->getType()->isIntegerType() ||
+ !B->getRHS()->getType()->isScalarType())
+ return;
+
+ SVal Denom = C.getState()->getSVal(B->getRHS());
+ const DefinedSVal *DV = dyn_cast<DefinedSVal>(&Denom);
+
+ // Divide-by-undefined handled in the generic checking for uses of
+ // undefined values.
+ if (!DV)
+ return;
+
+ // Check for divide by zero.
+ ConstraintManager &CM = C.getConstraintManager();
+ const GRState *stateNotZero, *stateZero;
+ llvm::tie(stateNotZero, stateZero) = CM.AssumeDual(C.getState(), *DV);
+
+ if (stateZero && !stateNotZero) {
+ if (ExplodedNode *N = C.GenerateSink(stateZero)) {
+ if (!BT)
+ BT = new BuiltinBug("Division by zero");
+
+ EnhancedBugReport *R =
+ new EnhancedBugReport(*BT, BT->getDescription(), N);
+
+ R->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue,
+ bugreporter::GetDenomExpr(N));
+
+ C.EmitReport(R);
+ }
+ return;
+ }
+
+ // If we get here, then the denom should not be zero. We abandon the implicit
+ // zero denom case for now.
+ C.addTransition(stateNotZero);
+}
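
// Editorial example (not part of the patch): AssumeDual() splits the state
// on the denominator, so the report fires only where zero is the sole
// feasible value.
static int divzero_example(int n, int d) {
  if (d == 0)
    return n / d; // flagged: 'd' is provably zero on this path
  return n / d;   // not flagged: 'd' is known non-zero here
}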
diff --git a/contrib/llvm/tools/clang/lib/Checker/Environment.cpp b/contrib/llvm/tools/clang/lib/Checker/Environment.cpp
new file mode 100644
index 0000000..addfc21
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/Environment.cpp
@@ -0,0 +1,191 @@
+//== Environment.cpp - Map from Stmt* to Locations/Values -------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Environment and EnvironmentManager classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+
+using namespace clang;
+
+SVal Environment::GetSVal(const Stmt *E, ValueManager& ValMgr) const {
+
+ for (;;) {
+
+ switch (E->getStmtClass()) {
+
+ case Stmt::AddrLabelExprClass:
+ return ValMgr.makeLoc(cast<AddrLabelExpr>(E));
+
+ // ParenExprs are no-ops.
+
+ case Stmt::ParenExprClass:
+ E = cast<ParenExpr>(E)->getSubExpr();
+ continue;
+
+ case Stmt::CharacterLiteralClass: {
+ const CharacterLiteral* C = cast<CharacterLiteral>(E);
+ return ValMgr.makeIntVal(C->getValue(), C->getType());
+ }
+
+ case Stmt::CXXBoolLiteralExprClass: {
+ const SVal *X = ExprBindings.lookup(E);
+ if (X)
+ return *X;
+ else
+ return ValMgr.makeIntVal(cast<CXXBoolLiteralExpr>(E));
+ }
+ case Stmt::IntegerLiteralClass: {
+ // In C++, this expression may have been bound to a temporary object.
+ SVal const *X = ExprBindings.lookup(E);
+ if (X)
+ return *X;
+ else
+ return ValMgr.makeIntVal(cast<IntegerLiteral>(E));
+ }
+
+ // Casts where the source and target type are the same
+ // are no-ops. We blast through these to get the descendant
+ // subexpression that has a value.
+
+ case Stmt::ImplicitCastExprClass:
+ case Stmt::CStyleCastExprClass: {
+ const CastExpr* C = cast<CastExpr>(E);
+ QualType CT = C->getType();
+
+ if (CT->isVoidType())
+ return UnknownVal();
+
+ break;
+ }
+
+ // Handle all other Stmt* using a lookup.
+
+ default:
+ break;
+ };
+
+ break;
+ }
+
+ return LookupExpr(E);
+}
+
+Environment EnvironmentManager::BindExpr(Environment Env, const Stmt *S,
+ SVal V, bool Invalidate) {
+ assert(S);
+
+ if (V.isUnknown()) {
+ if (Invalidate)
+ return Environment(F.Remove(Env.ExprBindings, S));
+ else
+ return Env;
+ }
+
+ return Environment(F.Add(Env.ExprBindings, S, V));
+}
+
+namespace {
+class MarkLiveCallback : public SymbolVisitor {
+ SymbolReaper &SymReaper;
+public:
+ MarkLiveCallback(SymbolReaper &symreaper) : SymReaper(symreaper) {}
+ bool VisitSymbol(SymbolRef sym) { SymReaper.markLive(sym); return true; }
+};
+} // end anonymous namespace
+
+static bool isBlockExprInCallers(const Stmt *E, const LocationContext *LC) {
+ const LocationContext *ParentLC = LC->getParent();
+ while (ParentLC) {
+ CFG &C = *ParentLC->getCFG();
+ if (C.isBlkExpr(E))
+ return true;
+ ParentLC = ParentLC->getParent();
+ }
+
+ return false;
+}
+
+
+// RemoveDeadBindings:
+// - Remove subexpression bindings.
+// - Remove dead block expression bindings.
+// - Keep live block expression bindings:
+// - Mark their reachable symbols live in SymbolReaper,
+// see ScanReachableSymbols.
+// - Mark the region in DRoots if the binding is a loc::MemRegionVal.
+
+Environment
+EnvironmentManager::RemoveDeadBindings(Environment Env, const Stmt *S,
+ SymbolReaper &SymReaper,
+ const GRState *ST,
+ llvm::SmallVectorImpl<const MemRegion*> &DRoots) {
+
+ CFG &C = *SymReaper.getLocationContext()->getCFG();
+
+ // We construct a new Environment object entirely, as this is cheaper than
+ // individually removing all the subexpression bindings (which will greatly
+ // outnumber block-level expression bindings).
+ Environment NewEnv = getInitialEnvironment();
+
+ // Iterate over the block-expr bindings.
+ for (Environment::iterator I = Env.begin(), E = Env.end();
+ I != E; ++I) {
+
+ const Stmt *BlkExpr = I.getKey();
+ const SVal &X = I.getData();
+
+ // Block-level expressions in callers are assumed always live.
+ if (isBlockExprInCallers(BlkExpr, SymReaper.getLocationContext())) {
+ NewEnv.ExprBindings = F.Add(NewEnv.ExprBindings, BlkExpr, X);
+
+ if (isa<loc::MemRegionVal>(X)) {
+ const MemRegion* R = cast<loc::MemRegionVal>(X).getRegion();
+ DRoots.push_back(R);
+ }
+
+ // Mark all symbols in the block expr's value live.
+ MarkLiveCallback cb(SymReaper);
+ ST->scanReachableSymbols(X, cb);
+ continue;
+ }
+
+ // Not a block-level expression?
+ if (!C.isBlkExpr(BlkExpr))
+ continue;
+
+ if (SymReaper.isLive(S, BlkExpr)) {
+ // Copy the binding to the new map.
+ NewEnv.ExprBindings = F.Add(NewEnv.ExprBindings, BlkExpr, X);
+
+ // If the block expr's value is a memory region, then mark that region.
+ if (isa<loc::MemRegionVal>(X)) {
+ const MemRegion* R = cast<loc::MemRegionVal>(X).getRegion();
+ DRoots.push_back(R);
+ }
+
+ // Mark all symbols in the block expr's value live.
+ MarkLiveCallback cb(SymReaper);
+ ST->scanReachableSymbols(X, cb);
+ continue;
+ }
+
+ // Otherwise the expression is dead, with a couple of exceptions.
+ // Do not clean out LogicalExpr or ConditionalOperator bindings: such an
+ // expression is dead at its own beginning, but we still need its
+ // UndefinedVal marker to determine its SVal.
+ if (X.isUndef() && cast<UndefinedVal>(X).getData())
+ NewEnv.ExprBindings = F.Add(NewEnv.ExprBindings, BlkExpr, X);
+ }
+
+ return NewEnv;
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/ExplodedGraph.cpp b/contrib/llvm/tools/clang/lib/Checker/ExplodedGraph.cpp
new file mode 100644
index 0000000..20429b9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/ExplodedGraph.cpp
@@ -0,0 +1,281 @@
+//=-- ExplodedGraph.cpp - Local, Path-Sens. "Exploded Graph" -*- C++ -*------=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the template classes ExplodedNode and ExplodedGraph,
+// which represent a path-sensitive, intra-procedural "exploded graph."
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/ExplodedGraph.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/AST/Stmt.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include <vector>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Node auditing.
+//===----------------------------------------------------------------------===//
+
+// An out of line virtual method to provide a home for the class vtable.
+ExplodedNode::Auditor::~Auditor() {}
+
+#ifndef NDEBUG
+static ExplodedNode::Auditor* NodeAuditor = 0;
+#endif
+
+void ExplodedNode::SetAuditor(ExplodedNode::Auditor* A) {
+#ifndef NDEBUG
+ NodeAuditor = A;
+#endif
+}
+
+//===----------------------------------------------------------------------===//
+// ExplodedNode.
+//===----------------------------------------------------------------------===//
+
+static inline BumpVector<ExplodedNode*>& getVector(void* P) {
+ return *reinterpret_cast<BumpVector<ExplodedNode*>*>(P);
+}
+
+void ExplodedNode::addPredecessor(ExplodedNode* V, ExplodedGraph &G) {
+ assert (!V->isSink());
+ Preds.addNode(V, G);
+ V->Succs.addNode(this, G);
+#ifndef NDEBUG
+ if (NodeAuditor) NodeAuditor->AddEdge(V, this);
+#endif
+}
+
+void ExplodedNode::NodeGroup::addNode(ExplodedNode* N, ExplodedGraph &G) {
+ assert((reinterpret_cast<uintptr_t>(N) & Mask) == 0x0);
+ assert(!getFlag());
+
+ if (getKind() == Size1) {
+ if (ExplodedNode* NOld = getNode()) {
+ BumpVectorContext &Ctx = G.getNodeAllocator();
+ BumpVector<ExplodedNode*> *V =
+ G.getAllocator().Allocate<BumpVector<ExplodedNode*> >();
+ new (V) BumpVector<ExplodedNode*>(Ctx, 4);
+
+ assert((reinterpret_cast<uintptr_t>(V) & Mask) == 0x0);
+ V->push_back(NOld, Ctx);
+ V->push_back(N, Ctx);
+ P = reinterpret_cast<uintptr_t>(V) | SizeOther;
+ assert(getPtr() == (void*) V);
+ assert(getKind() == SizeOther);
+ }
+ else {
+ P = reinterpret_cast<uintptr_t>(N);
+ assert(getKind() == Size1);
+ }
+ }
+ else {
+ assert(getKind() == SizeOther);
+ getVector(getPtr()).push_back(N, G.getNodeAllocator());
+ }
+}
+
+unsigned ExplodedNode::NodeGroup::size() const {
+ if (getFlag())
+ return 0;
+
+ if (getKind() == Size1)
+ return getNode() ? 1 : 0;
+ else
+ return getVector(getPtr()).size();
+}
+
+ExplodedNode **ExplodedNode::NodeGroup::begin() const {
+ if (getFlag())
+ return NULL;
+
+ if (getKind() == Size1)
+ return (ExplodedNode**) (getPtr() ? &P : NULL);
+ else
+ return const_cast<ExplodedNode**>(&*(getVector(getPtr()).begin()));
+}
+
+ExplodedNode** ExplodedNode::NodeGroup::end() const {
+ if (getFlag())
+ return NULL;
+
+ if (getKind() == Size1)
+ return (ExplodedNode**) (getPtr() ? &P+1 : NULL);
+ else {
+ // The vector is never empty in this case, so its end() already points
+ // one past the last element and can be returned directly.
+ return const_cast<ExplodedNode**>(getVector(getPtr()).end());
+ }
+}
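
// Editorial sketch (not part of the patch; 'TaggedPtrSketch' and its members
// are hypothetical names): the pointer-tagging idea NodeGroup relies on.
// Node and vector allocations are word-aligned, so the low bits of the
// stored pointer are free to act as a discriminator between the one-node
// and many-node representations.
#include <cstdint>
struct TaggedPtrSketch {
  enum Kind { Single = 0x0, Vector = 0x1 };
  uintptr_t P; // pointer payload in the high bits, tag in the low bits
  Kind getKind() const { return static_cast<Kind>(P & 0x1); }
  void *getPtr() const { return reinterpret_cast<void *>(P & ~uintptr_t(0x3)); }
};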
+
+ExplodedNode *ExplodedGraph::getNode(const ProgramPoint& L,
+ const GRState* State, bool* IsNew) {
+ // Profile 'State' to determine if we already have an existing node.
+ llvm::FoldingSetNodeID profile;
+ void* InsertPos = 0;
+
+ NodeTy::Profile(profile, L, State);
+ NodeTy* V = Nodes.FindNodeOrInsertPos(profile, InsertPos);
+
+ if (!V) {
+ // Allocate a new node.
+ V = (NodeTy*) getAllocator().Allocate<NodeTy>();
+ new (V) NodeTy(L, State);
+
+ // Insert the node into the node set and return it.
+ Nodes.InsertNode(V, InsertPos);
+
+ ++NumNodes;
+
+ if (IsNew) *IsNew = true;
+ }
+ else
+ if (IsNew) *IsNew = false;
+
+ return V;
+}
+
+std::pair<ExplodedGraph*, InterExplodedGraphMap*>
+ExplodedGraph::Trim(const NodeTy* const* NBeg, const NodeTy* const* NEnd,
+ llvm::DenseMap<const void*, const void*> *InverseMap) const {
+
+ if (NBeg == NEnd)
+ return std::make_pair((ExplodedGraph*) 0,
+ (InterExplodedGraphMap*) 0);
+
+ assert (NBeg < NEnd);
+
+ llvm::OwningPtr<InterExplodedGraphMap> M(new InterExplodedGraphMap());
+
+ ExplodedGraph* G = TrimInternal(NBeg, NEnd, M.get(), InverseMap);
+
+ return std::make_pair(static_cast<ExplodedGraph*>(G), M.take());
+}
+
+ExplodedGraph*
+ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources,
+ const ExplodedNode* const* EndSources,
+ InterExplodedGraphMap* M,
+ llvm::DenseMap<const void*, const void*> *InverseMap) const {
+
+ typedef llvm::DenseSet<const ExplodedNode*> Pass1Ty;
+ Pass1Ty Pass1;
+
+ typedef llvm::DenseMap<const ExplodedNode*, ExplodedNode*> Pass2Ty;
+ Pass2Ty& Pass2 = M->M;
+
+ llvm::SmallVector<const ExplodedNode*, 10> WL1, WL2;
+
+ // ===- Pass 1 (reverse DFS) -===
+ for (const ExplodedNode* const* I = BeginSources; I != EndSources; ++I) {
+ assert(*I);
+ WL1.push_back(*I);
+ }
+
+ // Process the first worklist until it is empty. Because it is a
+ // SmallVector used as a stack, nodes are visited in LIFO (DFS) order.
+ while (!WL1.empty()) {
+ const ExplodedNode *N = WL1.back();
+ WL1.pop_back();
+
+ // Have we already visited this node? If so, continue to the next one.
+ if (Pass1.count(N))
+ continue;
+
+ // Otherwise, mark this node as visited.
+ Pass1.insert(N);
+
+ // If this is a root enqueue it to the second worklist.
+ if (N->Preds.empty()) {
+ WL2.push_back(N);
+ continue;
+ }
+
+ // Visit our predecessors and enqueue them.
+ for (ExplodedNode** I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I)
+ WL1.push_back(*I);
+ }
+
+ // We didn't hit a root? Return with a null pointer for the new graph.
+ if (WL2.empty())
+ return 0;
+
+ // Create an empty graph.
+ ExplodedGraph* G = MakeEmptyGraph();
+
+ // ===- Pass 2 (forward DFS to construct the new graph) -===
+ while (!WL2.empty()) {
+ const ExplodedNode* N = WL2.back();
+ WL2.pop_back();
+
+ // Skip this node if we have already processed it.
+ if (Pass2.find(N) != Pass2.end())
+ continue;
+
+ // Create the corresponding node in the new graph and record the mapping
+ // from the old node to the new node.
+ ExplodedNode* NewN = G->getNode(N->getLocation(), N->State, NULL);
+ Pass2[N] = NewN;
+
+ // Also record the reverse mapping from the new node to the old node.
+ if (InverseMap) (*InverseMap)[NewN] = N;
+
+ // If this node is a root, designate it as such in the graph.
+ if (N->Preds.empty())
+ G->addRoot(NewN);
+
+ // In the case that some of the intended predecessors of NewN have already
+ // been created, we should hook them up as predecessors.
+
+ // Walk through the predecessors of 'N' and hook up their corresponding
+ // nodes in the new graph (if any) to the freshly created node.
+ for (ExplodedNode **I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I) {
+ Pass2Ty::iterator PI = Pass2.find(*I);
+ if (PI == Pass2.end())
+ continue;
+
+ NewN->addPredecessor(PI->second, *G);
+ }
+
+ // In the case that some of the intended successors of NewN have already
+ // been created, we should hook them up as successors. Otherwise, enqueue
+ // the new nodes from the original graph that should have nodes created
+ // in the new graph.
+ for (ExplodedNode **I=N->Succs.begin(), **E=N->Succs.end(); I!=E; ++I) {
+ Pass2Ty::iterator PI = Pass2.find(*I);
+ if (PI != Pass2.end()) {
+ PI->second->addPredecessor(NewN, *G);
+ continue;
+ }
+
+ // Enqueue nodes to the worklist that were marked during pass 1.
+ if (Pass1.count(*I))
+ WL2.push_back(*I);
+ }
+
+ // Finally, explicitly mark nodes that were sinks in the original graph
+ // as sinks in the new graph.
+ if (N->isSink())
+ NewN->markAsSink();
+ }
+
+ return G;
+}
+
+ExplodedNode*
+InterExplodedGraphMap::getMappedNode(const ExplodedNode* N) const {
+ llvm::DenseMap<const ExplodedNode*, ExplodedNode*>::const_iterator I =
+ M.find(N);
+
+ return I == M.end() ? 0 : I->second;
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Checker/FixedAddressChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/FixedAddressChecker.cpp
new file mode 100644
index 0000000..4fce45b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/FixedAddressChecker.cpp
@@ -0,0 +1,71 @@
+//=== FixedAddressChecker.cpp - Fixed address usage checker ----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines FixedAddressChecker, a builtin checker that checks for
+// assignment of a fixed address to a pointer.
+// This check corresponds to CWE-587.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+
+using namespace clang;
+
+namespace {
+class FixedAddressChecker
+ : public CheckerVisitor<FixedAddressChecker> {
+ BuiltinBug *BT;
+public:
+ FixedAddressChecker() : BT(0) {}
+ static void *getTag();
+ void PreVisitBinaryOperator(CheckerContext &C, const BinaryOperator *B);
+};
+}
+
+void *FixedAddressChecker::getTag() {
+ static int x;
+ return &x;
+}
+
+void FixedAddressChecker::PreVisitBinaryOperator(CheckerContext &C,
+ const BinaryOperator *B) {
+ // Using a fixed address is not portable because that address will probably
+ // not be valid in all environments or platforms.
+
+ if (B->getOpcode() != BinaryOperator::Assign)
+ return;
+
+ QualType T = B->getType();
+ if (!T->isPointerType())
+ return;
+
+ const GRState *state = C.getState();
+
+ SVal RV = state->getSVal(B->getRHS());
+
+ if (!RV.isConstant() || RV.isZeroConstant())
+ return;
+
+ if (ExplodedNode *N = C.GenerateNode()) {
+ if (!BT)
+ BT = new BuiltinBug("Use fixed address",
+ "Using a fixed address is not portable because that "
+ "address will probably not be valid in all "
+ "environments or platforms.");
+ RangedBugReport *R = new RangedBugReport(*BT, BT->getDescription(), N);
+ R->addRange(B->getRHS()->getSourceRange());
+ C.EmitReport(R);
+ }
+}
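
// Editorial example (not part of the patch): the assignment this checker
// flags, a non-null integer constant stored into a pointer.
static void fixed_address_example(void) {
  int *p;
  p = (int *)0x2040; // flagged: fixed addresses are not portable
  p = 0;             // not flagged: isZeroConstant() exempts null
  (void)p;
}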
+
+void clang::RegisterFixedAddressChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new FixedAddressChecker());
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/FlatStore.cpp b/contrib/llvm/tools/clang/lib/Checker/FlatStore.cpp
new file mode 100644
index 0000000..7f1c579
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/FlatStore.cpp
@@ -0,0 +1,168 @@
+//=== FlatStore.cpp - Flat region-based store model -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "llvm/ADT/ImmutableIntervalMap.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace clang;
+using llvm::Interval;
+
+// The actual store type.
+typedef llvm::ImmutableIntervalMap<SVal> BindingVal;
+typedef llvm::ImmutableMap<const MemRegion *, BindingVal> RegionBindings;
+
+namespace {
+class FlatStoreManager : public StoreManager {
+ RegionBindings::Factory RBFactory;
+ BindingVal::Factory BVFactory;
+
+public:
+ FlatStoreManager(GRStateManager &mgr)
+ : StoreManager(mgr),
+ RBFactory(mgr.getAllocator()),
+ BVFactory(mgr.getAllocator()) {}
+
+ SVal Retrieve(Store store, Loc L, QualType T);
+ Store Bind(Store store, Loc L, SVal val);
+ Store Remove(Store St, Loc L);
+ Store BindCompoundLiteral(Store store, const CompoundLiteralExpr* cl,
+ const LocationContext *LC, SVal v);
+
+ Store getInitialStore(const LocationContext *InitLoc) {
+ return RBFactory.GetEmptyMap().getRoot();
+ }
+
+ SubRegionMap *getSubRegionMap(Store store) {
+ return 0;
+ }
+
+ SVal ArrayToPointer(Loc Array);
+ const GRState *RemoveDeadBindings(GRState &state, Stmt* Loc,
+ const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper,
+ llvm::SmallVectorImpl<const MemRegion*>& RegionRoots){
+ return StateMgr.getPersistentState(state);
+ }
+
+ Store BindDecl(Store store, const VarRegion *VR, SVal initVal);
+
+ Store BindDeclWithNoInit(Store store, const VarRegion *VR);
+
+ typedef llvm::DenseSet<SymbolRef> InvalidatedSymbols;
+
+ Store InvalidateRegion(Store store, const MemRegion *R, const Expr *E,
+ unsigned Count, InvalidatedSymbols *IS);
+
+ void print(Store store, llvm::raw_ostream& Out, const char* nl,
+ const char *sep);
+ void iterBindings(Store store, BindingsHandler& f);
+
+private:
+ static RegionBindings getRegionBindings(Store store) {
+ return RegionBindings(static_cast<const RegionBindings::TreeTy*>(store));
+ }
+
+ Interval RegionToInterval(const MemRegion *R);
+
+ SVal RetrieveRegionWithNoBinding(const MemRegion *R, QualType T);
+};
+} // end anonymous namespace
+
+StoreManager *clang::CreateFlatStoreManager(GRStateManager &StMgr) {
+ return new FlatStoreManager(StMgr);
+}
+
+SVal FlatStoreManager::Retrieve(Store store, Loc L, QualType T) {
+ const MemRegion *R = cast<loc::MemRegionVal>(L).getRegion();
+ Interval I = RegionToInterval(R);
+ RegionBindings B = getRegionBindings(store);
+ const BindingVal *BV = B.lookup(R);
+ if (BV) {
+ const SVal *V = BVFactory.Lookup(*BV, I);
+ if (V)
+ return *V;
+ else
+ return RetrieveRegionWithNoBinding(R, T);
+ }
+ return RetrieveRegionWithNoBinding(R, T);
+}
+
+SVal FlatStoreManager::RetrieveRegionWithNoBinding(const MemRegion *R,
+ QualType T) {
+ if (R->hasStackNonParametersStorage())
+ return UndefinedVal();
+ else
+ return ValMgr.getRegionValueSymbolVal(cast<TypedRegion>(R));
+}
+
+Store FlatStoreManager::Bind(Store store, Loc L, SVal val) {
+ const MemRegion *R = cast<loc::MemRegionVal>(L).getRegion();
+ RegionBindings B = getRegionBindings(store);
+ const BindingVal *V = B.lookup(R);
+
+ BindingVal BV = BVFactory.GetEmptyMap();
+ if (V)
+ BV = *V;
+
+ Interval I = RegionToInterval(R);
+ BV = BVFactory.Add(BV, I, val);
+ B = RBFactory.Add(B, R, BV);
+ return B.getRoot();
+}
+
+Store FlatStoreManager::Remove(Store store, Loc L) {
+ return store;
+}
+
+Store FlatStoreManager::BindCompoundLiteral(Store store,
+ const CompoundLiteralExpr* cl,
+ const LocationContext *LC,
+ SVal v) {
+ return store;
+}
+
+SVal FlatStoreManager::ArrayToPointer(Loc Array) {
+ return Array;
+}
+
+Store FlatStoreManager::BindDecl(Store store, const VarRegion *VR,
+ SVal initVal) {
+ return store;
+}
+
+Store FlatStoreManager::BindDeclWithNoInit(Store store, const VarRegion *VR) {
+ return store;
+}
+
+Store FlatStoreManager::InvalidateRegion(Store store, const MemRegion *R,
+ const Expr *E, unsigned Count,
+ InvalidatedSymbols *IS) {
+ return store;
+}
+
+void FlatStoreManager::print(Store store, llvm::raw_ostream& Out,
+ const char* nl, const char *sep) {
+}
+
+void FlatStoreManager::iterBindings(Store store, BindingsHandler& f) {
+}
+
+Interval FlatStoreManager::RegionToInterval(const MemRegion *R) {
+ switch (R->getKind()) {
+ case MemRegion::VarRegionKind: {
+ QualType T = cast<VarRegion>(R)->getValueType(Ctx);
+ uint64_t Size = Ctx.getTypeSize(T);
+ return Interval(0, Size-1);
+ }
+ default:
+ llvm_unreachable("Region kind unhandled.");
+ return Interval(0, 0);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRBlockCounter.cpp b/contrib/llvm/tools/clang/lib/Checker/GRBlockCounter.cpp
new file mode 100644
index 0000000..cd26060
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/GRBlockCounter.cpp
@@ -0,0 +1,85 @@
+//==- GRBlockCounter.cpp - ADT for counting block visits -----------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines GRBlockCounter, an abstract data type used to count
+// the number of times a given block has been visited along a path
+// analyzed by GRCoreEngine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/GRBlockCounter.h"
+#include "llvm/ADT/ImmutableMap.h"
+
+using namespace clang;
+
+namespace {
+
+class CountKey {
+ const StackFrameContext *CallSite;
+ unsigned BlockID;
+
+public:
+ CountKey(const StackFrameContext *CS, unsigned ID)
+ : CallSite(CS), BlockID(ID) {}
+
+ bool operator==(const CountKey &RHS) const {
+ return (CallSite == RHS.CallSite) && (BlockID == RHS.BlockID);
+ }
+
+ bool operator<(const CountKey &RHS) const {
+ return (CallSite == RHS.CallSite) ? (BlockID < RHS.BlockID)
+ : (CallSite < RHS.CallSite);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddPointer(CallSite);
+ ID.AddInteger(BlockID);
+ }
+};
+
+}
+
+typedef llvm::ImmutableMap<CountKey, unsigned> CountMap;
+
+static inline CountMap GetMap(void* D) {
+ return CountMap(static_cast<CountMap::TreeTy*>(D));
+}
+
+static inline CountMap::Factory& GetFactory(void* F) {
+ return *static_cast<CountMap::Factory*>(F);
+}
+
+unsigned GRBlockCounter::getNumVisited(const StackFrameContext *CallSite,
+ unsigned BlockID) const {
+ CountMap M = GetMap(Data);
+ CountMap::data_type* T = M.lookup(CountKey(CallSite, BlockID));
+ return T ? *T : 0;
+}
+
+GRBlockCounter::Factory::Factory(llvm::BumpPtrAllocator& Alloc) {
+ F = new CountMap::Factory(Alloc);
+}
+
+GRBlockCounter::Factory::~Factory() {
+ delete static_cast<CountMap::Factory*>(F);
+}
+
+GRBlockCounter
+GRBlockCounter::Factory::IncrementCount(GRBlockCounter BC,
+ const StackFrameContext *CallSite,
+ unsigned BlockID) {
+ return GRBlockCounter(GetFactory(F).Add(GetMap(BC.Data),
+ CountKey(CallSite, BlockID),
+ BC.getNumVisited(CallSite, BlockID)+1).getRoot());
+}
+
+GRBlockCounter
+GRBlockCounter::Factory::GetEmptyCounter() {
+ return GRBlockCounter(GetFactory(F).GetEmptyMap().getRoot());
+}
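+
+// Illustrative use (a sketch; 'F', 'SFC', 'B', and 'MaxVisit' stand for a
+// factory, stack frame, block, and hypothetical visit limit):
+//
+//   GRBlockCounter BC = F.GetEmptyCounter();
+//   BC = F.IncrementCount(BC, SFC, B->getBlockID());
+//   if (BC.getNumVisited(SFC, B->getBlockID()) >= MaxVisit)
+//     ; // stop exploring this block along the current path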
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRCXXExprEngine.cpp b/contrib/llvm/tools/clang/lib/Checker/GRCXXExprEngine.cpp
new file mode 100644
index 0000000..18e112c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/GRCXXExprEngine.cpp
@@ -0,0 +1,246 @@
+//===- GRCXXExprEngine.cpp - C++ expr evaluation engine ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the C++ expression evaluation engine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/AnalysisManager.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/AST/DeclCXX.h"
+
+using namespace clang;
+
+void GRExprEngine::EvalArguments(ExprIterator AI, ExprIterator AE,
+ const FunctionProtoType *FnType,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+ llvm::SmallVector<CallExprWLItem, 20> WorkList;
+ WorkList.reserve(AE - AI);
+ WorkList.push_back(CallExprWLItem(AI, Pred));
+
+ while (!WorkList.empty()) {
+ CallExprWLItem Item = WorkList.back();
+ WorkList.pop_back();
+
+ if (Item.I == AE) {
+ Dst.insert(Item.N);
+ continue;
+ }
+
+ ExplodedNodeSet Tmp;
+ const unsigned ParamIdx = Item.I - AI;
+    bool VisitAsLvalue =
+      FnType ? FnType->getArgType(ParamIdx)->isReferenceType() : false;
+ if (VisitAsLvalue)
+ VisitLValue(*Item.I, Item.N, Tmp);
+ else
+ Visit(*Item.I, Item.N, Tmp);
+
+ ++(Item.I);
+ for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI != NE; ++NI)
+ WorkList.push_back(CallExprWLItem(Item.I, *NI));
+ }
+}
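+
+// For a call f(a, b), the stack-based worklist above explores the arguments
+// depth-first: if evaluating 'a' from Pred yields nodes {N1, N2}, 'b' is then
+// evaluated once from N1 and once from N2, and every item whose iterator
+// reaches AE deposits its node into Dst.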
+
+const CXXThisRegion *GRExprEngine::getCXXThisRegion(const CXXMethodDecl *D,
+ const StackFrameContext *SFC) {
+ Type *T = D->getParent()->getTypeForDecl();
+ QualType PT = getContext().getPointerType(QualType(T,0));
+ return ValMgr.getRegionManager().getCXXThisRegion(PT, SFC);
+}
+
+void GRExprEngine::CreateCXXTemporaryObject(Expr *Ex, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ ExplodedNodeSet Tmp;
+ Visit(Ex, Pred, Tmp);
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
+ const GRState *state = GetState(*I);
+
+ // Bind the temporary object to the value of the expression. Then bind
+ // the expression to the location of the object.
+ SVal V = state->getSVal(Ex);
+
+ const MemRegion *R =
+ ValMgr.getRegionManager().getCXXObjectRegion(Ex,
+ Pred->getLocationContext());
+
+ state = state->bindLoc(loc::MemRegionVal(R), V);
+ MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, loc::MemRegionVal(R)));
+ }
+}
+
+void GRExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *E, SVal Dest,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ if (E->isElidable()) {
+ VisitAggExpr(E->getArg(0), Dest, Pred, Dst);
+ return;
+ }
+
+ const CXXConstructorDecl *CD = E->getConstructor();
+ assert(CD);
+
+ if (!(CD->isThisDeclarationADefinition() && AMgr.shouldInlineCall()))
+ // FIXME: invalidate the object.
+ return;
+
+ // Evaluate other arguments.
+ ExplodedNodeSet ArgsEvaluated;
+ const FunctionProtoType *FnType = CD->getType()->getAs<FunctionProtoType>();
+ EvalArguments(const_cast<CXXConstructExpr*>(E)->arg_begin(),
+ const_cast<CXXConstructExpr*>(E)->arg_end(),
+ FnType, Pred, ArgsEvaluated);
+ // The callee stack frame context used to create the 'this' parameter region.
+ const StackFrameContext *SFC = AMgr.getStackFrame(CD,
+ Pred->getLocationContext(),
+ E, Builder->getBlock(), Builder->getIndex());
+
+ const CXXThisRegion *ThisR = getCXXThisRegion(E->getConstructor(), SFC);
+
+ CallEnter Loc(E, CD, Pred->getLocationContext());
+ for (ExplodedNodeSet::iterator NI = ArgsEvaluated.begin(),
+ NE = ArgsEvaluated.end(); NI != NE; ++NI) {
+ const GRState *state = GetState(*NI);
+ // Setup 'this' region.
+ state = state->bindLoc(loc::MemRegionVal(ThisR), Dest);
+ ExplodedNode *N = Builder->generateNode(Loc, state, Pred);
+ if (N)
+ Dst.Add(N);
+ }
+}
+
+void GRExprEngine::VisitCXXMemberCallExpr(const CXXMemberCallExpr *MCE,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ // Get the method type.
+ const FunctionProtoType *FnType =
+ MCE->getCallee()->getType()->getAs<FunctionProtoType>();
+ assert(FnType && "Method type not available");
+
+ // Evaluate explicit arguments with a worklist.
+ ExplodedNodeSet ArgsEvaluated;
+ EvalArguments(const_cast<CXXMemberCallExpr*>(MCE)->arg_begin(),
+ const_cast<CXXMemberCallExpr*>(MCE)->arg_end(),
+ FnType, Pred, ArgsEvaluated);
+
+ // Evaluate the implicit object argument.
+ ExplodedNodeSet AllArgsEvaluated;
+ const MemberExpr *ME = dyn_cast<MemberExpr>(MCE->getCallee()->IgnoreParens());
+ if (!ME)
+ return;
+ Expr *ObjArgExpr = ME->getBase();
+ for (ExplodedNodeSet::iterator I = ArgsEvaluated.begin(),
+ E = ArgsEvaluated.end(); I != E; ++I) {
+ if (ME->isArrow())
+ Visit(ObjArgExpr, *I, AllArgsEvaluated);
+ else
+ VisitLValue(ObjArgExpr, *I, AllArgsEvaluated);
+ }
+
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
+ assert(MD && "not a CXXMethodDecl?");
+
+ if (!(MD->isThisDeclarationADefinition() && AMgr.shouldInlineCall()))
+ // FIXME: conservative method call evaluation.
+ return;
+
+ const StackFrameContext *SFC = AMgr.getStackFrame(MD,
+ Pred->getLocationContext(),
+ MCE,
+ Builder->getBlock(),
+ Builder->getIndex());
+ const CXXThisRegion *ThisR = getCXXThisRegion(MD, SFC);
+ CallEnter Loc(MCE, MD, Pred->getLocationContext());
+ for (ExplodedNodeSet::iterator I = AllArgsEvaluated.begin(),
+ E = AllArgsEvaluated.end(); I != E; ++I) {
+ // Set up 'this' region.
+ const GRState *state = GetState(*I);
+ state = state->bindLoc(loc::MemRegionVal(ThisR),state->getSVal(ObjArgExpr));
+ ExplodedNode *N = Builder->generateNode(Loc, state, *I);
+ if (N)
+ Dst.Add(N);
+ }
+}
+
+void GRExprEngine::VisitCXXNewExpr(CXXNewExpr *CNE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ if (CNE->isArray()) {
+ // FIXME: allocating an array has not been handled.
+ return;
+ }
+
+ unsigned Count = Builder->getCurrentBlockCount();
+ DefinedOrUnknownSVal SymVal = getValueManager().getConjuredSymbolVal(NULL,CNE,
+ CNE->getType(), Count);
+ const MemRegion *NewReg = cast<loc::MemRegionVal>(SymVal).getRegion();
+
+ QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
+
+ const ElementRegion *EleReg =
+ getStoreManager().GetElementZeroRegion(NewReg, ObjTy);
+
+ // Evaluate constructor arguments.
+ const FunctionProtoType *FnType = NULL;
+ const CXXConstructorDecl *CD = CNE->getConstructor();
+ if (CD)
+ FnType = CD->getType()->getAs<FunctionProtoType>();
+ ExplodedNodeSet ArgsEvaluated;
+ EvalArguments(CNE->constructor_arg_begin(), CNE->constructor_arg_end(),
+ FnType, Pred, ArgsEvaluated);
+
+ // Initialize the object region and bind the 'new' expression.
+ for (ExplodedNodeSet::iterator I = ArgsEvaluated.begin(),
+ E = ArgsEvaluated.end(); I != E; ++I) {
+ const GRState *state = GetState(*I);
+
+ if (ObjTy->isRecordType()) {
+ Store store = state->getStore();
+ StoreManager::InvalidatedSymbols IS;
+ store = getStoreManager().InvalidateRegion(store, EleReg, CNE, Count, &IS);
+ state = state->makeWithStore(store);
+ } else {
+ if (CNE->hasInitializer()) {
+ SVal V = state->getSVal(*CNE->constructor_arg_begin());
+ state = state->bindLoc(loc::MemRegionVal(EleReg), V);
+ } else {
+ // Explicitly set to undefined, because currently we retrieve symbolic
+ // value from symbolic region.
+ state = state->bindLoc(loc::MemRegionVal(EleReg), UndefinedVal());
+ }
+ }
+ state = state->BindExpr(CNE, loc::MemRegionVal(EleReg));
+ MakeNode(Dst, CNE, *I, state);
+ }
+}
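+
+// E.g. (illustrative) for 'new int(3)': a symbol is conjured for the heap
+// object, element 0 of its region is bound to 3, and the CXXNewExpr itself
+// evaluates to that element region's address.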
+
+void GRExprEngine::VisitCXXDeleteExpr(CXXDeleteExpr *CDE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ // Should do more checking.
+ ExplodedNodeSet ArgEvaluated;
+ Visit(CDE->getArgument(), Pred, ArgEvaluated);
+ for (ExplodedNodeSet::iterator I = ArgEvaluated.begin(),
+ E = ArgEvaluated.end(); I != E; ++I) {
+ const GRState *state = GetState(*I);
+ MakeNode(Dst, CDE, *I, state);
+ }
+}
+
+void GRExprEngine::VisitCXXThisExpr(CXXThisExpr *TE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ // Get the this object region from StoreManager.
+ const MemRegion *R =
+ ValMgr.getRegionManager().getCXXThisRegion(
+ getContext().getCanonicalType(TE->getType()),
+ Pred->getLocationContext());
+
+ const GRState *state = GetState(Pred);
+ SVal V = state->getSVal(loc::MemRegionVal(R));
+ MakeNode(Dst, TE, Pred, state->BindExpr(TE, V));
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRCoreEngine.cpp b/contrib/llvm/tools/clang/lib/Checker/GRCoreEngine.cpp
new file mode 100644
index 0000000..23a87d3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/GRCoreEngine.cpp
@@ -0,0 +1,722 @@
+//==- GRCoreEngine.cpp - Path-Sensitive Dataflow Engine ------------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a generic engine for intraprocedural, path-sensitive,
+// dataflow analysis via a graph reachability engine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/GRCoreEngine.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/AST/Expr.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/ADT/DenseMap.h"
+#include <vector>
+#include <queue>
+
+using llvm::cast;
+using llvm::isa;
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Worklist classes for exploration of reachable states.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class DFS : public GRWorkList {
+ llvm::SmallVector<GRWorkListUnit,20> Stack;
+public:
+ virtual bool hasWork() const {
+ return !Stack.empty();
+ }
+
+ virtual void Enqueue(const GRWorkListUnit& U) {
+ Stack.push_back(U);
+ }
+
+ virtual GRWorkListUnit Dequeue() {
+ assert (!Stack.empty());
+ const GRWorkListUnit& U = Stack.back();
+ Stack.pop_back(); // This technically "invalidates" U, but we are fine.
+ return U;
+ }
+};
+
+class BFS : public GRWorkList {
+ std::queue<GRWorkListUnit> Queue;
+public:
+ virtual bool hasWork() const {
+ return !Queue.empty();
+ }
+
+ virtual void Enqueue(const GRWorkListUnit& U) {
+ Queue.push(U);
+ }
+
+ virtual GRWorkListUnit Dequeue() {
+    // Don't use a const reference; the subsequent pop() would invalidate it.
+ GRWorkListUnit U = Queue.front();
+ Queue.pop();
+ return U;
+ }
+};
+
+} // end anonymous namespace
+
+// Place the dtor for GRWorkList here because it contains virtual member
+// functions, and we want the code for the dtor generated in one compilation
+// unit.
+GRWorkList::~GRWorkList() {}
+
+GRWorkList *GRWorkList::MakeDFS() { return new DFS(); }
+GRWorkList *GRWorkList::MakeBFS() { return new BFS(); }
+
+namespace {
+ class BFSBlockDFSContents : public GRWorkList {
+ std::queue<GRWorkListUnit> Queue;
+ llvm::SmallVector<GRWorkListUnit,20> Stack;
+ public:
+ virtual bool hasWork() const {
+ return !Queue.empty() || !Stack.empty();
+ }
+
+ virtual void Enqueue(const GRWorkListUnit& U) {
+ if (isa<BlockEntrance>(U.getNode()->getLocation()))
+ Queue.push(U);
+ else
+ Stack.push_back(U);
+ }
+
+ virtual GRWorkListUnit Dequeue() {
+ // Process all basic blocks to completion.
+ if (!Stack.empty()) {
+ const GRWorkListUnit& U = Stack.back();
+ Stack.pop_back(); // This technically "invalidates" U, but we are fine.
+ return U;
+ }
+
+ assert(!Queue.empty());
+      // Don't use a const reference; the subsequent pop() would invalidate
+      // it.
+ GRWorkListUnit U = Queue.front();
+ Queue.pop();
+ return U;
+ }
+ };
+} // end anonymous namespace
+
+GRWorkList* GRWorkList::MakeBFSBlockDFSContents() {
+ return new BFSBlockDFSContents();
+}
+
+//===----------------------------------------------------------------------===//
+// Core analysis engine.
+//===----------------------------------------------------------------------===//
+void GRCoreEngine::ProcessEndPath(GREndPathNodeBuilder& Builder) {
+ SubEngine.ProcessEndPath(Builder);
+}
+
+void GRCoreEngine::ProcessStmt(CFGElement E, GRStmtNodeBuilder& Builder) {
+ SubEngine.ProcessStmt(E, Builder);
+}
+
+bool GRCoreEngine::ProcessBlockEntrance(CFGBlock* Blk, const ExplodedNode *Pred,
+ GRBlockCounter BC) {
+ return SubEngine.ProcessBlockEntrance(Blk, Pred, BC);
+}
+
+void GRCoreEngine::ProcessBranch(Stmt* Condition, Stmt* Terminator,
+ GRBranchNodeBuilder& Builder) {
+ SubEngine.ProcessBranch(Condition, Terminator, Builder);
+}
+
+void GRCoreEngine::ProcessIndirectGoto(GRIndirectGotoNodeBuilder& Builder) {
+ SubEngine.ProcessIndirectGoto(Builder);
+}
+
+void GRCoreEngine::ProcessSwitch(GRSwitchNodeBuilder& Builder) {
+ SubEngine.ProcessSwitch(Builder);
+}
+
+void GRCoreEngine::ProcessCallEnter(GRCallEnterNodeBuilder &Builder) {
+ SubEngine.ProcessCallEnter(Builder);
+}
+
+void GRCoreEngine::ProcessCallExit(GRCallExitNodeBuilder &Builder) {
+ SubEngine.ProcessCallExit(Builder);
+}
+
+/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
+bool GRCoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps) {
+
+ if (G->num_roots() == 0) { // Initialize the analysis by constructing
+ // the root if none exists.
+
+ CFGBlock* Entry = &(L->getCFG()->getEntry());
+
+ assert (Entry->empty() &&
+ "Entry block must be empty.");
+
+ assert (Entry->succ_size() == 1 &&
+ "Entry block must have 1 successor.");
+
+ // Get the solitary successor.
+ CFGBlock* Succ = *(Entry->succ_begin());
+
+ // Construct an edge representing the
+ // starting location in the function.
+ BlockEdge StartLoc(Entry, Succ, L);
+
+ // Set the current block counter to being empty.
+ WList->setBlockCounter(BCounterFactory.GetEmptyCounter());
+
+ // Generate the root.
+ GenerateNode(StartLoc, getInitialState(L), 0);
+ }
+
+ while (Steps && WList->hasWork()) {
+ --Steps;
+ const GRWorkListUnit& WU = WList->Dequeue();
+
+ // Set the current block counter.
+ WList->setBlockCounter(WU.getBlockCounter());
+
+ // Retrieve the node.
+ ExplodedNode* Node = WU.getNode();
+
+ // Dispatch on the location type.
+ switch (Node->getLocation().getKind()) {
+ case ProgramPoint::BlockEdgeKind:
+ HandleBlockEdge(cast<BlockEdge>(Node->getLocation()), Node);
+ break;
+
+ case ProgramPoint::BlockEntranceKind:
+ HandleBlockEntrance(cast<BlockEntrance>(Node->getLocation()), Node);
+ break;
+
+ case ProgramPoint::BlockExitKind:
+      assert (false && "BlockExit locations never occur in forward analysis.");
+ break;
+
+ case ProgramPoint::CallEnterKind:
+ HandleCallEnter(cast<CallEnter>(Node->getLocation()), WU.getBlock(),
+ WU.getIndex(), Node);
+ break;
+
+ case ProgramPoint::CallExitKind:
+ HandleCallExit(cast<CallExit>(Node->getLocation()), Node);
+ break;
+
+ default:
+ assert(isa<PostStmt>(Node->getLocation()));
+ HandlePostStmt(cast<PostStmt>(Node->getLocation()), WU.getBlock(),
+ WU.getIndex(), Node);
+ break;
+ }
+ }
+
+ return WList->hasWork();
+}
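+
+// Illustrative driver (names hypothetical): a client runs the engine with a
+// step budget and can detect an aborted analysis from the return value.
+//
+//   if (Eng.ExecuteWorkList(InitLoc, /*Steps=*/150000))
+//     ; // budget exhausted: the worklist still had work left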
+
+void GRCoreEngine::HandleCallEnter(const CallEnter &L, const CFGBlock *Block,
+ unsigned Index, ExplodedNode *Pred) {
+ GRCallEnterNodeBuilder Builder(*this, Pred, L.getCallExpr(), L.getCallee(),
+ Block, Index);
+ ProcessCallEnter(Builder);
+}
+
+void GRCoreEngine::HandleCallExit(const CallExit &L, ExplodedNode *Pred) {
+ GRCallExitNodeBuilder Builder(*this, Pred);
+ ProcessCallExit(Builder);
+}
+
+void GRCoreEngine::HandleBlockEdge(const BlockEdge& L, ExplodedNode* Pred) {
+
+ CFGBlock* Blk = L.getDst();
+
+ // Check if we are entering the EXIT block.
+ if (Blk == &(L.getLocationContext()->getCFG()->getExit())) {
+
+ assert (L.getLocationContext()->getCFG()->getExit().size() == 0
+ && "EXIT block cannot contain Stmts.");
+
+ // Process the final state transition.
+ GREndPathNodeBuilder Builder(Blk, Pred, this);
+ ProcessEndPath(Builder);
+
+ // This path is done. Don't enqueue any more nodes.
+ return;
+ }
+
+ // FIXME: Should we allow ProcessBlockEntrance to also manipulate state?
+
+ if (ProcessBlockEntrance(Blk, Pred, WList->getBlockCounter()))
+    GenerateNode(BlockEntrance(Blk, Pred->getLocationContext()),
+                 Pred->State, Pred);
+}
+
+void GRCoreEngine::HandleBlockEntrance(const BlockEntrance& L,
+ ExplodedNode* Pred) {
+
+ // Increment the block counter.
+ GRBlockCounter Counter = WList->getBlockCounter();
+ Counter = BCounterFactory.IncrementCount(Counter,
+ Pred->getLocationContext()->getCurrentStackFrame(),
+ L.getBlock()->getBlockID());
+ WList->setBlockCounter(Counter);
+
+ // Process the entrance of the block.
+ if (CFGElement E = L.getFirstElement()) {
+ GRStmtNodeBuilder Builder(L.getBlock(), 0, Pred, this,
+ SubEngine.getStateManager());
+ ProcessStmt(E, Builder);
+ }
+ else
+ HandleBlockExit(L.getBlock(), Pred);
+}
+
+void GRCoreEngine::HandleBlockExit(CFGBlock * B, ExplodedNode* Pred) {
+
+ if (Stmt* Term = B->getTerminator()) {
+ switch (Term->getStmtClass()) {
+ default:
+ assert(false && "Analysis for this terminator not implemented.");
+ break;
+
+ case Stmt::BinaryOperatorClass: // '&&' and '||'
+ HandleBranch(cast<BinaryOperator>(Term)->getLHS(), Term, B, Pred);
+ return;
+
+ case Stmt::ConditionalOperatorClass:
+ HandleBranch(cast<ConditionalOperator>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ // FIXME: Use constant-folding in CFG construction to simplify this
+ // case.
+
+ case Stmt::ChooseExprClass:
+ HandleBranch(cast<ChooseExpr>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::DoStmtClass:
+ HandleBranch(cast<DoStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::ForStmtClass:
+ HandleBranch(cast<ForStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::ContinueStmtClass:
+ case Stmt::BreakStmtClass:
+ case Stmt::GotoStmtClass:
+ break;
+
+ case Stmt::IfStmtClass:
+ HandleBranch(cast<IfStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::IndirectGotoStmtClass: {
+ // Only 1 successor: the indirect goto dispatch block.
+ assert (B->succ_size() == 1);
+
+ GRIndirectGotoNodeBuilder
+ builder(Pred, B, cast<IndirectGotoStmt>(Term)->getTarget(),
+ *(B->succ_begin()), this);
+
+ ProcessIndirectGoto(builder);
+ return;
+ }
+
+ case Stmt::ObjCForCollectionStmtClass: {
+ // In the case of ObjCForCollectionStmt, it appears twice in a CFG:
+ //
+ // (1) inside a basic block, which represents the binding of the
+ // 'element' variable to a value.
+ // (2) in a terminator, which represents the branch.
+ //
+ // For (1), subengines will bind a value (i.e., 0 or 1) indicating
+      // whether or not the collection contains any more elements. We cannot
+ // just test to see if the element is nil because a container can
+ // contain nil elements.
+ HandleBranch(Term, Term, B, Pred);
+ return;
+ }
+
+ case Stmt::SwitchStmtClass: {
+ GRSwitchNodeBuilder builder(Pred, B, cast<SwitchStmt>(Term)->getCond(),
+ this);
+
+ ProcessSwitch(builder);
+ return;
+ }
+
+ case Stmt::WhileStmtClass:
+ HandleBranch(cast<WhileStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+ }
+ }
+
+  assert (B->succ_size() == 1 &&
+          "Blocks with no terminator should have exactly 1 successor.");
+
+ GenerateNode(BlockEdge(B, *(B->succ_begin()), Pred->getLocationContext()),
+ Pred->State, Pred);
+}
+
+void GRCoreEngine::HandleBranch(Stmt* Cond, Stmt* Term, CFGBlock * B,
+ ExplodedNode* Pred) {
+ assert (B->succ_size() == 2);
+
+ GRBranchNodeBuilder Builder(B, *(B->succ_begin()), *(B->succ_begin()+1),
+ Pred, this);
+
+ ProcessBranch(Cond, Term, Builder);
+}
+
+void GRCoreEngine::HandlePostStmt(const PostStmt& L, CFGBlock* B,
+ unsigned StmtIdx, ExplodedNode* Pred) {
+
+ assert (!B->empty());
+
+ if (StmtIdx == B->size())
+ HandleBlockExit(B, Pred);
+ else {
+ GRStmtNodeBuilder Builder(B, StmtIdx, Pred, this,
+ SubEngine.getStateManager());
+ ProcessStmt((*B)[StmtIdx], Builder);
+ }
+}
+
+/// GenerateNode - Utility method to generate nodes, hook up successors,
+/// and add nodes to the worklist.
+void GRCoreEngine::GenerateNode(const ProgramPoint& Loc,
+ const GRState* State, ExplodedNode* Pred) {
+
+ bool IsNew;
+ ExplodedNode* Node = G->getNode(Loc, State, &IsNew);
+
+ if (Pred)
+ Node->addPredecessor(Pred, *G); // Link 'Node' with its predecessor.
+ else {
+ assert (IsNew);
+ G->addRoot(Node); // 'Node' has no predecessor. Make it a root.
+ }
+
+ // Only add 'Node' to the worklist if it was freshly generated.
+ if (IsNew) WList->Enqueue(Node);
+}
+
+GRStmtNodeBuilder::GRStmtNodeBuilder(CFGBlock* b, unsigned idx,
+ ExplodedNode* N, GRCoreEngine* e,
+ GRStateManager &mgr)
+ : Eng(*e), B(*b), Idx(idx), Pred(N), Mgr(mgr), Auditor(0),
+ PurgingDeadSymbols(false), BuildSinks(false), HasGeneratedNode(false),
+ PointKind(ProgramPoint::PostStmtKind), Tag(0) {
+ Deferred.insert(N);
+ CleanedState = Pred->getState();
+}
+
+GRStmtNodeBuilder::~GRStmtNodeBuilder() {
+ for (DeferredTy::iterator I=Deferred.begin(), E=Deferred.end(); I!=E; ++I)
+ if (!(*I)->isSink())
+ GenerateAutoTransition(*I);
+}
+
+void GRStmtNodeBuilder::GenerateAutoTransition(ExplodedNode* N) {
+ assert (!N->isSink());
+
+ // Check if this node entered a callee.
+ if (isa<CallEnter>(N->getLocation())) {
+ // Still use the index of the CallExpr. It's needed to create the callee
+ // StackFrameContext.
+ Eng.WList->Enqueue(N, B, Idx);
+ return;
+ }
+
+ PostStmt Loc(getStmt(), N->getLocationContext());
+
+ if (Loc == N->getLocation()) {
+ // Note: 'N' should be a fresh node because otherwise it shouldn't be
+ // a member of Deferred.
+ Eng.WList->Enqueue(N, B, Idx+1);
+ return;
+ }
+
+ bool IsNew;
+ ExplodedNode* Succ = Eng.G->getNode(Loc, N->State, &IsNew);
+ Succ->addPredecessor(N, *Eng.G);
+
+ if (IsNew)
+ Eng.WList->Enqueue(Succ, B, Idx+1);
+}
+
+ExplodedNode* GRStmtNodeBuilder::MakeNode(ExplodedNodeSet& Dst, Stmt* S,
+ ExplodedNode* Pred, const GRState* St,
+ ProgramPoint::Kind K) {
+ const GRState* PredState = GetState(Pred);
+
+ // If the state hasn't changed, don't generate a new node.
+ if (!BuildSinks && St == PredState && Auditor == 0) {
+ Dst.Add(Pred);
+ return NULL;
+ }
+
+ ExplodedNode* N = generateNode(S, St, Pred, K);
+
+ if (N) {
+ if (BuildSinks)
+ N->markAsSink();
+ else {
+ if (Auditor && Auditor->Audit(N, Mgr))
+ N->markAsSink();
+
+ Dst.Add(N);
+ }
+ }
+
+ return N;
+}
+
+static ProgramPoint GetProgramPoint(const Stmt *S, ProgramPoint::Kind K,
+ const LocationContext *LC, const void *tag){
+ switch (K) {
+ default:
+ assert(false && "Unhandled ProgramPoint kind");
+ case ProgramPoint::PreStmtKind:
+ return PreStmt(S, LC, tag);
+ case ProgramPoint::PostStmtKind:
+ return PostStmt(S, LC, tag);
+ case ProgramPoint::PreLoadKind:
+ return PreLoad(S, LC, tag);
+ case ProgramPoint::PostLoadKind:
+ return PostLoad(S, LC, tag);
+ case ProgramPoint::PreStoreKind:
+ return PreStore(S, LC, tag);
+ case ProgramPoint::PostStoreKind:
+ return PostStore(S, LC, tag);
+ case ProgramPoint::PostLValueKind:
+ return PostLValue(S, LC, tag);
+ case ProgramPoint::PostPurgeDeadSymbolsKind:
+ return PostPurgeDeadSymbols(S, LC, tag);
+ }
+}
+
+ExplodedNode*
+GRStmtNodeBuilder::generateNodeInternal(const Stmt* S, const GRState* state,
+ ExplodedNode* Pred,
+ ProgramPoint::Kind K,
+ const void *tag) {
+
+ const ProgramPoint &L = GetProgramPoint(S, K, Pred->getLocationContext(),tag);
+ return generateNodeInternal(L, state, Pred);
+}
+
+ExplodedNode*
+GRStmtNodeBuilder::generateNodeInternal(const ProgramPoint &Loc,
+ const GRState* State,
+ ExplodedNode* Pred) {
+ bool IsNew;
+ ExplodedNode* N = Eng.G->getNode(Loc, State, &IsNew);
+ N->addPredecessor(Pred, *Eng.G);
+ Deferred.erase(Pred);
+
+ if (IsNew) {
+ Deferred.insert(N);
+ return N;
+ }
+
+ return NULL;
+}
+
+ExplodedNode* GRBranchNodeBuilder::generateNode(const GRState* State,
+ bool branch) {
+
+ // If the branch has been marked infeasible we should not generate a node.
+ if (!isFeasible(branch))
+ return NULL;
+
+ bool IsNew;
+
+ ExplodedNode* Succ =
+ Eng.G->getNode(BlockEdge(Src,branch ? DstT:DstF,Pred->getLocationContext()),
+ State, &IsNew);
+
+ Succ->addPredecessor(Pred, *Eng.G);
+
+ if (branch)
+ GeneratedTrue = true;
+ else
+ GeneratedFalse = true;
+
+ if (IsNew) {
+ Deferred.push_back(Succ);
+ return Succ;
+ }
+
+ return NULL;
+}
+
+GRBranchNodeBuilder::~GRBranchNodeBuilder() {
+ if (!GeneratedTrue) generateNode(Pred->State, true);
+ if (!GeneratedFalse) generateNode(Pred->State, false);
+
+ for (DeferredTy::iterator I=Deferred.begin(), E=Deferred.end(); I!=E; ++I)
+ if (!(*I)->isSink()) Eng.WList->Enqueue(*I);
+}
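+
+// Note: if a client never called generateNode() for one of the branches, the
+// destructor above generates it from the predecessor's unmodified state, so a
+// feasible branch is never silently dropped.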
+
+
+ExplodedNode*
+GRIndirectGotoNodeBuilder::generateNode(const iterator& I, const GRState* St,
+ bool isSink) {
+ bool IsNew;
+
+ ExplodedNode* Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
+ Pred->getLocationContext()), St, &IsNew);
+
+ Succ->addPredecessor(Pred, *Eng.G);
+
+ if (IsNew) {
+
+ if (isSink)
+ Succ->markAsSink();
+ else
+ Eng.WList->Enqueue(Succ);
+
+ return Succ;
+ }
+
+ return NULL;
+}
+
+
+ExplodedNode*
+GRSwitchNodeBuilder::generateCaseStmtNode(const iterator& I, const GRState* St){
+
+ bool IsNew;
+
+ ExplodedNode* Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
+ Pred->getLocationContext()), St, &IsNew);
+ Succ->addPredecessor(Pred, *Eng.G);
+
+ if (IsNew) {
+ Eng.WList->Enqueue(Succ);
+ return Succ;
+ }
+
+ return NULL;
+}
+
+
+ExplodedNode*
+GRSwitchNodeBuilder::generateDefaultCaseNode(const GRState* St, bool isSink) {
+
+ // Get the block for the default case.
+ assert (Src->succ_rbegin() != Src->succ_rend());
+ CFGBlock* DefaultBlock = *Src->succ_rbegin();
+
+ bool IsNew;
+
+ ExplodedNode* Succ = Eng.G->getNode(BlockEdge(Src, DefaultBlock,
+ Pred->getLocationContext()), St, &IsNew);
+ Succ->addPredecessor(Pred, *Eng.G);
+
+ if (IsNew) {
+ if (isSink)
+ Succ->markAsSink();
+ else
+ Eng.WList->Enqueue(Succ);
+
+ return Succ;
+ }
+
+ return NULL;
+}
+
+GREndPathNodeBuilder::~GREndPathNodeBuilder() {
+ // Auto-generate an EOP node if one has not been generated.
+ if (!HasGeneratedNode) {
+ // If we are in an inlined call, generate CallExit node.
+ if (Pred->getLocationContext()->getParent())
+ GenerateCallExitNode(Pred->State);
+ else
+ generateNode(Pred->State);
+ }
+}
+
+ExplodedNode*
+GREndPathNodeBuilder::generateNode(const GRState* State, const void *tag,
+ ExplodedNode* P) {
+ HasGeneratedNode = true;
+ bool IsNew;
+
+ ExplodedNode* Node = Eng.G->getNode(BlockEntrance(&B,
+ Pred->getLocationContext(), tag), State, &IsNew);
+
+ Node->addPredecessor(P ? P : Pred, *Eng.G);
+
+ if (IsNew) {
+ Eng.G->addEndOfPath(Node);
+ return Node;
+ }
+
+ return NULL;
+}
+
+void GREndPathNodeBuilder::GenerateCallExitNode(const GRState *state) {
+ HasGeneratedNode = true;
+ // Create a CallExit node and enqueue it.
+ const StackFrameContext *LocCtx
+ = cast<StackFrameContext>(Pred->getLocationContext());
+ const Stmt *CE = LocCtx->getCallSite();
+
+  // Use the callee location context.
+ CallExit Loc(CE, LocCtx);
+
+ bool isNew;
+ ExplodedNode *Node = Eng.G->getNode(Loc, state, &isNew);
+ Node->addPredecessor(Pred, *Eng.G);
+
+ if (isNew)
+ Eng.WList->Enqueue(Node);
+}
+
+
+void GRCallEnterNodeBuilder::GenerateNode(const GRState *state,
+ const LocationContext *LocCtx) {
+ // Get the callee entry block.
+ const CFGBlock *Entry = &(LocCtx->getCFG()->getEntry());
+ assert(Entry->empty());
+ assert(Entry->succ_size() == 1);
+
+ // Get the solitary successor.
+ const CFGBlock *SuccB = *(Entry->succ_begin());
+
+ // Construct an edge representing the starting location in the callee.
+ BlockEdge Loc(Entry, SuccB, LocCtx);
+
+ bool isNew;
+ ExplodedNode *Node = Eng.G->getNode(Loc, state, &isNew);
+ Node->addPredecessor(const_cast<ExplodedNode*>(Pred), *Eng.G);
+
+ if (isNew)
+ Eng.WList->Enqueue(Node);
+}
+
+void GRCallExitNodeBuilder::GenerateNode(const GRState *state) {
+ // Get the callee's location context.
+ const StackFrameContext *LocCtx
+ = cast<StackFrameContext>(Pred->getLocationContext());
+
+ PostStmt Loc(LocCtx->getCallSite(), LocCtx->getParent());
+ bool isNew;
+ ExplodedNode *Node = Eng.G->getNode(Loc, state, &isNew);
+ Node->addPredecessor(const_cast<ExplodedNode*>(Pred), *Eng.G);
+ if (isNew)
+ Eng.WList->Enqueue(Node, *const_cast<CFGBlock*>(LocCtx->getCallSiteBlock()),
+ LocCtx->getIndex() + 1);
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRExprEngine.cpp b/contrib/llvm/tools/clang/lib/Checker/GRExprEngine.cpp
new file mode 100644
index 0000000..2417658
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/GRExprEngine.cpp
@@ -0,0 +1,3481 @@
+//=-- GRExprEngine.cpp - Path-Sensitive Expression-Level Dataflow ---*- C++ -*-=
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a meta-engine for path-sensitive dataflow analysis that
+// is built on GRCoreEngine, but provides the boilerplate to execute transfer
+// functions and build the ExplodedGraph at the expression level.
+//
+//===----------------------------------------------------------------------===//
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/AnalysisManager.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/Checker/PathSensitive/GRExprEngineBuilders.h"
+#include "clang/Checker/PathSensitive/Checker.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/ImmutableList.h"
+
+#ifndef NDEBUG
+#include "llvm/Support/GraphWriter.h"
+#endif
+
+using namespace clang;
+using llvm::dyn_cast;
+using llvm::dyn_cast_or_null;
+using llvm::cast;
+using llvm::APSInt;
+
+namespace {
+ // Trait class for recording returned expression in the state.
+ struct ReturnExpr {
+ static int TagInt;
+ typedef const Stmt *data_type;
+ };
+ int ReturnExpr::TagInt;
+}
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+static inline Selector GetNullarySelector(const char* name, ASTContext& Ctx) {
+ IdentifierInfo* II = &Ctx.Idents.get(name);
+ return Ctx.Selectors.getSelector(0, &II);
+}
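+
+// E.g. GetNullarySelector("raise", Ctx) yields the zero-argument selector
+// 'raise', used below to initialize RaiseSel for NSException handling.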
+
+
+static QualType GetCalleeReturnType(const CallExpr *CE) {
+ const Expr *Callee = CE->getCallee();
+ QualType T = Callee->getType();
+ if (const PointerType *PT = T->getAs<PointerType>()) {
+ const FunctionType *FT = PT->getPointeeType()->getAs<FunctionType>();
+ T = FT->getResultType();
+ }
+ else {
+ const BlockPointerType *BT = T->getAs<BlockPointerType>();
+ T = BT->getPointeeType()->getAs<FunctionType>()->getResultType();
+ }
+ return T;
+}
+
+static bool CalleeReturnsReference(const CallExpr *CE) {
+ return (bool) GetCalleeReturnType(CE)->getAs<ReferenceType>();
+}
+
+static bool ReceiverReturnsReference(const ObjCMessageExpr *ME) {
+ const ObjCMethodDecl *MD = ME->getMethodDecl();
+ if (!MD)
+ return false;
+ return MD->getResultType()->getAs<ReferenceType>();
+}
+
+#ifndef NDEBUG
+static bool ReceiverReturnsReferenceOrRecord(const ObjCMessageExpr *ME) {
+ const ObjCMethodDecl *MD = ME->getMethodDecl();
+ if (!MD)
+ return false;
+ QualType T = MD->getResultType();
+ return T->getAs<RecordType>() || T->getAs<ReferenceType>();
+}
+
+static bool CalleeReturnsReferenceOrRecord(const CallExpr *CE) {
+ QualType T = GetCalleeReturnType(CE);
+ return T->getAs<ReferenceType>() || T->getAs<RecordType>();
+}
+#endif
+
+//===----------------------------------------------------------------------===//
+// Batch auditor. DEPRECATED.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class MappedBatchAuditor : public GRSimpleAPICheck {
+ typedef llvm::ImmutableList<GRSimpleAPICheck*> Checks;
+ typedef llvm::DenseMap<void*,Checks> MapTy;
+
+ MapTy M;
+ Checks::Factory F;
+ Checks AllStmts;
+
+public:
+ MappedBatchAuditor(llvm::BumpPtrAllocator& Alloc) :
+ F(Alloc), AllStmts(F.GetEmptyList()) {}
+
+ virtual ~MappedBatchAuditor() {
+ llvm::DenseSet<GRSimpleAPICheck*> AlreadyVisited;
+
+ for (MapTy::iterator MI = M.begin(), ME = M.end(); MI != ME; ++MI)
+ for (Checks::iterator I=MI->second.begin(), E=MI->second.end(); I!=E;++I){
+
+ GRSimpleAPICheck* check = *I;
+
+ if (AlreadyVisited.count(check))
+ continue;
+
+ AlreadyVisited.insert(check);
+ delete check;
+ }
+ }
+
+ void AddCheck(GRSimpleAPICheck *A, Stmt::StmtClass C) {
+ assert (A && "Check cannot be null.");
+ void* key = reinterpret_cast<void*>((uintptr_t) C);
+ MapTy::iterator I = M.find(key);
+ M[key] = F.Concat(A, I == M.end() ? F.GetEmptyList() : I->second);
+ }
+
+ void AddCheck(GRSimpleAPICheck *A) {
+ assert (A && "Check cannot be null.");
+ AllStmts = F.Concat(A, AllStmts);
+ }
+
+ virtual bool Audit(ExplodedNode* N, GRStateManager& VMgr) {
+ // First handle the auditors that accept all statements.
+ bool isSink = false;
+ for (Checks::iterator I = AllStmts.begin(), E = AllStmts.end(); I!=E; ++I)
+ isSink |= (*I)->Audit(N, VMgr);
+
+ // Next handle the auditors that accept only specific statements.
+ const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
+ void* key = reinterpret_cast<void*>((uintptr_t) S->getStmtClass());
+ MapTy::iterator MI = M.find(key);
+ if (MI != M.end()) {
+ for (Checks::iterator I=MI->second.begin(), E=MI->second.end(); I!=E; ++I)
+ isSink |= (*I)->Audit(N, VMgr);
+ }
+
+ return isSink;
+ }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Checker worklist routines.
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::CheckerVisit(Stmt *S, ExplodedNodeSet &Dst,
+ ExplodedNodeSet &Src, bool isPrevisit) {
+
+ if (Checkers.empty()) {
+ Dst.insert(Src);
+ return;
+ }
+
+ ExplodedNodeSet Tmp;
+ ExplodedNodeSet *PrevSet = &Src;
+
+ for (CheckersOrdered::iterator I=Checkers.begin(),E=Checkers.end(); I!=E;++I){
+ ExplodedNodeSet *CurrSet = 0;
+ if (I+1 == E)
+ CurrSet = &Dst;
+ else {
+ CurrSet = (PrevSet == &Tmp) ? &Src : &Tmp;
+ CurrSet->clear();
+ }
+ void *tag = I->first;
+ Checker *checker = I->second;
+
+ for (ExplodedNodeSet::iterator NI = PrevSet->begin(), NE = PrevSet->end();
+ NI != NE; ++NI)
+ checker->GR_Visit(*CurrSet, *Builder, *this, S, *NI, tag, isPrevisit);
+ PrevSet = CurrSet;
+ }
+
+ // Don't autotransition. The CheckerContext objects should do this
+ // automatically.
+}
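+
+// The checkers thus form a pipeline: with checkers {C1, C2}, C1 reads Src and
+// writes a scratch set, C2 reads that set and writes Dst, so each checker
+// visits exactly the nodes produced by its predecessor.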
+
+void GRExprEngine::CheckerEvalNilReceiver(const ObjCMessageExpr *ME,
+ ExplodedNodeSet &Dst,
+ const GRState *state,
+ ExplodedNode *Pred) {
+ bool Evaluated = false;
+ ExplodedNodeSet DstTmp;
+
+ for (CheckersOrdered::iterator I=Checkers.begin(),E=Checkers.end();I!=E;++I) {
+ void *tag = I->first;
+ Checker *checker = I->second;
+
+ if (checker->GR_EvalNilReceiver(DstTmp, *Builder, *this, ME, Pred, state,
+ tag)) {
+ Evaluated = true;
+ break;
+ } else
+ // The checker didn't evaluate the expr. Restore the Dst.
+ DstTmp.clear();
+ }
+
+ if (Evaluated)
+ Dst.insert(DstTmp);
+ else
+ Dst.insert(Pred);
+}
+
+// CheckerEvalCall returns true if one of the checkers processed the node.
+// Its return type may become void once all call evaluation logic has moved
+// into the checkers.
+bool GRExprEngine::CheckerEvalCall(const CallExpr *CE,
+ ExplodedNodeSet &Dst,
+ ExplodedNode *Pred) {
+ bool Evaluated = false;
+ ExplodedNodeSet DstTmp;
+
+ for (CheckersOrdered::iterator I=Checkers.begin(),E=Checkers.end();I!=E;++I) {
+ void *tag = I->first;
+ Checker *checker = I->second;
+
+ if (checker->GR_EvalCallExpr(DstTmp, *Builder, *this, CE, Pred, tag)) {
+ Evaluated = true;
+ break;
+ } else
+ // The checker didn't evaluate the expr. Restore the DstTmp set.
+ DstTmp.clear();
+ }
+
+ if (Evaluated)
+ Dst.insert(DstTmp);
+ else
+ Dst.insert(Pred);
+
+ return Evaluated;
+}
+
+// FIXME: This is largely copy-paste from CheckerVisit(). Need to
+// unify.
+void GRExprEngine::CheckerVisitBind(const Stmt *AssignE, const Stmt *StoreE,
+ ExplodedNodeSet &Dst,
+ ExplodedNodeSet &Src,
+ SVal location, SVal val, bool isPrevisit) {
+
+ if (Checkers.empty()) {
+ Dst.insert(Src);
+ return;
+ }
+
+ ExplodedNodeSet Tmp;
+ ExplodedNodeSet *PrevSet = &Src;
+
+ for (CheckersOrdered::iterator I=Checkers.begin(),E=Checkers.end(); I!=E; ++I)
+ {
+ ExplodedNodeSet *CurrSet = 0;
+ if (I+1 == E)
+ CurrSet = &Dst;
+ else {
+ CurrSet = (PrevSet == &Tmp) ? &Src : &Tmp;
+ CurrSet->clear();
+ }
+
+ void *tag = I->first;
+ Checker *checker = I->second;
+
+ for (ExplodedNodeSet::iterator NI = PrevSet->begin(), NE = PrevSet->end();
+ NI != NE; ++NI)
+ checker->GR_VisitBind(*CurrSet, *Builder, *this, AssignE, StoreE,
+ *NI, tag, location, val, isPrevisit);
+
+ // Update which NodeSet is the current one.
+ PrevSet = CurrSet;
+ }
+
+ // Don't autotransition. The CheckerContext objects should do this
+ // automatically.
+}
+//===----------------------------------------------------------------------===//
+// Engine construction and deletion.
+//===----------------------------------------------------------------------===//
+
+static void RegisterInternalChecks(GRExprEngine &Eng) {
+ // Register internal "built-in" BugTypes with the BugReporter. These BugTypes
+  // differ from what many checks will probably do, since they don't
+ // create BugReports on-the-fly but instead wait until GRExprEngine finishes
+ // analyzing a function. Generation of BugReport objects is done via a call
+ // to 'FlushReports' from BugReporter.
+ // The following checks do not need to have their associated BugTypes
+ // explicitly registered with the BugReporter. If they issue any BugReports,
+ // their associated BugType will get registered with the BugReporter
+ // automatically. Note that the check itself is owned by the GRExprEngine
+ // object.
+ RegisterAdjustedReturnValueChecker(Eng);
+ RegisterAttrNonNullChecker(Eng);
+ RegisterCallAndMessageChecker(Eng);
+ RegisterDereferenceChecker(Eng);
+ RegisterVLASizeChecker(Eng);
+ RegisterDivZeroChecker(Eng);
+ RegisterReturnStackAddressChecker(Eng);
+ RegisterReturnUndefChecker(Eng);
+ RegisterUndefinedArraySubscriptChecker(Eng);
+ RegisterUndefinedAssignmentChecker(Eng);
+ RegisterUndefBranchChecker(Eng);
+ RegisterUndefCapturedBlockVarChecker(Eng);
+ RegisterUndefResultChecker(Eng);
+
+ // This is not a checker yet.
+ RegisterNoReturnFunctionChecker(Eng);
+ RegisterBuiltinFunctionChecker(Eng);
+ RegisterOSAtomicChecker(Eng);
+ RegisterUnixAPIChecker(Eng);
+ RegisterMacOSXAPIChecker(Eng);
+}
+
+GRExprEngine::GRExprEngine(AnalysisManager &mgr, GRTransferFuncs *tf)
+ : AMgr(mgr),
+ CoreEngine(mgr.getASTContext(), *this),
+ G(CoreEngine.getGraph()),
+ Builder(NULL),
+ StateMgr(G.getContext(), mgr.getStoreManagerCreator(),
+ mgr.getConstraintManagerCreator(), G.getAllocator(),
+ *this),
+ SymMgr(StateMgr.getSymbolManager()),
+ ValMgr(StateMgr.getValueManager()),
+ SVator(ValMgr.getSValuator()),
+ CurrentStmt(NULL),
+ NSExceptionII(NULL), NSExceptionInstanceRaiseSelectors(NULL),
+ RaiseSel(GetNullarySelector("raise", G.getContext())),
+ BR(mgr, *this), TF(tf) {
+ // Register internal checks.
+ RegisterInternalChecks(*this);
+
+ // FIXME: Eventually remove the TF object entirely.
+ TF->RegisterChecks(*this);
+ TF->RegisterPrinters(getStateManager().Printers);
+}
+
+GRExprEngine::~GRExprEngine() {
+ BR.FlushReports();
+ delete [] NSExceptionInstanceRaiseSelectors;
+ for (CheckersOrdered::iterator I=Checkers.begin(), E=Checkers.end(); I!=E;++I)
+ delete I->second;
+}
+
+//===----------------------------------------------------------------------===//
+// Utility methods.
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::AddCheck(GRSimpleAPICheck* A, Stmt::StmtClass C) {
+ if (!BatchAuditor)
+ BatchAuditor.reset(new MappedBatchAuditor(getGraph().getAllocator()));
+
+ ((MappedBatchAuditor*) BatchAuditor.get())->AddCheck(A, C);
+}
+
+void GRExprEngine::AddCheck(GRSimpleAPICheck *A) {
+ if (!BatchAuditor)
+ BatchAuditor.reset(new MappedBatchAuditor(getGraph().getAllocator()));
+
+ ((MappedBatchAuditor*) BatchAuditor.get())->AddCheck(A);
+}
+
+const GRState* GRExprEngine::getInitialState(const LocationContext *InitLoc) {
+ const GRState *state = StateMgr.getInitialState(InitLoc);
+
+ // Preconditions.
+
+ // FIXME: It would be nice if we had a more general mechanism to add
+ // such preconditions. Some day.
+ do {
+ const Decl *D = InitLoc->getDecl();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // Precondition: the first argument of 'main' is an integer guaranteed
+ // to be > 0.
+ const IdentifierInfo *II = FD->getIdentifier();
+ if (!II || !(II->getName() == "main" && FD->getNumParams() > 0))
+ break;
+
+ const ParmVarDecl *PD = FD->getParamDecl(0);
+ QualType T = PD->getType();
+ if (!T->isIntegerType())
+ break;
+
+ const MemRegion *R = state->getRegion(PD, InitLoc);
+ if (!R)
+ break;
+
+ SVal V = state->getSVal(loc::MemRegionVal(R));
+ SVal Constraint_untested = EvalBinOp(state, BinaryOperator::GT, V,
+ ValMgr.makeZeroVal(T),
+ getContext().IntTy);
+
+ DefinedOrUnknownSVal *Constraint =
+ dyn_cast<DefinedOrUnknownSVal>(&Constraint_untested);
+
+ if (!Constraint)
+ break;
+
+ if (const GRState *newState = state->Assume(*Constraint, true))
+ state = newState;
+
+ break;
+ }
+
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ // Precondition: 'self' is always non-null upon entry to an Objective-C
+ // method.
+ const ImplicitParamDecl *SelfD = MD->getSelfDecl();
+ const MemRegion *R = state->getRegion(SelfD, InitLoc);
+ SVal V = state->getSVal(loc::MemRegionVal(R));
+
+ if (const Loc *LV = dyn_cast<Loc>(&V)) {
+ // Assume that the pointer value in 'self' is non-null.
+ state = state->Assume(*LV, true);
+ assert(state && "'self' cannot be null");
+ }
+ }
+ } while (0);
+
+ return state;
+}
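+
+// For example, given 'int main(int argc, char **argv)' the initial state
+// carries the assumption 'argc > 0', and on entry to an Objective-C method
+// 'self' is assumed non-null.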
+
+//===----------------------------------------------------------------------===//
+// Top-level transfer function logic (Dispatcher).
+//===----------------------------------------------------------------------===//
+
+/// ProcessAssume - Called by ConstraintManager. Used to call checker-specific
+/// logic for handling assumptions on symbolic values.
+const GRState *GRExprEngine::ProcessAssume(const GRState *state, SVal cond,
+ bool assumption) {
+ for (CheckersOrdered::iterator I = Checkers.begin(), E = Checkers.end();
+ I != E; ++I) {
+
+ if (!state)
+ return NULL;
+
+ state = I->second->EvalAssume(state, cond, assumption);
+ }
+
+ if (!state)
+ return NULL;
+
+ return TF->EvalAssume(state, cond, assumption);
+}
+
+void GRExprEngine::ProcessStmt(CFGElement CE, GRStmtNodeBuilder& builder) {
+ CurrentStmt = CE.getStmt();
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ CurrentStmt->getLocStart(),
+ "Error evaluating statement");
+
+ Builder = &builder;
+ EntryNode = builder.getBasePredecessor();
+
+ // Set up our simple checks.
+ if (BatchAuditor)
+ Builder->setAuditor(BatchAuditor.get());
+
+ // Create the cleaned state.
+ const ExplodedNode *BasePred = Builder->getBasePredecessor();
+
+ SymbolReaper SymReaper(BasePred->getLocationContext(), SymMgr);
+
+ CleanedState = AMgr.shouldPurgeDead()
+ ? StateMgr.RemoveDeadBindings(EntryNode->getState(), CurrentStmt,
+ BasePred->getLocationContext()->getCurrentStackFrame(),
+ SymReaper)
+ : EntryNode->getState();
+
+ // Process any special transfer function for dead symbols.
+ ExplodedNodeSet Tmp;
+
+ if (!SymReaper.hasDeadSymbols())
+ Tmp.Add(EntryNode);
+ else {
+ SaveAndRestore<bool> OldSink(Builder->BuildSinks);
+ SaveOr OldHasGen(Builder->HasGeneratedNode);
+
+ SaveAndRestore<bool> OldPurgeDeadSymbols(Builder->PurgingDeadSymbols);
+ Builder->PurgingDeadSymbols = true;
+
+ // FIXME: This should soon be removed.
+ ExplodedNodeSet Tmp2;
+ getTF().EvalDeadSymbols(Tmp2, *this, *Builder, EntryNode, CurrentStmt,
+ CleanedState, SymReaper);
+
+ if (Checkers.empty())
+ Tmp.insert(Tmp2);
+ else {
+ ExplodedNodeSet Tmp3;
+ ExplodedNodeSet *SrcSet = &Tmp2;
+ for (CheckersOrdered::iterator I = Checkers.begin(), E = Checkers.end();
+ I != E; ++I) {
+ ExplodedNodeSet *DstSet = 0;
+ if (I+1 == E)
+ DstSet = &Tmp;
+ else {
+ DstSet = (SrcSet == &Tmp2) ? &Tmp3 : &Tmp2;
+ DstSet->clear();
+ }
+
+ void *tag = I->first;
+ Checker *checker = I->second;
+ for (ExplodedNodeSet::iterator NI = SrcSet->begin(), NE = SrcSet->end();
+ NI != NE; ++NI)
+ checker->GR_EvalDeadSymbols(*DstSet, *Builder, *this, CurrentStmt,
+ *NI, SymReaper, tag);
+ SrcSet = DstSet;
+ }
+ }
+
+ if (!Builder->BuildSinks && !Builder->HasGeneratedNode)
+ Tmp.Add(EntryNode);
+ }
+
+ bool HasAutoGenerated = false;
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
+ ExplodedNodeSet Dst;
+
+ // Set the cleaned state.
+ Builder->SetCleanedState(*I == EntryNode ? CleanedState : GetState(*I));
+
+ // Visit the statement.
+ if (CE.asLValue())
+ VisitLValue(cast<Expr>(CurrentStmt), *I, Dst);
+ else
+ Visit(CurrentStmt, *I, Dst);
+
+ // Do we need to auto-generate a node? We only need to do this to generate
+ // a node with a "cleaned" state; GRCoreEngine will actually handle
+ // auto-transitions for other cases.
+ if (Dst.size() == 1 && *Dst.begin() == EntryNode
+ && !Builder->HasGeneratedNode && !HasAutoGenerated) {
+ HasAutoGenerated = true;
+ builder.generateNode(CurrentStmt, GetState(EntryNode), *I);
+ }
+ }
+
+  // NULL out these variables to clean up.
+ CleanedState = NULL;
+ EntryNode = NULL;
+
+ CurrentStmt = 0;
+
+ Builder = NULL;
+}
+
+void GRExprEngine::Visit(Stmt* S, ExplodedNode* Pred, ExplodedNodeSet& Dst) {
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ S->getLocStart(),
+ "Error evaluating statement");
+
+ // FIXME: add metadata to the CFG so that we can disable
+ // this check when we KNOW that there is no block-level subexpression.
+ // The motivation is that this check requires a hashtable lookup.
+
+ if (S != CurrentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(S)) {
+ Dst.Add(Pred);
+ return;
+ }
+
+ switch (S->getStmtClass()) {
+ // C++ stuff we don't support yet.
+ case Stmt::CXXBindReferenceExprClass:
+ case Stmt::CXXBindTemporaryExprClass:
+ case Stmt::CXXCatchStmtClass:
+ case Stmt::CXXConstructExprClass:
+ case Stmt::CXXDefaultArgExprClass:
+ case Stmt::CXXDependentScopeMemberExprClass:
+ case Stmt::CXXExprWithTemporariesClass:
+ case Stmt::CXXNullPtrLiteralExprClass:
+ case Stmt::CXXPseudoDestructorExprClass:
+ case Stmt::CXXTemporaryObjectExprClass:
+ case Stmt::CXXThrowExprClass:
+ case Stmt::CXXTryStmtClass:
+ case Stmt::CXXTypeidExprClass:
+ case Stmt::CXXUnresolvedConstructExprClass:
+ case Stmt::CXXZeroInitValueExprClass:
+ case Stmt::DependentScopeDeclRefExprClass:
+ case Stmt::UnaryTypeTraitExprClass:
+ case Stmt::UnresolvedLookupExprClass:
+ case Stmt::UnresolvedMemberExprClass:
+ {
+ SaveAndRestore<bool> OldSink(Builder->BuildSinks);
+ Builder->BuildSinks = true;
+ MakeNode(Dst, S, Pred, GetState(Pred));
+ break;
+ }
+
+ // Cases that should never be evaluated simply because they shouldn't
+ // appear in the CFG.
+ case Stmt::BreakStmtClass:
+ case Stmt::CaseStmtClass:
+ case Stmt::CompoundStmtClass:
+ case Stmt::ContinueStmtClass:
+ case Stmt::DefaultStmtClass:
+ case Stmt::DoStmtClass:
+ case Stmt::GotoStmtClass:
+ case Stmt::IndirectGotoStmtClass:
+ case Stmt::LabelStmtClass:
+ case Stmt::NoStmtClass:
+ case Stmt::NullStmtClass:
+ case Stmt::SwitchCaseClass:
+ llvm_unreachable("Stmt should not be in analyzer evaluation loop");
+ break;
+
+ // Cases not handled yet; but will handle some day.
+ case Stmt::DesignatedInitExprClass:
+ case Stmt::ExtVectorElementExprClass:
+ case Stmt::GNUNullExprClass:
+ case Stmt::ImaginaryLiteralClass:
+ case Stmt::ImplicitValueInitExprClass:
+ case Stmt::ObjCAtCatchStmtClass:
+ case Stmt::ObjCAtFinallyStmtClass:
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ case Stmt::ObjCAtTryStmtClass:
+ case Stmt::ObjCEncodeExprClass:
+ case Stmt::ObjCImplicitSetterGetterRefExprClass:
+ case Stmt::ObjCIsaExprClass:
+ case Stmt::ObjCPropertyRefExprClass:
+ case Stmt::ObjCProtocolExprClass:
+ case Stmt::ObjCSelectorExprClass:
+ case Stmt::ObjCStringLiteralClass:
+ case Stmt::ObjCSuperExprClass:
+ case Stmt::ParenListExprClass:
+ case Stmt::PredefinedExprClass:
+ case Stmt::ShuffleVectorExprClass:
+ case Stmt::TypesCompatibleExprClass:
+ case Stmt::VAArgExprClass:
+ // Fall through.
+
+ // Cases we intentionally don't evaluate, since they don't need
+ // to be explicitly evaluated.
+ case Stmt::AddrLabelExprClass:
+ case Stmt::IntegerLiteralClass:
+ case Stmt::CharacterLiteralClass:
+ case Stmt::CXXBoolLiteralExprClass:
+ case Stmt::FloatingLiteralClass:
+ Dst.Add(Pred); // No-op. Simply propagate the current state unchanged.
+ break;
+
+ case Stmt::ArraySubscriptExprClass:
+ VisitArraySubscriptExpr(cast<ArraySubscriptExpr>(S), Pred, Dst, false);
+ break;
+
+ case Stmt::AsmStmtClass:
+ VisitAsmStmt(cast<AsmStmt>(S), Pred, Dst);
+ break;
+
+ case Stmt::BlockDeclRefExprClass:
+ VisitBlockDeclRefExpr(cast<BlockDeclRefExpr>(S), Pred, Dst, false);
+ break;
+
+ case Stmt::BlockExprClass:
+ VisitBlockExpr(cast<BlockExpr>(S), Pred, Dst);
+ break;
+
+ case Stmt::BinaryOperatorClass: {
+ BinaryOperator* B = cast<BinaryOperator>(S);
+
+ if (B->isLogicalOp()) {
+ VisitLogicalExpr(B, Pred, Dst);
+ break;
+ }
+ else if (B->getOpcode() == BinaryOperator::Comma) {
+ const GRState* state = GetState(Pred);
+ MakeNode(Dst, B, Pred, state->BindExpr(B, state->getSVal(B->getRHS())));
+ break;
+ }
+
+ if (AMgr.shouldEagerlyAssume() &&
+ (B->isRelationalOp() || B->isEqualityOp())) {
+ ExplodedNodeSet Tmp;
+ VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Tmp, false);
+ EvalEagerlyAssume(Dst, Tmp, cast<Expr>(S));
+ }
+ else
+ VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst, false);
+
+ break;
+ }
+
+ case Stmt::CallExprClass:
+ case Stmt::CXXOperatorCallExprClass: {
+ CallExpr* C = cast<CallExpr>(S);
+ VisitCall(C, Pred, C->arg_begin(), C->arg_end(), Dst, false);
+ break;
+ }
+
+ case Stmt::CXXMemberCallExprClass: {
+ CXXMemberCallExpr *MCE = cast<CXXMemberCallExpr>(S);
+ VisitCXXMemberCallExpr(MCE, Pred, Dst);
+ break;
+ }
+
+ case Stmt::CXXNewExprClass: {
+ CXXNewExpr *NE = cast<CXXNewExpr>(S);
+ VisitCXXNewExpr(NE, Pred, Dst);
+ break;
+ }
+
+ case Stmt::CXXDeleteExprClass: {
+ CXXDeleteExpr *CDE = cast<CXXDeleteExpr>(S);
+ VisitCXXDeleteExpr(CDE, Pred, Dst);
+ break;
+ }
+  // FIXME: ChooseExpr is really a constant. We need to fix the CFG so that
+  // it does not model ChooseExpr as explicit control-flow.
+
+ case Stmt::ChooseExprClass: { // __builtin_choose_expr
+ ChooseExpr* C = cast<ChooseExpr>(S);
+ VisitGuardedExpr(C, C->getLHS(), C->getRHS(), Pred, Dst);
+ break;
+ }
+
+ case Stmt::CompoundAssignOperatorClass:
+ VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst, false);
+ break;
+
+ case Stmt::CompoundLiteralExprClass:
+ VisitCompoundLiteralExpr(cast<CompoundLiteralExpr>(S), Pred, Dst, false);
+ break;
+
+ case Stmt::ConditionalOperatorClass: { // '?' operator
+ ConditionalOperator* C = cast<ConditionalOperator>(S);
+ VisitGuardedExpr(C, C->getLHS(), C->getRHS(), Pred, Dst);
+ break;
+ }
+
+ case Stmt::CXXThisExprClass:
+ VisitCXXThisExpr(cast<CXXThisExpr>(S), Pred, Dst);
+ break;
+
+ case Stmt::DeclRefExprClass:
+ VisitDeclRefExpr(cast<DeclRefExpr>(S), Pred, Dst, false);
+ break;
+
+ case Stmt::DeclStmtClass:
+ VisitDeclStmt(cast<DeclStmt>(S), Pred, Dst);
+ break;
+
+ case Stmt::ForStmtClass:
+ // This case isn't for branch processing, but for handling the
+ // initialization of a condition variable.
+ VisitCondInit(cast<ForStmt>(S)->getConditionVariable(), S, Pred, Dst);
+ break;
+
+ case Stmt::ImplicitCastExprClass:
+ case Stmt::CStyleCastExprClass:
+ case Stmt::CXXStaticCastExprClass:
+ case Stmt::CXXDynamicCastExprClass:
+ case Stmt::CXXReinterpretCastExprClass:
+ case Stmt::CXXConstCastExprClass:
+ case Stmt::CXXFunctionalCastExprClass: {
+ CastExpr* C = cast<CastExpr>(S);
+ VisitCast(C, C->getSubExpr(), Pred, Dst, false);
+ break;
+ }
+
+ case Stmt::IfStmtClass:
+ // This case isn't for branch processing, but for handling the
+ // initialization of a condition variable.
+ VisitCondInit(cast<IfStmt>(S)->getConditionVariable(), S, Pred, Dst);
+ break;
+
+ case Stmt::InitListExprClass:
+ VisitInitListExpr(cast<InitListExpr>(S), Pred, Dst);
+ break;
+
+ case Stmt::MemberExprClass:
+ VisitMemberExpr(cast<MemberExpr>(S), Pred, Dst, false);
+ break;
+
+ case Stmt::ObjCIvarRefExprClass:
+ VisitObjCIvarRefExpr(cast<ObjCIvarRefExpr>(S), Pred, Dst, false);
+ break;
+
+ case Stmt::ObjCForCollectionStmtClass:
+ VisitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(S), Pred, Dst);
+ break;
+
+ case Stmt::ObjCMessageExprClass:
+ VisitObjCMessageExpr(cast<ObjCMessageExpr>(S), Pred, Dst, false);
+ break;
+
+ case Stmt::ObjCAtThrowStmtClass: {
+ // FIXME: This is not complete. We basically treat @throw as
+ // an abort.
+ SaveAndRestore<bool> OldSink(Builder->BuildSinks);
+ Builder->BuildSinks = true;
+ MakeNode(Dst, S, Pred, GetState(Pred));
+ break;
+ }
+
+ case Stmt::ParenExprClass:
+ Visit(cast<ParenExpr>(S)->getSubExpr()->IgnoreParens(), Pred, Dst);
+ break;
+
+ case Stmt::ReturnStmtClass:
+ VisitReturnStmt(cast<ReturnStmt>(S), Pred, Dst);
+ break;
+
+ case Stmt::OffsetOfExprClass:
+ VisitOffsetOfExpr(cast<OffsetOfExpr>(S), Pred, Dst);
+ break;
+
+ case Stmt::SizeOfAlignOfExprClass:
+ VisitSizeOfAlignOfExpr(cast<SizeOfAlignOfExpr>(S), Pred, Dst);
+ break;
+
+ case Stmt::StmtExprClass: {
+ StmtExpr* SE = cast<StmtExpr>(S);
+
+ if (SE->getSubStmt()->body_empty()) {
+ // Empty statement expression.
+ assert(SE->getType() == getContext().VoidTy
+ && "Empty statement expression must have void type.");
+ Dst.Add(Pred);
+ break;
+ }
+
+ if (Expr* LastExpr = dyn_cast<Expr>(*SE->getSubStmt()->body_rbegin())) {
+ const GRState* state = GetState(Pred);
+ MakeNode(Dst, SE, Pred, state->BindExpr(SE, state->getSVal(LastExpr)));
+ }
+ else
+ Dst.Add(Pred);
+
+ break;
+ }
+
+ case Stmt::StringLiteralClass:
+ VisitLValue(cast<StringLiteral>(S), Pred, Dst);
+ break;
+
+ case Stmt::SwitchStmtClass:
+ // This case isn't for branch processing, but for handling the
+ // initialization of a condition variable.
+ VisitCondInit(cast<SwitchStmt>(S)->getConditionVariable(), S, Pred, Dst);
+ break;
+
+ case Stmt::UnaryOperatorClass: {
+ UnaryOperator *U = cast<UnaryOperator>(S);
+ if (AMgr.shouldEagerlyAssume()&&(U->getOpcode() == UnaryOperator::LNot)) {
+ ExplodedNodeSet Tmp;
+ VisitUnaryOperator(U, Pred, Tmp, false);
+ EvalEagerlyAssume(Dst, Tmp, U);
+ }
+ else
+ VisitUnaryOperator(U, Pred, Dst, false);
+ break;
+ }
+
+ case Stmt::WhileStmtClass:
+ // This case isn't for branch processing, but for handling the
+ // initialization of a condition variable.
+ VisitCondInit(cast<WhileStmt>(S)->getConditionVariable(), S, Pred, Dst);
+ break;
+ }
+}
+
+void GRExprEngine::VisitLValue(Expr* Ex, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst) {
+
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ Ex->getLocStart(),
+ "Error evaluating statement");
+
+ Ex = Ex->IgnoreParens();
+
+ if (Ex != CurrentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(Ex)){
+ Dst.Add(Pred);
+ return;
+ }
+
+ switch (Ex->getStmtClass()) {
+ // C++ stuff we don't support yet.
+ case Stmt::CXXExprWithTemporariesClass:
+ case Stmt::CXXMemberCallExprClass:
+ case Stmt::CXXZeroInitValueExprClass: {
+ SaveAndRestore<bool> OldSink(Builder->BuildSinks);
+ Builder->BuildSinks = true;
+ MakeNode(Dst, Ex, Pred, GetState(Pred));
+ break;
+ }
+
+ case Stmt::ArraySubscriptExprClass:
+ VisitArraySubscriptExpr(cast<ArraySubscriptExpr>(Ex), Pred, Dst, true);
+ return;
+
+ case Stmt::BinaryOperatorClass:
+ case Stmt::CompoundAssignOperatorClass:
+ VisitBinaryOperator(cast<BinaryOperator>(Ex), Pred, Dst, true);
+ return;
+
+ case Stmt::BlockDeclRefExprClass:
+ VisitBlockDeclRefExpr(cast<BlockDeclRefExpr>(Ex), Pred, Dst, true);
+ return;
+
+ case Stmt::CallExprClass:
+ case Stmt::CXXOperatorCallExprClass: {
+ CallExpr *C = cast<CallExpr>(Ex);
+ assert(CalleeReturnsReferenceOrRecord(C));
+ VisitCall(C, Pred, C->arg_begin(), C->arg_end(), Dst, true);
+ break;
+ }
+
+ case Stmt::CompoundLiteralExprClass:
+ VisitCompoundLiteralExpr(cast<CompoundLiteralExpr>(Ex), Pred, Dst, true);
+ return;
+
+ case Stmt::DeclRefExprClass:
+ VisitDeclRefExpr(cast<DeclRefExpr>(Ex), Pred, Dst, true);
+ return;
+
+ case Stmt::ImplicitCastExprClass:
+ case Stmt::CStyleCastExprClass: {
+ CastExpr *C = cast<CastExpr>(Ex);
+ VisitCast(C, C->getSubExpr(), Pred, Dst, true);
+ break;
+ }
+
+ case Stmt::MemberExprClass:
+ VisitMemberExpr(cast<MemberExpr>(Ex), Pred, Dst, true);
+ return;
+
+ case Stmt::ObjCIvarRefExprClass:
+ VisitObjCIvarRefExpr(cast<ObjCIvarRefExpr>(Ex), Pred, Dst, true);
+ return;
+
+ case Stmt::ObjCMessageExprClass: {
+ ObjCMessageExpr *ME = cast<ObjCMessageExpr>(Ex);
+ assert(ReceiverReturnsReferenceOrRecord(ME));
+ VisitObjCMessageExpr(ME, Pred, Dst, true);
+ return;
+ }
+
+ case Stmt::ObjCIsaExprClass:
+ // FIXME: Do something more intelligent with 'x->isa = ...'.
+ // For now, just ignore the assignment.
+ return;
+
+ case Stmt::ObjCPropertyRefExprClass:
+ case Stmt::ObjCImplicitSetterGetterRefExprClass:
+ // FIXME: Property assignments are lvalues, but not really "locations".
+ // e.g.: self.x = something;
+    //  Here "self.x" really translates to a method call (the setter) when
+    //  the assignment is made. Moreover, the entire assignment expression
+    //  evaluates to whatever "something" is, not to a call of the "getter"
+    //  for the property (which would make sense, since the getter can have
+    //  side effects). We'll probably treat this as a location, but not one
+    //  that we can take the address of. Perhaps we need a new SVal class
+    //  for cases like this?
+ // Note that we have a similar problem for bitfields, since they don't
+ // have "locations" in the sense that we can take their address.
+ Dst.Add(Pred);
+ return;
+
+ case Stmt::StringLiteralClass: {
+ const GRState* state = GetState(Pred);
+ SVal V = state->getLValue(cast<StringLiteral>(Ex));
+ MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V));
+ return;
+ }
+
+ case Stmt::UnaryOperatorClass:
+ VisitUnaryOperator(cast<UnaryOperator>(Ex), Pred, Dst, true);
+ return;
+
+  // In C++, binding an rvalue to a reference requires creating a temporary
+  // object.
+ case Stmt::CXXBoolLiteralExprClass:
+ case Stmt::IntegerLiteralClass:
+ CreateCXXTemporaryObject(Ex, Pred, Dst);
+ return;
+
+ default:
+    // Arbitrary subexpressions can return aggregate temporaries that
+    // can be used in an lvalue context. We need to enhance our support
+    // for such temporaries in both the environment and the store, so right
+    // now we just do a regular visit.
+    assert(Ex->getType()->isAggregateType() &&
+           "Other kinds of expressions with non-aggregate/union types do"
+           " not have lvalues.");
+
+ Visit(Ex, Pred, Dst);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Block entrance. (Update counters).
+//===----------------------------------------------------------------------===//
+
+bool GRExprEngine::ProcessBlockEntrance(CFGBlock* B, const ExplodedNode *Pred,
+ GRBlockCounter BC) {
+ return BC.getNumVisited(Pred->getLocationContext()->getCurrentStackFrame(),
+ B->getBlockID()) < AMgr.getMaxLoop();
+}
+
+//===----------------------------------------------------------------------===//
+// Generic node creation.
+//===----------------------------------------------------------------------===//
+
+ExplodedNode* GRExprEngine::MakeNode(ExplodedNodeSet& Dst, Stmt* S,
+ ExplodedNode* Pred, const GRState* St,
+ ProgramPoint::Kind K, const void *tag) {
+ assert (Builder && "GRStmtNodeBuilder not present.");
+ SaveAndRestore<const void*> OldTag(Builder->Tag);
+ Builder->Tag = tag;
+ return Builder->MakeNode(Dst, S, Pred, St, K);
+}
+
+//===----------------------------------------------------------------------===//
+// Branch processing.
+//===----------------------------------------------------------------------===//
+
+const GRState* GRExprEngine::MarkBranch(const GRState* state,
+ Stmt* Terminator,
+ bool branchTaken) {
+
+ switch (Terminator->getStmtClass()) {
+ default:
+ return state;
+
+ case Stmt::BinaryOperatorClass: { // '&&' and '||'
+
+ BinaryOperator* B = cast<BinaryOperator>(Terminator);
+ BinaryOperator::Opcode Op = B->getOpcode();
+
+ assert (Op == BinaryOperator::LAnd || Op == BinaryOperator::LOr);
+
+ // For &&, if we take the true branch, then the value of the whole
+ // expression is that of the RHS expression.
+ //
+ // For ||, if we take the false branch, then the value of the whole
+ // expression is that of the RHS expression.
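+      //
+      // The chosen subexpression is recorded by binding an UndefinedVal that
+      // carries it as data; VisitLogicalExpr and VisitGuardedExpr later
+      // retrieve it via cast<UndefinedVal>(X).getData() to compute the final
+      // value of the expression.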
+
+ Expr* Ex = (Op == BinaryOperator::LAnd && branchTaken) ||
+ (Op == BinaryOperator::LOr && !branchTaken)
+ ? B->getRHS() : B->getLHS();
+
+ return state->BindExpr(B, UndefinedVal(Ex));
+ }
+
+ case Stmt::ConditionalOperatorClass: { // ?:
+
+ ConditionalOperator* C = cast<ConditionalOperator>(Terminator);
+
+      // For '?:', if branchTaken == true then the value is that of the LHS,
+      // or of the condition itself when the LHS is omitted (the GNU
+      // 'x ?: y' extension).
+
+ Expr* Ex;
+
+ if (branchTaken)
+ Ex = C->getLHS() ? C->getLHS() : C->getCond();
+ else
+ Ex = C->getRHS();
+
+ return state->BindExpr(C, UndefinedVal(Ex));
+ }
+
+    case Stmt::ChooseExprClass: { // __builtin_choose_expr
+
+ ChooseExpr* C = cast<ChooseExpr>(Terminator);
+
+ Expr* Ex = branchTaken ? C->getLHS() : C->getRHS();
+ return state->BindExpr(C, UndefinedVal(Ex));
+ }
+ }
+}
+
+/// RecoverCastedSymbol - A helper function for ProcessBranch that is used
+/// to try to recover some path-sensitivity for casts of symbolic
+/// integers that promote their values (which are currently not tracked well).
+/// This function returns the SVal bound to Condition->IgnoreCasts if all the
+/// cast(s) did was sign-extend the original value.
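+/// For example, for 'char c = ...; if ((int)c) ...' the widening cast loses
+/// no information, so the value bound to 'c' itself can be recovered.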
+static SVal RecoverCastedSymbol(GRStateManager& StateMgr, const GRState* state,
+ Stmt* Condition, ASTContext& Ctx) {
+
+ Expr *Ex = dyn_cast<Expr>(Condition);
+ if (!Ex)
+ return UnknownVal();
+
+ uint64_t bits = 0;
+ bool bitsInit = false;
+
+ while (CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
+ QualType T = CE->getType();
+
+ if (!T->isIntegerType())
+ return UnknownVal();
+
+ uint64_t newBits = Ctx.getTypeSize(T);
+ if (!bitsInit || newBits < bits) {
+ bitsInit = true;
+ bits = newBits;
+ }
+
+ Ex = CE->getSubExpr();
+ }
+
+ // We reached a non-cast. Is it a symbolic value?
+ QualType T = Ex->getType();
+
+ if (!bitsInit || !T->isIntegerType() || Ctx.getTypeSize(T) > bits)
+ return UnknownVal();
+
+ return state->getSVal(Ex);
+}
+
+void GRExprEngine::ProcessBranch(Stmt* Condition, Stmt* Term,
+ GRBranchNodeBuilder& builder) {
+
+ // Check for NULL conditions; e.g. "for(;;)"
+ if (!Condition) {
+ builder.markInfeasible(false);
+ return;
+ }
+
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ Condition->getLocStart(),
+ "Error evaluating branch");
+
+ for (CheckersOrdered::iterator I=Checkers.begin(),E=Checkers.end();I!=E;++I) {
+ void *tag = I->first;
+ Checker *checker = I->second;
+ checker->VisitBranchCondition(builder, *this, Condition, tag);
+ }
+
+  // If both branches have been marked infeasible (e.g. by a checker that
+  // handled an undefined condition), there is nothing left to do.
+ if (!builder.isFeasible(true) && !builder.isFeasible(false))
+ return;
+
+ const GRState* PrevState = builder.getState();
+ SVal X = PrevState->getSVal(Condition);
+
+ if (X.isUnknown()) {
+ // Give it a chance to recover from unknown.
+ if (const Expr *Ex = dyn_cast<Expr>(Condition)) {
+ if (Ex->getType()->isIntegerType()) {
+ // Try to recover some path-sensitivity. Right now casts of symbolic
+ // integers that promote their values are currently not tracked well.
+ // If 'Condition' is such an expression, try and recover the
+ // underlying value and use that instead.
+ SVal recovered = RecoverCastedSymbol(getStateManager(),
+ builder.getState(), Condition,
+ getContext());
+
+ if (!recovered.isUnknown()) {
+ X = recovered;
+ }
+ }
+ }
+ // If the condition is still unknown, give up.
+ if (X.isUnknown()) {
+ builder.generateNode(MarkBranch(PrevState, Term, true), true);
+ builder.generateNode(MarkBranch(PrevState, Term, false), false);
+ return;
+ }
+ }
+
+ DefinedSVal V = cast<DefinedSVal>(X);
+
+ // Process the true branch.
+ if (builder.isFeasible(true)) {
+ if (const GRState *state = PrevState->Assume(V, true))
+ builder.generateNode(MarkBranch(state, Term, true), true);
+ else
+ builder.markInfeasible(true);
+ }
+
+ // Process the false branch.
+ if (builder.isFeasible(false)) {
+ if (const GRState *state = PrevState->Assume(V, false))
+ builder.generateNode(MarkBranch(state, Term, false), false);
+ else
+ builder.markInfeasible(false);
+ }
+}
+
+/// ProcessIndirectGoto - Called by GRCoreEngine. Used to generate successor
+/// nodes by processing the 'effects' of a computed goto jump.
+void GRExprEngine::ProcessIndirectGoto(GRIndirectGotoNodeBuilder& builder) {
+
+ const GRState *state = builder.getState();
+ SVal V = state->getSVal(builder.getTarget());
+
+ // Three possibilities:
+ //
+ // (1) We know the computed label.
+ // (2) The label is NULL (or some other constant), or Undefined.
+ // (3) We have no clue about the label. Dispatch to all targets.
+ //
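+  // For example, with GCC's labels-as-values extension:
+  //   void *p = &&out;  ...  goto *p;
+  // the target evaluates to a loc::GotoLabel and case (1) applies.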
+
+ typedef GRIndirectGotoNodeBuilder::iterator iterator;
+
+ if (isa<loc::GotoLabel>(V)) {
+ LabelStmt* L = cast<loc::GotoLabel>(V).getLabel();
+
+ for (iterator I=builder.begin(), E=builder.end(); I != E; ++I) {
+ if (I.getLabel() == L) {
+ builder.generateNode(I, state);
+ return;
+ }
+ }
+
+ assert (false && "No block with label.");
+ return;
+ }
+
+ if (isa<loc::ConcreteInt>(V) || isa<UndefinedVal>(V)) {
+ // Dispatch to the first target and mark it as a sink.
+ //ExplodedNode* N = builder.generateNode(builder.begin(), state, true);
+ // FIXME: add checker visit.
+ // UndefBranches.insert(N);
+ return;
+ }
+
+ // This is really a catch-all. We don't support symbolics yet.
+ // FIXME: Implement dispatch for symbolic pointers.
+
+ for (iterator I=builder.begin(), E=builder.end(); I != E; ++I)
+ builder.generateNode(I, state);
+}
+
+
+void GRExprEngine::VisitGuardedExpr(Expr* Ex, Expr* L, Expr* R,
+ ExplodedNode* Pred, ExplodedNodeSet& Dst) {
+
+ assert(Ex == CurrentStmt &&
+ Pred->getLocationContext()->getCFG()->isBlkExpr(Ex));
+
+ const GRState* state = GetState(Pred);
+ SVal X = state->getSVal(Ex);
+
+ assert (X.isUndef());
+
+ Expr *SE = (Expr*) cast<UndefinedVal>(X).getData();
+ assert(SE);
+ X = state->getSVal(SE);
+
+ // Make sure that we invalidate the previous binding.
+ MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, X, true));
+}
+
+/// ProcessEndPath - Called by GRCoreEngine. Used to generate end-of-path
+/// nodes when the control reaches the end of a function.
+void GRExprEngine::ProcessEndPath(GREndPathNodeBuilder& builder) {
+ getTF().EvalEndPath(*this, builder);
+ StateMgr.EndPath(builder.getState());
+ for (CheckersOrdered::iterator I=Checkers.begin(),E=Checkers.end(); I!=E;++I){
+ void *tag = I->first;
+ Checker *checker = I->second;
+ checker->EvalEndPath(builder, tag, *this);
+ }
+}
+
+/// ProcessSwitch - Called by GRCoreEngine. Used to generate successor
+/// nodes by processing the 'effects' of a switch statement.
+void GRExprEngine::ProcessSwitch(GRSwitchNodeBuilder& builder) {
+ typedef GRSwitchNodeBuilder::iterator iterator;
+ const GRState* state = builder.getState();
+ Expr* CondE = builder.getCondition();
+ SVal CondV_untested = state->getSVal(CondE);
+
+ if (CondV_untested.isUndef()) {
+ //ExplodedNode* N = builder.generateDefaultCaseNode(state, true);
+ // FIXME: add checker
+ //UndefBranches.insert(N);
+
+ return;
+ }
+ DefinedOrUnknownSVal CondV = cast<DefinedOrUnknownSVal>(CondV_untested);
+
+ const GRState *DefaultSt = state;
+ bool defaultIsFeasible = false;
+
+ for (iterator I = builder.begin(), EI = builder.end(); I != EI; ++I) {
+ CaseStmt* Case = cast<CaseStmt>(I.getCase());
+
+ // Evaluate the LHS of the case value.
+ Expr::EvalResult V1;
+ bool b = Case->getLHS()->Evaluate(V1, getContext());
+
+ // Sanity checks. These go away in Release builds.
+ assert(b && V1.Val.isInt() && !V1.HasSideEffects
+ && "Case condition must evaluate to an integer constant.");
+ b = b; // silence unused variable warning
+ assert(V1.Val.getInt().getBitWidth() ==
+ getContext().getTypeSize(CondE->getType()));
+
+ // Get the RHS of the case, if it exists.
+ Expr::EvalResult V2;
+
+ if (Expr* E = Case->getRHS()) {
+ b = E->Evaluate(V2, getContext());
+ assert(b && V2.Val.isInt() && !V2.HasSideEffects
+ && "Case condition must evaluate to an integer constant.");
+ b = b; // silence unused variable warning
+ }
+ else
+ V2 = V1;
+
+ // FIXME: Eventually we should replace the logic below with a range
+ // comparison, rather than concretize the values within the range.
+ // This should be easy once we have "ranges" for NonLVals.
+
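+    // For a GNU case range such as 'case 1 ... 3:', this loop tests CondV
+    // against each value in [V1, V2] in turn.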
+ do {
+ nonloc::ConcreteInt CaseVal(getBasicVals().getValue(V1.Val.getInt()));
+ DefinedOrUnknownSVal Res = SVator.EvalEQ(DefaultSt ? DefaultSt : state,
+ CondV, CaseVal);
+
+ // Now "assume" that the case matches.
+ if (const GRState* stateNew = state->Assume(Res, true)) {
+ builder.generateCaseStmtNode(I, stateNew);
+
+ // If CondV evaluates to a constant, then we know that this
+ // is the *only* case that we can take, so stop evaluating the
+ // others.
+ if (isa<nonloc::ConcreteInt>(CondV))
+ return;
+ }
+
+ // Now "assume" that the case doesn't match. Add this state
+ // to the default state (if it is feasible).
+ if (DefaultSt) {
+ if (const GRState *stateNew = DefaultSt->Assume(Res, false)) {
+ defaultIsFeasible = true;
+ DefaultSt = stateNew;
+ }
+ else {
+ defaultIsFeasible = false;
+ DefaultSt = NULL;
+ }
+ }
+
+ // Concretize the next value in the range.
+ if (V1.Val.getInt() == V2.Val.getInt())
+ break;
+
+ ++V1.Val.getInt();
+ assert (V1.Val.getInt() <= V2.Val.getInt());
+
+ } while (true);
+ }
+
+  // If we reach here, then we know that the default branch is
+ // possible.
+ if (defaultIsFeasible) builder.generateDefaultCaseNode(DefaultSt);
+}
+
+void GRExprEngine::ProcessCallEnter(GRCallEnterNodeBuilder &B) {
+ const FunctionDecl *FD = B.getCallee();
+ const StackFrameContext *LocCtx = AMgr.getStackFrame(FD,
+ B.getLocationContext(),
+ B.getCallExpr(),
+ B.getBlock(),
+ B.getIndex());
+
+ const GRState *state = B.getState();
+ state = getStoreManager().EnterStackFrame(state, LocCtx);
+
+ B.GenerateNode(state, LocCtx);
+}
+
+void GRExprEngine::ProcessCallExit(GRCallExitNodeBuilder &B) {
+ const GRState *state = B.getState();
+ const ExplodedNode *Pred = B.getPredecessor();
+ const StackFrameContext *LocCtx =
+ cast<StackFrameContext>(Pred->getLocationContext());
+ const Stmt *CE = LocCtx->getCallSite();
+
+ // If the callee returns an expression, bind its value to CallExpr.
+ const Stmt *ReturnedExpr = state->get<ReturnExpr>();
+ if (ReturnedExpr) {
+ SVal RetVal = state->getSVal(ReturnedExpr);
+ state = state->BindExpr(CE, RetVal);
+ // Clear the return expr GDM.
+ state = state->remove<ReturnExpr>();
+ }
+
+ // Bind the constructed object value to CXXConstructExpr.
+ if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
+ const CXXThisRegion *ThisR = getCXXThisRegion(CCE->getConstructor(),LocCtx);
+ // We might not have 'this' region in the binding if we didn't inline
+ // the ctor call.
+ SVal ThisV = state->getSVal(ThisR);
+ loc::MemRegionVal *V = dyn_cast<loc::MemRegionVal>(&ThisV);
+ if (V) {
+ SVal ObjVal = state->getSVal(V->getRegion());
+ assert(isa<nonloc::LazyCompoundVal>(ObjVal));
+ state = state->BindExpr(CCE, ObjVal);
+ }
+ }
+
+ B.GenerateNode(state);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer functions: logical operations ('&&', '||').
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::VisitLogicalExpr(BinaryOperator* B, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst) {
+
+ assert(B->getOpcode() == BinaryOperator::LAnd ||
+ B->getOpcode() == BinaryOperator::LOr);
+
+ assert(B==CurrentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(B));
+
+ const GRState* state = GetState(Pred);
+ SVal X = state->getSVal(B);
+ assert(X.isUndef());
+
+ const Expr *Ex = (const Expr*) cast<UndefinedVal>(X).getData();
+ assert(Ex);
+
+ if (Ex == B->getRHS()) {
+ X = state->getSVal(Ex);
+
+ // Handle undefined values.
+ if (X.isUndef()) {
+ MakeNode(Dst, B, Pred, state->BindExpr(B, X));
+ return;
+ }
+
+ DefinedOrUnknownSVal XD = cast<DefinedOrUnknownSVal>(X);
+
+ // We took the RHS. Because the value of the '&&' or '||' expression must
+ // evaluate to 0 or 1, we must assume the value of the RHS evaluates to 0
+ // or 1. Alternatively, we could take a lazy approach, and calculate this
+ // value later when necessary. We don't have the machinery in place for
+ // this right now, and since most logical expressions are used for branches,
+ // the payoff is not likely to be large. Instead, we do eager evaluation.
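+    //
+    // For example, when 'x && y' appears in 'int b = (x && y);', reaching
+    // here via the RHS splits the state into successors where the whole
+    // expression is bound to 1 and to 0.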
+ if (const GRState *newState = state->Assume(XD, true))
+ MakeNode(Dst, B, Pred,
+ newState->BindExpr(B, ValMgr.makeIntVal(1U, B->getType())));
+
+ if (const GRState *newState = state->Assume(XD, false))
+ MakeNode(Dst, B, Pred,
+ newState->BindExpr(B, ValMgr.makeIntVal(0U, B->getType())));
+ }
+ else {
+ // We took the LHS expression. Depending on whether we are '&&' or
+ // '||' we know what the value of the expression is via properties of
+ // the short-circuiting.
+ X = ValMgr.makeIntVal(B->getOpcode() == BinaryOperator::LAnd ? 0U : 1U,
+ B->getType());
+ MakeNode(Dst, B, Pred, state->BindExpr(B, X));
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer functions: Loads and stores.
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::VisitBlockExpr(BlockExpr *BE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+
+ ExplodedNodeSet Tmp;
+
+ CanQualType T = getContext().getCanonicalType(BE->getType());
+ SVal V = ValMgr.getBlockPointer(BE->getBlockDecl(), T,
+ Pred->getLocationContext());
+
+ MakeNode(Tmp, BE, Pred, GetState(Pred)->BindExpr(BE, V),
+ ProgramPoint::PostLValueKind);
+
+ // Post-visit the BlockExpr.
+ CheckerVisit(BE, Dst, Tmp, false);
+}
+
+void GRExprEngine::VisitDeclRefExpr(DeclRefExpr *Ex, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst, bool asLValue) {
+ VisitCommonDeclRefExpr(Ex, Ex->getDecl(), Pred, Dst, asLValue);
+}
+
+void GRExprEngine::VisitBlockDeclRefExpr(BlockDeclRefExpr *Ex,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst, bool asLValue) {
+ VisitCommonDeclRefExpr(Ex, Ex->getDecl(), Pred, Dst, asLValue);
+}
+
+void GRExprEngine::VisitCommonDeclRefExpr(Expr *Ex, const NamedDecl *D,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst, bool asLValue) {
+
+ const GRState *state = GetState(Pred);
+
+ if (const VarDecl* VD = dyn_cast<VarDecl>(D)) {
+
+ SVal V = state->getLValue(VD, Pred->getLocationContext());
+
+ if (asLValue) {
+ // For references, the 'lvalue' is the pointer address stored in the
+ // reference region.
+ if (VD->getType()->isReferenceType()) {
+ if (const MemRegion *R = V.getAsRegion())
+ V = state->getSVal(R);
+ else
+ V = UnknownVal();
+ }
+
+ MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V),
+ ProgramPoint::PostLValueKind);
+ }
+ else
+ EvalLoad(Dst, Ex, Pred, state, V);
+
+ return;
+ } else if (const EnumConstantDecl* ED = dyn_cast<EnumConstantDecl>(D)) {
+ assert(!asLValue && "EnumConstantDecl does not have lvalue.");
+
+ SVal V = ValMgr.makeIntVal(ED->getInitVal());
+ MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V));
+ return;
+
+ } else if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(D)) {
+    // This code is valid regardless of the value of 'asLValue'.
+ SVal V = ValMgr.getFunctionPointer(FD);
+ MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V),
+ ProgramPoint::PostLValueKind);
+ return;
+ }
+
+  assert(false &&
+         "Support for this kind of ValueDecl not implemented.");
+}
+
+/// VisitArraySubscriptExpr - Transfer function for array accesses
+void GRExprEngine::VisitArraySubscriptExpr(ArraySubscriptExpr* A,
+ ExplodedNode* Pred,
+ ExplodedNodeSet& Dst, bool asLValue){
+
+ Expr* Base = A->getBase()->IgnoreParens();
+ Expr* Idx = A->getIdx()->IgnoreParens();
+ ExplodedNodeSet Tmp;
+
+ if (Base->getType()->isVectorType()) {
+ // For vector types get its lvalue.
+ // FIXME: This may not be correct. Is the rvalue of a vector its location?
+ // In fact, I think this is just a hack. We need to get the right
+ // semantics.
+ VisitLValue(Base, Pred, Tmp);
+ }
+ else
+    Visit(Base, Pred, Tmp); // Get Base's rvalue, which should be a Loc value.
+
+ for (ExplodedNodeSet::iterator I1=Tmp.begin(), E1=Tmp.end(); I1!=E1; ++I1) {
+ ExplodedNodeSet Tmp2;
+ Visit(Idx, *I1, Tmp2); // Evaluate the index.
+
+ ExplodedNodeSet Tmp3;
+ CheckerVisit(A, Tmp3, Tmp2, true);
+
+ for (ExplodedNodeSet::iterator I2=Tmp3.begin(),E2=Tmp3.end();I2!=E2; ++I2) {
+ const GRState* state = GetState(*I2);
+ SVal V = state->getLValue(A->getType(), state->getSVal(Idx),
+ state->getSVal(Base));
+
+ if (asLValue)
+ MakeNode(Dst, A, *I2, state->BindExpr(A, V),
+ ProgramPoint::PostLValueKind);
+ else
+ EvalLoad(Dst, A, *I2, state, V);
+ }
+ }
+}
+
+/// VisitMemberExpr - Transfer function for member expressions.
+void GRExprEngine::VisitMemberExpr(MemberExpr* M, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst, bool asLValue) {
+
+ Expr* Base = M->getBase()->IgnoreParens();
+ ExplodedNodeSet Tmp;
+
+ if (M->isArrow())
+ Visit(Base, Pred, Tmp); // p->f = ... or ... = p->f
+ else
+ VisitLValue(Base, Pred, Tmp); // x.f = ... or ... = x.f
+
+ FieldDecl *Field = dyn_cast<FieldDecl>(M->getMemberDecl());
+ if (!Field) // FIXME: skipping member expressions for non-fields
+ return;
+
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
+ const GRState* state = GetState(*I);
+ // FIXME: Should we insert some assumption logic in here to determine
+ // if "Base" is a valid piece of memory? Before we put this assumption
+ // later when using FieldOffset lvals (which we no longer have).
+ SVal L = state->getLValue(Field, state->getSVal(Base));
+
+ if (asLValue)
+ MakeNode(Dst, M, *I, state->BindExpr(M, L), ProgramPoint::PostLValueKind);
+ else
+ EvalLoad(Dst, M, *I, state, L);
+ }
+}
+
+/// EvalBind - Handle the semantics of binding a value to a specific location.
+/// This method is used by EvalStore and (soon) VisitDeclStmt, and others.
+void GRExprEngine::EvalBind(ExplodedNodeSet& Dst, Stmt *AssignE,
+ Stmt* StoreE, ExplodedNode* Pred,
+ const GRState* state, SVal location, SVal Val,
+ bool atDeclInit) {
+
+ // Do a previsit of the bind.
+ ExplodedNodeSet CheckedSet, Src;
+ Src.Add(Pred);
+ CheckerVisitBind(AssignE, StoreE, CheckedSet, Src, location, Val, true);
+
+ for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
+ I!=E; ++I) {
+
+ if (Pred != *I)
+ state = GetState(*I);
+
+ const GRState* newState = 0;
+
+ if (atDeclInit) {
+ const VarRegion *VR =
+ cast<VarRegion>(cast<loc::MemRegionVal>(location).getRegion());
+
+ newState = state->bindDecl(VR, Val);
+ }
+ else {
+ if (location.isUnknown()) {
+ // We know that the new state will be the same as the old state since
+        // the location of the binding is "unknown". Consequently, there
+        // is no reason to create a new node; just propagate the old state.
+ newState = state;
+ }
+ else {
+ // We are binding to a value other than 'unknown'. Perform the binding
+ // using the StoreManager.
+ newState = state->bindLoc(cast<Loc>(location), Val);
+ }
+ }
+
+ // The next thing to do is check if the GRTransferFuncs object wants to
+    // update the state based on the new binding. If the GRTransferFuncs object
+ // doesn't do anything, just auto-propagate the current state.
+ GRStmtNodeBuilderRef BuilderRef(Dst, *Builder, *this, *I, newState, StoreE,
+ newState != state);
+
+ getTF().EvalBind(BuilderRef, location, Val);
+ }
+}
+
+/// EvalStore - Handle the semantics of a store via an assignment.
+/// @param Dst The node set to store generated state nodes
+///  @param AssignE The assignment expression, if the store is part of one
+///  @param StoreE The expression representing the location of the store
+///  @param Pred The predecessor node
+/// @param state The current simulation state
+/// @param location The location to store the value
+/// @param Val The value to be stored
+void GRExprEngine::EvalStore(ExplodedNodeSet& Dst, Expr *AssignE,
+ Expr* StoreE,
+ ExplodedNode* Pred,
+ const GRState* state, SVal location, SVal Val,
+ const void *tag) {
+
+ assert(Builder && "GRStmtNodeBuilder must be defined.");
+
+ // Evaluate the location (checks for bad dereferences).
+ ExplodedNodeSet Tmp;
+ EvalLocation(Tmp, StoreE, Pred, state, location, tag, false);
+
+ if (Tmp.empty())
+ return;
+
+ assert(!location.isUndef());
+
+ SaveAndRestore<ProgramPoint::Kind> OldSPointKind(Builder->PointKind,
+ ProgramPoint::PostStoreKind);
+ SaveAndRestore<const void*> OldTag(Builder->Tag, tag);
+
+ // Proceed with the store.
+ for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI)
+ EvalBind(Dst, AssignE, StoreE, *NI, GetState(*NI), location, Val);
+}
+
+void GRExprEngine::EvalLoad(ExplodedNodeSet& Dst, Expr *Ex, ExplodedNode* Pred,
+ const GRState* state, SVal location,
+ const void *tag, QualType LoadTy) {
+
+ // Are we loading from a region? This actually results in two loads; one
+ // to fetch the address of the referenced value and one to fetch the
+ // referenced value.
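+  // For example, for 'int &r = x; int y = r;' the load of 'r' first fetches
+  // the address held in the reference region, then loads the referenced
+  // 'int' from that address.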
+ if (const TypedRegion *TR =
+ dyn_cast_or_null<TypedRegion>(location.getAsRegion())) {
+
+ QualType ValTy = TR->getValueType(getContext());
+ if (const ReferenceType *RT = ValTy->getAs<ReferenceType>()) {
+ static int loadReferenceTag = 0;
+ ExplodedNodeSet Tmp;
+ EvalLoadCommon(Tmp, Ex, Pred, state, location, &loadReferenceTag,
+ getContext().getPointerType(RT->getPointeeType()));
+
+ // Perform the load from the referenced value.
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end() ; I!=E; ++I) {
+ state = GetState(*I);
+ location = state->getSVal(Ex);
+ EvalLoadCommon(Dst, Ex, *I, state, location, tag, LoadTy);
+ }
+ return;
+ }
+ }
+
+ EvalLoadCommon(Dst, Ex, Pred, state, location, tag, LoadTy);
+}
+
+void GRExprEngine::EvalLoadCommon(ExplodedNodeSet& Dst, Expr *Ex,
+ ExplodedNode* Pred,
+ const GRState* state, SVal location,
+ const void *tag, QualType LoadTy) {
+
+ // Evaluate the location (checks for bad dereferences).
+ ExplodedNodeSet Tmp;
+ EvalLocation(Tmp, Ex, Pred, state, location, tag, true);
+
+ if (Tmp.empty())
+ return;
+
+ assert(!location.isUndef());
+
+ SaveAndRestore<ProgramPoint::Kind> OldSPointKind(Builder->PointKind);
+ SaveAndRestore<const void*> OldTag(Builder->Tag);
+
+ // Proceed with the load.
+ for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI) {
+ state = GetState(*NI);
+ if (location.isUnknown()) {
+ // This is important. We must nuke the old binding.
+ MakeNode(Dst, Ex, *NI, state->BindExpr(Ex, UnknownVal()),
+ ProgramPoint::PostLoadKind, tag);
+ }
+ else {
+ SVal V = state->getSVal(cast<Loc>(location), LoadTy.isNull() ?
+ Ex->getType() : LoadTy);
+ MakeNode(Dst, Ex, *NI, state->BindExpr(Ex, V), ProgramPoint::PostLoadKind,
+ tag);
+ }
+ }
+}
+
+void GRExprEngine::EvalLocation(ExplodedNodeSet &Dst, Stmt *S,
+ ExplodedNode* Pred,
+ const GRState* state, SVal location,
+ const void *tag, bool isLoad) {
+  // Early checks for performance reasons.
+ if (location.isUnknown() || Checkers.empty()) {
+ Dst.Add(Pred);
+ return;
+ }
+
+ ExplodedNodeSet Src, Tmp;
+ Src.Add(Pred);
+ ExplodedNodeSet *PrevSet = &Src;
+
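+  // Run the checkers in sequence, ping-ponging between the two scratch sets:
+  // each checker consumes the previous checker's output nodes, and the last
+  // checker writes its results directly into Dst.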
+ for (CheckersOrdered::iterator I=Checkers.begin(),E=Checkers.end(); I!=E; ++I)
+ {
+ ExplodedNodeSet *CurrSet = 0;
+ if (I+1 == E)
+ CurrSet = &Dst;
+ else {
+ CurrSet = (PrevSet == &Tmp) ? &Src : &Tmp;
+ CurrSet->clear();
+ }
+
+ void *tag = I->first;
+ Checker *checker = I->second;
+
+ for (ExplodedNodeSet::iterator NI = PrevSet->begin(), NE = PrevSet->end();
+ NI != NE; ++NI) {
+ // Use the 'state' argument only when the predecessor node is the
+ // same as Pred. This allows us to catch updates to the state.
+ checker->GR_VisitLocation(*CurrSet, *Builder, *this, S, *NI,
+ *NI == Pred ? state : GetState(*NI),
+ location, tag, isLoad);
+ }
+
+ // Update which NodeSet is the current one.
+ PrevSet = CurrSet;
+ }
+}
+
+bool GRExprEngine::InlineCall(ExplodedNodeSet &Dst, const CallExpr *CE,
+ ExplodedNode *Pred) {
+ const GRState *state = GetState(Pred);
+ const Expr *Callee = CE->getCallee();
+ SVal L = state->getSVal(Callee);
+
+ const FunctionDecl *FD = L.getAsFunctionDecl();
+ if (!FD)
+ return false;
+
+ if (!FD->getBody(FD))
+ return false;
+
+ // Now we have the definition of the callee, create a CallEnter node.
+ CallEnter Loc(CE, FD, Pred->getLocationContext());
+
+ ExplodedNode *N = Builder->generateNode(Loc, state, Pred);
+ if (N)
+ Dst.Add(N);
+ return true;
+}
+
+void GRExprEngine::VisitCall(CallExpr* CE, ExplodedNode* Pred,
+ CallExpr::arg_iterator AI,
+ CallExpr::arg_iterator AE,
+ ExplodedNodeSet& Dst, bool asLValue) {
+
+ // Determine the type of function we're calling (if available).
+ const FunctionProtoType *Proto = NULL;
+ QualType FnType = CE->getCallee()->IgnoreParens()->getType();
+ if (const PointerType *FnTypePtr = FnType->getAs<PointerType>())
+ Proto = FnTypePtr->getPointeeType()->getAs<FunctionProtoType>();
+
+ // Create a worklist to process the arguments.
+ llvm::SmallVector<CallExprWLItem, 20> WorkList;
+ WorkList.reserve(AE - AI);
+ WorkList.push_back(CallExprWLItem(AI, Pred));
+
+ ExplodedNodeSet ArgsEvaluated;
+
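+  // Each worklist item pairs the next argument to evaluate with the node
+  // reached so far. Evaluating an argument can fork the state into several
+  // nodes, so items multiply; once an item's iterator reaches AE, the
+  // finished node is collected in ArgsEvaluated.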
+ while (!WorkList.empty()) {
+ CallExprWLItem Item = WorkList.back();
+ WorkList.pop_back();
+
+ if (Item.I == AE) {
+ ArgsEvaluated.insert(Item.N);
+ continue;
+ }
+
+ // Evaluate the argument.
+ ExplodedNodeSet Tmp;
+ const unsigned ParamIdx = Item.I - AI;
+
+ bool VisitAsLvalue = false;
+ if (Proto && ParamIdx < Proto->getNumArgs())
+ VisitAsLvalue = Proto->getArgType(ParamIdx)->isReferenceType();
+
+ if (VisitAsLvalue)
+ VisitLValue(*Item.I, Item.N, Tmp);
+ else
+ Visit(*Item.I, Item.N, Tmp);
+
+ // Enqueue evaluating the next argument on the worklist.
+ ++(Item.I);
+
+ for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI)
+ WorkList.push_back(CallExprWLItem(Item.I, *NI));
+ }
+
+ // Now process the call itself.
+ ExplodedNodeSet DstTmp;
+ Expr* Callee = CE->getCallee()->IgnoreParens();
+
+ for (ExplodedNodeSet::iterator NI=ArgsEvaluated.begin(),
+ NE=ArgsEvaluated.end(); NI != NE; ++NI) {
+ // Evaluate the callee.
+ ExplodedNodeSet DstTmp2;
+ Visit(Callee, *NI, DstTmp2);
+ // Perform the previsit of the CallExpr, storing the results in DstTmp.
+ CheckerVisit(CE, DstTmp, DstTmp2, true);
+ }
+
+ // Finally, evaluate the function call. We try each of the checkers
+  // to see if they can evaluate the function call.
+  ExplodedNodeSet DstTmp3;
+
+ for (ExplodedNodeSet::iterator DI = DstTmp.begin(), DE = DstTmp.end();
+ DI != DE; ++DI) {
+
+ const GRState* state = GetState(*DI);
+ SVal L = state->getSVal(Callee);
+
+ // FIXME: Add support for symbolic function calls (calls involving
+ // function pointer values that are symbolic).
+ SaveAndRestore<bool> OldSink(Builder->BuildSinks);
+ ExplodedNodeSet DstChecker;
+
+    // If the callee is processed by a checker, skip the rest of the logic.
+ if (CheckerEvalCall(CE, DstChecker, *DI))
+ DstTmp3.insert(DstChecker);
+ else if (AMgr.shouldInlineCall() && InlineCall(Dst, CE, *DI)) {
+ // Callee is inlined. We shouldn't do post call checking.
+ return;
+ }
+ else {
+ for (ExplodedNodeSet::iterator DI_Checker = DstChecker.begin(),
+ DE_Checker = DstChecker.end();
+ DI_Checker != DE_Checker; ++DI_Checker) {
+
+ // Dispatch to the plug-in transfer function.
+ unsigned OldSize = DstTmp3.size();
+ SaveOr OldHasGen(Builder->HasGeneratedNode);
+ Pred = *DI_Checker;
+
+ // Dispatch to transfer function logic to handle the call itself.
+ // FIXME: Allow us to chain together transfer functions.
+ assert(Builder && "GRStmtNodeBuilder must be defined.");
+ getTF().EvalCall(DstTmp3, *this, *Builder, CE, L, Pred);
+
+        // Handle the case where no nodes were generated. Auto-generate a
+        // node that contains the updated state if we aren't generating sinks.
+ if (!Builder->BuildSinks && DstTmp3.size() == OldSize &&
+ !Builder->HasGeneratedNode)
+ MakeNode(DstTmp3, CE, Pred, state);
+ }
+ }
+ }
+
+ // Finally, perform the post-condition check of the CallExpr and store
+ // the created nodes in 'Dst'.
+
+ if (!(!asLValue && CalleeReturnsReference(CE))) {
+ CheckerVisit(CE, Dst, DstTmp3, false);
+ return;
+ }
+
+ // Handle the case where the called function returns a reference but
+ // we expect an rvalue. For such cases, convert the reference to
+ // an rvalue.
+ // FIXME: This conversion doesn't actually happen unless the result
+ // of CallExpr is consumed by another expression.
+ ExplodedNodeSet DstTmp4;
+ CheckerVisit(CE, DstTmp4, DstTmp3, false);
+ QualType LoadTy = CE->getType();
+
+ static int *ConvertToRvalueTag = 0;
+ for (ExplodedNodeSet::iterator NI = DstTmp4.begin(), NE = DstTmp4.end();
+ NI!=NE; ++NI) {
+ const GRState *state = GetState(*NI);
+ EvalLoad(Dst, CE, *NI, state, state->getSVal(CE),
+ &ConvertToRvalueTag, LoadTy);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function: eagerly assume the truth value of expressions.
+//===----------------------------------------------------------------------===//
+
+static std::pair<const void*,const void*> EagerlyAssumeTag
+ = std::pair<const void*,const void*>(&EagerlyAssumeTag,static_cast<void*>(0));
+
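+/// EvalEagerlyAssume - For each node in Src whose expression 'Ex' evaluated
+/// to a symbolic expression (e.g. 'x == y'), eagerly bifurcate the state,
+/// binding 1 to 'Ex' where the assumption holds and 0 where it does not.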
+void GRExprEngine::EvalEagerlyAssume(ExplodedNodeSet &Dst, ExplodedNodeSet &Src,
+ Expr *Ex) {
+ for (ExplodedNodeSet::iterator I=Src.begin(), E=Src.end(); I!=E; ++I) {
+ ExplodedNode *Pred = *I;
+
+    // Test if the previous node was at the same expression. This can happen
+ // when the expression fails to evaluate to anything meaningful and
+ // (as an optimization) we don't generate a node.
+ ProgramPoint P = Pred->getLocation();
+ if (!isa<PostStmt>(P) || cast<PostStmt>(P).getStmt() != Ex) {
+ Dst.Add(Pred);
+ continue;
+ }
+
+ const GRState* state = GetState(Pred);
+ SVal V = state->getSVal(Ex);
+ if (nonloc::SymExprVal *SEV = dyn_cast<nonloc::SymExprVal>(&V)) {
+ // First assume that the condition is true.
+ if (const GRState *stateTrue = state->Assume(*SEV, true)) {
+ stateTrue = stateTrue->BindExpr(Ex,
+ ValMgr.makeIntVal(1U, Ex->getType()));
+ Dst.Add(Builder->generateNode(PostStmtCustom(Ex,
+ &EagerlyAssumeTag, Pred->getLocationContext()),
+ stateTrue, Pred));
+ }
+
+ // Next, assume that the condition is false.
+ if (const GRState *stateFalse = state->Assume(*SEV, false)) {
+ stateFalse = stateFalse->BindExpr(Ex,
+ ValMgr.makeIntVal(0U, Ex->getType()));
+ Dst.Add(Builder->generateNode(PostStmtCustom(Ex, &EagerlyAssumeTag,
+ Pred->getLocationContext()),
+ stateFalse, Pred));
+ }
+ }
+ else
+ Dst.Add(Pred);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function: Objective-C ivar references.
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::VisitObjCIvarRefExpr(ObjCIvarRefExpr* Ex, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst, bool asLValue) {
+
+ Expr* Base = cast<Expr>(Ex->getBase());
+ ExplodedNodeSet Tmp;
+ Visit(Base, Pred, Tmp);
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ const GRState* state = GetState(*I);
+ SVal BaseVal = state->getSVal(Base);
+ SVal location = state->getLValue(Ex->getDecl(), BaseVal);
+
+ if (asLValue)
+ MakeNode(Dst, Ex, *I, state->BindExpr(Ex, location));
+ else
+ EvalLoad(Dst, Ex, *I, state, location);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function: Objective-C fast enumeration 'for' statements.
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S,
+ ExplodedNode* Pred, ExplodedNodeSet& Dst) {
+
+ // ObjCForCollectionStmts are processed in two places. This method
+ // handles the case where an ObjCForCollectionStmt* occurs as one of the
+ // statements within a basic block. This transfer function does two things:
+ //
+ // (1) binds the next container value to 'element'. This creates a new
+ // node in the ExplodedGraph.
+ //
+ // (2) binds the value 0/1 to the ObjCForCollectionStmt* itself, indicating
+ // whether or not the container has any more elements. This value
+ // will be tested in ProcessBranch. We need to explicitly bind
+ // this value because a container can contain nil elements.
+ //
+ // FIXME: Eventually this logic should actually do dispatches to
+ // 'countByEnumeratingWithState:objects:count:' (NSFastEnumeration).
+ // This will require simulating a temporary NSFastEnumerationState, either
+ // through an SVal or through the use of MemRegions. This value can
+ // be affixed to the ObjCForCollectionStmt* instead of 0/1; when the loop
+ // terminates we reclaim the temporary (it goes out of scope) and we
+  // can test if the SVal is 0 or if the MemRegion is null (depending
+ // on what approach we take).
+ //
+ // For now: simulate (1) by assigning either a symbol or nil if the
+ // container is empty. Thus this transfer function will by default
+ // result in state splitting.
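+  //
+  // For example, for 'for (id x in container) { ... }' we bind 'x' to a
+  // fresh conjured symbol and the statement to 1 on the "has elements"
+  // branch, and bind 'x' to nil and the statement to 0 on the "empty"
+  // branch.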
+
+ Stmt* elem = S->getElement();
+ SVal ElementV;
+
+ if (DeclStmt* DS = dyn_cast<DeclStmt>(elem)) {
+ VarDecl* ElemD = cast<VarDecl>(DS->getSingleDecl());
+ assert (ElemD->getInit() == 0);
+ ElementV = GetState(Pred)->getLValue(ElemD, Pred->getLocationContext());
+ VisitObjCForCollectionStmtAux(S, Pred, Dst, ElementV);
+ return;
+ }
+
+ ExplodedNodeSet Tmp;
+ VisitLValue(cast<Expr>(elem), Pred, Tmp);
+
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I!=E; ++I) {
+ const GRState* state = GetState(*I);
+ VisitObjCForCollectionStmtAux(S, *I, Dst, state->getSVal(elem));
+ }
+}
+
+void GRExprEngine::VisitObjCForCollectionStmtAux(ObjCForCollectionStmt* S,
+ ExplodedNode* Pred, ExplodedNodeSet& Dst,
+ SVal ElementV) {
+
+ // Check if the location we are writing back to is a null pointer.
+ Stmt* elem = S->getElement();
+ ExplodedNodeSet Tmp;
+ EvalLocation(Tmp, elem, Pred, GetState(Pred), ElementV, NULL, false);
+
+ if (Tmp.empty())
+ return;
+
+ for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI) {
+ Pred = *NI;
+ const GRState *state = GetState(Pred);
+
+ // Handle the case where the container still has elements.
+ SVal TrueV = ValMgr.makeTruthVal(1);
+ const GRState *hasElems = state->BindExpr(S, TrueV);
+
+ // Handle the case where the container has no elements.
+ SVal FalseV = ValMgr.makeTruthVal(0);
+ const GRState *noElems = state->BindExpr(S, FalseV);
+
+ if (loc::MemRegionVal* MV = dyn_cast<loc::MemRegionVal>(&ElementV))
+ if (const TypedRegion* R = dyn_cast<TypedRegion>(MV->getRegion())) {
+ // FIXME: The proper thing to do is to really iterate over the
+ // container. We will do this with dispatch logic to the store.
+ // For now, just 'conjure' up a symbolic value.
+ QualType T = R->getValueType(getContext());
+ assert(Loc::IsLocType(T));
+ unsigned Count = Builder->getCurrentBlockCount();
+ SymbolRef Sym = SymMgr.getConjuredSymbol(elem, T, Count);
+ SVal V = ValMgr.makeLoc(Sym);
+ hasElems = hasElems->bindLoc(ElementV, V);
+
+ // Bind the location to 'nil' on the false branch.
+ SVal nilV = ValMgr.makeIntVal(0, T);
+ noElems = noElems->bindLoc(ElementV, nilV);
+ }
+
+ // Create the new nodes.
+ MakeNode(Dst, S, Pred, hasElems);
+ MakeNode(Dst, S, Pred, noElems);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function: Objective-C message expressions.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCMsgWLItem {
+public:
+ ObjCMessageExpr::arg_iterator I;
+ ExplodedNode *N;
+
+ ObjCMsgWLItem(const ObjCMessageExpr::arg_iterator &i, ExplodedNode *n)
+ : I(i), N(n) {}
+};
+} // end anonymous namespace
+
+void GRExprEngine::VisitObjCMessageExpr(ObjCMessageExpr* ME, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst, bool asLValue){
+
+ // Create a worklist to process both the arguments.
+ llvm::SmallVector<ObjCMsgWLItem, 20> WL;
+
+ // But first evaluate the receiver (if any).
+ ObjCMessageExpr::arg_iterator AI = ME->arg_begin(), AE = ME->arg_end();
+ if (Expr *Receiver = ME->getInstanceReceiver()) {
+ ExplodedNodeSet Tmp;
+ Visit(Receiver, Pred, Tmp);
+
+ if (Tmp.empty())
+ return;
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I)
+ WL.push_back(ObjCMsgWLItem(AI, *I));
+ }
+ else
+ WL.push_back(ObjCMsgWLItem(AI, Pred));
+
+ // Evaluate the arguments.
+ ExplodedNodeSet ArgsEvaluated;
+ while (!WL.empty()) {
+ ObjCMsgWLItem Item = WL.back();
+ WL.pop_back();
+
+ if (Item.I == AE) {
+ ArgsEvaluated.insert(Item.N);
+ continue;
+ }
+
+ // Evaluate the subexpression.
+ ExplodedNodeSet Tmp;
+
+ // FIXME: [Objective-C++] handle arguments that are references
+ Visit(*Item.I, Item.N, Tmp);
+
+ // Enqueue evaluating the next argument on the worklist.
+ ++(Item.I);
+ for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI)
+ WL.push_back(ObjCMsgWLItem(Item.I, *NI));
+ }
+
+ // Now that the arguments are processed, handle the previsits checks.
+ ExplodedNodeSet DstPrevisit;
+ CheckerVisit(ME, DstPrevisit, ArgsEvaluated, true);
+
+  // Proceed with evaluating the message expression.
+ ExplodedNodeSet DstEval;
+
+ for (ExplodedNodeSet::iterator DI = DstPrevisit.begin(),
+ DE = DstPrevisit.end(); DI != DE; ++DI) {
+
+ Pred = *DI;
+ bool RaisesException = false;
+ unsigned OldSize = DstEval.size();
+ SaveAndRestore<bool> OldSink(Builder->BuildSinks);
+ SaveOr OldHasGen(Builder->HasGeneratedNode);
+
+ if (const Expr *Receiver = ME->getInstanceReceiver()) {
+ const GRState *state = GetState(Pred);
+
+ // Bifurcate the state into nil and non-nil ones.
+ DefinedOrUnknownSVal receiverVal =
+ cast<DefinedOrUnknownSVal>(state->getSVal(Receiver));
+
+ const GRState *notNilState, *nilState;
+ llvm::tie(notNilState, nilState) = state->Assume(receiverVal);
+
+      // There are three cases: can be nil or non-nil, must be nil, must be
+      // non-nil. We handle "must be nil" here, and merge the other two
+      // cases into the non-nil case.
+ if (nilState && !notNilState) {
+ CheckerEvalNilReceiver(ME, DstEval, nilState, Pred);
+ continue;
+ }
+
+ // Check if the "raise" message was sent.
+ assert(notNilState);
+ if (ME->getSelector() == RaiseSel)
+ RaisesException = true;
+
+ // Check if we raise an exception. For now treat these as sinks.
+ // Eventually we will want to handle exceptions properly.
+ if (RaisesException)
+ Builder->BuildSinks = true;
+
+ // Dispatch to plug-in transfer function.
+ EvalObjCMessageExpr(DstEval, ME, Pred, notNilState);
+ }
+ else if (ObjCInterfaceDecl *Iface = ME->getReceiverInterface()) {
+ IdentifierInfo* ClsName = Iface->getIdentifier();
+ Selector S = ME->getSelector();
+
+ // Check for special instance methods.
+ if (!NSExceptionII) {
+ ASTContext& Ctx = getContext();
+ NSExceptionII = &Ctx.Idents.get("NSException");
+ }
+
+ if (ClsName == NSExceptionII) {
+ enum { NUM_RAISE_SELECTORS = 2 };
+
+ // Lazily create a cache of the selectors.
+ if (!NSExceptionInstanceRaiseSelectors) {
+ ASTContext& Ctx = getContext();
+ NSExceptionInstanceRaiseSelectors =
+ new Selector[NUM_RAISE_SELECTORS];
+ llvm::SmallVector<IdentifierInfo*, NUM_RAISE_SELECTORS> II;
+ unsigned idx = 0;
+
+ // raise:format:
+ II.push_back(&Ctx.Idents.get("raise"));
+ II.push_back(&Ctx.Idents.get("format"));
+ NSExceptionInstanceRaiseSelectors[idx++] =
+ Ctx.Selectors.getSelector(II.size(), &II[0]);
+
+          // raise:format:arguments:
+ II.push_back(&Ctx.Idents.get("arguments"));
+ NSExceptionInstanceRaiseSelectors[idx++] =
+ Ctx.Selectors.getSelector(II.size(), &II[0]);
+ }
+
+ for (unsigned i = 0; i < NUM_RAISE_SELECTORS; ++i)
+ if (S == NSExceptionInstanceRaiseSelectors[i]) {
+ RaisesException = true;
+ break;
+ }
+ }
+
+ // Check if we raise an exception. For now treat these as sinks.
+ // Eventually we will want to handle exceptions properly.
+ if (RaisesException)
+ Builder->BuildSinks = true;
+
+ // Dispatch to plug-in transfer function.
+ EvalObjCMessageExpr(DstEval, ME, Pred, Builder->GetState(Pred));
+ }
+
+    // Handle the case where no nodes were generated. Auto-generate a
+    // node that contains the updated state if we aren't generating sinks.
+ if (!Builder->BuildSinks && DstEval.size() == OldSize &&
+ !Builder->HasGeneratedNode)
+ MakeNode(DstEval, ME, Pred, GetState(Pred));
+ }
+
+ // Finally, perform the post-condition check of the ObjCMessageExpr and store
+ // the created nodes in 'Dst'.
+ if (!(!asLValue && ReceiverReturnsReference(ME))) {
+ CheckerVisit(ME, Dst, DstEval, false);
+ return;
+ }
+
+ // Handle the case where the message expression returns a reference but
+ // we expect an rvalue. For such cases, convert the reference to
+ // an rvalue.
+ // FIXME: This conversion doesn't actually happen unless the result
+ // of ObjCMessageExpr is consumed by another expression.
+ ExplodedNodeSet DstRValueConvert;
+ CheckerVisit(ME, DstRValueConvert, DstEval, false);
+ QualType LoadTy = ME->getType();
+
+ static int *ConvertToRvalueTag = 0;
+ for (ExplodedNodeSet::iterator NI = DstRValueConvert.begin(),
+ NE = DstRValueConvert.end(); NI != NE; ++NI) {
+ const GRState *state = GetState(*NI);
+ EvalLoad(Dst, ME, *NI, state, state->getSVal(ME),
+ &ConvertToRvalueTag, LoadTy);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer functions: Miscellaneous statements.
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::VisitCast(CastExpr *CastE, Expr *Ex, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst, bool asLValue) {
+ ExplodedNodeSet S1;
+ QualType T = CastE->getType();
+ QualType ExTy = Ex->getType();
+
+ if (const ExplicitCastExpr *ExCast=dyn_cast_or_null<ExplicitCastExpr>(CastE))
+ T = ExCast->getTypeAsWritten();
+
+ if (ExTy->isArrayType() || ExTy->isFunctionType() || T->isReferenceType() ||
+ asLValue)
+ VisitLValue(Ex, Pred, S1);
+ else
+ Visit(Ex, Pred, S1);
+
+ ExplodedNodeSet S2;
+ CheckerVisit(CastE, S2, S1, true);
+
+ // If we are evaluating the cast in an lvalue context, we implicitly want
+ // the cast to evaluate to a location.
+ if (asLValue) {
+ ASTContext &Ctx = getContext();
+ T = Ctx.getPointerType(Ctx.getCanonicalType(T));
+ ExTy = Ctx.getPointerType(Ctx.getCanonicalType(ExTy));
+ }
+
+ switch (CastE->getCastKind()) {
+ case CastExpr::CK_ToVoid:
+ assert(!asLValue);
+ for (ExplodedNodeSet::iterator I = S2.begin(), E = S2.end(); I != E; ++I)
+ Dst.Add(*I);
+ return;
+
+ case CastExpr::CK_NoOp:
+ case CastExpr::CK_FunctionToPointerDecay:
+ for (ExplodedNodeSet::iterator I = S2.begin(), E = S2.end(); I != E; ++I) {
+ // Copy the SVal of Ex to CastE.
+ ExplodedNode *N = *I;
+ const GRState *state = GetState(N);
+ SVal V = state->getSVal(Ex);
+ state = state->BindExpr(CastE, V);
+ MakeNode(Dst, CastE, N, state);
+ }
+ return;
+
+ case CastExpr::CK_Unknown:
+ case CastExpr::CK_ArrayToPointerDecay:
+ case CastExpr::CK_BitCast:
+ case CastExpr::CK_IntegralCast:
+ case CastExpr::CK_IntegralToPointer:
+ case CastExpr::CK_PointerToIntegral:
+ case CastExpr::CK_IntegralToFloating:
+ case CastExpr::CK_FloatingToIntegral:
+ case CastExpr::CK_FloatingCast:
+ case CastExpr::CK_AnyPointerToObjCPointerCast:
+ case CastExpr::CK_AnyPointerToBlockPointerCast:
+ case CastExpr::CK_DerivedToBase:
+ case CastExpr::CK_UncheckedDerivedToBase:
+ // Delegate to SValuator to process.
+ for (ExplodedNodeSet::iterator I = S2.begin(), E = S2.end(); I != E; ++I) {
+ ExplodedNode* N = *I;
+ const GRState* state = GetState(N);
+ SVal V = state->getSVal(Ex);
+ V = SVator.EvalCast(V, T, ExTy);
+ state = state->BindExpr(CastE, V);
+ MakeNode(Dst, CastE, N, state);
+ }
+ return;
+
+ default:
+ llvm::errs() << "Cast kind " << CastE->getCastKind() << " not handled.\n";
+ assert(0);
+ }
+}
+
+void GRExprEngine::VisitCompoundLiteralExpr(CompoundLiteralExpr* CL,
+ ExplodedNode* Pred,
+ ExplodedNodeSet& Dst,
+ bool asLValue) {
+ InitListExpr* ILE = cast<InitListExpr>(CL->getInitializer()->IgnoreParens());
+ ExplodedNodeSet Tmp;
+ Visit(ILE, Pred, Tmp);
+
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), EI = Tmp.end(); I!=EI; ++I) {
+ const GRState* state = GetState(*I);
+ SVal ILV = state->getSVal(ILE);
+ const LocationContext *LC = (*I)->getLocationContext();
+ state = state->bindCompoundLiteral(CL, LC, ILV);
+
+ if (asLValue) {
+ MakeNode(Dst, CL, *I, state->BindExpr(CL, state->getLValue(CL, LC)));
+ }
+ else
+ MakeNode(Dst, CL, *I, state->BindExpr(CL, ILV));
+ }
+}
+
+void GRExprEngine::VisitDeclStmt(DeclStmt *DS, ExplodedNode *Pred,
+ ExplodedNodeSet& Dst) {
+
+ // The CFG has one DeclStmt per Decl.
+ Decl* D = *DS->decl_begin();
+
+ if (!D || !isa<VarDecl>(D))
+ return;
+
+ const VarDecl* VD = dyn_cast<VarDecl>(D);
+ Expr* InitEx = const_cast<Expr*>(VD->getInit());
+
+ // FIXME: static variables may have an initializer, but the second
+ // time a function is called those values may not be current.
+ ExplodedNodeSet Tmp;
+
+ if (InitEx) {
+ QualType InitTy = InitEx->getType();
+ if (getContext().getLangOptions().CPlusPlus && InitTy->isRecordType()) {
+      // Delegate evaluation of expressions of C++ record type to AggExprVisitor.
+ VisitAggExpr(InitEx, GetState(Pred)->getLValue(VD,
+ Pred->getLocationContext()), Pred, Dst);
+ return;
+ } else if (VD->getType()->isReferenceType())
+ VisitLValue(InitEx, Pred, Tmp);
+ else
+ Visit(InitEx, Pred, Tmp);
+ }
+ else
+ Tmp.Add(Pred);
+
+ ExplodedNodeSet Tmp2;
+ CheckerVisit(DS, Tmp2, Tmp, true);
+
+ for (ExplodedNodeSet::iterator I=Tmp2.begin(), E=Tmp2.end(); I!=E; ++I) {
+ ExplodedNode *N = *I;
+ const GRState *state = GetState(N);
+
+ // Decls without InitExpr are not initialized explicitly.
+ const LocationContext *LC = N->getLocationContext();
+
+ if (InitEx) {
+ SVal InitVal = state->getSVal(InitEx);
+
+ // Recover some path-sensitivity if a scalar value evaluated to
+ // UnknownVal.
+ if ((InitVal.isUnknown() ||
+ !getConstraintManager().canReasonAbout(InitVal)) &&
+ !VD->getType()->isReferenceType()) {
+ InitVal = ValMgr.getConjuredSymbolVal(NULL, InitEx,
+ Builder->getCurrentBlockCount());
+ }
+
+ EvalBind(Dst, DS, DS, *I, state,
+ loc::MemRegionVal(state->getRegion(VD, LC)), InitVal, true);
+ }
+ else {
+ state = state->bindDeclWithNoInit(state->getRegion(VD, LC));
+ MakeNode(Dst, DS, *I, state);
+ }
+ }
+}
+
+void GRExprEngine::VisitCondInit(VarDecl *VD, Stmt *S,
+ ExplodedNode *Pred, ExplodedNodeSet& Dst) {
+
+ Expr* InitEx = VD->getInit();
+ ExplodedNodeSet Tmp;
+ Visit(InitEx, Pred, Tmp);
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ ExplodedNode *N = *I;
+ const GRState *state = GetState(N);
+
+ const LocationContext *LC = N->getLocationContext();
+ SVal InitVal = state->getSVal(InitEx);
+
+ // Recover some path-sensitivity if a scalar value evaluated to
+ // UnknownVal.
+ if (InitVal.isUnknown() ||
+ !getConstraintManager().canReasonAbout(InitVal)) {
+ InitVal = ValMgr.getConjuredSymbolVal(NULL, InitEx,
+ Builder->getCurrentBlockCount());
+ }
+
+ EvalBind(Dst, S, S, N, state,
+ loc::MemRegionVal(state->getRegion(VD, LC)), InitVal, true);
+ }
+}
+
+namespace {
+ // This class is used by VisitInitListExpr as an item in a worklist
+ // for processing the values contained in an InitListExpr.
+class InitListWLItem {
+public:
+ llvm::ImmutableList<SVal> Vals;
+ ExplodedNode* N;
+ InitListExpr::reverse_iterator Itr;
+
+ InitListWLItem(ExplodedNode* n, llvm::ImmutableList<SVal> vals,
+ InitListExpr::reverse_iterator itr)
+ : Vals(vals), N(n), Itr(itr) {}
+};
+}
+
+
+void GRExprEngine::VisitInitListExpr(InitListExpr* E, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst) {
+
+ const GRState* state = GetState(Pred);
+ QualType T = getContext().getCanonicalType(E->getType());
+ unsigned NumInitElements = E->getNumInits();
+
+ if (T->isArrayType() || T->isRecordType() || T->isVectorType()) {
+ llvm::ImmutableList<SVal> StartVals = getBasicVals().getEmptySValList();
+
+ // Handle base case where the initializer has no elements.
+ // e.g: static int* myArray[] = {};
+ if (NumInitElements == 0) {
+ SVal V = ValMgr.makeCompoundVal(T, StartVals);
+ MakeNode(Dst, E, Pred, state->BindExpr(E, V));
+ return;
+ }
+
+ // Create a worklist to process the initializers.
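+    // Note that the initializers are visited in reverse order; because
+    // consVals() prepends, the final immutable list ends up in source order.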
+ llvm::SmallVector<InitListWLItem, 10> WorkList;
+ WorkList.reserve(NumInitElements);
+ WorkList.push_back(InitListWLItem(Pred, StartVals, E->rbegin()));
+ InitListExpr::reverse_iterator ItrEnd = E->rend();
+    assert(E->rbegin() != E->rend());
+
+ // Process the worklist until it is empty.
+ while (!WorkList.empty()) {
+ InitListWLItem X = WorkList.back();
+ WorkList.pop_back();
+
+ ExplodedNodeSet Tmp;
+ Visit(*X.Itr, X.N, Tmp);
+
+ InitListExpr::reverse_iterator NewItr = X.Itr + 1;
+
+ for (ExplodedNodeSet::iterator NI=Tmp.begin(),NE=Tmp.end();NI!=NE;++NI) {
+ // Get the last initializer value.
+ state = GetState(*NI);
+ SVal InitV = state->getSVal(cast<Expr>(*X.Itr));
+
+ // Construct the new list of values by prepending the new value to
+ // the already constructed list.
+ llvm::ImmutableList<SVal> NewVals =
+ getBasicVals().consVals(InitV, X.Vals);
+
+ if (NewItr == ItrEnd) {
+ // Now we have a list holding all init values. Make CompoundValData.
+ SVal V = ValMgr.makeCompoundVal(T, NewVals);
+
+ // Make final state and node.
+ MakeNode(Dst, E, *NI, state->BindExpr(E, V));
+ }
+ else {
+ // Still some initializer values to go. Push them onto the worklist.
+ WorkList.push_back(InitListWLItem(*NI, NewVals, NewItr));
+ }
+ }
+ }
+
+ return;
+ }
+
+ if (Loc::IsLocType(T) || T->isIntegerType()) {
+ assert (E->getNumInits() == 1);
+ ExplodedNodeSet Tmp;
+ Expr* Init = E->getInit(0);
+ Visit(Init, Pred, Tmp);
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), EI=Tmp.end(); I != EI; ++I) {
+ state = GetState(*I);
+ MakeNode(Dst, E, *I, state->BindExpr(E, state->getSVal(Init)));
+ }
+ return;
+ }
+
+ assert(0 && "unprocessed InitListExpr type");
+}
+
+/// VisitSizeOfAlignOfExpr - Transfer function for sizeof and alignof.
+void GRExprEngine::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr* Ex,
+ ExplodedNode* Pred,
+ ExplodedNodeSet& Dst) {
+ QualType T = Ex->getTypeOfArgument();
+ CharUnits amt;
+
+ if (Ex->isSizeOf()) {
+ if (T == getContext().VoidTy) {
+ // sizeof(void) == 1 byte.
+ amt = CharUnits::One();
+ }
+ else if (!T.getTypePtr()->isConstantSizeType()) {
+ // FIXME: Add support for VLAs.
+ Dst.Add(Pred);
+ return;
+ }
+ else if (T->getAs<ObjCObjectType>()) {
+      // Some code tries to take the sizeof an ObjCObjectType, relying on
+      // the compiler having laid out its representation.  Just report
+      // Unknown for these.
+ Dst.Add(Pred);
+ return;
+ }
+ else {
+ // All other cases.
+ amt = getContext().getTypeSizeInChars(T);
+ }
+ }
+ else // Get alignment of the type.
+ amt = getContext().getTypeAlignInChars(T);
+
+ MakeNode(Dst, Ex, Pred,
+ GetState(Pred)->BindExpr(Ex,
+ ValMgr.makeIntVal(amt.getQuantity(), Ex->getType())));
+}
+
+void GRExprEngine::VisitOffsetOfExpr(OffsetOfExpr* OOE, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst) {
+ Expr::EvalResult Res;
+ if (OOE->Evaluate(Res, getContext()) && Res.Val.isInt()) {
+ const APSInt &IV = Res.Val.getInt();
+ assert(IV.getBitWidth() == getContext().getTypeSize(OOE->getType()));
+ assert(OOE->getType()->isIntegerType());
+ assert(IV.isSigned() == OOE->getType()->isSignedIntegerType());
+ SVal X = ValMgr.makeIntVal(IV);
+ MakeNode(Dst, OOE, Pred, GetState(Pred)->BindExpr(OOE, X));
+ return;
+ }
+ // FIXME: Handle the case where __builtin_offsetof is not a constant.
+ Dst.Add(Pred);
+}
+
+void GRExprEngine::VisitUnaryOperator(UnaryOperator* U, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst, bool asLValue) {
+
+ switch (U->getOpcode()) {
+
+ default:
+ break;
+
+ case UnaryOperator::Deref: {
+
+ Expr* Ex = U->getSubExpr()->IgnoreParens();
+ ExplodedNodeSet Tmp;
+ Visit(Ex, Pred, Tmp);
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
+ const GRState* state = GetState(*I);
+ SVal location = state->getSVal(Ex);
+
+ if (asLValue)
+ MakeNode(Dst, U, *I, state->BindExpr(U, location),
+ ProgramPoint::PostLValueKind);
+ else
+ EvalLoad(Dst, U, *I, state, location);
+ }
+
+ return;
+ }
+
+ case UnaryOperator::Real: {
+
+ Expr* Ex = U->getSubExpr()->IgnoreParens();
+ ExplodedNodeSet Tmp;
+ Visit(Ex, Pred, Tmp);
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
+ // FIXME: We don't have complex SValues yet.
+ if (Ex->getType()->isAnyComplexType()) {
+ // Just report "Unknown."
+ Dst.Add(*I);
+ continue;
+ }
+
+ // For all other types, UnaryOperator::Real is an identity operation.
+ assert (U->getType() == Ex->getType());
+ const GRState* state = GetState(*I);
+ MakeNode(Dst, U, *I, state->BindExpr(U, state->getSVal(Ex)));
+ }
+
+ return;
+ }
+
+ case UnaryOperator::Imag: {
+
+ Expr* Ex = U->getSubExpr()->IgnoreParens();
+ ExplodedNodeSet Tmp;
+ Visit(Ex, Pred, Tmp);
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ // FIXME: We don't have complex SValues yet.
+ if (Ex->getType()->isAnyComplexType()) {
+ // Just report "Unknown."
+ Dst.Add(*I);
+ continue;
+ }
+
+      // For all other types, UnaryOperator::Imag returns 0.
+ assert (Ex->getType()->isIntegerType());
+ const GRState* state = GetState(*I);
+ SVal X = ValMgr.makeZeroVal(Ex->getType());
+ MakeNode(Dst, U, *I, state->BindExpr(U, X));
+ }
+
+ return;
+ }
+
+ case UnaryOperator::OffsetOf: {
+ Expr::EvalResult Res;
+ if (U->Evaluate(Res, getContext()) && Res.Val.isInt()) {
+ const APSInt &IV = Res.Val.getInt();
+ assert(IV.getBitWidth() == getContext().getTypeSize(U->getType()));
+ assert(U->getType()->isIntegerType());
+ assert(IV.isSigned() == U->getType()->isSignedIntegerType());
+ SVal X = ValMgr.makeIntVal(IV);
+ MakeNode(Dst, U, Pred, GetState(Pred)->BindExpr(U, X));
+ return;
+ }
+ // FIXME: Handle the case where __builtin_offsetof is not a constant.
+ Dst.Add(Pred);
+ return;
+ }
+
+ case UnaryOperator::Plus: assert (!asLValue); // FALL-THROUGH.
+ case UnaryOperator::Extension: {
+
+    // Unary "+" (and "__extension__") is a no-op, similar to parentheses.
+    // We still have places where it may be a block-level expression, so we
+    // need to generate an extra node that just propagates the value of the
+    // subexpression.
+
+ Expr* Ex = U->getSubExpr()->IgnoreParens();
+ ExplodedNodeSet Tmp;
+ Visit(Ex, Pred, Tmp);
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ const GRState* state = GetState(*I);
+ MakeNode(Dst, U, *I, state->BindExpr(U, state->getSVal(Ex)));
+ }
+
+ return;
+ }
+
+ case UnaryOperator::AddrOf: {
+
+ assert(!asLValue);
+ Expr* Ex = U->getSubExpr()->IgnoreParens();
+ ExplodedNodeSet Tmp;
+ VisitLValue(Ex, Pred, Tmp);
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ const GRState* state = GetState(*I);
+ SVal V = state->getSVal(Ex);
+ state = state->BindExpr(U, V);
+ MakeNode(Dst, U, *I, state);
+ }
+
+ return;
+ }
+
+ case UnaryOperator::LNot:
+ case UnaryOperator::Minus:
+ case UnaryOperator::Not: {
+
+ assert (!asLValue);
+ Expr* Ex = U->getSubExpr()->IgnoreParens();
+ ExplodedNodeSet Tmp;
+ Visit(Ex, Pred, Tmp);
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ const GRState* state = GetState(*I);
+
+ // Get the value of the subexpression.
+ SVal V = state->getSVal(Ex);
+
+ if (V.isUnknownOrUndef()) {
+ MakeNode(Dst, U, *I, state->BindExpr(U, V));
+ continue;
+ }
+
+// QualType DstT = getContext().getCanonicalType(U->getType());
+// QualType SrcT = getContext().getCanonicalType(Ex->getType());
+//
+// if (DstT != SrcT) // Perform promotions.
+// V = EvalCast(V, DstT);
+//
+// if (V.isUnknownOrUndef()) {
+// MakeNode(Dst, U, *I, BindExpr(St, U, V));
+// continue;
+// }
+
+ switch (U->getOpcode()) {
+ default:
+ assert(false && "Invalid Opcode.");
+ break;
+
+ case UnaryOperator::Not:
+ // FIXME: Do we need to handle promotions?
+ state = state->BindExpr(U, EvalComplement(cast<NonLoc>(V)));
+ break;
+
+ case UnaryOperator::Minus:
+ // FIXME: Do we need to handle promotions?
+ state = state->BindExpr(U, EvalMinus(cast<NonLoc>(V)));
+ break;
+
+ case UnaryOperator::LNot:
+
+ // C99 6.5.3.3: "The expression !E is equivalent to (0==E)."
+ //
+ // Note: technically we do "E == 0", but this is the same in the
+ // transfer functions as "0 == E".
+ SVal Result;
+
+ if (isa<Loc>(V)) {
+ Loc X = ValMgr.makeNull();
+ Result = EvalBinOp(state, BinaryOperator::EQ, cast<Loc>(V), X,
+ U->getType());
+ }
+ else {
+ nonloc::ConcreteInt X(getBasicVals().getValue(0, Ex->getType()));
+ Result = EvalBinOp(state, BinaryOperator::EQ, cast<NonLoc>(V), X,
+ U->getType());
+ }
+
+ state = state->BindExpr(U, Result);
+
+ break;
+ }
+
+ MakeNode(Dst, U, *I, state);
+ }
+
+ return;
+ }
+ }
+
+ // Handle ++ and -- (both pre- and post-increment).
+
+ assert (U->isIncrementDecrementOp());
+ ExplodedNodeSet Tmp;
+ Expr* Ex = U->getSubExpr()->IgnoreParens();
+ VisitLValue(Ex, Pred, Tmp);
+
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I!=E; ++I) {
+
+ const GRState* state = GetState(*I);
+ SVal V1 = state->getSVal(Ex);
+
+ // Perform a load.
+ ExplodedNodeSet Tmp2;
+ EvalLoad(Tmp2, Ex, *I, state, V1);
+
+ for (ExplodedNodeSet::iterator I2=Tmp2.begin(), E2=Tmp2.end();I2!=E2;++I2) {
+
+ state = GetState(*I2);
+ SVal V2_untested = state->getSVal(Ex);
+
+ // Propagate unknown and undefined values.
+ if (V2_untested.isUnknownOrUndef()) {
+ MakeNode(Dst, U, *I2, state->BindExpr(U, V2_untested));
+ continue;
+ }
+ DefinedSVal V2 = cast<DefinedSVal>(V2_untested);
+
+ // Handle all other values.
+ BinaryOperator::Opcode Op = U->isIncrementOp() ? BinaryOperator::Add
+ : BinaryOperator::Sub;
+
+ // If the UnaryOperator has non-location type, use its type to create the
+ // constant value. If the UnaryOperator has location type, create the
+ // constant with int type and pointer width.
+ SVal RHS;
+
+ if (U->getType()->isAnyPointerType())
+ RHS = ValMgr.makeIntValWithPtrWidth(1, false);
+ else
+ RHS = ValMgr.makeIntVal(1, U->getType());
+
+ SVal Result = EvalBinOp(state, Op, V2, RHS, U->getType());
+
+ // Conjure a new symbol if necessary to recover precision.
+ if (Result.isUnknown() || !getConstraintManager().canReasonAbout(Result)){
+ DefinedOrUnknownSVal SymVal =
+ ValMgr.getConjuredSymbolVal(NULL, Ex,
+ Builder->getCurrentBlockCount());
+ Result = SymVal;
+
+ // If the value is a location, ++/-- should always preserve
+ // non-nullness. Check if the original value was non-null, and if so
+ // propagate that constraint.
+ if (Loc::IsLocType(U->getType())) {
+ DefinedOrUnknownSVal Constraint =
+ SVator.EvalEQ(state, V2, ValMgr.makeZeroVal(U->getType()));
+
+ if (!state->Assume(Constraint, true)) {
+ // It isn't feasible for the original value to be null.
+ // Propagate this constraint.
+ Constraint = SVator.EvalEQ(state, SymVal,
+ ValMgr.makeZeroVal(U->getType()));
+
+
+ assert(state);
+ }
+ }
+ }
+
+ state = state->BindExpr(U, U->isPostfix() ? V2 : Result);
+
+ // Perform the store.
+ EvalStore(Dst, NULL, U, *I2, state, V1, Result);
+ }
+ }
+}
+
+void GRExprEngine::VisitAsmStmt(AsmStmt* A, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst) {
+ VisitAsmStmtHelperOutputs(A, A->begin_outputs(), A->end_outputs(), Pred, Dst);
+}
+
+void GRExprEngine::VisitAsmStmtHelperOutputs(AsmStmt* A,
+ AsmStmt::outputs_iterator I,
+ AsmStmt::outputs_iterator E,
+ ExplodedNode* Pred, ExplodedNodeSet& Dst) {
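+  // Recursively evaluate each output expression as an lvalue; once all the
+  // outputs have been visited, move on to the inputs.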
+ if (I == E) {
+ VisitAsmStmtHelperInputs(A, A->begin_inputs(), A->end_inputs(), Pred, Dst);
+ return;
+ }
+
+ ExplodedNodeSet Tmp;
+ VisitLValue(*I, Pred, Tmp);
+
+ ++I;
+
+ for (ExplodedNodeSet::iterator NI = Tmp.begin(), NE = Tmp.end();NI != NE;++NI)
+ VisitAsmStmtHelperOutputs(A, I, E, *NI, Dst);
+}
+
+void GRExprEngine::VisitAsmStmtHelperInputs(AsmStmt* A,
+ AsmStmt::inputs_iterator I,
+ AsmStmt::inputs_iterator E,
+ ExplodedNode* Pred,
+ ExplodedNodeSet& Dst) {
+ if (I == E) {
+
+ // We have processed both the inputs and the outputs. All of the outputs
+ // should evaluate to Locs. Nuke all of their values.
+
+ // FIXME: Some day in the future it would be nice to allow a "plug-in"
+ // which interprets the inline asm and stores proper results in the
+ // outputs.
+
+ const GRState* state = GetState(Pred);
+
+ for (AsmStmt::outputs_iterator OI = A->begin_outputs(),
+ OE = A->end_outputs(); OI != OE; ++OI) {
+
+ SVal X = state->getSVal(*OI);
+      assert(!isa<NonLoc>(X)); // Should be an LVal, unknown, or undef.
+
+ if (isa<Loc>(X))
+ state = state->bindLoc(cast<Loc>(X), UnknownVal());
+ }
+
+ MakeNode(Dst, A, Pred, state);
+ return;
+ }
+
+ ExplodedNodeSet Tmp;
+ Visit(*I, Pred, Tmp);
+
+ ++I;
+
+ for (ExplodedNodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI!=NE; ++NI)
+ VisitAsmStmtHelperInputs(A, I, E, *NI, Dst);
+}
+
+void GRExprEngine::VisitReturnStmt(ReturnStmt *RS, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ ExplodedNodeSet Src;
+ if (Expr *RetE = RS->getRetValue()) {
+ // Record the returned expression in the state. It will be used in
+ // ProcessCallExit to bind the return value to the call expr.
+ {
+ static int Tag = 0;
+ SaveAndRestore<const void *> OldTag(Builder->Tag, &Tag);
+ const GRState *state = GetState(Pred);
+ state = state->set<ReturnExpr>(RetE);
+ Pred = Builder->generateNode(RetE, state, Pred);
+ }
+ // We may get a NULL Pred because we generated a cached node.
+ if (Pred)
+ Visit(RetE, Pred, Src);
+ }
+ else {
+ Src.Add(Pred);
+ }
+
+ ExplodedNodeSet CheckedSet;
+ CheckerVisit(RS, CheckedSet, Src, true);
+
+ for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
+ I != E; ++I) {
+
+ assert(Builder && "GRStmtNodeBuilder must be defined.");
+
+ Pred = *I;
+ unsigned size = Dst.size();
+
+ SaveAndRestore<bool> OldSink(Builder->BuildSinks);
+ SaveOr OldHasGen(Builder->HasGeneratedNode);
+
+ getTF().EvalReturn(Dst, *this, *Builder, RS, Pred);
+
+    // Handle the case where no nodes were generated.
+ if (!Builder->BuildSinks && Dst.size() == size &&
+ !Builder->HasGeneratedNode)
+ MakeNode(Dst, RS, Pred, GetState(Pred));
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer functions: Binary operators.
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::VisitBinaryOperator(BinaryOperator* B,
+ ExplodedNode* Pred,
+ ExplodedNodeSet& Dst, bool asLValue) {
+
+ ExplodedNodeSet Tmp1;
+ Expr* LHS = B->getLHS()->IgnoreParens();
+ Expr* RHS = B->getRHS()->IgnoreParens();
+
+ // FIXME: Add proper support for ObjCImplicitSetterGetterRefExpr.
+ if (isa<ObjCImplicitSetterGetterRefExpr>(LHS)) {
+ Visit(RHS, Pred, Dst);
+ return;
+ }
+
+ if (B->isAssignmentOp())
+ VisitLValue(LHS, Pred, Tmp1);
+ else
+ Visit(LHS, Pred, Tmp1);
+
+ ExplodedNodeSet Tmp3;
+
+ for (ExplodedNodeSet::iterator I1=Tmp1.begin(), E1=Tmp1.end(); I1!=E1; ++I1) {
+ SVal LeftV = GetState(*I1)->getSVal(LHS);
+ ExplodedNodeSet Tmp2;
+ Visit(RHS, *I1, Tmp2);
+
+ ExplodedNodeSet CheckedSet;
+ CheckerVisit(B, CheckedSet, Tmp2, true);
+
+ // With both the LHS and RHS evaluated, process the operation itself.
+
+ for (ExplodedNodeSet::iterator I2=CheckedSet.begin(), E2=CheckedSet.end();
+ I2 != E2; ++I2) {
+
+ const GRState *state = GetState(*I2);
+ const GRState *OldSt = state;
+ SVal RightV = state->getSVal(RHS);
+
+ BinaryOperator::Opcode Op = B->getOpcode();
+
+ if (Op == BinaryOperator::Assign) {
+ // EXPERIMENTAL: "Conjured" symbols.
+ // FIXME: Handle structs.
+ QualType T = RHS->getType();
+
+ if ((RightV.isUnknown()||!getConstraintManager().canReasonAbout(RightV))
+ && (Loc::IsLocType(T) || (T->isScalarType()&&T->isIntegerType()))) {
+ unsigned Count = Builder->getCurrentBlockCount();
+ RightV = ValMgr.getConjuredSymbolVal(NULL, B->getRHS(), Count);
+ }
+
+ SVal ExprVal = asLValue ? LeftV : RightV;
+
+ // Simulate the effects of a "store": bind the value of the RHS
+ // to the L-Value represented by the LHS.
+ EvalStore(Tmp3, B, LHS, *I2, state->BindExpr(B, ExprVal), LeftV,RightV);
+ continue;
+ }
+
+ if (!B->isAssignmentOp()) {
+ // Process non-assignments except commas or short-circuited
+ // logical expressions (LAnd and LOr).
+ SVal Result = EvalBinOp(state, Op, LeftV, RightV, B->getType());
+
+ if (Result.isUnknown()) {
+ if (OldSt != state) {
+ // Generate a new node if we have already created a new state.
+ MakeNode(Tmp3, B, *I2, state);
+ }
+ else
+ Tmp3.Add(*I2);
+
+ continue;
+ }
+
+ state = state->BindExpr(B, Result);
+
+ MakeNode(Tmp3, B, *I2, state);
+ continue;
+ }
+
+ assert (B->isCompoundAssignmentOp());
+
+ switch (Op) {
+ default:
+ assert(0 && "Invalid opcode for compound assignment.");
+ case BinaryOperator::MulAssign: Op = BinaryOperator::Mul; break;
+ case BinaryOperator::DivAssign: Op = BinaryOperator::Div; break;
+ case BinaryOperator::RemAssign: Op = BinaryOperator::Rem; break;
+ case BinaryOperator::AddAssign: Op = BinaryOperator::Add; break;
+ case BinaryOperator::SubAssign: Op = BinaryOperator::Sub; break;
+ case BinaryOperator::ShlAssign: Op = BinaryOperator::Shl; break;
+ case BinaryOperator::ShrAssign: Op = BinaryOperator::Shr; break;
+ case BinaryOperator::AndAssign: Op = BinaryOperator::And; break;
+ case BinaryOperator::XorAssign: Op = BinaryOperator::Xor; break;
+ case BinaryOperator::OrAssign: Op = BinaryOperator::Or; break;
+ }
+
+ // Perform a load (the LHS). This performs the checks for
+ // null dereferences, and so on.
+ ExplodedNodeSet Tmp4;
+ SVal location = state->getSVal(LHS);
+ EvalLoad(Tmp4, LHS, *I2, state, location);
+
+ for (ExplodedNodeSet::iterator I4=Tmp4.begin(), E4=Tmp4.end(); I4!=E4;
+ ++I4) {
+ state = GetState(*I4);
+ SVal V = state->getSVal(LHS);
+
+ // Get the computation type.
+ QualType CTy =
+ cast<CompoundAssignOperator>(B)->getComputationResultType();
+ CTy = getContext().getCanonicalType(CTy);
+
+ QualType CLHSTy =
+ cast<CompoundAssignOperator>(B)->getComputationLHSType();
+ CLHSTy = getContext().getCanonicalType(CLHSTy);
+
+ QualType LTy = getContext().getCanonicalType(LHS->getType());
+ QualType RTy = getContext().getCanonicalType(RHS->getType());
+
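+        // For example, in 'char c; c += 1;' the addition is performed in the
+        // computation type 'int' (CTy), but the value stored back into 'c'
+        // must be converted back to the LHS type 'char' (LTy).
+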
+ // Promote LHS.
+ V = SVator.EvalCast(V, CLHSTy, LTy);
+
+ // Compute the result of the operation.
+ SVal Result = SVator.EvalCast(EvalBinOp(state, Op, V, RightV, CTy),
+ B->getType(), CTy);
+
+ // EXPERIMENTAL: "Conjured" symbols.
+ // FIXME: Handle structs.
+
+ SVal LHSVal;
+
+ if ((Result.isUnknown() ||
+ !getConstraintManager().canReasonAbout(Result))
+ && (Loc::IsLocType(CTy)
+ || (CTy->isScalarType() && CTy->isIntegerType()))) {
+
+ unsigned Count = Builder->getCurrentBlockCount();
+
+ // The symbolic value is actually for the type of the left-hand side
+ // expression, not the computation type, as this is the value the
+ // LValue on the LHS will bind to.
+ LHSVal = ValMgr.getConjuredSymbolVal(NULL, B->getRHS(), LTy, Count);
+
+ // However, we need to convert the symbol to the computation type.
+ Result = SVator.EvalCast(LHSVal, CTy, LTy);
+ }
+ else {
+          // The left-hand side may bind to a value of a different type
+          // than the computation type.
+ LHSVal = SVator.EvalCast(Result, LTy, CTy);
+ }
+
+ EvalStore(Tmp3, B, LHS, *I4, state->BindExpr(B, Result),
+ location, LHSVal);
+ }
+ }
+ }
+
+ CheckerVisit(B, Dst, Tmp3, false);
+}
+
+//===----------------------------------------------------------------------===//
+// Checker registration/lookup.
+//===----------------------------------------------------------------------===//
+
+Checker *GRExprEngine::lookupChecker(void *tag) const {
+ CheckerMap::const_iterator I = CheckerM.find(tag);
+ return (I == CheckerM.end()) ? NULL : Checkers[I->second].second;
+}
+
+//===----------------------------------------------------------------------===//
+// Visualization.
+//===----------------------------------------------------------------------===//
+
+#ifndef NDEBUG
+static GRExprEngine* GraphPrintCheckerState;
+static SourceManager* GraphPrintSourceManager;
+
+namespace llvm {
+template<>
+struct DOTGraphTraits<ExplodedNode*> :
+ public DefaultDOTGraphTraits {
+
+ DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+
+ // FIXME: Since we do not cache error nodes in GRExprEngine now, this does not
+ // work.
+ static std::string getNodeAttributes(const ExplodedNode* N, void*) {
+
+#if 0
+ // FIXME: Replace with a general scheme to tell if the node is
+ // an error node.
+ if (GraphPrintCheckerState->isImplicitNullDeref(N) ||
+ GraphPrintCheckerState->isExplicitNullDeref(N) ||
+ GraphPrintCheckerState->isUndefDeref(N) ||
+ GraphPrintCheckerState->isUndefStore(N) ||
+ GraphPrintCheckerState->isUndefControlFlow(N) ||
+ GraphPrintCheckerState->isUndefResult(N) ||
+ GraphPrintCheckerState->isBadCall(N) ||
+ GraphPrintCheckerState->isUndefArg(N))
+ return "color=\"red\",style=\"filled\"";
+
+ if (GraphPrintCheckerState->isNoReturnCall(N))
+ return "color=\"blue\",style=\"filled\"";
+#endif
+ return "";
+ }
+
+ static std::string getNodeLabel(const ExplodedNode* N, void*){
+
+ std::string sbuf;
+ llvm::raw_string_ostream Out(sbuf);
+
+ // Program Location.
+ ProgramPoint Loc = N->getLocation();
+
+ switch (Loc.getKind()) {
+ case ProgramPoint::BlockEntranceKind:
+ Out << "Block Entrance: B"
+ << cast<BlockEntrance>(Loc).getBlock()->getBlockID();
+ break;
+
+ case ProgramPoint::BlockExitKind:
+ assert (false);
+ break;
+
+ case ProgramPoint::CallEnterKind:
+ Out << "CallEnter";
+ break;
+
+ case ProgramPoint::CallExitKind:
+ Out << "CallExit";
+ break;
+
+ default: {
+ if (StmtPoint *L = dyn_cast<StmtPoint>(&Loc)) {
+ const Stmt* S = L->getStmt();
+ SourceLocation SLoc = S->getLocStart();
+
+ Out << S->getStmtClassName() << ' ' << (void*) S << ' ';
+ LangOptions LO; // FIXME.
+ S->printPretty(Out, 0, PrintingPolicy(LO));
+
+ if (SLoc.isFileID()) {
+ Out << "\\lline="
+ << GraphPrintSourceManager->getInstantiationLineNumber(SLoc)
+ << " col="
+ << GraphPrintSourceManager->getInstantiationColumnNumber(SLoc)
+ << "\\l";
+ }
+
+ if (isa<PreStmt>(Loc))
+          Out << "\\lPreStmt\\l";
+        else if (isa<PostLoad>(Loc))
+          Out << "\\lPostLoad\\l";
+ else if (isa<PostStore>(Loc))
+ Out << "\\lPostStore\\l";
+ else if (isa<PostLValue>(Loc))
+ Out << "\\lPostLValue\\l";
+
+#if 0
+ // FIXME: Replace with a general scheme to determine
+ // the name of the check.
+ if (GraphPrintCheckerState->isImplicitNullDeref(N))
+ Out << "\\|Implicit-Null Dereference.\\l";
+ else if (GraphPrintCheckerState->isExplicitNullDeref(N))
+ Out << "\\|Explicit-Null Dereference.\\l";
+ else if (GraphPrintCheckerState->isUndefDeref(N))
+        Out << "\\|Dereference of undefined value.\\l";
+ else if (GraphPrintCheckerState->isUndefStore(N))
+ Out << "\\|Store to Undefined Loc.";
+ else if (GraphPrintCheckerState->isUndefResult(N))
+ Out << "\\|Result of operation is undefined.";
+ else if (GraphPrintCheckerState->isNoReturnCall(N))
+ Out << "\\|Call to function marked \"noreturn\".";
+ else if (GraphPrintCheckerState->isBadCall(N))
+ Out << "\\|Call to NULL/Undefined.";
+ else if (GraphPrintCheckerState->isUndefArg(N))
+ Out << "\\|Argument in call is undefined";
+#endif
+
+ break;
+ }
+
+ const BlockEdge& E = cast<BlockEdge>(Loc);
+ Out << "Edge: (B" << E.getSrc()->getBlockID() << ", B"
+ << E.getDst()->getBlockID() << ')';
+
+ if (Stmt* T = E.getSrc()->getTerminator()) {
+
+ SourceLocation SLoc = T->getLocStart();
+
+ Out << "\\|Terminator: ";
+ LangOptions LO; // FIXME.
+ E.getSrc()->printTerminator(Out, LO);
+
+ if (SLoc.isFileID()) {
+ Out << "\\lline="
+ << GraphPrintSourceManager->getInstantiationLineNumber(SLoc)
+ << " col="
+ << GraphPrintSourceManager->getInstantiationColumnNumber(SLoc);
+ }
+
+ if (isa<SwitchStmt>(T)) {
+ Stmt* Label = E.getDst()->getLabel();
+
+ if (Label) {
+ if (CaseStmt* C = dyn_cast<CaseStmt>(Label)) {
+ Out << "\\lcase ";
+ LangOptions LO; // FIXME.
+ C->getLHS()->printPretty(Out, 0, PrintingPolicy(LO));
+
+ if (Stmt* RHS = C->getRHS()) {
+ Out << " .. ";
+ RHS->printPretty(Out, 0, PrintingPolicy(LO));
+ }
+
+ Out << ":";
+ }
+ else {
+ assert (isa<DefaultStmt>(Label));
+ Out << "\\ldefault:";
+ }
+ }
+ else
+ Out << "\\l(implicit) default:";
+ }
+ else if (isa<IndirectGotoStmt>(T)) {
+ // FIXME
+ }
+ else {
+ Out << "\\lCondition: ";
+ if (*E.getSrc()->succ_begin() == E.getDst())
+ Out << "true";
+ else
+ Out << "false";
+ }
+
+ Out << "\\l";
+ }
+
+#if 0
+ // FIXME: Replace with a general scheme to determine
+ // the name of the check.
+ if (GraphPrintCheckerState->isUndefControlFlow(N)) {
+ Out << "\\|Control-flow based on\\lUndefined value.\\l";
+ }
+#endif
+ }
+ }
+
+ Out << "\\|StateID: " << (void*) N->getState() << "\\|";
+
+ const GRState *state = N->getState();
+ state->printDOT(Out, *N->getLocationContext()->getCFG());
+
+ Out << "\\l";
+ return Out.str();
+ }
+};
+} // end llvm namespace
+#endif
+
+#ifndef NDEBUG
+template <typename ITERATOR>
+ExplodedNode* GetGraphNode(ITERATOR I) { return *I; }
+
+template <> ExplodedNode*
+GetGraphNode<llvm::DenseMap<ExplodedNode*, Expr*>::iterator>
+ (llvm::DenseMap<ExplodedNode*, Expr*>::iterator I) {
+ return I->first;
+}
+#endif
+
+void GRExprEngine::ViewGraph(bool trim) {
+#ifndef NDEBUG
+ if (trim) {
+ std::vector<ExplodedNode*> Src;
+
+ // Flush any outstanding reports to make sure we cover all the nodes.
+ // This does not cause them to get displayed.
+ for (BugReporter::iterator I=BR.begin(), E=BR.end(); I!=E; ++I)
+ const_cast<BugType*>(*I)->FlushReports(BR);
+
+ // Iterate through the reports and get their nodes.
+ for (BugReporter::iterator I=BR.begin(), E=BR.end(); I!=E; ++I) {
+ for (BugType::const_iterator I2=(*I)->begin(), E2=(*I)->end();
+ I2!=E2; ++I2) {
+ const BugReportEquivClass& EQ = *I2;
+ const BugReport &R = **EQ.begin();
+ ExplodedNode *N = const_cast<ExplodedNode*>(R.getEndNode());
+ if (N) Src.push_back(N);
+ }
+ }
+
+    if (!Src.empty())
+      ViewGraph(&Src[0], &Src[0]+Src.size());
+ }
+ else {
+ GraphPrintCheckerState = this;
+ GraphPrintSourceManager = &getContext().getSourceManager();
+
+ llvm::ViewGraph(*G.roots_begin(), "GRExprEngine");
+
+ GraphPrintCheckerState = NULL;
+ GraphPrintSourceManager = NULL;
+ }
+#endif
+}
+
+void GRExprEngine::ViewGraph(ExplodedNode** Beg, ExplodedNode** End) {
+#ifndef NDEBUG
+ GraphPrintCheckerState = this;
+ GraphPrintSourceManager = &getContext().getSourceManager();
+
+ std::auto_ptr<ExplodedGraph> TrimmedG(G.Trim(Beg, End).first);
+
+ if (!TrimmedG.get())
+ llvm::errs() << "warning: Trimmed ExplodedGraph is empty.\n";
+ else
+ llvm::ViewGraph(*TrimmedG->roots_begin(), "TrimmedGRExprEngine");
+
+ GraphPrintCheckerState = NULL;
+ GraphPrintSourceManager = NULL;
+#endif
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.cpp b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.cpp
new file mode 100644
index 0000000..6066a1c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.cpp
@@ -0,0 +1,41 @@
+//=-- GRExprEngineExperimentalChecks.cpp ----------------------------*- C++ -*-=
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions to instantiate and register experimental
+// checks in GRExprEngine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "GRExprEngineExperimentalChecks.h"
+#include "clang/Checker/Checkers/LocalCheckers.h"
+
+using namespace clang;
+
+void clang::RegisterExperimentalChecks(GRExprEngine &Eng) {
+ // These are checks that never belong as internal checks
+ // within GRExprEngine.
+ RegisterPthreadLockChecker(Eng);
+ RegisterMallocChecker(Eng);
+}
+
+void clang::RegisterExperimentalInternalChecks(GRExprEngine &Eng) {
+ // These are internal checks that should eventually migrate to
+ // RegisterInternalChecks() once they have been further tested.
+
+  // Note that this must be registered after ReturnStackAddressChecker.
+ RegisterReturnPointerRangeChecker(Eng);
+
+ RegisterFixedAddressChecker(Eng);
+ RegisterPointerSubChecker(Eng);
+ RegisterPointerArithChecker(Eng);
+ RegisterCastToStructChecker(Eng);
+ RegisterCastSizeChecker(Eng);
+ RegisterArrayBoundChecker(Eng);
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h
new file mode 100644
index 0000000..9a9da32
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h
@@ -0,0 +1,26 @@
+//=-- GRExprEngineExperimentalChecks.h ------------------------------*- C++ -*-=
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions to instantiate and register experimental
+// checks in GRExprEngine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GREXPRENGINE_EXPERIMENTAL_CHECKS
+#define LLVM_CLANG_GREXPRENGINE_EXPERIMENTAL_CHECKS
+
+namespace clang {
+
+class GRExprEngine;
+
+void RegisterPthreadLockChecker(GRExprEngine &Eng);
+void RegisterMallocChecker(GRExprEngine &Eng);
+
+} // end clang namespace
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h
new file mode 100644
index 0000000..335b85e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h
@@ -0,0 +1,52 @@
+//=-- GRExprEngineInternalChecks.h- Builtin GRExprEngine Checks -----*- C++ -*-=
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions to instantiate and register the "built-in"
+// checks in GRExprEngine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GREXPRENGINE_INTERNAL_CHECKS
+#define LLVM_CLANG_GREXPRENGINE_INTERNAL_CHECKS
+
+namespace clang {
+
+class GRExprEngine;
+
+// Foundational checks that handle basic semantics.
+void RegisterAdjustedReturnValueChecker(GRExprEngine &Eng);
+void RegisterArrayBoundChecker(GRExprEngine &Eng);
+void RegisterAttrNonNullChecker(GRExprEngine &Eng);
+void RegisterBuiltinFunctionChecker(GRExprEngine &Eng);
+void RegisterCallAndMessageChecker(GRExprEngine &Eng);
+void RegisterCastToStructChecker(GRExprEngine &Eng);
+void RegisterCastSizeChecker(GRExprEngine &Eng);
+void RegisterDereferenceChecker(GRExprEngine &Eng);
+void RegisterDivZeroChecker(GRExprEngine &Eng);
+void RegisterFixedAddressChecker(GRExprEngine &Eng);
+void RegisterNoReturnFunctionChecker(GRExprEngine &Eng);
+void RegisterPointerArithChecker(GRExprEngine &Eng);
+void RegisterPointerSubChecker(GRExprEngine &Eng);
+void RegisterReturnPointerRangeChecker(GRExprEngine &Eng);
+void RegisterReturnStackAddressChecker(GRExprEngine &Eng);
+void RegisterReturnUndefChecker(GRExprEngine &Eng);
+void RegisterUndefBranchChecker(GRExprEngine &Eng);
+void RegisterUndefCapturedBlockVarChecker(GRExprEngine &Eng);
+void RegisterUndefResultChecker(GRExprEngine &Eng);
+void RegisterUndefinedArraySubscriptChecker(GRExprEngine &Eng);
+void RegisterUndefinedAssignmentChecker(GRExprEngine &Eng);
+void RegisterVLASizeChecker(GRExprEngine &Eng);
+
+// API checks.
+void RegisterMacOSXAPIChecker(GRExprEngine &Eng);
+void RegisterOSAtomicChecker(GRExprEngine &Eng);
+void RegisterUnixAPIChecker(GRExprEngine &Eng);
+
+} // end clang namespace
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRState.cpp b/contrib/llvm/tools/clang/lib/Checker/GRState.cpp
new file mode 100644
index 0000000..b16e922
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/GRState.cpp
@@ -0,0 +1,370 @@
+//= GRState.cpp - Path-Sensitive "State" for tracking values -----*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements GRState and GRStateManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/CFG.h"
+#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/Checker/PathSensitive/GRTransferFuncs.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+// Give the vtable for ConstraintManager somewhere to live.
+// FIXME: Move this elsewhere.
+ConstraintManager::~ConstraintManager() {}
+
+GRStateManager::~GRStateManager() {
+ for (std::vector<GRState::Printer*>::iterator I=Printers.begin(),
+ E=Printers.end(); I!=E; ++I)
+ delete *I;
+
+ for (GDMContextsTy::iterator I=GDMContexts.begin(), E=GDMContexts.end();
+ I!=E; ++I)
+ I->second.second(I->second.first);
+}
+
+const GRState*
+GRStateManager::RemoveDeadBindings(const GRState* state, Stmt* Loc,
+ const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper) {
+
+ // This code essentially performs a "mark-and-sweep" of the VariableBindings.
+ // The roots are any Block-level exprs and Decls that our liveness algorithm
+ // tells us are live. We then see what Decls they may reference, and keep
+  // those around. This code can most likely be made faster, and the
+  // frequency with which this method is called should be experimented with
+  // for optimum performance.
+ llvm::SmallVector<const MemRegion*, 10> RegionRoots;
+ GRState NewState = *state;
+
+ NewState.Env = EnvMgr.RemoveDeadBindings(NewState.Env, Loc, SymReaper,
+ state, RegionRoots);
+
+ // Clean up the store.
+ const GRState *s = StoreMgr->RemoveDeadBindings(NewState, Loc, LCtx,
+ SymReaper, RegionRoots);
+
+ return ConstraintMgr->RemoveDeadBindings(s, SymReaper);
+}
+
+const GRState *GRState::unbindLoc(Loc LV) const {
+ Store OldStore = getStore();
+ Store NewStore = getStateManager().StoreMgr->Remove(OldStore, LV);
+
+ if (NewStore == OldStore)
+ return this;
+
+ GRState NewSt = *this;
+ NewSt.St = NewStore;
+ return getStateManager().getPersistentState(NewSt);
+}
+
+SVal GRState::getSValAsScalarOrLoc(const MemRegion *R) const {
+  // We only want to do fetches from regions that we can actually bind
+  // values to.  For example, SymbolicRegions of type 'id<...>' cannot
+  // have direct bindings (but there can be bindings on their subregions).
+ if (!R->isBoundable())
+ return UnknownVal();
+
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
+ QualType T = TR->getValueType(getStateManager().getContext());
+ if (Loc::IsLocType(T) || T->isIntegerType())
+ return getSVal(R);
+ }
+
+ return UnknownVal();
+}
+
+
+const GRState *GRState::BindExpr(const Stmt* Ex, SVal V, bool Invalidate) const{
+ Environment NewEnv = getStateManager().EnvMgr.BindExpr(Env, Ex, V,
+ Invalidate);
+ if (NewEnv == Env)
+ return this;
+
+ GRState NewSt = *this;
+ NewSt.Env = NewEnv;
+ return getStateManager().getPersistentState(NewSt);
+}
+
+const GRState* GRStateManager::getInitialState(const LocationContext *InitLoc) {
+ GRState State(this,
+ EnvMgr.getInitialEnvironment(),
+ StoreMgr->getInitialStore(InitLoc),
+ GDMFactory.GetEmptyMap());
+
+ return getPersistentState(State);
+}
+
+const GRState* GRStateManager::getPersistentState(GRState& State) {
+
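+  // States are unique'd in a FoldingSet so that structurally identical
+  // states share a single canonical copy and can be compared by pointer.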
+ llvm::FoldingSetNodeID ID;
+ State.Profile(ID);
+ void* InsertPos;
+
+ if (GRState* I = StateSet.FindNodeOrInsertPos(ID, InsertPos))
+ return I;
+
+ GRState* I = (GRState*) Alloc.Allocate<GRState>();
+ new (I) GRState(State);
+ StateSet.InsertNode(I, InsertPos);
+ return I;
+}
+
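+// GRStates are immutable; "modifying" one copies the state, updates the
+// copy's store, and re-canonicalizes it via getPersistentState().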
+const GRState* GRState::makeWithStore(Store store) const {
+ GRState NewSt = *this;
+ NewSt.St = store;
+ return getStateManager().getPersistentState(NewSt);
+}
+
+//===----------------------------------------------------------------------===//
+// State pretty-printing.
+//===----------------------------------------------------------------------===//
+
+void GRState::print(llvm::raw_ostream& Out, CFG &C, const char* nl,
+ const char* sep) const {
+ // Print the store.
+ GRStateManager &Mgr = getStateManager();
+ Mgr.getStoreManager().print(getStore(), Out, nl, sep);
+
+ // Print Subexpression bindings.
+ bool isFirst = true;
+
+ for (Environment::iterator I = Env.begin(), E = Env.end(); I != E; ++I) {
+ if (C.isBlkExpr(I.getKey()))
+ continue;
+
+ if (isFirst) {
+ Out << nl << nl << "Sub-Expressions:" << nl;
+ isFirst = false;
+ }
+ else { Out << nl; }
+
+ Out << " (" << (void*) I.getKey() << ") ";
+ LangOptions LO; // FIXME.
+ I.getKey()->printPretty(Out, 0, PrintingPolicy(LO));
+ Out << " : " << I.getData();
+ }
+
+ // Print block-expression bindings.
+ isFirst = true;
+
+ for (Environment::iterator I = Env.begin(), E = Env.end(); I != E; ++I) {
+ if (!C.isBlkExpr(I.getKey()))
+ continue;
+
+ if (isFirst) {
+ Out << nl << nl << "Block-level Expressions:" << nl;
+ isFirst = false;
+ }
+ else { Out << nl; }
+
+ Out << " (" << (void*) I.getKey() << ") ";
+ LangOptions LO; // FIXME.
+ I.getKey()->printPretty(Out, 0, PrintingPolicy(LO));
+ Out << " : " << I.getData();
+ }
+
+ Mgr.getConstraintManager().print(this, Out, nl, sep);
+
+ // Print checker-specific data.
+ for (std::vector<Printer*>::iterator I = Mgr.Printers.begin(),
+ E = Mgr.Printers.end(); I != E; ++I) {
+ (*I)->Print(Out, this, nl, sep);
+ }
+}
+
+void GRState::printDOT(llvm::raw_ostream& Out, CFG &C) const {
+ print(Out, C, "\\l", "\\|");
+}
+
+void GRState::printStdErr(CFG &C) const {
+ print(llvm::errs(), C);
+}
+
+//===----------------------------------------------------------------------===//
+// Generic Data Map.
+//===----------------------------------------------------------------------===//
+
+void* const* GRState::FindGDM(void* K) const {
+ return GDM.lookup(K);
+}
+
+void*
+GRStateManager::FindGDMContext(void* K,
+ void* (*CreateContext)(llvm::BumpPtrAllocator&),
+ void (*DeleteContext)(void*)) {
+
+ std::pair<void*, void (*)(void*)>& p = GDMContexts[K];
+ if (!p.first) {
+ p.first = CreateContext(Alloc);
+ p.second = DeleteContext;
+ }
+
+ return p.first;
+}
+
+const GRState* GRStateManager::addGDM(const GRState* St, void* Key, void* Data){
+ GRState::GenericDataMap M1 = St->getGDM();
+ GRState::GenericDataMap M2 = GDMFactory.Add(M1, Key, Data);
+
+ if (M1 == M2)
+ return St;
+
+ GRState NewSt = *St;
+ NewSt.GDM = M2;
+ return getPersistentState(NewSt);
+}
+
+const GRState *GRStateManager::removeGDM(const GRState *state, void *Key) {
+ GRState::GenericDataMap OldM = state->getGDM();
+ GRState::GenericDataMap NewM = GDMFactory.Remove(OldM, Key);
+
+ if (NewM == OldM)
+ return state;
+
+ GRState NewState = *state;
+ NewState.GDM = NewM;
+ return getPersistentState(NewState);
+}
+
+//===----------------------------------------------------------------------===//
+// Utility.
+//===----------------------------------------------------------------------===//
+
+namespace {
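+// Walks the symbols reachable from an SVal or a region: super-regions,
+// sub-regions, and their bindings, invoking a SymbolVisitor callback on
+// every symbol encountered along the way.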
+class ScanReachableSymbols : public SubRegionMap::Visitor {
+ typedef llvm::DenseSet<const MemRegion*> VisitedRegionsTy;
+
+ VisitedRegionsTy visited;
+ const GRState *state;
+ SymbolVisitor &visitor;
+ llvm::OwningPtr<SubRegionMap> SRM;
+public:
+
+ ScanReachableSymbols(const GRState *st, SymbolVisitor& v)
+ : state(st), visitor(v) {}
+
+ bool scan(nonloc::CompoundVal val);
+ bool scan(SVal val);
+ bool scan(const MemRegion *R);
+
+ // From SubRegionMap::Visitor.
+ bool Visit(const MemRegion* Parent, const MemRegion* SubRegion) {
+ return scan(SubRegion);
+ }
+};
+}
+
+bool ScanReachableSymbols::scan(nonloc::CompoundVal val) {
+ for (nonloc::CompoundVal::iterator I=val.begin(), E=val.end(); I!=E; ++I)
+ if (!scan(*I))
+ return false;
+
+ return true;
+}
+
+bool ScanReachableSymbols::scan(SVal val) {
+ if (loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(&val))
+ return scan(X->getRegion());
+
+ if (nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(&val))
+ return scan(X->getLoc());
+
+ if (SymbolRef Sym = val.getAsSymbol())
+ return visitor.VisitSymbol(Sym);
+
+ if (nonloc::CompoundVal *X = dyn_cast<nonloc::CompoundVal>(&val))
+ return scan(*X);
+
+ return true;
+}
+
+bool ScanReachableSymbols::scan(const MemRegion *R) {
+ if (isa<MemSpaceRegion>(R) || visited.count(R))
+ return true;
+
+ visited.insert(R);
+
+ // If this is a symbolic region, visit the symbol for the region.
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+ if (!visitor.VisitSymbol(SR->getSymbol()))
+ return false;
+
+ // If this is a subregion, also visit the parent regions.
+ if (const SubRegion *SR = dyn_cast<SubRegion>(R))
+ if (!scan(SR->getSuperRegion()))
+ return false;
+
+ // Now look at the binding to this region (if any).
+ if (!scan(state->getSValAsScalarOrLoc(R)))
+ return false;
+
+ // Now look at the subregions.
+ if (!SRM.get())
+ SRM.reset(state->getStateManager().getStoreManager().
+ getSubRegionMap(state->getStore()));
+
+ return SRM->iterSubRegions(R, *this);
+}
+
+bool GRState::scanReachableSymbols(SVal val, SymbolVisitor& visitor) const {
+ ScanReachableSymbols S(this, visitor);
+ return S.scan(val);
+}
+
+bool GRState::scanReachableSymbols(const SVal *I, const SVal *E,
+ SymbolVisitor &visitor) const {
+ ScanReachableSymbols S(this, visitor);
+ for ( ; I != E; ++I) {
+ if (!S.scan(*I))
+ return false;
+ }
+ return true;
+}
+
+bool GRState::scanReachableSymbols(const MemRegion * const *I,
+ const MemRegion * const *E,
+ SymbolVisitor &visitor) const {
+ ScanReachableSymbols S(this, visitor);
+ for ( ; I != E; ++I) {
+ if (!S.scan(*I))
+ return false;
+ }
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Queries.
+//===----------------------------------------------------------------------===//
+
+bool GRStateManager::isEqual(const GRState* state, const Expr* Ex,
+ const llvm::APSInt& Y) {
+
+ SVal V = state->getSVal(Ex);
+
+ if (loc::ConcreteInt* X = dyn_cast<loc::ConcreteInt>(&V))
+ return X->getValue() == Y;
+
+ if (nonloc::ConcreteInt* X = dyn_cast<nonloc::ConcreteInt>(&V))
+ return X->getValue() == Y;
+
+ if (SymbolRef Sym = V.getAsSymbol())
+ return ConstraintMgr->isEqual(state, Sym, Y);
+
+ return false;
+}
+
+bool GRStateManager::isEqual(const GRState* state, const Expr* Ex, uint64_t x) {
+ return isEqual(state, Ex, getBasicVals().getValue(x, Ex->getType()));
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/LLVMConventionsChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/LLVMConventionsChecker.cpp
new file mode 100644
index 0000000..39ded43
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/LLVMConventionsChecker.cpp
@@ -0,0 +1,335 @@
+//=== LLVMConventionsChecker.cpp - Check LLVM codebase conventions ---*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines LLVMConventionsChecker, a collection of small checks for
+// enforcing specific coding conventions in the LLVM/Clang codebase.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Checker/Checkers/LocalCheckers.h"
+#include "clang/Checker/BugReporter/BugReporter.h"
+#include <string>
+#include "llvm/ADT/StringRef.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Generic type checking routines.
+//===----------------------------------------------------------------------===//
+
+static bool IsLLVMStringRef(QualType T) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ return llvm::StringRef(QualType(RT, 0).getAsString()) ==
+ "class llvm::StringRef";
+}
+
+static bool InStdNamespace(const Decl *D) {
+ const DeclContext *DC = D->getDeclContext();
+  const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
+ if (!ND)
+ return false;
+ const IdentifierInfo *II = ND->getIdentifier();
+ if (!II || II->getName() != "std")
+ return false;
+ DC = ND->getDeclContext();
+ return isa<TranslationUnitDecl>(DC);
+}
+
+static bool IsStdString(QualType T) {
+ if (const ElaboratedType *QT = T->getAs<ElaboratedType>())
+ T = QT->getNamedType();
+
+ const TypedefType *TT = T->getAs<TypedefType>();
+ if (!TT)
+ return false;
+
+ const TypedefDecl *TD = TT->getDecl();
+
+ if (!InStdNamespace(TD))
+ return false;
+
+ return TD->getName() == "string";
+}
+
+static bool InClangNamespace(const Decl *D) {
+ const DeclContext *DC = D->getDeclContext();
+  const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
+ if (!ND)
+ return false;
+ const IdentifierInfo *II = ND->getIdentifier();
+ if (!II || II->getName() != "clang")
+ return false;
+ DC = ND->getDeclContext();
+ return isa<TranslationUnitDecl>(DC);
+}
+
+static bool InLLVMNamespace(const Decl *D) {
+ const DeclContext *DC = D->getDeclContext();
+  const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
+ if (!ND)
+ return false;
+ const IdentifierInfo *II = ND->getIdentifier();
+ if (!II || II->getName() != "llvm")
+ return false;
+ DC = ND->getDeclContext();
+ return isa<TranslationUnitDecl>(DC);
+}
+
+static bool IsClangType(const RecordDecl *RD) {
+ return RD->getName() == "Type" && InClangNamespace(RD);
+}
+
+static bool IsClangDecl(const RecordDecl *RD) {
+ return RD->getName() == "Decl" && InClangNamespace(RD);
+}
+
+static bool IsClangStmt(const RecordDecl *RD) {
+ return RD->getName() == "Stmt" && InClangNamespace(RD);
+}
+
+static bool isClangAttr(const RecordDecl *RD) {
+ return RD->getName() == "Attr" && InClangNamespace(RD);
+}
+
+static bool IsStdVector(QualType T) {
+ const TemplateSpecializationType *TS = T->getAs<TemplateSpecializationType>();
+ if (!TS)
+ return false;
+
+ TemplateName TM = TS->getTemplateName();
+ TemplateDecl *TD = TM.getAsTemplateDecl();
+
+ if (!TD || !InStdNamespace(TD))
+ return false;
+
+ return TD->getName() == "vector";
+}
+
+static bool IsSmallVector(QualType T) {
+ const TemplateSpecializationType *TS = T->getAs<TemplateSpecializationType>();
+ if (!TS)
+ return false;
+
+ TemplateName TM = TS->getTemplateName();
+ TemplateDecl *TD = TM.getAsTemplateDecl();
+
+ if (!TD || !InLLVMNamespace(TD))
+ return false;
+
+ return TD->getName() == "SmallVector";
+}
+
+//===----------------------------------------------------------------------===//
+// CHECK: a llvm::StringRef should not be bound to a temporary std::string whose
+// lifetime is shorter than the StringRef's.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class StringRefCheckerVisitor : public StmtVisitor<StringRefCheckerVisitor> {
+ BugReporter &BR;
+public:
+ StringRefCheckerVisitor(BugReporter &br) : BR(br) {}
+ void VisitChildren(Stmt *S) {
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end() ;
+ I != E; ++I)
+ if (Stmt *child = *I)
+ Visit(child);
+ }
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+ void VisitDeclStmt(DeclStmt *DS);
+private:
+ void VisitVarDecl(VarDecl *VD);
+ void CheckStringRefBoundtoTemporaryString(VarDecl *VD);
+};
+} // end anonymous namespace
+
+static void CheckStringRefAssignedTemporary(const Decl *D, BugReporter &BR) {
+ StringRefCheckerVisitor walker(BR);
+ walker.Visit(D->getBody());
+}
+
+void StringRefCheckerVisitor::VisitDeclStmt(DeclStmt *S) {
+ VisitChildren(S);
+
+ for (DeclStmt::decl_iterator I = S->decl_begin(), E = S->decl_end();I!=E; ++I)
+ if (VarDecl *VD = dyn_cast<VarDecl>(*I))
+ VisitVarDecl(VD);
+}
+
+void StringRefCheckerVisitor::VisitVarDecl(VarDecl *VD) {
+ Expr *Init = VD->getInit();
+ if (!Init)
+ return;
+
+ // Pattern match for:
+ // llvm::StringRef x = call() (where call returns std::string)
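+  //
+  // The matched initializer has the following AST shape:
+  //   CXXExprWithTemporaries
+  //     CXXConstructExpr                 <-- constructs the StringRef
+  //       ImplicitCastExpr
+  //         CXXConstructExpr
+  //           ImplicitCastExpr
+  //             CXXBindTemporaryExpr     <-- the temporary std::string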
+ if (!IsLLVMStringRef(VD->getType()))
+ return;
+ CXXExprWithTemporaries *Ex1 = dyn_cast<CXXExprWithTemporaries>(Init);
+ if (!Ex1)
+ return;
+ CXXConstructExpr *Ex2 = dyn_cast<CXXConstructExpr>(Ex1->getSubExpr());
+ if (!Ex2 || Ex2->getNumArgs() != 1)
+ return;
+ ImplicitCastExpr *Ex3 = dyn_cast<ImplicitCastExpr>(Ex2->getArg(0));
+ if (!Ex3)
+ return;
+ CXXConstructExpr *Ex4 = dyn_cast<CXXConstructExpr>(Ex3->getSubExpr());
+ if (!Ex4 || Ex4->getNumArgs() != 1)
+ return;
+ ImplicitCastExpr *Ex5 = dyn_cast<ImplicitCastExpr>(Ex4->getArg(0));
+ if (!Ex5)
+ return;
+ CXXBindTemporaryExpr *Ex6 = dyn_cast<CXXBindTemporaryExpr>(Ex5->getSubExpr());
+ if (!Ex6 || !IsStdString(Ex6->getType()))
+ return;
+
+ // Okay, badness! Report an error.
+ const char *desc = "StringRef should not be bound to temporary "
+ "std::string that it outlives";
+
+ BR.EmitBasicReport(desc, "LLVM Conventions", desc,
+ VD->getLocStart(), Init->getSourceRange());
+}
+
+//===----------------------------------------------------------------------===//
+// CHECK: Clang AST nodes should not have fields that can allocate
+// memory.
+//===----------------------------------------------------------------------===//
+
+static bool AllocatesMemory(QualType T) {
+ return IsStdVector(T) || IsStdString(T) || IsSmallVector(T);
+}
+
+// This type checking could be sped up via dynamic programming.
+static bool IsPartOfAST(const CXXRecordDecl *R) {
+ if (IsClangStmt(R) || IsClangType(R) || IsClangDecl(R) || isClangAttr(R))
+ return true;
+
+ for (CXXRecordDecl::base_class_const_iterator I = R->bases_begin(),
+ E = R->bases_end(); I!=E; ++I) {
+    const CXXBaseSpecifier &BS = *I;
+ QualType T = BS.getType();
+ if (const RecordType *baseT = T->getAs<RecordType>()) {
+ CXXRecordDecl *baseD = cast<CXXRecordDecl>(baseT->getDecl());
+ if (IsPartOfAST(baseD))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+namespace {
+class ASTFieldVisitor {
+ llvm::SmallVector<FieldDecl*, 10> FieldChain;
+ CXXRecordDecl *Root;
+ BugReporter &BR;
+public:
+ ASTFieldVisitor(CXXRecordDecl *root, BugReporter &br)
+ : Root(root), BR(br) {}
+
+ void Visit(FieldDecl *D);
+ void ReportError(QualType T);
+};
+} // end anonymous namespace
+
+static void CheckASTMemory(CXXRecordDecl *R, BugReporter &BR) {
+ if (!IsPartOfAST(R))
+ return;
+
+ for (RecordDecl::field_iterator I = R->field_begin(), E = R->field_end();
+ I != E; ++I) {
+ ASTFieldVisitor walker(R, BR);
+ walker.Visit(*I);
+ }
+}
+
+void ASTFieldVisitor::Visit(FieldDecl *D) {
+ FieldChain.push_back(D);
+
+ QualType T = D->getType();
+
+ if (AllocatesMemory(T))
+ ReportError(T);
+
+  if (const RecordType *RT = T->getAs<RecordType>()) {
+    // Guard against record types that are only forward-declared.
+    if (const RecordDecl *RD = RT->getDecl()->getDefinition())
+      for (RecordDecl::field_iterator I = RD->field_begin(),
+           E = RD->field_end(); I != E; ++I)
+        Visit(*I);
+  }
+
+ FieldChain.pop_back();
+}
+
+void ASTFieldVisitor::ReportError(QualType T) {
+ llvm::SmallString<1024> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "AST class '" << Root->getName() << "' has a field '"
+ << FieldChain.front()->getName() << "' that allocates heap memory";
+ if (FieldChain.size() > 1) {
+ os << " via the following chain: ";
+ bool isFirst = true;
+ for (llvm::SmallVectorImpl<FieldDecl*>::iterator I=FieldChain.begin(),
+ E=FieldChain.end(); I!=E; ++I) {
+ if (!isFirst)
+ os << '.';
+ else
+ isFirst = false;
+ os << (*I)->getName();
+ }
+ }
+ os << " (type " << FieldChain.back()->getType().getAsString() << ")";
+ os.flush();
+
+ // Note that this will fire for every translation unit that uses this
+ // class. This is suboptimal, but at least scan-build will merge
+ // duplicate HTML reports. In the future we need a unified way of merging
+ // duplicate reports across translation units. For C++ classes we cannot
+ // just report warnings when we see an out-of-line method definition for a
+ // class, as that heuristic doesn't always work (the complete definition of
+ // the class may be in the header file, for example).
+ BR.EmitBasicReport("AST node allocates heap memory", "LLVM Conventions",
+ os.str(), FieldChain.front()->getLocStart());
+}
+
+//===----------------------------------------------------------------------===//
+// Entry point for all checks.
+//===----------------------------------------------------------------------===//
+
+static void ScanCodeDecls(DeclContext *DC, BugReporter &BR) {
+ for (DeclContext::decl_iterator I=DC->decls_begin(), E=DC->decls_end();
+ I!=E ; ++I) {
+
+ Decl *D = *I;
+
+ if (D->getBody())
+ CheckStringRefAssignedTemporary(D, BR);
+
+ if (CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(D))
+ if (R->isDefinition())
+ CheckASTMemory(R, BR);
+
+ if (DeclContext *DC_child = dyn_cast<DeclContext>(D))
+ ScanCodeDecls(DC_child, BR);
+ }
+}
+
+void clang::CheckLLVMConventions(TranslationUnitDecl &TU,
+ BugReporter &BR) {
+ ScanCodeDecls(&TU, BR);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Checker/MacOSXAPIChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/MacOSXAPIChecker.cpp
new file mode 100644
index 0000000..bcd96e7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/MacOSXAPIChecker.cpp
@@ -0,0 +1,141 @@
+// MacOSXAPIChecker.cpp - Checks proper use of various MacOS X APIs -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines MacOSXAPIChecker, which is an assortment of checks on calls
+// to various, widely used Mac OS X functions.
+//
+// FIXME: What's currently in BasicObjCFoundationChecks.cpp should be migrated
+// to here, using the new Checker interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+namespace {
+class MacOSXAPIChecker : public CheckerVisitor<MacOSXAPIChecker> {
+ enum SubChecks {
+ DispatchOnce = 0,
+ DispatchOnceF,
+ NumChecks
+ };
+
+ BugType *BTypes[NumChecks];
+
+public:
+ MacOSXAPIChecker() { memset(BTypes, 0, sizeof(*BTypes) * NumChecks); }
+ static void *getTag() { static unsigned tag = 0; return &tag; }
+
+ void PreVisitCallExpr(CheckerContext &C, const CallExpr *CE);
+};
+} //end anonymous namespace
+
+void clang::RegisterMacOSXAPIChecker(GRExprEngine &Eng) {
+ if (Eng.getContext().Target.getTriple().getVendor() == llvm::Triple::Apple)
+ Eng.registerCheck(new MacOSXAPIChecker());
+}
+
+//===----------------------------------------------------------------------===//
+// dispatch_once and dispatch_once_f
+//===----------------------------------------------------------------------===//
+
+static void CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
+ BugType *&BT, const IdentifierInfo *FI) {
+
+ if (!BT) {
+ llvm::SmallString<128> S;
+ llvm::raw_svector_ostream os(S);
+ os << "Improper use of '" << FI->getName() << '\'';
+ BT = new BugType(os.str(), "Mac OS X API");
+ }
+
+ if (CE->getNumArgs() < 1)
+ return;
+
+ // Check if the first argument is stack allocated. If so, issue a warning
+ // because that's likely to be bad news.
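+  //
+  // For example:
+  //   void f() {
+  //     dispatch_once_t pred;           // automatic (stack) storage
+  //     dispatch_once(&pred, ^{...});   // predicate may be reset on each call
+  //   }
+  //
+  // dispatch_once() only guarantees one-time execution when the predicate
+  // has static or global storage.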
+ const GRState *state = C.getState();
+ const MemRegion *R = state->getSVal(CE->getArg(0)).getAsRegion();
+ if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
+ return;
+
+ ExplodedNode *N = C.GenerateSink(state);
+ if (!N)
+ return;
+
+ llvm::SmallString<256> S;
+ llvm::raw_svector_ostream os(S);
+ os << "Call to '" << FI->getName() << "' uses";
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R))
+ os << " the local variable '" << VR->getDecl()->getName() << '\'';
+ else
+ os << " stack allocated memory";
+ os << " for the predicate value. Using such transient memory for "
+ "the predicate is potentially dangerous.";
+ if (isa<VarRegion>(R) && isa<StackLocalsSpaceRegion>(R->getMemorySpace()))
+ os << " Perhaps you intended to declare the variable as 'static'?";
+
+ EnhancedBugReport *report = new EnhancedBugReport(*BT, os.str(), N);
+ report->addRange(CE->getArg(0)->getSourceRange());
+ C.EmitReport(report);
+}
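+
+// For illustration, the misuse this flags (a sketch, assuming the usual
+// <dispatch/dispatch.h> declarations):
+//
+//   void bad() {
+//     dispatch_once_t pred = 0;             // stack storage -- warning
+//     dispatch_once(&pred, ^{ /* ... */ });
+//   }
+//
+//   void good() {
+//     static dispatch_once_t pred = 0;      // static storage -- no warning
+//     dispatch_once(&pred, ^{ /* ... */ });
+//   }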
+
+//===----------------------------------------------------------------------===//
+// Central dispatch function.
+//===----------------------------------------------------------------------===//
+
+typedef void (*SubChecker)(CheckerContext &C, const CallExpr *CE, BugType *&BT,
+ const IdentifierInfo *FI);
+namespace {
+ class SubCheck {
+ SubChecker SC;
+ BugType **BT;
+ public:
+ SubCheck(SubChecker sc, BugType *& bt) : SC(sc), BT(&bt) {}
+ SubCheck() : SC(NULL), BT(NULL) {}
+
+ void run(CheckerContext &C, const CallExpr *CE,
+ const IdentifierInfo *FI) const {
+ if (SC)
+ SC(C, CE, *BT, FI);
+ }
+ };
+} // end anonymous namespace
+
+void MacOSXAPIChecker::PreVisitCallExpr(CheckerContext &C, const CallExpr *CE) {
+ // FIXME: Mostly copy and paste from UnixAPIChecker. Should refactor.
+ const GRState *state = C.getState();
+ const Expr *Callee = CE->getCallee();
+ const FunctionTextRegion *Fn =
+ dyn_cast_or_null<FunctionTextRegion>(state->getSVal(Callee).getAsRegion());
+
+ if (!Fn)
+ return;
+
+ const IdentifierInfo *FI = Fn->getDecl()->getIdentifier();
+ if (!FI)
+ return;
+
+ const SubCheck &SC =
+ llvm::StringSwitch<SubCheck>(FI->getName())
+ .Case("dispatch_once", SubCheck(CheckDispatchOnce, BTypes[DispatchOnce]))
+ .Case("dispatch_once_f", SubCheck(CheckDispatchOnce,
+ BTypes[DispatchOnceF]))
+ .Default(SubCheck());
+
+ SC.run(C, CE, FI);
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/Makefile b/contrib/llvm/tools/clang/lib/Checker/Makefile
new file mode 100644
index 0000000..c45ab29
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/Makefile
@@ -0,0 +1,21 @@
+##===- clang/lib/Checker/Makefile --------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements analyses built on top of source-level CFGs.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME := clangChecker
+BUILD_ARCHIVE = 1
+
+CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
+
+include $(LEVEL)/Makefile.common
+
diff --git a/contrib/llvm/tools/clang/lib/Checker/MallocChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/MallocChecker.cpp
new file mode 100644
index 0000000..086dbd8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/MallocChecker.cpp
@@ -0,0 +1,367 @@
+//=== MallocChecker.cpp - A malloc/free checker -------------------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the malloc/free checker, which checks for potential
+// memory leaks, double frees, and use-after-free problems.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineExperimentalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "clang/Checker/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/ImmutableMap.h"
+using namespace clang;
+
+namespace {
+
+class RefState {
+ enum Kind { AllocateUnchecked, AllocateFailed, Released, Escaped } K;
+ const Stmt *S;
+
+public:
+ RefState(Kind k, const Stmt *s) : K(k), S(s) {}
+
+ bool isAllocated() const { return K == AllocateUnchecked; }
+ bool isReleased() const { return K == Released; }
+ bool isEscaped() const { return K == Escaped; }
+
+ bool operator==(const RefState &X) const {
+ return K == X.K && S == X.S;
+ }
+
+ static RefState getAllocateUnchecked(const Stmt *s) {
+ return RefState(AllocateUnchecked, s);
+ }
+ static RefState getAllocateFailed() {
+ return RefState(AllocateFailed, 0);
+ }
+ static RefState getReleased(const Stmt *s) { return RefState(Released, s); }
+ static RefState getEscaped(const Stmt *s) { return RefState(Escaped, s); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(K);
+ ID.AddPointer(S);
+ }
+};
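+
+// Sketch of the per-symbol transitions this state machine models:
+//
+//   void *p = malloc(n);    // AllocateUnchecked
+//   if (!p) return;         // AllocateFailed on the null-constrained path
+//   free(p);                // Released
+//   free(p);                // isReleased() -> double-free report
+//   *(char *)p = 0;         // isReleased() -> use-after-free report
+//   return p;               // Escaped: ownership leaves this function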
+
+class RegionState {};
+
+class MallocChecker : public CheckerVisitor<MallocChecker> {
+ BuiltinBug *BT_DoubleFree;
+ BuiltinBug *BT_Leak;
+ BuiltinBug *BT_UseFree;
+ IdentifierInfo *II_malloc, *II_free, *II_realloc;
+
+public:
+ MallocChecker()
+ : BT_DoubleFree(0), BT_Leak(0), BT_UseFree(0),
+ II_malloc(0), II_free(0), II_realloc(0) {}
+ static void *getTag();
+ bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
+ void EvalDeadSymbols(CheckerContext &C,const Stmt *S,SymbolReaper &SymReaper);
+ void EvalEndPath(GREndPathNodeBuilder &B, void *tag, GRExprEngine &Eng);
+ void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *S);
+ const GRState *EvalAssume(const GRState *state, SVal Cond, bool Assumption);
+ void VisitLocation(CheckerContext &C, const Stmt *S, SVal l);
+
+private:
+ void MallocMem(CheckerContext &C, const CallExpr *CE);
+ const GRState *MallocMemAux(CheckerContext &C, const CallExpr *CE,
+ const Expr *SizeEx, const GRState *state);
+ void FreeMem(CheckerContext &C, const CallExpr *CE);
+ const GRState *FreeMemAux(CheckerContext &C, const CallExpr *CE,
+ const GRState *state);
+
+ void ReallocMem(CheckerContext &C, const CallExpr *CE);
+};
+} // end anonymous namespace
+
+typedef llvm::ImmutableMap<SymbolRef, RefState> RegionStateTy;
+
+namespace clang {
+ template <>
+ struct GRStateTrait<RegionState>
+ : public GRStatePartialTrait<llvm::ImmutableMap<SymbolRef, RefState> > {
+ static void *GDMIndex() { return MallocChecker::getTag(); }
+ };
+}
+
+void clang::RegisterMallocChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new MallocChecker());
+}
+
+void *MallocChecker::getTag() {
+ static int x;
+ return &x;
+}
+
+bool MallocChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ const Expr *Callee = CE->getCallee();
+ SVal L = state->getSVal(Callee);
+
+ const FunctionDecl *FD = L.getAsFunctionDecl();
+ if (!FD)
+ return false;
+
+ ASTContext &Ctx = C.getASTContext();
+ if (!II_malloc)
+ II_malloc = &Ctx.Idents.get("malloc");
+ if (!II_free)
+ II_free = &Ctx.Idents.get("free");
+ if (!II_realloc)
+ II_realloc = &Ctx.Idents.get("realloc");
+
+ if (FD->getIdentifier() == II_malloc) {
+ MallocMem(C, CE);
+ return true;
+ }
+
+ if (FD->getIdentifier() == II_free) {
+ FreeMem(C, CE);
+ return true;
+ }
+
+ if (FD->getIdentifier() == II_realloc) {
+ ReallocMem(C, CE);
+ return true;
+ }
+
+ return false;
+}
+
+void MallocChecker::MallocMem(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = MallocMemAux(C, CE, CE->getArg(0), C.getState());
+ C.addTransition(state);
+}
+
+const GRState *MallocChecker::MallocMemAux(CheckerContext &C,
+ const CallExpr *CE,
+ const Expr *SizeEx,
+ const GRState *state) {
+ unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
+ ValueManager &ValMgr = C.getValueManager();
+
+ SVal RetVal = ValMgr.getConjuredSymbolVal(NULL, CE, CE->getType(), Count);
+
+ SVal Size = state->getSVal(SizeEx);
+
+ state = C.getEngine().getStoreManager().setExtent(state, RetVal.getAsRegion(),
+ Size);
+
+ state = state->BindExpr(CE, RetVal);
+
+ SymbolRef Sym = RetVal.getAsLocSymbol();
+ assert(Sym);
+ // Set the symbol's state to Allocated.
+ return state->set<RegionState>(Sym, RefState::getAllocateUnchecked(CE));
+}
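+
+// Illustrative effect of MallocMemAux (a sketch):
+//
+//   char *p = malloc(10);   // p binds to a fresh conjured symbol; the
+//                           // region's extent is set to 10, so bounds
+//                           // checkers can later compare offsets against it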
+
+void MallocChecker::FreeMem(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = FreeMemAux(C, CE, C.getState());
+
+ if (state)
+ C.addTransition(state);
+}
+
+const GRState *MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr *CE,
+ const GRState *state) {
+ SVal ArgVal = state->getSVal(CE->getArg(0));
+
+  // If ptr is NULL, no operation is performed.
+ if (ArgVal.isZeroConstant())
+ return state;
+
+ SymbolRef Sym = ArgVal.getAsLocSymbol();
+
+ // Various cases could lead to non-symbol values here.
+ if (!Sym)
+ return state;
+
+ const RefState *RS = state->get<RegionState>(Sym);
+
+  // If the symbol has not been tracked, return. This is possible when free()
+  // is called on a pointer whose pointee did not come directly from malloc().
+  // Full support of this requires inter-procedural analysis.
+ if (!RS)
+ return state;
+
+ // Check double free.
+ if (RS->isReleased()) {
+ ExplodedNode *N = C.GenerateSink();
+ if (N) {
+ if (!BT_DoubleFree)
+        BT_DoubleFree = new BuiltinBug("Double free",
+          "Attempt to free a memory block that has already been released");
+ // FIXME: should find where it's freed last time.
+ BugReport *R = new BugReport(*BT_DoubleFree,
+ BT_DoubleFree->getDescription(), N);
+ C.EmitReport(R);
+ }
+ return NULL;
+ }
+
+ // Normal free.
+ return state->set<RegionState>(Sym, RefState::getReleased(CE));
+}
+
+void MallocChecker::ReallocMem(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ const Expr *Arg0 = CE->getArg(0);
+ DefinedOrUnknownSVal Arg0Val=cast<DefinedOrUnknownSVal>(state->getSVal(Arg0));
+
+ ValueManager &ValMgr = C.getValueManager();
+ SValuator &SVator = C.getSValuator();
+
+ DefinedOrUnknownSVal PtrEQ = SVator.EvalEQ(state, Arg0Val, ValMgr.makeNull());
+
+ // If the ptr is NULL, the call is equivalent to malloc(size).
+ if (const GRState *stateEqual = state->Assume(PtrEQ, true)) {
+    // Hack: set the NULL symbolic region to released to suppress a false
+    // warning.
+ // In the future we should add more states for allocated regions, e.g.,
+ // CheckedNull, CheckedNonNull.
+
+ SymbolRef Sym = Arg0Val.getAsLocSymbol();
+ if (Sym)
+ stateEqual = stateEqual->set<RegionState>(Sym, RefState::getReleased(CE));
+
+ const GRState *stateMalloc = MallocMemAux(C, CE, CE->getArg(1), stateEqual);
+ C.addTransition(stateMalloc);
+ }
+
+ if (const GRState *stateNotEqual = state->Assume(PtrEQ, false)) {
+ const Expr *Arg1 = CE->getArg(1);
+ DefinedOrUnknownSVal Arg1Val =
+ cast<DefinedOrUnknownSVal>(stateNotEqual->getSVal(Arg1));
+ DefinedOrUnknownSVal SizeZero = SVator.EvalEQ(stateNotEqual, Arg1Val,
+ ValMgr.makeIntValWithPtrWidth(0, false));
+
+ if (const GRState *stateSizeZero = stateNotEqual->Assume(SizeZero, true)) {
+ const GRState *stateFree = FreeMemAux(C, CE, stateSizeZero);
+ if (stateFree)
+ C.addTransition(stateFree->BindExpr(CE, UndefinedVal(), true));
+ }
+
+ if (const GRState *stateSizeNotZero=stateNotEqual->Assume(SizeZero,false)) {
+ const GRState *stateFree = FreeMemAux(C, CE, stateSizeNotZero);
+ if (stateFree) {
+ // FIXME: We should copy the content of the original buffer.
+ const GRState *stateRealloc = MallocMemAux(C, CE, CE->getArg(1),
+ stateFree);
+ C.addTransition(stateRealloc);
+ }
+ }
+ }
+}
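+
+// The three realloc() behaviors modeled above, in source form (sketch):
+//
+//   realloc(NULL, n);   // equivalent to malloc(n)
+//   realloc(p, 0);      // treated as free(p); the result is undefined here
+//   realloc(p, n);      // free(p) followed by a fresh allocation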
+
+void MallocChecker::EvalDeadSymbols(CheckerContext &C, const Stmt *S,
+ SymbolReaper &SymReaper) {
+ for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+ E = SymReaper.dead_end(); I != E; ++I) {
+ SymbolRef Sym = *I;
+ const GRState *state = C.getState();
+ const RefState *RS = state->get<RegionState>(Sym);
+    if (!RS)
+      continue;  // Not a symbol tracked by this checker.
+
+ if (RS->isAllocated()) {
+ ExplodedNode *N = C.GenerateSink();
+ if (N) {
+ if (!BT_Leak)
+ BT_Leak = new BuiltinBug("Memory leak",
+ "Allocated memory never released. Potential memory leak.");
+ // FIXME: where it is allocated.
+ BugReport *R = new BugReport(*BT_Leak, BT_Leak->getDescription(), N);
+ C.EmitReport(R);
+ }
+ }
+ }
+}
+
+void MallocChecker::EvalEndPath(GREndPathNodeBuilder &B, void *tag,
+ GRExprEngine &Eng) {
+ SaveAndRestore<bool> OldHasGen(B.HasGeneratedNode);
+ const GRState *state = B.getState();
+  RegionStateTy M = state->get<RegionState>();
+
+  for (RegionStateTy::iterator I = M.begin(), E = M.end(); I != E; ++I) {
+ RefState RS = I->second;
+ if (RS.isAllocated()) {
+ ExplodedNode *N = B.generateNode(state, tag, B.getPredecessor());
+ if (N) {
+ if (!BT_Leak)
+ BT_Leak = new BuiltinBug("Memory leak",
+ "Allocated memory never released. Potential memory leak.");
+ BugReport *R = new BugReport(*BT_Leak, BT_Leak->getDescription(), N);
+ Eng.getBugReporter().EmitReport(R);
+ }
+ }
+ }
+}
+
+void MallocChecker::PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *S) {
+ const Expr *RetE = S->getRetValue();
+ if (!RetE)
+ return;
+
+ const GRState *state = C.getState();
+
+ SymbolRef Sym = state->getSVal(RetE).getAsSymbol();
+
+ if (!Sym)
+ return;
+
+ const RefState *RS = state->get<RegionState>(Sym);
+ if (!RS)
+ return;
+
+ // FIXME: check other cases.
+ if (RS->isAllocated())
+ state = state->set<RegionState>(Sym, RefState::getEscaped(S));
+
+ C.addTransition(state);
+}
+
+const GRState *MallocChecker::EvalAssume(const GRState *state, SVal Cond,
+ bool Assumption) {
+  // If a symbolic region is assumed to be NULL, set its state to
+  // AllocateFailed.
+  // FIXME: should also check symbols assumed to be non-null.
+
+ RegionStateTy RS = state->get<RegionState>();
+
+ for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
+ if (state->getSymVal(I.getKey()))
+ state = state->set<RegionState>(I.getKey(),RefState::getAllocateFailed());
+ }
+
+ return state;
+}
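+
+// This is what keeps the allocation-failure path quiet (sketch):
+//
+//   void *p = malloc(n);
+//   if (!p)
+//     return;   // p's symbol is constrained to null, so its state becomes
+//               // AllocateFailed and no leak is reported on this path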
+
+// Check if the location is a freed symbolic region.
+void MallocChecker::VisitLocation(CheckerContext &C, const Stmt *S, SVal l) {
+ SymbolRef Sym = l.getLocSymbolInBase();
+ if (Sym) {
+ const RefState *RS = C.getState()->get<RegionState>(Sym);
+ if (RS)
+ if (RS->isReleased()) {
+        ExplodedNode *N = C.GenerateSink();
+        if (!N)
+          return;
+
+        if (!BT_UseFree)
+          BT_UseFree = new BuiltinBug("Use of dynamically allocated memory "
+                                      "after it is freed.");
+
+ BugReport *R = new BugReport(*BT_UseFree, BT_UseFree->getDescription(),
+ N);
+ C.EmitReport(R);
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/ManagerRegistry.cpp b/contrib/llvm/tools/clang/lib/Checker/ManagerRegistry.cpp
new file mode 100644
index 0000000..d11a997
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/ManagerRegistry.cpp
@@ -0,0 +1,20 @@
+//===- ManagerRegistry.cpp - Pluggable Analyzer module creators -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the pluggable analyzer module creators.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/ManagerRegistry.h"
+
+using namespace clang;
+
+StoreManagerCreator ManagerRegistry::StoreMgrCreator = 0;
+
+ConstraintManagerCreator ManagerRegistry::ConstraintMgrCreator = 0;
diff --git a/contrib/llvm/tools/clang/lib/Checker/MemRegion.cpp b/contrib/llvm/tools/clang/lib/Checker/MemRegion.cpp
new file mode 100644
index 0000000..575458c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/MemRegion.cpp
@@ -0,0 +1,807 @@
+//== MemRegion.cpp - Abstract memory regions for static analysis --*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines MemRegion and its subclasses. MemRegion defines a
+// partially-typed abstraction of memory useful for path-sensitive dataflow
+// analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/MemRegion.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/Support/BumpVector.h"
+#include "clang/AST/CharUnits.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// MemRegion Construction.
+//===----------------------------------------------------------------------===//
+
+template<typename RegionTy> struct MemRegionManagerTrait;
+
+template <typename RegionTy, typename A1>
+RegionTy* MemRegionManager::getRegion(const A1 a1) {
+
+ const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion =
+ MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1);
+
+ llvm::FoldingSetNodeID ID;
+ RegionTy::ProfileRegion(ID, a1, superRegion);
+ void* InsertPos;
+ RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+ InsertPos));
+
+ if (!R) {
+ R = (RegionTy*) A.Allocate<RegionTy>();
+ new (R) RegionTy(a1, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+template <typename RegionTy, typename A1>
+RegionTy* MemRegionManager::getSubRegion(const A1 a1,
+ const MemRegion *superRegion) {
+ llvm::FoldingSetNodeID ID;
+ RegionTy::ProfileRegion(ID, a1, superRegion);
+ void* InsertPos;
+ RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+ InsertPos));
+
+ if (!R) {
+ R = (RegionTy*) A.Allocate<RegionTy>();
+ new (R) RegionTy(a1, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+template <typename RegionTy, typename A1, typename A2>
+RegionTy* MemRegionManager::getRegion(const A1 a1, const A2 a2) {
+
+ const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion =
+ MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1, a2);
+
+ llvm::FoldingSetNodeID ID;
+ RegionTy::ProfileRegion(ID, a1, a2, superRegion);
+ void* InsertPos;
+ RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+ InsertPos));
+
+ if (!R) {
+ R = (RegionTy*) A.Allocate<RegionTy>();
+ new (R) RegionTy(a1, a2, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+template <typename RegionTy, typename A1, typename A2>
+RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2,
+ const MemRegion *superRegion) {
+
+ llvm::FoldingSetNodeID ID;
+ RegionTy::ProfileRegion(ID, a1, a2, superRegion);
+ void* InsertPos;
+ RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+ InsertPos));
+
+ if (!R) {
+ R = (RegionTy*) A.Allocate<RegionTy>();
+ new (R) RegionTy(a1, a2, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+template <typename RegionTy, typename A1, typename A2, typename A3>
+RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2, const A3 a3,
+ const MemRegion *superRegion) {
+
+ llvm::FoldingSetNodeID ID;
+ RegionTy::ProfileRegion(ID, a1, a2, a3, superRegion);
+ void* InsertPos;
+ RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+ InsertPos));
+
+ if (!R) {
+ R = (RegionTy*) A.Allocate<RegionTy>();
+ new (R) RegionTy(a1, a2, a3, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
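+
+// All of the factories above share one FoldingSet interning idiom: profile
+// the prospective region, probe for an existing node, and only allocate on a
+// miss. In pseudocode (names here are purely illustrative):
+//
+//   llvm::FoldingSetNodeID ID;
+//   RegionTy::ProfileRegion(ID, args..., superRegion);  // hash the identity
+//   R = find-existing-node(ID);                         // probe the set
+//   if (!R)
+//     R = bump-allocate + placement-new, then InsertNode
+//
+// Structurally equal regions are therefore pointer-equal, so the analyzer
+// can compare regions with a plain pointer comparison.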
+
+//===----------------------------------------------------------------------===//
+// Object destruction.
+//===----------------------------------------------------------------------===//
+
+MemRegion::~MemRegion() {}
+
+MemRegionManager::~MemRegionManager() {
+ // All regions and their data are BumpPtrAllocated. No need to call
+ // their destructors.
+}
+
+//===----------------------------------------------------------------------===//
+// Basic methods.
+//===----------------------------------------------------------------------===//
+
+bool SubRegion::isSubRegionOf(const MemRegion* R) const {
+ const MemRegion* r = getSuperRegion();
+ while (r != 0) {
+ if (r == R)
+ return true;
+ if (const SubRegion* sr = dyn_cast<SubRegion>(r))
+ r = sr->getSuperRegion();
+ else
+ break;
+ }
+ return false;
+}
+
+MemRegionManager* SubRegion::getMemRegionManager() const {
+ const SubRegion* r = this;
+ do {
+ const MemRegion *superRegion = r->getSuperRegion();
+ if (const SubRegion *sr = dyn_cast<SubRegion>(superRegion)) {
+ r = sr;
+ continue;
+ }
+ return superRegion->getMemRegionManager();
+ } while (1);
+}
+
+const StackFrameContext *VarRegion::getStackFrame() const {
+ const StackSpaceRegion *SSR = dyn_cast<StackSpaceRegion>(getMemorySpace());
+ return SSR ? SSR->getStackFrame() : NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// FoldingSet profiling.
+//===----------------------------------------------------------------------===//
+
+void MemSpaceRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.AddInteger((unsigned)getKind());
+}
+
+void StackSpaceRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger((unsigned)getKind());
+ ID.AddPointer(getStackFrame());
+}
+
+void StringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const StringLiteral* Str,
+ const MemRegion* superRegion) {
+ ID.AddInteger((unsigned) StringRegionKind);
+ ID.AddPointer(Str);
+ ID.AddPointer(superRegion);
+}
+
+void AllocaRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const Expr* Ex, unsigned cnt,
+ const MemRegion *) {
+ ID.AddInteger((unsigned) AllocaRegionKind);
+ ID.AddPointer(Ex);
+ ID.AddInteger(cnt);
+}
+
+void AllocaRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ ProfileRegion(ID, Ex, Cnt, superRegion);
+}
+
+void CompoundLiteralRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ CompoundLiteralRegion::ProfileRegion(ID, CL, superRegion);
+}
+
+void CompoundLiteralRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const CompoundLiteralExpr* CL,
+ const MemRegion* superRegion) {
+ ID.AddInteger((unsigned) CompoundLiteralRegionKind);
+ ID.AddPointer(CL);
+ ID.AddPointer(superRegion);
+}
+
+void CXXThisRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ const PointerType *PT,
+ const MemRegion *sRegion) {
+ ID.AddInteger((unsigned) CXXThisRegionKind);
+ ID.AddPointer(PT);
+ ID.AddPointer(sRegion);
+}
+
+void CXXThisRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ CXXThisRegion::ProfileRegion(ID, ThisPointerTy, superRegion);
+}
+
+void DeclRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, const Decl* D,
+ const MemRegion* superRegion, Kind k) {
+ ID.AddInteger((unsigned) k);
+ ID.AddPointer(D);
+ ID.AddPointer(superRegion);
+}
+
+void DeclRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ DeclRegion::ProfileRegion(ID, D, superRegion, getKind());
+}
+
+void VarRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ VarRegion::ProfileRegion(ID, getDecl(), superRegion);
+}
+
+void SymbolicRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, SymbolRef sym,
+ const MemRegion *sreg) {
+ ID.AddInteger((unsigned) MemRegion::SymbolicRegionKind);
+ ID.Add(sym);
+ ID.AddPointer(sreg);
+}
+
+void SymbolicRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ SymbolicRegion::ProfileRegion(ID, sym, getSuperRegion());
+}
+
+void ElementRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ QualType ElementType, SVal Idx,
+ const MemRegion* superRegion) {
+ ID.AddInteger(MemRegion::ElementRegionKind);
+ ID.Add(ElementType);
+ ID.AddPointer(superRegion);
+ Idx.Profile(ID);
+}
+
+void ElementRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ ElementRegion::ProfileRegion(ID, ElementType, Index, superRegion);
+}
+
+void FunctionTextRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const FunctionDecl *FD,
+ const MemRegion*) {
+ ID.AddInteger(MemRegion::FunctionTextRegionKind);
+ ID.AddPointer(FD);
+}
+
+void FunctionTextRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ FunctionTextRegion::ProfileRegion(ID, FD, superRegion);
+}
+
+void BlockTextRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const BlockDecl *BD, CanQualType,
+ const AnalysisContext *AC,
+ const MemRegion*) {
+ ID.AddInteger(MemRegion::BlockTextRegionKind);
+ ID.AddPointer(BD);
+}
+
+void BlockTextRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ BlockTextRegion::ProfileRegion(ID, BD, locTy, AC, superRegion);
+}
+
+void BlockDataRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const BlockTextRegion *BC,
+ const LocationContext *LC,
+ const MemRegion *sReg) {
+ ID.AddInteger(MemRegion::BlockDataRegionKind);
+ ID.AddPointer(BC);
+ ID.AddPointer(LC);
+ ID.AddPointer(sReg);
+}
+
+void BlockDataRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ BlockDataRegion::ProfileRegion(ID, BC, LC, getSuperRegion());
+}
+
+void CXXObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ Expr const *Ex,
+ const MemRegion *sReg) {
+ ID.AddPointer(Ex);
+ ID.AddPointer(sReg);
+}
+
+void CXXObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, Ex, getSuperRegion());
+}
+
+//===----------------------------------------------------------------------===//
+// Region pretty-printing.
+//===----------------------------------------------------------------------===//
+
+void MemRegion::dump() const {
+ dumpToStream(llvm::errs());
+}
+
+std::string MemRegion::getString() const {
+ std::string s;
+ llvm::raw_string_ostream os(s);
+ dumpToStream(os);
+ return os.str();
+}
+
+void MemRegion::dumpToStream(llvm::raw_ostream& os) const {
+ os << "<Unknown Region>";
+}
+
+void AllocaRegion::dumpToStream(llvm::raw_ostream& os) const {
+ os << "alloca{" << (void*) Ex << ',' << Cnt << '}';
+}
+
+void FunctionTextRegion::dumpToStream(llvm::raw_ostream& os) const {
+ os << "code{" << getDecl()->getDeclName().getAsString() << '}';
+}
+
+void BlockTextRegion::dumpToStream(llvm::raw_ostream& os) const {
+ os << "block_code{" << (void*) this << '}';
+}
+
+void BlockDataRegion::dumpToStream(llvm::raw_ostream& os) const {
+ os << "block_data{" << BC << '}';
+}
+
+
+void CompoundLiteralRegion::dumpToStream(llvm::raw_ostream& os) const {
+ // FIXME: More elaborate pretty-printing.
+ os << "{ " << (void*) CL << " }";
+}
+
+void CXXThisRegion::dumpToStream(llvm::raw_ostream &os) const {
+ os << "this";
+}
+
+void ElementRegion::dumpToStream(llvm::raw_ostream& os) const {
+ os << "element{" << superRegion << ','
+ << Index << ',' << getElementType().getAsString() << '}';
+}
+
+void FieldRegion::dumpToStream(llvm::raw_ostream& os) const {
+ os << superRegion << "->" << getDecl();
+}
+
+void ObjCIvarRegion::dumpToStream(llvm::raw_ostream& os) const {
+ os << "ivar{" << superRegion << ',' << getDecl() << '}';
+}
+
+void StringRegion::dumpToStream(llvm::raw_ostream& os) const {
+ Str->printPretty(os, 0, PrintingPolicy(getContext().getLangOptions()));
+}
+
+void SymbolicRegion::dumpToStream(llvm::raw_ostream& os) const {
+ os << "SymRegion{" << sym << '}';
+}
+
+void VarRegion::dumpToStream(llvm::raw_ostream& os) const {
+ os << cast<VarDecl>(D);
+}
+
+void RegionRawOffset::dump() const {
+ dumpToStream(llvm::errs());
+}
+
+void RegionRawOffset::dumpToStream(llvm::raw_ostream& os) const {
+ os << "raw_offset{" << getRegion() << ',' << getByteOffset() << '}';
+}
+
+//===----------------------------------------------------------------------===//
+// MemRegionManager methods.
+//===----------------------------------------------------------------------===//
+
+template <typename REG>
+const REG *MemRegionManager::LazyAllocate(REG*& region) {
+ if (!region) {
+ region = (REG*) A.Allocate<REG>();
+ new (region) REG(this);
+ }
+
+ return region;
+}
+
+template <typename REG, typename ARG>
+const REG *MemRegionManager::LazyAllocate(REG*& region, ARG a) {
+ if (!region) {
+ region = (REG*) A.Allocate<REG>();
+ new (region) REG(this, a);
+ }
+
+ return region;
+}
+
+const StackLocalsSpaceRegion*
+MemRegionManager::getStackLocalsRegion(const StackFrameContext *STC) {
+ assert(STC);
+ StackLocalsSpaceRegion *&R = StackLocalsSpaceRegions[STC];
+
+ if (R)
+ return R;
+
+ R = A.Allocate<StackLocalsSpaceRegion>();
+ new (R) StackLocalsSpaceRegion(this, STC);
+ return R;
+}
+
+const StackArgumentsSpaceRegion *
+MemRegionManager::getStackArgumentsRegion(const StackFrameContext *STC) {
+ assert(STC);
+ StackArgumentsSpaceRegion *&R = StackArgumentsSpaceRegions[STC];
+
+ if (R)
+ return R;
+
+ R = A.Allocate<StackArgumentsSpaceRegion>();
+ new (R) StackArgumentsSpaceRegion(this, STC);
+ return R;
+}
+
+const GlobalsSpaceRegion *MemRegionManager::getGlobalsRegion() {
+ return LazyAllocate(globals);
+}
+
+const HeapSpaceRegion *MemRegionManager::getHeapRegion() {
+ return LazyAllocate(heap);
+}
+
+const MemSpaceRegion *MemRegionManager::getUnknownRegion() {
+ return LazyAllocate(unknown);
+}
+
+const MemSpaceRegion *MemRegionManager::getCodeRegion() {
+ return LazyAllocate(code);
+}
+
+//===----------------------------------------------------------------------===//
+// Constructing regions.
+//===----------------------------------------------------------------------===//
+
+const StringRegion* MemRegionManager::getStringRegion(const StringLiteral* Str) {
+ return getSubRegion<StringRegion>(Str, getGlobalsRegion());
+}
+
+const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
+ const LocationContext *LC) {
+ const MemRegion *sReg = 0;
+
+ if (D->hasLocalStorage()) {
+ // FIXME: Once we implement scope handling, we will need to properly lookup
+ // 'D' to the proper LocationContext.
+ const DeclContext *DC = D->getDeclContext();
+ const StackFrameContext *STC = LC->getStackFrameForDeclContext(DC);
+
+ if (!STC)
+ sReg = getUnknownRegion();
+ else {
+ sReg = isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)
+ ? static_cast<const MemRegion*>(getStackArgumentsRegion(STC))
+ : static_cast<const MemRegion*>(getStackLocalsRegion(STC));
+ }
+ }
+ else {
+ sReg = getGlobalsRegion();
+ }
+
+ return getSubRegion<VarRegion>(D, sReg);
+}
+
+const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
+ const MemRegion *superR) {
+ return getSubRegion<VarRegion>(D, superR);
+}
+
+const BlockDataRegion *
+MemRegionManager::getBlockDataRegion(const BlockTextRegion *BC,
+ const LocationContext *LC) {
+ const MemRegion *sReg = 0;
+
+ if (LC) {
+ // FIXME: Once we implement scope handling, we want the parent region
+ // to be the scope.
+ const StackFrameContext *STC = LC->getCurrentStackFrame();
+ assert(STC);
+ sReg = getStackLocalsRegion(STC);
+ }
+ else {
+    // We allow 'LC' to be NULL for cases where we want BlockDataRegions
+    // without context-sensitivity.
+ sReg = getUnknownRegion();
+ }
+
+ return getSubRegion<BlockDataRegion>(BC, LC, sReg);
+}
+
+const CompoundLiteralRegion*
+MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr* CL,
+ const LocationContext *LC) {
+
+ const MemRegion *sReg = 0;
+
+ if (CL->isFileScope())
+ sReg = getGlobalsRegion();
+ else {
+ const StackFrameContext *STC = LC->getCurrentStackFrame();
+ assert(STC);
+ sReg = getStackLocalsRegion(STC);
+ }
+
+ return getSubRegion<CompoundLiteralRegion>(CL, sReg);
+}
+
+const ElementRegion*
+MemRegionManager::getElementRegion(QualType elementType, SVal Idx,
+ const MemRegion* superRegion,
+ ASTContext& Ctx){
+
+ QualType T = Ctx.getCanonicalType(elementType).getUnqualifiedType();
+
+ llvm::FoldingSetNodeID ID;
+ ElementRegion::ProfileRegion(ID, T, Idx, superRegion);
+
+ void* InsertPos;
+ MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
+ ElementRegion* R = cast_or_null<ElementRegion>(data);
+
+ if (!R) {
+ R = (ElementRegion*) A.Allocate<ElementRegion>();
+ new (R) ElementRegion(T, Idx, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+const FunctionTextRegion *
+MemRegionManager::getFunctionTextRegion(const FunctionDecl *FD) {
+ return getSubRegion<FunctionTextRegion>(FD, getCodeRegion());
+}
+
+const BlockTextRegion *
+MemRegionManager::getBlockTextRegion(const BlockDecl *BD, CanQualType locTy,
+ AnalysisContext *AC) {
+ return getSubRegion<BlockTextRegion>(BD, locTy, AC, getCodeRegion());
+}
+
+
+/// getSymbolicRegion - Retrieve or create a "symbolic" memory region.
+const SymbolicRegion *MemRegionManager::getSymbolicRegion(SymbolRef sym) {
+ return getSubRegion<SymbolicRegion>(sym, getUnknownRegion());
+}
+
+const FieldRegion*
+MemRegionManager::getFieldRegion(const FieldDecl* d,
+ const MemRegion* superRegion){
+ return getSubRegion<FieldRegion>(d, superRegion);
+}
+
+const ObjCIvarRegion*
+MemRegionManager::getObjCIvarRegion(const ObjCIvarDecl* d,
+ const MemRegion* superRegion) {
+ return getSubRegion<ObjCIvarRegion>(d, superRegion);
+}
+
+const CXXObjectRegion*
+MemRegionManager::getCXXObjectRegion(Expr const *E,
+ LocationContext const *LC) {
+ const StackFrameContext *SFC = LC->getCurrentStackFrame();
+ assert(SFC);
+ return getSubRegion<CXXObjectRegion>(E, getStackLocalsRegion(SFC));
+}
+
+const CXXThisRegion*
+MemRegionManager::getCXXThisRegion(QualType thisPointerTy,
+ const LocationContext *LC) {
+ const StackFrameContext *STC = LC->getCurrentStackFrame();
+ assert(STC);
+ const PointerType *PT = thisPointerTy->getAs<PointerType>();
+ assert(PT);
+ return getSubRegion<CXXThisRegion>(PT, getStackArgumentsRegion(STC));
+}
+
+const AllocaRegion*
+MemRegionManager::getAllocaRegion(const Expr* E, unsigned cnt,
+ const LocationContext *LC) {
+ const StackFrameContext *STC = LC->getCurrentStackFrame();
+ assert(STC);
+ return getSubRegion<AllocaRegion>(E, cnt, getStackLocalsRegion(STC));
+}
+
+const MemSpaceRegion *MemRegion::getMemorySpace() const {
+ const MemRegion *R = this;
+ const SubRegion* SR = dyn_cast<SubRegion>(this);
+
+ while (SR) {
+ R = SR->getSuperRegion();
+ SR = dyn_cast<SubRegion>(R);
+ }
+
+ return dyn_cast<MemSpaceRegion>(R);
+}
+
+bool MemRegion::hasStackStorage() const {
+ return isa<StackSpaceRegion>(getMemorySpace());
+}
+
+bool MemRegion::hasStackNonParametersStorage() const {
+ return isa<StackLocalsSpaceRegion>(getMemorySpace());
+}
+
+bool MemRegion::hasStackParametersStorage() const {
+ return isa<StackArgumentsSpaceRegion>(getMemorySpace());
+}
+
+bool MemRegion::hasGlobalsOrParametersStorage() const {
+ const MemSpaceRegion *MS = getMemorySpace();
+ return isa<StackArgumentsSpaceRegion>(MS) ||
+ isa<GlobalsSpaceRegion>(MS);
+}
+
+// getBaseRegion strips away all element and field layers and returns the
+// base region underneath them.
+const MemRegion *MemRegion::getBaseRegion() const {
+ const MemRegion *R = this;
+ while (true) {
+ switch (R->getKind()) {
+ case MemRegion::ElementRegionKind:
+ case MemRegion::FieldRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ R = cast<SubRegion>(R)->getSuperRegion();
+ continue;
+ default:
+ break;
+ }
+ break;
+ }
+ return R;
+}
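+
+// For example, the lvalue 's.arr[i].x' is modeled as
+// FieldRegion{x, ElementRegion{i, FieldRegion{arr, VarRegion{s}}}};
+// getBaseRegion() returns the VarRegion for 's'.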
+
+//===----------------------------------------------------------------------===//
+// View handling.
+//===----------------------------------------------------------------------===//
+
+const MemRegion *MemRegion::StripCasts() const {
+ const MemRegion *R = this;
+ while (true) {
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ // FIXME: generalize. Essentially we want to strip away ElementRegions
+ // that were layered on a symbolic region because of casts. We only
+ // want to strip away ElementRegions, however, where the index is 0.
+ SVal index = ER->getIndex();
+ if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&index)) {
+ if (CI->getValue().getSExtValue() == 0) {
+ R = ER->getSuperRegion();
+ continue;
+ }
+ }
+ }
+ break;
+ }
+ return R;
+}
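+
+// For example, casting a symbolic 'void *p' to 'int *' is modeled by
+// layering ElementRegion{int, 0} on top of SymRegion{p}; StripCasts()
+// peels that zero-index layer off again, yielding SymRegion{p}.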
+
+// FIXME: Merge with the implementation of the same method in Store.cpp
+static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *D = RT->getDecl();
+ if (!D->getDefinition())
+ return false;
+ }
+
+ return true;
+}
+
+RegionRawOffset ElementRegion::getAsRawOffset() const {
+ CharUnits offset = CharUnits::Zero();
+ const ElementRegion *ER = this;
+ const MemRegion *superR = NULL;
+ ASTContext &C = getContext();
+
+ // FIXME: Handle multi-dimensional arrays.
+
+ while (ER) {
+ superR = ER->getSuperRegion();
+
+ // FIXME: generalize to symbolic offsets.
+ SVal index = ER->getIndex();
+ if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&index)) {
+ // Update the offset.
+ int64_t i = CI->getValue().getSExtValue();
+
+ if (i != 0) {
+ QualType elemType = ER->getElementType();
+
+ // If we are pointing to an incomplete type, go no further.
+ if (!IsCompleteType(C, elemType)) {
+ superR = ER;
+ break;
+ }
+
+ CharUnits size = C.getTypeSizeInChars(elemType);
+ offset += (i * size);
+ }
+
+ // Go to the next ElementRegion (if any).
+ ER = dyn_cast<ElementRegion>(superR);
+ continue;
+ }
+
+ return NULL;
+ }
+
+ assert(superR && "super region cannot be NULL");
+ return RegionRawOffset(superR, offset.getQuantity());
+}
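+
+// Worked example: given 'int a[10];' with 4-byte ints, the region for 'a[2]'
+// is ElementRegion{int, 2, VarRegion{a}}, which folds to
+// RegionRawOffset(VarRegion{a}, 8).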
+
+//===----------------------------------------------------------------------===//
+// BlockDataRegion
+//===----------------------------------------------------------------------===//
+
+void BlockDataRegion::LazyInitializeReferencedVars() {
+ if (ReferencedVars)
+ return;
+
+ AnalysisContext *AC = getCodeRegion()->getAnalysisContext();
+ AnalysisContext::referenced_decls_iterator I, E;
+ llvm::tie(I, E) = AC->getReferencedBlockVars(BC->getDecl());
+
+ if (I == E) {
+ ReferencedVars = (void*) 0x1;
+ return;
+ }
+
+ MemRegionManager &MemMgr = *getMemRegionManager();
+ llvm::BumpPtrAllocator &A = MemMgr.getAllocator();
+ BumpVectorContext BC(A);
+
+ typedef BumpVector<const MemRegion*> VarVec;
+ VarVec *BV = (VarVec*) A.Allocate<VarVec>();
+ new (BV) VarVec(BC, E - I);
+
+ for ( ; I != E; ++I) {
+ const VarDecl *VD = *I;
+ const VarRegion *VR = 0;
+
+ if (!VD->getAttr<BlocksAttr>() && VD->hasLocalStorage())
+ VR = MemMgr.getVarRegion(VD, this);
+ else {
+ if (LC)
+ VR = MemMgr.getVarRegion(VD, LC);
+ else {
+ VR = MemMgr.getVarRegion(VD, MemMgr.getUnknownRegion());
+ }
+ }
+
+ assert(VR);
+ BV->push_back(VR, BC);
+ }
+
+ ReferencedVars = BV;
+}
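+
+// For example (sketch; 'use' is an arbitrary callee):
+//
+//   int x = 0;
+//   __block int y = 0;
+//   void (^b)(void) = ^{ use(x, y); };
+//
+// The BlockDataRegion for 'b' gets a fresh VarRegion for the captured copy
+// of 'x' (super-region: the block data itself), while the __block 'y' keeps
+// its original VarRegion from the enclosing context.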
+
+BlockDataRegion::referenced_vars_iterator
+BlockDataRegion::referenced_vars_begin() const {
+ const_cast<BlockDataRegion*>(this)->LazyInitializeReferencedVars();
+
+ BumpVector<const MemRegion*> *Vec =
+ static_cast<BumpVector<const MemRegion*>*>(ReferencedVars);
+
+ return BlockDataRegion::referenced_vars_iterator(Vec == (void*) 0x1 ?
+ NULL : Vec->begin());
+}
+
+BlockDataRegion::referenced_vars_iterator
+BlockDataRegion::referenced_vars_end() const {
+ const_cast<BlockDataRegion*>(this)->LazyInitializeReferencedVars();
+
+ BumpVector<const MemRegion*> *Vec =
+ static_cast<BumpVector<const MemRegion*>*>(ReferencedVars);
+
+ return BlockDataRegion::referenced_vars_iterator(Vec == (void*) 0x1 ?
+ NULL : Vec->end());
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/NSAutoreleasePoolChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/NSAutoreleasePoolChecker.cpp
new file mode 100644
index 0000000..48f03a3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/NSAutoreleasePoolChecker.cpp
@@ -0,0 +1,86 @@
+//=- NSAutoreleasePoolChecker.cpp --------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines NSAutoreleasePoolChecker, a small checker that warns
+// about subpar uses of NSAutoreleasePool. Note that while the check itself
+// (in its current form) could be written as a flow-insensitive check, it
+// can potentially be enhanced in the future with flow-sensitive information.
+// It is also a good example of the CheckerVisitor interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "BasicObjCFoundationChecks.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Decl.h"
+
+using namespace clang;
+
+namespace {
+class NSAutoreleasePoolChecker
+ : public CheckerVisitor<NSAutoreleasePoolChecker> {
+
+ Selector releaseS;
+
+public:
+ NSAutoreleasePoolChecker(Selector release_s) : releaseS(release_s) {}
+
+ static void *getTag() {
+ static int x = 0;
+ return &x;
+ }
+
+ void PreVisitObjCMessageExpr(CheckerContext &C, const ObjCMessageExpr *ME);
+};
+
+} // end anonymous namespace
+
+
+void clang::RegisterNSAutoreleasePoolChecks(GRExprEngine &Eng) {
+ ASTContext &Ctx = Eng.getContext();
+ if (Ctx.getLangOptions().getGCMode() != LangOptions::NonGC) {
+ Eng.registerCheck(new NSAutoreleasePoolChecker(GetNullarySelector("release",
+ Ctx)));
+ }
+}
+
+void
+NSAutoreleasePoolChecker::PreVisitObjCMessageExpr(CheckerContext &C,
+ const ObjCMessageExpr *ME) {
+
+ const Expr *receiver = ME->getInstanceReceiver();
+ if (!receiver)
+ return;
+
+ // FIXME: Enhance with value-tracking information instead of consulting
+ // the type of the expression.
+ const ObjCObjectPointerType* PT =
+ receiver->getType()->getAs<ObjCObjectPointerType>();
+
+ if (!PT)
+ return;
+ const ObjCInterfaceDecl* OD = PT->getInterfaceDecl();
+ if (!OD)
+ return;
+ if (!OD->getIdentifier()->getName().equals("NSAutoreleasePool"))
+ return;
+
+ // Sending 'release' message?
+ if (ME->getSelector() != releaseS)
+ return;
+
+ SourceRange R = ME->getSourceRange();
+
+ C.getBugReporter().EmitBasicReport("Use -drain instead of -release",
+ "API Upgrade (Apple)",
+ "Use -drain instead of -release when using NSAutoreleasePool "
+ "and garbage collection", ME->getLocStart(), &R, 1);
+}
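+
+// The upgrade this suggests, under garbage collection (sketch):
+//
+//   NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+//   // ...
+//   [pool release];   // flagged: -release is a no-op under GC
+//   [pool drain];     // preferred: hints collection under GC and releases
+//                     // the pool otherwise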
diff --git a/contrib/llvm/tools/clang/lib/Checker/NSErrorChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/NSErrorChecker.cpp
new file mode 100644
index 0000000..e30d54c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/NSErrorChecker.cpp
@@ -0,0 +1,237 @@
+//=- NSErrorChecker.cpp - Coding conventions for uses of NSError ---*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines NSErrorChecker, a flow-insensitive check that determines
+// whether Objective-C methods and functions taking NSError** or CFErrorRef*
+// parameters declare a non-void return type.
+//
+// Filed under feature request PR 2600.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/Checkers/LocalCheckers.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/Checker/Checkers/DereferenceChecker.h"
+#include "BasicObjCFoundationChecks.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Decl.h"
+#include "llvm/ADT/SmallVector.h"
+
+using namespace clang;
+
+namespace {
+class NSErrorChecker : public BugType {
+ const Decl &CodeDecl;
+ const bool isNSErrorWarning;
+ IdentifierInfo * const II;
+ GRExprEngine &Eng;
+
+ void CheckSignature(const ObjCMethodDecl& MD, QualType& ResultTy,
+ llvm::SmallVectorImpl<VarDecl*>& ErrorParams);
+
+ void CheckSignature(const FunctionDecl& MD, QualType& ResultTy,
+ llvm::SmallVectorImpl<VarDecl*>& ErrorParams);
+
+ bool CheckNSErrorArgument(QualType ArgTy);
+ bool CheckCFErrorArgument(QualType ArgTy);
+
+ void CheckParamDeref(const VarDecl *V, const LocationContext *LC,
+ const GRState *state, BugReporter& BR);
+
+ void EmitRetTyWarning(BugReporter& BR, const Decl& CodeDecl);
+
+public:
+ NSErrorChecker(const Decl &D, bool isNSError, GRExprEngine& eng)
+ : BugType(isNSError ? "NSError** null dereference"
+ : "CFErrorRef* null dereference",
+ "Coding conventions (Apple)"),
+ CodeDecl(D),
+ isNSErrorWarning(isNSError),
+ II(&eng.getContext().Idents.get(isNSErrorWarning ? "NSError":"CFErrorRef")),
+ Eng(eng) {}
+
+ void FlushReports(BugReporter& BR);
+};
+
+} // end anonymous namespace
+
+void clang::RegisterNSErrorChecks(BugReporter& BR, GRExprEngine &Eng,
+ const Decl &D) {
+ BR.Register(new NSErrorChecker(D, true, Eng));
+ BR.Register(new NSErrorChecker(D, false, Eng));
+}
+
+void NSErrorChecker::FlushReports(BugReporter& BR) {
+ // Get the analysis engine and the exploded analysis graph.
+ ExplodedGraph& G = Eng.getGraph();
+
+ // Get the ASTContext, which is useful for querying type information.
+ ASTContext &Ctx = BR.getContext();
+
+ QualType ResultTy;
+ llvm::SmallVector<VarDecl*, 5> ErrorParams;
+
+ if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(&CodeDecl))
+ CheckSignature(*MD, ResultTy, ErrorParams);
+ else if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(&CodeDecl))
+ CheckSignature(*FD, ResultTy, ErrorParams);
+ else
+ return;
+
+ if (ErrorParams.empty())
+ return;
+
+ if (ResultTy == Ctx.VoidTy) EmitRetTyWarning(BR, CodeDecl);
+
+ for (ExplodedGraph::roots_iterator RI=G.roots_begin(), RE=G.roots_end();
+ RI!=RE; ++RI) {
+ // Scan the parameters for an implicit null dereference.
+ for (llvm::SmallVectorImpl<VarDecl*>::iterator I=ErrorParams.begin(),
+ E=ErrorParams.end(); I!=E; ++I)
+ CheckParamDeref(*I, (*RI)->getLocationContext(), (*RI)->getState(), BR);
+ }
+}
+
+void NSErrorChecker::EmitRetTyWarning(BugReporter& BR, const Decl& CodeDecl) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ if (isa<ObjCMethodDecl>(CodeDecl))
+ os << "Method";
+ else
+ os << "Function";
+
+ os << " accepting ";
+ os << (isNSErrorWarning ? "NSError**" : "CFErrorRef*");
+ os << " should have a non-void return value to indicate whether or not an "
+ "error occurred";
+
+ BR.EmitBasicReport(isNSErrorWarning
+ ? "Bad return type when passing NSError**"
+ : "Bad return type when passing CFError*",
+ getCategory(), os.str(),
+ CodeDecl.getLocation());
+}
+
+void
+NSErrorChecker::CheckSignature(const ObjCMethodDecl& M, QualType& ResultTy,
+ llvm::SmallVectorImpl<VarDecl*>& ErrorParams) {
+
+ ResultTy = M.getResultType();
+
+ for (ObjCMethodDecl::param_iterator I=M.param_begin(),
+ E=M.param_end(); I!=E; ++I) {
+
+ QualType T = (*I)->getType();
+
+ if (isNSErrorWarning) {
+ if (CheckNSErrorArgument(T)) ErrorParams.push_back(*I);
+ }
+ else if (CheckCFErrorArgument(T))
+ ErrorParams.push_back(*I);
+ }
+}
+
+void
+NSErrorChecker::CheckSignature(const FunctionDecl& F, QualType& ResultTy,
+ llvm::SmallVectorImpl<VarDecl*>& ErrorParams) {
+
+ ResultTy = F.getResultType();
+
+ for (FunctionDecl::param_const_iterator I = F.param_begin(),
+ E = F.param_end(); I != E; ++I) {
+
+ QualType T = (*I)->getType();
+
+ if (isNSErrorWarning) {
+ if (CheckNSErrorArgument(T)) ErrorParams.push_back(*I);
+ }
+ else if (CheckCFErrorArgument(T))
+ ErrorParams.push_back(*I);
+ }
+}
+
+
+bool NSErrorChecker::CheckNSErrorArgument(QualType ArgTy) {
+
+ const PointerType* PPT = ArgTy->getAs<PointerType>();
+ if (!PPT)
+ return false;
+
+ const ObjCObjectPointerType* PT =
+ PPT->getPointeeType()->getAs<ObjCObjectPointerType>();
+
+ if (!PT)
+ return false;
+
+ const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
+
+ // FIXME: Can ID ever be NULL?
+ if (ID)
+ return II == ID->getIdentifier();
+
+ return false;
+}
+
+bool NSErrorChecker::CheckCFErrorArgument(QualType ArgTy) {
+
+ const PointerType* PPT = ArgTy->getAs<PointerType>();
+ if (!PPT) return false;
+
+ const TypedefType* TT = PPT->getPointeeType()->getAs<TypedefType>();
+ if (!TT) return false;
+
+ return TT->getDecl()->getIdentifier() == II;
+}
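+
+// Parameter shapes these two predicates match (sketch):
+//
+//   - (BOOL)save:(NSError **)error;      // CheckNSErrorArgument
+//   Boolean Save(CFErrorRef *error);     // CheckCFErrorArgument
+//
+// A void return type on such a signature draws the EmitRetTyWarning above.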
+
+void NSErrorChecker::CheckParamDeref(const VarDecl *Param,
+ const LocationContext *LC,
+ const GRState *rootState,
+ BugReporter& BR) {
+
+ SVal ParamL = rootState->getLValue(Param, LC);
+  const MemRegion* ParamR =
+      cast<loc::MemRegionVal>(ParamL).getRegionAs<VarRegion>();
+  assert(ParamR && "Parameters always have VarRegions.");
+ SVal ParamSVal = rootState->getSVal(ParamR);
+
+ // FIXME: For now assume that ParamSVal is symbolic. We need to generalize
+ // this later.
+ SymbolRef ParamSym = ParamSVal.getAsLocSymbol();
+ if (!ParamSym)
+ return;
+
+ // Iterate over the implicit-null dereferences.
+ ExplodedNode *const* I, *const* E;
+ llvm::tie(I, E) = GetImplicitNullDereferences(Eng);
+ for ( ; I != E; ++I) {
+ const GRState *state = (*I)->getState();
+ SVal location = state->getSVal((*I)->getLocationAs<StmtPoint>()->getStmt());
+ if (location.getAsSymbol() != ParamSym)
+ continue;
+
+ // Emit an error.
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "Potential null dereference. According to coding standards ";
+
+ if (isNSErrorWarning)
+ os << "in 'Creating and Returning NSError Objects' the parameter '";
+ else
+ os << "documented in CoreFoundation/CFError.h the parameter '";
+
+ os << Param << "' may be null.";
+
+ BugReport *report = new BugReport(*this, os.str(), *I);
+ // FIXME: Notable symbols are now part of the report. We should
+ // add support for notable symbols in BugReport.
+ // BR.addNotableSymbol(SV->getSymbol());
+ BR.EmitReport(report);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/NoReturnFunctionChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/NoReturnFunctionChecker.cpp
new file mode 100644
index 0000000..12527e0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/NoReturnFunctionChecker.cpp
@@ -0,0 +1,79 @@
+//=== NoReturnFunctionChecker.cpp -------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines NoReturnFunctionChecker, which evaluates functions that do not
+// return to the caller.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace clang;
+
+namespace {
+
+class NoReturnFunctionChecker : public CheckerVisitor<NoReturnFunctionChecker> {
+public:
+ static void *getTag() { static int tag = 0; return &tag; }
+ void PostVisitCallExpr(CheckerContext &C, const CallExpr *CE);
+};
+
+}
+
+void clang::RegisterNoReturnFunctionChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new NoReturnFunctionChecker());
+}
+
+void NoReturnFunctionChecker::PostVisitCallExpr(CheckerContext &C,
+ const CallExpr *CE) {
+ const GRState *state = C.getState();
+ const Expr *Callee = CE->getCallee();
+
+ bool BuildSinks = getFunctionExtInfo(Callee->getType()).getNoReturn();
+
+ if (!BuildSinks) {
+ SVal L = state->getSVal(Callee);
+ const FunctionDecl *FD = L.getAsFunctionDecl();
+ if (!FD)
+ return;
+
+ if (FD->getAttr<AnalyzerNoReturnAttr>())
+ BuildSinks = true;
+ else if (const IdentifierInfo *II = FD->getIdentifier()) {
+ // HACK: Some functions are not marked noreturn, and don't return.
+ // Here are a few hardwired ones. If this takes too long, we can
+ // potentially cache these results.
+ BuildSinks
+ = llvm::StringSwitch<bool>(llvm::StringRef(II->getName()))
+ .Case("exit", true)
+ .Case("panic", true)
+ .Case("error", true)
+ .Case("Assert", true)
+ // FIXME: This is just a wrapper around throwing an exception.
+ // Eventually inter-procedural analysis should handle this easily.
+ .Case("ziperr", true)
+ .Case("assfail", true)
+ .Case("db_error", true)
+ .Case("__assert", true)
+ .Case("__assert_rtn", true)
+ .Case("__assert_fail", true)
+ .Case("dtrace_assfail", true)
+ .Case("yy_fatal_error", true)
+ .Case("_XCAssertionFailureHandler", true)
+ .Case("_DTAssertionFailureHandler", true)
+ .Case("_TSAssertionFailureHandler", true)
+ .Default(false);
+ }
+ }
+
+ if (BuildSinks)
+ C.GenerateSink(CE);
+}
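+
+// Code not on the hardwired list can opt in via the attribute checked above
+// (a sketch; 'my_fatal' is an arbitrary name):
+//
+//   void my_fatal(const char *msg) __attribute__((analyzer_noreturn));
+//
+// Calls to my_fatal() then become sinks for the analyzer without changing
+// the compiler's own noreturn semantics.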
diff --git a/contrib/llvm/tools/clang/lib/Checker/OSAtomicChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/OSAtomicChecker.cpp
new file mode 100644
index 0000000..e743528
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/OSAtomicChecker.cpp
@@ -0,0 +1,196 @@
+//=== OSAtomicChecker.cpp - OSAtomic functions evaluator --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker evaluates OSAtomic functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/PathSensitive/Checker.h"
+#include "clang/Basic/Builtins.h"
+
+using namespace clang;
+
+namespace {
+
+class OSAtomicChecker : public Checker {
+public:
+ static void *getTag() { static int tag = 0; return &tag; }
+ virtual bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
+
+private:
+ bool EvalOSAtomicCompareAndSwap(CheckerContext &C, const CallExpr *CE);
+};
+
+}
+
+void clang::RegisterOSAtomicChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new OSAtomicChecker());
+}
+
+bool OSAtomicChecker::EvalCallExpr(CheckerContext &C,const CallExpr *CE) {
+ const GRState *state = C.getState();
+ const Expr *Callee = CE->getCallee();
+ SVal L = state->getSVal(Callee);
+
+ const FunctionDecl* FD = L.getAsFunctionDecl();
+ if (!FD)
+ return false;
+
+ const IdentifierInfo *II = FD->getIdentifier();
+ if (!II)
+ return false;
+
+ llvm::StringRef FName(II->getName());
+
+ // Check for compare and swap.
+ if (FName.startswith("OSAtomicCompareAndSwap") ||
+ FName.startswith("objc_atomicCompareAndSwap"))
+ return EvalOSAtomicCompareAndSwap(C, CE);
+
+ // FIXME: Other atomics.
+ return false;
+}
+
+bool OSAtomicChecker::EvalOSAtomicCompareAndSwap(CheckerContext &C,
+ const CallExpr *CE) {
+ // Not enough arguments to match OSAtomicCompareAndSwap?
+ if (CE->getNumArgs() != 3)
+ return false;
+
+ ASTContext &Ctx = C.getASTContext();
+ const Expr *oldValueExpr = CE->getArg(0);
+ QualType oldValueType = Ctx.getCanonicalType(oldValueExpr->getType());
+
+ const Expr *newValueExpr = CE->getArg(1);
+ QualType newValueType = Ctx.getCanonicalType(newValueExpr->getType());
+
+ // Do the types of 'oldValue' and 'newValue' match?
+ if (oldValueType != newValueType)
+ return false;
+
+ const Expr *theValueExpr = CE->getArg(2);
+ const PointerType *theValueType=theValueExpr->getType()->getAs<PointerType>();
+
+ // theValueType not a pointer?
+ if (!theValueType)
+ return false;
+
+ QualType theValueTypePointee =
+ Ctx.getCanonicalType(theValueType->getPointeeType()).getUnqualifiedType();
+
+ // The pointee must match newValueType and oldValueType.
+ if (theValueTypePointee != newValueType)
+ return false;
+
+ static unsigned magic_load = 0;
+ static unsigned magic_store = 0;
+
+ const void *OSAtomicLoadTag = &magic_load;
+ const void *OSAtomicStoreTag = &magic_store;
+
+ // Load 'theValue'.
+ GRExprEngine &Engine = C.getEngine();
+ const GRState *state = C.getState();
+ ExplodedNodeSet Tmp;
+ SVal location = state->getSVal(theValueExpr);
+ // Here we should use the value type of the region as the load type.
+ QualType LoadTy;
+ if (const TypedRegion *TR =
+ dyn_cast_or_null<TypedRegion>(location.getAsRegion())) {
+ LoadTy = TR->getValueType(Ctx);
+ }
+ Engine.EvalLoad(Tmp, const_cast<Expr *>(theValueExpr), C.getPredecessor(),
+ state, location, OSAtomicLoadTag, LoadTy);
+
+ if (Tmp.empty()) {
+    // If no nodes were generated, other checkers must have generated sinks.
+    // Since the builder state was restored, we set the flag manually to
+    // prevent an automatic transition.
+ // FIXME: there should be a better approach.
+ C.getNodeBuilder().BuildSinks = true;
+ return true;
+ }
+
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end();
+ I != E; ++I) {
+
+ ExplodedNode *N = *I;
+ const GRState *stateLoad = N->getState();
+ SVal theValueVal_untested = stateLoad->getSVal(theValueExpr);
+ SVal oldValueVal_untested = stateLoad->getSVal(oldValueExpr);
+
+ // FIXME: Issue an error.
+ if (theValueVal_untested.isUndef() || oldValueVal_untested.isUndef()) {
+ return false;
+ }
+
+ DefinedOrUnknownSVal theValueVal =
+ cast<DefinedOrUnknownSVal>(theValueVal_untested);
+ DefinedOrUnknownSVal oldValueVal =
+ cast<DefinedOrUnknownSVal>(oldValueVal_untested);
+
+ SValuator &SVator = Engine.getSValuator();
+
+ // Perform the comparison.
+ DefinedOrUnknownSVal Cmp = SVator.EvalEQ(stateLoad,theValueVal,oldValueVal);
+
+ const GRState *stateEqual = stateLoad->Assume(Cmp, true);
+
+ // Were they equal?
+ if (stateEqual) {
+ // Perform the store.
+ ExplodedNodeSet TmpStore;
+ SVal val = stateEqual->getSVal(newValueExpr);
+
+ // Handle implicit value casts.
+ if (const TypedRegion *R =
+ dyn_cast_or_null<TypedRegion>(location.getAsRegion())) {
+ val = SVator.EvalCast(val,R->getValueType(Ctx),newValueExpr->getType());
+ }
+
+ Engine.EvalStore(TmpStore, NULL, const_cast<Expr *>(theValueExpr), N,
+ stateEqual, location, val, OSAtomicStoreTag);
+
+ if (TmpStore.empty()) {
+        // If no nodes were generated, other checkers must have generated
+        // sinks. Since the builder state was restored, we set the flag
+        // manually to prevent an automatic transition.
+ // FIXME: there should be a better approach.
+ C.getNodeBuilder().BuildSinks = true;
+ return true;
+ }
+
+ // Now bind the result of the comparison.
+ for (ExplodedNodeSet::iterator I2 = TmpStore.begin(),
+ E2 = TmpStore.end(); I2 != E2; ++I2) {
+ ExplodedNode *predNew = *I2;
+ const GRState *stateNew = predNew->getState();
+ // Check for 'void' return type if we have a bogus function prototype.
+ SVal Res = UnknownVal();
+ QualType T = CE->getType();
+ if (!T->isVoidType())
+ Res = Engine.getValueManager().makeTruthVal(true, T);
+ C.GenerateNode(stateNew->BindExpr(CE, Res), predNew);
+ }
+ }
+
+ // Were they not equal?
+ if (const GRState *stateNotEqual = stateLoad->Assume(Cmp, false)) {
+ // Check for 'void' return type if we have a bogus function prototype.
+ SVal Res = UnknownVal();
+ QualType T = CE->getType();
+ if (!T->isVoidType())
+ Res = Engine.getValueManager().makeTruthVal(false, CE->getType());
+ C.GenerateNode(stateNotEqual->BindExpr(CE, Res), N);
+ }
+ }
+
+ return true;
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/ObjCUnusedIVarsChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/ObjCUnusedIVarsChecker.cpp
new file mode 100644
index 0000000..2523cff
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/ObjCUnusedIVarsChecker.cpp
@@ -0,0 +1,161 @@
+//==- ObjCUnusedIVarsChecker.cpp - Check for unused ivars --------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckObjCUnusedIvars, a checker that
+// analyzes an Objective-C class's interface/implementation to determine if it
+// has any ivars that are never accessed.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/Checkers/LocalCheckers.h"
+#include "clang/Checker/BugReporter/PathDiagnostic.h"
+#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+
+using namespace clang;
+
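+// Each candidate ivar starts out as Unused and is flipped to Used once a
+// reference to it is found.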
+enum IVarState { Unused, Used };
+typedef llvm::DenseMap<const ObjCIvarDecl*,IVarState> IvarUsageMap;
+
+static void Scan(IvarUsageMap& M, const Stmt* S) {
+ if (!S)
+ return;
+
+ if (const ObjCIvarRefExpr *Ex = dyn_cast<ObjCIvarRefExpr>(S)) {
+ const ObjCIvarDecl *D = Ex->getDecl();
+ IvarUsageMap::iterator I = M.find(D);
+ if (I != M.end())
+ I->second = Used;
+ return;
+ }
+
+ // Blocks can reference an instance variable of a class.
+ if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
+ Scan(M, BE->getBody());
+ return;
+ }
+
+ for (Stmt::const_child_iterator I=S->child_begin(),E=S->child_end(); I!=E;++I)
+ Scan(M, *I);
+}
+
+static void Scan(IvarUsageMap& M, const ObjCPropertyImplDecl* D) {
+ if (!D)
+ return;
+
+ const ObjCIvarDecl* ID = D->getPropertyIvarDecl();
+
+ if (!ID)
+ return;
+
+ IvarUsageMap::iterator I = M.find(ID);
+ if (I != M.end())
+ I->second = Used;
+}
+
+static void Scan(IvarUsageMap& M, const ObjCContainerDecl* D) {
+ // Scan the methods for accesses.
+ for (ObjCContainerDecl::instmeth_iterator I = D->instmeth_begin(),
+ E = D->instmeth_end(); I!=E; ++I)
+ Scan(M, (*I)->getBody());
+
+ if (const ObjCImplementationDecl *ID = dyn_cast<ObjCImplementationDecl>(D)) {
+ // Scan for @synthesized property methods that act as setters/getters
+ // to an ivar.
+ for (ObjCImplementationDecl::propimpl_iterator I = ID->propimpl_begin(),
+ E = ID->propimpl_end(); I!=E; ++I)
+ Scan(M, *I);
+
+ // Scan the associated categories as well.
+ for (const ObjCCategoryDecl *CD =
+ ID->getClassInterface()->getCategoryList(); CD ;
+ CD = CD->getNextClassCategory()) {
+ if (const ObjCCategoryImplDecl *CID = CD->getImplementation())
+ Scan(M, CID);
+ }
+ }
+}
+
+static void Scan(IvarUsageMap &M, const DeclContext *C, const FileID FID,
+ SourceManager &SM) {
+ for (DeclContext::decl_iterator I=C->decls_begin(), E=C->decls_end();
+ I!=E; ++I)
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
+ SourceLocation L = FD->getLocStart();
+ if (SM.getFileID(L) == FID)
+ Scan(M, FD->getBody());
+ }
+}
+
+void clang::CheckObjCUnusedIvar(const ObjCImplementationDecl *D,
+ BugReporter &BR) {
+
+ const ObjCInterfaceDecl* ID = D->getClassInterface();
+ IvarUsageMap M;
+
+ // Iterate over the ivars.
+ for (ObjCInterfaceDecl::ivar_iterator I=ID->ivar_begin(),
+ E=ID->ivar_end(); I!=E; ++I) {
+
+ const ObjCIvarDecl* ID = *I;
+
+    // Ignore ivars that...
+    // (a) aren't private,
+    // (b) are explicitly marked unused, or
+    // (c) are IBOutlets.
+ if (ID->getAccessControl() != ObjCIvarDecl::Private ||
+ ID->getAttr<UnusedAttr>() || ID->getAttr<IBOutletAttr>() ||
+ ID->getAttr<IBOutletCollectionAttr>())
+ continue;
+
+ M[ID] = Unused;
+ }
+
+ if (M.empty())
+ return;
+
+ // Now scan the implementation declaration.
+ Scan(M, D);
+
+ // Any potentially unused ivars?
+ bool hasUnused = false;
+ for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
+ if (I->second == Unused) {
+ hasUnused = true;
+ break;
+ }
+
+ if (!hasUnused)
+ return;
+
+ // We found some potentially unused ivars. Scan the entire translation unit
+ // for functions inside the @implementation that reference these ivars.
+ // FIXME: In the future hopefully we can just use the lexical DeclContext
+ // to go from the ObjCImplementationDecl to the lexically "nested"
+ // C functions.
+ SourceManager &SM = BR.getSourceManager();
+ Scan(M, D->getDeclContext(), SM.getFileID(D->getLocation()), SM);
+
+ // Find ivars that are unused.
+ for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
+ if (I->second == Unused) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "Instance variable '" << I->first << "' in class '" << ID
+ << "' is never used by the methods in its @implementation "
+ "(although it may be used by category methods).";
+
+ BR.EmitBasicReport("Unused instance variable", "Optimization",
+ os.str(), I->first->getLocation());
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/PathDiagnostic.cpp b/contrib/llvm/tools/clang/lib/Checker/PathDiagnostic.cpp
new file mode 100644
index 0000000..963923c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/PathDiagnostic.cpp
@@ -0,0 +1,281 @@
+//===--- PathDiagnostic.cpp - Path-Specific Diagnostic Handling -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PathDiagnostic-related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/BugReporter/PathDiagnostic.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtCXX.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Casting.h"
+
+using namespace clang;
+using llvm::dyn_cast;
+using llvm::isa;
+
+bool PathDiagnosticMacroPiece::containsEvent() const {
+ for (const_iterator I = begin(), E = end(); I!=E; ++I) {
+ if (isa<PathDiagnosticEventPiece>(*I))
+ return true;
+
+ if (PathDiagnosticMacroPiece *MP = dyn_cast<PathDiagnosticMacroPiece>(*I))
+ if (MP->containsEvent())
+ return true;
+ }
+
+ return false;
+}
+
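+// Strip trailing '.' characters so that piece strings compose consistently
+// when rendered as diagnostics.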
+static llvm::StringRef StripTrailingDots(llvm::StringRef s) {
+ for (llvm::StringRef::size_type i = s.size(); i != 0; --i)
+ if (s[i - 1] != '.')
+ return s.substr(0, i);
+ return "";
+}
+
+PathDiagnosticPiece::PathDiagnosticPiece(llvm::StringRef s,
+ Kind k, DisplayHint hint)
+ : str(StripTrailingDots(s)), kind(k), Hint(hint) {}
+
+PathDiagnosticPiece::PathDiagnosticPiece(Kind k, DisplayHint hint)
+ : kind(k), Hint(hint) {}
+
+PathDiagnosticPiece::~PathDiagnosticPiece() {}
+PathDiagnosticEventPiece::~PathDiagnosticEventPiece() {}
+PathDiagnosticControlFlowPiece::~PathDiagnosticControlFlowPiece() {}
+
+PathDiagnosticMacroPiece::~PathDiagnosticMacroPiece() {
+ for (iterator I = begin(), E = end(); I != E; ++I) delete *I;
+}
+
+PathDiagnostic::PathDiagnostic() : Size(0) {}
+
+PathDiagnostic::~PathDiagnostic() {
+ for (iterator I = begin(), E = end(); I != E; ++I) delete &*I;
+}
+
+void PathDiagnostic::resetPath(bool deletePieces) {
+ Size = 0;
+
+ if (deletePieces)
+ for (iterator I=begin(), E=end(); I!=E; ++I)
+ delete &*I;
+
+ path.clear();
+}
+
+
+PathDiagnostic::PathDiagnostic(llvm::StringRef bugtype, llvm::StringRef desc,
+ llvm::StringRef category)
+ : Size(0),
+ BugType(StripTrailingDots(bugtype)),
+ Desc(StripTrailingDots(desc)),
+ Category(StripTrailingDots(category)) {}
+
+void PathDiagnosticClient::HandleDiagnostic(Diagnostic::Level DiagLevel,
+ const DiagnosticInfo &Info) {
+
+ // Create a PathDiagnostic with a single piece.
+
+ PathDiagnostic* D = new PathDiagnostic();
+
+ const char *LevelStr;
+ switch (DiagLevel) {
+ default:
+ case Diagnostic::Ignored: assert(0 && "Invalid diagnostic type");
+ case Diagnostic::Note: LevelStr = "note: "; break;
+ case Diagnostic::Warning: LevelStr = "warning: "; break;
+ case Diagnostic::Error: LevelStr = "error: "; break;
+ case Diagnostic::Fatal: LevelStr = "fatal error: "; break;
+ }
+
+ llvm::SmallString<100> StrC;
+ StrC += LevelStr;
+ Info.FormatDiagnostic(StrC);
+
+ PathDiagnosticPiece *P =
+ new PathDiagnosticEventPiece(Info.getLocation(), StrC.str());
+
+ for (unsigned i = 0, e = Info.getNumRanges(); i != e; ++i)
+ P->addRange(Info.getRange(i));
+ for (unsigned i = 0, e = Info.getNumFixItHints(); i != e; ++i)
+ P->addFixItHint(Info.getFixItHint(i));
+ D->push_front(P);
+
+ HandlePathDiagnostic(D);
+}
+
+//===----------------------------------------------------------------------===//
+// PathDiagnosticLocation methods.
+//===----------------------------------------------------------------------===//
+
+FullSourceLoc PathDiagnosticLocation::asLocation() const {
+ assert(isValid());
+ // Note that we want a 'switch' here so that the compiler can warn us in
+ // case we add more cases.
+ switch (K) {
+ case SingleLocK:
+ case RangeK:
+ break;
+ case StmtK:
+ return FullSourceLoc(S->getLocStart(), const_cast<SourceManager&>(*SM));
+ case DeclK:
+ return FullSourceLoc(D->getLocation(), const_cast<SourceManager&>(*SM));
+ }
+
+ return FullSourceLoc(R.getBegin(), const_cast<SourceManager&>(*SM));
+}
+
+PathDiagnosticRange PathDiagnosticLocation::asRange() const {
+ assert(isValid());
+ // Note that we want a 'switch' here so that the compiler can warn us in
+ // case we add more cases.
+ switch (K) {
+ case SingleLocK:
+ return PathDiagnosticRange(R, true);
+ case RangeK:
+ break;
+ case StmtK: {
+ const Stmt *S = asStmt();
+ switch (S->getStmtClass()) {
+ default:
+ break;
+ case Stmt::DeclStmtClass: {
+ const DeclStmt *DS = cast<DeclStmt>(S);
+ if (DS->isSingleDecl()) {
+ // Should always be the case, but we'll be defensive.
+ return SourceRange(DS->getLocStart(),
+ DS->getSingleDecl()->getLocation());
+ }
+ break;
+ }
+ // FIXME: Provide better range information for different
+ // terminators.
+ case Stmt::IfStmtClass:
+ case Stmt::WhileStmtClass:
+ case Stmt::DoStmtClass:
+ case Stmt::ForStmtClass:
+ case Stmt::ChooseExprClass:
+ case Stmt::IndirectGotoStmtClass:
+ case Stmt::SwitchStmtClass:
+ case Stmt::ConditionalOperatorClass:
+ case Stmt::ObjCForCollectionStmtClass: {
+ SourceLocation L = S->getLocStart();
+ return SourceRange(L, L);
+ }
+ }
+
+ return S->getSourceRange();
+ }
+ case DeclK:
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->getSourceRange();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // FIXME: We would like to always get the function body, even
+ // when it needs to be de-serialized, but getting the
+ // ASTContext here requires significant changes.
+ if (Stmt *Body = FD->getBody()) {
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Body))
+ return CS->getSourceRange();
+ else
+ return cast<CXXTryStmt>(Body)->getSourceRange();
+ }
+ }
+ else {
+ SourceLocation L = D->getLocation();
+ return PathDiagnosticRange(SourceRange(L, L), true);
+ }
+ }
+
+ return R;
+}
+
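+// flatten() drops any Stmt/Decl references, reducing the location to a plain
+// source range that no longer points into the AST.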
+void PathDiagnosticLocation::flatten() {
+ if (K == StmtK) {
+ R = asRange();
+ K = RangeK;
+ S = 0;
+ D = 0;
+ }
+ else if (K == DeclK) {
+ SourceLocation L = D->getLocation();
+ R = SourceRange(L, L);
+ K = SingleLocK;
+ S = 0;
+ D = 0;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// FoldingSet profiling methods.
+//===----------------------------------------------------------------------===//
+
+void PathDiagnosticLocation::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger((unsigned) K);
+ switch (K) {
+ case RangeK:
+ ID.AddInteger(R.getBegin().getRawEncoding());
+ ID.AddInteger(R.getEnd().getRawEncoding());
+ break;
+ case SingleLocK:
+ ID.AddInteger(R.getBegin().getRawEncoding());
+ break;
+ case StmtK:
+ ID.Add(S);
+ break;
+ case DeclK:
+ ID.Add(D);
+ break;
+ }
+ return;
+}
+
+void PathDiagnosticPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger((unsigned) getKind());
+ ID.AddString(str);
+ // FIXME: Add profiling support for code hints.
+ ID.AddInteger((unsigned) getDisplayHint());
+ for (range_iterator I = ranges_begin(), E = ranges_end(); I != E; ++I) {
+ ID.AddInteger(I->getBegin().getRawEncoding());
+ ID.AddInteger(I->getEnd().getRawEncoding());
+ }
+}
+
+void PathDiagnosticSpotPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticPiece::Profile(ID);
+ ID.Add(Pos);
+}
+
+void PathDiagnosticControlFlowPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticPiece::Profile(ID);
+ for (const_iterator I = begin(), E = end(); I != E; ++I)
+ ID.Add(*I);
+}
+
+void PathDiagnosticMacroPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticSpotPiece::Profile(ID);
+ for (const_iterator I = begin(), E = end(); I != E; ++I)
+ ID.Add(**I);
+}
+
+void PathDiagnostic::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(Size);
+ ID.AddString(BugType);
+ ID.AddString(Desc);
+ ID.AddString(Category);
+ for (const_iterator I = begin(), E = end(); I != E; ++I)
+ ID.Add(*I);
+
+ for (meta_iterator I = meta_begin(), E = meta_end(); I != E; ++I)
+ ID.AddString(*I);
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/PointerArithChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/PointerArithChecker.cpp
new file mode 100644
index 0000000..ed60c42
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/PointerArithChecker.cpp
@@ -0,0 +1,72 @@
+//=== PointerArithChecker.cpp - Pointer arithmetic checker -----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines PointerArithChecker, a builtin checker that checks for
+// pointer arithmetic on locations other than array elements.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+
+using namespace clang;
+
+namespace {
+class PointerArithChecker
+ : public CheckerVisitor<PointerArithChecker> {
+ BuiltinBug *BT;
+public:
+ PointerArithChecker() : BT(0) {}
+ static void *getTag();
+ void PreVisitBinaryOperator(CheckerContext &C, const BinaryOperator *B);
+};
+}
+
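+// The address of the function-local static serves as this checker's unique
+// tag.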
+void *PointerArithChecker::getTag() {
+ static int x;
+ return &x;
+}
+
+void PointerArithChecker::PreVisitBinaryOperator(CheckerContext &C,
+ const BinaryOperator *B) {
+ if (B->getOpcode() != BinaryOperator::Sub &&
+ B->getOpcode() != BinaryOperator::Add)
+ return;
+
+ const GRState *state = C.getState();
+ SVal LV = state->getSVal(B->getLHS());
+ SVal RV = state->getSVal(B->getRHS());
+
+ const MemRegion *LR = LV.getAsRegion();
+
+ if (!LR || !RV.isConstant())
+ return;
+
+  // If pointer arithmetic is done on variables of non-array type, this often
+  // means the behavior relies on memory layout, which is dangerous.
+ if (isa<VarRegion>(LR) || isa<CodeTextRegion>(LR) ||
+ isa<CompoundLiteralRegion>(LR)) {
+
+ if (ExplodedNode *N = C.GenerateNode()) {
+ if (!BT)
+ BT = new BuiltinBug("Dangerous pointer arithmetic",
+ "Pointer arithmetic done on non-array variables "
+ "means reliance on memory layout, which is "
+ "dangerous.");
+ RangedBugReport *R = new RangedBugReport(*BT, BT->getDescription(), N);
+ R->addRange(B->getSourceRange());
+ C.EmitReport(R);
+ }
+ }
+}
+
+void clang::RegisterPointerArithChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new PointerArithChecker());
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/PointerSubChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/PointerSubChecker.cpp
new file mode 100644
index 0000000..bc0fd24
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/PointerSubChecker.cpp
@@ -0,0 +1,78 @@
+//=== PointerSubChecker.cpp - Pointer subtraction checker ------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines PointerSubChecker, a builtin checker that checks for
+// pointer subtractions on two pointers pointing to different memory chunks.
+// This check corresponds to CWE-469.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+
+using namespace clang;
+
+namespace {
+class PointerSubChecker
+ : public CheckerVisitor<PointerSubChecker> {
+ BuiltinBug *BT;
+public:
+ PointerSubChecker() : BT(0) {}
+ static void *getTag();
+ void PreVisitBinaryOperator(CheckerContext &C, const BinaryOperator *B);
+};
+}
+
+void *PointerSubChecker::getTag() {
+ static int x;
+ return &x;
+}
+
+void PointerSubChecker::PreVisitBinaryOperator(CheckerContext &C,
+ const BinaryOperator *B) {
+ // When doing pointer subtraction, if the two pointers do not point to the
+ // same memory chunk, emit a warning.
+ if (B->getOpcode() != BinaryOperator::Sub)
+ return;
+
+ const GRState *state = C.getState();
+ SVal LV = state->getSVal(B->getLHS());
+ SVal RV = state->getSVal(B->getRHS());
+
+ const MemRegion *LR = LV.getAsRegion();
+ const MemRegion *RR = RV.getAsRegion();
+
+ if (!(LR && RR))
+ return;
+
+ const MemRegion *BaseLR = LR->getBaseRegion();
+ const MemRegion *BaseRR = RR->getBaseRegion();
+
+ if (BaseLR == BaseRR)
+ return;
+
+ // Allow arithmetic on different symbolic regions.
+ if (isa<SymbolicRegion>(BaseLR) || isa<SymbolicRegion>(BaseRR))
+ return;
+
+ if (ExplodedNode *N = C.GenerateNode()) {
+ if (!BT)
+ BT = new BuiltinBug("Pointer subtraction",
+ "Subtraction of two pointers that do not point to "
+ "the same memory chunk may cause incorrect result.");
+ RangedBugReport *R = new RangedBugReport(*BT, BT->getDescription(), N);
+ R->addRange(B->getSourceRange());
+ C.EmitReport(R);
+ }
+}
+
+void clang::RegisterPointerSubChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new PointerSubChecker());
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/PthreadLockChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/PthreadLockChecker.cpp
new file mode 100644
index 0000000..74e266c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/PthreadLockChecker.cpp
@@ -0,0 +1,141 @@
+//===--- PthreadLockChecker.cpp - Simple pthread lock checker --*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines PthreadLockChecker, a simple lock -> unlock checker. Eventually
+// this shouldn't be registered with GRExprEngineInternalChecks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "GRExprEngineExperimentalChecks.h"
+#include "llvm/ADT/ImmutableSet.h"
+
+using namespace clang;
+
+namespace {
+class PthreadLockChecker
+ : public CheckerVisitor<PthreadLockChecker> {
+ BugType *BT;
+public:
+ PthreadLockChecker() : BT(0) {}
+ static void *getTag() {
+ static int x = 0;
+ return &x;
+ }
+ void PostVisitCallExpr(CheckerContext &C, const CallExpr *CE);
+
+ void AcquireLock(CheckerContext &C, const CallExpr *CE,
+ SVal lock, bool isTryLock);
+
+ void ReleaseLock(CheckerContext &C, const CallExpr *CE,
+ SVal lock);
+
+};
+} // end anonymous namespace
+
+// GDM Entry for tracking lock state.
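+// The entry is an immutable set of the MemRegions for the locks currently
+// believed to be held.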
+namespace { class LockSet {}; }
+namespace clang {
+template <> struct GRStateTrait<LockSet> :
+ public GRStatePartialTrait<llvm::ImmutableSet<const MemRegion*> > {
+ static void* GDMIndex() { return PthreadLockChecker::getTag(); }
+};
+} // end clang namespace
+
+void clang::RegisterPthreadLockChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new PthreadLockChecker());
+}
+
+
+void PthreadLockChecker::PostVisitCallExpr(CheckerContext &C,
+ const CallExpr *CE) {
+ const GRState *state = C.getState();
+ const Expr *Callee = CE->getCallee();
+ const FunctionTextRegion *R =
+ dyn_cast_or_null<FunctionTextRegion>(state->getSVal(Callee).getAsRegion());
+
+ if (!R)
+ return;
+
+ llvm::StringRef FName = R->getDecl()->getName();
+
+ if (FName == "pthread_mutex_lock") {
+ if (CE->getNumArgs() != 1)
+ return;
+ AcquireLock(C, CE, state->getSVal(CE->getArg(0)), false);
+ }
+ else if (FName == "pthread_mutex_trylock") {
+ if (CE->getNumArgs() != 1)
+ return;
+ AcquireLock(C, CE, state->getSVal(CE->getArg(0)), true);
+ }
+ else if (FName == "pthread_mutex_unlock") {
+ if (CE->getNumArgs() != 1)
+ return;
+ ReleaseLock(C, CE, state->getSVal(CE->getArg(0)));
+ }
+}
+
+void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
+ SVal lock, bool isTryLock) {
+
+ const MemRegion *lockR = lock.getAsRegion();
+ if (!lockR)
+ return;
+
+ const GRState *state = C.getState();
+
+ SVal X = state->getSVal(CE);
+ if (X.isUnknownOrUndef())
+ return;
+
+ DefinedSVal retVal = cast<DefinedSVal>(X);
+ const GRState *lockSucc = state;
+
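+  // pthread lock functions return 0 on success and an error number on
+  // failure; the assumptions below encode that convention.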
+ if (isTryLock) {
+ // Bifurcate the state, and allow a mode where the lock acquisition fails.
+ const GRState *lockFail;
+ llvm::tie(lockFail, lockSucc) = state->Assume(retVal);
+ assert(lockFail && lockSucc);
+ C.addTransition(C.GenerateNode(CE, lockFail));
+ }
+ else {
+ // Assume that the return value was 0.
+ lockSucc = state->Assume(retVal, false);
+ assert(lockSucc);
+ }
+
+ // Record that the lock was acquired.
+ lockSucc = lockSucc->add<LockSet>(lockR);
+
+ C.addTransition(lockSucc != state ? C.GenerateNode(CE, lockSucc) :
+ C.getPredecessor());
+}
+
+void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
+ SVal lock) {
+
+ const MemRegion *lockR = lock.getAsRegion();
+ if (!lockR)
+ return;
+
+ const GRState *state = C.getState();
+
+ // Record that the lock was released.
+ // FIXME: Handle unlocking locks that were never acquired. This may
+ // require IPA for wrappers.
+ const GRState *unlockState = state->remove<LockSet>(lockR);
+
+ if (state == unlockState)
+ return;
+
+ C.addTransition(C.GenerateNode(CE, unlockState));
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/RangeConstraintManager.cpp b/contrib/llvm/tools/clang/lib/Checker/RangeConstraintManager.cpp
new file mode 100644
index 0000000..c904c33
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/RangeConstraintManager.cpp
@@ -0,0 +1,359 @@
+//== RangeConstraintManager.cpp - Manage range constraints.------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines RangeConstraintManager, a class that tracks simple
+// equality and inequality constraints on symbolic values of GRState.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SimpleConstraintManager.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "clang/Checker/PathSensitive/GRTransferFuncs.h"
+#include "clang/Checker/ManagerRegistry.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableSet.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+namespace { class ConstraintRange {}; }
+static int ConstraintRangeIndex = 0;
+
+/// A Range represents the closed range [from, to]. The caller must
+/// guarantee that from <= to. Note that Range is immutable, so as not
+/// to subvert RangeSet's immutability.
+namespace {
+class Range : public std::pair<const llvm::APSInt*,
+ const llvm::APSInt*> {
+public:
+ Range(const llvm::APSInt &from, const llvm::APSInt &to)
+ : std::pair<const llvm::APSInt*, const llvm::APSInt*>(&from, &to) {
+ assert(from <= to);
+ }
+ bool Includes(const llvm::APSInt &v) const {
+ return *first <= v && v <= *second;
+ }
+ const llvm::APSInt &From() const {
+ return *first;
+ }
+ const llvm::APSInt &To() const {
+ return *second;
+ }
+ const llvm::APSInt *getConcreteValue() const {
+ return &From() == &To() ? &From() : NULL;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddPointer(&From());
+ ID.AddPointer(&To());
+ }
+};
+
+
+class RangeTrait : public llvm::ImutContainerInfo<Range> {
+public:
+ // When comparing if one Range is less than another, we should compare
+ // the actual APSInt values instead of their pointers. This keeps the order
+ // consistent (instead of comparing by pointer values) and can potentially
+ // be used to speed up some of the operations in RangeSet.
+ static inline bool isLess(key_type_ref lhs, key_type_ref rhs) {
+ return *lhs.first < *rhs.first || (!(*rhs.first < *lhs.first) &&
+ *lhs.second < *rhs.second);
+ }
+};
+
+/// RangeSet contains a set of ranges. If the set is empty, then
+/// the value of a symbol is overly constrained and there are no
+/// possible values for that symbol.
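+///
+/// For example, if a symbol's set is { [0, 10] }, then AddNE(5) yields
+/// { [0, 4], [6, 10] }, and applying AddGT(7) to that result yields
+/// { [8, 10] }.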
+class RangeSet {
+ typedef llvm::ImmutableSet<Range, RangeTrait> PrimRangeSet;
+ PrimRangeSet ranges; // no need to make const, since it is an
+ // ImmutableSet - this allows default operator=
+ // to work.
+public:
+ typedef PrimRangeSet::Factory Factory;
+ typedef PrimRangeSet::iterator iterator;
+
+ RangeSet(PrimRangeSet RS) : ranges(RS) {}
+ RangeSet(Factory& F) : ranges(F.GetEmptySet()) {}
+
+ iterator begin() const { return ranges.begin(); }
+ iterator end() const { return ranges.end(); }
+
+ bool isEmpty() const { return ranges.isEmpty(); }
+
+ /// Construct a new RangeSet representing '{ [from, to] }'.
+ RangeSet(Factory &F, const llvm::APSInt &from, const llvm::APSInt &to)
+ : ranges(F.Add(F.GetEmptySet(), Range(from, to))) {}
+
+ /// Profile - Generates a hash profile of this RangeSet for use
+ /// by FoldingSet.
+ void Profile(llvm::FoldingSetNodeID &ID) const { ranges.Profile(ID); }
+
+  /// getConcreteValue - If a symbol is constrained to equal a specific integer
+ /// constant then this method returns that value. Otherwise, it returns
+ /// NULL.
+ const llvm::APSInt* getConcreteValue() const {
+ return ranges.isSingleton() ? ranges.begin()->getConcreteValue() : 0;
+ }
+
+ /// AddEQ - Create a new RangeSet with the additional constraint that the
+ /// value be equal to V.
+ RangeSet AddEQ(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
+ // Search for a range that includes 'V'. If so, return a new RangeSet
+ // representing { [V, V] }.
+ for (PrimRangeSet::iterator i = begin(), e = end(); i!=e; ++i)
+ if (i->Includes(V))
+ return RangeSet(F, V, V);
+
+ return RangeSet(F);
+ }
+
+  /// AddNE - Create a new RangeSet with the additional constraint that the
+  /// value not be equal to V.
+ RangeSet AddNE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
+ PrimRangeSet newRanges = ranges;
+
+ // FIXME: We can perhaps enhance ImmutableSet to do this search for us
+ // in log(N) time using the sorted property of the internal AVL tree.
+ for (iterator i = begin(), e = end(); i != e; ++i) {
+ if (i->Includes(V)) {
+ // Remove the old range.
+ newRanges = F.Remove(newRanges, *i);
+ // Split the old range into possibly one or two ranges.
+ if (V != i->From())
+ newRanges = F.Add(newRanges, Range(i->From(), BV.Sub1(V)));
+ if (V != i->To())
+ newRanges = F.Add(newRanges, Range(BV.Add1(V), i->To()));
+ // All of the ranges are non-overlapping, so we can stop.
+ break;
+ }
+ }
+
+ return newRanges;
+ }
+
+  /// AddLT - Create a new RangeSet with the additional constraint that the
+  /// value be less than V.
+ RangeSet AddLT(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
+ PrimRangeSet newRanges = F.GetEmptySet();
+
+ for (iterator i = begin(), e = end() ; i != e ; ++i) {
+ if (i->Includes(V) && i->From() < V)
+ newRanges = F.Add(newRanges, Range(i->From(), BV.Sub1(V)));
+ else if (i->To() < V)
+ newRanges = F.Add(newRanges, *i);
+ }
+
+ return newRanges;
+ }
+
+ RangeSet AddLE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
+ PrimRangeSet newRanges = F.GetEmptySet();
+
+ for (iterator i = begin(), e = end(); i != e; ++i) {
+      // Strictly we should test whether the range includes V + 1, but no
+      // harm is done by this formulation.
+ if (i->Includes(V))
+ newRanges = F.Add(newRanges, Range(i->From(), V));
+ else if (i->To() <= V)
+ newRanges = F.Add(newRanges, *i);
+ }
+
+ return newRanges;
+ }
+
+ RangeSet AddGT(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
+ PrimRangeSet newRanges = F.GetEmptySet();
+
+ for (PrimRangeSet::iterator i = begin(), e = end(); i != e; ++i) {
+ if (i->Includes(V) && i->To() > V)
+ newRanges = F.Add(newRanges, Range(BV.Add1(V), i->To()));
+ else if (i->From() > V)
+ newRanges = F.Add(newRanges, *i);
+ }
+
+ return newRanges;
+ }
+
+ RangeSet AddGE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
+ PrimRangeSet newRanges = F.GetEmptySet();
+
+ for (PrimRangeSet::iterator i = begin(), e = end(); i != e; ++i) {
+      // Strictly we should test whether the range includes V - 1, but no
+      // harm is done by this formulation.
+ if (i->Includes(V))
+ newRanges = F.Add(newRanges, Range(V, i->To()));
+ else if (i->From() >= V)
+ newRanges = F.Add(newRanges, *i);
+ }
+
+ return newRanges;
+ }
+
+ void print(llvm::raw_ostream &os) const {
+ bool isFirst = true;
+ os << "{ ";
+ for (iterator i = begin(), e = end(); i != e; ++i) {
+ if (isFirst)
+ isFirst = false;
+ else
+ os << ", ";
+
+ os << '[' << i->From().toString(10) << ", " << i->To().toString(10)
+ << ']';
+ }
+ os << " }";
+ }
+
+ bool operator==(const RangeSet &other) const {
+ return ranges == other.ranges;
+ }
+};
+} // end anonymous namespace
+
+typedef llvm::ImmutableMap<SymbolRef,RangeSet> ConstraintRangeTy;
+
+namespace clang {
+template<>
+struct GRStateTrait<ConstraintRange>
+ : public GRStatePartialTrait<ConstraintRangeTy> {
+ static inline void* GDMIndex() { return &ConstraintRangeIndex; }
+};
+}
+
+namespace {
+class RangeConstraintManager : public SimpleConstraintManager{
+ RangeSet GetRange(const GRState *state, SymbolRef sym);
+public:
+ RangeConstraintManager(GRSubEngine &subengine)
+ : SimpleConstraintManager(subengine) {}
+
+ const GRState* AssumeSymNE(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V);
+
+ const GRState* AssumeSymEQ(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V);
+
+ const GRState* AssumeSymLT(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V);
+
+ const GRState* AssumeSymGT(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V);
+
+ const GRState* AssumeSymGE(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V);
+
+ const GRState* AssumeSymLE(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V);
+
+ const llvm::APSInt* getSymVal(const GRState* St, SymbolRef sym) const;
+
+ // FIXME: Refactor into SimpleConstraintManager?
+ bool isEqual(const GRState* St, SymbolRef sym, const llvm::APSInt& V) const {
+ const llvm::APSInt *i = getSymVal(St, sym);
+ return i ? *i == V : false;
+ }
+
+ const GRState* RemoveDeadBindings(const GRState* St, SymbolReaper& SymReaper);
+
+ void print(const GRState* St, llvm::raw_ostream& Out,
+ const char* nl, const char *sep);
+
+private:
+ RangeSet::Factory F;
+};
+
+} // end anonymous namespace
+
+ConstraintManager* clang::CreateRangeConstraintManager(GRStateManager&,
+ GRSubEngine &subeng) {
+ return new RangeConstraintManager(subeng);
+}
+
+const llvm::APSInt* RangeConstraintManager::getSymVal(const GRState* St,
+ SymbolRef sym) const {
+ const ConstraintRangeTy::data_type *T = St->get<ConstraintRange>(sym);
+ return T ? T->getConcreteValue() : NULL;
+}
+
+/// Scan all symbols referenced by the constraints. If a symbol may be dead
+/// (as determined by the SymbolReaper), remove its range from the map.
+const GRState*
+RangeConstraintManager::RemoveDeadBindings(const GRState* state,
+ SymbolReaper& SymReaper) {
+
+ ConstraintRangeTy CR = state->get<ConstraintRange>();
+ ConstraintRangeTy::Factory& CRFactory = state->get_context<ConstraintRange>();
+
+ for (ConstraintRangeTy::iterator I = CR.begin(), E = CR.end(); I != E; ++I) {
+ SymbolRef sym = I.getKey();
+ if (SymReaper.maybeDead(sym))
+ CR = CRFactory.Remove(CR, sym);
+ }
+
+ return state->set<ConstraintRange>(CR);
+}
+
+//===------------------------------------------------------------------------===
+// GetRange: look up (or lazily create) the range for a symbol.
+//===------------------------------------------------------------------------===/
+
+RangeSet
+RangeConstraintManager::GetRange(const GRState *state, SymbolRef sym) {
+ if (ConstraintRangeTy::data_type* V = state->get<ConstraintRange>(sym))
+ return *V;
+
+ // Lazily generate a new RangeSet representing all possible values for the
+ // given symbol type.
+ QualType T = state->getSymbolManager().getType(sym);
+ BasicValueFactory& BV = state->getBasicVals();
+ return RangeSet(F, BV.getMinValue(T), BV.getMaxValue(T));
+}
+
+//===------------------------------------------------------------------------===
+// AssumeSymX methods: public interface for RangeConstraintManager.
+//===------------------------------------------------------------------------===/
+
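+// Each expansion of AssumeX defines one AssumeSymXX method: it intersects
+// the symbol's current range with the new constraint and returns NULL if the
+// resulting range is empty, i.e. the state is infeasible.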
+#define AssumeX(OP)\
+const GRState*\
+RangeConstraintManager::AssumeSym ## OP(const GRState* state, SymbolRef sym,\
+ const llvm::APSInt& V){\
+ const RangeSet& R = GetRange(state, sym).Add##OP(state->getBasicVals(), F, V);\
+ return !R.isEmpty() ? state->set<ConstraintRange>(sym, R) : NULL;\
+}
+
+AssumeX(EQ)
+AssumeX(NE)
+AssumeX(LT)
+AssumeX(GT)
+AssumeX(LE)
+AssumeX(GE)
+
+//===------------------------------------------------------------------------===
+// Pretty-printing.
+//===------------------------------------------------------------------------===/
+
+void RangeConstraintManager::print(const GRState* St, llvm::raw_ostream& Out,
+ const char* nl, const char *sep) {
+
+ ConstraintRangeTy Ranges = St->get<ConstraintRange>();
+
+ if (Ranges.isEmpty())
+ return;
+
+ Out << nl << sep << "ranges of symbol values:";
+
+ for (ConstraintRangeTy::iterator I=Ranges.begin(), E=Ranges.end(); I!=E; ++I){
+ Out << nl << ' ' << I.getKey() << " : ";
+ I.getData().print(Out);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/RegionStore.cpp b/contrib/llvm/tools/clang/lib/Checker/RegionStore.cpp
new file mode 100644
index 0000000..c4072fd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/RegionStore.cpp
@@ -0,0 +1,1917 @@
+//== RegionStore.cpp - Field-sensitive store model --------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a basic region store model. In this model we do have
+// field sensitivity, but we assume nothing about the heap shape, so recursive
+// data structures are largely ignored; basically we do 1-limiting analysis.
+// Parameter pointers are assumed not to alias. Pointee objects of parameters
+// are created lazily.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "clang/Checker/PathSensitive/MemRegion.h"
+#include "llvm/ADT/ImmutableList.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using llvm::Optional;
+
+//===----------------------------------------------------------------------===//
+// Representation of binding keys.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class BindingKey {
+public:
+ enum Kind { Direct = 0x0, Default = 0x1 };
+private:
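+  // The binding kind is packed into the low bit of the region pointer.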
+  llvm::PointerIntPair<const MemRegion*, 1> P;
+ uint64_t Offset;
+
+ explicit BindingKey(const MemRegion *r, uint64_t offset, Kind k)
+ : P(r, (unsigned) k), Offset(offset) { assert(r); }
+public:
+
+ bool isDefault() const { return P.getInt() == Default; }
+ bool isDirect() const { return P.getInt() == Direct; }
+
+ const MemRegion *getRegion() const { return P.getPointer(); }
+ uint64_t getOffset() const { return Offset; }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.AddPointer(P.getOpaqueValue());
+ ID.AddInteger(Offset);
+ }
+
+ static BindingKey Make(const MemRegion *R, Kind k);
+
+ bool operator<(const BindingKey &X) const {
+ if (P.getOpaqueValue() < X.P.getOpaqueValue())
+ return true;
+ if (P.getOpaqueValue() > X.P.getOpaqueValue())
+ return false;
+ return Offset < X.Offset;
+ }
+
+ bool operator==(const BindingKey &X) const {
+ return P.getOpaqueValue() == X.P.getOpaqueValue() &&
+ Offset == X.Offset;
+ }
+};
+} // end anonymous namespace
+
+namespace llvm {
+ static inline
+ llvm::raw_ostream& operator<<(llvm::raw_ostream& os, BindingKey K) {
+ os << '(' << K.getRegion() << ',' << K.getOffset()
+ << ',' << (K.isDirect() ? "direct" : "default")
+ << ')';
+ return os;
+ }
+} // end llvm namespace
+
+//===----------------------------------------------------------------------===//
+// Actual Store type.
+//===----------------------------------------------------------------------===//
+
+typedef llvm::ImmutableMap<BindingKey, SVal> RegionBindings;
+
+//===----------------------------------------------------------------------===//
+// Fine-grained control of RegionStoreManager.
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct minimal_features_tag {};
+struct maximal_features_tag {};
+
+class RegionStoreFeatures {
+ bool SupportsFields;
+ bool SupportsRemaining;
+
+public:
+ RegionStoreFeatures(minimal_features_tag) :
+ SupportsFields(false), SupportsRemaining(false) {}
+
+ RegionStoreFeatures(maximal_features_tag) :
+ SupportsFields(true), SupportsRemaining(false) {}
+
+ void enableFields(bool t) { SupportsFields = t; }
+
+ bool supportsFields() const { return SupportsFields; }
+ bool supportsRemaining() const { return SupportsRemaining; }
+};
+}
+
+//===----------------------------------------------------------------------===//
+// Region "Extents"
+//===----------------------------------------------------------------------===//
+//
+// MemRegions represent chunks of memory with a size (their "extent"). This
+// GDM entry tracks the extents for regions. Extents are in bytes.
+//
+namespace { class RegionExtents {}; }
+static int RegionExtentsIndex = 0;
+namespace clang {
+ template<> struct GRStateTrait<RegionExtents>
+ : public GRStatePartialTrait<llvm::ImmutableMap<const MemRegion*, SVal> > {
+ static void* GDMIndex() { return &RegionExtentsIndex; }
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
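+// Returns true if 'ty' is a pointer type, or an integer type wide enough to
+// hold a pointer value.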
+static bool IsAnyPointerOrIntptr(QualType ty, ASTContext &Ctx) {
+ if (ty->isAnyPointerType())
+ return true;
+
+ return ty->isIntegerType() && ty->isScalarType() &&
+ Ctx.getTypeSize(ty) == Ctx.getTypeSize(Ctx.VoidPtrTy);
+}
+
+//===----------------------------------------------------------------------===//
+// Main RegionStore logic.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class RegionStoreSubRegionMap : public SubRegionMap {
+public:
+ typedef llvm::ImmutableSet<const MemRegion*> Set;
+ typedef llvm::DenseMap<const MemRegion*, Set> Map;
+private:
+ Set::Factory F;
+ Map M;
+public:
+ bool add(const MemRegion* Parent, const MemRegion* SubRegion) {
+ Map::iterator I = M.find(Parent);
+
+ if (I == M.end()) {
+ M.insert(std::make_pair(Parent, F.Add(F.GetEmptySet(), SubRegion)));
+ return true;
+ }
+
+ I->second = F.Add(I->second, SubRegion);
+ return false;
+ }
+
+ void process(llvm::SmallVectorImpl<const SubRegion*> &WL, const SubRegion *R);
+
+ ~RegionStoreSubRegionMap() {}
+
+ const Set *getSubRegions(const MemRegion *Parent) const {
+ Map::const_iterator I = M.find(Parent);
+ return I == M.end() ? NULL : &I->second;
+ }
+
+ bool iterSubRegions(const MemRegion* Parent, Visitor& V) const {
+ Map::const_iterator I = M.find(Parent);
+
+ if (I == M.end())
+ return true;
+
+ Set S = I->second;
+ for (Set::iterator SI=S.begin(),SE=S.end(); SI != SE; ++SI) {
+ if (!V.Visit(Parent, *SI))
+ return false;
+ }
+
+ return true;
+ }
+};
+
+
+class RegionStoreManager : public StoreManager {
+ const RegionStoreFeatures Features;
+ RegionBindings::Factory RBFactory;
+
+public:
+ RegionStoreManager(GRStateManager& mgr, const RegionStoreFeatures &f)
+ : StoreManager(mgr),
+ Features(f),
+ RBFactory(mgr.getAllocator()) {}
+
+ SubRegionMap *getSubRegionMap(Store store) {
+ return getRegionStoreSubRegionMap(store);
+ }
+
+ RegionStoreSubRegionMap *getRegionStoreSubRegionMap(Store store);
+
+ Optional<SVal> getBinding(RegionBindings B, const MemRegion *R);
+ Optional<SVal> getDirectBinding(RegionBindings B, const MemRegion *R);
+  /// getDefaultBinding - Returns the default binding, if any, associated
+  /// with a region and its subregions.
+ Optional<SVal> getDefaultBinding(RegionBindings B, const MemRegion *R);
+
+ /// setImplicitDefaultValue - Set the default binding for the provided
+ /// MemRegion to the value implicitly defined for compound literals when
+ /// the value is not specified.
+ Store setImplicitDefaultValue(Store store, const MemRegion *R, QualType T);
+
+ /// ArrayToPointer - Emulates the "decay" of an array to a pointer
+ /// type. 'Array' represents the lvalue of the array being decayed
+ /// to a pointer, and the returned SVal represents the decayed
+ /// version of that lvalue (i.e., a pointer to the first element of
+ /// the array). This is called by GRExprEngine when evaluating
+ /// casts from arrays to pointers.
+ SVal ArrayToPointer(Loc Array);
+
+ SVal EvalBinOp(BinaryOperator::Opcode Op,Loc L, NonLoc R, QualType resultTy);
+
+ Store getInitialStore(const LocationContext *InitLoc) {
+ return RBFactory.GetEmptyMap().getRoot();
+ }
+
+ //===-------------------------------------------------------------------===//
+ // Binding values to regions.
+ //===-------------------------------------------------------------------===//
+
+ Store InvalidateRegion(Store store, const MemRegion *R, const Expr *E,
+ unsigned Count, InvalidatedSymbols *IS) {
+ return RegionStoreManager::InvalidateRegions(store, &R, &R+1, E, Count, IS);
+ }
+
+ Store InvalidateRegions(Store store,
+ const MemRegion * const *Begin,
+ const MemRegion * const *End,
+ const Expr *E, unsigned Count,
+ InvalidatedSymbols *IS);
+
+public: // Made public for helper classes.
+
+ void RemoveSubRegionBindings(RegionBindings &B, const MemRegion *R,
+ RegionStoreSubRegionMap &M);
+
+ RegionBindings Add(RegionBindings B, BindingKey K, SVal V);
+
+ RegionBindings Add(RegionBindings B, const MemRegion *R,
+ BindingKey::Kind k, SVal V);
+
+ const SVal *Lookup(RegionBindings B, BindingKey K);
+ const SVal *Lookup(RegionBindings B, const MemRegion *R, BindingKey::Kind k);
+
+ RegionBindings Remove(RegionBindings B, BindingKey K);
+ RegionBindings Remove(RegionBindings B, const MemRegion *R,
+ BindingKey::Kind k);
+
+ RegionBindings Remove(RegionBindings B, const MemRegion *R) {
+ return Remove(Remove(B, R, BindingKey::Direct), R, BindingKey::Default);
+ }
+
+ Store Remove(Store store, BindingKey K);
+
+public: // Part of public interface to class.
+
+ Store Bind(Store store, Loc LV, SVal V);
+
+ Store BindCompoundLiteral(Store store, const CompoundLiteralExpr* CL,
+ const LocationContext *LC, SVal V);
+
+ Store BindDecl(Store store, const VarRegion *VR, SVal InitVal);
+
+ Store BindDeclWithNoInit(Store store, const VarRegion *) {
+ return store;
+ }
+
+ /// BindStruct - Bind a compound value to a structure.
+ Store BindStruct(Store store, const TypedRegion* R, SVal V);
+
+ Store BindArray(Store store, const TypedRegion* R, SVal V);
+
+ /// KillStruct - Set the entire struct to unknown.
+ Store KillStruct(Store store, const TypedRegion* R);
+
+ Store Remove(Store store, Loc LV);
+
+
+ //===------------------------------------------------------------------===//
+ // Loading values from regions.
+ //===------------------------------------------------------------------===//
+
+ /// The high level logic for this method is this:
+ /// Retrieve (L)
+ /// if L has binding
+ /// return L's binding
+ /// else if L is in killset
+ /// return unknown
+ /// else
+ /// if L is on stack or heap
+ /// return undefined
+ /// else
+ /// return symbolic
+ SVal Retrieve(Store store, Loc L, QualType T = QualType());
+
+ SVal RetrieveElement(Store store, const ElementRegion *R);
+
+ SVal RetrieveField(Store store, const FieldRegion *R);
+
+ SVal RetrieveObjCIvar(Store store, const ObjCIvarRegion *R);
+
+ SVal RetrieveVar(Store store, const VarRegion *R);
+
+ SVal RetrieveLazySymbol(const TypedRegion *R);
+
+ SVal RetrieveFieldOrElementCommon(Store store, const TypedRegion *R,
+ QualType Ty, const MemRegion *superR);
+
+ /// Retrieve the values in a struct and return a CompoundVal, used when doing
+ /// struct copy:
+ /// struct s x, y;
+ /// x = y;
+ /// y's value is retrieved by this method.
+ SVal RetrieveStruct(Store store, const TypedRegion* R);
+
+ SVal RetrieveArray(Store store, const TypedRegion* R);
+
+ /// Get the state and region whose binding this region R corresponds to.
+ std::pair<Store, const MemRegion*>
+ GetLazyBinding(RegionBindings B, const MemRegion *R);
+
+ Store CopyLazyBindings(nonloc::LazyCompoundVal V, Store store,
+ const TypedRegion *R);
+
+ //===------------------------------------------------------------------===//
+ // State pruning.
+ //===------------------------------------------------------------------===//
+
+ /// RemoveDeadBindings - Scans the RegionStore of 'state' for dead values.
+ /// It returns a new Store with these values removed.
+ const GRState *RemoveDeadBindings(GRState &state, Stmt* Loc,
+ const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper,
+ llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
+
+ const GRState *EnterStackFrame(const GRState *state,
+ const StackFrameContext *frame);
+
+ //===------------------------------------------------------------------===//
+ // Region "extents".
+ //===------------------------------------------------------------------===//
+
+ const GRState *setExtent(const GRState *state,const MemRegion* R,SVal Extent){
+ return state->set<RegionExtents>(R, Extent);
+ }
+
+ Optional<SVal> getExtent(const GRState *state, const MemRegion *R) {
+ const SVal *V = state->get<RegionExtents>(R);
+ if (V)
+ return *V;
+ else
+ return Optional<SVal>();
+ }
+
+ DefinedOrUnknownSVal getSizeInElements(const GRState *state,
+ const MemRegion* R, QualType EleTy);
+
+ //===------------------------------------------------------------------===//
+ // Utility methods.
+ //===------------------------------------------------------------------===//
+
+ static inline RegionBindings GetRegionBindings(Store store) {
+ return RegionBindings(static_cast<const RegionBindings::TreeTy*>(store));
+ }
+
+ void print(Store store, llvm::raw_ostream& Out, const char* nl,
+ const char *sep);
+
+ void iterBindings(Store store, BindingsHandler& f) {
+ // FIXME: Implement.
+ }
+
+ // FIXME: Remove.
+ BasicValueFactory& getBasicVals() {
+ return StateMgr.getBasicVals();
+ }
+
+ // FIXME: Remove.
+ ASTContext& getContext() { return StateMgr.getContext(); }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// RegionStore creation.
+//===----------------------------------------------------------------------===//
+
+StoreManager *clang::CreateRegionStoreManager(GRStateManager& StMgr) {
+ RegionStoreFeatures F = maximal_features_tag();
+ return new RegionStoreManager(StMgr, F);
+}
+
+StoreManager *clang::CreateFieldsOnlyRegionStoreManager(GRStateManager &StMgr) {
+ RegionStoreFeatures F = minimal_features_tag();
+ F.enableFields(true);
+ return new RegionStoreManager(StMgr, F);
+}
+
+void
+RegionStoreSubRegionMap::process(llvm::SmallVectorImpl<const SubRegion*> &WL,
+ const SubRegion *R) {
+ const MemRegion *superR = R->getSuperRegion();
+ if (add(superR, R))
+ if (const SubRegion *sr = dyn_cast<SubRegion>(superR))
+ WL.push_back(sr);
+}
+
+RegionStoreSubRegionMap*
+RegionStoreManager::getRegionStoreSubRegionMap(Store store) {
+ RegionBindings B = GetRegionBindings(store);
+ RegionStoreSubRegionMap *M = new RegionStoreSubRegionMap();
+
+ llvm::SmallVector<const SubRegion*, 10> WL;
+
+ for (RegionBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I)
+ if (const SubRegion *R = dyn_cast<SubRegion>(I.getKey().getRegion()))
+ M->process(WL, R);
+
+ // We also need to record in the subregion map "intermediate" regions that
+ // don't have direct bindings but are super regions of those that do.
+ while (!WL.empty()) {
+ const SubRegion *R = WL.back();
+ WL.pop_back();
+ M->process(WL, R);
+ }
+
+ return M;
+}
+
+//===----------------------------------------------------------------------===//
+// Region Cluster analysis.
+//===----------------------------------------------------------------------===//
+
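+// ClusterAnalysis groups bindings by their base region and visits them via
+// an explicit worklist. It uses the curiously recurring template pattern:
+// subclasses implement the Visit* hooks, which are invoked through
+// static_cast<DERIVED*> to avoid virtual dispatch.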
+namespace {
+template <typename DERIVED>
+class ClusterAnalysis {
+protected:
+ typedef BumpVector<BindingKey> RegionCluster;
+ typedef llvm::DenseMap<const MemRegion *, RegionCluster *> ClusterMap;
+ llvm::DenseMap<const RegionCluster*, unsigned> Visited;
+ typedef llvm::SmallVector<std::pair<const MemRegion *, RegionCluster*>, 10>
+ WorkList;
+
+ BumpVectorContext BVC;
+ ClusterMap ClusterM;
+ WorkList WL;
+
+ RegionStoreManager &RM;
+ ASTContext &Ctx;
+ ValueManager &ValMgr;
+
+ RegionBindings B;
+
+public:
+ ClusterAnalysis(RegionStoreManager &rm, GRStateManager &StateMgr,
+ RegionBindings b)
+ : RM(rm), Ctx(StateMgr.getContext()), ValMgr(StateMgr.getValueManager()),
+ B(b) {}
+
+ RegionBindings getRegionBindings() const { return B; }
+
+ void AddToCluster(BindingKey K) {
+ const MemRegion *R = K.getRegion();
+ const MemRegion *baseR = R->getBaseRegion();
+ RegionCluster &C = getCluster(baseR);
+ C.push_back(K, BVC);
+ static_cast<DERIVED*>(this)->VisitAddedToCluster(baseR, C);
+ }
+
+ bool isVisited(const MemRegion *R) {
+ return (bool) Visited[&getCluster(R->getBaseRegion())];
+ }
+
+ RegionCluster& getCluster(const MemRegion *R) {
+ RegionCluster *&CRef = ClusterM[R];
+ if (!CRef) {
+ void *Mem = BVC.getAllocator().template Allocate<RegionCluster>();
+ CRef = new (Mem) RegionCluster(BVC, 10);
+ }
+ return *CRef;
+ }
+
+ void GenerateClusters() {
+ // Scan the entire set of bindings and make the region clusters.
+ for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
+ AddToCluster(RI.getKey());
+ if (const MemRegion *R = RI.getData().getAsRegion()) {
+ // Generate a cluster, but don't add the region to the cluster
+ // if there aren't any bindings.
+ getCluster(R->getBaseRegion());
+ }
+ }
+ }
+
+ bool AddToWorkList(const MemRegion *R, RegionCluster &C) {
+ if (unsigned &visited = Visited[&C])
+ return false;
+ else
+ visited = 1;
+
+ WL.push_back(std::make_pair(R, &C));
+ return true;
+ }
+
+ bool AddToWorkList(BindingKey K) {
+ return AddToWorkList(K.getRegion());
+ }
+
+ bool AddToWorkList(const MemRegion *R) {
+ const MemRegion *baseR = R->getBaseRegion();
+ return AddToWorkList(baseR, getCluster(baseR));
+ }
+
+ void RunWorkList() {
+ while (!WL.empty()) {
+ const MemRegion *baseR;
+ RegionCluster *C;
+ llvm::tie(baseR, C) = WL.back();
+ WL.pop_back();
+
+ // First visit the cluster.
+ static_cast<DERIVED*>(this)->VisitCluster(baseR, C->begin(), C->end());
+
+ // Next, visit the base region.
+ static_cast<DERIVED*>(this)->VisitBaseRegion(baseR);
+ }
+ }
+
+public:
+ void VisitAddedToCluster(const MemRegion *baseR, RegionCluster &C) {}
+ void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E) {}
+ void VisitBaseRegion(const MemRegion *baseR) {}
+};
+}
+
+//===----------------------------------------------------------------------===//
+// Binding invalidation.
+//===----------------------------------------------------------------------===//
+
+void RegionStoreManager::RemoveSubRegionBindings(RegionBindings &B,
+ const MemRegion *R,
+ RegionStoreSubRegionMap &M) {
+
+ if (const RegionStoreSubRegionMap::Set *S = M.getSubRegions(R))
+ for (RegionStoreSubRegionMap::Set::iterator I = S->begin(), E = S->end();
+ I != E; ++I)
+ RemoveSubRegionBindings(B, *I, M);
+
+ B = Remove(B, R);
+}
+
+namespace {
+class InvalidateRegionsWorker : public ClusterAnalysis<InvalidateRegionsWorker>
+{
+ const Expr *Ex;
+ unsigned Count;
+ StoreManager::InvalidatedSymbols *IS;
+public:
+ InvalidateRegionsWorker(RegionStoreManager &rm,
+ GRStateManager &stateMgr,
+ RegionBindings b,
+ const Expr *ex, unsigned count,
+ StoreManager::InvalidatedSymbols *is)
+ : ClusterAnalysis<InvalidateRegionsWorker>(rm, stateMgr, b),
+ Ex(ex), Count(count), IS(is) {}
+
+ void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E);
+ void VisitBaseRegion(const MemRegion *baseR);
+
+private:
+ void VisitBinding(SVal V);
+};
+}
+
+void InvalidateRegionsWorker::VisitBinding(SVal V) {
+ // A symbol? Mark it touched by the invalidation.
+ if (IS)
+ if (SymbolRef Sym = V.getAsSymbol())
+ IS->insert(Sym);
+
+ if (const MemRegion *R = V.getAsRegion()) {
+ AddToWorkList(R);
+ return;
+ }
+
+ // Is it a LazyCompoundVal? All references get invalidated as well.
+ if (const nonloc::LazyCompoundVal *LCS =
+ dyn_cast<nonloc::LazyCompoundVal>(&V)) {
+
+ const MemRegion *LazyR = LCS->getRegion();
+ RegionBindings B = RegionStoreManager::GetRegionBindings(LCS->getStore());
+
+ for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
+ const MemRegion *baseR = RI.getKey().getRegion();
+ if (cast<SubRegion>(baseR)->isSubRegionOf(LazyR))
+ VisitBinding(RI.getData());
+ }
+
+ return;
+ }
+}
+
+void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
+ BindingKey *I, BindingKey *E) {
+ for ( ; I != E; ++I) {
+ // Get the old binding. Is it a region? If so, add it to the worklist.
+ const BindingKey &K = *I;
+ if (const SVal *V = RM.Lookup(B, K))
+ VisitBinding(*V);
+
+ B = RM.Remove(B, K);
+ }
+}
+
+void InvalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
+ if (IS) {
+ // Symbolic region? Mark that symbol touched by the invalidation.
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR))
+ IS->insert(SR->getSymbol());
+ }
+
+ // BlockDataRegion? If so, invalidate captured variables that are passed
+ // by reference.
+ if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(baseR)) {
+ for (BlockDataRegion::referenced_vars_iterator
+ BI = BR->referenced_vars_begin(), BE = BR->referenced_vars_end() ;
+ BI != BE; ++BI) {
+ const VarRegion *VR = *BI;
+ const VarDecl *VD = VR->getDecl();
+ if (VD->getAttr<BlocksAttr>() || !VD->hasLocalStorage())
+ AddToWorkList(VR);
+ }
+ return;
+ }
+
+ if (isa<AllocaRegion>(baseR) || isa<SymbolicRegion>(baseR)) {
+    // Invalidate the region by setting its default value to a
+    // conjured symbol. The type of the symbol is irrelevant.
+ DefinedOrUnknownSVal V = ValMgr.getConjuredSymbolVal(baseR, Ex, Ctx.IntTy,
+ Count);
+ B = RM.Add(B, baseR, BindingKey::Default, V);
+ return;
+ }
+
+ if (!baseR->isBoundable())
+ return;
+
+ const TypedRegion *TR = cast<TypedRegion>(baseR);
+ QualType T = TR->getValueType(Ctx);
+
+ // Invalidate the binding.
+ if (const RecordType *RT = T->getAsStructureType()) {
+ const RecordDecl *RD = RT->getDecl()->getDefinition();
+ // No record definition. There is nothing we can do.
+ if (!RD) {
+ B = RM.Remove(B, baseR);
+ return;
+ }
+
+    // Invalidate the region by setting its default value to a
+    // conjured symbol. The type of the symbol is irrelevant.
+ DefinedOrUnknownSVal V = ValMgr.getConjuredSymbolVal(baseR, Ex, Ctx.IntTy,
+ Count);
+ B = RM.Add(B, baseR, BindingKey::Default, V);
+ return;
+ }
+
+ if (const ArrayType *AT = Ctx.getAsArrayType(T)) {
+ // Set the default value of the array to conjured symbol.
+ DefinedOrUnknownSVal V =
+ ValMgr.getConjuredSymbolVal(baseR, Ex, AT->getElementType(), Count);
+ B = RM.Add(B, baseR, BindingKey::Default, V);
+ return;
+ }
+
+ DefinedOrUnknownSVal V = ValMgr.getConjuredSymbolVal(baseR, Ex, T, Count);
+ assert(SymbolManager::canSymbolicate(T) || V.isUnknown());
+ B = RM.Add(B, baseR, BindingKey::Direct, V);
+}
+
+Store RegionStoreManager::InvalidateRegions(Store store,
+ const MemRegion * const *I,
+ const MemRegion * const *E,
+ const Expr *Ex, unsigned Count,
+ InvalidatedSymbols *IS) {
+ InvalidateRegionsWorker W(*this, StateMgr,
+ RegionStoreManager::GetRegionBindings(store),
+ Ex, Count, IS);
+
+ // Scan the bindings and generate the clusters.
+ W.GenerateClusters();
+
+ // Add I .. E to the worklist.
+ for ( ; I != E; ++I)
+ W.AddToWorkList(*I);
+
+ W.RunWorkList();
+
+ // Return the new bindings.
+ return W.getRegionBindings().getRoot();
+}
+
+//===----------------------------------------------------------------------===//
+// Extents for regions.
+//===----------------------------------------------------------------------===//
+
+DefinedOrUnknownSVal RegionStoreManager::getSizeInElements(const GRState *state,
+ const MemRegion *R,
+ QualType EleTy) {
+
+ switch (R->getKind()) {
+ case MemRegion::CXXThisRegionKind:
+ assert(0 && "Cannot get size of 'this' region");
+ case MemRegion::GenericMemSpaceRegionKind:
+ case MemRegion::StackLocalsSpaceRegionKind:
+ case MemRegion::StackArgumentsSpaceRegionKind:
+ case MemRegion::HeapSpaceRegionKind:
+ case MemRegion::GlobalsSpaceRegionKind:
+ case MemRegion::UnknownSpaceRegionKind:
+ assert(0 && "Cannot index into a MemSpace");
+ return UnknownVal();
+
+ case MemRegion::FunctionTextRegionKind:
+ case MemRegion::BlockTextRegionKind:
+ case MemRegion::BlockDataRegionKind:
+ // Technically this can happen if people do funny things with casts.
+ return UnknownVal();
+
+ // Not yet handled.
+ case MemRegion::AllocaRegionKind:
+ case MemRegion::CompoundLiteralRegionKind:
+ case MemRegion::ElementRegionKind:
+ case MemRegion::FieldRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ case MemRegion::CXXObjectRegionKind:
+ return UnknownVal();
+
+ case MemRegion::SymbolicRegionKind: {
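+    // If an extent was recorded for this symbolic region (e.g., by a checker
+    // modeling malloc), convert the byte size into an element count.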
+ const SVal *Size = state->get<RegionExtents>(R);
+ if (!Size)
+ return UnknownVal();
+ const nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(Size);
+ if (!CI)
+ return UnknownVal();
+
+ CharUnits RegionSize =
+ CharUnits::fromQuantity(CI->getValue().getSExtValue());
+ CharUnits EleSize = getContext().getTypeSizeInChars(EleTy);
+ assert(RegionSize % EleSize == 0);
+
+ return ValMgr.makeIntVal(RegionSize / EleSize, false);
+ }
+
+ case MemRegion::StringRegionKind: {
+ const StringLiteral* Str = cast<StringRegion>(R)->getStringLiteral();
+ // We intentionally made the size value signed because it participates in
+ // operations with signed indices.
+ return ValMgr.makeIntVal(Str->getByteLength()+1, false);
+ }
+
+ case MemRegion::VarRegionKind: {
+ const VarRegion* VR = cast<VarRegion>(R);
+ // Get the type of the variable.
+ QualType T = VR->getDesugaredValueType(getContext());
+
+ // FIXME: Handle variable-length arrays.
+ if (isa<VariableArrayType>(T))
+ return UnknownVal();
+
+ if (const ConstantArrayType* CAT = dyn_cast<ConstantArrayType>(T)) {
+      // Return the size as a signed integer.
+ return ValMgr.makeIntVal(CAT->getSize(), false);
+ }
+
+ // Clients can reinterpret ordinary variables as arrays, possibly of
+ // another type. The width is rounded down to ensure that an access is
+ // entirely within bounds.
+ CharUnits VarSize = getContext().getTypeSizeInChars(T);
+ CharUnits EleSize = getContext().getTypeSizeInChars(EleTy);
+ return ValMgr.makeIntVal(VarSize / EleSize, false);
+ }
+ }
+
+ assert(0 && "Unreachable");
+ return UnknownVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Location and region casting.
+//===----------------------------------------------------------------------===//
+
+/// ArrayToPointer - Emulates the "decay" of an array to a pointer
+/// type. 'Array' represents the lvalue of the array being decayed
+/// to a pointer, and the returned SVal represents the decayed
+/// version of that lvalue (i.e., a pointer to the first element of
+/// the array). This is called by GRExprEngine when evaluating casts
+/// from arrays to pointers.
+SVal RegionStoreManager::ArrayToPointer(Loc Array) {
+ if (!isa<loc::MemRegionVal>(Array))
+ return UnknownVal();
+
+ const MemRegion* R = cast<loc::MemRegionVal>(&Array)->getRegion();
+ const TypedRegion* ArrayR = dyn_cast<TypedRegion>(R);
+
+ if (!ArrayR)
+ return UnknownVal();
+
+ // Strip off typedefs from the ArrayRegion's ValueType.
+ QualType T = ArrayR->getValueType(getContext()).getDesugaredType();
+ ArrayType *AT = cast<ArrayType>(T);
+ T = AT->getElementType();
+
+ SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
+ return loc::MemRegionVal(MRMgr.getElementRegion(T, ZeroIdx, ArrayR,
+ getContext()));
+}
+
+//===----------------------------------------------------------------------===//
+// Pointer arithmetic.
+//===----------------------------------------------------------------------===//
+
+SVal RegionStoreManager::EvalBinOp(BinaryOperator::Opcode Op, Loc L, NonLoc R,
+ QualType resultTy) {
+ // Assume the base location is MemRegionVal.
+ if (!isa<loc::MemRegionVal>(L))
+ return UnknownVal();
+
+ const MemRegion* MR = cast<loc::MemRegionVal>(L).getRegion();
+ const ElementRegion *ER = 0;
+
+ switch (MR->getKind()) {
+ case MemRegion::SymbolicRegionKind: {
+ const SymbolicRegion *SR = cast<SymbolicRegion>(MR);
+ SymbolRef Sym = SR->getSymbol();
+ QualType T = Sym->getType(getContext());
+ QualType EleTy;
+
+ if (const PointerType *PT = T->getAs<PointerType>())
+ EleTy = PT->getPointeeType();
+ else
+ EleTy = T->getAs<ObjCObjectPointerType>()->getPointeeType();
+
+ SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
+ ER = MRMgr.getElementRegion(EleTy, ZeroIdx, SR, getContext());
+ break;
+ }
+ case MemRegion::AllocaRegionKind: {
+ const AllocaRegion *AR = cast<AllocaRegion>(MR);
+    // Create an ElementRegion of bytes. Note that CharTy itself is not a
+    // pointer type, so build 'char *' first and take its pointee type.
+    QualType T = getContext().getPointerType(getContext().CharTy);
+    QualType EleTy = T->getAs<PointerType>()->getPointeeType();
+ SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
+ ER = MRMgr.getElementRegion(EleTy, ZeroIdx, AR, getContext());
+ break;
+ }
+
+ case MemRegion::ElementRegionKind: {
+ ER = cast<ElementRegion>(MR);
+ break;
+ }
+
+ // Not yet handled.
+ case MemRegion::VarRegionKind:
+  case MemRegion::StringRegionKind:
+ // Fall-through.
+ case MemRegion::CompoundLiteralRegionKind:
+ case MemRegion::FieldRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ case MemRegion::CXXObjectRegionKind:
+ return UnknownVal();
+
+ case MemRegion::FunctionTextRegionKind:
+ case MemRegion::BlockTextRegionKind:
+ case MemRegion::BlockDataRegionKind:
+ // Technically this can happen if people do funny things with casts.
+ return UnknownVal();
+
+ case MemRegion::CXXThisRegionKind:
+ assert(0 &&
+ "Cannot perform pointer arithmetic on implicit argument 'this'");
+ case MemRegion::GenericMemSpaceRegionKind:
+ case MemRegion::StackLocalsSpaceRegionKind:
+ case MemRegion::StackArgumentsSpaceRegionKind:
+ case MemRegion::HeapSpaceRegionKind:
+ case MemRegion::GlobalsSpaceRegionKind:
+ case MemRegion::UnknownSpaceRegionKind:
+ assert(0 && "Cannot perform pointer arithmetic on a MemSpace");
+ return UnknownVal();
+ }
+
+ SVal Idx = ER->getIndex();
+ nonloc::ConcreteInt* Base = dyn_cast<nonloc::ConcreteInt>(&Idx);
+
+ // For now, only support:
+ // (a) concrete integer indices that can easily be resolved
+ // (b) 0 + symbolic index
+ if (Base) {
+ if (nonloc::ConcreteInt *Offset = dyn_cast<nonloc::ConcreteInt>(&R)) {
+ // FIXME: Should use SValuator here.
+ SVal NewIdx =
+ Base->evalBinOp(ValMgr, Op,
+ cast<nonloc::ConcreteInt>(ValMgr.convertToArrayIndex(*Offset)));
+ const MemRegion* NewER =
+ MRMgr.getElementRegion(ER->getElementType(), NewIdx,
+ ER->getSuperRegion(), getContext());
+ return ValMgr.makeLoc(NewER);
+ }
+ if (0 == Base->getValue()) {
+ const MemRegion* NewER =
+ MRMgr.getElementRegion(ER->getElementType(), R,
+ ER->getSuperRegion(), getContext());
+ return ValMgr.makeLoc(NewER);
+ }
+ }
+
+ return UnknownVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Loading values from regions.
+//===----------------------------------------------------------------------===//
+
+Optional<SVal> RegionStoreManager::getDirectBinding(RegionBindings B,
+ const MemRegion *R) {
+ if (const SVal *V = Lookup(B, R, BindingKey::Direct))
+ return *V;
+
+ return Optional<SVal>();
+}
+
+Optional<SVal> RegionStoreManager::getDefaultBinding(RegionBindings B,
+ const MemRegion *R) {
+ if (R->isBoundable())
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(R))
+ if (TR->getValueType(getContext())->isUnionType())
+ return UnknownVal();
+
+ if (const SVal *V = Lookup(B, R, BindingKey::Default))
+ return *V;
+
+ return Optional<SVal>();
+}
+
+Optional<SVal> RegionStoreManager::getBinding(RegionBindings B,
+ const MemRegion *R) {
+
+ if (const Optional<SVal> &V = getDirectBinding(B, R))
+ return V;
+
+ return getDefaultBinding(B, R);
+}
+
+static bool IsReinterpreted(QualType RTy, QualType UsedTy, ASTContext &Ctx) {
+ RTy = Ctx.getCanonicalType(RTy);
+ UsedTy = Ctx.getCanonicalType(UsedTy);
+
+ if (RTy == UsedTy)
+ return false;
+
+ // Recursively check the types. We basically want to see if a pointer value
+ // is ever reinterpreted as a non-pointer, e.g. void** and intptr_t*
+ // represents a reinterpretation.
+ if (Loc::IsLocType(RTy) && Loc::IsLocType(UsedTy)) {
+ const PointerType *PRTy = RTy->getAs<PointerType>();
+ const PointerType *PUsedTy = UsedTy->getAs<PointerType>();
+
+ return PUsedTy && PRTy &&
+ IsReinterpreted(PRTy->getPointeeType(),
+ PUsedTy->getPointeeType(), Ctx);
+ }
+
+ return true;
+}
+
+SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) {
+ assert(!isa<UnknownVal>(L) && "location unknown");
+ assert(!isa<UndefinedVal>(L) && "location undefined");
+
+ // FIXME: Is this even possible? Shouldn't this be treated as a null
+ // dereference at a higher level?
+ if (isa<loc::ConcreteInt>(L))
+ return UndefinedVal();
+
+ const MemRegion *MR = cast<loc::MemRegionVal>(L).getRegion();
+
+ if (isa<AllocaRegion>(MR) || isa<SymbolicRegion>(MR))
+ MR = GetElementZeroRegion(MR, T);
+
+ if (isa<CodeTextRegion>(MR)) {
+ assert(0 && "Why load from a code text region?");
+ return UnknownVal();
+ }
+
+ // FIXME: Perhaps this method should just take a 'const MemRegion*' argument
+ // instead of 'Loc', and have the other Loc cases handled at a higher level.
+ const TypedRegion *R = cast<TypedRegion>(MR);
+ QualType RTy = R->getValueType(getContext());
+
+ // FIXME: We should eventually handle funny addressing. e.g.:
+ //
+ // int x = ...;
+ // int *p = &x;
+ // char *q = (char*) p;
+ // char c = *q; // returns the first byte of 'x'.
+ //
+ // Such funny addressing will occur due to layering of regions.
+
+#if 0
+ ASTContext &Ctx = getContext();
+ if (!T.isNull() && IsReinterpreted(RTy, T, Ctx)) {
+ SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
+ R = MRMgr.getElementRegion(T, ZeroIdx, R, Ctx);
+ RTy = T;
+ assert(Ctx.getCanonicalType(RTy) ==
+ Ctx.getCanonicalType(R->getValueType(Ctx)));
+ }
+#endif
+
+ if (RTy->isStructureOrClassType())
+ return RetrieveStruct(store, R);
+
+ // FIXME: Handle unions.
+ if (RTy->isUnionType())
+ return UnknownVal();
+
+ if (RTy->isArrayType())
+ return RetrieveArray(store, R);
+
+ // FIXME: handle Vector types.
+ if (RTy->isVectorType())
+ return UnknownVal();
+
+ if (const FieldRegion* FR = dyn_cast<FieldRegion>(R))
+ return CastRetrievedVal(RetrieveField(store, FR), FR, T, false);
+
+ if (const ElementRegion* ER = dyn_cast<ElementRegion>(R)) {
+ // FIXME: Here we actually perform an implicit conversion from the loaded
+ // value to the element type. Eventually we want to compose these values
+ // more intelligently. For example, an 'element' can encompass multiple
+ // bound regions (e.g., several bound bytes), or could be a subset of
+ // a larger value.
+ return CastRetrievedVal(RetrieveElement(store, ER), ER, T, false);
+ }
+
+ if (const ObjCIvarRegion *IVR = dyn_cast<ObjCIvarRegion>(R)) {
+ // FIXME: Here we actually perform an implicit conversion from the loaded
+ // value to the ivar type. What we should model is stores to ivars
+ // that blow past the extent of the ivar. If the address of the ivar is
+    // reinterpreted, it is possible we stored a different value that could
+ // fit within the ivar. Either we need to cast these when storing them
+ // or reinterpret them lazily (as we do here).
+ return CastRetrievedVal(RetrieveObjCIvar(store, IVR), IVR, T, false);
+ }
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ // FIXME: Here we actually perform an implicit conversion from the loaded
+ // value to the variable type. What we should model is stores to variables
+ // that blow past the extent of the variable. If the address of the
+    // variable is reinterpreted, it is possible we stored a different value
+ // that could fit within the variable. Either we need to cast these when
+ // storing them or reinterpret them lazily (as we do here).
+ return CastRetrievedVal(RetrieveVar(store, VR), VR, T, false);
+ }
+
+ RegionBindings B = GetRegionBindings(store);
+ const SVal *V = Lookup(B, R, BindingKey::Direct);
+
+ // Check if the region has a binding.
+ if (V)
+ return *V;
+
+ // The location does not have a bound value. This means that it has
+ // the value it had upon its creation and/or entry to the analyzed
+ // function/method. These are either symbolic values or 'undefined'.
+ if (R->hasStackNonParametersStorage()) {
+ // All stack variables are considered to have undefined values
+ // upon creation. All heap allocated blocks are considered to
+ // have undefined values as well unless they are explicitly bound
+ // to specific values.
+ return UndefinedVal();
+ }
+
+ // All other values are symbolic.
+ return ValMgr.getRegionValueSymbolVal(R);
+}
+
+std::pair<Store, const MemRegion *>
+RegionStoreManager::GetLazyBinding(RegionBindings B, const MemRegion *R) {
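+  // Walk up the region hierarchy. If an enclosing region is bound to a
+  // LazyCompoundVal, rebuild R relative to the lazily copied region so the
+  // value can be retrieved from the snapshotted store.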
+ if (Optional<SVal> OV = getDirectBinding(B, R))
+ if (const nonloc::LazyCompoundVal *V =
+ dyn_cast<nonloc::LazyCompoundVal>(OV.getPointer()))
+ return std::make_pair(V->getStore(), V->getRegion());
+
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ const std::pair<Store, const MemRegion *> &X =
+ GetLazyBinding(B, ER->getSuperRegion());
+
+ if (X.second)
+ return std::make_pair(X.first,
+ MRMgr.getElementRegionWithSuper(ER, X.second));
+ }
+ else if (const FieldRegion *FR = dyn_cast<FieldRegion>(R)) {
+ const std::pair<Store, const MemRegion *> &X =
+ GetLazyBinding(B, FR->getSuperRegion());
+
+ if (X.second)
+ return std::make_pair(X.first,
+ MRMgr.getFieldRegionWithSuper(FR, X.second));
+ }
+  // The NULL MemRegion indicates a non-existent lazy binding. A NULL Store is
+ // possible for a valid lazy binding.
+ return std::make_pair((Store) 0, (const MemRegion *) 0);
+}
+
+SVal RegionStoreManager::RetrieveElement(Store store,
+ const ElementRegion* R) {
+ // Check if the region has a binding.
+ RegionBindings B = GetRegionBindings(store);
+ if (const Optional<SVal> &V = getDirectBinding(B, R))
+ return *V;
+
+ const MemRegion* superR = R->getSuperRegion();
+
+ // Check if the region is an element region of a string literal.
+ if (const StringRegion *StrR=dyn_cast<StringRegion>(superR)) {
+ // FIXME: Handle loads from strings where the literal is treated as
+ // an integer, e.g., *((unsigned int*)"hello")
+ ASTContext &Ctx = getContext();
+ QualType T = Ctx.getAsArrayType(StrR->getValueType(Ctx))->getElementType();
+ if (T != Ctx.getCanonicalType(R->getElementType()))
+ return UnknownVal();
+
+ const StringLiteral *Str = StrR->getStringLiteral();
+ SVal Idx = R->getIndex();
+ if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&Idx)) {
+ int64_t i = CI->getValue().getSExtValue();
+ int64_t byteLength = Str->getByteLength();
+ if (i > byteLength) {
+ // Buffer overflow checking in GRExprEngine should handle this case,
+ // but we shouldn't rely on it to not overflow here if that checking
+ // is disabled.
+ return UnknownVal();
+ }
+ char c = (i == byteLength) ? '\0' : Str->getStrData()[i];
+ return ValMgr.makeIntVal(c, T);
+ }
+ }
+
+ // Check if the immediate super region has a direct binding.
+ if (const Optional<SVal> &V = getDirectBinding(B, superR)) {
+ if (SymbolRef parentSym = V->getAsSymbol())
+ return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ if (V->isUnknownOrUndef())
+ return *V;
+
+ // Handle LazyCompoundVals for the immediate super region. Other cases
+ // are handled in 'RetrieveFieldOrElementCommon'.
+ if (const nonloc::LazyCompoundVal *LCV =
+ dyn_cast<nonloc::LazyCompoundVal>(V)) {
+
+ R = MRMgr.getElementRegionWithSuper(R, LCV->getRegion());
+ return RetrieveElement(LCV->getStore(), R);
+ }
+
+ // Other cases: give up.
+ return UnknownVal();
+ }
+
+ return RetrieveFieldOrElementCommon(store, R, R->getElementType(), superR);
+}
+
+SVal RegionStoreManager::RetrieveField(Store store,
+ const FieldRegion* R) {
+
+ // Check if the region has a binding.
+ RegionBindings B = GetRegionBindings(store);
+ if (const Optional<SVal> &V = getDirectBinding(B, R))
+ return *V;
+
+ QualType Ty = R->getValueType(getContext());
+ return RetrieveFieldOrElementCommon(store, R, Ty, R->getSuperRegion());
+}
+
+SVal RegionStoreManager::RetrieveFieldOrElementCommon(Store store,
+ const TypedRegion *R,
+ QualType Ty,
+ const MemRegion *superR) {
+
+ // At this point we have already checked in either RetrieveElement or
+ // RetrieveField if 'R' has a direct binding.
+
+ RegionBindings B = GetRegionBindings(store);
+
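+  // Walk up the region hierarchy: a default binding on an enclosing region
+  // supplies the value for all of its subregions.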
+ while (superR) {
+ if (const Optional<SVal> &D = getDefaultBinding(B, superR)) {
+ if (SymbolRef parentSym = D->getAsSymbol())
+ return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ if (D->isZeroConstant())
+ return ValMgr.makeZeroVal(Ty);
+
+ if (D->isUnknown())
+ return *D;
+
+ assert(0 && "Unknown default value");
+ }
+
+ // If our super region is a field or element itself, walk up the region
+ // hierarchy to see if there is a default value installed in an ancestor.
+ if (isa<FieldRegion>(superR) || isa<ElementRegion>(superR)) {
+ superR = cast<SubRegion>(superR)->getSuperRegion();
+ continue;
+ }
+
+ break;
+ }
+
+ // Lazy binding?
+ Store lazyBindingStore = NULL;
+ const MemRegion *lazyBindingRegion = NULL;
+ llvm::tie(lazyBindingStore, lazyBindingRegion) = GetLazyBinding(B, R);
+
+ if (lazyBindingRegion) {
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(lazyBindingRegion))
+ return RetrieveElement(lazyBindingStore, ER);
+ return RetrieveField(lazyBindingStore,
+ cast<FieldRegion>(lazyBindingRegion));
+ }
+
+ if (R->hasStackNonParametersStorage()) {
+ if (isa<ElementRegion>(R)) {
+ // Currently we don't reason specially about Clang-style vectors. Check
+ // if superR is a vector and if so return Unknown.
+ if (const TypedRegion *typedSuperR = dyn_cast<TypedRegion>(superR)) {
+ if (typedSuperR->getValueType(getContext())->isVectorType())
+ return UnknownVal();
+ }
+ }
+
+ return UndefinedVal();
+ }
+
+ // All other values are symbolic.
+ return ValMgr.getRegionValueSymbolVal(R);
+}
+
+SVal RegionStoreManager::RetrieveObjCIvar(Store store, const ObjCIvarRegion* R){
+
+ // Check if the region has a binding.
+ RegionBindings B = GetRegionBindings(store);
+
+ if (const Optional<SVal> &V = getDirectBinding(B, R))
+ return *V;
+
+ const MemRegion *superR = R->getSuperRegion();
+
+ // Check if the super region has a default binding.
+ if (const Optional<SVal> &V = getDefaultBinding(B, superR)) {
+ if (SymbolRef parentSym = V->getAsSymbol())
+ return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ // Other cases: give up.
+ return UnknownVal();
+ }
+
+ return RetrieveLazySymbol(R);
+}
+
+SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) {
+
+ // Check if the region has a binding.
+ RegionBindings B = GetRegionBindings(store);
+
+ if (const Optional<SVal> &V = getDirectBinding(B, R))
+ return *V;
+
+ // Lazily derive a value for the VarRegion.
+ const VarDecl *VD = R->getDecl();
+ QualType T = VD->getType();
+ const MemSpaceRegion *MS = R->getMemorySpace();
+
+ if (isa<UnknownSpaceRegion>(MS) ||
+ isa<StackArgumentsSpaceRegion>(MS))
+ return ValMgr.getRegionValueSymbolVal(R);
+
+ if (isa<GlobalsSpaceRegion>(MS)) {
+ if (VD->isFileVarDecl()) {
+ // Is 'VD' declared constant? If so, retrieve the constant value.
+ QualType CT = Ctx.getCanonicalType(T);
+ if (CT.isConstQualified()) {
+ const Expr *Init = VD->getInit();
+ // Do the null check first, as we want to call 'IgnoreParenCasts'.
+ if (Init)
+ if (const IntegerLiteral *IL =
+ dyn_cast<IntegerLiteral>(Init->IgnoreParenCasts())) {
+ const nonloc::ConcreteInt &V = ValMgr.makeIntVal(IL);
+ return ValMgr.getSValuator().EvalCast(V, Init->getType(),
+ IL->getType());
+ }
+ }
+
+ return ValMgr.getRegionValueSymbolVal(R);
+ }
+
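+    // Other globals (e.g., function-local statics) are zero-initialized
+    // per C semantics.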
+ if (T->isIntegerType())
+ return ValMgr.makeIntVal(0, T);
+ if (T->isPointerType())
+ return ValMgr.makeNull();
+
+ return UnknownVal();
+ }
+
+ return UndefinedVal();
+}
+
+SVal RegionStoreManager::RetrieveLazySymbol(const TypedRegion *R) {
+  // Lazily derive a symbolic value for the region.
+  return ValMgr.getRegionValueSymbolVal(R);
+}
+
+SVal RegionStoreManager::RetrieveStruct(Store store, const TypedRegion* R) {
+ QualType T = R->getValueType(getContext());
+ assert(T->isStructureOrClassType());
+ return ValMgr.makeLazyCompoundVal(store, R);
+}
+
+SVal RegionStoreManager::RetrieveArray(Store store, const TypedRegion * R) {
+ assert(isa<ConstantArrayType>(R->getValueType(getContext())));
+ return ValMgr.makeLazyCompoundVal(store, R);
+}
+
+//===----------------------------------------------------------------------===//
+// Binding values to regions.
+//===----------------------------------------------------------------------===//
+
+Store RegionStoreManager::Remove(Store store, Loc L) {
+ if (isa<loc::MemRegionVal>(L))
+ if (const MemRegion* R = cast<loc::MemRegionVal>(L).getRegion())
+ return Remove(GetRegionBindings(store), R).getRoot();
+
+ return store;
+}
+
+Store RegionStoreManager::Bind(Store store, Loc L, SVal V) {
+ if (isa<loc::ConcreteInt>(L))
+ return store;
+
+ // If we get here, the location should be a region.
+ const MemRegion *R = cast<loc::MemRegionVal>(L).getRegion();
+
+ // Check if the region is a struct region.
+ if (const TypedRegion* TR = dyn_cast<TypedRegion>(R))
+ if (TR->getValueType(getContext())->isStructureOrClassType())
+ return BindStruct(store, TR, V);
+
+ // Special case: the current region represents a cast and it and the super
+ // region both have pointer types or intptr_t types. If so, perform the
+ // bind to the super region.
+ // This is needed to support OSAtomicCompareAndSwap and friends or other
+  // loads that treat integers as pointers and vice versa.
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ if (ER->getIndex().isZeroConstant()) {
+ if (const TypedRegion *superR =
+ dyn_cast<TypedRegion>(ER->getSuperRegion())) {
+ ASTContext &Ctx = getContext();
+ QualType superTy = superR->getValueType(Ctx);
+ QualType erTy = ER->getValueType(Ctx);
+
+ if (IsAnyPointerOrIntptr(superTy, Ctx) &&
+ IsAnyPointerOrIntptr(erTy, Ctx)) {
+ V = ValMgr.getSValuator().EvalCast(V, superTy, erTy);
+ return Bind(store, loc::MemRegionVal(superR), V);
+ }
+ // For now, just invalidate the fields of the struct/union/class.
+ // FIXME: Precisely handle the fields of the record.
+ if (superTy->isRecordType())
+ return InvalidateRegion(store, superR, NULL, 0, NULL);
+ }
+ }
+ }
+ else if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+ // Binding directly to a symbolic region should be treated as binding
+ // to element 0.
+ QualType T = SR->getSymbol()->getType(getContext());
+
+ // FIXME: Is this the right way to handle symbols that are references?
+ if (const PointerType *PT = T->getAs<PointerType>())
+ T = PT->getPointeeType();
+ else
+ T = T->getAs<ReferenceType>()->getPointeeType();
+
+ R = GetElementZeroRegion(SR, T);
+ }
+
+ // Perform the binding.
+ RegionBindings B = GetRegionBindings(store);
+ return Add(B, R, BindingKey::Direct, V).getRoot();
+}
+
+Store RegionStoreManager::BindDecl(Store store, const VarRegion *VR,
+ SVal InitVal) {
+
+ QualType T = VR->getDecl()->getType();
+
+ if (T->isArrayType())
+ return BindArray(store, VR, InitVal);
+ if (T->isStructureOrClassType())
+ return BindStruct(store, VR, InitVal);
+
+ return Bind(store, ValMgr.makeLoc(VR), InitVal);
+}
+
+// FIXME: this method should be merged into Bind().
+Store RegionStoreManager::BindCompoundLiteral(Store store,
+ const CompoundLiteralExpr *CL,
+ const LocationContext *LC,
+ SVal V) {
+ return Bind(store, loc::MemRegionVal(MRMgr.getCompoundLiteralRegion(CL, LC)),
+ V);
+}
+
+Store RegionStoreManager::setImplicitDefaultValue(Store store,
+ const MemRegion *R,
+ QualType T) {
+ RegionBindings B = GetRegionBindings(store);
+ SVal V;
+
+ if (Loc::IsLocType(T))
+ V = ValMgr.makeNull();
+ else if (T->isIntegerType())
+ V = ValMgr.makeZeroVal(T);
+ else if (T->isStructureOrClassType() || T->isArrayType()) {
+ // Set the default value to a zero constant when it is a structure
+ // or array. The type doesn't really matter.
+ V = ValMgr.makeZeroVal(ValMgr.getContext().IntTy);
+ }
+ else {
+ return store;
+ }
+
+ return Add(B, R, BindingKey::Default, V).getRoot();
+}
+
+Store RegionStoreManager::BindArray(Store store, const TypedRegion* R,
+ SVal Init) {
+
+ ASTContext &Ctx = getContext();
+ const ArrayType *AT =
+ cast<ArrayType>(Ctx.getCanonicalType(R->getValueType(Ctx)));
+ QualType ElementTy = AT->getElementType();
+ Optional<uint64_t> Size;
+
+ if (const ConstantArrayType* CAT = dyn_cast<ConstantArrayType>(AT))
+ Size = CAT->getSize().getZExtValue();
+
+ // Check if the init expr is a StringLiteral.
+ if (isa<loc::MemRegionVal>(Init)) {
+ const MemRegion* InitR = cast<loc::MemRegionVal>(Init).getRegion();
+ const StringLiteral* S = cast<StringRegion>(InitR)->getStringLiteral();
+ const char* str = S->getStrData();
+ unsigned len = S->getByteLength();
+ unsigned j = 0;
+
+ // Copy bytes from the string literal into the target array. Trailing bytes
+ // in the array that are not covered by the string literal are initialized
+ // to zero.
+
+ // We assume that string constants are bound to
+ // constant arrays.
+ uint64_t size = Size.getValue();
+
+ for (uint64_t i = 0; i < size; ++i, ++j) {
+ if (j >= len)
+ break;
+
+ SVal Idx = ValMgr.makeArrayIndex(i);
+ const ElementRegion* ER = MRMgr.getElementRegion(ElementTy, Idx, R,
+ getContext());
+
+ SVal V = ValMgr.makeIntVal(str[j], sizeof(char)*8, true);
+ store = Bind(store, loc::MemRegionVal(ER), V);
+ }
+
+ return store;
+ }
+
+ // Handle lazy compound values.
+ if (nonloc::LazyCompoundVal *LCV = dyn_cast<nonloc::LazyCompoundVal>(&Init))
+ return CopyLazyBindings(*LCV, store, R);
+
+ // Remaining case: explicit compound values.
+
+ if (Init.isUnknown())
+ return setImplicitDefaultValue(store, R, ElementTy);
+
+ nonloc::CompoundVal& CV = cast<nonloc::CompoundVal>(Init);
+ nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
+ uint64_t i = 0;
+
+  for (; !Size.hasValue() || i < Size.getValue(); ++i, ++VI) {
+ // The init list might be shorter than the array length.
+ if (VI == VE)
+ break;
+
+ SVal Idx = ValMgr.makeArrayIndex(i);
+ const ElementRegion *ER = MRMgr.getElementRegion(ElementTy, Idx, R, getContext());
+
+ if (ElementTy->isStructureOrClassType())
+ store = BindStruct(store, ER, *VI);
+ else
+ store = Bind(store, ValMgr.makeLoc(ER), *VI);
+ }
+
+ // If the init list is shorter than the array length, set the
+ // array default value.
+ if (Size.hasValue() && i < Size.getValue())
+ store = setImplicitDefaultValue(store, R, ElementTy);
+
+ return store;
+}
+
+Store RegionStoreManager::BindStruct(Store store, const TypedRegion* R,
+ SVal V) {
+
+ if (!Features.supportsFields())
+ return store;
+
+ QualType T = R->getValueType(getContext());
+ assert(T->isStructureOrClassType());
+
+ const RecordType* RT = T->getAs<RecordType>();
+ RecordDecl* RD = RT->getDecl();
+
+ if (!RD->isDefinition())
+ return store;
+
+ // Handle lazy compound values.
+ if (const nonloc::LazyCompoundVal *LCV=dyn_cast<nonloc::LazyCompoundVal>(&V))
+ return CopyLazyBindings(*LCV, store, R);
+
+ // We may get non-CompoundVal accidentally due to imprecise cast logic.
+ // Ignore them and kill the field values.
+ if (V.isUnknown() || !isa<nonloc::CompoundVal>(V))
+ return KillStruct(store, R);
+
+ nonloc::CompoundVal& CV = cast<nonloc::CompoundVal>(V);
+ nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
+
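+  // Bind each value in the compound literal to its corresponding field,
+  // recursing into nested arrays and structs.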
+ RecordDecl::field_iterator FI, FE;
+
+ for (FI = RD->field_begin(), FE = RD->field_end(); FI != FE; ++FI, ++VI) {
+
+ if (VI == VE)
+ break;
+
+ QualType FTy = (*FI)->getType();
+ const FieldRegion* FR = MRMgr.getFieldRegion(*FI, R);
+
+ if (FTy->isArrayType())
+ store = BindArray(store, FR, *VI);
+ else if (FTy->isStructureOrClassType())
+ store = BindStruct(store, FR, *VI);
+ else
+ store = Bind(store, ValMgr.makeLoc(FR), *VI);
+ }
+
+  // There may be fewer values in the initializer list than there are
+  // fields in the struct; set a default value for the remaining fields.
+ if (FI != FE) {
+ RegionBindings B = GetRegionBindings(store);
+ B = Add(B, R, BindingKey::Default, ValMgr.makeIntVal(0, false));
+ store = B.getRoot();
+ }
+
+ return store;
+}
+
+Store RegionStoreManager::KillStruct(Store store, const TypedRegion* R) {
+ RegionBindings B = GetRegionBindings(store);
+ llvm::OwningPtr<RegionStoreSubRegionMap>
+ SubRegions(getRegionStoreSubRegionMap(store));
+ RemoveSubRegionBindings(B, R, *SubRegions);
+
+ // Set the default value of the struct region to "unknown".
+ return Add(B, R, BindingKey::Default, UnknownVal()).getRoot();
+}
+
+Store RegionStoreManager::CopyLazyBindings(nonloc::LazyCompoundVal V,
+ Store store, const TypedRegion *R) {
+
+ // Nuke the old bindings stemming from R.
+ RegionBindings B = GetRegionBindings(store);
+
+ llvm::OwningPtr<RegionStoreSubRegionMap>
+ SubRegions(getRegionStoreSubRegionMap(store));
+
+  // B is updated by the call to RemoveSubRegionBindings.
+ RemoveSubRegionBindings(B, R, *SubRegions.get());
+
+ // Now copy the bindings. This amounts to just binding 'V' to 'R'. This
+ // results in a zero-copy algorithm.
+ return Add(B, R, BindingKey::Direct, V).getRoot();
+}
+
+//===----------------------------------------------------------------------===//
+// "Raw" retrievals and bindings.
+//===----------------------------------------------------------------------===//
+
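+// Normalize ElementRegions with computable offsets into a (base region,
+// byte offset) key so that different views of the same memory map to the
+// same binding.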
+BindingKey BindingKey::Make(const MemRegion *R, Kind k) {
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ const RegionRawOffset &O = ER->getAsRawOffset();
+
+ if (O.getRegion())
+ return BindingKey(O.getRegion(), O.getByteOffset(), k);
+
+ // FIXME: There are some ElementRegions for which we cannot compute
+ // raw offsets yet, including regions with symbolic offsets.
+ }
+
+ return BindingKey(R, 0, k);
+}
+
+RegionBindings RegionStoreManager::Add(RegionBindings B, BindingKey K, SVal V) {
+ return RBFactory.Add(B, K, V);
+}
+
+RegionBindings RegionStoreManager::Add(RegionBindings B, const MemRegion *R,
+ BindingKey::Kind k, SVal V) {
+ return Add(B, BindingKey::Make(R, k), V);
+}
+
+const SVal *RegionStoreManager::Lookup(RegionBindings B, BindingKey K) {
+ return B.lookup(K);
+}
+
+const SVal *RegionStoreManager::Lookup(RegionBindings B,
+ const MemRegion *R,
+ BindingKey::Kind k) {
+ return Lookup(B, BindingKey::Make(R, k));
+}
+
+RegionBindings RegionStoreManager::Remove(RegionBindings B, BindingKey K) {
+ return RBFactory.Remove(B, K);
+}
+
+RegionBindings RegionStoreManager::Remove(RegionBindings B, const MemRegion *R,
+ BindingKey::Kind k){
+ return Remove(B, BindingKey::Make(R, k));
+}
+
+Store RegionStoreManager::Remove(Store store, BindingKey K) {
+ RegionBindings B = GetRegionBindings(store);
+ return Remove(B, K).getRoot();
+}
+
+//===----------------------------------------------------------------------===//
+// State pruning.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RemoveDeadBindingsWorker :
+ public ClusterAnalysis<RemoveDeadBindingsWorker> {
+ llvm::SmallVector<const SymbolicRegion*, 12> Postponed;
+ SymbolReaper &SymReaper;
+ Stmt *Loc;
+ const StackFrameContext *CurrentLCtx;
+
+public:
+ RemoveDeadBindingsWorker(RegionStoreManager &rm, GRStateManager &stateMgr,
+ RegionBindings b, SymbolReaper &symReaper,
+ Stmt *loc, const StackFrameContext *LCtx)
+ : ClusterAnalysis<RemoveDeadBindingsWorker>(rm, stateMgr, b),
+ SymReaper(symReaper), Loc(loc), CurrentLCtx(LCtx) {}
+
+ // Called by ClusterAnalysis.
+ void VisitAddedToCluster(const MemRegion *baseR, RegionCluster &C);
+ void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E);
+
+ void VisitBindingKey(BindingKey K);
+ bool UpdatePostponed();
+ void VisitBinding(SVal V);
+};
+}
+
+void RemoveDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
+ RegionCluster &C) {
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(baseR)) {
+ if (SymReaper.isLive(Loc, VR))
+ AddToWorkList(baseR, C);
+
+ return;
+ }
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR)) {
+ if (SymReaper.isLive(SR->getSymbol()))
+ AddToWorkList(SR, C);
+ else
+ Postponed.push_back(SR);
+
+ return;
+ }
+
+ // CXXThisRegion in the current or parent location context is live.
+ if (const CXXThisRegion *TR = dyn_cast<CXXThisRegion>(baseR)) {
+ const StackArgumentsSpaceRegion *StackReg =
+ cast<StackArgumentsSpaceRegion>(TR->getSuperRegion());
+ const StackFrameContext *RegCtx = StackReg->getStackFrame();
+ if (RegCtx == CurrentLCtx || RegCtx->isParentOf(CurrentLCtx))
+ AddToWorkList(TR, C);
+ }
+}
+
+void RemoveDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
+ BindingKey *I, BindingKey *E) {
+ for ( ; I != E; ++I)
+ VisitBindingKey(*I);
+}
+
+void RemoveDeadBindingsWorker::VisitBinding(SVal V) {
+ // Is it a LazyCompoundVal? All referenced regions are live as well.
+ if (const nonloc::LazyCompoundVal *LCS =
+ dyn_cast<nonloc::LazyCompoundVal>(&V)) {
+
+ const MemRegion *LazyR = LCS->getRegion();
+ RegionBindings B = RegionStoreManager::GetRegionBindings(LCS->getStore());
+ for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
+ const MemRegion *baseR = RI.getKey().getRegion();
+ if (cast<SubRegion>(baseR)->isSubRegionOf(LazyR))
+ VisitBinding(RI.getData());
+ }
+ return;
+ }
+
+ // If V is a region, then add it to the worklist.
+ if (const MemRegion *R = V.getAsRegion())
+ AddToWorkList(R);
+
+ // Update the set of live symbols.
+ for (SVal::symbol_iterator SI=V.symbol_begin(), SE=V.symbol_end();
+ SI!=SE;++SI)
+ SymReaper.markLive(*SI);
+}
+
+void RemoveDeadBindingsWorker::VisitBindingKey(BindingKey K) {
+ const MemRegion *R = K.getRegion();
+
+ // Mark this region "live" by adding it to the worklist. This will cause
+ // use to visit all regions in the cluster (if we haven't visited them
+ // already).
+ if (AddToWorkList(R)) {
+ // Mark the symbol for any live SymbolicRegion as "live". This means we
+ // should continue to track that symbol.
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(R))
+ SymReaper.markLive(SymR->getSymbol());
+
+ // For BlockDataRegions, enqueue the VarRegions for variables marked
+    // with __block (passed by reference) via BlockDeclRefExprs.
+ if (const BlockDataRegion *BD = dyn_cast<BlockDataRegion>(R)) {
+ for (BlockDataRegion::referenced_vars_iterator
+ RI = BD->referenced_vars_begin(), RE = BD->referenced_vars_end();
+ RI != RE; ++RI) {
+ if ((*RI)->getDecl()->getAttr<BlocksAttr>())
+ AddToWorkList(*RI);
+ }
+
+ // No possible data bindings on a BlockDataRegion.
+ return;
+ }
+ }
+
+ // Visit the data binding for K.
+ if (const SVal *V = RM.Lookup(B, K))
+ VisitBinding(*V);
+}
+
+bool RemoveDeadBindingsWorker::UpdatePostponed() {
+ // See if any postponed SymbolicRegions are actually live now, after
+ // having done a scan.
+ bool changed = false;
+
+ for (llvm::SmallVectorImpl<const SymbolicRegion*>::iterator
+ I = Postponed.begin(), E = Postponed.end() ; I != E ; ++I) {
+ if (const SymbolicRegion *SR = cast_or_null<SymbolicRegion>(*I)) {
+ if (SymReaper.isLive(SR->getSymbol())) {
+ changed |= AddToWorkList(SR);
+ *I = NULL;
+ }
+ }
+ }
+
+ return changed;
+}
+
+const GRState *RegionStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
+ const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper,
+ llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
+{
+ RegionBindings B = GetRegionBindings(state.getStore());
+ RemoveDeadBindingsWorker W(*this, StateMgr, B, SymReaper, Loc, LCtx);
+ W.GenerateClusters();
+
+ // Enqueue the region roots onto the worklist.
+ for (llvm::SmallVectorImpl<const MemRegion*>::iterator I=RegionRoots.begin(),
+ E=RegionRoots.end(); I!=E; ++I)
+ W.AddToWorkList(*I);
+
+ do W.RunWorkList(); while (W.UpdatePostponed());
+
+ // We have now scanned the store, marking reachable regions and symbols
+  // as live. We now remove all the dead regions from the store and notify
+  // the SymbolReaper of the symbols that may now be dead.
+ for (RegionBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ const BindingKey &K = I.getKey();
+
+ // If the cluster has been visited, we know the region has been marked.
+ if (W.isVisited(K.getRegion()))
+ continue;
+
+ // Remove the dead entry.
+ B = Remove(B, K);
+
+ // Mark all non-live symbols that this binding references as dead.
+ if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(K.getRegion()))
+ SymReaper.maybeDead(SymR->getSymbol());
+
+ SVal X = I.getData();
+ SVal::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
+ for (; SI != SE; ++SI)
+ SymReaper.maybeDead(*SI);
+ }
+ state.setStore(B.getRoot());
+ const GRState *s = StateMgr.getPersistentState(state);
+ // Remove the extents of dead symbolic regions.
+ llvm::ImmutableMap<const MemRegion*,SVal> Extents = s->get<RegionExtents>();
+ for (llvm::ImmutableMap<const MemRegion *, SVal>::iterator I=Extents.begin(),
+ E = Extents.end(); I != E; ++I) {
+ if (!W.isVisited(I->first))
+ s = s->remove<RegionExtents>(I->first);
+ }
+ return s;
+}
+
+
+GRState const *RegionStoreManager::EnterStackFrame(GRState const *state,
+ StackFrameContext const *frame) {
+ FunctionDecl const *FD = cast<FunctionDecl>(frame->getDecl());
+ FunctionDecl::param_const_iterator PI = FD->param_begin();
+ Store store = state->getStore();
+
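+  // Bind each actual argument's value to the corresponding formal
+  // parameter's VarRegion in the callee's stack frame.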
+ if (CallExpr const *CE = dyn_cast<CallExpr>(frame->getCallSite())) {
+ CallExpr::const_arg_iterator AI = CE->arg_begin(), AE = CE->arg_end();
+
+ // Copy the arg expression value to the arg variables.
+ for (; AI != AE; ++AI, ++PI) {
+ SVal ArgVal = state->getSVal(*AI);
+ store = Bind(store, ValMgr.makeLoc(MRMgr.getVarRegion(*PI,frame)),ArgVal);
+ }
+ } else if (const CXXConstructExpr *CE =
+ dyn_cast<CXXConstructExpr>(frame->getCallSite())) {
+ CXXConstructExpr::const_arg_iterator AI = CE->arg_begin(),
+ AE = CE->arg_end();
+
+ // Copy the arg expression value to the arg variables.
+ for (; AI != AE; ++AI, ++PI) {
+ SVal ArgVal = state->getSVal(*AI);
+ store = Bind(store, ValMgr.makeLoc(MRMgr.getVarRegion(*PI,frame)),ArgVal);
+ }
+ } else
+ assert(0 && "Unhandled call expression.");
+
+ return state->makeWithStore(store);
+}
+
+//===----------------------------------------------------------------------===//
+// Utility methods.
+//===----------------------------------------------------------------------===//
+
+void RegionStoreManager::print(Store store, llvm::raw_ostream& OS,
+ const char* nl, const char *sep) {
+ RegionBindings B = GetRegionBindings(store);
+ OS << "Store (direct and default bindings):" << nl;
+
+ for (RegionBindings::iterator I = B.begin(), E = B.end(); I != E; ++I)
+ OS << ' ' << I.getKey() << " : " << I.getData() << nl;
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/ReturnPointerRangeChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/ReturnPointerRangeChecker.cpp
new file mode 100644
index 0000000..14edf56
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/ReturnPointerRangeChecker.cpp
@@ -0,0 +1,97 @@
+//== ReturnPointerRangeChecker.cpp ------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ReturnPointerRangeChecker, a path-sensitive check
+// that looks for an out-of-bounds pointer being returned to callers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+
+using namespace clang;
+
+namespace {
+class ReturnPointerRangeChecker :
+ public CheckerVisitor<ReturnPointerRangeChecker> {
+ BuiltinBug *BT;
+public:
+ ReturnPointerRangeChecker() : BT(0) {}
+ static void *getTag();
+ void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *RS);
+};
+}
+
+void clang::RegisterReturnPointerRangeChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new ReturnPointerRangeChecker());
+}
+
+void *ReturnPointerRangeChecker::getTag() {
+ static int x = 0; return &x;
+}
+
+void ReturnPointerRangeChecker::PreVisitReturnStmt(CheckerContext &C,
+ const ReturnStmt *RS) {
+ const GRState *state = C.getState();
+
+ const Expr *RetE = RS->getRetValue();
+ if (!RetE)
+ return;
+
+ SVal V = state->getSVal(RetE);
+ const MemRegion *R = V.getAsRegion();
+ if (!R)
+ return;
+
+ R = R->StripCasts();
+ if (!R)
+ return;
+
+ const ElementRegion *ER = dyn_cast_or_null<ElementRegion>(R);
+ if (!ER)
+ return;
+
+ DefinedOrUnknownSVal &Idx = cast<DefinedOrUnknownSVal>(ER->getIndex());
+
+ // FIXME: All of this out-of-bounds checking should eventually be refactored
+ // into a common place.
+
+ DefinedOrUnknownSVal NumElements
+ = C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
+ ER->getValueType(C.getASTContext()));
+
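+  // Constrain the index against the extent. If only the out-of-bound
+  // assumption is feasible, the returned pointer definitely lies outside
+  // the object.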
+ const GRState *StInBound = state->AssumeInBound(Idx, NumElements, true);
+ const GRState *StOutBound = state->AssumeInBound(Idx, NumElements, false);
+ if (StOutBound && !StInBound) {
+ ExplodedNode *N = C.GenerateSink(StOutBound);
+
+ if (!N)
+ return;
+
+  // FIXME: This bug corresponds to CWE-466. Eventually we should have bug
+ // types explicitly reference such exploit categories (when applicable).
+ if (!BT)
+ BT = new BuiltinBug("Return of pointer value outside of expected range",
+ "Returned pointer value points outside the original object "
+ "(potential buffer overflow)");
+
+ // FIXME: It would be nice to eventually make this diagnostic more clear,
+ // e.g., by referencing the original declaration or by saying *why* this
+ // reference is outside the range.
+
+ // Generate a report for this bug.
+ RangedBugReport *report =
+ new RangedBugReport(*BT, BT->getDescription(), N);
+
+ report->addRange(RetE->getSourceRange());
+ C.EmitReport(report);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/ReturnStackAddressChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/ReturnStackAddressChecker.cpp
new file mode 100644
index 0000000..35b1cde
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/ReturnStackAddressChecker.cpp
@@ -0,0 +1,125 @@
+//== ReturnStackAddressChecker.cpp ------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ReturnStackAddressChecker, a path-sensitive check
+// that looks for the addresses of stack variables being returned to
+// callers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+
+namespace {
+class ReturnStackAddressChecker :
+ public CheckerVisitor<ReturnStackAddressChecker> {
+ BuiltinBug *BT;
+public:
+ ReturnStackAddressChecker() : BT(0) {}
+ static void *getTag();
+ void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *RS);
+private:
+ void EmitStackError(CheckerContext &C, const MemRegion *R, const Expr *RetE);
+};
+}
+
+void clang::RegisterReturnStackAddressChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new ReturnStackAddressChecker());
+}
+
+void *ReturnStackAddressChecker::getTag() {
+ static int x = 0; return &x;
+}
+
+void ReturnStackAddressChecker::EmitStackError(CheckerContext &C,
+ const MemRegion *R,
+ const Expr *RetE) {
+ ExplodedNode *N = C.GenerateSink();
+
+ if (!N)
+ return;
+
+ if (!BT)
+ BT = new BuiltinBug("Return of address to stack-allocated memory");
+
+ // Generate a report for this bug.
+ llvm::SmallString<512> buf;
+ llvm::raw_svector_ostream os(buf);
+ SourceRange range;
+
+ // Get the base region, stripping away fields and elements.
+ R = R->getBaseRegion();
+
+ // Check if the region is a compound literal.
+ if (const CompoundLiteralRegion* CR = dyn_cast<CompoundLiteralRegion>(R)) {
+ const CompoundLiteralExpr* CL = CR->getLiteralExpr();
+ os << "Address of stack memory associated with a compound literal "
+ "declared on line "
+ << C.getSourceManager().getInstantiationLineNumber(CL->getLocStart())
+ << " returned to caller";
+ range = CL->getSourceRange();
+ }
+ else if (const AllocaRegion* AR = dyn_cast<AllocaRegion>(R)) {
+ const Expr* ARE = AR->getExpr();
+ SourceLocation L = ARE->getLocStart();
+ range = ARE->getSourceRange();
+ os << "Address of stack memory allocated by call to alloca() on line "
+ << C.getSourceManager().getInstantiationLineNumber(L)
+ << " returned to caller";
+ }
+ else if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) {
+ const BlockDecl *BD = BR->getCodeRegion()->getDecl();
+ SourceLocation L = BD->getLocStart();
+ range = BD->getSourceRange();
+ os << "Address of stack-allocated block declared on line "
+ << C.getSourceManager().getInstantiationLineNumber(L)
+ << " returned to caller";
+ }
+ else if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ os << "Address of stack memory associated with local variable '"
+ << VR->getString() << "' returned";
+ range = VR->getDecl()->getSourceRange();
+ }
+ else {
+ assert(false && "Invalid region in ReturnStackAddressChecker.");
+ return;
+ }
+
+ RangedBugReport *report = new RangedBugReport(*BT, os.str(), N);
+ report->addRange(RetE->getSourceRange());
+ if (range.isValid())
+ report->addRange(range);
+
+ C.EmitReport(report);
+}
+
+void ReturnStackAddressChecker::PreVisitReturnStmt(CheckerContext &C,
+ const ReturnStmt *RS) {
+
+ const Expr *RetE = RS->getRetValue();
+ if (!RetE)
+ return;
+
+ SVal V = C.getState()->getSVal(RetE);
+ const MemRegion *R = V.getAsRegion();
+
+  if (!R || !R->hasStackStorage())
+    return;
+
+  // R is stack-allocated; report the escaping address.
+  EmitStackError(C, R, RetE);
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/ReturnUndefChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/ReturnUndefChecker.cpp
new file mode 100644
index 0000000..52a0b30
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/ReturnUndefChecker.cpp
@@ -0,0 +1,67 @@
+//== ReturnUndefChecker.cpp -------------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ReturnUndefChecker, a path-sensitive check that looks
+// for undefined or garbage values being returned to the caller.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+
+using namespace clang;
+
+namespace {
+class ReturnUndefChecker :
+ public CheckerVisitor<ReturnUndefChecker> {
+ BuiltinBug *BT;
+public:
+ ReturnUndefChecker() : BT(0) {}
+ static void *getTag();
+ void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *RS);
+};
+}
+
+void clang::RegisterReturnUndefChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new ReturnUndefChecker());
+}
+
+void *ReturnUndefChecker::getTag() {
+ static int x = 0; return &x;
+}
+
+void ReturnUndefChecker::PreVisitReturnStmt(CheckerContext &C,
+ const ReturnStmt *RS) {
+
+ const Expr *RetE = RS->getRetValue();
+ if (!RetE)
+ return;
+
+ if (!C.getState()->getSVal(RetE).isUndef())
+ return;
+
+ ExplodedNode *N = C.GenerateSink();
+
+ if (!N)
+ return;
+
+ if (!BT)
+ BT = new BuiltinBug("Garbage return value",
+ "Undefined or garbage value returned to caller");
+
+ EnhancedBugReport *report =
+ new EnhancedBugReport(*BT, BT->getDescription(), N);
+
+ report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, RetE);
+
+ C.EmitReport(report);
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/SVals.cpp b/contrib/llvm/tools/clang/lib/Checker/SVals.cpp
new file mode 100644
index 0000000..d756be7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/SVals.cpp
@@ -0,0 +1,347 @@
+//= SVals.cpp - Abstract RValues for Path-Sens. Value Tracking --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SVal, Loc, and NonLoc, classes that represent
+// abstract r-values for use with path-sensitive value tracking.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/Basic/IdentifierTable.h"
+
+using namespace clang;
+using llvm::dyn_cast;
+using llvm::cast;
+using llvm::APSInt;
+
+//===----------------------------------------------------------------------===//
+// Symbol iteration within an SVal.
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// Utility methods.
+//===----------------------------------------------------------------------===//
+
+bool SVal::hasConjuredSymbol() const {
+ if (const nonloc::SymbolVal* SV = dyn_cast<nonloc::SymbolVal>(this)) {
+ SymbolRef sym = SV->getSymbol();
+ if (isa<SymbolConjured>(sym))
+ return true;
+ }
+
+ if (const loc::MemRegionVal *RV = dyn_cast<loc::MemRegionVal>(this)) {
+ const MemRegion *R = RV->getRegion();
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+ SymbolRef sym = SR->getSymbol();
+ if (isa<SymbolConjured>(sym))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+const FunctionDecl *SVal::getAsFunctionDecl() const {
+ if (const loc::MemRegionVal* X = dyn_cast<loc::MemRegionVal>(this)) {
+ const MemRegion* R = X->getRegion();
+ if (const FunctionTextRegion *CTR = R->getAs<FunctionTextRegion>())
+ return CTR->getDecl();
+ }
+
+ return NULL;
+}
+
+/// getAsLocSymbol - If this SVal is a location (subclasses Loc) and
+/// wraps a symbol, return that SymbolRef. Otherwise return 0.
+// FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
+SymbolRef SVal::getAsLocSymbol() const {
+ if (const loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(this)) {
+ const MemRegion *R = X->StripCasts();
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(R))
+ return SymR->getSymbol();
+ }
+ return NULL;
+}
+
+/// Get the symbol in the SVal or its base region.
+SymbolRef SVal::getLocSymbolInBase() const {
+ const loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(this);
+
+ if (!X)
+ return 0;
+
+ const MemRegion *R = X->getRegion();
+
+ while (const SubRegion *SR = dyn_cast<SubRegion>(R)) {
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SR))
+ return SymR->getSymbol();
+ else
+ R = SR->getSuperRegion();
+ }
+
+ return 0;
+}
+
+/// getAsSymbol - If this SVal wraps a symbol, return that SymbolRef.
+/// Otherwise return 0.
+// FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
+SymbolRef SVal::getAsSymbol() const {
+ if (const nonloc::SymbolVal *X = dyn_cast<nonloc::SymbolVal>(this))
+ return X->getSymbol();
+
+ if (const nonloc::SymExprVal *X = dyn_cast<nonloc::SymExprVal>(this))
+ if (SymbolRef Y = dyn_cast<SymbolData>(X->getSymbolicExpression()))
+ return Y;
+
+ return getAsLocSymbol();
+}
+
+/// getAsSymbolicExpression - If this SVal wraps a symbolic expression, then
+/// return that expression. Otherwise return NULL.
+const SymExpr *SVal::getAsSymbolicExpression() const {
+ if (const nonloc::SymExprVal *X = dyn_cast<nonloc::SymExprVal>(this))
+ return X->getSymbolicExpression();
+
+ return getAsSymbol();
+}
+
+const MemRegion *SVal::getAsRegion() const {
+ if (const loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(this))
+ return X->getRegion();
+
+ if (const nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(this)) {
+ return X->getLoc().getAsRegion();
+ }
+
+ return 0;
+}
+
+const MemRegion *loc::MemRegionVal::StripCasts() const {
+ const MemRegion *R = getRegion();
+ return R ? R->StripCasts() : NULL;
+}
+
+bool SVal::symbol_iterator::operator==(const symbol_iterator &X) const {
+ return itr == X.itr;
+}
+
+bool SVal::symbol_iterator::operator!=(const symbol_iterator &X) const {
+ return itr != X.itr;
+}
+
+SVal::symbol_iterator::symbol_iterator(const SymExpr *SE) {
+ itr.push_back(SE);
+ while (!isa<SymbolData>(itr.back())) expand();
+}
+
+SVal::symbol_iterator& SVal::symbol_iterator::operator++() {
+ assert(!itr.empty() && "attempting to iterate on an 'end' iterator");
+ assert(isa<SymbolData>(itr.back()));
+ itr.pop_back();
+ if (!itr.empty())
+ while (!isa<SymbolData>(itr.back())) expand();
+ return *this;
+}
+
+SymbolRef SVal::symbol_iterator::operator*() {
+ assert(!itr.empty() && "attempting to dereference an 'end' iterator");
+ return cast<SymbolData>(itr.back());
+}
+
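+// Replace the SymExpr at the back of the stack with its sub-expressions.
+// Callers loop on expand() until a SymbolData leaf is at the back.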
+void SVal::symbol_iterator::expand() {
+ const SymExpr *SE = itr.back();
+ itr.pop_back();
+
+ if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SE)) {
+ itr.push_back(SIE->getLHS());
+ return;
+ }
+ else if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(SE)) {
+ itr.push_back(SSE->getLHS());
+ itr.push_back(SSE->getRHS());
+ return;
+ }
+
+ assert(false && "unhandled expansion case");
+}
+
+const void *nonloc::LazyCompoundVal::getStore() const {
+ return static_cast<const LazyCompoundValData*>(Data)->getStore();
+}
+
+const TypedRegion *nonloc::LazyCompoundVal::getRegion() const {
+ return static_cast<const LazyCompoundValData*>(Data)->getRegion();
+}
+
+//===----------------------------------------------------------------------===//
+// Other Iterators.
+//===----------------------------------------------------------------------===//
+
+nonloc::CompoundVal::iterator nonloc::CompoundVal::begin() const {
+ return getValue()->begin();
+}
+
+nonloc::CompoundVal::iterator nonloc::CompoundVal::end() const {
+ return getValue()->end();
+}
+
+//===----------------------------------------------------------------------===//
+// Useful predicates.
+//===----------------------------------------------------------------------===//
+
+bool SVal::isConstant() const {
+ return isa<nonloc::ConcreteInt>(this) || isa<loc::ConcreteInt>(this);
+}
+
+bool SVal::isZeroConstant() const {
+ if (isa<loc::ConcreteInt>(*this))
+ return cast<loc::ConcreteInt>(*this).getValue() == 0;
+ else if (isa<nonloc::ConcreteInt>(*this))
+ return cast<nonloc::ConcreteInt>(*this).getValue() == 0;
+ else
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function dispatch for Non-Locs.
+//===----------------------------------------------------------------------===//
+
+SVal nonloc::ConcreteInt::evalBinOp(ValueManager &ValMgr,
+ BinaryOperator::Opcode Op,
+ const nonloc::ConcreteInt& R) const {
+ const llvm::APSInt* X =
+ ValMgr.getBasicValueFactory().EvaluateAPSInt(Op, getValue(), R.getValue());
+
+ if (X)
+ return nonloc::ConcreteInt(*X);
+ else
+ return UndefinedVal();
+}
+
+nonloc::ConcreteInt
+nonloc::ConcreteInt::evalComplement(ValueManager &ValMgr) const {
+ return ValMgr.makeIntVal(~getValue());
+}
+
+nonloc::ConcreteInt nonloc::ConcreteInt::evalMinus(ValueManager &ValMgr) const {
+ return ValMgr.makeIntVal(-getValue());
+}
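+
+// Editorial sketch (not part of the original patch): folding two concrete
+// non-location integers with evalBinOp above. 'ValMgr' and 'Ctx' are
+// hypothetical locals:
+//
+//   SVal L = ValMgr.makeIntVal(6, Ctx.IntTy);
+//   SVal R = ValMgr.makeIntVal(2, Ctx.IntTy);
+//   SVal D = cast<nonloc::ConcreteInt>(L)
+//              .evalBinOp(ValMgr, BinaryOperator::Div,
+//                         cast<nonloc::ConcreteInt>(R));  // ConcreteInt 3
+//
+// If EvaluateAPSInt cannot fold the operation (e.g. division by zero),
+// evalBinOp returns UndefinedVal instead.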
+
+//===----------------------------------------------------------------------===//
+// Transfer function dispatch for Locs.
+//===----------------------------------------------------------------------===//
+
+SVal loc::ConcreteInt::EvalBinOp(BasicValueFactory& BasicVals,
+ BinaryOperator::Opcode Op,
+ const loc::ConcreteInt& R) const {
+
+ assert (Op == BinaryOperator::Add || Op == BinaryOperator::Sub ||
+ (Op >= BinaryOperator::LT && Op <= BinaryOperator::NE));
+
+ const llvm::APSInt* X = BasicVals.EvaluateAPSInt(Op, getValue(), R.getValue());
+
+ if (X)
+ return loc::ConcreteInt(*X);
+ else
+ return UndefinedVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Pretty-Printing.
+//===----------------------------------------------------------------------===//
+
+void SVal::dump() const { dumpToStream(llvm::errs()); }
+
+void SVal::dumpToStream(llvm::raw_ostream& os) const {
+ switch (getBaseKind()) {
+ case UnknownKind:
+ os << "Invalid";
+ break;
+ case NonLocKind:
+ cast<NonLoc>(this)->dumpToStream(os);
+ break;
+ case LocKind:
+ cast<Loc>(this)->dumpToStream(os);
+ break;
+ case UndefinedKind:
+ os << "Undefined";
+ break;
+ default:
+ assert (false && "Invalid SVal.");
+ }
+}
+
+void NonLoc::dumpToStream(llvm::raw_ostream& os) const {
+ switch (getSubKind()) {
+ case nonloc::ConcreteIntKind:
+ os << cast<nonloc::ConcreteInt>(this)->getValue().getZExtValue();
+ if (cast<nonloc::ConcreteInt>(this)->getValue().isUnsigned())
+ os << 'U';
+ break;
+ case nonloc::SymbolValKind:
+ os << '$' << cast<nonloc::SymbolVal>(this)->getSymbol();
+ break;
+ case nonloc::SymExprValKind: {
+ const nonloc::SymExprVal& C = *cast<nonloc::SymExprVal>(this);
+ const SymExpr *SE = C.getSymbolicExpression();
+ os << SE;
+ break;
+ }
+ case nonloc::LocAsIntegerKind: {
+ const nonloc::LocAsInteger& C = *cast<nonloc::LocAsInteger>(this);
+ os << C.getLoc() << " [as " << C.getNumBits() << " bit integer]";
+ break;
+ }
+ case nonloc::CompoundValKind: {
+ const nonloc::CompoundVal& C = *cast<nonloc::CompoundVal>(this);
+ os << "compoundVal{";
+ bool first = true;
+ for (nonloc::CompoundVal::iterator I=C.begin(), E=C.end(); I!=E; ++I) {
+ if (first) {
+ os << ' '; first = false;
+ }
+ else
+ os << ", ";
+
+ (*I).dumpToStream(os);
+ }
+ os << "}";
+ break;
+ }
+ case nonloc::LazyCompoundValKind: {
+ const nonloc::LazyCompoundVal &C = *cast<nonloc::LazyCompoundVal>(this);
+ os << "lazyCompoundVal{" << const_cast<void *>(C.getStore())
+ << ',' << C.getRegion()
+ << '}';
+ break;
+ }
+ default:
+ assert (false && "Pretty-printed not implemented for this NonLoc.");
+ break;
+ }
+}
+
+void Loc::dumpToStream(llvm::raw_ostream& os) const {
+ switch (getSubKind()) {
+ case loc::ConcreteIntKind:
+ os << cast<loc::ConcreteInt>(this)->getValue().getZExtValue() << " (Loc)";
+ break;
+ case loc::GotoLabelKind:
+ os << "&&" << cast<loc::GotoLabel>(this)->getLabel()->getID()->getName();
+ break;
+ case loc::MemRegionKind:
+ os << '&' << cast<loc::MemRegionVal>(this)->getRegion()->getString();
+ break;
+ default:
+ assert(false && "Pretty-printing not implemented for this Loc.");
+ break;
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/SValuator.cpp b/contrib/llvm/tools/clang/lib/Checker/SValuator.cpp
new file mode 100644
index 0000000..542fc1b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/SValuator.cpp
@@ -0,0 +1,157 @@
+// SValuator.cpp - Basic class for all SValuator implementations --*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SValuator, the base class for all (complete) SValuator
+// implementations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/SValuator.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+
+using namespace clang;
+
+SVal SValuator::EvalBinOp(const GRState *ST, BinaryOperator::Opcode Op,
+ SVal L, SVal R, QualType T) {
+
+ if (L.isUndef() || R.isUndef())
+ return UndefinedVal();
+
+ if (L.isUnknown() || R.isUnknown())
+ return UnknownVal();
+
+ if (isa<Loc>(L)) {
+ if (isa<Loc>(R))
+ return EvalBinOpLL(Op, cast<Loc>(L), cast<Loc>(R), T);
+
+ return EvalBinOpLN(ST, Op, cast<Loc>(L), cast<NonLoc>(R), T);
+ }
+
+ if (isa<Loc>(R)) {
+ // Support pointer arithmetic where the increment/decrement operand
+ // is on the left and the pointer on the right.
+ assert(Op == BinaryOperator::Add || Op == BinaryOperator::Sub);
+
+ // Commute the operands.
+ return EvalBinOpLN(ST, Op, cast<Loc>(R), cast<NonLoc>(L), T);
+ }
+
+ return EvalBinOpNN(ST, Op, cast<NonLoc>(L), cast<NonLoc>(R), T);
+}
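+
+// Editorial sketch (not part of the original patch): for 'q = 1 + p' with
+// pointer 'p', L is a NonLoc and R a Loc, so the dispatch above takes the
+// isa<Loc>(R) branch, commutes the operands, and evaluates
+// EvalBinOpLN(ST, Add, p, 1, T).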
+
+DefinedOrUnknownSVal SValuator::EvalEQ(const GRState *ST,
+ DefinedOrUnknownSVal L,
+ DefinedOrUnknownSVal R) {
+ return cast<DefinedOrUnknownSVal>(EvalBinOp(ST, BinaryOperator::EQ, L, R,
+ ValMgr.getContext().IntTy));
+}
+
+SVal SValuator::EvalCast(SVal val, QualType castTy, QualType originalTy) {
+ if (val.isUnknownOrUndef() || castTy == originalTy)
+ return val;
+
+ ASTContext &C = ValMgr.getContext();
+
+ // For const casts, just propagate the value.
+ if (!castTy->isVariableArrayType() && !originalTy->isVariableArrayType())
+ if (C.hasSameUnqualifiedType(castTy, originalTy))
+ return val;
+
+ // Check for casts to real or complex numbers. We don't handle these at all
+ // right now.
+ if (castTy->isFloatingType() || castTy->isAnyComplexType())
+ return UnknownVal();
+
+ // Check for casts from integers to integers.
+ if (castTy->isIntegerType() && originalTy->isIntegerType())
+ return EvalCastNL(cast<NonLoc>(val), castTy);
+
+ // Check for casts from pointers to integers.
+ if (castTy->isIntegerType() && Loc::IsLocType(originalTy))
+ return EvalCastL(cast<Loc>(val), castTy);
+
+ // Check for casts from integers to pointers.
+ if (Loc::IsLocType(castTy) && originalTy->isIntegerType()) {
+ if (nonloc::LocAsInteger *LV = dyn_cast<nonloc::LocAsInteger>(&val)) {
+ if (const MemRegion *R = LV->getLoc().getAsRegion()) {
+ StoreManager &storeMgr = ValMgr.getStateManager().getStoreManager();
+ R = storeMgr.CastRegion(R, castTy);
+ return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
+ }
+ return LV->getLoc();
+ }
+ goto DispatchCast;
+ }
+
+ // Just pass through function and block pointers.
+ if (originalTy->isBlockPointerType() || originalTy->isFunctionPointerType()) {
+ assert(Loc::IsLocType(castTy));
+ return val;
+ }
+
+ // Check for casts from array type to another type.
+ if (originalTy->isArrayType()) {
+ // We will always decay to a pointer.
+ val = ValMgr.getStateManager().ArrayToPointer(cast<Loc>(val));
+
+ // Are we casting from an array to a pointer? If so just pass on
+ // the decayed value.
+ if (castTy->isPointerType())
+ return val;
+
+ // Are we casting from an array to an integer? If so, cast the decayed
+ // pointer value to an integer.
+ assert(castTy->isIntegerType());
+
+ // FIXME: Keep these here for now in case we decide soon that we
+ // need the original decayed type.
+ // QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
+ // QualType pointerTy = C.getPointerType(elemTy);
+ return EvalCastL(cast<Loc>(val), castTy);
+ }
+
+ // Check for casts from a region to a specific type.
+ if (const MemRegion *R = val.getAsRegion()) {
+ // FIXME: We should handle the case where we strip off view layers to get
+ // to a desugared type.
+
+ assert(Loc::IsLocType(castTy));
+ // We get a symbolic function pointer for a dereference of a function
+ // pointer, but it is of function type. Example:
+
+ // struct FPRec {
+ // void (*my_func)(int * x);
+ // };
+ //
+ // int bar(int x);
+ //
+ // int f1_a(struct FPRec* foo) {
+ // int x;
+ // (*foo->my_func)(&x);
+ // return bar(x)+1; // no-warning
+ // }
+
+ assert(Loc::IsLocType(originalTy) || originalTy->isFunctionType() ||
+ originalTy->isBlockPointerType());
+
+ StoreManager &storeMgr = ValMgr.getStateManager().getStoreManager();
+
+ // Delegate to store manager to get the result of casting a region to a
+ // different type. If the MemRegion* returned is NULL, this expression
+ // evaluates to UnknownVal.
+ R = storeMgr.CastRegion(R, castTy);
+ return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
+ }
+
+DispatchCast:
+ // All other cases.
+ return isa<Loc>(val) ? EvalCastL(cast<Loc>(val), castTy)
+ : EvalCastNL(cast<NonLoc>(val), castTy);
+}
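+
+// Editorial sketch (not part of the original patch): modeling '(long)p'
+// for a symbolic pointer 'p'. 'SVator', 'Ctx', 'PtrVal' and 'PtrTy' are
+// hypothetical:
+//
+//   SVal AsInt = SVator.EvalCast(PtrVal, Ctx.LongTy, PtrTy);
+//
+// This takes the pointer-to-integer branch above and, for a non-concrete
+// location, ends up wrapping the Loc in a nonloc::LocAsInteger.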
diff --git a/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.cpp b/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.cpp
new file mode 100644
index 0000000..8c423a9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.cpp
@@ -0,0 +1,249 @@
+//== SimpleConstraintManager.cpp --------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SimpleConstraintManager, a class that holds code shared
+// between BasicConstraintManager and RangeConstraintManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SimpleConstraintManager.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/Checker/PathSensitive/Checker.h"
+
+namespace clang {
+
+SimpleConstraintManager::~SimpleConstraintManager() {}
+
+bool SimpleConstraintManager::canReasonAbout(SVal X) const {
+ if (nonloc::SymExprVal *SymVal = dyn_cast<nonloc::SymExprVal>(&X)) {
+ const SymExpr *SE = SymVal->getSymbolicExpression();
+
+ if (isa<SymbolData>(SE))
+ return true;
+
+ if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SE)) {
+ switch (SIE->getOpcode()) {
+ // We don't reason yet about bitwise-constraints on symbolic values.
+ case BinaryOperator::And:
+ case BinaryOperator::Or:
+ case BinaryOperator::Xor:
+ return false;
+ // We don't reason yet about arithmetic constraints on symbolic values.
+ case BinaryOperator::Mul:
+ case BinaryOperator::Div:
+ case BinaryOperator::Rem:
+ case BinaryOperator::Add:
+ case BinaryOperator::Sub:
+ case BinaryOperator::Shl:
+ case BinaryOperator::Shr:
+ return false;
+ // All other cases.
+ default:
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ return true;
+}
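+
+// Editorial sketch (not part of the original patch): per the switch above,
+//
+//   $x > 10   -> true   (comparison of a symbol against a constant)
+//   $x & 0xff -> false  (bitwise constraints not modeled yet)
+//   $x + 4    -> false  (arithmetic constraints not modeled yet)
+//
+// When this returns false, AssumeAux below keeps the path feasible
+// instead of recording a constraint.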
+
+const GRState *SimpleConstraintManager::Assume(const GRState *state,
+ DefinedSVal Cond,
+ bool Assumption) {
+ if (isa<NonLoc>(Cond))
+ return Assume(state, cast<NonLoc>(Cond), Assumption);
+ else
+ return Assume(state, cast<Loc>(Cond), Assumption);
+}
+
+const GRState *SimpleConstraintManager::Assume(const GRState *state, Loc cond,
+ bool assumption) {
+ state = AssumeAux(state, cond, assumption);
+ return SU.ProcessAssume(state, cond, assumption);
+}
+
+const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
+ Loc Cond, bool Assumption) {
+
+ BasicValueFactory &BasicVals = state->getBasicVals();
+
+ switch (Cond.getSubKind()) {
+ default:
+ assert (false && "'Assume' not implemented for this Loc.");
+ return state;
+
+ case loc::MemRegionKind: {
+ // FIXME: Should this go into the storemanager?
+
+ const MemRegion *R = cast<loc::MemRegionVal>(Cond).getRegion();
+ const SubRegion *SubR = dyn_cast<SubRegion>(R);
+
+ while (SubR) {
+ // FIXME: now we only find the first symbolic region.
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SubR)) {
+ if (Assumption)
+ return AssumeSymNE(state, SymR->getSymbol(),
+ BasicVals.getZeroWithPtrWidth());
+ else
+ return AssumeSymEQ(state, SymR->getSymbol(),
+ BasicVals.getZeroWithPtrWidth());
+ }
+ SubR = dyn_cast<SubRegion>(SubR->getSuperRegion());
+ }
+
+ // FALL-THROUGH.
+ }
+
+ case loc::GotoLabelKind:
+ return Assumption ? state : NULL;
+
+ case loc::ConcreteIntKind: {
+ bool b = cast<loc::ConcreteInt>(Cond).getValue() != 0;
+ bool isFeasible = b ? Assumption : !Assumption;
+ return isFeasible ? state : NULL;
+ }
+ } // end switch
+}
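+
+// Editorial sketch (not part of the original patch): for 'if (p)' where
+// 'p' evaluates to a SymbolicRegion for symbol $0, the loop above finds
+// $0 and records $0 != 0 on the true branch (AssumeSymNE) or $0 == 0 on
+// the false branch (AssumeSymEQ).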
+
+const GRState *SimpleConstraintManager::Assume(const GRState *state,
+ NonLoc cond,
+ bool assumption) {
+ state = AssumeAux(state, cond, assumption);
+ return SU.ProcessAssume(state, cond, assumption);
+}
+
+const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
+ NonLoc Cond,
+ bool Assumption) {
+
+ // We cannot reason about SymIntExpr and SymSymExpr.
+ if (!canReasonAbout(Cond)) {
+ // Just return the current state indicating that the path is feasible.
+ // This may be an over-approximation of what is possible.
+ return state;
+ }
+
+ BasicValueFactory &BasicVals = state->getBasicVals();
+ SymbolManager &SymMgr = state->getSymbolManager();
+
+ switch (Cond.getSubKind()) {
+ default:
+ assert(false && "'Assume' not implemented for this NonLoc");
+
+ case nonloc::SymbolValKind: {
+ nonloc::SymbolVal& SV = cast<nonloc::SymbolVal>(Cond);
+ SymbolRef sym = SV.getSymbol();
+ QualType T = SymMgr.getType(sym);
+ const llvm::APSInt &zero = BasicVals.getValue(0, T);
+
+ return Assumption ? AssumeSymNE(state, sym, zero)
+ : AssumeSymEQ(state, sym, zero);
+ }
+
+ case nonloc::SymExprValKind: {
+ nonloc::SymExprVal V = cast<nonloc::SymExprVal>(Cond);
+ if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(V.getSymbolicExpression())){
+ // FIXME: This is a hack. It silently converts the RHS integer to be
+ // of the same type as on the left side. This should be removed once
+ // we support truncation/extension of symbolic values.
+ GRStateManager &StateMgr = state->getStateManager();
+ ASTContext &Ctx = StateMgr.getContext();
+ QualType LHSType = SE->getLHS()->getType(Ctx);
+ BasicValueFactory &BasicVals = StateMgr.getBasicVals();
+ const llvm::APSInt &RHS = BasicVals.Convert(LHSType, SE->getRHS());
+ SymIntExpr SENew(SE->getLHS(), SE->getOpcode(), RHS, SE->getType(Ctx));
+
+ return AssumeSymInt(state, Assumption, &SENew);
+ }
+
+ // For all other symbolic expressions, over-approximate and consider
+ // the constraint feasible.
+ return state;
+ }
+
+ case nonloc::ConcreteIntKind: {
+ bool b = cast<nonloc::ConcreteInt>(Cond).getValue() != 0;
+ bool isFeasible = b ? Assumption : !Assumption;
+ return isFeasible ? state : NULL;
+ }
+
+ case nonloc::LocAsIntegerKind:
+ return AssumeAux(state, cast<nonloc::LocAsInteger>(Cond).getLoc(),
+ Assumption);
+ } // end switch
+}
+
+const GRState *SimpleConstraintManager::AssumeSymInt(const GRState *state,
+ bool Assumption,
+ const SymIntExpr *SE) {
+
+ // Here we assume that LHS is a symbol. This is consistent with the
+ // rest of the constraint manager logic.
+ SymbolRef Sym = cast<SymbolData>(SE->getLHS());
+ const llvm::APSInt &Int = SE->getRHS();
+
+ switch (SE->getOpcode()) {
+ default:
+ // No logic yet for other operators. Assume the constraint is feasible.
+ return state;
+
+ case BinaryOperator::EQ:
+ return Assumption ? AssumeSymEQ(state, Sym, Int)
+ : AssumeSymNE(state, Sym, Int);
+
+ case BinaryOperator::NE:
+ return Assumption ? AssumeSymNE(state, Sym, Int)
+ : AssumeSymEQ(state, Sym, Int);
+ case BinaryOperator::GT:
+ return Assumption ? AssumeSymGT(state, Sym, Int)
+ : AssumeSymLE(state, Sym, Int);
+
+ case BinaryOperator::GE:
+ return Assumption ? AssumeSymGE(state, Sym, Int)
+ : AssumeSymLT(state, Sym, Int);
+
+ case BinaryOperator::LT:
+ return Assumption ? AssumeSymLT(state, Sym, Int)
+ : AssumeSymGE(state, Sym, Int);
+
+ case BinaryOperator::LE:
+ return Assumption ? AssumeSymLE(state, Sym, Int)
+ : AssumeSymGT(state, Sym, Int);
+ } // end switch
+}
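+
+// Editorial sketch (not part of the original patch): on the false branch
+// of 'if ($x > 5)' the engine calls AssumeSymInt(state, false, $x > 5);
+// the GT case above records the complement constraint $x <= 5 via
+// AssumeSymLE.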
+
+const GRState *SimpleConstraintManager::AssumeInBound(const GRState *state,
+ DefinedSVal Idx,
+ DefinedSVal UpperBound,
+ bool Assumption) {
+
+ // Only support ConcreteInt for now.
+ if (!(isa<nonloc::ConcreteInt>(Idx) && isa<nonloc::ConcreteInt>(UpperBound)))
+ return state;
+
+ const llvm::APSInt& Zero = state->getBasicVals().getZeroWithPtrWidth(false);
+ llvm::APSInt IdxV = cast<nonloc::ConcreteInt>(Idx).getValue();
+ // IdxV might be too narrow.
+ if (IdxV.getBitWidth() < Zero.getBitWidth())
+ IdxV.extend(Zero.getBitWidth());
+ // UBV might be too narrow, too.
+ llvm::APSInt UBV = cast<nonloc::ConcreteInt>(UpperBound).getValue();
+ if (UBV.getBitWidth() < Zero.getBitWidth())
+ UBV.extend(Zero.getBitWidth());
+
+ bool InBound = (Zero <= IdxV) && (IdxV < UBV);
+ bool isFeasible = Assumption ? InBound : !InBound;
+ return isFeasible ? state : NULL;
+}
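+
+// Editorial sketch (not part of the original patch): for an access with
+// a concrete index 12 and a concrete upper bound 10, IdxV = 12 and
+// UBV = 10, so InBound is false and AssumeInBound(..., true) returns
+// NULL, pruning the in-bounds path.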
+
+} // end of namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.h b/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.h
new file mode 100644
index 0000000..5f20e00
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.h
@@ -0,0 +1,83 @@
+//== SimpleConstraintManager.h ----------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Code shared between BasicConstraintManager and RangeConstraintManager.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_SIMPLE_CONSTRAINT_MANAGER_H
+#define LLVM_CLANG_ANALYSIS_SIMPLE_CONSTRAINT_MANAGER_H
+
+#include "clang/Checker/PathSensitive/ConstraintManager.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+
+namespace clang {
+
+class SimpleConstraintManager : public ConstraintManager {
+ GRSubEngine &SU;
+public:
+ SimpleConstraintManager(GRSubEngine &subengine) : SU(subengine) {}
+ virtual ~SimpleConstraintManager();
+
+ //===------------------------------------------------------------------===//
+ // Common implementation for the interface provided by ConstraintManager.
+ //===------------------------------------------------------------------===//
+
+ bool canReasonAbout(SVal X) const;
+
+ const GRState *Assume(const GRState *state, DefinedSVal Cond,
+ bool Assumption);
+
+ const GRState *Assume(const GRState *state, Loc Cond, bool Assumption);
+
+ const GRState *Assume(const GRState *state, NonLoc Cond, bool Assumption);
+
+ const GRState *AssumeSymInt(const GRState *state, bool Assumption,
+ const SymIntExpr *SE);
+
+ const GRState *AssumeInBound(const GRState *state, DefinedSVal Idx,
+ DefinedSVal UpperBound,
+ bool Assumption);
+
+protected:
+
+ //===------------------------------------------------------------------===//
+ // Interface that subclasses must implement.
+ //===------------------------------------------------------------------===//
+
+ virtual const GRState *AssumeSymNE(const GRState *state, SymbolRef sym,
+ const llvm::APSInt& V) = 0;
+
+ virtual const GRState *AssumeSymEQ(const GRState *state, SymbolRef sym,
+ const llvm::APSInt& V) = 0;
+
+ virtual const GRState *AssumeSymLT(const GRState *state, SymbolRef sym,
+ const llvm::APSInt& V) = 0;
+
+ virtual const GRState *AssumeSymGT(const GRState *state, SymbolRef sym,
+ const llvm::APSInt& V) = 0;
+
+ virtual const GRState *AssumeSymLE(const GRState *state, SymbolRef sym,
+ const llvm::APSInt& V) = 0;
+
+ virtual const GRState *AssumeSymGE(const GRState *state, SymbolRef sym,
+ const llvm::APSInt& V) = 0;
+
+ //===------------------------------------------------------------------===//
+ // Internal implementation.
+ //===------------------------------------------------------------------===//
+
+  const GRState *AssumeAux(const GRState *state, Loc Cond, bool Assumption);
+
+ const GRState *AssumeAux(const GRState *state, NonLoc Cond, bool Assumption);
+};
+
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Checker/SimpleSValuator.cpp b/contrib/llvm/tools/clang/lib/Checker/SimpleSValuator.cpp
new file mode 100644
index 0000000..dd38a43
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/SimpleSValuator.cpp
@@ -0,0 +1,434 @@
+// SimpleSValuator.cpp - A basic SValuator ------------------------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SimpleSValuator, a basic implementation of SValuator.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/SValuator.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+
+using namespace clang;
+
+namespace {
+class SimpleSValuator : public SValuator {
+protected:
+ virtual SVal EvalCastNL(NonLoc val, QualType castTy);
+ virtual SVal EvalCastL(Loc val, QualType castTy);
+
+public:
+ SimpleSValuator(ValueManager &valMgr) : SValuator(valMgr) {}
+ virtual ~SimpleSValuator() {}
+
+ virtual SVal EvalMinus(NonLoc val);
+ virtual SVal EvalComplement(NonLoc val);
+ virtual SVal EvalBinOpNN(const GRState *state, BinaryOperator::Opcode op,
+ NonLoc lhs, NonLoc rhs, QualType resultTy);
+ virtual SVal EvalBinOpLL(BinaryOperator::Opcode op, Loc lhs, Loc rhs,
+ QualType resultTy);
+ virtual SVal EvalBinOpLN(const GRState *state, BinaryOperator::Opcode op,
+ Loc lhs, NonLoc rhs, QualType resultTy);
+};
+} // end anonymous namespace
+
+SValuator *clang::CreateSimpleSValuator(ValueManager &valMgr) {
+ return new SimpleSValuator(valMgr);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for Casts.
+//===----------------------------------------------------------------------===//
+
+SVal SimpleSValuator::EvalCastNL(NonLoc val, QualType castTy) {
+
+ bool isLocType = Loc::IsLocType(castTy);
+
+ if (nonloc::LocAsInteger *LI = dyn_cast<nonloc::LocAsInteger>(&val)) {
+ if (isLocType)
+ return LI->getLoc();
+
+ // FIXME: Correctly support promotions/truncations.
+ ASTContext &Ctx = ValMgr.getContext();
+ unsigned castSize = Ctx.getTypeSize(castTy);
+ if (castSize == LI->getNumBits())
+ return val;
+
+ return ValMgr.makeLocAsInteger(LI->getLoc(), castSize);
+ }
+
+ if (const SymExpr *se = val.getAsSymbolicExpression()) {
+ ASTContext &Ctx = ValMgr.getContext();
+ QualType T = Ctx.getCanonicalType(se->getType(Ctx));
+ if (T == Ctx.getCanonicalType(castTy))
+ return val;
+
+ // FIXME: Remove this hack when we support symbolic truncation/extension.
+ // HACK: If both castTy and T are integers, ignore the cast. This is
+ // not a permanent solution. Eventually we want to precisely handle
+ // extension/truncation of symbolic integers. This prevents us from losing
+ // precision when we assign 'x = y' and 'y' is symbolic and x and y are
+ // different integer types.
+ if (T->isIntegerType() && castTy->isIntegerType())
+ return val;
+
+ return UnknownVal();
+ }
+
+ if (!isa<nonloc::ConcreteInt>(val))
+ return UnknownVal();
+
+ // Only handle casts from integers to integers.
+ if (!isLocType && !castTy->isIntegerType())
+ return UnknownVal();
+
+ llvm::APSInt i = cast<nonloc::ConcreteInt>(val).getValue();
+ i.setIsUnsigned(castTy->isUnsignedIntegerType() || Loc::IsLocType(castTy));
+ i.extOrTrunc(ValMgr.getContext().getTypeSize(castTy));
+
+ if (isLocType)
+ return ValMgr.makeIntLocVal(i);
+ else
+ return ValMgr.makeIntVal(i);
+}
+
+SVal SimpleSValuator::EvalCastL(Loc val, QualType castTy) {
+
+ // Casts from pointers -> pointers, just return the lval.
+ //
+ // Casts from pointers -> references, just return the lval. These
+ // can be introduced by the frontend for corner cases, e.g
+ // casting from va_list* to __builtin_va_list&.
+ //
+ if (Loc::IsLocType(castTy) || castTy->isReferenceType())
+ return val;
+
+ // FIXME: Handle transparent unions where a value can be "transparently"
+ // lifted into a union type.
+ if (castTy->isUnionType())
+ return UnknownVal();
+
+ if (castTy->isIntegerType()) {
+ unsigned BitWidth = ValMgr.getContext().getTypeSize(castTy);
+
+ if (!isa<loc::ConcreteInt>(val))
+ return ValMgr.makeLocAsInteger(val, BitWidth);
+
+ llvm::APSInt i = cast<loc::ConcreteInt>(val).getValue();
+ i.setIsUnsigned(castTy->isUnsignedIntegerType() || Loc::IsLocType(castTy));
+ i.extOrTrunc(BitWidth);
+ return ValMgr.makeIntVal(i);
+ }
+
+  // All other cases: return 'UnknownVal'.  This includes casting pointers
+  // to floats, which is probably badness in itself, but this is a good
+  // intermediate solution until we do something better.
+ return UnknownVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for unary operators.
+//===----------------------------------------------------------------------===//
+
+SVal SimpleSValuator::EvalMinus(NonLoc val) {
+ switch (val.getSubKind()) {
+ case nonloc::ConcreteIntKind:
+ return cast<nonloc::ConcreteInt>(val).evalMinus(ValMgr);
+ default:
+ return UnknownVal();
+ }
+}
+
+SVal SimpleSValuator::EvalComplement(NonLoc X) {
+ switch (X.getSubKind()) {
+ case nonloc::ConcreteIntKind:
+ return cast<nonloc::ConcreteInt>(X).evalComplement(ValMgr);
+ default:
+ return UnknownVal();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for binary operators.
+//===----------------------------------------------------------------------===//
+
+static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) {
+ switch (op) {
+ default:
+ assert(false && "Invalid opcode.");
+ case BinaryOperator::LT: return BinaryOperator::GE;
+ case BinaryOperator::GT: return BinaryOperator::LE;
+ case BinaryOperator::LE: return BinaryOperator::GT;
+ case BinaryOperator::GE: return BinaryOperator::LT;
+ case BinaryOperator::EQ: return BinaryOperator::NE;
+ case BinaryOperator::NE: return BinaryOperator::EQ;
+ }
+}
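+
+// Editorial sketch (not part of the original patch): NegateComparison
+// maps each comparison to its logical complement, e.g. !(a < b) becomes
+// a >= b. EvalBinOpNN below uses it to fold '($sym op C) == 0' into the
+// negated comparison.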
+
+// Equality operators for Locs.
+// FIXME: All this logic will be revamped when we have MemRegion::getLocation()
+// implemented.
+
+static SVal EvalEquality(ValueManager &ValMgr, Loc lhs, Loc rhs, bool isEqual,
+ QualType resultTy) {
+
+ switch (lhs.getSubKind()) {
+ default:
+ assert(false && "EQ/NE not implemented for this Loc.");
+ return UnknownVal();
+
+ case loc::ConcreteIntKind: {
+ if (SymbolRef rSym = rhs.getAsSymbol())
+ return ValMgr.makeNonLoc(rSym,
+ isEqual ? BinaryOperator::EQ
+ : BinaryOperator::NE,
+ cast<loc::ConcreteInt>(lhs).getValue(),
+ resultTy);
+ break;
+ }
+ case loc::MemRegionKind: {
+ if (SymbolRef lSym = lhs.getAsLocSymbol()) {
+ if (isa<loc::ConcreteInt>(rhs)) {
+ return ValMgr.makeNonLoc(lSym,
+ isEqual ? BinaryOperator::EQ
+ : BinaryOperator::NE,
+ cast<loc::ConcreteInt>(rhs).getValue(),
+ resultTy);
+ }
+ }
+ break;
+ }
+
+ case loc::GotoLabelKind:
+ break;
+ }
+
+ return ValMgr.makeTruthVal(isEqual ? lhs == rhs : lhs != rhs, resultTy);
+}
+
+SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
+ BinaryOperator::Opcode op,
+ NonLoc lhs, NonLoc rhs,
+ QualType resultTy) {
+ // Handle trivial case where left-side and right-side are the same.
+ if (lhs == rhs)
+ switch (op) {
+ default:
+ break;
+ case BinaryOperator::EQ:
+ case BinaryOperator::LE:
+ case BinaryOperator::GE:
+ return ValMgr.makeTruthVal(true, resultTy);
+ case BinaryOperator::LT:
+ case BinaryOperator::GT:
+ case BinaryOperator::NE:
+ return ValMgr.makeTruthVal(false, resultTy);
+ }
+
+ while (1) {
+ switch (lhs.getSubKind()) {
+ default:
+ return UnknownVal();
+ case nonloc::LocAsIntegerKind: {
+ Loc lhsL = cast<nonloc::LocAsInteger>(lhs).getLoc();
+ switch (rhs.getSubKind()) {
+ case nonloc::LocAsIntegerKind:
+ return EvalBinOpLL(op, lhsL, cast<nonloc::LocAsInteger>(rhs).getLoc(),
+ resultTy);
+ case nonloc::ConcreteIntKind: {
+ // Transform the integer into a location and compare.
+ ASTContext& Ctx = ValMgr.getContext();
+ llvm::APSInt i = cast<nonloc::ConcreteInt>(rhs).getValue();
+ i.setIsUnsigned(true);
+ i.extOrTrunc(Ctx.getTypeSize(Ctx.VoidPtrTy));
+ return EvalBinOpLL(op, lhsL, ValMgr.makeLoc(i), resultTy);
+ }
+ default:
+ switch (op) {
+ case BinaryOperator::EQ:
+ return ValMgr.makeTruthVal(false, resultTy);
+ case BinaryOperator::NE:
+ return ValMgr.makeTruthVal(true, resultTy);
+ default:
+ // This case also handles pointer arithmetic.
+ return UnknownVal();
+ }
+ }
+ }
+ case nonloc::SymExprValKind: {
+ // Logical not?
+ if (!(op == BinaryOperator::EQ && rhs.isZeroConstant()))
+ return UnknownVal();
+
+ const SymExpr *symExpr =
+ cast<nonloc::SymExprVal>(lhs).getSymbolicExpression();
+
+ // Only handle ($sym op constant) for now.
+ if (const SymIntExpr *symIntExpr = dyn_cast<SymIntExpr>(symExpr)) {
+ BinaryOperator::Opcode opc = symIntExpr->getOpcode();
+ switch (opc) {
+ case BinaryOperator::LAnd:
+ case BinaryOperator::LOr:
+ assert(false && "Logical operators handled by branching logic.");
+ return UnknownVal();
+ case BinaryOperator::Assign:
+ case BinaryOperator::MulAssign:
+ case BinaryOperator::DivAssign:
+ case BinaryOperator::RemAssign:
+ case BinaryOperator::AddAssign:
+ case BinaryOperator::SubAssign:
+ case BinaryOperator::ShlAssign:
+ case BinaryOperator::ShrAssign:
+ case BinaryOperator::AndAssign:
+ case BinaryOperator::XorAssign:
+ case BinaryOperator::OrAssign:
+ case BinaryOperator::Comma:
+ assert(false && "'=' and ',' operators handled by GRExprEngine.");
+ return UnknownVal();
+ case BinaryOperator::PtrMemD:
+ case BinaryOperator::PtrMemI:
+ assert(false && "Pointer arithmetic not handled here.");
+ return UnknownVal();
+ case BinaryOperator::Mul:
+ case BinaryOperator::Div:
+ case BinaryOperator::Rem:
+ case BinaryOperator::Add:
+ case BinaryOperator::Sub:
+ case BinaryOperator::Shl:
+ case BinaryOperator::Shr:
+ case BinaryOperator::And:
+ case BinaryOperator::Xor:
+ case BinaryOperator::Or:
+ // Not handled yet.
+ return UnknownVal();
+ case BinaryOperator::LT:
+ case BinaryOperator::GT:
+ case BinaryOperator::LE:
+ case BinaryOperator::GE:
+ case BinaryOperator::EQ:
+ case BinaryOperator::NE:
+ opc = NegateComparison(opc);
+ assert(symIntExpr->getType(ValMgr.getContext()) == resultTy);
+ return ValMgr.makeNonLoc(symIntExpr->getLHS(), opc,
+ symIntExpr->getRHS(), resultTy);
+ }
+      }
+
+      // The LHS is not of the form ($sym op constant); give up rather
+      // than fall through into the ConcreteIntKind case below.
+      return UnknownVal();
+    }
+ case nonloc::ConcreteIntKind: {
+ if (isa<nonloc::ConcreteInt>(rhs)) {
+ const nonloc::ConcreteInt& lhsInt = cast<nonloc::ConcreteInt>(lhs);
+ return lhsInt.evalBinOp(ValMgr, op, cast<nonloc::ConcreteInt>(rhs));
+ }
+ else {
+ // Swap the left and right sides and flip the operator if doing so
+ // allows us to better reason about the expression (this is a form
+ // of expression canonicalization).
+ NonLoc tmp = rhs;
+ rhs = lhs;
+ lhs = tmp;
+
+ switch (op) {
+ case BinaryOperator::LT: op = BinaryOperator::GT; continue;
+ case BinaryOperator::GT: op = BinaryOperator::LT; continue;
+ case BinaryOperator::LE: op = BinaryOperator::GE; continue;
+ case BinaryOperator::GE: op = BinaryOperator::LE; continue;
+ case BinaryOperator::EQ:
+ case BinaryOperator::NE:
+ case BinaryOperator::Add:
+ case BinaryOperator::Mul:
+ continue;
+ default:
+ return UnknownVal();
+ }
+ }
+ }
+ case nonloc::SymbolValKind: {
+ nonloc::SymbolVal *slhs = cast<nonloc::SymbolVal>(&lhs);
+ SymbolRef Sym = slhs->getSymbol();
+
+ // Does the symbol simplify to a constant? If so, "fold" the constant
+ // by setting 'lhs' to a ConcreteInt and try again.
+ if (Sym->getType(ValMgr.getContext())->isIntegerType())
+ if (const llvm::APSInt *Constant = state->getSymVal(Sym)) {
+ // The symbol evaluates to a constant. If necessary, promote the
+ // folded constant (LHS) to the result type.
+ BasicValueFactory &BVF = ValMgr.getBasicValueFactory();
+ const llvm::APSInt &lhs_I = BVF.Convert(resultTy, *Constant);
+ lhs = nonloc::ConcreteInt(lhs_I);
+
+ // Also promote the RHS (if necessary).
+
+          // For shifts, it is necessary to promote the RHS to the result
+          // type.
+ if (BinaryOperator::isShiftOp(op))
+ continue;
+
+ // Other operators: do an implicit conversion. This shouldn't be
+ // necessary once we support truncation/extension of symbolic values.
+ if (nonloc::ConcreteInt *rhs_I = dyn_cast<nonloc::ConcreteInt>(&rhs)){
+ rhs = nonloc::ConcreteInt(BVF.Convert(resultTy, rhs_I->getValue()));
+ }
+
+ continue;
+ }
+
+ if (isa<nonloc::ConcreteInt>(rhs)) {
+ return ValMgr.makeNonLoc(slhs->getSymbol(), op,
+ cast<nonloc::ConcreteInt>(rhs).getValue(),
+ resultTy);
+ }
+
+ return UnknownVal();
+ }
+ }
+ }
+}
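+
+// Editorial sketch (not part of the original patch): canonicalization in
+// the ConcreteIntKind case above. For '3 < $x' the constant sits on the
+// left, so the operands are swapped, 'op' flips from LT to GT, and the
+// loop re-enters the SymbolValKind case to build the NonLoc '$x > 3'.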
+
+SVal SimpleSValuator::EvalBinOpLL(BinaryOperator::Opcode op, Loc lhs, Loc rhs,
+ QualType resultTy) {
+ switch (op) {
+ default:
+ return UnknownVal();
+ case BinaryOperator::EQ:
+ case BinaryOperator::NE:
+ return EvalEquality(ValMgr, lhs, rhs, op == BinaryOperator::EQ, resultTy);
+ case BinaryOperator::LT:
+ case BinaryOperator::GT:
+ // FIXME: Generalize. For now, just handle the trivial case where
+ // the two locations are identical.
+ if (lhs == rhs)
+ return ValMgr.makeTruthVal(false, resultTy);
+ return UnknownVal();
+ }
+}
+
+SVal SimpleSValuator::EvalBinOpLN(const GRState *state,
+ BinaryOperator::Opcode op,
+ Loc lhs, NonLoc rhs, QualType resultTy) {
+ // Special case: 'rhs' is an integer that has the same width as a pointer and
+ // we are using the integer location in a comparison. Normally this cannot be
+  // triggered, but transfer functions like those for OSCompareAndSwapBarrier32
+ // can generate comparisons that trigger this code.
+ // FIXME: Are all locations guaranteed to have pointer width?
+ if (BinaryOperator::isEqualityOp(op)) {
+ if (nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs)) {
+ const llvm::APSInt *x = &rhsInt->getValue();
+ ASTContext &ctx = ValMgr.getContext();
+ if (ctx.getTypeSize(ctx.VoidPtrTy) == x->getBitWidth()) {
+ // Convert the signedness of the integer (if necessary).
+ if (x->isSigned())
+ x = &ValMgr.getBasicValueFactory().getValue(*x, true);
+
+ return EvalBinOpLL(op, lhs, loc::ConcreteInt(*x), resultTy);
+ }
+ }
+ }
+
+ // Delegate pointer arithmetic to the StoreManager.
+ return state->getStateManager().getStoreManager().EvalBinOp(op, lhs,
+ rhs, resultTy);
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/Store.cpp b/contrib/llvm/tools/clang/lib/Checker/Store.cpp
new file mode 100644
index 0000000..c12065b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/Store.cpp
@@ -0,0 +1,335 @@
+//== Store.cpp - Interface for maps from Locations to Values ----*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the types Store and StoreManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/Store.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/AST/CharUnits.h"
+
+using namespace clang;
+
+StoreManager::StoreManager(GRStateManager &stateMgr)
+ : ValMgr(stateMgr.getValueManager()), StateMgr(stateMgr),
+ MRMgr(ValMgr.getRegionManager()), Ctx(stateMgr.getContext()) {}
+
+const MemRegion *StoreManager::MakeElementRegion(const MemRegion *Base,
+ QualType EleTy, uint64_t index) {
+ SVal idx = ValMgr.makeArrayIndex(index);
+ return MRMgr.getElementRegion(EleTy, idx, Base, ValMgr.getContext());
+}
+
+// FIXME: Merge with the implementation of the same method in MemRegion.cpp
+static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *D = RT->getDecl();
+ if (!D->getDefinition())
+ return false;
+ }
+
+ return true;
+}
+
+const ElementRegion *StoreManager::GetElementZeroRegion(const MemRegion *R,
+ QualType T) {
+ SVal idx = ValMgr.makeZeroArrayIndex();
+ assert(!T.isNull());
+ return MRMgr.getElementRegion(T, idx, R, Ctx);
+}
+
+const MemRegion *StoreManager::CastRegion(const MemRegion *R, QualType CastToTy) {
+
+ ASTContext& Ctx = StateMgr.getContext();
+
+ // Handle casts to Objective-C objects.
+ if (CastToTy->isObjCObjectPointerType())
+ return R->StripCasts();
+
+ if (CastToTy->isBlockPointerType()) {
+ // FIXME: We may need different solutions, depending on the symbol
+ // involved. Blocks can be casted to/from 'id', as they can be treated
+ // as Objective-C objects. This could possibly be handled by enhancing
+ // our reasoning of downcasts of symbolic objects.
+ if (isa<CodeTextRegion>(R) || isa<SymbolicRegion>(R))
+ return R;
+
+ // We don't know what to make of it. Return a NULL region, which
+    // will be interpreted as UnknownVal.
+ return NULL;
+ }
+
+ // Now assume we are casting from pointer to pointer. Other cases should
+ // already be handled.
+ QualType PointeeTy = CastToTy->getAs<PointerType>()->getPointeeType();
+ QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
+
+ // Handle casts to void*. We just pass the region through.
+ if (CanonPointeeTy.getLocalUnqualifiedType() == Ctx.VoidTy)
+ return R;
+
+ // Handle casts from compatible types.
+ if (R->isBoundable())
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
+ QualType ObjTy = Ctx.getCanonicalType(TR->getValueType(Ctx));
+ if (CanonPointeeTy == ObjTy)
+ return R;
+ }
+
+ // Process region cast according to the kind of the region being cast.
+ switch (R->getKind()) {
+ case MemRegion::CXXThisRegionKind:
+ case MemRegion::GenericMemSpaceRegionKind:
+ case MemRegion::StackLocalsSpaceRegionKind:
+ case MemRegion::StackArgumentsSpaceRegionKind:
+ case MemRegion::HeapSpaceRegionKind:
+ case MemRegion::UnknownSpaceRegionKind:
+ case MemRegion::GlobalsSpaceRegionKind: {
+ assert(0 && "Invalid region cast");
+ break;
+ }
+
+ case MemRegion::FunctionTextRegionKind:
+ case MemRegion::BlockTextRegionKind:
+ case MemRegion::BlockDataRegionKind: {
+    // A CodeTextRegion should be cast only to a function or block pointer
+    // type, although in practice it can be cast to anything, e.g., void*,
+    // char*, etc.
+ // Just return the region.
+ return R;
+ }
+
+ case MemRegion::StringRegionKind:
+ // FIXME: Need to handle arbitrary downcasts.
+ case MemRegion::SymbolicRegionKind:
+ case MemRegion::AllocaRegionKind:
+ case MemRegion::CompoundLiteralRegionKind:
+ case MemRegion::FieldRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ case MemRegion::VarRegionKind:
+ case MemRegion::CXXObjectRegionKind:
+ return MakeElementRegion(R, PointeeTy);
+
+ case MemRegion::ElementRegionKind: {
+ // If we are casting from an ElementRegion to another type, the
+ // algorithm is as follows:
+ //
+ // (1) Compute the "raw offset" of the ElementRegion from the
+ // base region. This is done by calling 'getAsRawOffset()'.
+ //
+ // (2a) If we get a 'RegionRawOffset' after calling
+ // 'getAsRawOffset()', determine if the absolute offset
+ // can be exactly divided into chunks of the size of the
+ // casted-pointee type. If so, create a new ElementRegion with
+ // the pointee-cast type as the new ElementType and the index
+ // being the offset divded by the chunk size. If not, create
+ // a new ElementRegion at offset 0 off the raw offset region.
+ //
+    // (2b) If we don't get a 'RegionRawOffset' after calling
+ // 'getAsRawOffset()', it means that we are at offset 0.
+ //
+ // FIXME: Handle symbolic raw offsets.
+
+ const ElementRegion *elementR = cast<ElementRegion>(R);
+ const RegionRawOffset &rawOff = elementR->getAsRawOffset();
+ const MemRegion *baseR = rawOff.getRegion();
+
+ // If we cannot compute a raw offset, throw up our hands and return
+ // a NULL MemRegion*.
+ if (!baseR)
+ return NULL;
+
+ CharUnits off = CharUnits::fromQuantity(rawOff.getByteOffset());
+
+ if (off.isZero()) {
+ // Edge case: we are at 0 bytes off the beginning of baseR. We
+ // check to see if type we are casting to is the same as the base
+ // region. If so, just return the base region.
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(baseR)) {
+ QualType ObjTy = Ctx.getCanonicalType(TR->getValueType(Ctx));
+ QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
+ if (CanonPointeeTy == ObjTy)
+ return baseR;
+ }
+
+ // Otherwise, create a new ElementRegion at offset 0.
+ return MakeElementRegion(baseR, PointeeTy);
+ }
+
+ // We have a non-zero offset from the base region. We want to determine
+ // if the offset can be evenly divided by sizeof(PointeeTy). If so,
+ // we create an ElementRegion whose index is that value. Otherwise, we
+ // create two ElementRegions, one that reflects a raw offset and the other
+ // that reflects the cast.
+
+ // Compute the index for the new ElementRegion.
+ int64_t newIndex = 0;
+ const MemRegion *newSuperR = 0;
+
+ // We can only compute sizeof(PointeeTy) if it is a complete type.
+ if (IsCompleteType(Ctx, PointeeTy)) {
+ // Compute the size in **bytes**.
+ CharUnits pointeeTySize = Ctx.getTypeSizeInChars(PointeeTy);
+ if (!pointeeTySize.isZero()) {
+ // Is the offset a multiple of the size? If so, we can layer the
+ // ElementRegion (with elementType == PointeeTy) directly on top of
+ // the base region.
+ if (off % pointeeTySize == 0) {
+ newIndex = off / pointeeTySize;
+ newSuperR = baseR;
+ }
+ }
+ }
+
+ if (!newSuperR) {
+ // Create an intermediate ElementRegion to represent the raw byte.
+ // This will be the super region of the final ElementRegion.
+ newSuperR = MakeElementRegion(baseR, Ctx.CharTy, off.getQuantity());
+ }
+
+ return MakeElementRegion(newSuperR, PointeeTy, newIndex);
+ }
+ }
+
+ assert(0 && "unreachable");
+ return 0;
+}
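+
+// Editorial sketch (not part of the original patch): casting '(int*)buf'
+// where 'buf' is a char array and sizeof(int) == 4. A raw offset of 8
+// bytes divides evenly, yielding element{buf, 2, int}; an offset of 6
+// does not, yielding the layered element{element{buf, 6, char}, 0, int}.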
+
+/// CastRetrievedVal - Used by subclasses of StoreManager to implement
+/// implicit casts that arise from loads from regions that are reinterpreted
+/// as another region.
+SVal StoreManager::CastRetrievedVal(SVal V, const TypedRegion *R,
+ QualType castTy, bool performTestOnly) {
+
+ if (castTy.isNull())
+ return V;
+
+ ASTContext &Ctx = ValMgr.getContext();
+
+ if (performTestOnly) {
+ // Automatically translate references to pointers.
+ QualType T = R->getValueType(Ctx);
+ if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ T = Ctx.getPointerType(RT->getPointeeType());
+
+ assert(ValMgr.getContext().hasSameUnqualifiedType(castTy, T));
+ return V;
+ }
+
+ if (const Loc *L = dyn_cast<Loc>(&V))
+ return ValMgr.getSValuator().EvalCastL(*L, castTy);
+ else if (const NonLoc *NL = dyn_cast<NonLoc>(&V))
+ return ValMgr.getSValuator().EvalCastNL(*NL, castTy);
+
+ return V;
+}
+
+Store StoreManager::InvalidateRegions(Store store,
+ const MemRegion * const *I,
+ const MemRegion * const *End,
+ const Expr *E, unsigned Count,
+ InvalidatedSymbols *IS) {
+ for ( ; I != End ; ++I)
+ store = InvalidateRegion(store, *I, E, Count, IS);
+
+ return store;
+}
+
+SVal StoreManager::getLValueFieldOrIvar(const Decl* D, SVal Base) {
+ if (Base.isUnknownOrUndef())
+ return Base;
+
+ Loc BaseL = cast<Loc>(Base);
+ const MemRegion* BaseR = 0;
+
+ switch (BaseL.getSubKind()) {
+ case loc::MemRegionKind:
+ BaseR = cast<loc::MemRegionVal>(BaseL).getRegion();
+ break;
+
+ case loc::GotoLabelKind:
+    // These are abnormal cases. Flag an undefined value.
+ return UndefinedVal();
+
+ case loc::ConcreteIntKind:
+ // While these seem funny, this can happen through casts.
+ // FIXME: What we should return is the field offset. For example,
+ // add the field offset to the integer value. That way funny things
+ // like this work properly: &(((struct foo *) 0xa)->f)
+ return Base;
+
+ default:
+ assert(0 && "Unhandled Base.");
+ return Base;
+ }
+
+ // NOTE: We must have this check first because ObjCIvarDecl is a subclass
+ // of FieldDecl.
+ if (const ObjCIvarDecl *ID = dyn_cast<ObjCIvarDecl>(D))
+ return loc::MemRegionVal(MRMgr.getObjCIvarRegion(ID, BaseR));
+
+ return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
+}
+
+SVal StoreManager::getLValueElement(QualType elementType, SVal Offset,
+ SVal Base) {
+
+ // If the base is an unknown or undefined value, just return it back.
+ // FIXME: For absolute pointer addresses, we just return that value back as
+ // well, although in reality we should return the offset added to that
+ // value.
+ if (Base.isUnknownOrUndef() || isa<loc::ConcreteInt>(Base))
+ return Base;
+
+ // Only handle integer offsets... for now.
+ if (!isa<nonloc::ConcreteInt>(Offset))
+ return UnknownVal();
+
+ const MemRegion* BaseRegion = cast<loc::MemRegionVal>(Base).getRegion();
+
+ // Pointer of any type can be cast and used as array base.
+ const ElementRegion *ElemR = dyn_cast<ElementRegion>(BaseRegion);
+
+ // Convert the offset to the appropriate size and signedness.
+ Offset = ValMgr.convertToArrayIndex(Offset);
+
+ if (!ElemR) {
+ //
+ // If the base region is not an ElementRegion, create one.
+ // This can happen in the following example:
+ //
+ // char *p = __builtin_alloc(10);
+ // p[1] = 8;
+ //
+ // Observe that 'p' binds to an AllocaRegion.
+ //
+ return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
+ BaseRegion, Ctx));
+ }
+
+ SVal BaseIdx = ElemR->getIndex();
+
+ if (!isa<nonloc::ConcreteInt>(BaseIdx))
+ return UnknownVal();
+
+ const llvm::APSInt& BaseIdxI = cast<nonloc::ConcreteInt>(BaseIdx).getValue();
+ const llvm::APSInt& OffI = cast<nonloc::ConcreteInt>(Offset).getValue();
+ assert(BaseIdxI.isSigned());
+
+ // Compute the new index.
+ SVal NewIdx = nonloc::ConcreteInt(
+ ValMgr.getBasicValueFactory().getValue(BaseIdxI + OffI));
+
+ // Construct the new ElementRegion.
+ const MemRegion *ArrayR = ElemR->getSuperRegion();
+ return loc::MemRegionVal(MRMgr.getElementRegion(elementType, NewIdx, ArrayR,
+ Ctx));
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/SymbolManager.cpp b/contrib/llvm/tools/clang/lib/Checker/SymbolManager.cpp
new file mode 100644
index 0000000..f3a803c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/SymbolManager.cpp
@@ -0,0 +1,231 @@
+//== SymbolManager.cpp - Management of Symbolic Values ---------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SymbolManager, a class that manages symbolic values
+// created for use by GRExprEngine and related classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/SymbolManager.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Checker/PathSensitive/MemRegion.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+void SymExpr::dump() const {
+ dumpToStream(llvm::errs());
+}
+
+static void print(llvm::raw_ostream& os, BinaryOperator::Opcode Op) {
+ switch (Op) {
+ default:
+ assert(false && "operator printing not implemented");
+ break;
+ case BinaryOperator::Mul: os << '*' ; break;
+ case BinaryOperator::Div: os << '/' ; break;
+ case BinaryOperator::Rem: os << '%' ; break;
+ case BinaryOperator::Add: os << '+' ; break;
+ case BinaryOperator::Sub: os << '-' ; break;
+ case BinaryOperator::Shl: os << "<<" ; break;
+ case BinaryOperator::Shr: os << ">>" ; break;
+ case BinaryOperator::LT: os << "<" ; break;
+ case BinaryOperator::GT: os << '>' ; break;
+ case BinaryOperator::LE: os << "<=" ; break;
+ case BinaryOperator::GE: os << ">=" ; break;
+ case BinaryOperator::EQ: os << "==" ; break;
+ case BinaryOperator::NE: os << "!=" ; break;
+ case BinaryOperator::And: os << '&' ; break;
+ case BinaryOperator::Xor: os << '^' ; break;
+ case BinaryOperator::Or: os << '|' ; break;
+ }
+}
+
+void SymIntExpr::dumpToStream(llvm::raw_ostream& os) const {
+ os << '(';
+ getLHS()->dumpToStream(os);
+ os << ") ";
+ print(os, getOpcode());
+ os << ' ' << getRHS().getZExtValue();
+ if (getRHS().isUnsigned()) os << 'U';
+}
+
+void SymSymExpr::dumpToStream(llvm::raw_ostream& os) const {
+ os << '(';
+ getLHS()->dumpToStream(os);
+ os << ") ";
+ os << '(';
+ getRHS()->dumpToStream(os);
+ os << ')';
+}
+
+void SymbolConjured::dumpToStream(llvm::raw_ostream& os) const {
+ os << "conj_$" << getSymbolID() << '{' << T.getAsString() << '}';
+}
+
+void SymbolDerived::dumpToStream(llvm::raw_ostream& os) const {
+ os << "derived_$" << getSymbolID() << '{'
+ << getParentSymbol() << ',' << getRegion() << '}';
+}
+
+void SymbolRegionValue::dumpToStream(llvm::raw_ostream& os) const {
+ os << "reg_$" << getSymbolID() << "<" << R << ">";
+}
+
+const SymbolRegionValue*
+SymbolManager::getRegionValueSymbol(const TypedRegion* R) {
+ llvm::FoldingSetNodeID profile;
+ SymbolRegionValue::Profile(profile, R);
+ void* InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolRegionValue>();
+ new (SD) SymbolRegionValue(SymbolCounter, R);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolRegionValue>(SD);
+}
+
+const SymbolConjured*
+SymbolManager::getConjuredSymbol(const Stmt* E, QualType T, unsigned Count,
+ const void* SymbolTag) {
+
+ llvm::FoldingSetNodeID profile;
+ SymbolConjured::Profile(profile, E, T, Count, SymbolTag);
+ void* InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolConjured>();
+ new (SD) SymbolConjured(SymbolCounter, E, T, Count, SymbolTag);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolConjured>(SD);
+}
+
+const SymbolDerived*
+SymbolManager::getDerivedSymbol(SymbolRef parentSymbol,
+ const TypedRegion *R) {
+
+ llvm::FoldingSetNodeID profile;
+ SymbolDerived::Profile(profile, parentSymbol, R);
+ void* InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolDerived>();
+ new (SD) SymbolDerived(SymbolCounter, parentSymbol, R);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolDerived>(SD);
+}
+
+const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt& v,
+ QualType t) {
+ llvm::FoldingSetNodeID ID;
+ SymIntExpr::Profile(ID, lhs, op, v, t);
+ void *InsertPos;
+ SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!data) {
+ data = (SymIntExpr*) BPAlloc.Allocate<SymIntExpr>();
+ new (data) SymIntExpr(lhs, op, v, t);
+ DataSet.InsertNode(data, InsertPos);
+ }
+
+ return cast<SymIntExpr>(data);
+}
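+
+// Editorial sketch (not part of the original patch): expressions are
+// uniqued through the folding set, so identical requests return the same
+// node. 'SymMgr', 's', 'v' and 't' are hypothetical:
+//
+//   const SymIntExpr *A = SymMgr.getSymIntExpr(s, BinaryOperator::Add, v, t);
+//   const SymIntExpr *B = SymMgr.getSymIntExpr(s, BinaryOperator::Add, v, t);
+//   assert(A == B);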
+
+const SymSymExpr *SymbolManager::getSymSymExpr(const SymExpr *lhs,
+ BinaryOperator::Opcode op,
+ const SymExpr *rhs,
+ QualType t) {
+ llvm::FoldingSetNodeID ID;
+ SymSymExpr::Profile(ID, lhs, op, rhs, t);
+ void *InsertPos;
+ SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!data) {
+ data = (SymSymExpr*) BPAlloc.Allocate<SymSymExpr>();
+ new (data) SymSymExpr(lhs, op, rhs, t);
+ DataSet.InsertNode(data, InsertPos);
+ }
+
+ return cast<SymSymExpr>(data);
+}
+
+QualType SymbolConjured::getType(ASTContext&) const {
+ return T;
+}
+
+QualType SymbolDerived::getType(ASTContext& Ctx) const {
+ return R->getValueType(Ctx);
+}
+
+QualType SymbolRegionValue::getType(ASTContext& C) const {
+ return R->getValueType(C);
+}
+
+SymbolManager::~SymbolManager() {}
+
+bool SymbolManager::canSymbolicate(QualType T) {
+ return Loc::IsLocType(T) || (T->isIntegerType() && T->isScalarType());
+}
+
+void SymbolReaper::markLive(SymbolRef sym) {
+ TheLiving.insert(sym);
+ TheDead.erase(sym);
+}
+
+bool SymbolReaper::maybeDead(SymbolRef sym) {
+ if (isLive(sym))
+ return false;
+
+ TheDead.insert(sym);
+ return true;
+}
+
+bool SymbolReaper::isLive(SymbolRef sym) {
+ if (TheLiving.count(sym))
+ return true;
+
+ if (const SymbolDerived *derived = dyn_cast<SymbolDerived>(sym)) {
+ if (isLive(derived->getParentSymbol())) {
+ markLive(sym);
+ return true;
+ }
+ return false;
+ }
+
+  // Interrogate the symbol. It may derive from an input value to
+ // the analyzed function/method.
+ return isa<SymbolRegionValue>(sym);
+}
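+
+// Editorial sketch (not part of the original patch): liveness is
+// transitive for derived symbols. If derived_$3{$0,r} is queried while
+// $0 is in TheLiving, $3 is marked live too; a conjured symbol found in
+// neither set is reported dead.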
+
+bool SymbolReaper::isLive(const Stmt* Loc, const Stmt* ExprVal) const {
+ return LCtx->getLiveVariables()->isLive(Loc, ExprVal);
+}
+
+bool SymbolReaper::isLive(const Stmt *Loc, const VarRegion *VR) const {
+ const StackFrameContext *SFC = VR->getStackFrame();
+
+ if (SFC == LCtx->getCurrentStackFrame())
+ return LCtx->getLiveVariables()->isLive(Loc, VR->getDecl());
+ else
+ return SFC->isParentOf(LCtx->getCurrentStackFrame());
+}
+
+SymbolVisitor::~SymbolVisitor() {}
diff --git a/contrib/llvm/tools/clang/lib/Checker/UndefBranchChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/UndefBranchChecker.cpp
new file mode 100644
index 0000000..9088345
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/UndefBranchChecker.cpp
@@ -0,0 +1,118 @@
+//=== UndefBranchChecker.cpp -----------------------------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines UndefBranchChecker, which checks for undefined branch
+// conditions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/Checker.h"
+
+using namespace clang;
+
+namespace {
+
+class UndefBranchChecker : public Checker {
+ BuiltinBug *BT;
+
+ struct FindUndefExpr {
+ GRStateManager& VM;
+ const GRState* St;
+
+ FindUndefExpr(GRStateManager& V, const GRState* S) : VM(V), St(S) {}
+
+ Expr* FindExpr(Expr* Ex) {
+ if (!MatchesCriteria(Ex))
+ return 0;
+
+ for (Stmt::child_iterator I=Ex->child_begin(), E=Ex->child_end();I!=E;++I)
+ if (Expr* ExI = dyn_cast_or_null<Expr>(*I)) {
+ Expr* E2 = FindExpr(ExI);
+ if (E2) return E2;
+ }
+
+ return Ex;
+ }
+
+ bool MatchesCriteria(Expr* Ex) { return St->getSVal(Ex).isUndef(); }
+ };
+
+public:
+ UndefBranchChecker() : BT(0) {}
+ static void *getTag();
+ void VisitBranchCondition(GRBranchNodeBuilder &Builder, GRExprEngine &Eng,
+ Stmt *Condition, void *tag);
+};
+
+}
+
+void clang::RegisterUndefBranchChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new UndefBranchChecker());
+}
+
+void *UndefBranchChecker::getTag() {
+ static int x;
+ return &x;
+}
+
+void UndefBranchChecker::VisitBranchCondition(GRBranchNodeBuilder &Builder,
+ GRExprEngine &Eng,
+ Stmt *Condition, void *tag) {
+ const GRState *state = Builder.getState();
+ SVal X = state->getSVal(Condition);
+ if (X.isUndef()) {
+ ExplodedNode *N = Builder.generateNode(state, true);
+ if (N) {
+ N->markAsSink();
+ if (!BT)
+ BT = new BuiltinBug("Branch condition evaluates to a garbage value");
+
+ // What's going on here: we want to highlight the subexpression of the
+ // condition that is the most likely source of the "uninitialized
+ // branch condition." We do a recursive walk of the condition's
+ // subexpressions and roughly look for the most nested subexpression
+ // that binds to Undefined. We then highlight that expression's range.
+ BlockEdge B = cast<BlockEdge>(N->getLocation());
+ Expr* Ex = cast<Expr>(B.getSrc()->getTerminatorCondition());
+ assert (Ex && "Block must have a terminator.");
+
+      // Get the predecessor node and check if it is a PostStmt with the Stmt
+      // being the terminator condition. We want to inspect the state
+      // of that node instead because it will contain more information about
+      // the subexpressions.
+ assert (!N->pred_empty());
+
+ // Note: any predecessor will do. They should have identical state,
+ // since all the BlockEdge did was act as an error sink since the value
+ // had to already be undefined.
+ ExplodedNode *PrevN = *N->pred_begin();
+ ProgramPoint P = PrevN->getLocation();
+ const GRState* St = N->getState();
+
+ if (PostStmt* PS = dyn_cast<PostStmt>(&P))
+ if (PS->getStmt() == Ex)
+ St = PrevN->getState();
+
+ FindUndefExpr FindIt(Eng.getStateManager(), St);
+ Ex = FindIt.FindExpr(Ex);
+
+ // Emit the bug report.
+ EnhancedBugReport *R = new EnhancedBugReport(*BT, BT->getDescription(),N);
+ R->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, Ex);
+ R->addRange(Ex->getSourceRange());
+
+ Eng.getBugReporter().EmitReport(R);
+ }
+
+ Builder.markInfeasible(true);
+ Builder.markInfeasible(false);
+ }
+}
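For reference, a minimal translation unit that trips this checker (an
illustrative sketch; the function and variable names are invented):

    int test_undef_branch(void) {
      int x;      /* never initialized */
      if (x)      /* branch condition evaluates to a garbage value */
        return 1;
      return 0;
    }

Because the error node is marked as a sink and both branch outcomes are
marked infeasible, analysis does not continue past the garbage condition.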
diff --git a/contrib/llvm/tools/clang/lib/Checker/UndefCapturedBlockVarChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/UndefCapturedBlockVarChecker.cpp
new file mode 100644
index 0000000..b1010c9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/UndefCapturedBlockVarChecker.cpp
@@ -0,0 +1,101 @@
+// UndefCapturedBlockVarChecker.cpp - Uninitialized captured vars -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker detects blocks that capture uninitialized values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+namespace {
+class UndefCapturedBlockVarChecker
+ : public CheckerVisitor<UndefCapturedBlockVarChecker> {
+ BugType *BT;
+
+public:
+ UndefCapturedBlockVarChecker() : BT(0) {}
+ static void *getTag() { static int tag = 0; return &tag; }
+ void PostVisitBlockExpr(CheckerContext &C, const BlockExpr *BE);
+};
+} // end anonymous namespace
+
+void clang::RegisterUndefCapturedBlockVarChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new UndefCapturedBlockVarChecker());
+}
+
+static const BlockDeclRefExpr *FindBlockDeclRefExpr(const Stmt *S,
+ const VarDecl *VD){
+ if (const BlockDeclRefExpr *BR = dyn_cast<BlockDeclRefExpr>(S))
+ if (BR->getDecl() == VD)
+ return BR;
+
+ for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+ I!=E; ++I)
+ if (const Stmt *child = *I) {
+ const BlockDeclRefExpr *BR = FindBlockDeclRefExpr(child, VD);
+ if (BR)
+ return BR;
+ }
+
+ return NULL;
+}
+
+void
+UndefCapturedBlockVarChecker::PostVisitBlockExpr(CheckerContext &C,
+ const BlockExpr *BE) {
+ if (!BE->hasBlockDeclRefExprs())
+ return;
+
+ const GRState *state = C.getState();
+ const BlockDataRegion *R =
+ cast<BlockDataRegion>(state->getSVal(BE).getAsRegion());
+
+ BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
+ E = R->referenced_vars_end();
+
+ for (; I != E; ++I) {
+ // This VarRegion is the region associated with the block; we need
+ // the one associated with the encompassing context.
+ const VarRegion *VR = *I;
+ const VarDecl *VD = VR->getDecl();
+
+ if (VD->getAttr<BlocksAttr>() || !VD->hasLocalStorage())
+ continue;
+
+ // Get the VarRegion associated with VD in the local stack frame.
+ const LocationContext *LC = C.getPredecessor()->getLocationContext();
+ VR = C.getValueManager().getRegionManager().getVarRegion(VD, LC);
+
+ if (state->getSVal(VR).isUndef())
+ if (ExplodedNode *N = C.GenerateSink()) {
+ if (!BT)
+ BT = new BuiltinBug("Captured block variable is uninitialized");
+
+ // Generate a bug report.
+ llvm::SmallString<128> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "Variable '" << VD->getName() << "' is captured by block with "
+ "a garbage value";
+
+ EnhancedBugReport *R = new EnhancedBugReport(*BT, os.str(), N);
+ if (const Expr *Ex = FindBlockDeclRefExpr(BE->getBody(), VD))
+ R->addRange(Ex->getSourceRange());
+ R->addVisitorCreator(bugreporter::registerFindLastStore, VR);
+        // FIXME: Attach the location of the block itself to the report.
+ C.EmitReport(R);
+ }
+ }
+}
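An illustrative trigger, assuming the Clang blocks extension (compiled with
-fblocks; names are invented). Note that __block variables and non-local
variables are deliberately skipped by the BlocksAttr/hasLocalStorage test
above:

    void test_undef_capture(void) {
      int x;                            /* garbage value */
      int (^b)(void) = ^{ return x; };  /* 'x' is captured by value here */
      b();
    }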
diff --git a/contrib/llvm/tools/clang/lib/Checker/UndefResultChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/UndefResultChecker.cpp
new file mode 100644
index 0000000..8b07aed
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/UndefResultChecker.cpp
@@ -0,0 +1,86 @@
+//=== UndefResultChecker.cpp ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UndefResultChecker, a builtin check in GRExprEngine that
+// performs checks for undefined results of non-assignment binary operators.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+
+using namespace clang;
+
+namespace {
+class UndefResultChecker
+ : public CheckerVisitor<UndefResultChecker> {
+
+ BugType *BT;
+
+public:
+ UndefResultChecker() : BT(0) {}
+ static void *getTag() { static int tag = 0; return &tag; }
+ void PostVisitBinaryOperator(CheckerContext &C, const BinaryOperator *B);
+};
+} // end anonymous namespace
+
+void clang::RegisterUndefResultChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new UndefResultChecker());
+}
+
+void UndefResultChecker::PostVisitBinaryOperator(CheckerContext &C,
+ const BinaryOperator *B) {
+ const GRState *state = C.getState();
+ if (state->getSVal(B).isUndef()) {
+ // Generate an error node.
+ ExplodedNode *N = C.GenerateSink();
+ if (!N)
+ return;
+
+ if (!BT)
+ BT = new BuiltinBug("Result of operation is garbage or undefined");
+
+ llvm::SmallString<256> sbuf;
+ llvm::raw_svector_ostream OS(sbuf);
+ const Expr *Ex = NULL;
+ bool isLeft = true;
+
+ if (state->getSVal(B->getLHS()).isUndef()) {
+ Ex = B->getLHS()->IgnoreParenCasts();
+ isLeft = true;
+ }
+ else if (state->getSVal(B->getRHS()).isUndef()) {
+ Ex = B->getRHS()->IgnoreParenCasts();
+ isLeft = false;
+ }
+
+ if (Ex) {
+ OS << "The " << (isLeft ? "left" : "right")
+ << " operand of '"
+ << BinaryOperator::getOpcodeStr(B->getOpcode())
+ << "' is a garbage value";
+ }
+ else {
+ // Neither operand was undefined, but the result is undefined.
+ OS << "The result of the '"
+ << BinaryOperator::getOpcodeStr(B->getOpcode())
+ << "' expression is undefined";
+ }
+ EnhancedBugReport *report = new EnhancedBugReport(*BT, OS.str(), N);
+ if (Ex) {
+ report->addRange(Ex->getSourceRange());
+ report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, Ex);
+ }
+ else
+ report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, B);
+ C.EmitReport(report);
+ }
+}
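A minimal example producing the "left operand" diagnostic built above
(illustrative only; the function name is invented):

    int test_undef_result(void) {
      int x;         /* uninitialized */
      return x + 1;  /* "The left operand of '+' is a garbage value" */
    }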
diff --git a/contrib/llvm/tools/clang/lib/Checker/UndefinedArraySubscriptChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/UndefinedArraySubscriptChecker.cpp
new file mode 100644
index 0000000..148629e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/UndefinedArraySubscriptChecker.cpp
@@ -0,0 +1,56 @@
+//===--- UndefinedArraySubscriptChecker.cpp --------------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UndefinedArraySubscriptChecker, a builtin check in GRExprEngine
+// that performs checks for undefined array subscripts.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+
+using namespace clang;
+
+namespace {
+class UndefinedArraySubscriptChecker
+ : public CheckerVisitor<UndefinedArraySubscriptChecker> {
+ BugType *BT;
+public:
+ UndefinedArraySubscriptChecker() : BT(0) {}
+ static void *getTag() {
+ static int x = 0;
+ return &x;
+ }
+ void PreVisitArraySubscriptExpr(CheckerContext &C,
+ const ArraySubscriptExpr *A);
+};
+} // end anonymous namespace
+
+void clang::RegisterUndefinedArraySubscriptChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new UndefinedArraySubscriptChecker());
+}
+
+void
+UndefinedArraySubscriptChecker::PreVisitArraySubscriptExpr(CheckerContext &C,
+ const ArraySubscriptExpr *A) {
+ if (C.getState()->getSVal(A->getIdx()).isUndef()) {
+ if (ExplodedNode *N = C.GenerateSink()) {
+ if (!BT)
+ BT = new BuiltinBug("Array subscript is undefined");
+
+ // Generate a report for this bug.
+ EnhancedBugReport *R = new EnhancedBugReport(*BT, BT->getName(), N);
+ R->addRange(A->getIdx()->getSourceRange());
+ R->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue,
+ A->getIdx());
+ C.EmitReport(R);
+ }
+ }
+}
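For illustration, the kind of code this checker reports (names invented):

    int test_undef_subscript(int *buf) {
      int i;          /* uninitialized index */
      return buf[i];  /* "Array subscript is undefined" */
    }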
diff --git a/contrib/llvm/tools/clang/lib/Checker/UndefinedAssignmentChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/UndefinedAssignmentChecker.cpp
new file mode 100644
index 0000000..6cef60e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/UndefinedAssignmentChecker.cpp
@@ -0,0 +1,95 @@
+//===--- UndefinedAssignmentChecker.cpp -------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UndefinedAssignmentChecker, a builtin check in GRExprEngine
+// that checks for assigning undefined values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+
+using namespace clang;
+
+namespace {
+class UndefinedAssignmentChecker
+ : public CheckerVisitor<UndefinedAssignmentChecker> {
+ BugType *BT;
+public:
+ UndefinedAssignmentChecker() : BT(0) {}
+ static void *getTag();
+ virtual void PreVisitBind(CheckerContext &C, const Stmt *AssignE,
+ const Stmt *StoreE, SVal location,
+ SVal val);
+};
+}
+
+void clang::RegisterUndefinedAssignmentChecker(GRExprEngine &Eng){
+ Eng.registerCheck(new UndefinedAssignmentChecker());
+}
+
+void *UndefinedAssignmentChecker::getTag() {
+ static int x = 0;
+ return &x;
+}
+
+void UndefinedAssignmentChecker::PreVisitBind(CheckerContext &C,
+ const Stmt *AssignE,
+ const Stmt *StoreE,
+ SVal location,
+ SVal val) {
+ if (!val.isUndef())
+ return;
+
+ ExplodedNode *N = C.GenerateSink();
+
+ if (!N)
+ return;
+
+ const char *str = "Assigned value is garbage or undefined";
+
+ if (!BT)
+ BT = new BuiltinBug(str);
+
+ // Generate a report for this bug.
+ const Expr *ex = 0;
+
+ while (AssignE) {
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(AssignE)) {
+ if (B->isCompoundAssignmentOp()) {
+ const GRState *state = C.getState();
+ if (state->getSVal(B->getLHS()).isUndef()) {
+ str = "The left expression of the compound assignment is an "
+ "uninitialized value. The computed value will also be garbage";
+ ex = B->getLHS();
+ break;
+ }
+ }
+
+ ex = B->getRHS();
+ break;
+ }
+
+  if (const DeclStmt *DS = dyn_cast<DeclStmt>(AssignE)) {
+    if (const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl()))
+      ex = VD->getInit();
+  }
+
+ break;
+ }
+
+ EnhancedBugReport *R = new EnhancedBugReport(*BT, str, N);
+ if (ex) {
+ R->addRange(ex->getSourceRange());
+ R->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, ex);
+ }
+ C.EmitReport(R);
+}
+
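Two illustrative triggers, one for each diagnostic string built above
(sketch only; names invented):

    void test_plain_assignment(void) {
      int x, y;
      y = x;     /* "Assigned value is garbage or undefined" */
      (void)y;
    }

    void test_compound_assignment(void) {
      int x;
      x += 1;    /* uninitialized left side of a compound assignment */
    }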
diff --git a/contrib/llvm/tools/clang/lib/Checker/UnixAPIChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/UnixAPIChecker.cpp
new file mode 100644
index 0000000..e9b8f09
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/UnixAPIChecker.cpp
@@ -0,0 +1,222 @@
+//= UnixAPIChecker.cpp - Checks preconditions for various Unix APIs -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UnixAPIChecker, an assortment of checks on calls to
+// various widely used UNIX/POSIX functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringSwitch.h"
+#include <fcntl.h>
+
+using namespace clang;
+using llvm::Optional;
+
+namespace {
+class UnixAPIChecker : public CheckerVisitor<UnixAPIChecker> {
+ enum SubChecks {
+ OpenFn = 0,
+ PthreadOnceFn = 1,
+ NumChecks
+ };
+
+ BugType *BTypes[NumChecks];
+
+public:
+ Optional<uint64_t> Val_O_CREAT;
+
+public:
+ UnixAPIChecker() { memset(BTypes, 0, sizeof(*BTypes) * NumChecks); }
+ static void *getTag() { static unsigned tag = 0; return &tag; }
+
+ void PreVisitCallExpr(CheckerContext &C, const CallExpr *CE);
+};
+} //end anonymous namespace
+
+void clang::RegisterUnixAPIChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new UnixAPIChecker());
+}
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+static inline void LazyInitialize(BugType *&BT, const char *name) {
+ if (BT)
+ return;
+ BT = new BugType(name, "Unix API");
+}
+
+//===----------------------------------------------------------------------===//
+// "open" (man 2 open)
+//===----------------------------------------------------------------------===//
+
+static void CheckOpen(CheckerContext &C, UnixAPIChecker &UC,
+ const CallExpr *CE, BugType *&BT) {
+ // The definition of O_CREAT is platform specific. We need a better way
+ // of querying this information from the checking environment.
+ if (!UC.Val_O_CREAT.hasValue()) {
+ if (C.getASTContext().Target.getTriple().getVendor() == llvm::Triple::Apple)
+ UC.Val_O_CREAT = 0x0200;
+ else {
+ // FIXME: We need a more general way of getting the O_CREAT value.
+ // We could possibly grovel through the preprocessor state, but
+ // that would require passing the Preprocessor object to the GRExprEngine.
+ return;
+ }
+ }
+
+ LazyInitialize(BT, "Improper use of 'open'");
+
+ // Look at the 'oflags' argument for the O_CREAT flag.
+ const GRState *state = C.getState();
+
+ if (CE->getNumArgs() < 2) {
+ // The frontend should issue a warning for this case, so this is a sanity
+ // check.
+ return;
+ }
+
+ // Now check if oflags has O_CREAT set.
+ const Expr *oflagsEx = CE->getArg(1);
+ const SVal V = state->getSVal(oflagsEx);
+ if (!isa<NonLoc>(V)) {
+ // The case where 'V' can be a location can only be due to a bad header,
+ // so in this case bail out.
+ return;
+ }
+ NonLoc oflags = cast<NonLoc>(V);
+ NonLoc ocreateFlag =
+ cast<NonLoc>(C.getValueManager().makeIntVal(UC.Val_O_CREAT.getValue(),
+ oflagsEx->getType()));
+ SVal maskedFlagsUC = C.getSValuator().EvalBinOpNN(state, BinaryOperator::And,
+ oflags, ocreateFlag,
+ oflagsEx->getType());
+ if (maskedFlagsUC.isUnknownOrUndef())
+ return;
+ DefinedSVal maskedFlags = cast<DefinedSVal>(maskedFlagsUC);
+
+ // Check if maskedFlags is non-zero.
+ const GRState *trueState, *falseState;
+ llvm::tie(trueState, falseState) = state->Assume(maskedFlags);
+
+  // Only emit an error if the value of 'maskedFlags' is constrained
+  // to be non-zero, i.e. O_CREAT is definitely set.
+ if (!(trueState && !falseState))
+ return;
+
+ if (CE->getNumArgs() < 3) {
+ ExplodedNode *N = C.GenerateSink(trueState);
+ if (!N)
+ return;
+
+ EnhancedBugReport *report =
+ new EnhancedBugReport(*BT,
+ "Call to 'open' requires a third argument when "
+ "the 'O_CREAT' flag is set", N);
+ report->addRange(oflagsEx->getSourceRange());
+ C.EmitReport(report);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// pthread_once
+//===----------------------------------------------------------------------===//
+
+static void CheckPthreadOnce(CheckerContext &C, UnixAPIChecker &,
+ const CallExpr *CE, BugType *&BT) {
+
+ // This is similar to 'CheckDispatchOnce' in the MacOSXAPIChecker.
+ // They can possibly be refactored.
+
+ LazyInitialize(BT, "Improper use of 'pthread_once'");
+
+ if (CE->getNumArgs() < 1)
+ return;
+
+ // Check if the first argument is stack allocated. If so, issue a warning
+ // because that's likely to be bad news.
+ const GRState *state = C.getState();
+ const MemRegion *R = state->getSVal(CE->getArg(0)).getAsRegion();
+ if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
+ return;
+
+ ExplodedNode *N = C.GenerateSink(state);
+ if (!N)
+ return;
+
+ llvm::SmallString<256> S;
+ llvm::raw_svector_ostream os(S);
+ os << "Call to 'pthread_once' uses";
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R))
+ os << " the local variable '" << VR->getDecl()->getName() << '\'';
+ else
+ os << " stack allocated memory";
+ os << " for the \"control\" value. Using such transient memory for "
+ "the control value is potentially dangerous.";
+ if (isa<VarRegion>(R) && isa<StackLocalsSpaceRegion>(R->getMemorySpace()))
+ os << " Perhaps you intended to declare the variable as 'static'?";
+
+ EnhancedBugReport *report = new EnhancedBugReport(*BT, os.str(), N);
+ report->addRange(CE->getArg(0)->getSourceRange());
+ C.EmitReport(report);
+}
+
+//===----------------------------------------------------------------------===//
+// Central dispatch function.
+//===----------------------------------------------------------------------===//
+
+typedef void (*SubChecker)(CheckerContext &C, UnixAPIChecker &UC,
+ const CallExpr *CE, BugType *&BT);
+namespace {
+ class SubCheck {
+ SubChecker SC;
+ UnixAPIChecker *UC;
+ BugType **BT;
+ public:
+ SubCheck(SubChecker sc, UnixAPIChecker *uc, BugType *& bt) : SC(sc), UC(uc),
+ BT(&bt) {}
+ SubCheck() : SC(NULL), UC(NULL), BT(NULL) {}
+
+ void run(CheckerContext &C, const CallExpr *CE) const {
+ if (SC)
+ SC(C, *UC, CE, *BT);
+ }
+ };
+} // end anonymous namespace
+
+void UnixAPIChecker::PreVisitCallExpr(CheckerContext &C, const CallExpr *CE) {
+ // Get the callee. All the functions we care about are C functions
+ // with simple identifiers.
+ const GRState *state = C.getState();
+ const Expr *Callee = CE->getCallee();
+ const FunctionTextRegion *Fn =
+ dyn_cast_or_null<FunctionTextRegion>(state->getSVal(Callee).getAsRegion());
+
+ if (!Fn)
+ return;
+
+ const IdentifierInfo *FI = Fn->getDecl()->getIdentifier();
+ if (!FI)
+ return;
+
+ const SubCheck &SC =
+ llvm::StringSwitch<SubCheck>(FI->getName())
+ .Case("open", SubCheck(CheckOpen, this, BTypes[OpenFn]))
+ .Case("pthread_once", SubCheck(CheckPthreadOnce, this,
+ BTypes[PthreadOnceFn]))
+ .Default(SubCheck());
+
+ SC.run(C, CE);
+}
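Illustrative misuses of the two APIs checked above (sketch only; note that,
per the FIXME in CheckOpen, the O_CREAT test currently resolves the flag's
value only for Apple targets):

    #include <fcntl.h>
    #include <pthread.h>

    extern void do_init(void);

    int test_open(const char *path) {
      return open(path, O_CREAT);   /* missing the third 'mode' argument */
    }

    void test_pthread_once(void) {
      pthread_once_t once = PTHREAD_ONCE_INIT;  /* stack-allocated control */
      pthread_once(&once, do_init);             /* value; should be static */
    }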
diff --git a/contrib/llvm/tools/clang/lib/Checker/VLASizeChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/VLASizeChecker.cpp
new file mode 100644
index 0000000..cea9d19
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/VLASizeChecker.cpp
@@ -0,0 +1,98 @@
+//=== VLASizeChecker.cpp - VLA size checker ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines VLASizeChecker, a builtin check in GRExprEngine that
+// performs checks for declarations of VLAs with undefined or zero size.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+
+using namespace clang;
+
+namespace {
+class VLASizeChecker : public CheckerVisitor<VLASizeChecker> {
+ BugType *BT_zero;
+ BugType *BT_undef;
+
+public:
+ VLASizeChecker() : BT_zero(0), BT_undef(0) {}
+ static void *getTag() { static int tag = 0; return &tag; }
+ void PreVisitDeclStmt(CheckerContext &C, const DeclStmt *DS);
+};
+} // end anonymous namespace
+
+void clang::RegisterVLASizeChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new VLASizeChecker());
+}
+
+void VLASizeChecker::PreVisitDeclStmt(CheckerContext &C, const DeclStmt *DS) {
+ if (!DS->isSingleDecl())
+ return;
+
+ const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
+ if (!VD)
+ return;
+
+ const VariableArrayType *VLA
+ = C.getASTContext().getAsVariableArrayType(VD->getType());
+ if (!VLA)
+ return;
+
+ // FIXME: Handle multi-dimensional VLAs.
+ const Expr* SE = VLA->getSizeExpr();
+ const GRState *state = C.getState();
+ SVal sizeV = state->getSVal(SE);
+
+ if (sizeV.isUndef()) {
+ // Generate an error node.
+ ExplodedNode *N = C.GenerateSink();
+ if (!N)
+ return;
+
+ if (!BT_undef)
+ BT_undef = new BuiltinBug("Declared variable-length array (VLA) uses a "
+ "garbage value as its size");
+
+ EnhancedBugReport *report =
+ new EnhancedBugReport(*BT_undef, BT_undef->getName(), N);
+ report->addRange(SE->getSourceRange());
+ report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, SE);
+ C.EmitReport(report);
+ return;
+ }
+
+ // Check if the size is zero.
+ DefinedOrUnknownSVal sizeD = cast<DefinedOrUnknownSVal>(sizeV);
+
+ const GRState *stateNotZero, *stateZero;
+ llvm::tie(stateNotZero, stateZero) = state->Assume(sizeD);
+
+ if (stateZero && !stateNotZero) {
+    ExplodedNode* N = C.GenerateSink(stateZero);
+    if (!N)
+      return;
+ if (!BT_zero)
+ BT_zero = new BuiltinBug("Declared variable-length array (VLA) has zero "
+ "size");
+
+ EnhancedBugReport *report =
+ new EnhancedBugReport(*BT_zero, BT_zero->getName(), N);
+ report->addRange(SE->getSourceRange());
+ report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, SE);
+ C.EmitReport(report);
+ return;
+ }
+
+ // From this point on, assume that the size is not zero.
+ C.addTransition(stateNotZero);
+}
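Illustrative declarations covered by the two diagnostics above (C99 VLAs;
names invented):

    void test_vla_garbage(void) {
      int n;       /* uninitialized */
      int v[n];    /* "... uses a garbage value as its size" */
      (void)v;
    }

    void test_vla_zero(void) {
      int n = 0;
      int v[n];    /* "... has zero size" */
      (void)v;
    }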
diff --git a/contrib/llvm/tools/clang/lib/Checker/ValueManager.cpp b/contrib/llvm/tools/clang/lib/Checker/ValueManager.cpp
new file mode 100644
index 0000000..aa0c3c8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/ValueManager.cpp
@@ -0,0 +1,149 @@
+//== ValueManager.cpp - Aggregate manager of symbols and SVals --*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ValueManager, a class that manages symbolic values
+// and SVals created for use by GRExprEngine and related classes. It
+// wraps and owns SymbolManager, MemRegionManager, and BasicValueFactory.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/ValueManager.h"
+#include "clang/Analysis/AnalysisContext.h"
+
+using namespace clang;
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Utility methods for constructing SVals.
+//===----------------------------------------------------------------------===//
+
+DefinedOrUnknownSVal ValueManager::makeZeroVal(QualType T) {
+ if (Loc::IsLocType(T))
+ return makeNull();
+
+ if (T->isIntegerType())
+ return makeIntVal(0, T);
+
+ // FIXME: Handle floats.
+ // FIXME: Handle structs.
+ return UnknownVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Utility methods for constructing Non-Locs.
+//===----------------------------------------------------------------------===//
+
+NonLoc ValueManager::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const APSInt& v, QualType T) {
+ // The Environment ensures we always get a persistent APSInt in
+ // BasicValueFactory, so we don't need to get the APSInt from
+ // BasicValueFactory again.
+ assert(!Loc::IsLocType(T));
+ return nonloc::SymExprVal(SymMgr.getSymIntExpr(lhs, op, v, T));
+}
+
+NonLoc ValueManager::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const SymExpr *rhs, QualType T) {
+ assert(SymMgr.getType(lhs) == SymMgr.getType(rhs));
+ assert(!Loc::IsLocType(T));
+ return nonloc::SymExprVal(SymMgr.getSymSymExpr(lhs, op, rhs, T));
+}
+
+
+SVal ValueManager::convertToArrayIndex(SVal V) {
+ if (V.isUnknownOrUndef())
+ return V;
+
+ // Common case: we have an appropriately sized integer.
+ if (nonloc::ConcreteInt* CI = dyn_cast<nonloc::ConcreteInt>(&V)) {
+ const llvm::APSInt& I = CI->getValue();
+ if (I.getBitWidth() == ArrayIndexWidth && I.isSigned())
+ return V;
+ }
+
+ return SVator->EvalCastNL(cast<NonLoc>(V), ArrayIndexTy);
+}
+
+DefinedOrUnknownSVal
+ValueManager::getRegionValueSymbolVal(const TypedRegion* R) {
+ QualType T = R->getValueType(SymMgr.getContext());
+
+ if (!SymbolManager::canSymbolicate(T))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getRegionValueSymbol(R);
+
+ if (Loc::IsLocType(T))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedOrUnknownSVal ValueManager::getConjuredSymbolVal(const void *SymbolTag,
+ const Expr *E,
+ unsigned Count) {
+ QualType T = E->getType();
+
+ if (!SymbolManager::canSymbolicate(T))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getConjuredSymbol(E, Count, SymbolTag);
+
+ if (Loc::IsLocType(T))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedOrUnknownSVal ValueManager::getConjuredSymbolVal(const void *SymbolTag,
+ const Expr *E,
+ QualType T,
+ unsigned Count) {
+
+ if (!SymbolManager::canSymbolicate(T))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getConjuredSymbol(E, T, Count, SymbolTag);
+
+ if (Loc::IsLocType(T))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+
+DefinedOrUnknownSVal
+ValueManager::getDerivedRegionValueSymbolVal(SymbolRef parentSymbol,
+ const TypedRegion *R) {
+ QualType T = R->getValueType(R->getContext());
+
+ if (!SymbolManager::canSymbolicate(T))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getDerivedSymbol(parentSymbol, R);
+
+ if (Loc::IsLocType(T))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedSVal ValueManager::getFunctionPointer(const FunctionDecl* FD) {
+ return loc::MemRegionVal(MemMgr.getFunctionTextRegion(FD));
+}
+
+DefinedSVal ValueManager::getBlockPointer(const BlockDecl *D,
+ CanQualType locTy,
+ const LocationContext *LC) {
+ const BlockTextRegion *BC =
+ MemMgr.getBlockTextRegion(D, locTy, LC->getAnalysisContext());
+ const BlockDataRegion *BD = MemMgr.getBlockDataRegion(BC, LC);
+ return loc::MemRegionVal(BD);
+}
+
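As a sketch of how these helpers compose in a checker (mirroring CheckOpen
in UnixAPIChecker.cpp above; the function and its parameters are
hypothetical):

    // Build 'flags & mask' symbolically so the result can drive Assume().
    static void InspectFlagBit(CheckerContext &C, const Expr *flagsEx,
                               uint64_t mask) {
      const GRState *state = C.getState();
      SVal V = state->getSVal(flagsEx);
      if (!isa<NonLoc>(V))
        return;                 // bail out on unexpected location values

      ValueManager &VM = C.getValueManager();
      // makeIntVal produces a persistent ConcreteInt of the given type.
      NonLoc maskVal = cast<NonLoc>(VM.makeIntVal(mask, flagsEx->getType()));
      SVal masked = C.getSValuator().EvalBinOpNN(state, BinaryOperator::And,
                                                 cast<NonLoc>(V), maskVal,
                                                 flagsEx->getType());
      // If constrained, state->Assume(cast<DefinedSVal>(masked)) forks on
      // whether the bit is set.
      (void)masked;
    }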