author | dim <dim@FreeBSD.org> | 2011-02-20 13:06:31 +0000
---|---|---
committer | dim <dim@FreeBSD.org> | 2011-02-20 13:06:31 +0000
commit | 39fcc9a984e2820e4ea0fa2ac4abd17d9f3a31df (patch)
tree | a9243275843fbeaa590afc07ee888e006b8d54ea /lib/StaticAnalyzer/Core
parent | 69b4eca4a4255ba43baa5c1d9bbdec3ec17f479e (diff)
Vendor import of clang trunk r126079:
http://llvm.org/svn/llvm-project/cfe/trunk@126079
Diffstat (limited to 'lib/StaticAnalyzer/Core')
35 files changed, 17581 insertions, 0 deletions
diff --git a/lib/StaticAnalyzer/Core/AggExprVisitor.cpp b/lib/StaticAnalyzer/Core/AggExprVisitor.cpp new file mode 100644 index 0000000..e80cf9b --- /dev/null +++ b/lib/StaticAnalyzer/Core/AggExprVisitor.cpp @@ -0,0 +1,69 @@ +//=-- AggExprVisitor.cpp - evaluating expressions of C++ class type -*- C++ -*-= +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the AggExprVisitor class, which contains lots of +// boilerplate code for evaluating expressions of C++ class type. +// +//===----------------------------------------------------------------------===// + +#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h" +#include "clang/AST/StmtVisitor.h" + +using namespace clang; +using namespace ento; + +namespace { +/// AggExprVisitor is designed after AggExprEmitter of the CodeGen module. It +/// is used for evaluating exprs of C++ object type. Evaluating such exprs +/// requires a destination pointer pointing to the object being evaluated +/// into. Passing such a pointer around would pollute the Visit* interface of +/// ExprEngine. AggExprVisitor encapsulates code that goes through various +/// cast and construct exprs (and others), and at the final point, dispatches +/// back to the ExprEngine to let the real evaluation logic happen. +class AggExprVisitor : public StmtVisitor<AggExprVisitor> { + const MemRegion *Dest; + ExplodedNode *Pred; + ExplodedNodeSet &DstSet; + ExprEngine &Eng; + +public: + AggExprVisitor(const MemRegion *dest, ExplodedNode *N, ExplodedNodeSet &dst, + ExprEngine &eng) + : Dest(dest), Pred(N), DstSet(dst), Eng(eng) {} + + void VisitCastExpr(CastExpr *E); + void VisitCXXConstructExpr(CXXConstructExpr *E); + void VisitCXXMemberCallExpr(CXXMemberCallExpr *E); +}; +} + +void AggExprVisitor::VisitCastExpr(CastExpr *E) { + switch (E->getCastKind()) { + default: + assert(0 && "Unhandled cast kind"); + case CK_NoOp: + case CK_ConstructorConversion: + case CK_UserDefinedConversion: + Visit(E->getSubExpr()); + break; + } +} + +void AggExprVisitor::VisitCXXConstructExpr(CXXConstructExpr *E) { + Eng.VisitCXXConstructExpr(E, Dest, Pred, DstSet); +} + +void AggExprVisitor::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) { + Eng.VisitCXXMemberCallExpr(E, Pred, DstSet); +} + +void ExprEngine::VisitAggExpr(const Expr *E, const MemRegion *Dest, + ExplodedNode *Pred, ExplodedNodeSet &Dst) { + AggExprVisitor(Dest, Pred, Dst, *this).Visit(const_cast<Expr *>(E)); +} diff --git a/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/lib/StaticAnalyzer/Core/AnalysisManager.cpp new file mode 100644 index 0000000..5f4f83c --- /dev/null +++ b/lib/StaticAnalyzer/Core/AnalysisManager.cpp @@ -0,0 +1,32 @@ +//===-- AnalysisManager.cpp -------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details.
+// +//===----------------------------------------------------------------------===// + +#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h" +#include "clang/Index/Entity.h" +#include "clang/Index/Indexer.h" + +using namespace clang; +using namespace ento; + +AnalysisContext * +AnalysisManager::getAnalysisContextInAnotherTU(const Decl *D) { + idx::Entity Ent = idx::Entity::get(const_cast<Decl *>(D), + Idxer->getProgram()); + FunctionDecl *FuncDef; + idx::TranslationUnit *TU; + llvm::tie(FuncDef, TU) = Idxer->getDefinitionFor(Ent); + + if (FuncDef == 0) + return 0; + + // This AnalysisContext wraps a function definition in another translation + // unit, but it is still owned by the AnalysisManager associated with the + // current translation unit. + return AnaCtxMgr.getContext(FuncDef, TU); +} diff --git a/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp b/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp new file mode 100644 index 0000000..3050ca3 --- /dev/null +++ b/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp @@ -0,0 +1,338 @@ +//== BasicConstraintManager.cpp - Manage basic constraints.------*- C++ -*--==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines BasicConstraintManager, a class that tracks simple +// equality and inequality constraints on symbolic values of GRState. +// +//===----------------------------------------------------------------------===// + +#include "SimpleConstraintManager.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/TransferFuncs.h" +#include "llvm/Support/raw_ostream.h" + +using namespace clang; +using namespace ento; + + +namespace { class ConstNotEq {}; } +namespace { class ConstEq {}; } + +typedef llvm::ImmutableMap<SymbolRef,GRState::IntSetTy> ConstNotEqTy; +typedef llvm::ImmutableMap<SymbolRef,const llvm::APSInt*> ConstEqTy; + +static int ConstEqIndex = 0; +static int ConstNotEqIndex = 0; + +namespace clang { +namespace ento { +template<> +struct GRStateTrait<ConstNotEq> : public GRStatePartialTrait<ConstNotEqTy> { + static inline void* GDMIndex() { return &ConstNotEqIndex; } +}; + +template<> +struct GRStateTrait<ConstEq> : public GRStatePartialTrait<ConstEqTy> { + static inline void* GDMIndex() { return &ConstEqIndex; } +}; +} +} + +namespace { +// BasicConstraintManager only tracks equality and inequality constraints of +// constants and integer variables.
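+// For example, assuming 'x == 3' binds x's symbol to 3 in the ConstEq map,
+// while assuming 'x != 5' adds 5 to x's entry in the ConstNotEq map; later
+// assumptions are first checked against both maps for feasibility.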
+class BasicConstraintManager + : public SimpleConstraintManager { + GRState::IntSetTy::Factory ISetFactory; +public: + BasicConstraintManager(GRStateManager &statemgr, SubEngine &subengine) + : SimpleConstraintManager(subengine), + ISetFactory(statemgr.getAllocator()) {} + + const GRState *assumeSymNE(const GRState* state, SymbolRef sym, + const llvm::APSInt& V, + const llvm::APSInt& Adjustment); + + const GRState *assumeSymEQ(const GRState* state, SymbolRef sym, + const llvm::APSInt& V, + const llvm::APSInt& Adjustment); + + const GRState *assumeSymLT(const GRState* state, SymbolRef sym, + const llvm::APSInt& V, + const llvm::APSInt& Adjustment); + + const GRState *assumeSymGT(const GRState* state, SymbolRef sym, + const llvm::APSInt& V, + const llvm::APSInt& Adjustment); + + const GRState *assumeSymGE(const GRState* state, SymbolRef sym, + const llvm::APSInt& V, + const llvm::APSInt& Adjustment); + + const GRState *assumeSymLE(const GRState* state, SymbolRef sym, + const llvm::APSInt& V, + const llvm::APSInt& Adjustment); + + const GRState* AddEQ(const GRState* state, SymbolRef sym, const llvm::APSInt& V); + + const GRState* AddNE(const GRState* state, SymbolRef sym, const llvm::APSInt& V); + + const llvm::APSInt* getSymVal(const GRState* state, SymbolRef sym) const; + bool isNotEqual(const GRState* state, SymbolRef sym, const llvm::APSInt& V) + const; + bool isEqual(const GRState* state, SymbolRef sym, const llvm::APSInt& V) + const; + + const GRState* removeDeadBindings(const GRState* state, SymbolReaper& SymReaper); + + void print(const GRState* state, llvm::raw_ostream& Out, + const char* nl, const char *sep); +}; + +} // end anonymous namespace + +ConstraintManager* ento::CreateBasicConstraintManager(GRStateManager& statemgr, + SubEngine &subengine) { + return new BasicConstraintManager(statemgr, subengine); +} + + +const GRState* +BasicConstraintManager::assumeSymNE(const GRState *state, SymbolRef sym, + const llvm::APSInt &V, + const llvm::APSInt &Adjustment) { + // First, determine if sym == X, where X+Adjustment != V. + llvm::APSInt Adjusted = V-Adjustment; + if (const llvm::APSInt* X = getSymVal(state, sym)) { + bool isFeasible = (*X != Adjusted); + return isFeasible ? state : NULL; + } + + // Second, determine if sym+Adjustment != V. + if (isNotEqual(state, sym, Adjusted)) + return state; + + // If we reach here, sym is not a constant and we don't know if it is != V. + // Make that assumption. + return AddNE(state, sym, Adjusted); +} + +const GRState* +BasicConstraintManager::assumeSymEQ(const GRState *state, SymbolRef sym, + const llvm::APSInt &V, + const llvm::APSInt &Adjustment) { + // First, determine if sym == X, where X+Adjustment != V. + llvm::APSInt Adjusted = V-Adjustment; + if (const llvm::APSInt* X = getSymVal(state, sym)) { + bool isFeasible = (*X == Adjusted); + return isFeasible ? state : NULL; + } + + // Second, determine if sym+Adjustment != V. + if (isNotEqual(state, sym, Adjusted)) + return NULL; + + // If we reach here, sym is not a constant and we don't know if it is == V. + // Make that assumption. + return AddEQ(state, sym, Adjusted); +} + +// The logic for these will be handled in another ConstraintManager. +const GRState* +BasicConstraintManager::assumeSymLT(const GRState *state, SymbolRef sym, + const llvm::APSInt &V, + const llvm::APSInt &Adjustment) { + // Is 'V' the smallest possible value? + if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) { + // sym cannot be any value less than 'V'. This path is infeasible. 
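+ // (For an unsigned type nothing is below 0; for a signed type nothing
+ // is below the minimum representable signed value.)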
+ return NULL; + } + + // FIXME: For now have assuming x < y be the same as assuming sym != V; + return assumeSymNE(state, sym, V, Adjustment); +} + +const GRState* +BasicConstraintManager::assumeSymGT(const GRState *state, SymbolRef sym, + const llvm::APSInt &V, + const llvm::APSInt &Adjustment) { + // Is 'V' the largest possible value? + if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) { + // sym cannot be any value greater than 'V'. This path is infeasible. + return NULL; + } + + // FIXME: For now have assuming x > y be the same as assuming sym != V; + return assumeSymNE(state, sym, V, Adjustment); +} + +const GRState* +BasicConstraintManager::assumeSymGE(const GRState *state, SymbolRef sym, + const llvm::APSInt &V, + const llvm::APSInt &Adjustment) { + // Reject a path if the value of sym is a constant X and !(X+Adj >= V). + if (const llvm::APSInt *X = getSymVal(state, sym)) { + bool isFeasible = (*X >= V-Adjustment); + return isFeasible ? state : NULL; + } + + // Sym is not a constant, but it is worth looking to see if V is the + // maximum integer value. + if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) { + llvm::APSInt Adjusted = V-Adjustment; + + // If we know that sym != V (after adjustment), then this condition + // is infeasible since there is no other value greater than V. + bool isFeasible = !isNotEqual(state, sym, Adjusted); + + // If the path is still feasible then as a consequence we know that + // 'sym+Adjustment == V' because there are no larger values. + // Add this constraint. + return isFeasible ? AddEQ(state, sym, Adjusted) : NULL; + } + + return state; +} + +const GRState* +BasicConstraintManager::assumeSymLE(const GRState *state, SymbolRef sym, + const llvm::APSInt &V, + const llvm::APSInt &Adjustment) { + // Reject a path if the value of sym is a constant X and !(X+Adj <= V). + if (const llvm::APSInt* X = getSymVal(state, sym)) { + bool isFeasible = (*X <= V-Adjustment); + return isFeasible ? state : NULL; + } + + // Sym is not a constant, but it is worth looking to see if V is the + // minimum integer value. + if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) { + llvm::APSInt Adjusted = V-Adjustment; + + // If we know that sym != V (after adjustment), then this condition + // is infeasible since there is no other value less than V. + bool isFeasible = !isNotEqual(state, sym, Adjusted); + + // If the path is still feasible then as a consequence we know that + // 'sym+Adjustment == V' because there are no smaller values. + // Add this constraint. + return isFeasible ? AddEQ(state, sym, Adjusted) : NULL; + } + + return state; +} + +const GRState* BasicConstraintManager::AddEQ(const GRState* state, SymbolRef sym, + const llvm::APSInt& V) { + // Create a new state with the old binding replaced. + return state->set<ConstEq>(sym, &state->getBasicVals().getValue(V)); +} + +const GRState* BasicConstraintManager::AddNE(const GRState* state, SymbolRef sym, + const llvm::APSInt& V) { + + // First, retrieve the NE-set associated with the given symbol. + ConstNotEqTy::data_type* T = state->get<ConstNotEq>(sym); + GRState::IntSetTy S = T ? *T : ISetFactory.getEmptySet(); + + // Now add V to the NE set. + S = ISetFactory.add(S, &state->getBasicVals().getValue(V)); + + // Create a new state with the old binding replaced. 
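+ // set<ConstNotEq> returns a fresh GRState; the underlying ImmutableMap
+ // shares structure with the old one, which stays valid and unchanged.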
+ return state->set<ConstNotEq>(sym, S); +} + +const llvm::APSInt* BasicConstraintManager::getSymVal(const GRState* state, + SymbolRef sym) const { + const ConstEqTy::data_type* T = state->get<ConstEq>(sym); + return T ? *T : NULL; +} + +bool BasicConstraintManager::isNotEqual(const GRState* state, SymbolRef sym, + const llvm::APSInt& V) const { + + // Retrieve the NE-set associated with the given symbol. + const ConstNotEqTy::data_type* T = state->get<ConstNotEq>(sym); + + // See if V is present in the NE-set. + return T ? T->contains(&state->getBasicVals().getValue(V)) : false; +} + +bool BasicConstraintManager::isEqual(const GRState* state, SymbolRef sym, + const llvm::APSInt& V) const { + // Retrieve the EQ-set associated with the given symbol. + const ConstEqTy::data_type* T = state->get<ConstEq>(sym); + // See if V is present in the EQ-set. + return T ? **T == V : false; +} + +/// Scan all symbols referenced by the constraints. If a symbol is no longer +/// alive, remove its constraints. +const GRState* +BasicConstraintManager::removeDeadBindings(const GRState* state, + SymbolReaper& SymReaper) { + + ConstEqTy CE = state->get<ConstEq>(); + ConstEqTy::Factory& CEFactory = state->get_context<ConstEq>(); + + for (ConstEqTy::iterator I = CE.begin(), E = CE.end(); I!=E; ++I) { + SymbolRef sym = I.getKey(); + if (SymReaper.maybeDead(sym)) + CE = CEFactory.remove(CE, sym); + } + state = state->set<ConstEq>(CE); + + ConstNotEqTy CNE = state->get<ConstNotEq>(); + ConstNotEqTy::Factory& CNEFactory = state->get_context<ConstNotEq>(); + + for (ConstNotEqTy::iterator I = CNE.begin(), E = CNE.end(); I != E; ++I) { + SymbolRef sym = I.getKey(); + if (SymReaper.maybeDead(sym)) + CNE = CNEFactory.remove(CNE, sym); + } + + return state->set<ConstNotEq>(CNE); +} + +void BasicConstraintManager::print(const GRState* state, llvm::raw_ostream& Out, + const char* nl, const char *sep) { + // Print equality constraints. + + ConstEqTy CE = state->get<ConstEq>(); + + if (!CE.isEmpty()) { + Out << nl << sep << "'==' constraints:"; + for (ConstEqTy::iterator I = CE.begin(), E = CE.end(); I!=E; ++I) + Out << nl << " $" << I.getKey() << " : " << *I.getData(); + } + + // Print != constraints. + + ConstNotEqTy CNE = state->get<ConstNotEq>(); + + if (!CNE.isEmpty()) { + Out << nl << sep << "'!=' constraints:"; + + for (ConstNotEqTy::iterator I = CNE.begin(), EI = CNE.end(); I!=EI; ++I) { + Out << nl << " $" << I.getKey() << " : "; + bool isFirst = true; + + GRState::IntSetTy::iterator J = I.getData().begin(), + EJ = I.getData().end(); + + for ( ; J != EJ; ++J) { + if (isFirst) isFirst = false; + else Out << ", "; + + Out << (*J)->getSExtValue(); // Hack: should print to raw_ostream. + } + } + } +} diff --git a/lib/StaticAnalyzer/Core/BasicStore.cpp b/lib/StaticAnalyzer/Core/BasicStore.cpp new file mode 100644 index 0000000..98365e7 --- /dev/null +++ b/lib/StaticAnalyzer/Core/BasicStore.cpp @@ -0,0 +1,604 @@ +//== BasicStore.cpp - Basic map from Locations to Values --------*- C++ -*--==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the BasicStore and BasicStoreManager classes.
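+// A BasicStore is a flat ImmutableMap from memory regions to SVals. It only
+// tracks variable-like regions and does not model arrays or structs.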
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/DeclCXX.h" +#include "clang/AST/ExprObjC.h" +#include "clang/Analysis/Analyses/LiveVariables.h" +#include "clang/Analysis/AnalysisContext.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h" +#include "llvm/ADT/ImmutableMap.h" + +using namespace clang; +using namespace ento; + +typedef llvm::ImmutableMap<const MemRegion*,SVal> BindingsTy; + +namespace { + +class BasicStoreSubRegionMap : public SubRegionMap { +public: + BasicStoreSubRegionMap() {} + + bool iterSubRegions(const MemRegion* R, Visitor& V) const { + return true; // Do nothing. No subregions. + } +}; + +class BasicStoreManager : public StoreManager { + BindingsTy::Factory VBFactory; +public: + BasicStoreManager(GRStateManager& mgr) + : StoreManager(mgr), VBFactory(mgr.getAllocator()) {} + + ~BasicStoreManager() {} + + SubRegionMap *getSubRegionMap(Store store) { + return new BasicStoreSubRegionMap(); + } + + SVal Retrieve(Store store, Loc loc, QualType T = QualType()); + + StoreRef invalidateRegion(Store store, const MemRegion *R, const Expr *E, + unsigned Count, InvalidatedSymbols *IS); + + StoreRef invalidateRegions(Store store, const MemRegion * const *Begin, + const MemRegion * const *End, const Expr *E, + unsigned Count, InvalidatedSymbols *IS, + bool invalidateGlobals, + InvalidatedRegions *Regions); + + StoreRef scanForIvars(Stmt *B, const Decl* SelfDecl, + const MemRegion *SelfRegion, Store St); + + StoreRef Bind(Store St, Loc loc, SVal V); + StoreRef Remove(Store St, Loc loc); + StoreRef getInitialStore(const LocationContext *InitLoc); + + StoreRef BindCompoundLiteral(Store store, const CompoundLiteralExpr*, + const LocationContext*, SVal val) { + return StoreRef(store, *this); + } + + /// ArrayToPointer - Used by ExprEngine::VisitCast to handle implicit + /// conversions between arrays and pointers. + SVal ArrayToPointer(Loc Array) { return Array; } + + /// removeDeadBindings - Scans a BasicStore of 'state' for dead values. + /// It updates the GRState object in place with the values removed. + StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx, + SymbolReaper& SymReaper, + llvm::SmallVectorImpl<const MemRegion*>& RegionRoots); + + void iterBindings(Store store, BindingsHandler& f); + + StoreRef BindDecl(Store store, const VarRegion *VR, SVal InitVal) { + return BindDeclInternal(store, VR, &InitVal); + } + + StoreRef BindDeclWithNoInit(Store store, const VarRegion *VR) { + return BindDeclInternal(store, VR, 0); + } + + StoreRef BindDeclInternal(Store store, const VarRegion *VR, SVal *InitVal); + + static inline BindingsTy GetBindings(Store store) { + return BindingsTy(static_cast<const BindingsTy::TreeTy*>(store)); + } + + void print(Store store, llvm::raw_ostream& Out, const char* nl, + const char *sep); + +private: + SVal LazyRetrieve(Store store, const TypedRegion *R); +}; + +} // end anonymous namespace + + +StoreManager* ento::CreateBasicStoreManager(GRStateManager& StMgr) { + return new BasicStoreManager(StMgr); +} + +static bool isHigherOrderRawPtr(QualType T, ASTContext &C) { + bool foundPointer = false; + while (1) { + const PointerType *PT = T->getAs<PointerType>(); + if (!PT) { + if (!foundPointer) + return false; + + // intptr_t* or intptr_t**, etc?
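+ // At this point T is the innermost pointee type: accept any integer
+ // type as wide as a pointer (e.g. intptr_t), or 'void' (checked below).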
+ if (T->isIntegerType() && C.getTypeSize(T) == C.getTypeSize(C.VoidPtrTy)) + return true; + + QualType X = C.getCanonicalType(T).getUnqualifiedType(); + return X == C.VoidTy; + } + + foundPointer = true; + T = PT->getPointeeType(); + } +} + +SVal BasicStoreManager::LazyRetrieve(Store store, const TypedRegion *R) { + const VarRegion *VR = dyn_cast<VarRegion>(R); + if (!VR) + return UnknownVal(); + + const VarDecl *VD = VR->getDecl(); + QualType T = VD->getType(); + + // Only handle simple types that we can symbolicate. + if (!SymbolManager::canSymbolicate(T) || !T->isScalarType()) + return UnknownVal(); + + // Globals and parameters start with symbolic values. + // Local variables initially are undefined. + + // Non-static globals may have had their values reset by invalidateRegions. + const MemSpaceRegion *MS = VR->getMemorySpace(); + if (isa<NonStaticGlobalSpaceRegion>(MS)) { + BindingsTy B = GetBindings(store); + // FIXME: Copy-and-pasted from RegionStore.cpp. + if (BindingsTy::data_type *Val = B.lookup(MS)) { + if (SymbolRef parentSym = Val->getAsSymbol()) + return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R); + + if (Val->isZeroConstant()) + return svalBuilder.makeZeroVal(T); + + if (Val->isUnknownOrUndef()) + return *Val; + + assert(0 && "Unknown default value."); + } + } + + if (VR->hasGlobalsOrParametersStorage() || + isa<UnknownSpaceRegion>(VR->getMemorySpace())) + return svalBuilder.getRegionValueSymbolVal(R); + + return UndefinedVal(); +} + +SVal BasicStoreManager::Retrieve(Store store, Loc loc, QualType T) { + if (isa<UnknownVal>(loc)) + return UnknownVal(); + + assert(!isa<UndefinedVal>(loc)); + + switch (loc.getSubKind()) { + + case loc::MemRegionKind: { + const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion(); + + if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R) || + isa<CXXThisRegion>(R))) + return UnknownVal(); + + BindingsTy B = GetBindings(store); + BindingsTy::data_type *Val = B.lookup(R); + const TypedRegion *TR = cast<TypedRegion>(R); + + if (Val) + return CastRetrievedVal(*Val, TR, T); + + SVal V = LazyRetrieve(store, TR); + return V.isUnknownOrUndef() ? V : CastRetrievedVal(V, TR, T); + } + + case loc::ObjCPropRefKind: + case loc::ConcreteIntKind: + // Support direct accesses to memory. It's up to individual checkers + // to flag an error. + return UnknownVal(); + + default: + assert (false && "Invalid Loc."); + break; + } + + return UnknownVal(); +} + +StoreRef BasicStoreManager::Bind(Store store, Loc loc, SVal V) { + if (isa<loc::ConcreteInt>(loc)) + return StoreRef(store, *this); + + const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion(); + + // Special case: a default symbol assigned to the NonStaticGlobalsSpaceRegion + // that is used to derive other symbols. + if (isa<NonStaticGlobalSpaceRegion>(R)) { + BindingsTy B = GetBindings(store); + return StoreRef(VBFactory.add(B, R, V).getRoot(), *this); + } + + // Special case: handle store of pointer values (Loc) to pointers via + // a cast to intXX_t*, void*, etc. This is needed to handle + // OSCompareAndSwap32Barrier/OSCompareAndSwap64Barrier. + if (isa<Loc>(V) || isa<nonloc::LocAsInteger>(V)) + if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) { + // FIXME: Should check for index 0. 
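+ // e.g. a write through '(intptr_t *)&p' should rebind 'p' itself, so
+ // strip the ElementRegion and bind to its super region instead.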
+ QualType T = ER->getLocationType(); + + if (isHigherOrderRawPtr(T, Ctx)) + R = ER->getSuperRegion(); + } + + if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R) || isa<CXXThisRegion>(R))) + return StoreRef(store, *this); + + const TypedRegion *TyR = cast<TypedRegion>(R); + + // Do not bind to arrays. We need to explicitly check for this so that + // we do not encounter any weirdness of trying to load/store from arrays. + if (TyR->isBoundable() && TyR->getValueType()->isArrayType()) + return StoreRef(store, *this); + + if (nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(&V)) { + // Only convert 'V' to a location iff the underlying region type + // is a location as well. + // FIXME: We are allowing a store of an arbitrary location to + // a pointer. We may wish to flag a type error here if the types + // are incompatible. This may also cause lots of breakage + // elsewhere. Food for thought. + if (TyR->isBoundable() && Loc::isLocType(TyR->getValueType())) + V = X->getLoc(); + } + + BindingsTy B = GetBindings(store); + return StoreRef(V.isUnknown() + ? VBFactory.remove(B, R).getRoot() + : VBFactory.add(B, R, V).getRoot(), *this); +} + +StoreRef BasicStoreManager::Remove(Store store, Loc loc) { + switch (loc.getSubKind()) { + case loc::MemRegionKind: { + const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion(); + + if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R) || + isa<CXXThisRegion>(R))) + return StoreRef(store, *this); + + return StoreRef(VBFactory.remove(GetBindings(store), R).getRoot(), *this); + } + default: + assert ("Remove for given Loc type not yet implemented."); + return StoreRef(store, *this); + } +} + +StoreRef BasicStoreManager::removeDeadBindings(Store store, + const StackFrameContext *LCtx, + SymbolReaper& SymReaper, + llvm::SmallVectorImpl<const MemRegion*>& RegionRoots) +{ + BindingsTy B = GetBindings(store); + typedef SVal::symbol_iterator symbol_iterator; + + // Iterate over the variable bindings. + for (BindingsTy::iterator I=B.begin(), E=B.end(); I!=E ; ++I) { + if (const VarRegion *VR = dyn_cast<VarRegion>(I.getKey())) { + if (SymReaper.isLive(VR)) + RegionRoots.push_back(VR); + else + continue; + } + else if (isa<ObjCIvarRegion>(I.getKey()) || + isa<NonStaticGlobalSpaceRegion>(I.getKey()) || + isa<CXXThisRegion>(I.getKey())) + RegionRoots.push_back(I.getKey()); + else + continue; + + // Mark the bindings in the data as live. + SVal X = I.getData(); + for (symbol_iterator SI=X.symbol_begin(), SE=X.symbol_end(); SI!=SE; ++SI) + SymReaper.markLive(*SI); + } + + // Scan for live variables and live symbols. + llvm::SmallPtrSet<const MemRegion*, 10> Marked; + + while (!RegionRoots.empty()) { + const MemRegion* MR = RegionRoots.back(); + RegionRoots.pop_back(); + + while (MR) { + if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(MR)) { + SymReaper.markLive(SymR->getSymbol()); + break; + } + else if (isa<VarRegion>(MR) || isa<ObjCIvarRegion>(MR) || + isa<NonStaticGlobalSpaceRegion>(MR) || isa<CXXThisRegion>(MR)) { + if (Marked.count(MR)) + break; + + Marked.insert(MR); + SVal X = Retrieve(store, loc::MemRegionVal(MR)); + + // FIXME: We need to handle symbols nested in region definitions. 
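+ // Mark every symbol referenced by the bound value as live, and chase
+ // any region that the value points to.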
+ for (symbol_iterator SI=X.symbol_begin(),SE=X.symbol_end();SI!=SE;++SI) + SymReaper.markLive(*SI); + + if (!isa<loc::MemRegionVal>(X)) + break; + + const loc::MemRegionVal& LVD = cast<loc::MemRegionVal>(X); + RegionRoots.push_back(LVD.getRegion()); + break; + } + else if (const SubRegion* R = dyn_cast<SubRegion>(MR)) + MR = R->getSuperRegion(); + else + break; + } + } + + // Remove dead variable bindings. + StoreRef newStore(store, *this); + for (BindingsTy::iterator I=B.begin(), E=B.end(); I!=E ; ++I) { + const MemRegion* R = I.getKey(); + + if (!Marked.count(R)) { + newStore = Remove(newStore.getStore(), svalBuilder.makeLoc(R)); + SVal X = I.getData(); + + for (symbol_iterator SI=X.symbol_begin(), SE=X.symbol_end(); SI!=SE; ++SI) + SymReaper.maybeDead(*SI); + } + } + + return newStore; +} + +StoreRef BasicStoreManager::scanForIvars(Stmt *B, const Decl* SelfDecl, + const MemRegion *SelfRegion, + Store St) { + + StoreRef newStore(St, *this); + + for (Stmt::child_iterator CI=B->child_begin(), CE=B->child_end(); + CI != CE; ++CI) { + + if (!*CI) + continue; + + // Check if the statement is an ivar reference. We only + // care about self.ivar. + if (ObjCIvarRefExpr *IV = dyn_cast<ObjCIvarRefExpr>(*CI)) { + const Expr *Base = IV->getBase()->IgnoreParenCasts(); + if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Base)) { + if (DR->getDecl() == SelfDecl) { + const ObjCIvarRegion *IVR = MRMgr.getObjCIvarRegion(IV->getDecl(), + SelfRegion); + SVal X = svalBuilder.getRegionValueSymbolVal(IVR); + newStore = Bind(newStore.getStore(), svalBuilder.makeLoc(IVR), X); + } + } + } + else + newStore = scanForIvars(*CI, SelfDecl, SelfRegion, newStore.getStore()); + } + + return newStore; +} + +StoreRef BasicStoreManager::getInitialStore(const LocationContext *InitLoc) { + // The LiveVariables information already has a compilation of all VarDecls + // used in the function. Iterate through this set, and "symbolicate" + // any VarDecl whose value originally comes from outside the function. + typedef LiveVariables::AnalysisDataTy LVDataTy; + LVDataTy& D = InitLoc->getLiveVariables()->getAnalysisData(); + StoreRef St(VBFactory.getEmptyMap().getRoot(), *this); + + for (LVDataTy::decl_iterator I=D.begin_decl(), E=D.end_decl(); I != E; ++I) { + const NamedDecl* ND = I->first; + + // Handle implicit parameters. + if (const ImplicitParamDecl* PD = dyn_cast<ImplicitParamDecl>(ND)) { + const Decl& CD = *InitLoc->getDecl(); + if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(&CD)) { + if (MD->getSelfDecl() == PD) { + // FIXME: Add type constraints (when they become available) to + // SelfRegion? (i.e., it implements MD->getClassInterface()). + const VarRegion *VR = MRMgr.getVarRegion(PD, InitLoc); + const MemRegion *SelfRegion = + svalBuilder.getRegionValueSymbolVal(VR).getAsRegion(); + assert(SelfRegion); + St = Bind(St.getStore(), svalBuilder.makeLoc(VR), + loc::MemRegionVal(SelfRegion)); + // Scan the method for ivar references. While this requires an + // entire AST scan, the cost should not be high in practice. + St = scanForIvars(MD->getBody(), PD, SelfRegion, St.getStore()); + } + } + } + } + + if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(InitLoc->getDecl())) { + // For C++ methods add symbolic region for 'this' in initial stack frame. 
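+ // 'this' is given a fresh symbolic region value, since nothing is known
+ // about the object the method is invoked on.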
+ QualType ThisT = MD->getThisType(StateMgr.getContext()); + MemRegionManager &RegMgr = svalBuilder.getRegionManager(); + const CXXThisRegion *ThisR = RegMgr.getCXXThisRegion(ThisT, InitLoc); + SVal ThisV = svalBuilder.getRegionValueSymbolVal(ThisR); + St = Bind(St.getStore(), svalBuilder.makeLoc(ThisR), ThisV); + } + + return St; +} + +StoreRef BasicStoreManager::BindDeclInternal(Store store, const VarRegion* VR, + SVal* InitVal) { + + BasicValueFactory& BasicVals = StateMgr.getBasicVals(); + const VarDecl *VD = VR->getDecl(); + StoreRef newStore(store, *this); + + // BasicStore does not model arrays and structs. + if (VD->getType()->isArrayType() || VD->getType()->isStructureOrClassType()) + return newStore; + + if (VD->hasGlobalStorage()) { + // Handle variables with global storage: extern, static, PrivateExtern. + + // FIXME:: static variables may have an initializer, but the second time a + // function is called those values may not be current. Currently, a function + // will not be called more than once. + + // Static global variables should not be visited here. + assert(!(VD->getStorageClass() == SC_Static && + VD->isFileVarDecl())); + + // Process static variables. + if (VD->getStorageClass() == SC_Static) { + // C99: 6.7.8 Initialization + // If an object that has static storage duration is not initialized + // explicitly, then: + // -if it has pointer type, it is initialized to a null pointer; + // -if it has arithmetic type, it is initialized to (positive or + // unsigned) zero; + if (!InitVal) { + QualType T = VD->getType(); + if (Loc::isLocType(T)) + newStore = Bind(store, loc::MemRegionVal(VR), + loc::ConcreteInt(BasicVals.getValue(0, T))); + else if (T->isIntegerType() && T->isScalarType()) + newStore = Bind(store, loc::MemRegionVal(VR), + nonloc::ConcreteInt(BasicVals.getValue(0, T))); + } else { + newStore = Bind(store, loc::MemRegionVal(VR), *InitVal); + } + } + } else { + // Process local scalar variables. + QualType T = VD->getType(); + // BasicStore only supports scalars. + if ((T->isScalarType() || T->isReferenceType()) && + svalBuilder.getSymbolManager().canSymbolicate(T)) { + SVal V = InitVal ? *InitVal : UndefinedVal(); + newStore = Bind(store, loc::MemRegionVal(VR), V); + } + } + + return newStore; +} + +void BasicStoreManager::print(Store store, llvm::raw_ostream& Out, + const char* nl, const char *sep) { + + BindingsTy B = GetBindings(store); + Out << "Variables:" << nl; + + bool isFirst = true; + + for (BindingsTy::iterator I=B.begin(), E=B.end(); I != E; ++I) { + if (isFirst) + isFirst = false; + else + Out << nl; + + Out << ' ' << I.getKey() << " : " << I.getData(); + } +} + + +void BasicStoreManager::iterBindings(Store store, BindingsHandler& f) { + BindingsTy B = GetBindings(store); + + for (BindingsTy::iterator I=B.begin(), E=B.end(); I != E; ++I) + if (!f.HandleBinding(*this, store, I.getKey(), I.getData())) + return; + +} + +StoreManager::BindingsHandler::~BindingsHandler() {} + +//===----------------------------------------------------------------------===// +// Binding invalidation. 
+//===----------------------------------------------------------------------===// + + +StoreRef BasicStoreManager::invalidateRegions(Store store, + const MemRegion * const *I, + const MemRegion * const *End, + const Expr *E, unsigned Count, + InvalidatedSymbols *IS, + bool invalidateGlobals, + InvalidatedRegions *Regions) { + StoreRef newStore(store, *this); + + if (invalidateGlobals) { + BindingsTy B = GetBindings(store); + for (BindingsTy::iterator I=B.begin(), End=B.end(); I != End; ++I) { + const MemRegion *R = I.getKey(); + if (isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace())) + newStore = invalidateRegion(newStore.getStore(), R, E, Count, IS); + } + } + + for ( ; I != End ; ++I) { + const MemRegion *R = *I; + // Don't invalidate globals twice. + if (invalidateGlobals) { + if (isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace())) + continue; + } + newStore = invalidateRegion(newStore.getStore(), *I, E, Count, IS); + if (Regions) + Regions->push_back(R); + } + + // FIXME: This is copy-and-paste from RegionStore.cpp. + if (invalidateGlobals) { + // Bind the non-static globals memory space to a new symbol that we will + // use to derive the bindings for all non-static globals. + const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion(); + SVal V = + svalBuilder.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, E, + /* symbol type, doesn't matter */ Ctx.IntTy, + Count); + + newStore = Bind(newStore.getStore(), loc::MemRegionVal(GS), V); + if (Regions) + Regions->push_back(GS); + } + + return newStore; +} + + +StoreRef BasicStoreManager::invalidateRegion(Store store, + const MemRegion *R, + const Expr *E, + unsigned Count, + InvalidatedSymbols *IS) { + R = R->StripCasts(); + + if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R))) + return StoreRef(store, *this); + + if (IS) { + BindingsTy B = GetBindings(store); + if (BindingsTy::data_type *Val = B.lookup(R)) { + if (SymbolRef Sym = Val->getAsSymbol()) + IS->insert(Sym); + } + } + + QualType T = cast<TypedRegion>(R)->getValueType(); + SVal V = svalBuilder.getConjuredSymbolVal(R, E, T, Count); + return Bind(store, loc::MemRegionVal(R), V); +} diff --git a/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/lib/StaticAnalyzer/Core/BasicValueFactory.cpp new file mode 100644 index 0000000..6315d83 --- /dev/null +++ b/lib/StaticAnalyzer/Core/BasicValueFactory.cpp @@ -0,0 +1,288 @@ +//=== BasicValueFactory.cpp - Basic values for Path Sens analysis --*- C++ -*-// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines BasicValueFactory, a class that manages the lifetime +// of APSInt objects and symbolic constraints used by ExprEngine +// and related classes. 
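+// Each distinct value is uniqued in a FoldingSet and allocated once from a
+// BumpPtrAllocator, so clients can hold and compare them by reference.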
+// +//===----------------------------------------------------------------------===// + +#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h" + +using namespace clang; +using namespace ento; + +void CompoundValData::Profile(llvm::FoldingSetNodeID& ID, QualType T, + llvm::ImmutableList<SVal> L) { + T.Profile(ID); + ID.AddPointer(L.getInternalPointer()); +} + +void LazyCompoundValData::Profile(llvm::FoldingSetNodeID& ID, + const void *store,const TypedRegion *region) { + ID.AddPointer(store); + ID.AddPointer(region); +} + +typedef std::pair<SVal, uintptr_t> SValData; +typedef std::pair<SVal, SVal> SValPair; + +namespace llvm { +template<> struct FoldingSetTrait<SValData> { + static inline void Profile(const SValData& X, llvm::FoldingSetNodeID& ID) { + X.first.Profile(ID); + ID.AddPointer( (void*) X.second); + } +}; + +template<> struct FoldingSetTrait<SValPair> { + static inline void Profile(const SValPair& X, llvm::FoldingSetNodeID& ID) { + X.first.Profile(ID); + X.second.Profile(ID); + } +}; +} + +typedef llvm::FoldingSet<llvm::FoldingSetNodeWrapper<SValData> > + PersistentSValsTy; + +typedef llvm::FoldingSet<llvm::FoldingSetNodeWrapper<SValPair> > + PersistentSValPairsTy; + +BasicValueFactory::~BasicValueFactory() { + // Note that the destructor for the contents of APSIntSet will never be + // called, so we iterate over the set and invoke the destructor for each + // APSInt. This frees the auxiliary memory allocated to represent very + // large constants. + for (APSIntSetTy::iterator I=APSIntSet.begin(), E=APSIntSet.end(); I!=E; ++I) + I->getValue().~APSInt(); + + delete (PersistentSValsTy*) PersistentSVals; + delete (PersistentSValPairsTy*) PersistentSValPairs; +} + +const llvm::APSInt& BasicValueFactory::getValue(const llvm::APSInt& X) { + llvm::FoldingSetNodeID ID; + void* InsertPos; + typedef llvm::FoldingSetNodeWrapper<llvm::APSInt> FoldNodeTy; + + X.Profile(ID); + FoldNodeTy* P = APSIntSet.FindNodeOrInsertPos(ID, InsertPos); + + if (!P) { + P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>(); + new (P) FoldNodeTy(X); + APSIntSet.InsertNode(P, InsertPos); + } + + return *P; +} + +const llvm::APSInt& BasicValueFactory::getValue(const llvm::APInt& X, + bool isUnsigned) { + llvm::APSInt V(X, isUnsigned); + return getValue(V); +} + +const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, unsigned BitWidth, + bool isUnsigned) { + llvm::APSInt V(BitWidth, isUnsigned); + V = X; + return getValue(V); +} + +const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, QualType T) { + + unsigned bits = Ctx.getTypeSize(T); + llvm::APSInt V(bits, T->isUnsignedIntegerType() || Loc::isLocType(T)); + V = X; + return getValue(V); +} + +const CompoundValData* +BasicValueFactory::getCompoundValData(QualType T, + llvm::ImmutableList<SVal> Vals) { + + llvm::FoldingSetNodeID ID; + CompoundValData::Profile(ID, T, Vals); + void* InsertPos; + + CompoundValData* D = CompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos); + + if (!D) { + D = (CompoundValData*) BPAlloc.Allocate<CompoundValData>(); + new (D) CompoundValData(T, Vals); + CompoundValDataSet.InsertNode(D, InsertPos); + } + + return D; +} + +const LazyCompoundValData* +BasicValueFactory::getLazyCompoundValData(const void *store, + const TypedRegion *region) { + llvm::FoldingSetNodeID ID; + LazyCompoundValData::Profile(ID, store, region); + void* InsertPos; + + LazyCompoundValData *D = + LazyCompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos); + + if (!D) { + D = (LazyCompoundValData*) BPAlloc.Allocate<LazyCompoundValData>(); + new (D)
LazyCompoundValData(store, region); + LazyCompoundValDataSet.InsertNode(D, InsertPos); + } + + return D; +} + +const llvm::APSInt* +BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op, + const llvm::APSInt& V1, const llvm::APSInt& V2) { + + switch (Op) { + default: + assert (false && "Invalid Opcode."); + + case BO_Mul: + return &getValue( V1 * V2 ); + + case BO_Div: + return &getValue( V1 / V2 ); + + case BO_Rem: + return &getValue( V1 % V2 ); + + case BO_Add: + return &getValue( V1 + V2 ); + + case BO_Sub: + return &getValue( V1 - V2 ); + + case BO_Shl: { + + // FIXME: This logic should probably go higher up, where we can + // test these conditions symbolically. + + // FIXME: Expand these checks to include all undefined behavior. + + if (V2.isSigned() && V2.isNegative()) + return NULL; + + uint64_t Amt = V2.getZExtValue(); + + if (Amt > V1.getBitWidth()) + return NULL; + + return &getValue( V1.operator<<( (unsigned) Amt )); + } + + case BO_Shr: { + + // FIXME: This logic should probably go higher up, where we can + // test these conditions symbolically. + + // FIXME: Expand these checks to include all undefined behavior. + + if (V2.isSigned() && V2.isNegative()) + return NULL; + + uint64_t Amt = V2.getZExtValue(); + + if (Amt > V1.getBitWidth()) + return NULL; + + return &getValue( V1.operator>>( (unsigned) Amt )); + } + + case BO_LT: + return &getTruthValue( V1 < V2 ); + + case BO_GT: + return &getTruthValue( V1 > V2 ); + + case BO_LE: + return &getTruthValue( V1 <= V2 ); + + case BO_GE: + return &getTruthValue( V1 >= V2 ); + + case BO_EQ: + return &getTruthValue( V1 == V2 ); + + case BO_NE: + return &getTruthValue( V1 != V2 ); + + // Note: LAnd, LOr, Comma are handled specially by higher-level logic. + + case BO_And: + return &getValue( V1 & V2 ); + + case BO_Or: + return &getValue( V1 | V2 ); + + case BO_Xor: + return &getValue( V1 ^ V2 ); + } +} + + +const std::pair<SVal, uintptr_t>& +BasicValueFactory::getPersistentSValWithData(const SVal& V, uintptr_t Data) { + + // Lazily create the folding set. + if (!PersistentSVals) PersistentSVals = new PersistentSValsTy(); + + llvm::FoldingSetNodeID ID; + void* InsertPos; + V.Profile(ID); + ID.AddPointer((void*) Data); + + PersistentSValsTy& Map = *((PersistentSValsTy*) PersistentSVals); + + typedef llvm::FoldingSetNodeWrapper<SValData> FoldNodeTy; + FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos); + + if (!P) { + P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>(); + new (P) FoldNodeTy(std::make_pair(V, Data)); + Map.InsertNode(P, InsertPos); + } + + return P->getValue(); +} + +const std::pair<SVal, SVal>& +BasicValueFactory::getPersistentSValPair(const SVal& V1, const SVal& V2) { + + // Lazily create the folding set. 
+ if (!PersistentSValPairs) PersistentSValPairs = new PersistentSValPairsTy(); + + llvm::FoldingSetNodeID ID; + void* InsertPos; + V1.Profile(ID); + V2.Profile(ID); + + PersistentSValPairsTy& Map = *((PersistentSValPairsTy*) PersistentSValPairs); + + typedef llvm::FoldingSetNodeWrapper<SValPair> FoldNodeTy; + FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos); + + if (!P) { + P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>(); + new (P) FoldNodeTy(std::make_pair(V1, V2)); + Map.InsertNode(P, InsertPos); + } + + return P->getValue(); +} + +const SVal* BasicValueFactory::getPersistentSVal(SVal X) { + return &getPersistentSValWithData(X, 0).first; +} diff --git a/lib/StaticAnalyzer/Core/BlockCounter.cpp b/lib/StaticAnalyzer/Core/BlockCounter.cpp new file mode 100644 index 0000000..ed52b6b --- /dev/null +++ b/lib/StaticAnalyzer/Core/BlockCounter.cpp @@ -0,0 +1,86 @@ +//==- BlockCounter.h - ADT for counting block visits -------------*- C++ -*-// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines BlockCounter, an abstract data type used to count +// the number of times a given block has been visited along a path +// analyzed by CoreEngine. +// +//===----------------------------------------------------------------------===// + +#include "clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h" +#include "llvm/ADT/ImmutableMap.h" + +using namespace clang; +using namespace ento; + +namespace { + +class CountKey { + const StackFrameContext *CallSite; + unsigned BlockID; + +public: + CountKey(const StackFrameContext *CS, unsigned ID) + : CallSite(CS), BlockID(ID) {} + + bool operator==(const CountKey &RHS) const { + return (CallSite == RHS.CallSite) && (BlockID == RHS.BlockID); + } + + bool operator<(const CountKey &RHS) const { + return (CallSite == RHS.CallSite) ? (BlockID < RHS.BlockID) + : (CallSite < RHS.CallSite); + } + + void Profile(llvm::FoldingSetNodeID &ID) const { + ID.AddPointer(CallSite); + ID.AddInteger(BlockID); + } +}; + +} + +typedef llvm::ImmutableMap<CountKey, unsigned> CountMap; + +static inline CountMap GetMap(void* D) { + return CountMap(static_cast<CountMap::TreeTy*>(D)); +} + +static inline CountMap::Factory& GetFactory(void* F) { + return *static_cast<CountMap::Factory*>(F); +} + +unsigned BlockCounter::getNumVisited(const StackFrameContext *CallSite, + unsigned BlockID) const { + CountMap M = GetMap(Data); + CountMap::data_type* T = M.lookup(CountKey(CallSite, BlockID)); + return T ? 
*T : 0; +} + +BlockCounter::Factory::Factory(llvm::BumpPtrAllocator& Alloc) { + F = new CountMap::Factory(Alloc); +} + +BlockCounter::Factory::~Factory() { + delete static_cast<CountMap::Factory*>(F); +} + +BlockCounter +BlockCounter::Factory::IncrementCount(BlockCounter BC, + const StackFrameContext *CallSite, + unsigned BlockID) { + return BlockCounter(GetFactory(F).add(GetMap(BC.Data), + CountKey(CallSite, BlockID), + BC.getNumVisited(CallSite, BlockID)+1).getRoot()); +} + +BlockCounter +BlockCounter::Factory::GetEmptyCounter() { + return BlockCounter(GetFactory(F).getEmptyMap().getRoot()); +} diff --git a/lib/StaticAnalyzer/Core/BugReporter.cpp b/lib/StaticAnalyzer/Core/BugReporter.cpp new file mode 100644 index 0000000..9a84045 --- /dev/null +++ b/lib/StaticAnalyzer/Core/BugReporter.cpp @@ -0,0 +1,1896 @@ +// BugReporter.cpp - Generate PathDiagnostics for Bugs ------------*- C++ -*--// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines BugReporter, a utility class for generating +// PathDiagnostics. +// +//===----------------------------------------------------------------------===// + +#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h" +#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h" +#include "clang/AST/ASTContext.h" +#include "clang/Analysis/CFG.h" +#include "clang/AST/Expr.h" +#include "clang/AST/ParentMap.h" +#include "clang/AST/StmtObjC.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Analysis/ProgramPoint.h" +#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/OwningPtr.h" +#include <queue> + +using namespace clang; +using namespace ento; + +BugReporterVisitor::~BugReporterVisitor() {} +BugReporterContext::~BugReporterContext() { + for (visitor_iterator I = visitor_begin(), E = visitor_end(); I != E; ++I) + if ((*I)->isOwnedByReporterContext()) delete *I; +} + +void BugReporterContext::addVisitor(BugReporterVisitor* visitor) { + if (!visitor) + return; + + llvm::FoldingSetNodeID ID; + visitor->Profile(ID); + void *InsertPos; + + if (CallbacksSet.FindNodeOrInsertPos(ID, InsertPos)) { + delete visitor; + return; + } + + CallbacksSet.InsertNode(visitor, InsertPos); + Callbacks = F.add(visitor, Callbacks); +} + +//===----------------------------------------------------------------------===// +// Helper routines for walking the ExplodedGraph and fetching statements. +//===----------------------------------------------------------------------===// + +static inline const Stmt* GetStmt(const ProgramPoint &P) { + if (const StmtPoint* SP = dyn_cast<StmtPoint>(&P)) + return SP->getStmt(); + else if (const BlockEdge* BE = dyn_cast<BlockEdge>(&P)) + return BE->getSrc()->getTerminator(); + + return 0; +} + +static inline const ExplodedNode* +GetPredecessorNode(const ExplodedNode* N) { + return N->pred_empty() ? NULL : *(N->pred_begin()); +} + +static inline const ExplodedNode* +GetSuccessorNode(const ExplodedNode* N) { + return N->succ_empty() ? 
NULL : *(N->succ_begin()); +} + +static const Stmt* GetPreviousStmt(const ExplodedNode* N) { + for (N = GetPredecessorNode(N); N; N = GetPredecessorNode(N)) + if (const Stmt *S = GetStmt(N->getLocation())) + return S; + + return 0; +} + +static const Stmt* GetNextStmt(const ExplodedNode* N) { + for (N = GetSuccessorNode(N); N; N = GetSuccessorNode(N)) + if (const Stmt *S = GetStmt(N->getLocation())) { + // Check if the statement is '?' or '&&'/'||'. These are "merges", + // not actual statement points. + switch (S->getStmtClass()) { + case Stmt::ChooseExprClass: + case Stmt::BinaryConditionalOperatorClass: continue; + case Stmt::ConditionalOperatorClass: continue; + case Stmt::BinaryOperatorClass: { + BinaryOperatorKind Op = cast<BinaryOperator>(S)->getOpcode(); + if (Op == BO_LAnd || Op == BO_LOr) + continue; + break; + } + default: + break; + } + + // Some expressions don't have locations. + if (S->getLocStart().isInvalid()) + continue; + + return S; + } + + return 0; +} + +static inline const Stmt* +GetCurrentOrPreviousStmt(const ExplodedNode* N) { + if (const Stmt *S = GetStmt(N->getLocation())) + return S; + + return GetPreviousStmt(N); +} + +static inline const Stmt* +GetCurrentOrNextStmt(const ExplodedNode* N) { + if (const Stmt *S = GetStmt(N->getLocation())) + return S; + + return GetNextStmt(N); +} + +//===----------------------------------------------------------------------===// +// PathDiagnosticBuilder and its associated routines and helper objects. +//===----------------------------------------------------------------------===// + +typedef llvm::DenseMap<const ExplodedNode*, +const ExplodedNode*> NodeBackMap; + +namespace { +class NodeMapClosure : public BugReport::NodeResolver { + NodeBackMap& M; +public: + NodeMapClosure(NodeBackMap *m) : M(*m) {} + ~NodeMapClosure() {} + + const ExplodedNode* getOriginalNode(const ExplodedNode* N) { + NodeBackMap::iterator I = M.find(N); + return I == M.end() ? 0 : I->second; + } +}; + +class PathDiagnosticBuilder : public BugReporterContext { + BugReport *R; + PathDiagnosticClient *PDC; + llvm::OwningPtr<ParentMap> PM; + NodeMapClosure NMC; +public: + PathDiagnosticBuilder(GRBugReporter &br, + BugReport *r, NodeBackMap *Backmap, + PathDiagnosticClient *pdc) + : BugReporterContext(br), + R(r), PDC(pdc), NMC(Backmap) { + addVisitor(R); + } + + PathDiagnosticLocation ExecutionContinues(const ExplodedNode* N); + + PathDiagnosticLocation ExecutionContinues(llvm::raw_string_ostream& os, + const ExplodedNode* N); + + Decl const &getCodeDecl() { return R->getErrorNode()->getCodeDecl(); } + + ParentMap& getParentMap() { return R->getErrorNode()->getParentMap(); } + + const Stmt *getParent(const Stmt *S) { + return getParentMap().getParent(S); + } + + virtual NodeMapClosure& getNodeResolver() { return NMC; } + + PathDiagnosticLocation getEnclosingStmtLocation(const Stmt *S); + + PathDiagnosticClient::PathGenerationScheme getGenerationScheme() const { + return PDC ? PDC->getGenerationScheme() : PathDiagnosticClient::Extensive; + } + + bool supportsLogicalOpControlFlow() const { + return PDC ? 
PDC->supportsLogicalOpControlFlow() : true; + } +}; +} // end anonymous namespace + +PathDiagnosticLocation +PathDiagnosticBuilder::ExecutionContinues(const ExplodedNode* N) { + if (const Stmt *S = GetNextStmt(N)) + return PathDiagnosticLocation(S, getSourceManager()); + + return FullSourceLoc(N->getLocationContext()->getDecl()->getBodyRBrace(), + getSourceManager()); +} + +PathDiagnosticLocation +PathDiagnosticBuilder::ExecutionContinues(llvm::raw_string_ostream& os, + const ExplodedNode* N) { + + // Slow, but probably doesn't matter. + if (os.str().empty()) + os << ' '; + + const PathDiagnosticLocation &Loc = ExecutionContinues(N); + + if (Loc.asStmt()) + os << "Execution continues on line " + << getSourceManager().getInstantiationLineNumber(Loc.asLocation()) + << '.'; + else { + os << "Execution jumps to the end of the "; + const Decl *D = N->getLocationContext()->getDecl(); + if (isa<ObjCMethodDecl>(D)) + os << "method"; + else if (isa<FunctionDecl>(D)) + os << "function"; + else { + assert(isa<BlockDecl>(D)); + os << "anonymous block"; + } + os << '.'; + } + + return Loc; +} + +static bool IsNested(const Stmt *S, ParentMap &PM) { + if (isa<Expr>(S) && PM.isConsumedExpr(cast<Expr>(S))) + return true; + + const Stmt *Parent = PM.getParentIgnoreParens(S); + + if (Parent) + switch (Parent->getStmtClass()) { + case Stmt::ForStmtClass: + case Stmt::DoStmtClass: + case Stmt::WhileStmtClass: + return true; + default: + break; + } + + return false; +} + +PathDiagnosticLocation +PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) { + assert(S && "Null Stmt* passed to getEnclosingStmtLocation"); + ParentMap &P = getParentMap(); + SourceManager &SMgr = getSourceManager(); + + while (IsNested(S, P)) { + const Stmt *Parent = P.getParentIgnoreParens(S); + + if (!Parent) + break; + + switch (Parent->getStmtClass()) { + case Stmt::BinaryOperatorClass: { + const BinaryOperator *B = cast<BinaryOperator>(Parent); + if (B->isLogicalOp()) + return PathDiagnosticLocation(S, SMgr); + break; + } + case Stmt::CompoundStmtClass: + case Stmt::StmtExprClass: + return PathDiagnosticLocation(S, SMgr); + case Stmt::ChooseExprClass: + // Similar to '?' if we are referring to condition, just have the edge + // point to the entire choose expression. + if (cast<ChooseExpr>(Parent)->getCond() == S) + return PathDiagnosticLocation(Parent, SMgr); + else + return PathDiagnosticLocation(S, SMgr); + case Stmt::BinaryConditionalOperatorClass: + case Stmt::ConditionalOperatorClass: + // For '?', if we are referring to condition, just have the edge point + // to the entire '?' expression. 
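+ // (e.g. for 'c ? a : b', an edge into 'c' is attributed to the whole
+ // conditional expression)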
+ if (cast<AbstractConditionalOperator>(Parent)->getCond() == S) + return PathDiagnosticLocation(Parent, SMgr); + else + return PathDiagnosticLocation(S, SMgr); + case Stmt::DoStmtClass: + return PathDiagnosticLocation(S, SMgr); + case Stmt::ForStmtClass: + if (cast<ForStmt>(Parent)->getBody() == S) + return PathDiagnosticLocation(S, SMgr); + break; + case Stmt::IfStmtClass: + if (cast<IfStmt>(Parent)->getCond() != S) + return PathDiagnosticLocation(S, SMgr); + break; + case Stmt::ObjCForCollectionStmtClass: + if (cast<ObjCForCollectionStmt>(Parent)->getBody() == S) + return PathDiagnosticLocation(S, SMgr); + break; + case Stmt::WhileStmtClass: + if (cast<WhileStmt>(Parent)->getCond() != S) + return PathDiagnosticLocation(S, SMgr); + break; + default: + break; + } + + S = Parent; + } + + assert(S && "Cannot have null Stmt for PathDiagnosticLocation"); + + // Special case: DeclStmts can appear in for statement declarations, in which + // case the ForStmt is the context. + if (isa<DeclStmt>(S)) { + if (const Stmt *Parent = P.getParent(S)) { + switch (Parent->getStmtClass()) { + case Stmt::ForStmtClass: + case Stmt::ObjCForCollectionStmtClass: + return PathDiagnosticLocation(Parent, SMgr); + default: + break; + } + } + } + else if (isa<BinaryOperator>(S)) { + // Special case: the binary operator represents the initialization + // code in a for statement (this can happen when the variable being + // initialized is an old variable. + if (const ForStmt *FS = + dyn_cast_or_null<ForStmt>(P.getParentIgnoreParens(S))) { + if (FS->getInit() == S) + return PathDiagnosticLocation(FS, SMgr); + } + } + + return PathDiagnosticLocation(S, SMgr); +} + +//===----------------------------------------------------------------------===// +// ScanNotableSymbols: closure-like callback for scanning Store bindings. +//===----------------------------------------------------------------------===// + +static const VarDecl* +GetMostRecentVarDeclBinding(const ExplodedNode* N, + GRStateManager& VMgr, SVal X) { + + for ( ; N ; N = N->pred_empty() ? 0 : *N->pred_begin()) { + + ProgramPoint P = N->getLocation(); + + if (!isa<PostStmt>(P)) + continue; + + const DeclRefExpr* DR = dyn_cast<DeclRefExpr>(cast<PostStmt>(P).getStmt()); + + if (!DR) + continue; + + SVal Y = N->getState()->getSVal(DR); + + if (X != Y) + continue; + + const VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl()); + + if (!VD) + continue; + + return VD; + } + + return 0; +} + +namespace { +class NotableSymbolHandler +: public StoreManager::BindingsHandler { + + SymbolRef Sym; + const GRState* PrevSt; + const Stmt* S; + GRStateManager& VMgr; + const ExplodedNode* Pred; + PathDiagnostic& PD; + BugReporter& BR; + +public: + + NotableSymbolHandler(SymbolRef sym, const GRState* prevst, const Stmt* s, + GRStateManager& vmgr, const ExplodedNode* pred, + PathDiagnostic& pd, BugReporter& br) + : Sym(sym), PrevSt(prevst), S(s), VMgr(vmgr), Pred(pred), PD(pd), BR(br) {} + + bool HandleBinding(StoreManager& SMgr, Store store, const MemRegion* R, + SVal V) { + + SymbolRef ScanSym = V.getAsSymbol(); + + if (ScanSym != Sym) + return true; + + // Check if the previous state has this binding. + SVal X = PrevSt->getSVal(loc::MemRegionVal(R)); + + if (X == V) // Same binding? + return true; + + // Different binding. Only handle assignments for now. We don't pull + // this check out of the loop because we will eventually handle other + // cases. 
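+ // Two assignment forms are recognized below: binary assignment
+ // operators and DeclStmt initializations.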
+ + VarDecl *VD = 0; + + if (const BinaryOperator* B = dyn_cast<BinaryOperator>(S)) { + if (!B->isAssignmentOp()) + return true; + + // What variable did we assign to? + DeclRefExpr* DR = dyn_cast<DeclRefExpr>(B->getLHS()->IgnoreParenCasts()); + + if (!DR) + return true; + + VD = dyn_cast<VarDecl>(DR->getDecl()); + } + else if (const DeclStmt* DS = dyn_cast<DeclStmt>(S)) { + // FIXME: Eventually CFGs won't have DeclStmts. Right now we + // assume that each DeclStmt has a single Decl. This invariant + // holds by contruction in the CFG. + VD = dyn_cast<VarDecl>(*DS->decl_begin()); + } + + if (!VD) + return true; + + // What is the most recently referenced variable with this binding? + const VarDecl* MostRecent = GetMostRecentVarDeclBinding(Pred, VMgr, V); + + if (!MostRecent) + return true; + + // Create the diagnostic. + FullSourceLoc L(S->getLocStart(), BR.getSourceManager()); + + if (Loc::isLocType(VD->getType())) { + std::string msg = "'" + std::string(VD->getNameAsString()) + + "' now aliases '" + MostRecent->getNameAsString() + "'"; + + PD.push_front(new PathDiagnosticEventPiece(L, msg)); + } + + return true; + } +}; +} + +static void HandleNotableSymbol(const ExplodedNode* N, + const Stmt* S, + SymbolRef Sym, BugReporter& BR, + PathDiagnostic& PD) { + + const ExplodedNode* Pred = N->pred_empty() ? 0 : *N->pred_begin(); + const GRState* PrevSt = Pred ? Pred->getState() : 0; + + if (!PrevSt) + return; + + // Look at the region bindings of the current state that map to the + // specified symbol. Are any of them not in the previous state? + GRStateManager& VMgr = cast<GRBugReporter>(BR).getStateManager(); + NotableSymbolHandler H(Sym, PrevSt, S, VMgr, Pred, PD, BR); + cast<GRBugReporter>(BR).getStateManager().iterBindings(N->getState(), H); +} + +namespace { +class ScanNotableSymbols +: public StoreManager::BindingsHandler { + + llvm::SmallSet<SymbolRef, 10> AlreadyProcessed; + const ExplodedNode* N; + const Stmt* S; + GRBugReporter& BR; + PathDiagnostic& PD; + +public: + ScanNotableSymbols(const ExplodedNode* n, const Stmt* s, + GRBugReporter& br, PathDiagnostic& pd) + : N(n), S(s), BR(br), PD(pd) {} + + bool HandleBinding(StoreManager& SMgr, Store store, + const MemRegion* R, SVal V) { + + SymbolRef ScanSym = V.getAsSymbol(); + + if (!ScanSym) + return true; + + if (!BR.isNotable(ScanSym)) + return true; + + if (AlreadyProcessed.count(ScanSym)) + return true; + + AlreadyProcessed.insert(ScanSym); + + HandleNotableSymbol(N, S, ScanSym, BR, PD); + return true; + } +}; +} // end anonymous namespace + +//===----------------------------------------------------------------------===// +// "Minimal" path diagnostic generation algorithm. +//===----------------------------------------------------------------------===// + +static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM); + +static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD, + PathDiagnosticBuilder &PDB, + const ExplodedNode *N) { + + SourceManager& SMgr = PDB.getSourceManager(); + const ExplodedNode* NextNode = N->pred_empty() + ? 
NULL : *(N->pred_begin()); + while (NextNode) { + N = NextNode; + NextNode = GetPredecessorNode(N); + + ProgramPoint P = N->getLocation(); + + if (const BlockEdge* BE = dyn_cast<BlockEdge>(&P)) { + const CFGBlock* Src = BE->getSrc(); + const CFGBlock* Dst = BE->getDst(); + const Stmt* T = Src->getTerminator(); + + if (!T) + continue; + + FullSourceLoc Start(T->getLocStart(), SMgr); + + switch (T->getStmtClass()) { + default: + break; + + case Stmt::GotoStmtClass: + case Stmt::IndirectGotoStmtClass: { + const Stmt* S = GetNextStmt(N); + + if (!S) + continue; + + std::string sbuf; + llvm::raw_string_ostream os(sbuf); + const PathDiagnosticLocation &End = PDB.getEnclosingStmtLocation(S); + + os << "Control jumps to line " + << End.asLocation().getInstantiationLineNumber(); + PD.push_front(new PathDiagnosticControlFlowPiece(Start, End, + os.str())); + break; + } + + case Stmt::SwitchStmtClass: { + // Figure out what case arm we took. + std::string sbuf; + llvm::raw_string_ostream os(sbuf); + + if (const Stmt* S = Dst->getLabel()) { + PathDiagnosticLocation End(S, SMgr); + + switch (S->getStmtClass()) { + default: + os << "No cases match in the switch statement. " + "Control jumps to line " + << End.asLocation().getInstantiationLineNumber(); + break; + case Stmt::DefaultStmtClass: + os << "Control jumps to the 'default' case at line " + << End.asLocation().getInstantiationLineNumber(); + break; + + case Stmt::CaseStmtClass: { + os << "Control jumps to 'case "; + const CaseStmt* Case = cast<CaseStmt>(S); + const Expr* LHS = Case->getLHS()->IgnoreParenCasts(); + + // Determine if it is an enum. + bool GetRawInt = true; + + if (const DeclRefExpr* DR = dyn_cast<DeclRefExpr>(LHS)) { + // FIXME: Maybe this should be an assertion. Are there cases + // were it is not an EnumConstantDecl? + const EnumConstantDecl* D = + dyn_cast<EnumConstantDecl>(DR->getDecl()); + + if (D) { + GetRawInt = false; + os << D; + } + } + + if (GetRawInt) + os << LHS->EvaluateAsInt(PDB.getASTContext()); + + os << ":' at line " + << End.asLocation().getInstantiationLineNumber(); + break; + } + } + PD.push_front(new PathDiagnosticControlFlowPiece(Start, End, + os.str())); + } + else { + os << "'Default' branch taken. "; + const PathDiagnosticLocation &End = PDB.ExecutionContinues(os, N); + PD.push_front(new PathDiagnosticControlFlowPiece(Start, End, + os.str())); + } + + break; + } + + case Stmt::BreakStmtClass: + case Stmt::ContinueStmtClass: { + std::string sbuf; + llvm::raw_string_ostream os(sbuf); + PathDiagnosticLocation End = PDB.ExecutionContinues(os, N); + PD.push_front(new PathDiagnosticControlFlowPiece(Start, End, + os.str())); + break; + } + + // Determine control-flow for ternary '?'. + case Stmt::BinaryConditionalOperatorClass: + case Stmt::ConditionalOperatorClass: { + std::string sbuf; + llvm::raw_string_ostream os(sbuf); + os << "'?' condition is "; + + if (*(Src->succ_begin()+1) == Dst) + os << "false"; + else + os << "true"; + + PathDiagnosticLocation End = PDB.ExecutionContinues(N); + + if (const Stmt *S = End.asStmt()) + End = PDB.getEnclosingStmtLocation(S); + + PD.push_front(new PathDiagnosticControlFlowPiece(Start, End, + os.str())); + break; + } + + // Determine control-flow for short-circuited '&&' and '||'. 
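+      // For example, for 'if (p && p->x)' this produces the note
+      // "Left side of '&&' is false" when 'p' evaluates to false.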
+ case Stmt::BinaryOperatorClass: { + if (!PDB.supportsLogicalOpControlFlow()) + break; + + const BinaryOperator *B = cast<BinaryOperator>(T); + std::string sbuf; + llvm::raw_string_ostream os(sbuf); + os << "Left side of '"; + + if (B->getOpcode() == BO_LAnd) { + os << "&&" << "' is "; + + if (*(Src->succ_begin()+1) == Dst) { + os << "false"; + PathDiagnosticLocation End(B->getLHS(), SMgr); + PathDiagnosticLocation Start(B->getOperatorLoc(), SMgr); + PD.push_front(new PathDiagnosticControlFlowPiece(Start, End, + os.str())); + } + else { + os << "true"; + PathDiagnosticLocation Start(B->getLHS(), SMgr); + PathDiagnosticLocation End = PDB.ExecutionContinues(N); + PD.push_front(new PathDiagnosticControlFlowPiece(Start, End, + os.str())); + } + } + else { + assert(B->getOpcode() == BO_LOr); + os << "||" << "' is "; + + if (*(Src->succ_begin()+1) == Dst) { + os << "false"; + PathDiagnosticLocation Start(B->getLHS(), SMgr); + PathDiagnosticLocation End = PDB.ExecutionContinues(N); + PD.push_front(new PathDiagnosticControlFlowPiece(Start, End, + os.str())); + } + else { + os << "true"; + PathDiagnosticLocation End(B->getLHS(), SMgr); + PathDiagnosticLocation Start(B->getOperatorLoc(), SMgr); + PD.push_front(new PathDiagnosticControlFlowPiece(Start, End, + os.str())); + } + } + + break; + } + + case Stmt::DoStmtClass: { + if (*(Src->succ_begin()) == Dst) { + std::string sbuf; + llvm::raw_string_ostream os(sbuf); + + os << "Loop condition is true. "; + PathDiagnosticLocation End = PDB.ExecutionContinues(os, N); + + if (const Stmt *S = End.asStmt()) + End = PDB.getEnclosingStmtLocation(S); + + PD.push_front(new PathDiagnosticControlFlowPiece(Start, End, + os.str())); + } + else { + PathDiagnosticLocation End = PDB.ExecutionContinues(N); + + if (const Stmt *S = End.asStmt()) + End = PDB.getEnclosingStmtLocation(S); + + PD.push_front(new PathDiagnosticControlFlowPiece(Start, End, + "Loop condition is false. Exiting loop")); + } + + break; + } + + case Stmt::WhileStmtClass: + case Stmt::ForStmtClass: { + if (*(Src->succ_begin()+1) == Dst) { + std::string sbuf; + llvm::raw_string_ostream os(sbuf); + + os << "Loop condition is false. "; + PathDiagnosticLocation End = PDB.ExecutionContinues(os, N); + if (const Stmt *S = End.asStmt()) + End = PDB.getEnclosingStmtLocation(S); + + PD.push_front(new PathDiagnosticControlFlowPiece(Start, End, + os.str())); + } + else { + PathDiagnosticLocation End = PDB.ExecutionContinues(N); + if (const Stmt *S = End.asStmt()) + End = PDB.getEnclosingStmtLocation(S); + + PD.push_front(new PathDiagnosticControlFlowPiece(Start, End, + "Loop condition is true. Entering loop body")); + } + + break; + } + + case Stmt::IfStmtClass: { + PathDiagnosticLocation End = PDB.ExecutionContinues(N); + + if (const Stmt *S = End.asStmt()) + End = PDB.getEnclosingStmtLocation(S); + + if (*(Src->succ_begin()+1) == Dst) + PD.push_front(new PathDiagnosticControlFlowPiece(Start, End, + "Taking false branch")); + else + PD.push_front(new PathDiagnosticControlFlowPiece(Start, End, + "Taking true branch")); + + break; + } + } + } + + if (NextNode) { + for (BugReporterContext::visitor_iterator I = PDB.visitor_begin(), + E = PDB.visitor_end(); I!=E; ++I) { + if (PathDiagnosticPiece* p = (*I)->VisitNode(N, NextNode, PDB)) + PD.push_front(p); + } + } + + if (const PostStmt* PS = dyn_cast<PostStmt>(&P)) { + // Scan the region bindings, and see if a "notable" symbol has a new + // lval binding. 
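+      // A new binding typically comes from an assignment such as 'p = q;',
+      // for which NotableSymbolHandler emits a "'p' now aliases 'q'" event.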
+      ScanNotableSymbols SNS(N, PS->getStmt(), PDB.getBugReporter(), PD);
+      PDB.getStateManager().iterBindings(N->getState(), SNS);
+    }
+  }
+
+  // After constructing the full PathDiagnostic, do a pass over it to compact
+  // PathDiagnosticPieces that occur within a macro.
+  CompactPathDiagnostic(PD, PDB.getSourceManager());
+}
+
+//===----------------------------------------------------------------------===//
+// "Extensive" PathDiagnostic generation.
+//===----------------------------------------------------------------------===//
+
+static bool IsControlFlowExpr(const Stmt *S) {
+  const Expr *E = dyn_cast<Expr>(S);
+
+  if (!E)
+    return false;
+
+  E = E->IgnoreParenCasts();
+
+  if (isa<AbstractConditionalOperator>(E))
+    return true;
+
+  if (const BinaryOperator *B = dyn_cast<BinaryOperator>(E))
+    if (B->isLogicalOp())
+      return true;
+
+  return false;
+}
+
+namespace {
+class ContextLocation : public PathDiagnosticLocation {
+  bool IsDead;
+public:
+  ContextLocation(const PathDiagnosticLocation &L, bool isdead = false)
+    : PathDiagnosticLocation(L), IsDead(isdead) {}
+
+  void markDead() { IsDead = true; }
+  bool isDead() const { return IsDead; }
+};
+
+class EdgeBuilder {
+  std::vector<ContextLocation> CLocs;
+  typedef std::vector<ContextLocation>::iterator iterator;
+  PathDiagnostic &PD;
+  PathDiagnosticBuilder &PDB;
+  PathDiagnosticLocation PrevLoc;
+
+  bool IsConsumedExpr(const PathDiagnosticLocation &L);
+
+  bool containsLocation(const PathDiagnosticLocation &Container,
+                        const PathDiagnosticLocation &Containee);
+
+  PathDiagnosticLocation getContextLocation(const PathDiagnosticLocation &L);
+
+  PathDiagnosticLocation cleanUpLocation(PathDiagnosticLocation L,
+                                         bool firstCharOnly = false) {
+    if (const Stmt *S = L.asStmt()) {
+      const Stmt *Original = S;
+      while (1) {
+        // Adjust the location for some expressions that are best referenced
+        // by one of their subexpressions.
+        switch (S->getStmtClass()) {
+          default:
+            break;
+          case Stmt::ParenExprClass:
+            S = cast<ParenExpr>(S)->IgnoreParens();
+            firstCharOnly = true;
+            continue;
+          case Stmt::BinaryConditionalOperatorClass:
+          case Stmt::ConditionalOperatorClass:
+            S = cast<AbstractConditionalOperator>(S)->getCond();
+            firstCharOnly = true;
+            continue;
+          case Stmt::ChooseExprClass:
+            S = cast<ChooseExpr>(S)->getCond();
+            firstCharOnly = true;
+            continue;
+          case Stmt::BinaryOperatorClass:
+            S = cast<BinaryOperator>(S)->getLHS();
+            firstCharOnly = true;
+            continue;
+        }
+
+        break;
+      }
+
+      if (S != Original)
+        L = PathDiagnosticLocation(S, L.getManager());
+    }
+
+    if (firstCharOnly)
+      L = PathDiagnosticLocation(L.asLocation());
+
+    return L;
+  }
+
+  void popLocation() {
+    if (!CLocs.back().isDead() && CLocs.back().asLocation().isFileID()) {
+      // For contexts, we only want the first character as the range.
+      rawAddEdge(cleanUpLocation(CLocs.back(), true));
+    }
+    CLocs.pop_back();
+  }
+
+public:
+  EdgeBuilder(PathDiagnostic &pd, PathDiagnosticBuilder &pdb)
+    : PD(pd), PDB(pdb) {
+
+    // If the PathDiagnostic already has pieces, add the enclosing statement
+    // of the first piece as a context as well.
+    if (!PD.empty()) {
+      PrevLoc = PD.begin()->getLocation();
+
+      if (const Stmt *S = PrevLoc.asStmt())
+        addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
+    }
+  }
+
+  ~EdgeBuilder() {
+    while (!CLocs.empty()) popLocation();
+
+    // Finally, add an initial edge from the start location of the first
+    // statement (if it doesn't already exist).
+    // FIXME: Should handle CXXTryStmt if the analyzer starts supporting C++.
+    if (const CompoundStmt *CS =
+          dyn_cast_or_null<CompoundStmt>(PDB.getCodeDecl().getBody()))
+      if (!CS->body_empty()) {
+        SourceLocation Loc = (*CS->body_begin())->getLocStart();
+        rawAddEdge(PathDiagnosticLocation(Loc, PDB.getSourceManager()));
+      }
+
+  }
+
+  void addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd = false);
+
+  void rawAddEdge(PathDiagnosticLocation NewLoc);
+
+  void addContext(const Stmt *S);
+  void addExtendedContext(const Stmt *S);
+};
+} // end anonymous namespace
+
+
+PathDiagnosticLocation
+EdgeBuilder::getContextLocation(const PathDiagnosticLocation &L) {
+  if (const Stmt *S = L.asStmt()) {
+    if (IsControlFlowExpr(S))
+      return L;
+
+    return PDB.getEnclosingStmtLocation(S);
+  }
+
+  return L;
+}
+
+bool EdgeBuilder::containsLocation(const PathDiagnosticLocation &Container,
+                                   const PathDiagnosticLocation &Containee) {
+
+  if (Container == Containee)
+    return true;
+
+  if (Container.asDecl())
+    return true;
+
+  if (const Stmt *S = Containee.asStmt())
+    if (const Stmt *ContainerS = Container.asStmt()) {
+      while (S) {
+        if (S == ContainerS)
+          return true;
+        S = PDB.getParent(S);
+      }
+      return false;
+    }
+
+  // Less accurate: compare using source ranges.
+  SourceRange ContainerR = Container.asRange();
+  SourceRange ContaineeR = Containee.asRange();
+
+  SourceManager &SM = PDB.getSourceManager();
+  SourceLocation ContainerRBeg = SM.getInstantiationLoc(ContainerR.getBegin());
+  SourceLocation ContainerREnd = SM.getInstantiationLoc(ContainerR.getEnd());
+  SourceLocation ContaineeRBeg = SM.getInstantiationLoc(ContaineeR.getBegin());
+  SourceLocation ContaineeREnd = SM.getInstantiationLoc(ContaineeR.getEnd());
+
+  unsigned ContainerBegLine = SM.getInstantiationLineNumber(ContainerRBeg);
+  unsigned ContainerEndLine = SM.getInstantiationLineNumber(ContainerREnd);
+  unsigned ContaineeBegLine = SM.getInstantiationLineNumber(ContaineeRBeg);
+  unsigned ContaineeEndLine = SM.getInstantiationLineNumber(ContaineeREnd);
+
+  assert(ContainerBegLine <= ContainerEndLine);
+  assert(ContaineeBegLine <= ContaineeEndLine);
+
+  return (ContainerBegLine <= ContaineeBegLine &&
+          ContainerEndLine >= ContaineeEndLine &&
+          (ContainerBegLine != ContaineeBegLine ||
+           SM.getInstantiationColumnNumber(ContainerRBeg) <=
+           SM.getInstantiationColumnNumber(ContaineeRBeg)) &&
+          (ContainerEndLine != ContaineeEndLine ||
+           SM.getInstantiationColumnNumber(ContainerREnd) >=
+           SM.getInstantiationColumnNumber(ContaineeREnd)));
+}
+
+void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) {
+  if (!PrevLoc.isValid()) {
+    PrevLoc = NewLoc;
+    return;
+  }
+
+  const PathDiagnosticLocation &NewLocClean = cleanUpLocation(NewLoc);
+  const PathDiagnosticLocation &PrevLocClean = cleanUpLocation(PrevLoc);
+
+  if (NewLocClean.asLocation() == PrevLocClean.asLocation())
+    return;
+
+  // FIXME: Ignore intra-macro edges for now.
+  if (NewLocClean.asLocation().getInstantiationLoc() ==
+      PrevLocClean.asLocation().getInstantiationLoc())
+    return;
+
+  PD.push_front(new PathDiagnosticControlFlowPiece(NewLocClean, PrevLocClean));
+  PrevLoc = NewLoc;
+}
+
+void EdgeBuilder::addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd) {
+
+  if (!alwaysAdd && NewLoc.asLocation().isMacroID())
+    return;
+
+  const PathDiagnosticLocation &CLoc = getContextLocation(NewLoc);
+
+  while (!CLocs.empty()) {
+    ContextLocation &TopContextLoc = CLocs.back();
+
+    // Is the top location context the same as the one for the new location?
+ if (TopContextLoc == CLoc) { + if (alwaysAdd) { + if (IsConsumedExpr(TopContextLoc) && + !IsControlFlowExpr(TopContextLoc.asStmt())) + TopContextLoc.markDead(); + + rawAddEdge(NewLoc); + } + + return; + } + + if (containsLocation(TopContextLoc, CLoc)) { + if (alwaysAdd) { + rawAddEdge(NewLoc); + + if (IsConsumedExpr(CLoc) && !IsControlFlowExpr(CLoc.asStmt())) { + CLocs.push_back(ContextLocation(CLoc, true)); + return; + } + } + + CLocs.push_back(CLoc); + return; + } + + // Context does not contain the location. Flush it. + popLocation(); + } + + // If we reach here, there is no enclosing context. Just add the edge. + rawAddEdge(NewLoc); +} + +bool EdgeBuilder::IsConsumedExpr(const PathDiagnosticLocation &L) { + if (const Expr *X = dyn_cast_or_null<Expr>(L.asStmt())) + return PDB.getParentMap().isConsumedExpr(X) && !IsControlFlowExpr(X); + + return false; +} + +void EdgeBuilder::addExtendedContext(const Stmt *S) { + if (!S) + return; + + const Stmt *Parent = PDB.getParent(S); + while (Parent) { + if (isa<CompoundStmt>(Parent)) + Parent = PDB.getParent(Parent); + else + break; + } + + if (Parent) { + switch (Parent->getStmtClass()) { + case Stmt::DoStmtClass: + case Stmt::ObjCAtSynchronizedStmtClass: + addContext(Parent); + default: + break; + } + } + + addContext(S); +} + +void EdgeBuilder::addContext(const Stmt *S) { + if (!S) + return; + + PathDiagnosticLocation L(S, PDB.getSourceManager()); + + while (!CLocs.empty()) { + const PathDiagnosticLocation &TopContextLoc = CLocs.back(); + + // Is the top location context the same as the one for the new location? + if (TopContextLoc == L) + return; + + if (containsLocation(TopContextLoc, L)) { + CLocs.push_back(L); + return; + } + + // Context does not contain the location. Flush it. + popLocation(); + } + + CLocs.push_back(L); +} + +static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD, + PathDiagnosticBuilder &PDB, + const ExplodedNode *N) { + EdgeBuilder EB(PD, PDB); + + const ExplodedNode* NextNode = N->pred_empty() ? NULL : *(N->pred_begin()); + while (NextNode) { + N = NextNode; + NextNode = GetPredecessorNode(N); + ProgramPoint P = N->getLocation(); + + do { + // Block edges. + if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) { + const CFGBlock &Blk = *BE->getSrc(); + const Stmt *Term = Blk.getTerminator(); + + // Are we jumping to the head of a loop? Add a special diagnostic. + if (const Stmt *Loop = BE->getDst()->getLoopTarget()) { + PathDiagnosticLocation L(Loop, PDB.getSourceManager()); + const CompoundStmt *CS = NULL; + + if (!Term) { + if (const ForStmt *FS = dyn_cast<ForStmt>(Loop)) + CS = dyn_cast<CompoundStmt>(FS->getBody()); + else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop)) + CS = dyn_cast<CompoundStmt>(WS->getBody()); + } + + PathDiagnosticEventPiece *p = + new PathDiagnosticEventPiece(L, + "Looping back to the head of the loop"); + + EB.addEdge(p->getLocation(), true); + PD.push_front(p); + + if (CS) { + PathDiagnosticLocation BL(CS->getRBracLoc(), + PDB.getSourceManager()); + BL = PathDiagnosticLocation(BL.asLocation()); + EB.addEdge(BL); + } + } + + if (Term) + EB.addContext(Term); + + break; + } + + if (const BlockEntrance *BE = dyn_cast<BlockEntrance>(&P)) { + if (CFGStmt S = BE->getFirstElement().getAs<CFGStmt>()) { + if (IsControlFlowExpr(S)) { + // Add the proper context for '&&', '||', and '?'. 
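+          // (The control-flow expression itself becomes the context, so
+          // edges between its subexpressions stay grouped under it.)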
+ EB.addContext(S); + } + else + EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt()); + } + + break; + } + } while (0); + + if (!NextNode) + continue; + + for (BugReporterContext::visitor_iterator I = PDB.visitor_begin(), + E = PDB.visitor_end(); I!=E; ++I) { + if (PathDiagnosticPiece* p = (*I)->VisitNode(N, NextNode, PDB)) { + const PathDiagnosticLocation &Loc = p->getLocation(); + EB.addEdge(Loc, true); + PD.push_front(p); + if (const Stmt *S = Loc.asStmt()) + EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt()); + } + } + } +} + +//===----------------------------------------------------------------------===// +// Methods for BugType and subclasses. +//===----------------------------------------------------------------------===// +BugType::~BugType() { + // Free up the equivalence class objects. Observe that we get a pointer to + // the object first before incrementing the iterator, as destroying the + // node before doing so means we will read from freed memory. + for (iterator I = begin(), E = end(); I !=E; ) { + BugReportEquivClass *EQ = &*I; + ++I; + delete EQ; + } +} +void BugType::FlushReports(BugReporter &BR) {} + +//===----------------------------------------------------------------------===// +// Methods for BugReport and subclasses. +//===----------------------------------------------------------------------===// +BugReport::~BugReport() {} +RangedBugReport::~RangedBugReport() {} + +const Stmt* BugReport::getStmt() const { + ProgramPoint ProgP = ErrorNode->getLocation(); + const Stmt *S = NULL; + + if (BlockEntrance* BE = dyn_cast<BlockEntrance>(&ProgP)) { + CFGBlock &Exit = ProgP.getLocationContext()->getCFG()->getExit(); + if (BE->getBlock() == &Exit) + S = GetPreviousStmt(ErrorNode); + } + if (!S) + S = GetStmt(ProgP); + + return S; +} + +PathDiagnosticPiece* +BugReport::getEndPath(BugReporterContext& BRC, + const ExplodedNode* EndPathNode) { + + const Stmt* S = getStmt(); + + if (!S) + return NULL; + + BugReport::ranges_iterator Beg, End; + llvm::tie(Beg, End) = getRanges(); + PathDiagnosticLocation L(S, BRC.getSourceManager()); + + // Only add the statement itself as a range if we didn't specify any + // special ranges for this report. + PathDiagnosticPiece* P = new PathDiagnosticEventPiece(L, getDescription(), + Beg == End); + + for (; Beg != End; ++Beg) + P->addRange(*Beg); + + return P; +} + +std::pair<BugReport::ranges_iterator, BugReport::ranges_iterator> +BugReport::getRanges() const { + if (const Expr* E = dyn_cast_or_null<Expr>(getStmt())) { + R = E->getSourceRange(); + assert(R.isValid()); + return std::make_pair(&R, &R+1); + } + else + return std::make_pair(ranges_iterator(), ranges_iterator()); +} + +SourceLocation BugReport::getLocation() const { + if (ErrorNode) + if (const Stmt* S = GetCurrentOrPreviousStmt(ErrorNode)) { + // For member expressions, return the location of the '.' or '->'. + if (const MemberExpr *ME = dyn_cast<MemberExpr>(S)) + return ME->getMemberLoc(); + // For binary operators, return the location of the operator. + if (const BinaryOperator *B = dyn_cast<BinaryOperator>(S)) + return B->getOperatorLoc(); + + return S->getLocStart(); + } + + return FullSourceLoc(); +} + +PathDiagnosticPiece* BugReport::VisitNode(const ExplodedNode* N, + const ExplodedNode* PrevN, + BugReporterContext &BRC) { + return NULL; +} + +//===----------------------------------------------------------------------===// +// Methods for BugReporter and subclasses. 
+//===----------------------------------------------------------------------===//
+
+BugReportEquivClass::~BugReportEquivClass() {
+  for (iterator I=begin(), E=end(); I!=E; ++I) delete *I;
+}
+
+GRBugReporter::~GRBugReporter() { }
+BugReporterData::~BugReporterData() {}
+
+ExplodedGraph &GRBugReporter::getGraph() { return Eng.getGraph(); }
+
+GRStateManager&
+GRBugReporter::getStateManager() { return Eng.getStateManager(); }
+
+BugReporter::~BugReporter() { FlushReports(); }
+
+void BugReporter::FlushReports() {
+  if (BugTypes.isEmpty())
+    return;
+
+  // First flush the warnings for each BugType. This may end up creating new
+  // warnings and new BugTypes. Because ImmutableSet is a functional data
+  // structure, we do not need to worry about the iterators being invalidated.
+  for (BugTypesTy::iterator I=BugTypes.begin(), E=BugTypes.end(); I!=E; ++I)
+    const_cast<BugType*>(*I)->FlushReports(*this);
+
+  // Iterate through BugTypes a second time. BugTypes may have been updated
+  // with new BugType objects and new warnings.
+  for (BugTypesTy::iterator I=BugTypes.begin(), E=BugTypes.end(); I!=E; ++I) {
+    BugType *BT = const_cast<BugType*>(*I);
+
+    typedef llvm::FoldingSet<BugReportEquivClass> SetTy;
+    SetTy& EQClasses = BT->EQClasses;
+
+    for (SetTy::iterator EI=EQClasses.begin(), EE=EQClasses.end(); EI!=EE;++EI){
+      BugReportEquivClass& EQ = *EI;
+      FlushReport(EQ);
+    }
+
+    // Delete the BugType object.
+    delete BT;
+  }
+
+  // Remove all references to the BugType objects.
+  BugTypes = F.getEmptySet();
+}
+
+//===----------------------------------------------------------------------===//
+// PathDiagnostics generation.
+//===----------------------------------------------------------------------===//
+
+static std::pair<std::pair<ExplodedGraph*, NodeBackMap*>,
+                 std::pair<ExplodedNode*, unsigned> >
+MakeReportGraph(const ExplodedGraph* G,
+                llvm::SmallVectorImpl<const ExplodedNode*> &nodes) {
+
+  // Create the trimmed graph. It will contain the shortest paths from the
+  // error nodes to the root. In the new graph we should only have one
+  // error node unless there are two or more error nodes with the same minimum
+  // path length.
+  ExplodedGraph* GTrim;
+  InterExplodedGraphMap* NMap;
+
+  llvm::DenseMap<const void*, const void*> InverseMap;
+  llvm::tie(GTrim, NMap) = G->Trim(nodes.data(), nodes.data() + nodes.size(),
+                                   &InverseMap);
+
+  // Create owning pointers for GTrim and NMap just to ensure that they are
+  // released when this function exits.
+  llvm::OwningPtr<ExplodedGraph> AutoReleaseGTrim(GTrim);
+  llvm::OwningPtr<InterExplodedGraphMap> AutoReleaseNMap(NMap);
+
+  // Find the (first) error node in the trimmed graph. We just need to consult
+  // the node map (NMap) which maps from nodes in the original graph to nodes
+  // in the new graph.
+
+  std::queue<const ExplodedNode*> WS;
+  typedef llvm::DenseMap<const ExplodedNode*, unsigned> IndexMapTy;
+  IndexMapTy IndexMap;
+
+  for (unsigned nodeIndex = 0 ; nodeIndex < nodes.size(); ++nodeIndex) {
+    const ExplodedNode *originalNode = nodes[nodeIndex];
+    if (const ExplodedNode *N = NMap->getMappedNode(originalNode)) {
+      WS.push(N);
+      IndexMap[originalNode] = nodeIndex;
+    }
+  }
+
+  assert(!WS.empty() && "No error node found in the trimmed graph.");
+
+  // Create a new (third!) graph with a single path. This is the graph
+  // that will be returned to the caller.
+  ExplodedGraph *GNew = new ExplodedGraph();
+
+  // Sometimes the trimmed graph can contain a cycle. Perform a reverse BFS
+  // to the root node, and then construct a new graph that contains only
+  // a single path.
+  llvm::DenseMap<const void*,unsigned> Visited;
+
+  unsigned cnt = 0;
+  const ExplodedNode* Root = 0;
+
+  while (!WS.empty()) {
+    const ExplodedNode* Node = WS.front();
+    WS.pop();
+
+    if (Visited.find(Node) != Visited.end())
+      continue;
+
+    Visited[Node] = cnt++;
+
+    if (Node->pred_empty()) {
+      Root = Node;
+      break;
+    }
+
+    for (ExplodedNode::const_pred_iterator I=Node->pred_begin(),
+         E=Node->pred_end(); I!=E; ++I)
+      WS.push(*I);
+  }
+
+  assert(Root);
+
+  // Now walk from the root down the BFS path, always taking the successor
+  // with the lowest number.
+  ExplodedNode *Last = 0, *First = 0;
+  NodeBackMap *BM = new NodeBackMap();
+  unsigned NodeIndex = 0;
+
+  for ( const ExplodedNode *N = Root ;;) {
+    // Look up the number associated with the current node.
+    llvm::DenseMap<const void*,unsigned>::iterator I = Visited.find(N);
+    assert(I != Visited.end());
+
+    // Create the equivalent node in the new graph with the same state
+    // and location.
+    ExplodedNode* NewN = GNew->getNode(N->getLocation(), N->getState());
+
+    // Store the mapping to the original node.
+    llvm::DenseMap<const void*, const void*>::iterator IMitr=InverseMap.find(N);
+    assert(IMitr != InverseMap.end() && "No mapping to original node.");
+    (*BM)[NewN] = (const ExplodedNode*) IMitr->second;
+
+    // Link up the new node with the previous node.
+    if (Last)
+      NewN->addPredecessor(Last, *GNew);
+
+    Last = NewN;
+
+    // Are we at the final node?
+    IndexMapTy::iterator IMI =
+      IndexMap.find((const ExplodedNode*)(IMitr->second));
+    if (IMI != IndexMap.end()) {
+      First = NewN;
+      NodeIndex = IMI->second;
+      break;
+    }
+
+    // Find the next successor node. We choose the node that is marked
+    // with the lowest BFS number.
+    ExplodedNode::const_succ_iterator SI = N->succ_begin();
+    ExplodedNode::const_succ_iterator SE = N->succ_end();
+    N = 0;
+
+    for (unsigned MinVal = 0; SI != SE; ++SI) {
+
+      I = Visited.find(*SI);
+
+      if (I == Visited.end())
+        continue;
+
+      if (!N || I->second < MinVal) {
+        N = *SI;
+        MinVal = I->second;
+      }
+    }
+
+    assert(N);
+  }
+
+  assert(First);
+
+  return std::make_pair(std::make_pair(GNew, BM),
+                        std::make_pair(First, NodeIndex));
+}
+
+/// CompactPathDiagnostic - This function postprocesses a PathDiagnostic object
+/// and collapses PathDiagnosticPieces that are expanded by macros.
+static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM) {
+  typedef std::vector<std::pair<PathDiagnosticMacroPiece*, SourceLocation> >
+          MacroStackTy;
+
+  typedef std::vector<PathDiagnosticPiece*>
+          PiecesTy;
+
+  MacroStackTy MacroStack;
+  PiecesTy Pieces;
+
+  for (PathDiagnostic::iterator I = PD.begin(), E = PD.end(); I!=E; ++I) {
+    // Get the location of the PathDiagnosticPiece.
+    const FullSourceLoc Loc = I->getLocation().asLocation();
+
+    // Determine the instantiation location, which is the location by which
+    // we group related PathDiagnosticPieces.
+    SourceLocation InstantiationLoc = Loc.isMacroID() ?
+                                      SM.getInstantiationLoc(Loc) :
+                                      SourceLocation();
+
+    if (Loc.isFileID()) {
+      MacroStack.clear();
+      Pieces.push_back(&*I);
+      continue;
+    }
+
+    assert(Loc.isMacroID());
+
+    // Is the PathDiagnosticPiece within the same macro group?
+    if (!MacroStack.empty() && InstantiationLoc == MacroStack.back().second) {
+      MacroStack.back().first->push_back(&*I);
+      continue;
+    }
+
+    // We aren't in the same group. Are we descending into a new macro
+    // or are we part of an old one?
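+    // (For nested expansions such as OUTER(INNER(x)), this walk is intended
+    // to nest a group for INNER inside the group for OUTER.)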
+    PathDiagnosticMacroPiece *MacroGroup = 0;
+
+    SourceLocation ParentInstantiationLoc = InstantiationLoc.isMacroID() ?
+                                            SM.getInstantiationLoc(Loc) :
+                                            SourceLocation();
+
+    // Walk the entire macro stack.
+    while (!MacroStack.empty()) {
+      if (InstantiationLoc == MacroStack.back().second) {
+        MacroGroup = MacroStack.back().first;
+        break;
+      }
+
+      if (ParentInstantiationLoc == MacroStack.back().second) {
+        MacroGroup = MacroStack.back().first;
+        break;
+      }
+
+      MacroStack.pop_back();
+    }
+
+    if (!MacroGroup || ParentInstantiationLoc == MacroStack.back().second) {
+      // Create a new macro group and add it to the stack.
+      PathDiagnosticMacroPiece *NewGroup = new PathDiagnosticMacroPiece(Loc);
+
+      if (MacroGroup)
+        MacroGroup->push_back(NewGroup);
+      else {
+        assert(InstantiationLoc.isFileID());
+        Pieces.push_back(NewGroup);
+      }
+
+      MacroGroup = NewGroup;
+      MacroStack.push_back(std::make_pair(MacroGroup, InstantiationLoc));
+    }
+
+    // Finally, add the PathDiagnosticPiece to the group.
+    MacroGroup->push_back(&*I);
+  }
+
+  // Now take the pieces and construct a new PathDiagnostic.
+  PD.resetPath(false);
+
+  for (PiecesTy::iterator I=Pieces.begin(), E=Pieces.end(); I!=E; ++I) {
+    if (PathDiagnosticMacroPiece *MP=dyn_cast<PathDiagnosticMacroPiece>(*I))
+      if (!MP->containsEvent()) {
+        delete MP;
+        continue;
+      }
+
+    PD.push_back(*I);
+  }
+}
+
+void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
+                        llvm::SmallVectorImpl<BugReport *> &bugReports) {
+
+  assert(!bugReports.empty());
+  llvm::SmallVector<const ExplodedNode *, 10> errorNodes;
+  for (llvm::SmallVectorImpl<BugReport*>::iterator I = bugReports.begin(),
+       E = bugReports.end(); I != E; ++I) {
+    errorNodes.push_back((*I)->getErrorNode());
+  }
+
+  // Construct a new graph that contains only a single path from the error
+  // node to a root.
+  const std::pair<std::pair<ExplodedGraph*, NodeBackMap*>,
+                  std::pair<ExplodedNode*, unsigned> >&
+    GPair = MakeReportGraph(&getGraph(), errorNodes);
+
+  // Find the BugReport with the original location.
+  assert(GPair.second.second < bugReports.size());
+  BugReport *R = bugReports[GPair.second.second];
+  assert(R && "No original report found for sliced graph.");
+
+  llvm::OwningPtr<ExplodedGraph> ReportGraph(GPair.first.first);
+  llvm::OwningPtr<NodeBackMap> BackMap(GPair.first.second);
+  const ExplodedNode *N = GPair.second.first;
+
+  // Start building the path diagnostic...
+  PathDiagnosticBuilder PDB(*this, R, BackMap.get(), getPathDiagnosticClient());
+
+  if (PathDiagnosticPiece* Piece = R->getEndPath(PDB, N))
+    PD.push_back(Piece);
+  else
+    return;
+
+  // Register node visitors.
+  R->registerInitialVisitors(PDB, N);
+  bugreporter::registerNilReceiverVisitor(PDB);
+
+  switch (PDB.getGenerationScheme()) {
+    case PathDiagnosticClient::Extensive:
+      GenerateExtensivePathDiagnostic(PD, PDB, N);
+      break;
+    case PathDiagnosticClient::Minimal:
+      GenerateMinimalPathDiagnostic(PD, PDB, N);
+      break;
+  }
+}
+
+void BugReporter::Register(BugType *BT) {
+  BugTypes = F.add(BugTypes, BT);
+}
+
+void BugReporter::EmitReport(BugReport* R) {
+  // Compute the bug report's hash to determine its equivalence class.
+  llvm::FoldingSetNodeID ID;
+  R->Profile(ID);
+
+  // Look up the equivalence class. If there isn't one, create it.
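+  // Reports that produce the same Profile() hash end up in one
+  // BugReportEquivClass, from which FlushReport() later emits a single
+  // representative diagnostic.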
+  BugType& BT = R->getBugType();
+  Register(&BT);
+  void *InsertPos;
+  BugReportEquivClass* EQ = BT.EQClasses.FindNodeOrInsertPos(ID, InsertPos);
+
+  if (!EQ) {
+    EQ = new BugReportEquivClass(R);
+    BT.EQClasses.InsertNode(EQ, InsertPos);
+  }
+  else
+    EQ->AddReport(R);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Emitting reports in equivalence classes.
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct FRIEC_WLItem {
+  const ExplodedNode *N;
+  ExplodedNode::const_succ_iterator I, E;
+
+  FRIEC_WLItem(const ExplodedNode *n)
+  : N(n), I(N->succ_begin()), E(N->succ_end()) {}
+};
+}
+
+static BugReport *
+FindReportInEquivalenceClass(BugReportEquivClass& EQ,
+                             llvm::SmallVectorImpl<BugReport*> &bugReports) {
+
+  BugReportEquivClass::iterator I = EQ.begin(), E = EQ.end();
+  assert(I != E);
+  BugReport *R = *I;
+  BugType& BT = R->getBugType();
+
+  // If we don't need to suppress any of the nodes because they are
+  // post-dominated by a sink, simply add all the reports in the equivalence
+  // class to 'bugReports'. Any of the reports will serve as a
+  // "representative" report.
+  if (!BT.isSuppressOnSink()) {
+    for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I) {
+      const ExplodedNode* N = I->getErrorNode();
+      if (N) {
+        R = *I;
+        bugReports.push_back(R);
+      }
+    }
+    return R;
+  }
+
+  // For bug reports that should be suppressed when all paths are post-dominated
+  // by a sink node, iterate through the reports in the equivalence class
+  // until we find one that isn't post-dominated (if one exists). We use a
+  // DFS traversal of the ExplodedGraph to find a non-sink node. We could write
+  // this as a recursive function, but we don't want to risk blowing out the
+  // stack for very long paths.
+  BugReport *exampleReport = 0;
+
+  for (; I != E; ++I) {
+    R = *I;
+    const ExplodedNode *errorNode = R->getErrorNode();
+
+    if (!errorNode)
+      continue;
+    if (errorNode->isSink()) {
+      assert(false &&
+           "BugType::isSuppressOnSink() should not be 'true' for sink end nodes");
+      return 0;
+    }
+    // No successors? By definition this node isn't post-dominated by a sink.
+    if (errorNode->succ_empty()) {
+      bugReports.push_back(R);
+      if (!exampleReport)
+        exampleReport = R;
+      continue;
+    }
+
+    // At this point we know that 'errorNode' is not a sink and it has at least
+    // one successor. Use a DFS worklist to find a non-sink end-of-path node.
+    typedef FRIEC_WLItem WLItem;
+    typedef llvm::SmallVector<WLItem, 10> DFSWorkList;
+    llvm::DenseMap<const ExplodedNode *, unsigned> Visited;
+
+    DFSWorkList WL;
+    WL.push_back(errorNode);
+    Visited[errorNode] = 1;
+
+    while (!WL.empty()) {
+      WLItem &WI = WL.back();
+      assert(!WI.N->succ_empty());
+
+      for (; WI.I != WI.E; ++WI.I) {
+        const ExplodedNode *Succ = *WI.I;
+        // End-of-path node?
+        if (Succ->succ_empty()) {
+          // We found an end-of-path node that is not a sink: keep this report.
+          if (!Succ->isSink()) {
+            bugReports.push_back(R);
+            if (!exampleReport)
+              exampleReport = R;
+            WL.clear();
+            break;
+          }
+          // Found a sink? Continue on to the next successor.
+          continue;
+        }
+        // Mark the successor as visited. If it hasn't been explored,
+        // enqueue it to the DFS worklist.
+        unsigned &mark = Visited[Succ];
+        if (!mark) {
+          mark = 1;
+          WL.push_back(Succ);
+          break;
+        }
+      }
+
+      // The worklist may have been cleared at this point. First
+      // check if it is empty before checking the last item.
+ if (!WL.empty() && &WL.back() == &WI) + WL.pop_back(); + } + } + + // ExampleReport will be NULL if all the nodes in the equivalence class + // were post-dominated by sinks. + return exampleReport; +} + +//===----------------------------------------------------------------------===// +// DiagnosticCache. This is a hack to cache analyzer diagnostics. It +// uses global state, which eventually should go elsewhere. +//===----------------------------------------------------------------------===// +namespace { +class DiagCacheItem : public llvm::FoldingSetNode { + llvm::FoldingSetNodeID ID; +public: + DiagCacheItem(BugReport *R, PathDiagnostic *PD) { + ID.AddString(R->getBugType().getName()); + ID.AddString(R->getBugType().getCategory()); + ID.AddString(R->getDescription()); + ID.AddInteger(R->getLocation().getRawEncoding()); + PD->Profile(ID); + } + + void Profile(llvm::FoldingSetNodeID &id) { + id = ID; + } + + llvm::FoldingSetNodeID &getID() { return ID; } +}; +} + +static bool IsCachedDiagnostic(BugReport *R, PathDiagnostic *PD) { + // FIXME: Eventually this diagnostic cache should reside in something + // like AnalysisManager instead of being a static variable. This is + // really unsafe in the long term. + typedef llvm::FoldingSet<DiagCacheItem> DiagnosticCache; + static DiagnosticCache DC; + + void *InsertPos; + DiagCacheItem *Item = new DiagCacheItem(R, PD); + + if (DC.FindNodeOrInsertPos(Item->getID(), InsertPos)) { + delete Item; + return true; + } + + DC.InsertNode(Item, InsertPos); + return false; +} + +void BugReporter::FlushReport(BugReportEquivClass& EQ) { + llvm::SmallVector<BugReport*, 10> bugReports; + BugReport *exampleReport = FindReportInEquivalenceClass(EQ, bugReports); + if (!exampleReport) + return; + + PathDiagnosticClient* PD = getPathDiagnosticClient(); + + // FIXME: Make sure we use the 'R' for the path that was actually used. + // Probably doesn't make a difference in practice. + BugType& BT = exampleReport->getBugType(); + + llvm::OwningPtr<PathDiagnostic> + D(new PathDiagnostic(exampleReport->getBugType().getName(), + !PD || PD->useVerboseDescription() + ? exampleReport->getDescription() + : exampleReport->getShortDescription(), + BT.getCategory())); + + if (!bugReports.empty()) + GeneratePathDiagnostic(*D.get(), bugReports); + + if (IsCachedDiagnostic(exampleReport, D.get())) + return; + + // Get the meta data. + std::pair<const char**, const char**> Meta = + exampleReport->getExtraDescriptiveText(); + for (const char** s = Meta.first; s != Meta.second; ++s) + D->addMeta(*s); + + // Emit a summary diagnostic to the regular Diagnostics engine. + BugReport::ranges_iterator Beg, End; + llvm::tie(Beg, End) = exampleReport->getRanges(); + Diagnostic &Diag = getDiagnostic(); + FullSourceLoc L(exampleReport->getLocation(), getSourceManager()); + + // Search the description for '%', as that will be interpretted as a + // format character by FormatDiagnostics. + llvm::StringRef desc = exampleReport->getShortDescription(); + unsigned ErrorDiag; + { + llvm::SmallString<512> TmpStr; + llvm::raw_svector_ostream Out(TmpStr); + for (llvm::StringRef::iterator I=desc.begin(), E=desc.end(); I!=E; ++I) + if (*I == '%') + Out << "%%"; + else + Out << *I; + + Out.flush(); + ErrorDiag = Diag.getCustomDiagID(Diagnostic::Warning, TmpStr); + } + + { + DiagnosticBuilder diagBuilder = Diag.Report(L, ErrorDiag); + for (BugReport::ranges_iterator I = Beg; I != End; ++I) + diagBuilder << *I; + } + + // Emit a full diagnostic for the path if we have a PathDiagnosticClient. 
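+  // If path generation produced no pieces, fall back below to a single event
+  // piece at the report's location.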
+ if (!PD) + return; + + if (D->empty()) { + PathDiagnosticPiece* piece = + new PathDiagnosticEventPiece(L, exampleReport->getDescription()); + + for ( ; Beg != End; ++Beg) piece->addRange(*Beg); + D->push_back(piece); + } + + PD->HandlePathDiagnostic(D.take()); +} + +void BugReporter::EmitBasicReport(llvm::StringRef name, llvm::StringRef str, + SourceLocation Loc, + SourceRange* RBeg, unsigned NumRanges) { + EmitBasicReport(name, "", str, Loc, RBeg, NumRanges); +} + +void BugReporter::EmitBasicReport(llvm::StringRef name, + llvm::StringRef category, + llvm::StringRef str, SourceLocation Loc, + SourceRange* RBeg, unsigned NumRanges) { + + // 'BT' will be owned by BugReporter as soon as we call 'EmitReport'. + BugType *BT = new BugType(name, category); + FullSourceLoc L = getContext().getFullLoc(Loc); + RangedBugReport *R = new DiagBugReport(*BT, str, L); + for ( ; NumRanges > 0 ; --NumRanges, ++RBeg) R->addRange(*RBeg); + EmitReport(R); +} diff --git a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp new file mode 100644 index 0000000..8e31ade --- /dev/null +++ b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp @@ -0,0 +1,457 @@ +// BugReporterVisitors.cpp - Helpers for reporting bugs -----------*- C++ -*--// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a set of BugReporter "visitors" which can be used to +// enhance the diagnostics reported for a bug. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/Expr.h" +#include "clang/AST/ExprObjC.h" +#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h" +#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h" + +using namespace clang; +using namespace ento; + +//===----------------------------------------------------------------------===// +// Utility functions. +//===----------------------------------------------------------------------===// + +const Stmt *bugreporter::GetDerefExpr(const ExplodedNode *N) { + // Pattern match for a few useful cases (do something smarter later): + // a[0], p->f, *p + const Stmt *S = N->getLocationAs<PostStmt>()->getStmt(); + + if (const UnaryOperator *U = dyn_cast<UnaryOperator>(S)) { + if (U->getOpcode() == UO_Deref) + return U->getSubExpr()->IgnoreParenCasts(); + } + else if (const MemberExpr *ME = dyn_cast<MemberExpr>(S)) { + return ME->getBase()->IgnoreParenCasts(); + } + else if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(S)) { + // Retrieve the base for arrays since BasicStoreManager doesn't know how + // to reason about them. + return AE->getBase(); + } + + return NULL; +} + +const Stmt *bugreporter::GetDenomExpr(const ExplodedNode *N) { + const Stmt *S = N->getLocationAs<PreStmt>()->getStmt(); + if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(S)) + return BE->getRHS(); + return NULL; +} + +const Stmt *bugreporter::GetCalleeExpr(const ExplodedNode *N) { + // Callee is checked as a PreVisit to the CallExpr. 
+ const Stmt *S = N->getLocationAs<PreStmt>()->getStmt(); + if (const CallExpr *CE = dyn_cast<CallExpr>(S)) + return CE->getCallee(); + return NULL; +} + +const Stmt *bugreporter::GetRetValExpr(const ExplodedNode *N) { + const Stmt *S = N->getLocationAs<PostStmt>()->getStmt(); + if (const ReturnStmt *RS = dyn_cast<ReturnStmt>(S)) + return RS->getRetValue(); + return NULL; +} + +//===----------------------------------------------------------------------===// +// Definitions for bug reporter visitors. +//===----------------------------------------------------------------------===// + +namespace { +class FindLastStoreBRVisitor : public BugReporterVisitor { + const MemRegion *R; + SVal V; + bool satisfied; + const ExplodedNode *StoreSite; +public: + FindLastStoreBRVisitor(SVal v, const MemRegion *r) + : R(r), V(v), satisfied(false), StoreSite(0) {} + + virtual void Profile(llvm::FoldingSetNodeID &ID) const { + static int tag = 0; + ID.AddPointer(&tag); + ID.AddPointer(R); + ID.Add(V); + } + + PathDiagnosticPiece* VisitNode(const ExplodedNode *N, + const ExplodedNode *PrevN, + BugReporterContext& BRC) { + + if (satisfied) + return NULL; + + if (!StoreSite) { + const ExplodedNode *Node = N, *Last = NULL; + + for ( ; Node ; Last = Node, Node = Node->getFirstPred()) { + + if (const VarRegion *VR = dyn_cast<VarRegion>(R)) { + if (const PostStmt *P = Node->getLocationAs<PostStmt>()) + if (const DeclStmt *DS = P->getStmtAs<DeclStmt>()) + if (DS->getSingleDecl() == VR->getDecl()) { + Last = Node; + break; + } + } + + if (Node->getState()->getSVal(R) != V) + break; + } + + if (!Node || !Last) { + satisfied = true; + return NULL; + } + + StoreSite = Last; + } + + if (StoreSite != N) + return NULL; + + satisfied = true; + llvm::SmallString<256> sbuf; + llvm::raw_svector_ostream os(sbuf); + + if (const PostStmt *PS = N->getLocationAs<PostStmt>()) { + if (const DeclStmt *DS = PS->getStmtAs<DeclStmt>()) { + + if (const VarRegion *VR = dyn_cast<VarRegion>(R)) { + os << "Variable '" << VR->getDecl() << "' "; + } + else + return NULL; + + if (isa<loc::ConcreteInt>(V)) { + bool b = false; + if (R->isBoundable()) { + if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) { + if (TR->getValueType()->isObjCObjectPointerType()) { + os << "initialized to nil"; + b = true; + } + } + } + + if (!b) + os << "initialized to a null pointer value"; + } + else if (isa<nonloc::ConcreteInt>(V)) { + os << "initialized to " << cast<nonloc::ConcreteInt>(V).getValue(); + } + else if (V.isUndef()) { + if (isa<VarRegion>(R)) { + const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl()); + if (VD->getInit()) + os << "initialized to a garbage value"; + else + os << "declared without an initial value"; + } + } + } + } + + if (os.str().empty()) { + if (isa<loc::ConcreteInt>(V)) { + bool b = false; + if (R->isBoundable()) { + if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) { + if (TR->getValueType()->isObjCObjectPointerType()) { + os << "nil object reference stored to "; + b = true; + } + } + } + + if (!b) + os << "Null pointer value stored to "; + } + else if (V.isUndef()) { + os << "Uninitialized value stored to "; + } + else if (isa<nonloc::ConcreteInt>(V)) { + os << "The value " << cast<nonloc::ConcreteInt>(V).getValue() + << " is assigned to "; + } + else + return NULL; + + if (const VarRegion *VR = dyn_cast<VarRegion>(R)) { + os << '\'' << VR->getDecl() << '\''; + } + else + return NULL; + } + + // FIXME: Refactor this into BugReporterContext. 
+ const Stmt *S = 0; + ProgramPoint P = N->getLocation(); + + if (BlockEdge *BE = dyn_cast<BlockEdge>(&P)) { + const CFGBlock *BSrc = BE->getSrc(); + S = BSrc->getTerminatorCondition(); + } + else if (PostStmt *PS = dyn_cast<PostStmt>(&P)) { + S = PS->getStmt(); + } + + if (!S) + return NULL; + + // Construct a new PathDiagnosticPiece. + PathDiagnosticLocation L(S, BRC.getSourceManager()); + return new PathDiagnosticEventPiece(L, os.str()); + } +}; + + +static void registerFindLastStore(BugReporterContext& BRC, const MemRegion *R, + SVal V) { + BRC.addVisitor(new FindLastStoreBRVisitor(V, R)); +} + +class TrackConstraintBRVisitor : public BugReporterVisitor { + DefinedSVal Constraint; + const bool Assumption; + bool isSatisfied; +public: + TrackConstraintBRVisitor(DefinedSVal constraint, bool assumption) + : Constraint(constraint), Assumption(assumption), isSatisfied(false) {} + + void Profile(llvm::FoldingSetNodeID &ID) const { + static int tag = 0; + ID.AddPointer(&tag); + ID.AddBoolean(Assumption); + ID.Add(Constraint); + } + + PathDiagnosticPiece* VisitNode(const ExplodedNode *N, + const ExplodedNode *PrevN, + BugReporterContext& BRC) { + if (isSatisfied) + return NULL; + + // Check if in the previous state it was feasible for this constraint + // to *not* be true. + if (PrevN->getState()->assume(Constraint, !Assumption)) { + + isSatisfied = true; + + // As a sanity check, make sure that the negation of the constraint + // was infeasible in the current state. If it is feasible, we somehow + // missed the transition point. + if (N->getState()->assume(Constraint, !Assumption)) + return NULL; + + // We found the transition point for the constraint. We now need to + // pretty-print the constraint. (work-in-progress) + std::string sbuf; + llvm::raw_string_ostream os(sbuf); + + if (isa<Loc>(Constraint)) { + os << "Assuming pointer value is "; + os << (Assumption ? "non-null" : "null"); + } + + if (os.str().empty()) + return NULL; + + // FIXME: Refactor this into BugReporterContext. + const Stmt *S = 0; + ProgramPoint P = N->getLocation(); + + if (BlockEdge *BE = dyn_cast<BlockEdge>(&P)) { + const CFGBlock *BSrc = BE->getSrc(); + S = BSrc->getTerminatorCondition(); + } + else if (PostStmt *PS = dyn_cast<PostStmt>(&P)) { + S = PS->getStmt(); + } + + if (!S) + return NULL; + + // Construct a new PathDiagnosticPiece. + PathDiagnosticLocation L(S, BRC.getSourceManager()); + return new PathDiagnosticEventPiece(L, os.str()); + } + + return NULL; + } +}; +} // end anonymous namespace + +static void registerTrackConstraint(BugReporterContext& BRC, + DefinedSVal Constraint, + bool Assumption) { + BRC.addVisitor(new TrackConstraintBRVisitor(Constraint, Assumption)); +} + +void bugreporter::registerTrackNullOrUndefValue(BugReporterContext& BRC, + const void *data, + const ExplodedNode* N) { + + const Stmt *S = static_cast<const Stmt*>(data); + + if (!S) + return; + + GRStateManager &StateMgr = BRC.getStateManager(); + const GRState *state = N->getState(); + + // Walk through lvalue-to-rvalue conversions. + if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S)) { + if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { + const VarRegion *R = + StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext()); + + // What did we load? 
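+      // If the value loaded from the variable's region is a concrete constant
+      // or undefined, explain it by tracking the store that last wrote it.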
+ SVal V = state->getSVal(loc::MemRegionVal(R)); + + if (isa<loc::ConcreteInt>(V) || isa<nonloc::ConcreteInt>(V) + || V.isUndef()) { + ::registerFindLastStore(BRC, R, V); + } + } + } + + SVal V = state->getSValAsScalarOrLoc(S); + + // Uncomment this to find cases where we aren't properly getting the + // base value that was dereferenced. + // assert(!V.isUnknownOrUndef()); + + // Is it a symbolic value? + if (loc::MemRegionVal *L = dyn_cast<loc::MemRegionVal>(&V)) { + const SubRegion *R = cast<SubRegion>(L->getRegion()); + while (R && !isa<SymbolicRegion>(R)) { + R = dyn_cast<SubRegion>(R->getSuperRegion()); + } + + if (R) { + assert(isa<SymbolicRegion>(R)); + registerTrackConstraint(BRC, loc::MemRegionVal(R), false); + } + } +} + +void bugreporter::registerFindLastStore(BugReporterContext& BRC, + const void *data, + const ExplodedNode* N) { + + const MemRegion *R = static_cast<const MemRegion*>(data); + + if (!R) + return; + + const GRState *state = N->getState(); + SVal V = state->getSVal(R); + + if (V.isUnknown()) + return; + + BRC.addVisitor(new FindLastStoreBRVisitor(V, R)); +} + + +namespace { +class NilReceiverVisitor : public BugReporterVisitor { +public: + NilReceiverVisitor() {} + + void Profile(llvm::FoldingSetNodeID &ID) const { + static int x = 0; + ID.AddPointer(&x); + } + + PathDiagnosticPiece* VisitNode(const ExplodedNode *N, + const ExplodedNode *PrevN, + BugReporterContext& BRC) { + + const PostStmt *P = N->getLocationAs<PostStmt>(); + if (!P) + return 0; + const ObjCMessageExpr *ME = P->getStmtAs<ObjCMessageExpr>(); + if (!ME) + return 0; + const Expr *Receiver = ME->getInstanceReceiver(); + if (!Receiver) + return 0; + const GRState *state = N->getState(); + const SVal &V = state->getSVal(Receiver); + const DefinedOrUnknownSVal *DV = dyn_cast<DefinedOrUnknownSVal>(&V); + if (!DV) + return 0; + state = state->assume(*DV, true); + if (state) + return 0; + + // The receiver was nil, and hence the method was skipped. + // Register a BugReporterVisitor to issue a message telling us how + // the receiver was null. + bugreporter::registerTrackNullOrUndefValue(BRC, Receiver, N); + // Issue a message saying that the method was skipped. + PathDiagnosticLocation L(Receiver, BRC.getSourceManager()); + return new PathDiagnosticEventPiece(L, "No method actually called " + "because the receiver is nil"); + } +}; +} // end anonymous namespace + +void bugreporter::registerNilReceiverVisitor(BugReporterContext &BRC) { + BRC.addVisitor(new NilReceiverVisitor()); +} + +// Registers every VarDecl inside a Stmt with a last store vistor. +void bugreporter::registerVarDeclsLastStore(BugReporterContext &BRC, + const void *stmt, + const ExplodedNode *N) { + const Stmt *S = static_cast<const Stmt *>(stmt); + + std::deque<const Stmt *> WorkList; + + WorkList.push_back(S); + + while (!WorkList.empty()) { + const Stmt *Head = WorkList.front(); + WorkList.pop_front(); + + GRStateManager &StateMgr = BRC.getStateManager(); + const GRState *state = N->getState(); + + if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Head)) { + if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { + const VarRegion *R = + StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext()); + + // What did we load? 
+ SVal V = state->getSVal(S); + + if (isa<loc::ConcreteInt>(V) || isa<nonloc::ConcreteInt>(V)) { + ::registerFindLastStore(BRC, R, V); + } + } + } + + for (Stmt::const_child_iterator I = Head->child_begin(); + I != Head->child_end(); ++I) + WorkList.push_back(*I); + } +} diff --git a/lib/StaticAnalyzer/Core/CFRefCount.cpp b/lib/StaticAnalyzer/Core/CFRefCount.cpp new file mode 100644 index 0000000..b3721d7 --- /dev/null +++ b/lib/StaticAnalyzer/Core/CFRefCount.cpp @@ -0,0 +1,3519 @@ +// CFRefCount.cpp - Transfer functions for tracking simple values -*- C++ -*--// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the methods for CFRefCount, which implements +// a reference count checker for Core Foundation (Mac OS X). +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/DeclObjC.h" +#include "clang/AST/StmtVisitor.h" +#include "clang/Basic/LangOptions.h" +#include "clang/Basic/SourceManager.h" +#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h" +#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h" +#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h" +#include "clang/Analysis/DomainSpecific/CocoaConventions.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngineBuilders.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/TransferFuncs.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/FoldingSet.h" +#include "llvm/ADT/ImmutableList.h" +#include "llvm/ADT/ImmutableMap.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/StringExtras.h" +#include <stdarg.h> + +using namespace clang; +using namespace ento; +using llvm::StringRef; +using llvm::StrInStrNoCase; + +namespace { +class InstanceReceiver { + ObjCMessage Msg; + const LocationContext *LC; +public: + InstanceReceiver() : LC(0) { } + InstanceReceiver(const ObjCMessage &msg, + const LocationContext *lc = 0) : Msg(msg), LC(lc) {} + + bool isValid() const { + return Msg.isValid() && Msg.isInstanceMessage(); + } + operator bool() const { + return isValid(); + } + + SVal getSValAsScalarOrLoc(const GRState *state) { + assert(isValid()); + // We have an expression for the receiver? Fetch the value + // of that expression. + if (const Expr *Ex = Msg.getInstanceReceiver()) + return state->getSValAsScalarOrLoc(Ex); + + // Otherwise we are sending a message to super. In this case the + // object reference is the same as 'self'. + if (const ImplicitParamDecl *SelfDecl = LC->getSelfDecl()) + return state->getSVal(state->getRegion(SelfDecl, LC)); + + return UnknownVal(); + } + + SourceRange getSourceRange() const { + assert(isValid()); + if (const Expr *Ex = Msg.getInstanceReceiver()) + return Ex->getSourceRange(); + + // Otherwise we are sending a message to super. + SourceLocation L = Msg.getSuperLoc(); + assert(L.isValid()); + return SourceRange(L, L); + } +}; +} + +static const ObjCMethodDecl* +ResolveToInterfaceMethodDecl(const ObjCMethodDecl *MD) { + const ObjCInterfaceDecl *ID = MD->getClassInterface(); + + return MD->isInstanceMethod() + ? 
ID->lookupInstanceMethod(MD->getSelector())
+         : ID->lookupClassMethod(MD->getSelector());
+}
+
+namespace {
+class GenericNodeBuilderRefCount {
+  StmtNodeBuilder *SNB;
+  const Stmt *S;
+  const void *tag;
+  EndOfFunctionNodeBuilder *ENB;
+public:
+  GenericNodeBuilderRefCount(StmtNodeBuilder &snb, const Stmt *s,
+                             const void *t)
+  : SNB(&snb), S(s), tag(t), ENB(0) {}
+
+  GenericNodeBuilderRefCount(EndOfFunctionNodeBuilder &enb)
+  : SNB(0), S(0), tag(0), ENB(&enb) {}
+
+  ExplodedNode *MakeNode(const GRState *state, ExplodedNode *Pred) {
+    if (SNB)
+      return SNB->generateNode(PostStmt(S, Pred->getLocationContext(), tag),
+                               state, Pred);
+
+    assert(ENB);
+    return ENB->generateNode(state, Pred);
+  }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Primitives used for constructing summaries for function/method calls.
+//===----------------------------------------------------------------------===//
+
+/// ArgEffect is used to summarize a function/method call's effect on a
+/// particular argument.
+enum ArgEffect { Autorelease, Dealloc, DecRef, DecRefMsg, DoNothing,
+                 DoNothingByRef, IncRefMsg, IncRef, MakeCollectable, MayEscape,
+                 NewAutoreleasePool, SelfOwn, StopTracking };
+
+namespace llvm {
+template <> struct FoldingSetTrait<ArgEffect> {
+static inline void Profile(const ArgEffect X, FoldingSetNodeID& ID) {
+  ID.AddInteger((unsigned) X);
+}
+};
+} // end llvm namespace
+
+/// ArgEffects summarizes the effects of a function/method call on all of
+/// its arguments.
+typedef llvm::ImmutableMap<unsigned,ArgEffect> ArgEffects;
+
+namespace {
+
+/// RetEffect is used to summarize a function/method call's behavior with
+/// respect to its return value.
+class RetEffect {
+public:
+  enum Kind { NoRet, Alias, OwnedSymbol, OwnedAllocatedSymbol,
+              NotOwnedSymbol, GCNotOwnedSymbol, ReceiverAlias,
+              OwnedWhenTrackedReceiver };
+
+  enum ObjKind { CF, ObjC, AnyObj };
+
+private:
+  Kind K;
+  ObjKind O;
+  unsigned index;
+
+  RetEffect(Kind k, unsigned idx = 0) : K(k), O(AnyObj), index(idx) {}
+  RetEffect(Kind k, ObjKind o) : K(k), O(o), index(0) {}
+
+public:
+  Kind getKind() const { return K; }
+
+  ObjKind getObjKind() const { return O; }
+
+  unsigned getIndex() const {
+    assert(getKind() == Alias);
+    return index;
+  }
+
+  bool isOwned() const {
+    return K == OwnedSymbol || K == OwnedAllocatedSymbol ||
+           K == OwnedWhenTrackedReceiver;
+  }
+
+  static RetEffect MakeOwnedWhenTrackedReceiver() {
+    return RetEffect(OwnedWhenTrackedReceiver, ObjC);
+  }
+
+  static RetEffect MakeAlias(unsigned Idx) {
+    return RetEffect(Alias, Idx);
+  }
+  static RetEffect MakeReceiverAlias() {
+    return RetEffect(ReceiverAlias);
+  }
+  static RetEffect MakeOwned(ObjKind o, bool isAllocated = false) {
+    return RetEffect(isAllocated ? OwnedAllocatedSymbol : OwnedSymbol, o);
+  }
+  static RetEffect MakeNotOwned(ObjKind o) {
+    return RetEffect(NotOwnedSymbol, o);
+  }
+  static RetEffect MakeGCNotOwned() {
+    return RetEffect(GCNotOwnedSymbol, ObjC);
+  }
+
+  static RetEffect MakeNoRet() {
+    return RetEffect(NoRet);
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// Reference-counting logic (typestate + counts).
+//===----------------------------------------------------------------------===//
+
+class RefVal {
+public:
+  enum Kind {
+    Owned = 0,  // Owning reference.
+    NotOwned,   // Reference is not owned but still valid (not freed).
+    Released,   // Object has been released.
+    ReturnedOwned,    // Returned object passes ownership to caller.
+    ReturnedNotOwned, // Returned object does not pass ownership to caller.
+    ERROR_START,
+    ErrorDeallocNotOwned, // -dealloc called on non-owned object.
+    ErrorDeallocGC,       // Calling -dealloc with GC enabled.
+    ErrorUseAfterRelease, // Object used after released.
+    ErrorReleaseNotOwned, // Release of an object that was not owned.
+    ERROR_LEAK_START,
+    ErrorLeak,  // A memory leak due to excessive reference counts.
+    ErrorLeakReturned, // A memory leak due to the returning method not having
+                       // the correct naming conventions.
+    ErrorGCLeakReturned,
+    ErrorOverAutorelease,
+    ErrorReturnedNotOwned
+  };
+
+private:
+  Kind kind;
+  RetEffect::ObjKind okind;
+  unsigned Cnt;
+  unsigned ACnt;
+  QualType T;
+
+  RefVal(Kind k, RetEffect::ObjKind o, unsigned cnt, unsigned acnt, QualType t)
+  : kind(k), okind(o), Cnt(cnt), ACnt(acnt), T(t) {}
+
+public:
+  Kind getKind() const { return kind; }
+
+  RetEffect::ObjKind getObjKind() const { return okind; }
+
+  unsigned getCount() const { return Cnt; }
+  unsigned getAutoreleaseCount() const { return ACnt; }
+  unsigned getCombinedCounts() const { return Cnt + ACnt; }
+  void clearCounts() { Cnt = 0; ACnt = 0; }
+  void setCount(unsigned i) { Cnt = i; }
+  void setAutoreleaseCount(unsigned i) { ACnt = i; }
+
+  QualType getType() const { return T; }
+
+  bool isOwned() const {
+    return getKind() == Owned;
+  }
+
+  bool isNotOwned() const {
+    return getKind() == NotOwned;
+  }
+
+  bool isReturnedOwned() const {
+    return getKind() == ReturnedOwned;
+  }
+
+  bool isReturnedNotOwned() const {
+    return getKind() == ReturnedNotOwned;
+  }
+
+  static RefVal makeOwned(RetEffect::ObjKind o, QualType t,
+                          unsigned Count = 1) {
+    return RefVal(Owned, o, Count, 0, t);
+  }
+
+  static RefVal makeNotOwned(RetEffect::ObjKind o, QualType t,
+                             unsigned Count = 0) {
+    return RefVal(NotOwned, o, Count, 0, t);
+  }
+
+  // Comparison, profiling, and pretty-printing.
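+  // (Illustrative sketch of the operator algebra defined below, assuming a
+  //  hypothetical transfer function holding a RefVal V:
+  //    V = V + 1;                 // e.g. a retain: bump the retain count
+  //    V = V - 1;                 // e.g. a release: drop the retain count
+  //    V = V ^ RefVal::Released;  // change the kind, keep both counts
+  //    V = V.autorelease();       // bump only the autorelease count
+  //  Each expression builds a fresh RefVal rather than mutating V in place.)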
+ + bool operator==(const RefVal& X) const { + return kind == X.kind && Cnt == X.Cnt && T == X.T && ACnt == X.ACnt; + } + + RefVal operator-(size_t i) const { + return RefVal(getKind(), getObjKind(), getCount() - i, + getAutoreleaseCount(), getType()); + } + + RefVal operator+(size_t i) const { + return RefVal(getKind(), getObjKind(), getCount() + i, + getAutoreleaseCount(), getType()); + } + + RefVal operator^(Kind k) const { + return RefVal(k, getObjKind(), getCount(), getAutoreleaseCount(), + getType()); + } + + RefVal autorelease() const { + return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount()+1, + getType()); + } + + void Profile(llvm::FoldingSetNodeID& ID) const { + ID.AddInteger((unsigned) kind); + ID.AddInteger(Cnt); + ID.AddInteger(ACnt); + ID.Add(T); + } + + void print(llvm::raw_ostream& Out) const; +}; + +void RefVal::print(llvm::raw_ostream& Out) const { + if (!T.isNull()) + Out << "Tracked Type:" << T.getAsString() << '\n'; + + switch (getKind()) { + default: assert(false); + case Owned: { + Out << "Owned"; + unsigned cnt = getCount(); + if (cnt) Out << " (+ " << cnt << ")"; + break; + } + + case NotOwned: { + Out << "NotOwned"; + unsigned cnt = getCount(); + if (cnt) Out << " (+ " << cnt << ")"; + break; + } + + case ReturnedOwned: { + Out << "ReturnedOwned"; + unsigned cnt = getCount(); + if (cnt) Out << " (+ " << cnt << ")"; + break; + } + + case ReturnedNotOwned: { + Out << "ReturnedNotOwned"; + unsigned cnt = getCount(); + if (cnt) Out << " (+ " << cnt << ")"; + break; + } + + case Released: + Out << "Released"; + break; + + case ErrorDeallocGC: + Out << "-dealloc (GC)"; + break; + + case ErrorDeallocNotOwned: + Out << "-dealloc (not-owned)"; + break; + + case ErrorLeak: + Out << "Leaked"; + break; + + case ErrorLeakReturned: + Out << "Leaked (Bad naming)"; + break; + + case ErrorGCLeakReturned: + Out << "Leaked (GC-ed at return)"; + break; + + case ErrorUseAfterRelease: + Out << "Use-After-Release [ERROR]"; + break; + + case ErrorReleaseNotOwned: + Out << "Release of Not-Owned [ERROR]"; + break; + + case RefVal::ErrorOverAutorelease: + Out << "Over autoreleased"; + break; + + case RefVal::ErrorReturnedNotOwned: + Out << "Non-owned object returned instead of owned"; + break; + } + + if (ACnt) { + Out << " [ARC +" << ACnt << ']'; + } +} +} //end anonymous namespace + +//===----------------------------------------------------------------------===// +// RefBindings - State used to track object reference counts. +//===----------------------------------------------------------------------===// + +typedef llvm::ImmutableMap<SymbolRef, RefVal> RefBindings; + +namespace clang { +namespace ento { + template<> + struct GRStateTrait<RefBindings> : public GRStatePartialTrait<RefBindings> { + static void* GDMIndex() { + static int RefBIndex = 0; + return &RefBIndex; + } + }; +} +} + +//===----------------------------------------------------------------------===// +// Summaries +//===----------------------------------------------------------------------===// + +namespace { +class RetainSummary { + /// Args - an ordered vector of (index, ArgEffect) pairs, where index + /// specifies the argument (starting from 0). This can be sparsely + /// populated; arguments with no entry in Args use 'DefaultArgEffect'. + ArgEffects Args; + + /// DefaultArgEffect - The default ArgEffect to apply to arguments that + /// do not have an entry in Args. 
+ ArgEffect DefaultArgEffect; + + /// Receiver - If this summary applies to an Objective-C message expression, + /// this is the effect applied to the state of the receiver. + ArgEffect Receiver; + + /// Ret - The effect on the return value. Used to indicate if the + /// function/method call returns a new tracked symbol, returns an + /// alias of one of the arguments in the call, and so on. + RetEffect Ret; + + /// EndPath - Indicates that execution of this method/function should + /// terminate the simulation of a path. + bool EndPath; + +public: + RetainSummary(ArgEffects A, RetEffect R, ArgEffect defaultEff, + ArgEffect ReceiverEff, bool endpath = false) + : Args(A), DefaultArgEffect(defaultEff), Receiver(ReceiverEff), Ret(R), + EndPath(endpath) {} + + /// getArg - Return the argument effect on the argument specified by + /// idx (starting from 0). + ArgEffect getArg(unsigned idx) const { + if (const ArgEffect *AE = Args.lookup(idx)) + return *AE; + + return DefaultArgEffect; + } + + void addArg(ArgEffects::Factory &af, unsigned idx, ArgEffect e) { + Args = af.add(Args, idx, e); + } + + /// setDefaultArgEffect - Set the default argument effect. + void setDefaultArgEffect(ArgEffect E) { + DefaultArgEffect = E; + } + + /// getRetEffect - Returns the effect on the return value of the call. + RetEffect getRetEffect() const { return Ret; } + + /// setRetEffect - Set the effect of the return value of the call. + void setRetEffect(RetEffect E) { Ret = E; } + + /// isEndPath - Returns true if executing the given method/function should + /// terminate the path. + bool isEndPath() const { return EndPath; } + + + /// Sets the effect on the receiver of the message. + void setReceiverEffect(ArgEffect e) { Receiver = e; } + + /// getReceiverEffect - Returns the effect on the receiver of the call. + /// This is only meaningful if the summary applies to an ObjCMessageExpr*. + ArgEffect getReceiverEffect() const { return Receiver; } +}; +} // end anonymous namespace + +//===----------------------------------------------------------------------===// +// Data structures for constructing summaries. +//===----------------------------------------------------------------------===// + +namespace { +class ObjCSummaryKey { + IdentifierInfo* II; + Selector S; +public: + ObjCSummaryKey(IdentifierInfo* ii, Selector s) + : II(ii), S(s) {} + + ObjCSummaryKey(const ObjCInterfaceDecl* d, Selector s) + : II(d ? d->getIdentifier() : 0), S(s) {} + + ObjCSummaryKey(const ObjCInterfaceDecl* d, IdentifierInfo *ii, Selector s) + : II(d ? 
d->getIdentifier() : ii), S(s) {}
+
+  ObjCSummaryKey(Selector s)
+    : II(0), S(s) {}
+
+  IdentifierInfo* getIdentifier() const { return II; }
+  Selector getSelector() const { return S; }
+};
+}
+
+namespace llvm {
+template <> struct DenseMapInfo<ObjCSummaryKey> {
+  static inline ObjCSummaryKey getEmptyKey() {
+    return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getEmptyKey(),
+                          DenseMapInfo<Selector>::getEmptyKey());
+  }
+
+  static inline ObjCSummaryKey getTombstoneKey() {
+    return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getTombstoneKey(),
+                          DenseMapInfo<Selector>::getTombstoneKey());
+  }
+
+  static unsigned getHashValue(const ObjCSummaryKey &V) {
+    return (DenseMapInfo<IdentifierInfo*>::getHashValue(V.getIdentifier())
+            & 0x88888888)
+         | (DenseMapInfo<Selector>::getHashValue(V.getSelector())
+            & 0x55555555);
+  }
+
+  static bool isEqual(const ObjCSummaryKey& LHS, const ObjCSummaryKey& RHS) {
+    return DenseMapInfo<IdentifierInfo*>::isEqual(LHS.getIdentifier(),
+                                                  RHS.getIdentifier()) &&
+           DenseMapInfo<Selector>::isEqual(LHS.getSelector(),
+                                           RHS.getSelector());
+  }
+
+};
+template <>
+struct isPodLike<ObjCSummaryKey> { static const bool value = true; };
+} // end llvm namespace
+
+namespace {
+class ObjCSummaryCache {
+  typedef llvm::DenseMap<ObjCSummaryKey, RetainSummary*> MapTy;
+  MapTy M;
+public:
+  ObjCSummaryCache() {}
+
+  RetainSummary* find(const ObjCInterfaceDecl* D, IdentifierInfo *ClsName,
+                      Selector S) {
+    // Look up the method using the decl for the class @interface.  If we
+    // have no decl, look up using the class name.
+    return D ? find(D, S) : find(ClsName, S);
+  }
+
+  RetainSummary* find(const ObjCInterfaceDecl* D, Selector S) {
+    // Do a lookup with the (D,S) pair.  If we find a match return
+    // the summary.
+    ObjCSummaryKey K(D, S);
+    MapTy::iterator I = M.find(K);
+
+    if (I != M.end())
+      return I->second;
+    if (!D)
+      return NULL;
+
+    // Walk the super chain.  If we find a hit with a parent, we'll end
+    // up returning that summary.  We actually allow that key (null,S), as
+    // we cache summaries for the null ObjCInterfaceDecl* to allow us to
+    // generate initial summaries without having to worry about NSObject
+    // being declared.
+    // FIXME: We may change this at some point.
+    for (ObjCInterfaceDecl* C=D->getSuperClass() ;; C=C->getSuperClass()) {
+      if ((I = M.find(ObjCSummaryKey(C, S))) != M.end())
+        break;
+
+      if (!C)
+        return NULL;
+    }
+
+    // Cache the summary with original key to make the next lookup faster
+    // and return the summary.
+    RetainSummary *Summ = I->second;
+    M[K] = Summ;
+    return Summ;
+  }
+
+  RetainSummary* find(IdentifierInfo* II, Selector S) {
+    // FIXME: Class method lookup.  Right now we don't have a good way
+    // of going between IdentifierInfo* and the class hierarchy.
+    MapTy::iterator I = M.find(ObjCSummaryKey(II, S));
+
+    if (I == M.end())
+      I = M.find(ObjCSummaryKey(S));
+
+    return I == M.end() ? NULL : I->second;
+  }
+
+  RetainSummary*& operator[](ObjCSummaryKey K) {
+    return M[K];
+  }
+
+  RetainSummary*& operator[](Selector S) {
+    return M[ ObjCSummaryKey(S) ];
+  }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Data structures for managing collections of summaries.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RetainSummaryManager {
+
+  //==-----------------------------------------------------------------==//
+  // Typedefs.
+  //==-----------------------------------------------------------------==//
+
+  typedef llvm::DenseMap<const FunctionDecl*, RetainSummary*>
+          FuncSummariesTy;
+
+  typedef ObjCSummaryCache ObjCMethodSummariesTy;
+
+  //==-----------------------------------------------------------------==//
+  // Data.
+  //==-----------------------------------------------------------------==//
+
+  /// Ctx - The ASTContext object for the analyzed ASTs.
+  ASTContext& Ctx;
+
+  /// CFDictionaryCreateII - An IdentifierInfo* representing the identifier
+  /// "CFDictionaryCreate".
+  IdentifierInfo* CFDictionaryCreateII;
+
+  /// GCEnabled - Records whether or not the analyzed code runs in GC mode.
+  const bool GCEnabled;
+
+  /// FuncSummaries - A map from FunctionDecls to summaries.
+  FuncSummariesTy FuncSummaries;
+
+  /// ObjCClassMethodSummaries - A map from selectors (for class methods)
+  /// to summaries.
+  ObjCMethodSummariesTy ObjCClassMethodSummaries;
+
+  /// ObjCMethodSummaries - A map from selectors to summaries.
+  ObjCMethodSummariesTy ObjCMethodSummaries;
+
+  /// BPAlloc - A BumpPtrAllocator used for allocating summaries, ArgEffects,
+  /// and all other data used by the checker.
+  llvm::BumpPtrAllocator BPAlloc;
+
+  /// AF - A factory for ArgEffects objects.
+  ArgEffects::Factory AF;
+
+  /// ScratchArgs - A holding buffer for constructing ArgEffects.
+  ArgEffects ScratchArgs;
+
+  /// ObjCAllocRetE - Default return effect for methods returning Objective-C
+  /// objects.
+  RetEffect ObjCAllocRetE;
+
+  /// ObjCInitRetE - Default return effect for init methods returning
+  /// Objective-C objects.
+  RetEffect ObjCInitRetE;
+
+  RetainSummary DefaultSummary;
+  RetainSummary* StopSummary;
+
+  //==-----------------------------------------------------------------==//
+  // Methods.
+  //==-----------------------------------------------------------------==//
+
+  /// getArgEffects - Returns a persistent ArgEffects object based on the
+  /// data in ScratchArgs.
+ ArgEffects getArgEffects(); + + enum UnaryFuncKind { cfretain, cfrelease, cfmakecollectable }; + +public: + RetEffect getObjAllocRetEffect() const { return ObjCAllocRetE; } + + RetainSummary *getDefaultSummary() { + RetainSummary *Summ = (RetainSummary*) BPAlloc.Allocate<RetainSummary>(); + return new (Summ) RetainSummary(DefaultSummary); + } + + RetainSummary* getUnarySummary(const FunctionType* FT, UnaryFuncKind func); + + RetainSummary* getCFSummaryCreateRule(const FunctionDecl* FD); + RetainSummary* getCFSummaryGetRule(const FunctionDecl* FD); + RetainSummary* getCFCreateGetRuleSummary(const FunctionDecl* FD, + StringRef FName); + + RetainSummary* getPersistentSummary(ArgEffects AE, RetEffect RetEff, + ArgEffect ReceiverEff = DoNothing, + ArgEffect DefaultEff = MayEscape, + bool isEndPath = false); + + RetainSummary* getPersistentSummary(RetEffect RE, + ArgEffect ReceiverEff = DoNothing, + ArgEffect DefaultEff = MayEscape) { + return getPersistentSummary(getArgEffects(), RE, ReceiverEff, DefaultEff); + } + + RetainSummary *getPersistentStopSummary() { + if (StopSummary) + return StopSummary; + + StopSummary = getPersistentSummary(RetEffect::MakeNoRet(), + StopTracking, StopTracking); + + return StopSummary; + } + + RetainSummary *getInitMethodSummary(QualType RetTy); + + void InitializeClassMethodSummaries(); + void InitializeMethodSummaries(); +private: + void addNSObjectClsMethSummary(Selector S, RetainSummary *Summ) { + ObjCClassMethodSummaries[S] = Summ; + } + + void addNSObjectMethSummary(Selector S, RetainSummary *Summ) { + ObjCMethodSummaries[S] = Summ; + } + + void addClassMethSummary(const char* Cls, const char* nullaryName, + RetainSummary *Summ) { + IdentifierInfo* ClsII = &Ctx.Idents.get(Cls); + Selector S = GetNullarySelector(nullaryName, Ctx); + ObjCClassMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ; + } + + void addInstMethSummary(const char* Cls, const char* nullaryName, + RetainSummary *Summ) { + IdentifierInfo* ClsII = &Ctx.Idents.get(Cls); + Selector S = GetNullarySelector(nullaryName, Ctx); + ObjCMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ; + } + + Selector generateSelector(va_list argp) { + llvm::SmallVector<IdentifierInfo*, 10> II; + + while (const char* s = va_arg(argp, const char*)) + II.push_back(&Ctx.Idents.get(s)); + + return Ctx.Selectors.getSelector(II.size(), &II[0]); + } + + void addMethodSummary(IdentifierInfo *ClsII, ObjCMethodSummariesTy& Summaries, + RetainSummary* Summ, va_list argp) { + Selector S = generateSelector(argp); + Summaries[ObjCSummaryKey(ClsII, S)] = Summ; + } + + void addInstMethSummary(const char* Cls, RetainSummary* Summ, ...) { + va_list argp; + va_start(argp, Summ); + addMethodSummary(&Ctx.Idents.get(Cls), ObjCMethodSummaries, Summ, argp); + va_end(argp); + } + + void addClsMethSummary(const char* Cls, RetainSummary* Summ, ...) { + va_list argp; + va_start(argp, Summ); + addMethodSummary(&Ctx.Idents.get(Cls),ObjCClassMethodSummaries, Summ, argp); + va_end(argp); + } + + void addClsMethSummary(IdentifierInfo *II, RetainSummary* Summ, ...) { + va_list argp; + va_start(argp, Summ); + addMethodSummary(II, ObjCClassMethodSummaries, Summ, argp); + va_end(argp); + } + + void addPanicSummary(const char* Cls, ...) 
{ + RetainSummary* Summ = getPersistentSummary(AF.getEmptyMap(), + RetEffect::MakeNoRet(), + DoNothing, DoNothing, true); + va_list argp; + va_start (argp, Cls); + addMethodSummary(&Ctx.Idents.get(Cls), ObjCMethodSummaries, Summ, argp); + va_end(argp); + } + +public: + + RetainSummaryManager(ASTContext& ctx, bool gcenabled) + : Ctx(ctx), + CFDictionaryCreateII(&ctx.Idents.get("CFDictionaryCreate")), + GCEnabled(gcenabled), AF(BPAlloc), ScratchArgs(AF.getEmptyMap()), + ObjCAllocRetE(gcenabled ? RetEffect::MakeGCNotOwned() + : RetEffect::MakeOwned(RetEffect::ObjC, true)), + ObjCInitRetE(gcenabled ? RetEffect::MakeGCNotOwned() + : RetEffect::MakeOwnedWhenTrackedReceiver()), + DefaultSummary(AF.getEmptyMap() /* per-argument effects (none) */, + RetEffect::MakeNoRet() /* return effect */, + MayEscape, /* default argument effect */ + DoNothing /* receiver effect */), + StopSummary(0) { + + InitializeClassMethodSummaries(); + InitializeMethodSummaries(); + } + + ~RetainSummaryManager(); + + RetainSummary* getSummary(const FunctionDecl* FD); + + RetainSummary *getInstanceMethodSummary(const ObjCMessage &msg, + const GRState *state, + const LocationContext *LC); + + RetainSummary* getInstanceMethodSummary(const ObjCMessage &msg, + const ObjCInterfaceDecl* ID) { + return getInstanceMethodSummary(msg.getSelector(), 0, + ID, msg.getMethodDecl(), msg.getType(Ctx)); + } + + RetainSummary* getInstanceMethodSummary(Selector S, IdentifierInfo *ClsName, + const ObjCInterfaceDecl* ID, + const ObjCMethodDecl *MD, + QualType RetTy); + + RetainSummary *getClassMethodSummary(Selector S, IdentifierInfo *ClsName, + const ObjCInterfaceDecl *ID, + const ObjCMethodDecl *MD, + QualType RetTy); + + RetainSummary *getClassMethodSummary(const ObjCMessage &msg) { + const ObjCInterfaceDecl *Class = 0; + if (!msg.isInstanceMessage()) + Class = msg.getReceiverInterface(); + + return getClassMethodSummary(msg.getSelector(), + Class? Class->getIdentifier() : 0, + Class, + msg.getMethodDecl(), msg.getType(Ctx)); + } + + /// getMethodSummary - This version of getMethodSummary is used to query + /// the summary for the current method being analyzed. + RetainSummary *getMethodSummary(const ObjCMethodDecl *MD) { + // FIXME: Eventually this should be unneeded. + const ObjCInterfaceDecl *ID = MD->getClassInterface(); + Selector S = MD->getSelector(); + IdentifierInfo *ClsName = ID->getIdentifier(); + QualType ResultTy = MD->getResultType(); + + // Resolve the method decl last. + if (const ObjCMethodDecl *InterfaceMD = ResolveToInterfaceMethodDecl(MD)) + MD = InterfaceMD; + + if (MD->isInstanceMethod()) + return getInstanceMethodSummary(S, ClsName, ID, MD, ResultTy); + else + return getClassMethodSummary(S, ClsName, ID, MD, ResultTy); + } + + RetainSummary* getCommonMethodSummary(const ObjCMethodDecl* MD, + Selector S, QualType RetTy); + + void updateSummaryFromAnnotations(RetainSummary &Summ, + const ObjCMethodDecl *MD); + + void updateSummaryFromAnnotations(RetainSummary &Summ, + const FunctionDecl *FD); + + bool isGCEnabled() const { return GCEnabled; } + + RetainSummary *copySummary(RetainSummary *OldSumm) { + RetainSummary *Summ = (RetainSummary*) BPAlloc.Allocate<RetainSummary>(); + new (Summ) RetainSummary(*OldSumm); + return Summ; + } +}; + +} // end anonymous namespace + +//===----------------------------------------------------------------------===// +// Implementation of checker data structures. 
+//===----------------------------------------------------------------------===//
+
+RetainSummaryManager::~RetainSummaryManager() {}
+
+ArgEffects RetainSummaryManager::getArgEffects() {
+  ArgEffects AE = ScratchArgs;
+  ScratchArgs = AF.getEmptyMap();
+  return AE;
+}
+
+RetainSummary*
+RetainSummaryManager::getPersistentSummary(ArgEffects AE, RetEffect RetEff,
+                                           ArgEffect ReceiverEff,
+                                           ArgEffect DefaultEff,
+                                           bool isEndPath) {
+  // Create the summary and return it.
+  RetainSummary *Summ = (RetainSummary*) BPAlloc.Allocate<RetainSummary>();
+  new (Summ) RetainSummary(AE, RetEff, DefaultEff, ReceiverEff, isEndPath);
+  return Summ;
+}
+
+//===----------------------------------------------------------------------===//
+// Summary creation for functions (largely uses of Core Foundation).
+//===----------------------------------------------------------------------===//
+
+static bool isRetain(const FunctionDecl* FD, StringRef FName) {
+  return FName.endswith("Retain");
+}
+
+static bool isRelease(const FunctionDecl* FD, StringRef FName) {
+  return FName.endswith("Release");
+}
+
+RetainSummary* RetainSummaryManager::getSummary(const FunctionDecl* FD) {
+  // Look up a summary in our cache of FunctionDecls -> Summaries.
+  FuncSummariesTy::iterator I = FuncSummaries.find(FD);
+  if (I != FuncSummaries.end())
+    return I->second;
+
+  // No summary?  Generate one.
+  RetainSummary *S = 0;
+
+  do {
+    // We generate "stop" summaries for implicitly defined functions.
+    if (FD->isImplicit()) {
+      S = getPersistentStopSummary();
+      break;
+    }
+
+    // [PR 3337] Use 'getAs<FunctionType>' to strip away any typedefs on the
+    // function's type.
+    const FunctionType* FT = FD->getType()->getAs<FunctionType>();
+    const IdentifierInfo *II = FD->getIdentifier();
+    if (!II)
+      break;
+
+    StringRef FName = II->getName();
+
+    // Strip away preceding '_'.  Doing this here will affect all the checks
+    // down below.
+    FName = FName.substr(FName.find_first_not_of('_'));
+
+    // Inspect the result type.
+    QualType RetTy = FT->getResultType();
+
+    // FIXME: This should all be refactored into a chain of "summary lookup"
+    //  filters.
+    assert(ScratchArgs.isEmpty());
+
+    if (FName == "pthread_create") {
+      // Part of: <rdar://problem/7299394>.  This will be addressed
+      // better with IPA.
+      S = getPersistentStopSummary();
+    } else if (FName == "NSMakeCollectable") {
+      // Handle: id NSMakeCollectable(CFTypeRef)
+      S = (RetTy->isObjCIdType())
+          ? getUnarySummary(FT, cfmakecollectable)
+          : getPersistentStopSummary();
+    } else if (FName == "IOBSDNameMatching" ||
+               FName == "IOServiceMatching" ||
+               FName == "IOServiceNameMatching" ||
+               FName == "IORegistryEntryIDMatching" ||
+               FName == "IOOpenFirmwarePathMatching") {
+      // Part of <rdar://problem/6961230>. (IOKit)
+      // This should be addressed using an API table.
+      S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
+                               DoNothing, DoNothing);
+    } else if (FName == "IOServiceGetMatchingService" ||
+               FName == "IOServiceGetMatchingServices") {
+      // FIXES: <rdar://problem/6326900>
+      // This should be addressed using an API table.  This string matching
+      // is also a little gross, but there is no need to super optimize here.
+      ScratchArgs = AF.add(ScratchArgs, 1, DecRef);
+      S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+    } else if (FName == "IOServiceAddNotification" ||
+               FName == "IOServiceAddMatchingNotification") {
+      // Part of <rdar://problem/6961230>. (IOKit)
+      // This should be addressed using an API table.
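+      // (Note on the ScratchArgs pattern used here and below: ArgEffects is
+      //  an llvm::ImmutableMap, so AF.add() returns a new map instead of
+      //  mutating in place.  A summary for a hypothetical function that
+      //  decrements its second argument would be built roughly as:
+      //    ScratchArgs = AF.add(ScratchArgs, 1, DecRef);
+      //    S = getPersistentSummary(RetEffect::MakeNoRet());
+      //  where getPersistentSummary() consumes ScratchArgs via
+      //  getArgEffects() and resets it to the empty map.)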
+      ScratchArgs = AF.add(ScratchArgs, 2, DecRef);
+      S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+    } else if (FName == "CVPixelBufferCreateWithBytes") {
+      // FIXES: <rdar://problem/7283567>
+      // Eventually this can be improved by recognizing that the pixel
+      // buffer passed to CVPixelBufferCreateWithBytes is released via
+      // a callback and doing full IPA to make sure this is done correctly.
+      // FIXME: This function has an out parameter that returns an
+      // allocated object.
+      ScratchArgs = AF.add(ScratchArgs, 7, StopTracking);
+      S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+    } else if (FName == "CGBitmapContextCreateWithData") {
+      // FIXES: <rdar://problem/7358899>
+      // Eventually this can be improved by recognizing that 'releaseInfo'
+      // passed to CGBitmapContextCreateWithData is released via
+      // a callback and doing full IPA to make sure this is done correctly.
+      ScratchArgs = AF.add(ScratchArgs, 8, StopTracking);
+      S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
+                               DoNothing, DoNothing);
+    } else if (FName == "CVPixelBufferCreateWithPlanarBytes") {
+      // FIXES: <rdar://problem/7283567>
+      // Eventually this can be improved by recognizing that the pixel
+      // buffer passed to CVPixelBufferCreateWithPlanarBytes is released
+      // via a callback and doing full IPA to make sure this is done
+      // correctly.
+      ScratchArgs = AF.add(ScratchArgs, 12, StopTracking);
+      S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+    }
+
+    // Did we get a summary?
+    if (S)
+      break;
+
+    // Enable this code once the semantics of NSDeallocateObject are resolved
+    // for GC.  <rdar://problem/6619988>
+#if 0
+    // Handle: NSDeallocateObject(id anObject);
+    // This method does allow 'nil' (although we don't check it now).
+    if (strcmp(FName, "NSDeallocateObject") == 0) {
+      return RetTy == Ctx.VoidTy
+        ? getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, Dealloc)
+        : getPersistentStopSummary();
+    }
+#endif
+
+    if (RetTy->isPointerType()) {
+      // For CoreFoundation ('CF') types.
+      if (cocoa::isRefType(RetTy, "CF", FName)) {
+        if (isRetain(FD, FName))
+          S = getUnarySummary(FT, cfretain);
+        else if (FName.find("MakeCollectable") != StringRef::npos)
+          S = getUnarySummary(FT, cfmakecollectable);
+        else
+          S = getCFCreateGetRuleSummary(FD, FName);
+
+        break;
+      }
+
+      // For CoreGraphics ('CG') types.
+      if (cocoa::isRefType(RetTy, "CG", FName)) {
+        if (isRetain(FD, FName))
+          S = getUnarySummary(FT, cfretain);
+        else
+          S = getCFCreateGetRuleSummary(FD, FName);
+
+        break;
+      }
+
+      // For the Disk Arbitration API (DiskArbitration/DADisk.h)
+      if (cocoa::isRefType(RetTy, "DADisk") ||
+          cocoa::isRefType(RetTy, "DADissenter") ||
+          cocoa::isRefType(RetTy, "DASessionRef")) {
+        S = getCFCreateGetRuleSummary(FD, FName);
+        break;
+      }
+
+      break;
+    }
+
+    // Check for release functions, the only kind of functions that we care
+    // about that don't return a pointer type.
+    if (FName[0] == 'C' && (FName[1] == 'F' || FName[1] == 'G')) {
+      // Test for 'CGCF'.
+      FName = FName.substr(FName.startswith("CGCF") ? 4 : 2);
+
+      if (isRelease(FD, FName))
+        S = getUnarySummary(FT, cfrelease);
+      else {
+        assert(ScratchArgs.isEmpty());
+        // Remaining CoreFoundation and CoreGraphics functions.
+        // We used to assume that they all strictly followed the ownership
+        // idiom and that ownership cannot be transferred.  While this is
+        // technically correct, many methods allow a tracked object to escape.
+        // For example:
+        //
+        //   CFMutableDictionaryRef x = CFDictionaryCreateMutable(...);
+        //   CFDictionaryAddValue(y, key, x);
+        //   CFRelease(x);
+        //   ... it is okay to use 'x' since 'y' has a reference to it
+        //
+        // We handle this and similar cases with the following heuristic.  If
+        // the function name contains "InsertValue", "SetValue", "AddValue",
+        // "AppendValue", or "SetAttribute", then we assume that arguments may
+        // "escape."  This means that something else holds on to the object,
+        // allowing it to be used even after its local retain count drops
+        // to 0.
+        ArgEffect E =
+          (StrInStrNoCase(FName, "InsertValue") != StringRef::npos ||
+           StrInStrNoCase(FName, "AddValue") != StringRef::npos ||
+           StrInStrNoCase(FName, "SetValue") != StringRef::npos ||
+           StrInStrNoCase(FName, "AppendValue") != StringRef::npos ||
+           StrInStrNoCase(FName, "SetAttribute") != StringRef::npos)
+          ? MayEscape : DoNothing;
+
+        S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, E);
+      }
+    }
+  }
+  while (0);
+
+  if (!S)
+    S = getDefaultSummary();
+
+  // Annotations override defaults.
+  assert(S);
+  updateSummaryFromAnnotations(*S, FD);
+
+  FuncSummaries[FD] = S;
+  return S;
+}
+
+RetainSummary*
+RetainSummaryManager::getCFCreateGetRuleSummary(const FunctionDecl* FD,
+                                                StringRef FName) {
+
+  if (FName.find("Create") != StringRef::npos ||
+      FName.find("Copy") != StringRef::npos)
+    return getCFSummaryCreateRule(FD);
+
+  if (FName.find("Get") != StringRef::npos)
+    return getCFSummaryGetRule(FD);
+
+  return getDefaultSummary();
+}
+
+RetainSummary*
+RetainSummaryManager::getUnarySummary(const FunctionType* FT,
+                                      UnaryFuncKind func) {
+
+  // Sanity check that this is *really* a unary function.  This can
+  // happen if people do weird things.
+  const FunctionProtoType* FTP = dyn_cast<FunctionProtoType>(FT);
+  if (!FTP || FTP->getNumArgs() != 1)
+    return getPersistentStopSummary();
+
+  assert(ScratchArgs.isEmpty());
+
+  switch (func) {
+    case cfretain: {
+      ScratchArgs = AF.add(ScratchArgs, 0, IncRef);
+      return getPersistentSummary(RetEffect::MakeAlias(0),
+                                  DoNothing, DoNothing);
+    }
+
+    case cfrelease: {
+      ScratchArgs = AF.add(ScratchArgs, 0, DecRef);
+      return getPersistentSummary(RetEffect::MakeNoRet(),
+                                  DoNothing, DoNothing);
+    }
+
+    case cfmakecollectable: {
+      ScratchArgs = AF.add(ScratchArgs, 0, MakeCollectable);
+      return getPersistentSummary(RetEffect::MakeAlias(0),
+                                  DoNothing, DoNothing);
+    }
+
+    default:
+      assert(false && "Not a supported unary function.");
+      return getDefaultSummary();
+  }
+}
+
+RetainSummary*
+RetainSummaryManager::getCFSummaryCreateRule(const FunctionDecl* FD) {
+  assert(ScratchArgs.isEmpty());
+
+  if (FD->getIdentifier() == CFDictionaryCreateII) {
+    ScratchArgs = AF.add(ScratchArgs, 1, DoNothingByRef);
+    ScratchArgs = AF.add(ScratchArgs, 2, DoNothingByRef);
+  }
+
+  return getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
+}
+
+RetainSummary*
+RetainSummaryManager::getCFSummaryGetRule(const FunctionDecl* FD) {
+  assert(ScratchArgs.isEmpty());
+  return getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::CF),
+                              DoNothing, DoNothing);
+}
+
+//===----------------------------------------------------------------------===//
+// Summary creation for Selectors.
+//===----------------------------------------------------------------------===//
+
+RetainSummary*
+RetainSummaryManager::getInitMethodSummary(QualType RetTy) {
+  assert(ScratchArgs.isEmpty());
+  // 'init' methods conceptually return a newly allocated object and claim
+  // the receiver.
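+  // For example, for a hypothetical
+  //   - (id)initWithName:(NSString *)name;
+  // the summary below pairs ObjCInitRetE on the return value with DecRefMsg
+  // on the receiver: the message consumes 'self' and yields the new object.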
+  if (cocoa::isCocoaObjectRef(RetTy) || cocoa::isCFObjectRef(RetTy))
+    return getPersistentSummary(ObjCInitRetE, DecRefMsg);
+
+  return getDefaultSummary();
+}
+
+void
+RetainSummaryManager::updateSummaryFromAnnotations(RetainSummary &Summ,
+                                                   const FunctionDecl *FD) {
+  if (!FD)
+    return;
+
+  // Effects on the parameters.
+  unsigned parm_idx = 0;
+  for (FunctionDecl::param_const_iterator pi = FD->param_begin(),
+       pe = FD->param_end(); pi != pe; ++pi, ++parm_idx) {
+    const ParmVarDecl *pd = *pi;
+    if (pd->getAttr<NSConsumedAttr>()) {
+      if (!GCEnabled)
+        Summ.addArg(AF, parm_idx, DecRef);
+    }
+    else if (pd->getAttr<CFConsumedAttr>()) {
+      Summ.addArg(AF, parm_idx, DecRef);
+    }
+  }
+
+  QualType RetTy = FD->getResultType();
+
+  // Determine if there is a special return effect for this method.
+  if (cocoa::isCocoaObjectRef(RetTy)) {
+    if (FD->getAttr<NSReturnsRetainedAttr>()) {
+      Summ.setRetEffect(ObjCAllocRetE);
+    }
+    else if (FD->getAttr<CFReturnsRetainedAttr>()) {
+      Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
+    }
+    else if (FD->getAttr<NSReturnsNotRetainedAttr>()) {
+      Summ.setRetEffect(RetEffect::MakeNotOwned(RetEffect::ObjC));
+    }
+    else if (FD->getAttr<CFReturnsNotRetainedAttr>()) {
+      Summ.setRetEffect(RetEffect::MakeNotOwned(RetEffect::CF));
+    }
+  }
+  else if (RetTy->getAs<PointerType>()) {
+    if (FD->getAttr<CFReturnsRetainedAttr>()) {
+      Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
+    }
+  }
+}
+
+void
+RetainSummaryManager::updateSummaryFromAnnotations(RetainSummary &Summ,
+                                                   const ObjCMethodDecl *MD) {
+  if (!MD)
+    return;
+
+  bool isTrackedLoc = false;
+
+  // Effects on the receiver.
+  if (MD->getAttr<NSConsumesSelfAttr>()) {
+    if (!GCEnabled)
+      Summ.setReceiverEffect(DecRefMsg);
+  }
+
+  // Effects on the parameters.
+  unsigned parm_idx = 0;
+  for (ObjCMethodDecl::param_iterator pi=MD->param_begin(), pe=MD->param_end();
+       pi != pe; ++pi, ++parm_idx) {
+    const ParmVarDecl *pd = *pi;
+    if (pd->getAttr<NSConsumedAttr>()) {
+      if (!GCEnabled)
+        Summ.addArg(AF, parm_idx, DecRef);
+    }
+    else if (pd->getAttr<CFConsumedAttr>()) {
+      Summ.addArg(AF, parm_idx, DecRef);
+    }
+  }
+
+  // Determine if there is a special return effect for this method.
+  if (cocoa::isCocoaObjectRef(MD->getResultType())) {
+    if (MD->getAttr<NSReturnsRetainedAttr>()) {
+      Summ.setRetEffect(ObjCAllocRetE);
+      return;
+    }
+    if (MD->getAttr<NSReturnsNotRetainedAttr>()) {
+      Summ.setRetEffect(RetEffect::MakeNotOwned(RetEffect::ObjC));
+      return;
+    }
+
+    isTrackedLoc = true;
+  }
+
+  if (!isTrackedLoc)
+    isTrackedLoc = MD->getResultType()->getAs<PointerType>() != NULL;
+
+  if (isTrackedLoc) {
+    if (MD->getAttr<CFReturnsRetainedAttr>())
+      Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
+    else if (MD->getAttr<CFReturnsNotRetainedAttr>())
+      Summ.setRetEffect(RetEffect::MakeNotOwned(RetEffect::CF));
+  }
+}
+
+RetainSummary*
+RetainSummaryManager::getCommonMethodSummary(const ObjCMethodDecl* MD,
+                                             Selector S, QualType RetTy) {
+
+  if (MD) {
+    // Scan the method decl for 'void*' arguments.  These should be treated
+    // as 'StopTracking' because they are often used with delegates.
+    // Delegates are a frequent form of false positives with the retain
+    // count checker.
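+    // For example, a hypothetical
+    //   - (void)setCallbackContext:(void *)ctx;
+    // gets StopTracking on 'ctx' below, so a retained object handed off
+    // through that parameter is not reported as leaked.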
+ unsigned i = 0; + for (ObjCMethodDecl::param_iterator I = MD->param_begin(), + E = MD->param_end(); I != E; ++I, ++i) + if (ParmVarDecl *PD = *I) { + QualType Ty = Ctx.getCanonicalType(PD->getType()); + if (Ty.getLocalUnqualifiedType() == Ctx.VoidPtrTy) + ScratchArgs = AF.add(ScratchArgs, i, StopTracking); + } + } + + // Any special effect for the receiver? + ArgEffect ReceiverEff = DoNothing; + + // If one of the arguments in the selector has the keyword 'delegate' we + // should stop tracking the reference count for the receiver. This is + // because the reference count is quite possibly handled by a delegate + // method. + if (S.isKeywordSelector()) { + const std::string &str = S.getAsString(); + assert(!str.empty()); + if (StrInStrNoCase(str, "delegate:") != StringRef::npos) + ReceiverEff = StopTracking; + } + + // Look for methods that return an owned object. + if (cocoa::isCocoaObjectRef(RetTy)) { + // EXPERIMENTAL: assume the Cocoa conventions for all objects returned + // by instance methods. + RetEffect E = cocoa::followsFundamentalRule(S) + ? ObjCAllocRetE : RetEffect::MakeNotOwned(RetEffect::ObjC); + + return getPersistentSummary(E, ReceiverEff, MayEscape); + } + + // Look for methods that return an owned core foundation object. + if (cocoa::isCFObjectRef(RetTy)) { + RetEffect E = cocoa::followsFundamentalRule(S) + ? RetEffect::MakeOwned(RetEffect::CF, true) + : RetEffect::MakeNotOwned(RetEffect::CF); + + return getPersistentSummary(E, ReceiverEff, MayEscape); + } + + if (ScratchArgs.isEmpty() && ReceiverEff == DoNothing) + return getDefaultSummary(); + + return getPersistentSummary(RetEffect::MakeNoRet(), ReceiverEff, MayEscape); +} + +RetainSummary* +RetainSummaryManager::getInstanceMethodSummary(const ObjCMessage &msg, + const GRState *state, + const LocationContext *LC) { + + // We need the type-information of the tracked receiver object + // Retrieve it from the state. + const Expr *Receiver = msg.getInstanceReceiver(); + const ObjCInterfaceDecl* ID = 0; + + // FIXME: Is this really working as expected? There are cases where + // we just use the 'ID' from the message expression. + SVal receiverV; + + if (Receiver) { + receiverV = state->getSValAsScalarOrLoc(Receiver); + + // FIXME: Eventually replace the use of state->get<RefBindings> with + // a generic API for reasoning about the Objective-C types of symbolic + // objects. + if (SymbolRef Sym = receiverV.getAsLocSymbol()) + if (const RefVal *T = state->get<RefBindings>(Sym)) + if (const ObjCObjectPointerType* PT = + T->getType()->getAs<ObjCObjectPointerType>()) + ID = PT->getInterfaceDecl(); + + // FIXME: this is a hack. This may or may not be the actual method + // that is called. + if (!ID) { + if (const ObjCObjectPointerType *PT = + Receiver->getType()->getAs<ObjCObjectPointerType>()) + ID = PT->getInterfaceDecl(); + } + } else { + // FIXME: Hack for 'super'. + ID = msg.getReceiverInterface(); + } + + // FIXME: The receiver could be a reference to a class, meaning that + // we should use the class method. + RetainSummary *Summ = getInstanceMethodSummary(msg, ID); + return Summ ? Summ : getDefaultSummary(); +} + +RetainSummary* +RetainSummaryManager::getInstanceMethodSummary(Selector S, + IdentifierInfo *ClsName, + const ObjCInterfaceDecl* ID, + const ObjCMethodDecl *MD, + QualType RetTy) { + + // Look up a summary in our summary cache. + RetainSummary *Summ = ObjCMethodSummaries.find(ID, ClsName, S); + + if (!Summ) { + assert(ScratchArgs.isEmpty()); + + // "initXXX": pass-through for receiver. 
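+    // (Selectors such as 'init' or 'initWithFrame:' match cocoa::InitRule
+    //  here; everything else falls through to the common-case summary.)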
+    if (cocoa::deriveNamingConvention(S) == cocoa::InitRule)
+      Summ = getInitMethodSummary(RetTy);
+    else
+      Summ = getCommonMethodSummary(MD, S, RetTy);
+
+    // Annotations override defaults.
+    updateSummaryFromAnnotations(*Summ, MD);
+
+    // Memoize the summary.
+    ObjCMethodSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
+  }
+
+  return Summ;
+}
+
+RetainSummary*
+RetainSummaryManager::getClassMethodSummary(Selector S, IdentifierInfo *ClsName,
+                                            const ObjCInterfaceDecl *ID,
+                                            const ObjCMethodDecl *MD,
+                                            QualType RetTy) {
+
+  assert(ClsName && "Class name must be specified.");
+  RetainSummary *Summ = ObjCClassMethodSummaries.find(ID, ClsName, S);
+
+  if (!Summ) {
+    Summ = getCommonMethodSummary(MD, S, RetTy);
+    // Annotations override defaults.
+    updateSummaryFromAnnotations(*Summ, MD);
+    // Memoize the summary.
+    ObjCClassMethodSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
+  }
+
+  return Summ;
+}
+
+void RetainSummaryManager::InitializeClassMethodSummaries() {
+  assert(ScratchArgs.isEmpty());
+  RetainSummary* Summ = getPersistentSummary(ObjCAllocRetE);
+
+  // Create the [NSAssertionHandler currentHandler] summary.
+  addClassMethSummary("NSAssertionHandler", "currentHandler",
+                getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::ObjC)));
+
+  // Create the [NSAutoreleasePool addObject:] summary.
+  ScratchArgs = AF.add(ScratchArgs, 0, Autorelease);
+  addClassMethSummary("NSAutoreleasePool", "addObject",
+                      getPersistentSummary(RetEffect::MakeNoRet(),
+                                           DoNothing, Autorelease));
+
+  // Create the summaries for [NSObject performSelector...].  We treat
+  // these as 'stop tracking' for the arguments because they are often
+  // used for delegates that can release the object.  When we have better
+  // inter-procedural analysis we can potentially do something better.  This
+  // workaround is to remove false positives.
+  Summ = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, StopTracking);
+  IdentifierInfo *NSObjectII = &Ctx.Idents.get("NSObject");
+  addClsMethSummary(NSObjectII, Summ, "performSelector", "withObject",
+                    "afterDelay", NULL);
+  addClsMethSummary(NSObjectII, Summ, "performSelector", "withObject",
+                    "afterDelay", "inModes", NULL);
+  addClsMethSummary(NSObjectII, Summ, "performSelectorOnMainThread",
+                    "withObject", "waitUntilDone", NULL);
+  addClsMethSummary(NSObjectII, Summ, "performSelectorOnMainThread",
+                    "withObject", "waitUntilDone", "modes", NULL);
+  addClsMethSummary(NSObjectII, Summ, "performSelector", "onThread",
+                    "withObject", "waitUntilDone", NULL);
+  addClsMethSummary(NSObjectII, Summ, "performSelector", "onThread",
+                    "withObject", "waitUntilDone", "modes", NULL);
+  addClsMethSummary(NSObjectII, Summ, "performSelectorInBackground",
+                    "withObject", NULL);
+}
+
+void RetainSummaryManager::InitializeMethodSummaries() {
+
+  assert(ScratchArgs.isEmpty());
+
+  // Create the "init" selector.  It just acts as a pass-through for the
+  // receiver.
+  RetainSummary *InitSumm = getPersistentSummary(ObjCInitRetE, DecRefMsg);
+  addNSObjectMethSummary(GetNullarySelector("init", Ctx), InitSumm);
+
+  // awakeAfterUsingCoder: behaves basically like an 'init' method.  It
+  // claims the receiver and returns a retained object.
+  addNSObjectMethSummary(GetUnarySelector("awakeAfterUsingCoder", Ctx),
+                         InitSumm);
+
+  // The next methods are allocators.
+  RetainSummary *AllocSumm = getPersistentSummary(ObjCAllocRetE);
+  RetainSummary *CFAllocSumm =
+    getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
+
+  // Create the "retain" selector.
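+  // ([obj retain] returns the receiver itself, so the summary below aliases
+  //  the return value to the receiver and applies IncRefMsg to it.)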
+  RetEffect E = RetEffect::MakeReceiverAlias();
+  RetainSummary *Summ = getPersistentSummary(E, IncRefMsg);
+  addNSObjectMethSummary(GetNullarySelector("retain", Ctx), Summ);
+
+  // Create the "release" selector.
+  Summ = getPersistentSummary(E, DecRefMsg);
+  addNSObjectMethSummary(GetNullarySelector("release", Ctx), Summ);
+
+  // Create the "drain" selector.
+  Summ = getPersistentSummary(E, isGCEnabled() ? DoNothing : DecRef);
+  addNSObjectMethSummary(GetNullarySelector("drain", Ctx), Summ);
+
+  // Create the -dealloc summary.
+  Summ = getPersistentSummary(RetEffect::MakeNoRet(), Dealloc);
+  addNSObjectMethSummary(GetNullarySelector("dealloc", Ctx), Summ);
+
+  // Create the "autorelease" selector.
+  Summ = getPersistentSummary(E, Autorelease);
+  addNSObjectMethSummary(GetNullarySelector("autorelease", Ctx), Summ);
+
+  // Specially handle NSAutoreleasePool.
+  addInstMethSummary("NSAutoreleasePool", "init",
+                     getPersistentSummary(RetEffect::MakeReceiverAlias(),
+                                          NewAutoreleasePool));
+
+  // For NSWindow, allocated objects are (initially) self-owned.
+  // FIXME: For now we opt for false negatives with NSWindow, as these objects
+  //  self-own themselves.  However, they only do this once they are displayed.
+  //  Thus, we need to track an NSWindow's display status.
+  //  This is tracked in <rdar://problem/6062711>.
+  //  See also http://llvm.org/bugs/show_bug.cgi?id=3714.
+  RetainSummary *NoTrackYet = getPersistentSummary(RetEffect::MakeNoRet(),
+                                                   StopTracking,
+                                                   StopTracking);
+
+  addClassMethSummary("NSWindow", "alloc", NoTrackYet);
+
+#if 0
+  addInstMethSummary("NSWindow", NoTrackYet, "initWithContentRect",
+                     "styleMask", "backing", "defer", NULL);
+
+  addInstMethSummary("NSWindow", NoTrackYet, "initWithContentRect",
+                     "styleMask", "backing", "defer", "screen", NULL);
+#endif
+
+  // For NSPanel (which subclasses NSWindow), allocated objects are not
+  // self-owned.
+  // FIXME: For now we don't track NSPanel objects for the same reason
+  //  as NSWindow objects.
+  addClassMethSummary("NSPanel", "alloc", NoTrackYet);
+
+#if 0
+  addInstMethSummary("NSPanel", NoTrackYet, "initWithContentRect",
+                     "styleMask", "backing", "defer", NULL);
+
+  addInstMethSummary("NSPanel", NoTrackYet, "initWithContentRect",
+                     "styleMask", "backing", "defer", "screen", NULL);
+#endif
+
+  // Don't track allocated autorelease pools yet, as it is okay to prematurely
+  // exit a method.
+  addClassMethSummary("NSAutoreleasePool", "alloc", NoTrackYet);
+
+  // Create NSAssertionHandler summaries.
+  addPanicSummary("NSAssertionHandler", "handleFailureInFunction", "file",
+                  "lineNumber", "description", NULL);
+
+  addPanicSummary("NSAssertionHandler", "handleFailureInMethod", "object",
+                  "file", "lineNumber", "description", NULL);
+
+  // Create summaries for QCRenderer/QCView -createSnapshotImageOfType:.
+  addInstMethSummary("QCRenderer", AllocSumm,
+                     "createSnapshotImageOfType", NULL);
+  addInstMethSummary("QCView", AllocSumm,
+                     "createSnapshotImageOfType", NULL);
+
+  // Create summaries for CIContext, 'createCGImage' and
+  // 'createCGLayerWithSize'.  These objects are CF objects, and are not
+  // automatically garbage collected.
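+  // (addInstMethSummary assembles the selector from its NULL-terminated
+  //  name fragments; e.g. "createCGImage", "fromRect" below yields the
+  //  selector 'createCGImage:fromRect:'.)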
+ addInstMethSummary("CIContext", CFAllocSumm, + "createCGImage", "fromRect", NULL); + addInstMethSummary("CIContext", CFAllocSumm, + "createCGImage", "fromRect", "format", "colorSpace", NULL); + addInstMethSummary("CIContext", CFAllocSumm, "createCGLayerWithSize", + "info", NULL); +} + +//===----------------------------------------------------------------------===// +// AutoreleaseBindings - State used to track objects in autorelease pools. +//===----------------------------------------------------------------------===// + +typedef llvm::ImmutableMap<SymbolRef, unsigned> ARCounts; +typedef llvm::ImmutableMap<SymbolRef, ARCounts> ARPoolContents; +typedef llvm::ImmutableList<SymbolRef> ARStack; + +static int AutoRCIndex = 0; +static int AutoRBIndex = 0; + +namespace { class AutoreleasePoolContents {}; } +namespace { class AutoreleaseStack {}; } + +namespace clang { +namespace ento { +template<> struct GRStateTrait<AutoreleaseStack> + : public GRStatePartialTrait<ARStack> { + static inline void* GDMIndex() { return &AutoRBIndex; } +}; + +template<> struct GRStateTrait<AutoreleasePoolContents> + : public GRStatePartialTrait<ARPoolContents> { + static inline void* GDMIndex() { return &AutoRCIndex; } +}; +} // end GR namespace +} // end clang namespace + +static SymbolRef GetCurrentAutoreleasePool(const GRState* state) { + ARStack stack = state->get<AutoreleaseStack>(); + return stack.isEmpty() ? SymbolRef() : stack.getHead(); +} + +static const GRState * SendAutorelease(const GRState *state, + ARCounts::Factory &F, SymbolRef sym) { + + SymbolRef pool = GetCurrentAutoreleasePool(state); + const ARCounts *cnts = state->get<AutoreleasePoolContents>(pool); + ARCounts newCnts(0); + + if (cnts) { + const unsigned *cnt = (*cnts).lookup(sym); + newCnts = F.add(*cnts, sym, cnt ? *cnt + 1 : 1); + } + else + newCnts = F.add(F.getEmptyMap(), sym, 1); + + return state->set<AutoreleasePoolContents>(pool, newCnts); +} + +//===----------------------------------------------------------------------===// +// Transfer functions. 
+//===----------------------------------------------------------------------===// + +namespace { + +class CFRefCount : public TransferFuncs { +public: + class BindingsPrinter : public GRState::Printer { + public: + virtual void Print(llvm::raw_ostream& Out, const GRState* state, + const char* nl, const char* sep); + }; + +private: + typedef llvm::DenseMap<const ExplodedNode*, const RetainSummary*> + SummaryLogTy; + + RetainSummaryManager Summaries; + SummaryLogTy SummaryLog; + const LangOptions& LOpts; + ARCounts::Factory ARCountFactory; + + BugType *useAfterRelease, *releaseNotOwned; + BugType *deallocGC, *deallocNotOwned; + BugType *leakWithinFunction, *leakAtReturn; + BugType *overAutorelease; + BugType *returnNotOwnedForOwned; + BugReporter *BR; + + const GRState * Update(const GRState * state, SymbolRef sym, RefVal V, ArgEffect E, + RefVal::Kind& hasErr); + + void ProcessNonLeakError(ExplodedNodeSet& Dst, + StmtNodeBuilder& Builder, + const Expr* NodeExpr, SourceRange ErrorRange, + ExplodedNode* Pred, + const GRState* St, + RefVal::Kind hasErr, SymbolRef Sym); + + const GRState * HandleSymbolDeath(const GRState * state, SymbolRef sid, RefVal V, + llvm::SmallVectorImpl<SymbolRef> &Leaked); + + ExplodedNode* ProcessLeaks(const GRState * state, + llvm::SmallVectorImpl<SymbolRef> &Leaked, + GenericNodeBuilderRefCount &Builder, + ExprEngine &Eng, + ExplodedNode *Pred = 0); + +public: + CFRefCount(ASTContext& Ctx, bool gcenabled, const LangOptions& lopts) + : Summaries(Ctx, gcenabled), + LOpts(lopts), useAfterRelease(0), releaseNotOwned(0), + deallocGC(0), deallocNotOwned(0), + leakWithinFunction(0), leakAtReturn(0), overAutorelease(0), + returnNotOwnedForOwned(0), BR(0) {} + + virtual ~CFRefCount() {} + + void RegisterChecks(ExprEngine &Eng); + + virtual void RegisterPrinters(std::vector<GRState::Printer*>& Printers) { + Printers.push_back(new BindingsPrinter()); + } + + bool isGCEnabled() const { return Summaries.isGCEnabled(); } + const LangOptions& getLangOptions() const { return LOpts; } + + const RetainSummary *getSummaryOfNode(const ExplodedNode *N) const { + SummaryLogTy::const_iterator I = SummaryLog.find(N); + return I == SummaryLog.end() ? 0 : I->second; + } + + // Calls. + + void evalSummary(ExplodedNodeSet& Dst, + ExprEngine& Eng, + StmtNodeBuilder& Builder, + const Expr* Ex, + const CallOrObjCMessage &callOrMsg, + InstanceReceiver Receiver, + const RetainSummary& Summ, + const MemRegion *Callee, + ExplodedNode* Pred, const GRState *state); + + virtual void evalCall(ExplodedNodeSet& Dst, + ExprEngine& Eng, + StmtNodeBuilder& Builder, + const CallExpr* CE, SVal L, + ExplodedNode* Pred); + + + virtual void evalObjCMessage(ExplodedNodeSet& Dst, + ExprEngine& Engine, + StmtNodeBuilder& Builder, + ObjCMessage msg, + ExplodedNode* Pred, + const GRState *state); + // Stores. + virtual void evalBind(StmtNodeBuilderRef& B, SVal location, SVal val); + + // End-of-path. + + virtual void evalEndPath(ExprEngine& Engine, + EndOfFunctionNodeBuilder& Builder); + + virtual void evalDeadSymbols(ExplodedNodeSet& Dst, + ExprEngine& Engine, + StmtNodeBuilder& Builder, + ExplodedNode* Pred, + const GRState* state, + SymbolReaper& SymReaper); + + std::pair<ExplodedNode*, const GRState *> + HandleAutoreleaseCounts(const GRState * state, GenericNodeBuilderRefCount Bd, + ExplodedNode* Pred, ExprEngine &Eng, + SymbolRef Sym, RefVal V, bool &stop); + // Return statements. 
+ + virtual void evalReturn(ExplodedNodeSet& Dst, + ExprEngine& Engine, + StmtNodeBuilder& Builder, + const ReturnStmt* S, + ExplodedNode* Pred); + + // Assumptions. + + virtual const GRState *evalAssume(const GRState* state, SVal condition, + bool assumption); +}; + +} // end anonymous namespace + +static void PrintPool(llvm::raw_ostream &Out, SymbolRef Sym, + const GRState *state) { + Out << ' '; + if (Sym) + Out << Sym->getSymbolID(); + else + Out << "<pool>"; + Out << ":{"; + + // Get the contents of the pool. + if (const ARCounts *cnts = state->get<AutoreleasePoolContents>(Sym)) + for (ARCounts::iterator J=cnts->begin(), EJ=cnts->end(); J != EJ; ++J) + Out << '(' << J.getKey() << ',' << J.getData() << ')'; + + Out << '}'; +} + +void CFRefCount::BindingsPrinter::Print(llvm::raw_ostream& Out, + const GRState* state, + const char* nl, const char* sep) { + + RefBindings B = state->get<RefBindings>(); + + if (!B.isEmpty()) + Out << sep << nl; + + for (RefBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) { + Out << (*I).first << " : "; + (*I).second.print(Out); + Out << nl; + } + + // Print the autorelease stack. + Out << sep << nl << "AR pool stack:"; + ARStack stack = state->get<AutoreleaseStack>(); + + PrintPool(Out, SymbolRef(), state); // Print the caller's pool. + for (ARStack::iterator I=stack.begin(), E=stack.end(); I!=E; ++I) + PrintPool(Out, *I, state); + + Out << nl; +} + +//===----------------------------------------------------------------------===// +// Error reporting. +//===----------------------------------------------------------------------===// + +namespace { + + //===-------------===// + // Bug Descriptions. // + //===-------------===// + + class CFRefBug : public BugType { + protected: + CFRefCount& TF; + + CFRefBug(CFRefCount* tf, llvm::StringRef name) + : BugType(name, "Memory (Core Foundation/Objective-C)"), TF(*tf) {} + public: + + CFRefCount& getTF() { return TF; } + + // FIXME: Eventually remove. 
+ virtual const char* getDescription() const = 0; + + virtual bool isLeak() const { return false; } + }; + + class UseAfterRelease : public CFRefBug { + public: + UseAfterRelease(CFRefCount* tf) + : CFRefBug(tf, "Use-after-release") {} + + const char* getDescription() const { + return "Reference-counted object is used after it is released"; + } + }; + + class BadRelease : public CFRefBug { + public: + BadRelease(CFRefCount* tf) : CFRefBug(tf, "Bad release") {} + + const char* getDescription() const { + return "Incorrect decrement of the reference count of an object that is " + "not owned at this point by the caller"; + } + }; + + class DeallocGC : public CFRefBug { + public: + DeallocGC(CFRefCount *tf) + : CFRefBug(tf, "-dealloc called while using garbage collection") {} + + const char *getDescription() const { + return "-dealloc called while using garbage collection"; + } + }; + + class DeallocNotOwned : public CFRefBug { + public: + DeallocNotOwned(CFRefCount *tf) + : CFRefBug(tf, "-dealloc sent to non-exclusively owned object") {} + + const char *getDescription() const { + return "-dealloc sent to object that may be referenced elsewhere"; + } + }; + + class OverAutorelease : public CFRefBug { + public: + OverAutorelease(CFRefCount *tf) : + CFRefBug(tf, "Object sent -autorelease too many times") {} + + const char *getDescription() const { + return "Object sent -autorelease too many times"; + } + }; + + class ReturnedNotOwnedForOwned : public CFRefBug { + public: + ReturnedNotOwnedForOwned(CFRefCount *tf) : + CFRefBug(tf, "Method should return an owned object") {} + + const char *getDescription() const { + return "Object with +0 retain counts returned to caller where a +1 " + "(owning) retain count is expected"; + } + }; + + class Leak : public CFRefBug { + const bool isReturn; + protected: + Leak(CFRefCount* tf, llvm::StringRef name, bool isRet) + : CFRefBug(tf, name), isReturn(isRet) {} + public: + + const char* getDescription() const { return ""; } + + bool isLeak() const { return true; } + }; + + class LeakAtReturn : public Leak { + public: + LeakAtReturn(CFRefCount* tf, llvm::StringRef name) + : Leak(tf, name, true) {} + }; + + class LeakWithinFunction : public Leak { + public: + LeakWithinFunction(CFRefCount* tf, llvm::StringRef name) + : Leak(tf, name, false) {} + }; + + //===---------===// + // Bug Reports. 
+  //===---------===//
+
+  class CFRefReport : public RangedBugReport {
+  protected:
+    SymbolRef Sym;
+    const CFRefCount &TF;
+  public:
+    CFRefReport(CFRefBug& D, const CFRefCount &tf,
+                ExplodedNode *n, SymbolRef sym)
+      : RangedBugReport(D, D.getDescription(), n), Sym(sym), TF(tf) {}
+
+    CFRefReport(CFRefBug& D, const CFRefCount &tf,
+                ExplodedNode *n, SymbolRef sym, llvm::StringRef endText)
+      : RangedBugReport(D, D.getDescription(), endText, n), Sym(sym), TF(tf) {}
+
+    virtual ~CFRefReport() {}
+
+    CFRefBug& getBugType() const {
+      return (CFRefBug&) RangedBugReport::getBugType();
+    }
+
+    virtual std::pair<ranges_iterator, ranges_iterator> getRanges() const {
+      if (!getBugType().isLeak())
+        return RangedBugReport::getRanges();
+      else
+        return std::make_pair(ranges_iterator(), ranges_iterator());
+    }
+
+    SymbolRef getSymbol() const { return Sym; }
+
+    PathDiagnosticPiece* getEndPath(BugReporterContext& BRC,
+                                    const ExplodedNode* N);
+
+    std::pair<const char**,const char**> getExtraDescriptiveText();
+
+    PathDiagnosticPiece* VisitNode(const ExplodedNode* N,
+                                   const ExplodedNode* PrevN,
+                                   BugReporterContext& BRC);
+  };
+
+  class CFRefLeakReport : public CFRefReport {
+    SourceLocation AllocSite;
+    const MemRegion* AllocBinding;
+  public:
+    CFRefLeakReport(CFRefBug& D, const CFRefCount &tf,
+                    ExplodedNode *n, SymbolRef sym,
+                    ExprEngine& Eng);
+
+    PathDiagnosticPiece* getEndPath(BugReporterContext& BRC,
+                                    const ExplodedNode* N);
+
+    SourceLocation getLocation() const { return AllocSite; }
+  };
+} // end anonymous namespace
+
+
+
+static const char* Msgs[] = {
+  // GC only
+  "Code is compiled to only use garbage collection",
+  // No GC.
+  "Code is compiled to use reference counts",
+  // Hybrid, with GC.
+  "Code is compiled to use either garbage collection (GC) or reference counts"
+  " (non-GC). The bug occurs with GC enabled",
+  // Hybrid, without GC
+  "Code is compiled to use either garbage collection (GC) or reference counts"
+  " (non-GC). The bug occurs in non-GC mode"
+};
+
+std::pair<const char**,const char**> CFRefReport::getExtraDescriptiveText() {
+  CFRefCount& TF = static_cast<CFRefBug&>(getBugType()).getTF();
+
+  switch (TF.getLangOptions().getGCMode()) {
+  default:
+    assert(false);
+
+  case LangOptions::GCOnly:
+    assert (TF.isGCEnabled());
+    return std::make_pair(&Msgs[0], &Msgs[0]+1);
+
+  case LangOptions::NonGC:
+    assert (!TF.isGCEnabled());
+    return std::make_pair(&Msgs[1], &Msgs[1]+1);
+
+  case LangOptions::HybridGC:
+    if (TF.isGCEnabled())
+      return std::make_pair(&Msgs[2], &Msgs[2]+1);
+    else
+      return std::make_pair(&Msgs[3], &Msgs[3]+1);
+  }
+}
+
+static inline bool contains(const llvm::SmallVectorImpl<ArgEffect>& V,
+                            ArgEffect X) {
+  for (llvm::SmallVectorImpl<ArgEffect>::const_iterator I=V.begin(), E=V.end();
+       I!=E; ++I)
+    if (*I == X) return true;
+
+  return false;
+}
+
+PathDiagnosticPiece* CFRefReport::VisitNode(const ExplodedNode* N,
+                                            const ExplodedNode* PrevN,
+                                            BugReporterContext& BRC) {
+
+  if (!isa<PostStmt>(N->getLocation()))
+    return NULL;
+
+  // Check if the type state has changed.
+  const GRState *PrevSt = PrevN->getState();
+  const GRState *CurrSt = N->getState();
+
+  const RefVal* CurrT = CurrSt->get<RefBindings>(Sym);
+  if (!CurrT) return NULL;
+
+  const RefVal &CurrV = *CurrT;
+  const RefVal *PrevT = PrevSt->get<RefBindings>(Sym);
+
+  // Create a string buffer to contain all the useful things we want
+  // to tell the user.
+ std::string sbuf; + llvm::raw_string_ostream os(sbuf); + + // This is the allocation site since the previous node had no bindings + // for this symbol. + if (!PrevT) { + const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt(); + + if (const CallExpr *CE = dyn_cast<CallExpr>(S)) { + // Get the name of the callee (if it is available). + SVal X = CurrSt->getSValAsScalarOrLoc(CE->getCallee()); + if (const FunctionDecl* FD = X.getAsFunctionDecl()) + os << "Call to function '" << FD << '\''; + else + os << "function call"; + } + else if (isa<ObjCMessageExpr>(S)) { + os << "Method"; + } else { + os << "Property"; + } + + if (CurrV.getObjKind() == RetEffect::CF) { + os << " returns a Core Foundation object with a "; + } + else { + assert (CurrV.getObjKind() == RetEffect::ObjC); + os << " returns an Objective-C object with a "; + } + + if (CurrV.isOwned()) { + os << "+1 retain count (owning reference)."; + + if (static_cast<CFRefBug&>(getBugType()).getTF().isGCEnabled()) { + assert(CurrV.getObjKind() == RetEffect::CF); + os << " " + "Core Foundation objects are not automatically garbage collected."; + } + } + else { + assert (CurrV.isNotOwned()); + os << "+0 retain count (non-owning reference)."; + } + + PathDiagnosticLocation Pos(S, BRC.getSourceManager()); + return new PathDiagnosticEventPiece(Pos, os.str()); + } + + // Gather up the effects that were performed on the object at this + // program point + llvm::SmallVector<ArgEffect, 2> AEffects; + + if (const RetainSummary *Summ = + TF.getSummaryOfNode(BRC.getNodeResolver().getOriginalNode(N))) { + // We only have summaries attached to nodes after evaluating CallExpr and + // ObjCMessageExprs. + const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt(); + + if (const CallExpr *CE = dyn_cast<CallExpr>(S)) { + // Iterate through the parameter expressions and see if the symbol + // was ever passed as an argument. + unsigned i = 0; + + for (CallExpr::const_arg_iterator AI=CE->arg_begin(), AE=CE->arg_end(); + AI!=AE; ++AI, ++i) { + + // Retrieve the value of the argument. Is it the symbol + // we are interested in? + if (CurrSt->getSValAsScalarOrLoc(*AI).getAsLocSymbol() != Sym) + continue; + + // We have an argument. Get the effect! + AEffects.push_back(Summ->getArg(i)); + } + } + else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) { + if (const Expr *receiver = ME->getInstanceReceiver()) + if (CurrSt->getSValAsScalarOrLoc(receiver).getAsLocSymbol() == Sym) { + // The symbol we are tracking is the receiver. + AEffects.push_back(Summ->getReceiverEffect()); + } + } + } + + do { + // Get the previous type state. + RefVal PrevV = *PrevT; + + // Specially handle -dealloc. + if (!TF.isGCEnabled() && contains(AEffects, Dealloc)) { + // Determine if the object's reference count was pushed to zero. + assert(!(PrevV == CurrV) && "The typestate *must* have changed."); + // We may not have transitioned to 'release' if we hit an error. + // This case is handled elsewhere. + if (CurrV.getKind() == RefVal::Released) { + assert(CurrV.getCombinedCounts() == 0); + os << "Object released by directly sending the '-dealloc' message"; + break; + } + } + + // Specially handle CFMakeCollectable and friends. + if (contains(AEffects, MakeCollectable)) { + // Get the name of the function. 
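+      // (CFMakeCollectable() hands its CF argument over to the garbage
+      // collector in GC mode and is a no-op in non-GC mode, which is why
+      // the two diagnostics emitted below differ.)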
+ const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt(); + SVal X = CurrSt->getSValAsScalarOrLoc(cast<CallExpr>(S)->getCallee()); + const FunctionDecl* FD = X.getAsFunctionDecl(); + const std::string& FName = FD->getNameAsString(); + + if (TF.isGCEnabled()) { + // Determine if the object's reference count was pushed to zero. + assert(!(PrevV == CurrV) && "The typestate *must* have changed."); + + os << "In GC mode a call to '" << FName + << "' decrements an object's retain count and registers the " + "object with the garbage collector. "; + + if (CurrV.getKind() == RefVal::Released) { + assert(CurrV.getCount() == 0); + os << "Since it now has a 0 retain count the object can be " + "automatically collected by the garbage collector."; + } + else + os << "An object must have a 0 retain count to be garbage collected. " + "After this call its retain count is +" << CurrV.getCount() + << '.'; + } + else + os << "When GC is not enabled a call to '" << FName + << "' has no effect on its argument."; + + // Nothing more to say. + break; + } + + // Determine if the typestate has changed. + if (!(PrevV == CurrV)) + switch (CurrV.getKind()) { + case RefVal::Owned: + case RefVal::NotOwned: + + if (PrevV.getCount() == CurrV.getCount()) { + // Did an autorelease message get sent? + if (PrevV.getAutoreleaseCount() == CurrV.getAutoreleaseCount()) + return 0; + + assert(PrevV.getAutoreleaseCount() < CurrV.getAutoreleaseCount()); + os << "Object sent -autorelease message"; + break; + } + + if (PrevV.getCount() > CurrV.getCount()) + os << "Reference count decremented."; + else + os << "Reference count incremented."; + + if (unsigned Count = CurrV.getCount()) + os << " The object now has a +" << Count << " retain count."; + + if (PrevV.getKind() == RefVal::Released) { + assert(TF.isGCEnabled() && CurrV.getCount() > 0); + os << " The object is not eligible for garbage collection until the " + "retain count reaches 0 again."; + } + + break; + + case RefVal::Released: + os << "Object released."; + break; + + case RefVal::ReturnedOwned: + os << "Object returned to caller as an owning reference (single retain " + "count transferred to caller)."; + break; + + case RefVal::ReturnedNotOwned: + os << "Object returned to caller with a +0 (non-owning) retain count."; + break; + + default: + return NULL; + } + + // Emit any remaining diagnostics for the argument effects (if any). + for (llvm::SmallVectorImpl<ArgEffect>::iterator I=AEffects.begin(), + E=AEffects.end(); I != E; ++I) { + + // A bunch of things have alternate behavior under GC. + if (TF.isGCEnabled()) + switch (*I) { + default: break; + case Autorelease: + os << "In GC mode an 'autorelease' has no effect."; + continue; + case IncRefMsg: + os << "In GC mode the 'retain' message has no effect."; + continue; + case DecRefMsg: + os << "In GC mode the 'release' message has no effect."; + continue; + } + } + } while (0); + + if (os.str().empty()) + return 0; // We have nothing to say! + + const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt(); + PathDiagnosticLocation Pos(S, BRC.getSourceManager()); + PathDiagnosticPiece* P = new PathDiagnosticEventPiece(Pos, os.str()); + + // Add the range by scanning the children of the statement for any bindings + // to Sym. 
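+  // (For example, for '[dict setObject:value forKey:key]' this highlights
+  // whichever subexpression evaluates to the tracked symbol.)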
+  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+       I!=E; ++I)
+    if (const Expr* Exp = dyn_cast_or_null<Expr>(*I))
+      if (CurrSt->getSValAsScalarOrLoc(Exp).getAsLocSymbol() == Sym) {
+        P->addRange(Exp->getSourceRange());
+        break;
+      }
+
+  return P;
+}
+
+namespace {
+  class FindUniqueBinding :
+    public StoreManager::BindingsHandler {
+      SymbolRef Sym;
+      const MemRegion* Binding;
+      bool First;
+
+    public:
+      FindUniqueBinding(SymbolRef sym) : Sym(sym), Binding(0), First(true) {}
+
+      bool HandleBinding(StoreManager& SMgr, Store store, const MemRegion* R,
+                         SVal val) {
+
+        SymbolRef SymV = val.getAsSymbol();
+        if (!SymV || SymV != Sym)
+          return true;
+
+        if (Binding) {
+          First = false;
+          return false;
+        }
+        else
+          Binding = R;
+
+        return true;
+      }
+
+      operator bool() { return First && Binding; }
+      const MemRegion* getRegion() { return Binding; }
+  };
+}
+
+static std::pair<const ExplodedNode*,const MemRegion*>
+GetAllocationSite(GRStateManager& StateMgr, const ExplodedNode* N,
+                  SymbolRef Sym) {
+
+  // Find both the first node that referred to the tracked symbol and the
+  // memory location that its value was stored to.
+  const ExplodedNode* Last = N;
+  const MemRegion* FirstBinding = 0;
+
+  while (N) {
+    const GRState* St = N->getState();
+    RefBindings B = St->get<RefBindings>();
+
+    if (!B.lookup(Sym))
+      break;
+
+    FindUniqueBinding FB(Sym);
+    StateMgr.iterBindings(St, FB);
+    if (FB) FirstBinding = FB.getRegion();
+
+    Last = N;
+    N = N->pred_empty() ? NULL : *(N->pred_begin());
+  }
+
+  return std::make_pair(Last, FirstBinding);
+}
+
+PathDiagnosticPiece*
+CFRefReport::getEndPath(BugReporterContext& BRC,
+                        const ExplodedNode* EndN) {
+  // Tell the BugReporterContext to report cases when the tracked symbol is
+  // assigned to different variables, etc.
+  BRC.addNotableSymbol(Sym);
+  return RangedBugReport::getEndPath(BRC, EndN);
+}
+
+PathDiagnosticPiece*
+CFRefLeakReport::getEndPath(BugReporterContext& BRC,
+                            const ExplodedNode* EndN){
+
+  // Tell the BugReporterContext to report cases when the tracked symbol is
+  // assigned to different variables, etc.
+  BRC.addNotableSymbol(Sym);
+
+  // We are reporting a leak. Walk up the graph to get to the first node where
+  // the symbol appeared, and also get the first VarDecl that the tracked
+  // object is stored to.
+  const ExplodedNode* AllocNode = 0;
+  const MemRegion* FirstBinding = 0;
+
+  llvm::tie(AllocNode, FirstBinding) =
+    GetAllocationSite(BRC.getStateManager(), EndN, Sym);
+
+  // Get the allocation site.
+  assert(AllocNode);
+  const Stmt* FirstStmt = cast<PostStmt>(AllocNode->getLocation()).getStmt();
+
+  SourceManager& SMgr = BRC.getSourceManager();
+  unsigned AllocLine =
+    SMgr.getInstantiationLineNumber(FirstStmt->getLocStart());
+
+  // Compute an actual location for the leak. Sometimes a leak doesn't
+  // occur at an actual statement (e.g., transition between blocks; end
+  // of function) so we need to walk the graph and compute a real location.
+  const ExplodedNode* LeakN = EndN;
+  PathDiagnosticLocation L;
+
+  while (LeakN) {
+    ProgramPoint P = LeakN->getLocation();
+
+    if (const PostStmt *PS = dyn_cast<PostStmt>(&P)) {
+      L = PathDiagnosticLocation(PS->getStmt()->getLocStart(), SMgr);
+      break;
+    }
+    else if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+      if (const Stmt* Term = BE->getSrc()->getTerminator()) {
+        L = PathDiagnosticLocation(Term->getLocStart(), SMgr);
+        break;
+      }
+    }
+
+    LeakN = LeakN->succ_empty() ?
+            0 : *(LeakN->succ_begin());
+  }
+
+  if (!L.isValid()) {
+    const Decl &D = EndN->getCodeDecl();
+    L = PathDiagnosticLocation(D.getBodyRBrace(), SMgr);
+  }
+
+  std::string sbuf;
+  llvm::raw_string_ostream os(sbuf);
+
+  os << "Object allocated on line " << AllocLine;
+
+  if (FirstBinding)
+    os << " and stored into '" << FirstBinding->getString() << '\'';
+
+  // Get the retain count.
+  const RefVal* RV = EndN->getState()->get<RefBindings>(Sym);
+
+  if (RV->getKind() == RefVal::ErrorLeakReturned) {
+    // FIXME: Per comments in rdar://6320065, "create" only applies to CF
+    // objects.  Only "copy", "alloc", "retain" and "new" transfer ownership
+    // to the caller for NS objects.
+    ObjCMethodDecl& MD = cast<ObjCMethodDecl>(EndN->getCodeDecl());
+    os << " is returned from a method whose name ('"
+       << MD.getSelector().getAsString()
+       << "') does not contain 'copy' or otherwise starts with"
+          " 'new' or 'alloc'.  This violates the naming convention rules given"
+          " in the Memory Management Guide for Cocoa (object leaked)";
+  }
+  else if (RV->getKind() == RefVal::ErrorGCLeakReturned) {
+    ObjCMethodDecl& MD = cast<ObjCMethodDecl>(EndN->getCodeDecl());
+    os << " and returned from method '" << MD.getSelector().getAsString()
+       << "' is potentially leaked when using garbage collection.  Callers "
+          "of this method do not expect a returned object with a +1 retain "
+          "count since they expect the object to be managed by the garbage "
+          "collector";
+  }
+  else
+    os << " is not referenced later in this execution path and has a retain "
+          "count of +" << RV->getCount() << " (object leaked)";
+
+  return new PathDiagnosticEventPiece(L, os.str());
+}
+
+CFRefLeakReport::CFRefLeakReport(CFRefBug& D, const CFRefCount &tf,
+                                 ExplodedNode *n,
+                                 SymbolRef sym, ExprEngine& Eng)
+: CFRefReport(D, tf, n, sym) {
+
+  // Most bug reports are cached at the location where they occurred.
+  // With leaks, we want to unique them by the location where they were
+  // allocated, and only report a single path.  To do this, we need to find
+  // the allocation site of a piece of tracked memory, which we do via a
+  // call to GetAllocationSite.  This will walk the ExplodedGraph backwards.
+  // Note that this is *not* the trimmed graph; we are guaranteed, however,
+  // that all ancestor nodes that represent the allocation site have the
+  // same SourceLocation.
+  const ExplodedNode* AllocNode = 0;
+
+  llvm::tie(AllocNode, AllocBinding) = // Set AllocBinding.
+    GetAllocationSite(Eng.getStateManager(), getErrorNode(), getSymbol());
+
+  // Get the SourceLocation for the allocation site.
+  ProgramPoint P = AllocNode->getLocation();
+  AllocSite = cast<PostStmt>(P).getStmt()->getLocStart();
+
+  // Fill in the description of the bug.
+  Description.clear();
+  llvm::raw_string_ostream os(Description);
+  SourceManager& SMgr = Eng.getContext().getSourceManager();
+  unsigned AllocLine = SMgr.getInstantiationLineNumber(AllocSite);
+  os << "Potential leak ";
+  if (tf.isGCEnabled()) {
+    os << "(when using garbage collection) ";
+  }
+  os << "of an object allocated on line " << AllocLine;
+
+  // FIXME: AllocBinding doesn't get populated for RegionStore yet.
+  if (AllocBinding)
+    os << " and stored into '" << AllocBinding->getString() << '\'';
+}
+
+//===----------------------------------------------------------------------===//
+// Main checker logic.
+//===----------------------------------------------------------------------===//
+
+/// GetReturnType - Used to get the return type of a message expression or
+/// function call with the intention of affixing that type to a tracked symbol.
+/// While the return type can be queried directly from RetE, when
+/// invoking class methods we augment the return type to be that of
+/// a pointer to the class (as opposed to it just being id).
+static QualType GetReturnType(const Expr* RetE, ASTContext& Ctx) {
+  QualType RetTy = RetE->getType();
+  // If RetE is not a message expression, just return its type.
+  // If RetE is a message expression, return its type if it is something
+  // more specific than id.
+  if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(RetE))
+    if (const ObjCObjectPointerType *PT = RetTy->getAs<ObjCObjectPointerType>())
+      if (PT->isObjCQualifiedIdType() || PT->isObjCIdType() ||
+          PT->isObjCClassType()) {
+        // At this point we know the return type of the message expression is
+        // id, id<...>, or Class. If we have an ObjCInterfaceDecl, we know this
+        // is a call to a class method whose type we can resolve.  In such
+        // cases, promote the return type to XXX* (where XXX is the class).
+        const ObjCInterfaceDecl *D = ME->getReceiverInterface();
+        return !D ? RetTy :
+                    Ctx.getObjCObjectPointerType(Ctx.getObjCInterfaceType(D));
+      }
+
+  return RetTy;
+}
+
+void CFRefCount::evalSummary(ExplodedNodeSet& Dst,
+                             ExprEngine& Eng,
+                             StmtNodeBuilder& Builder,
+                             const Expr* Ex,
+                             const CallOrObjCMessage &callOrMsg,
+                             InstanceReceiver Receiver,
+                             const RetainSummary& Summ,
+                             const MemRegion *Callee,
+                             ExplodedNode* Pred, const GRState *state) {
+
+  // Evaluate the effect of the arguments.
+  RefVal::Kind hasErr = (RefVal::Kind) 0;
+  SourceRange ErrorRange;
+  SymbolRef ErrorSym = 0;
+
+  llvm::SmallVector<const MemRegion*, 10> RegionsToInvalidate;
+
+  // HACK: Symbols that have ref-count state that are referenced directly
+  //  (not as structure or array elements, or via bindings) by an argument
+  //  should not have their ref-count state stripped after we have
+  //  done an invalidation pass.
+  llvm::DenseSet<SymbolRef> WhitelistedSymbols;
+
+  // Invalidate all instance variables of the receiver of a message.
+  // FIXME: We should be able to do better with inter-procedural analysis.
+  if (Receiver) {
+    SVal V = Receiver.getSValAsScalarOrLoc(state);
+    if (SymbolRef Sym = V.getAsLocSymbol()) {
+      if (state->get<RefBindings>(Sym))
+        WhitelistedSymbols.insert(Sym);
+    }
+    if (const MemRegion *region = V.getAsRegion())
+      RegionsToInvalidate.push_back(region);
+  }
+
+  for (unsigned idx = 0, e = callOrMsg.getNumArgs(); idx != e; ++idx) {
+    SVal V = callOrMsg.getArgSValAsScalarOrLoc(idx);
+    SymbolRef Sym = V.getAsLocSymbol();
+
+    if (Sym)
+      if (RefBindings::data_type* T = state->get<RefBindings>(Sym)) {
+        WhitelistedSymbols.insert(Sym);
+        state = Update(state, Sym, *T, Summ.getArg(idx), hasErr);
+        if (hasErr) {
+          ErrorRange = callOrMsg.getArgSourceRange(idx);
+          ErrorSym = Sym;
+          break;
+        }
+      }
+
+  tryAgain:
+    if (isa<Loc>(V)) {
+      if (loc::MemRegionVal* MR = dyn_cast<loc::MemRegionVal>(&V)) {
+        if (Summ.getArg(idx) == DoNothingByRef)
+          continue;
+
+        // Invalidate the value of the variable passed by reference.
+        const MemRegion *R = MR->getRegion();
+
+        // Are we dealing with an ElementRegion?  If the element type is
+        // a basic integer type (e.g., char, int) and the underlying region
+        // is a variable region then strip off the ElementRegion.
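+        // (For instance, passing 'char buf[8]' to a function decays to
+        // element 0 of the VarRegion for 'buf'; we want to invalidate the
+        // variable itself rather than just that element.)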
+        // FIXME: We really need to think about this for the general case
+        //  as sometimes we are reasoning about arrays and other times
+        //  about (char*), etc., which is just a form of passing raw bytes.
+        //  e.g., void *p = alloca(); foo((char*)p);
+        if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+          // Checking for 'integral type' is probably too promiscuous, but
+          // we'll leave it in for now until we have a systematic way of
+          // handling all of these cases.  Eventually we need to come up
+          // with an interface to StoreManager so that this logic can be
+          // appropriately delegated to the respective StoreManagers while
+          // still allowing us to do checker-specific logic (e.g.,
+          // invalidating reference counts), probably via callbacks.
+          if (ER->getElementType()->isIntegralOrEnumerationType()) {
+            const MemRegion *superReg = ER->getSuperRegion();
+            if (isa<VarRegion>(superReg) || isa<FieldRegion>(superReg) ||
+                isa<ObjCIvarRegion>(superReg))
+              R = cast<TypedRegion>(superReg);
+          }
+          // FIXME: What about layers of ElementRegions?
+        }
+
+        // Mark this region for invalidation.  We batch invalidate regions
+        // below for efficiency.
+        RegionsToInvalidate.push_back(R);
+        continue;
+      }
+      else {
+        // Nuke all other arguments passed by reference.
+        // FIXME: is this necessary or correct?  This handles the non-Region
+        //  cases.  Is it ever valid to store to these?
+        state = state->unbindLoc(cast<Loc>(V));
+      }
+    }
+    else if (isa<nonloc::LocAsInteger>(V)) {
+      // If we are passing a location wrapped as an integer, unwrap it and
+      // invalidate the values referred by the location.
+      V = cast<nonloc::LocAsInteger>(V).getLoc();
+      goto tryAgain;
+    }
+  }
+
+  // Block calls result in all captured values passed by reference being
+  // invalidated.
+  if (const BlockDataRegion *BR = dyn_cast_or_null<BlockDataRegion>(Callee)) {
+    RegionsToInvalidate.push_back(BR);
+  }
+
+  // Invalidate the regions we designated for invalidation, using the batch
+  // invalidation API.
+
+  // FIXME: We can have collisions on the conjured symbol if the
+  //  expression *I also creates conjured symbols.  We probably want
+  //  to identify conjured symbols by an expression pair: the enclosing
+  //  expression (the context) and the expression itself.  This should
+  //  disambiguate conjured symbols.
+  unsigned Count = Builder.getCurrentBlockCount();
+  StoreManager::InvalidatedSymbols IS;
+
+  // NOTE: Even if RegionsToInvalidate is empty, we must still invalidate
+  //  global variables.
+  state = state->invalidateRegions(RegionsToInvalidate.data(),
+                                   RegionsToInvalidate.data() +
+                                   RegionsToInvalidate.size(),
+                                   Ex, Count, &IS,
+                                   /* invalidateGlobals = */ true);
+
+  for (StoreManager::InvalidatedSymbols::iterator I = IS.begin(),
+       E = IS.end(); I!=E; ++I) {
+    SymbolRef sym = *I;
+    if (WhitelistedSymbols.count(sym))
+      continue;
+    // Remove any existing reference-count binding.
+    state = state->remove<RefBindings>(*I);
+  }
+
+  // Evaluate the effect on the message receiver.
+  if (!ErrorRange.isValid() && Receiver) {
+    SymbolRef Sym = Receiver.getSValAsScalarOrLoc(state).getAsLocSymbol();
+    if (Sym) {
+      if (const RefVal* T = state->get<RefBindings>(Sym)) {
+        state = Update(state, Sym, *T, Summ.getReceiverEffect(), hasErr);
+        if (hasErr) {
+          ErrorRange = Receiver.getSourceRange();
+          ErrorSym = Sym;
+        }
+      }
+    }
+  }
+
+  // Process any errors.
+  if (hasErr) {
+    ProcessNonLeakError(Dst, Builder, Ex, ErrorRange, Pred, state,
+                        hasErr, ErrorSym);
+    return;
+  }
+
+  // Consult the summary for the return value.
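+  // The return effect describes what the summary says about the call's
+  // result: nothing we track (NoRet), an alias of an argument or of the
+  // receiver, or a fresh symbol carrying either a +1 (owned) or +0 (not
+  // owned) retain count.  Each case is handled by the switch below.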
+ RetEffect RE = Summ.getRetEffect(); + + if (RE.getKind() == RetEffect::OwnedWhenTrackedReceiver) { + bool found = false; + if (Receiver) { + SVal V = Receiver.getSValAsScalarOrLoc(state); + if (SymbolRef Sym = V.getAsLocSymbol()) + if (state->get<RefBindings>(Sym)) { + found = true; + RE = Summaries.getObjAllocRetEffect(); + } + } // FIXME: Otherwise, this is a send-to-super instance message. + if (!found) + RE = RetEffect::MakeNoRet(); + } + + switch (RE.getKind()) { + default: + assert (false && "Unhandled RetEffect."); break; + + case RetEffect::NoRet: { + // Make up a symbol for the return value (not reference counted). + // FIXME: Most of this logic is not specific to the retain/release + // checker. + + // FIXME: We eventually should handle structs and other compound types + // that are returned by value. + + QualType T = callOrMsg.getResultType(Eng.getContext()); + if (Loc::isLocType(T) || (T->isIntegerType() && T->isScalarType())) { + unsigned Count = Builder.getCurrentBlockCount(); + SValBuilder &svalBuilder = Eng.getSValBuilder(); + SVal X = svalBuilder.getConjuredSymbolVal(NULL, Ex, T, Count); + state = state->BindExpr(Ex, X, false); + } + + break; + } + + case RetEffect::Alias: { + unsigned idx = RE.getIndex(); + assert (idx < callOrMsg.getNumArgs()); + SVal V = callOrMsg.getArgSValAsScalarOrLoc(idx); + state = state->BindExpr(Ex, V, false); + break; + } + + case RetEffect::ReceiverAlias: { + assert(Receiver); + SVal V = Receiver.getSValAsScalarOrLoc(state); + state = state->BindExpr(Ex, V, false); + break; + } + + case RetEffect::OwnedAllocatedSymbol: + case RetEffect::OwnedSymbol: { + unsigned Count = Builder.getCurrentBlockCount(); + SValBuilder &svalBuilder = Eng.getSValBuilder(); + SymbolRef Sym = svalBuilder.getConjuredSymbol(Ex, Count); + QualType RetT = GetReturnType(Ex, svalBuilder.getContext()); + state = state->set<RefBindings>(Sym, RefVal::makeOwned(RE.getObjKind(), + RetT)); + state = state->BindExpr(Ex, svalBuilder.makeLoc(Sym), false); + + // FIXME: Add a flag to the checker where allocations are assumed to + // *not fail. +#if 0 + if (RE.getKind() == RetEffect::OwnedAllocatedSymbol) { + bool isFeasible; + state = state.assume(loc::SymbolVal(Sym), true, isFeasible); + assert(isFeasible && "Cannot assume fresh symbol is non-null."); + } +#endif + + break; + } + + case RetEffect::GCNotOwnedSymbol: + case RetEffect::NotOwnedSymbol: { + unsigned Count = Builder.getCurrentBlockCount(); + SValBuilder &svalBuilder = Eng.getSValBuilder(); + SymbolRef Sym = svalBuilder.getConjuredSymbol(Ex, Count); + QualType RetT = GetReturnType(Ex, svalBuilder.getContext()); + state = state->set<RefBindings>(Sym, RefVal::makeNotOwned(RE.getObjKind(), + RetT)); + state = state->BindExpr(Ex, svalBuilder.makeLoc(Sym), false); + break; + } + } + + // Generate a sink node if we are at the end of a path. + ExplodedNode *NewNode = + Summ.isEndPath() ? Builder.MakeSinkNode(Dst, Ex, Pred, state) + : Builder.MakeNode(Dst, Ex, Pred, state); + + // Annotate the edge with summary we used. + if (NewNode) SummaryLog[NewNode] = &Summ; +} + + +void CFRefCount::evalCall(ExplodedNodeSet& Dst, + ExprEngine& Eng, + StmtNodeBuilder& Builder, + const CallExpr* CE, SVal L, + ExplodedNode* Pred) { + + RetainSummary *Summ = 0; + + // FIXME: Better support for blocks. For now we stop tracking anything + // that is passed to blocks. + // FIXME: Need to handle variables that are "captured" by the block. 
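+  // (In the meantime, a call through a block pointer receives the
+  // persistent 'stop' summary below, so no retain-count warnings are
+  // issued for objects that flow into a block.)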
+ if (dyn_cast_or_null<BlockDataRegion>(L.getAsRegion())) { + Summ = Summaries.getPersistentStopSummary(); + } + else { + const FunctionDecl* FD = L.getAsFunctionDecl(); + Summ = !FD ? Summaries.getDefaultSummary() : + Summaries.getSummary(FD); + } + + assert(Summ); + evalSummary(Dst, Eng, Builder, CE, + CallOrObjCMessage(CE, Builder.GetState(Pred)), + InstanceReceiver(), *Summ,L.getAsRegion(), + Pred, Builder.GetState(Pred)); +} + +void CFRefCount::evalObjCMessage(ExplodedNodeSet& Dst, + ExprEngine& Eng, + StmtNodeBuilder& Builder, + ObjCMessage msg, + ExplodedNode* Pred, + const GRState *state) { + RetainSummary *Summ = + msg.isInstanceMessage() + ? Summaries.getInstanceMethodSummary(msg, state,Pred->getLocationContext()) + : Summaries.getClassMethodSummary(msg); + + assert(Summ && "RetainSummary is null"); + evalSummary(Dst, Eng, Builder, msg.getOriginExpr(), + CallOrObjCMessage(msg, Builder.GetState(Pred)), + InstanceReceiver(msg, Pred->getLocationContext()), *Summ, NULL, + Pred, state); +} + +namespace { +class StopTrackingCallback : public SymbolVisitor { + const GRState *state; +public: + StopTrackingCallback(const GRState *st) : state(st) {} + const GRState *getState() const { return state; } + + bool VisitSymbol(SymbolRef sym) { + state = state->remove<RefBindings>(sym); + return true; + } +}; +} // end anonymous namespace + + +void CFRefCount::evalBind(StmtNodeBuilderRef& B, SVal location, SVal val) { + // Are we storing to something that causes the value to "escape"? + bool escapes = false; + + // A value escapes in three possible cases (this may change): + // + // (1) we are binding to something that is not a memory region. + // (2) we are binding to a memregion that does not have stack storage + // (3) we are binding to a memregion with stack storage that the store + // does not understand. + const GRState *state = B.getState(); + + if (!isa<loc::MemRegionVal>(location)) + escapes = true; + else { + const MemRegion* R = cast<loc::MemRegionVal>(location).getRegion(); + escapes = !R->hasStackStorage(); + + if (!escapes) { + // To test (3), generate a new state with the binding removed. If it is + // the same state, then it escapes (since the store cannot represent + // the binding). + escapes = (state == (state->bindLoc(cast<Loc>(location), UnknownVal()))); + } + } + + // If our store can represent the binding and we aren't storing to something + // that doesn't have local storage then just return and have the simulation + // state continue as is. + if (!escapes) + return; + + // Otherwise, find all symbols referenced by 'val' that we are tracking + // and stop tracking them. + B.MakeNode(state->scanReachableSymbols<StopTrackingCallback>(val).getState()); +} + + // Return statements. + +void CFRefCount::evalReturn(ExplodedNodeSet& Dst, + ExprEngine& Eng, + StmtNodeBuilder& Builder, + const ReturnStmt* S, + ExplodedNode* Pred) { + + const Expr* RetE = S->getRetValue(); + if (!RetE) + return; + + const GRState *state = Builder.GetState(Pred); + SymbolRef Sym = state->getSValAsScalarOrLoc(RetE).getAsLocSymbol(); + + if (!Sym) + return; + + // Get the reference count binding (if any). + const RefVal* T = state->get<RefBindings>(Sym); + + if (!T) + return; + + // Change the reference count. 
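+  // Returning an owned object transfers exactly one retain count to the
+  // caller: decrement the count and move the binding to ReturnedOwned.
+  // A +0 value instead becomes ReturnedNotOwned.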
+  RefVal X = *T;
+
+  switch (X.getKind()) {
+    case RefVal::Owned: {
+      unsigned cnt = X.getCount();
+      assert (cnt > 0);
+      X.setCount(cnt - 1);
+      X = X ^ RefVal::ReturnedOwned;
+      break;
+    }
+
+    case RefVal::NotOwned: {
+      unsigned cnt = X.getCount();
+      if (cnt) {
+        X.setCount(cnt - 1);
+        X = X ^ RefVal::ReturnedOwned;
+      }
+      else {
+        X = X ^ RefVal::ReturnedNotOwned;
+      }
+      break;
+    }
+
+    default:
+      return;
+  }
+
+  // Update the binding.
+  state = state->set<RefBindings>(Sym, X);
+  Pred = Builder.MakeNode(Dst, S, Pred, state);
+
+  // Did we cache out?
+  if (!Pred)
+    return;
+
+  // Update the autorelease counts.
+  static unsigned autoreleasetag = 0;
+  GenericNodeBuilderRefCount Bd(Builder, S, &autoreleasetag);
+  bool stop = false;
+  llvm::tie(Pred, state) = HandleAutoreleaseCounts(state, Bd, Pred, Eng, Sym,
+                                                   X, stop);
+
+  // Did we cache out?
+  if (!Pred || stop)
+    return;
+
+  // Get the updated binding.
+  T = state->get<RefBindings>(Sym);
+  assert(T);
+  X = *T;
+
+  // Any leaks or other errors?
+  if (X.isReturnedOwned() && X.getCount() == 0) {
+    Decl const *CD = &Pred->getCodeDecl();
+    if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(CD)) {
+      const RetainSummary &Summ = *Summaries.getMethodSummary(MD);
+      RetEffect RE = Summ.getRetEffect();
+      bool hasError = false;
+
+      if (RE.getKind() != RetEffect::NoRet) {
+        if (isGCEnabled() && RE.getObjKind() == RetEffect::ObjC) {
+          // Things are more complicated with garbage collection.  If the
+          // returned object is supposed to be an Objective-C object, we have
+          // a leak (as the caller expects a GC'ed object) because no
+          // method should return ownership unless it returns a CF object.
+          hasError = true;
+          X = X ^ RefVal::ErrorGCLeakReturned;
+        }
+        else if (!RE.isOwned()) {
+          // Either we are using GC and the returned object is a CF type
+          // or we aren't using GC.  In either case, we expect that the
+          // enclosing method is expected to return ownership.
+          hasError = true;
+          X = X ^ RefVal::ErrorLeakReturned;
+        }
+      }
+
+      if (hasError) {
+        // Generate an error node.
+        static int ReturnOwnLeakTag = 0;
+        state = state->set<RefBindings>(Sym, X);
+        ExplodedNode *N =
+          Builder.generateNode(PostStmt(S, Pred->getLocationContext(),
+                                        &ReturnOwnLeakTag), state, Pred);
+        if (N) {
+          CFRefReport *report =
+            new CFRefLeakReport(*static_cast<CFRefBug*>(leakAtReturn), *this,
+                                N, Sym, Eng);
+          BR->EmitReport(report);
+        }
+      }
+    }
+  }
+  else if (X.isReturnedNotOwned()) {
+    Decl const *CD = &Pred->getCodeDecl();
+    if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(CD)) {
+      const RetainSummary &Summ = *Summaries.getMethodSummary(MD);
+      if (Summ.getRetEffect().isOwned()) {
+        // Trying to return a not owned object to a caller expecting an
+        // owned object.
+
+        static int ReturnNotOwnedForOwnedTag = 0;
+        state = state->set<RefBindings>(Sym, X ^ RefVal::ErrorReturnedNotOwned);
+        if (ExplodedNode *N =
+              Builder.generateNode(PostStmt(S, Pred->getLocationContext(),
+                                            &ReturnNotOwnedForOwnedTag),
+                                   state, Pred)) {
+          CFRefReport *report =
+            new CFRefReport(*static_cast<CFRefBug*>(returnNotOwnedForOwned),
+                            *this, N, Sym);
+          BR->EmitReport(report);
+        }
+      }
+    }
+  }
+}
+
+// Assumptions.
+
+const GRState* CFRefCount::evalAssume(const GRState *state,
+                                      SVal Cond, bool Assumption) {
+
+  // FIXME: We may add to the interface of evalAssume the list of symbols
+  //  whose assumptions have changed.  For now we just iterate through the
+  //  bindings and check if any of the tracked symbols are NULL.
+  //  This isn't too bad since the number of symbols we will track in practice
+  //  is probably small and evalAssume is only called at branches and a few
+  //  other places.
+  RefBindings B = state->get<RefBindings>();
+
+  if (B.isEmpty())
+    return state;
+
+  bool changed = false;
+  RefBindings::Factory& RefBFactory = state->get_context<RefBindings>();
+
+  for (RefBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
+    // Check if the symbol is null (or equal to any constant).
+    // If this is the case, stop tracking the symbol.
+    if (state->getSymVal(I.getKey())) {
+      changed = true;
+      B = RefBFactory.remove(B, I.getKey());
+    }
+  }
+
+  if (changed)
+    state = state->set<RefBindings>(B);
+
+  return state;
+}
+
+const GRState * CFRefCount::Update(const GRState * state, SymbolRef sym,
+                                   RefVal V, ArgEffect E,
+                                   RefVal::Kind& hasErr) {
+
+  // In GC mode [... release] and [... retain] do nothing.
+  switch (E) {
+    default: break;
+    case IncRefMsg: E = isGCEnabled() ? DoNothing : IncRef; break;
+    case DecRefMsg: E = isGCEnabled() ? DoNothing : DecRef; break;
+    case MakeCollectable: E = isGCEnabled() ? DecRef : DoNothing; break;
+    case NewAutoreleasePool: E = isGCEnabled() ? DoNothing :
+                                                 NewAutoreleasePool; break;
+  }
+
+  // Handle all use-after-releases.
+  if (!isGCEnabled() && V.getKind() == RefVal::Released) {
+    V = V ^ RefVal::ErrorUseAfterRelease;
+    hasErr = V.getKind();
+    return state->set<RefBindings>(sym, V);
+  }
+
+  switch (E) {
+    default:
+      assert (false && "Unhandled CFRef transition.");
+
+    case Dealloc:
+      // Any use of -dealloc in GC is *bad*.
+      if (isGCEnabled()) {
+        V = V ^ RefVal::ErrorDeallocGC;
+        hasErr = V.getKind();
+        break;
+      }
+
+      switch (V.getKind()) {
+        default:
+          assert(false && "Invalid case.");
+        case RefVal::Owned:
+          // The object immediately transitions to the released state.
+          V = V ^ RefVal::Released;
+          V.clearCounts();
+          return state->set<RefBindings>(sym, V);
+        case RefVal::NotOwned:
+          V = V ^ RefVal::ErrorDeallocNotOwned;
+          hasErr = V.getKind();
+          break;
+      }
+      break;
+
+    case NewAutoreleasePool:
+      assert(!isGCEnabled());
+      return state->add<AutoreleaseStack>(sym);
+
+    case MayEscape:
+      if (V.getKind() == RefVal::Owned) {
+        V = V ^ RefVal::NotOwned;
+        break;
+      }
+
+      // Fall-through.
+
+    case DoNothingByRef:
+    case DoNothing:
+      return state;
+
+    case Autorelease:
+      if (isGCEnabled())
+        return state;
+
+      // Update the autorelease counts.
+      state = SendAutorelease(state, ARCountFactory, sym);
+      V = V.autorelease();
+      break;
+
+    case StopTracking:
+      return state->remove<RefBindings>(sym);
+
+    case IncRef:
+      switch (V.getKind()) {
+        default:
+          assert(false);
+
+        case RefVal::Owned:
+        case RefVal::NotOwned:
+          V = V + 1;
+          break;
+        case RefVal::Released:
+          // Non-GC cases are handled above.
+          assert(isGCEnabled());
+          V = (V ^ RefVal::Owned) + 1;
+          break;
+      }
+      break;
+
+    case SelfOwn:
+      V = V ^ RefVal::NotOwned;
+      // Fall-through.
+    case DecRef:
+      switch (V.getKind()) {
+        default:
+          // case 'RefVal::Released' handled above.
+          assert (false);
+
+        case RefVal::Owned:
+          assert(V.getCount() > 0);
+          if (V.getCount() == 1) V = V ^ RefVal::Released;
+          V = V - 1;
+          break;
+
+        case RefVal::NotOwned:
+          if (V.getCount() > 0)
+            V = V - 1;
+          else {
+            V = V ^ RefVal::ErrorReleaseNotOwned;
+            hasErr = V.getKind();
+          }
+          break;
+
+        case RefVal::Released:
+          // Non-GC cases are handled above.
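+          // (Under GC, a further decrement of an already-released value is
+          // still treated as a use-after-release, mirroring the non-GC
+          // check at the top of this function.)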
+          assert(isGCEnabled());
+          V = V ^ RefVal::ErrorUseAfterRelease;
+          hasErr = V.getKind();
+          break;
+      }
+      break;
+  }
+  return state->set<RefBindings>(sym, V);
+}
+
+//===----------------------------------------------------------------------===//
+// Handle dead symbols and end-of-path.
+//===----------------------------------------------------------------------===//
+
+std::pair<ExplodedNode*, const GRState *>
+CFRefCount::HandleAutoreleaseCounts(const GRState * state,
+                                    GenericNodeBuilderRefCount Bd,
+                                    ExplodedNode* Pred,
+                                    ExprEngine &Eng,
+                                    SymbolRef Sym, RefVal V, bool &stop) {
+
+  unsigned ACnt = V.getAutoreleaseCount();
+  stop = false;
+
+  // No autorelease counts?  Nothing to be done.
+  if (!ACnt)
+    return std::make_pair(Pred, state);
+
+  assert(!isGCEnabled() && "Autorelease counts in GC mode?");
+  unsigned Cnt = V.getCount();
+
+  // FIXME: Handle sending 'autorelease' to already released object.
+
+  if (V.getKind() == RefVal::ReturnedOwned)
+    ++Cnt;
+
+  if (ACnt <= Cnt) {
+    if (ACnt == Cnt) {
+      V.clearCounts();
+      if (V.getKind() == RefVal::ReturnedOwned)
+        V = V ^ RefVal::ReturnedNotOwned;
+      else
+        V = V ^ RefVal::NotOwned;
+    }
+    else {
+      V.setCount(Cnt - ACnt);
+      V.setAutoreleaseCount(0);
+    }
+    state = state->set<RefBindings>(Sym, V);
+    ExplodedNode *N = Bd.MakeNode(state, Pred);
+    stop = (N == 0);
+    return std::make_pair(N, state);
+  }
+
+  // Whoa!  More autorelease counts than retain counts left.
+  // Emit hard error.
+  stop = true;
+  V = V ^ RefVal::ErrorOverAutorelease;
+  state = state->set<RefBindings>(Sym, V);
+
+  if (ExplodedNode *N = Bd.MakeNode(state, Pred)) {
+    N->markAsSink();
+
+    std::string sbuf;
+    llvm::raw_string_ostream os(sbuf);
+    os << "Object over-autoreleased: object was sent -autorelease";
+    if (V.getAutoreleaseCount() > 1)
+      os << ' ' << V.getAutoreleaseCount() << " times";
+    os << " but the object has ";
+    if (V.getCount() == 0)
+      os << "zero (locally visible)";
+    else
+      os << "+" << V.getCount();
+    os << " retain counts";
+
+    CFRefReport *report =
+      new CFRefReport(*static_cast<CFRefBug*>(overAutorelease),
+                      *this, N, Sym, os.str());
+    BR->EmitReport(report);
+  }
+
+  return std::make_pair((ExplodedNode*)0, state);
+}
+
+const GRState *
+CFRefCount::HandleSymbolDeath(const GRState * state, SymbolRef sid, RefVal V,
+                              llvm::SmallVectorImpl<SymbolRef> &Leaked) {
+
+  bool hasLeak = V.isOwned() ||
+    ((V.isNotOwned() || V.isReturnedOwned()) && V.getCount() > 0);
+
+  if (!hasLeak)
+    return state->remove<RefBindings>(sid);
+
+  Leaked.push_back(sid);
+  return state->set<RefBindings>(sid, V ^ RefVal::ErrorLeak);
+}
+
+ExplodedNode*
+CFRefCount::ProcessLeaks(const GRState * state,
+                         llvm::SmallVectorImpl<SymbolRef> &Leaked,
+                         GenericNodeBuilderRefCount &Builder,
+                         ExprEngine& Eng,
+                         ExplodedNode *Pred) {
+
+  if (Leaked.empty())
+    return Pred;
+
+  // Generate an intermediate node representing the leak point.
+  ExplodedNode *N = Builder.MakeNode(state, Pred);
+
+  if (N) {
+    for (llvm::SmallVectorImpl<SymbolRef>::iterator
+         I = Leaked.begin(), E = Leaked.end(); I != E; ++I) {
+
+      CFRefBug *BT = static_cast<CFRefBug*>(Pred ?
+                                             leakWithinFunction
+                                           : leakAtReturn);
+      assert(BT && "BugType not initialized.");
+      CFRefLeakReport* report = new CFRefLeakReport(*BT, *this, N, *I, Eng);
+      BR->EmitReport(report);
+    }
+  }
+
+  return N;
+}
+
+void CFRefCount::evalEndPath(ExprEngine& Eng,
+                             EndOfFunctionNodeBuilder& Builder) {
+
+  const GRState *state = Builder.getState();
+  GenericNodeBuilderRefCount Bd(Builder);
+  RefBindings B = state->get<RefBindings>();
+  ExplodedNode *Pred = 0;
+
+  for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+    bool stop = false;
+    llvm::tie(Pred, state) = HandleAutoreleaseCounts(state, Bd, Pred, Eng,
+                                                     (*I).first,
+                                                     (*I).second, stop);
+
+    if (stop)
+      return;
+  }
+
+  B = state->get<RefBindings>();
+  llvm::SmallVector<SymbolRef, 10> Leaked;
+
+  for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I)
+    state = HandleSymbolDeath(state, (*I).first, (*I).second, Leaked);
+
+  ProcessLeaks(state, Leaked, Bd, Eng, Pred);
+}
+
+void CFRefCount::evalDeadSymbols(ExplodedNodeSet& Dst,
+                                 ExprEngine& Eng,
+                                 StmtNodeBuilder& Builder,
+                                 ExplodedNode* Pred,
+                                 const GRState* state,
+                                 SymbolReaper& SymReaper) {
+  const Stmt *S = Builder.getStmt();
+  RefBindings B = state->get<RefBindings>();
+
+  // Update counts from autorelease pools.
+  for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+       E = SymReaper.dead_end(); I != E; ++I) {
+    SymbolRef Sym = *I;
+    if (const RefVal* T = B.lookup(Sym)){
+      // Use the symbol as the tag.
+      // FIXME: This might not be as unique as we would like.
+      GenericNodeBuilderRefCount Bd(Builder, S, Sym);
+      bool stop = false;
+      llvm::tie(Pred, state) = HandleAutoreleaseCounts(state, Bd, Pred, Eng,
+                                                       Sym, *T, stop);
+      if (stop)
+        return;
+    }
+  }
+
+  B = state->get<RefBindings>();
+  llvm::SmallVector<SymbolRef, 10> Leaked;
+
+  for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+       E = SymReaper.dead_end(); I != E; ++I) {
+    if (const RefVal* T = B.lookup(*I))
+      state = HandleSymbolDeath(state, *I, *T, Leaked);
+  }
+
+  static unsigned LeakPPTag = 0;
+  {
+    GenericNodeBuilderRefCount Bd(Builder, S, &LeakPPTag);
+    Pred = ProcessLeaks(state, Leaked, Bd, Eng, Pred);
+  }
+
+  // Did we cache out?
+  if (!Pred)
+    return;
+
+  // Now generate a new node that nukes the old bindings.
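+  // (Dropping the dead bindings keeps the analysis state small and lets
+  // otherwise-identical states merge in the ExplodedGraph.)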
+ RefBindings::Factory& F = state->get_context<RefBindings>(); + + for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(), + E = SymReaper.dead_end(); I!=E; ++I) B = F.remove(B, *I); + + state = state->set<RefBindings>(B); + Builder.MakeNode(Dst, S, Pred, state); +} + +void CFRefCount::ProcessNonLeakError(ExplodedNodeSet& Dst, + StmtNodeBuilder& Builder, + const Expr* NodeExpr, + SourceRange ErrorRange, + ExplodedNode* Pred, + const GRState* St, + RefVal::Kind hasErr, SymbolRef Sym) { + Builder.BuildSinks = true; + ExplodedNode *N = Builder.MakeNode(Dst, NodeExpr, Pred, St); + + if (!N) + return; + + CFRefBug *BT = 0; + + switch (hasErr) { + default: + assert(false && "Unhandled error."); + return; + case RefVal::ErrorUseAfterRelease: + BT = static_cast<CFRefBug*>(useAfterRelease); + break; + case RefVal::ErrorReleaseNotOwned: + BT = static_cast<CFRefBug*>(releaseNotOwned); + break; + case RefVal::ErrorDeallocGC: + BT = static_cast<CFRefBug*>(deallocGC); + break; + case RefVal::ErrorDeallocNotOwned: + BT = static_cast<CFRefBug*>(deallocNotOwned); + break; + } + + CFRefReport *report = new CFRefReport(*BT, *this, N, Sym); + report->addRange(ErrorRange); + BR->EmitReport(report); +} + +//===----------------------------------------------------------------------===// +// Pieces of the retain/release checker implemented using a CheckerVisitor. +// More pieces of the retain/release checker will be migrated to this interface +// (ideally, all of it some day). +//===----------------------------------------------------------------------===// + +namespace { +class RetainReleaseChecker + : public CheckerVisitor<RetainReleaseChecker> { + CFRefCount *TF; +public: + RetainReleaseChecker(CFRefCount *tf) : TF(tf) {} + static void* getTag() { static int x = 0; return &x; } + + void PostVisitBlockExpr(CheckerContext &C, const BlockExpr *BE); +}; +} // end anonymous namespace + + +void RetainReleaseChecker::PostVisitBlockExpr(CheckerContext &C, + const BlockExpr *BE) { + + // Scan the BlockDecRefExprs for any object the retain/release checker + // may be tracking. + if (!BE->getBlockDecl()->hasCaptures()) + return; + + const GRState *state = C.getState(); + const BlockDataRegion *R = + cast<BlockDataRegion>(state->getSVal(BE).getAsRegion()); + + BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(), + E = R->referenced_vars_end(); + + if (I == E) + return; + + // FIXME: For now we invalidate the tracking of all symbols passed to blocks + // via captured variables, even though captured variables result in a copy + // and in implicit increment/decrement of a retain count. + llvm::SmallVector<const MemRegion*, 10> Regions; + const LocationContext *LC = C.getPredecessor()->getLocationContext(); + MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager(); + + for ( ; I != E; ++I) { + const VarRegion *VR = *I; + if (VR->getSuperRegion() == R) { + VR = MemMgr.getVarRegion(VR->getDecl(), LC); + } + Regions.push_back(VR); + } + + state = + state->scanReachableSymbols<StopTrackingCallback>(Regions.data(), + Regions.data() + Regions.size()).getState(); + C.addTransition(state); +} + +//===----------------------------------------------------------------------===// +// Transfer function creation for external clients. 
+//===----------------------------------------------------------------------===// + +void CFRefCount::RegisterChecks(ExprEngine& Eng) { + BugReporter &BR = Eng.getBugReporter(); + + useAfterRelease = new UseAfterRelease(this); + BR.Register(useAfterRelease); + + releaseNotOwned = new BadRelease(this); + BR.Register(releaseNotOwned); + + deallocGC = new DeallocGC(this); + BR.Register(deallocGC); + + deallocNotOwned = new DeallocNotOwned(this); + BR.Register(deallocNotOwned); + + overAutorelease = new OverAutorelease(this); + BR.Register(overAutorelease); + + returnNotOwnedForOwned = new ReturnedNotOwnedForOwned(this); + BR.Register(returnNotOwnedForOwned); + + // First register "return" leaks. + const char* name = 0; + + if (isGCEnabled()) + name = "Leak of returned object when using garbage collection"; + else if (getLangOptions().getGCMode() == LangOptions::HybridGC) + name = "Leak of returned object when not using garbage collection (GC) in " + "dual GC/non-GC code"; + else { + assert(getLangOptions().getGCMode() == LangOptions::NonGC); + name = "Leak of returned object"; + } + + // Leaks should not be reported if they are post-dominated by a sink. + leakAtReturn = new LeakAtReturn(this, name); + leakAtReturn->setSuppressOnSink(true); + BR.Register(leakAtReturn); + + // Second, register leaks within a function/method. + if (isGCEnabled()) + name = "Leak of object when using garbage collection"; + else if (getLangOptions().getGCMode() == LangOptions::HybridGC) + name = "Leak of object when not using garbage collection (GC) in " + "dual GC/non-GC code"; + else { + assert(getLangOptions().getGCMode() == LangOptions::NonGC); + name = "Leak"; + } + + // Leaks should not be reported if they are post-dominated by sinks. + leakWithinFunction = new LeakWithinFunction(this, name); + leakWithinFunction->setSuppressOnSink(true); + BR.Register(leakWithinFunction); + + // Save the reference to the BugReporter. + this->BR = &BR; + + // Register the RetainReleaseChecker with the ExprEngine object. + // Functionality in CFRefCount will be migrated to RetainReleaseChecker + // over time. 
+ Eng.registerCheck(new RetainReleaseChecker(this)); +} + +TransferFuncs* ento::MakeCFRefCountTF(ASTContext& Ctx, bool GCEnabled, + const LangOptions& lopts) { + return new CFRefCount(Ctx, GCEnabled, lopts); +} diff --git a/lib/StaticAnalyzer/Core/CMakeLists.txt b/lib/StaticAnalyzer/Core/CMakeLists.txt new file mode 100644 index 0000000..14c636c --- /dev/null +++ b/lib/StaticAnalyzer/Core/CMakeLists.txt @@ -0,0 +1,41 @@ +set(LLVM_LINK_COMPONENTS support) + +set(LLVM_USED_LIBS clangBasic clangLex clangAST clangFrontend clangRewrite) + +add_clang_library(clangStaticAnalyzerCore + AggExprVisitor.cpp + AnalysisManager.cpp + BasicConstraintManager.cpp + BasicStore.cpp + BasicValueFactory.cpp + BugReporter.cpp + BugReporterVisitors.cpp + CFRefCount.cpp + Checker.cpp + CheckerHelpers.cpp + CheckerManager.cpp + Environment.cpp + ExplodedGraph.cpp + FlatStore.cpp + BlockCounter.cpp + CXXExprEngine.cpp + CoreEngine.cpp + GRState.cpp + HTMLDiagnostics.cpp + MemRegion.cpp + ObjCMessage.cpp + PathDiagnostic.cpp + PlistDiagnostics.cpp + RangeConstraintManager.cpp + RegionStore.cpp + SimpleConstraintManager.cpp + SimpleSValBuilder.cpp + Store.cpp + SValBuilder.cpp + SVals.cpp + SymbolManager.cpp + TextPathDiagnostics.cpp + ) + +add_dependencies(clangStaticAnalyzerCore ClangAttrClasses ClangAttrList ClangDeclNodes + ClangStmtNodes) diff --git a/lib/StaticAnalyzer/Core/CXXExprEngine.cpp b/lib/StaticAnalyzer/Core/CXXExprEngine.cpp new file mode 100644 index 0000000..56dfe8c --- /dev/null +++ b/lib/StaticAnalyzer/Core/CXXExprEngine.cpp @@ -0,0 +1,322 @@ +//===- GRCXXExprEngine.cpp - C++ expr evaluation engine ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the C++ expression evaluation engine. +// +//===----------------------------------------------------------------------===// + +#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h" +#include "clang/AST/DeclCXX.h" + +using namespace clang; +using namespace ento; + +namespace { +class CallExprWLItem { +public: + CallExpr::const_arg_iterator I; + ExplodedNode *N; + + CallExprWLItem(const CallExpr::const_arg_iterator &i, ExplodedNode *n) + : I(i), N(n) {} +}; +} + +void ExprEngine::evalArguments(ConstExprIterator AI, ConstExprIterator AE, + const FunctionProtoType *FnType, + ExplodedNode *Pred, ExplodedNodeSet &Dst, + bool FstArgAsLValue) { + + + llvm::SmallVector<CallExprWLItem, 20> WorkList; + WorkList.reserve(AE - AI); + WorkList.push_back(CallExprWLItem(AI, Pred)); + + while (!WorkList.empty()) { + CallExprWLItem Item = WorkList.back(); + WorkList.pop_back(); + + if (Item.I == AE) { + Dst.insert(Item.N); + continue; + } + + // Evaluate the argument. 
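+    // (Each worklist item pairs the next argument to evaluate with the
+    // node reached after evaluating the earlier arguments, so every
+    // combination of argument outcomes eventually reaches 'Dst'.)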
+ ExplodedNodeSet Tmp; + if (FstArgAsLValue) { + FstArgAsLValue = false; + } + + Visit(*Item.I, Item.N, Tmp); + ++(Item.I); + for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI != NE; ++NI) + WorkList.push_back(CallExprWLItem(Item.I, *NI)); + } +} + +const CXXThisRegion *ExprEngine::getCXXThisRegion(const CXXRecordDecl *D, + const StackFrameContext *SFC) { + const Type *T = D->getTypeForDecl(); + QualType PT = getContext().getPointerType(QualType(T, 0)); + return svalBuilder.getRegionManager().getCXXThisRegion(PT, SFC); +} + +const CXXThisRegion *ExprEngine::getCXXThisRegion(const CXXMethodDecl *decl, + const StackFrameContext *frameCtx) { + return svalBuilder.getRegionManager(). + getCXXThisRegion(decl->getThisType(getContext()), frameCtx); +} + +void ExprEngine::CreateCXXTemporaryObject(const Expr *Ex, ExplodedNode *Pred, + ExplodedNodeSet &Dst) { + ExplodedNodeSet Tmp; + Visit(Ex, Pred, Tmp); + for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) { + const GRState *state = GetState(*I); + + // Bind the temporary object to the value of the expression. Then bind + // the expression to the location of the object. + SVal V = state->getSVal(Ex); + + const MemRegion *R = + svalBuilder.getRegionManager().getCXXTempObjectRegion(Ex, + Pred->getLocationContext()); + + state = state->bindLoc(loc::MemRegionVal(R), V); + MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, loc::MemRegionVal(R))); + } +} + +void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *E, + const MemRegion *Dest, + ExplodedNode *Pred, + ExplodedNodeSet &Dst) { + if (!Dest) + Dest = svalBuilder.getRegionManager().getCXXTempObjectRegion(E, + Pred->getLocationContext()); + + if (E->isElidable()) { + VisitAggExpr(E->getArg(0), Dest, Pred, Dst); + return; + } + + const CXXConstructorDecl *CD = E->getConstructor(); + assert(CD); + + if (!(CD->isThisDeclarationADefinition() && AMgr.shouldInlineCall())) + // FIXME: invalidate the object. + return; + + + // Evaluate other arguments. + ExplodedNodeSet argsEvaluated; + const FunctionProtoType *FnType = CD->getType()->getAs<FunctionProtoType>(); + evalArguments(E->arg_begin(), E->arg_end(), FnType, Pred, argsEvaluated); + // The callee stack frame context used to create the 'this' parameter region. + const StackFrameContext *SFC = AMgr.getStackFrame(CD, + Pred->getLocationContext(), + E, Builder->getBlock(), + Builder->getIndex()); + + const CXXThisRegion *ThisR =getCXXThisRegion(E->getConstructor()->getParent(), + SFC); + + CallEnter Loc(E, SFC, Pred->getLocationContext()); + for (ExplodedNodeSet::iterator NI = argsEvaluated.begin(), + NE = argsEvaluated.end(); NI != NE; ++NI) { + const GRState *state = GetState(*NI); + // Setup 'this' region, so that the ctor is evaluated on the object pointed + // by 'Dest'. + state = state->bindLoc(loc::MemRegionVal(ThisR), loc::MemRegionVal(Dest)); + ExplodedNode *N = Builder->generateNode(Loc, state, Pred); + if (N) + Dst.Add(N); + } +} + +void ExprEngine::VisitCXXDestructor(const CXXDestructorDecl *DD, + const MemRegion *Dest, + const Stmt *S, + ExplodedNode *Pred, + ExplodedNodeSet &Dst) { + if (!(DD->isThisDeclarationADefinition() && AMgr.shouldInlineCall())) + return; + // Create the context for 'this' region. 
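+  // (The callee stack frame below keys the CXXThisRegion, which is then
+  // bound to 'Dest' so the destructor body runs on the intended object.)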
+  const StackFrameContext *SFC = AMgr.getStackFrame(DD,
+                                                    Pred->getLocationContext(),
+                                                    S, Builder->getBlock(),
+                                                    Builder->getIndex());
+
+  const CXXThisRegion *ThisR = getCXXThisRegion(DD->getParent(), SFC);
+
+  CallEnter PP(S, SFC, Pred->getLocationContext());
+
+  const GRState *state = Pred->getState();
+  state = state->bindLoc(loc::MemRegionVal(ThisR), loc::MemRegionVal(Dest));
+  ExplodedNode *N = Builder->generateNode(PP, state, Pred);
+  if (N)
+    Dst.Add(N);
+}
+
+void ExprEngine::VisitCXXMemberCallExpr(const CXXMemberCallExpr *MCE,
+                                        ExplodedNode *Pred,
+                                        ExplodedNodeSet &Dst) {
+  // Get the method type.
+  const FunctionProtoType *FnType =
+    MCE->getCallee()->getType()->getAs<FunctionProtoType>();
+  assert(FnType && "Method type not available");
+
+  // Evaluate explicit arguments with a worklist.
+  ExplodedNodeSet argsEvaluated;
+  evalArguments(MCE->arg_begin(), MCE->arg_end(), FnType, Pred, argsEvaluated);
+
+  // Evaluate the implicit object argument.
+  ExplodedNodeSet AllargsEvaluated;
+  const MemberExpr *ME = dyn_cast<MemberExpr>(MCE->getCallee()->IgnoreParens());
+  if (!ME)
+    return;
+  Expr *ObjArgExpr = ME->getBase();
+  for (ExplodedNodeSet::iterator I = argsEvaluated.begin(),
+       E = argsEvaluated.end(); I != E; ++I) {
+    Visit(ObjArgExpr, *I, AllargsEvaluated);
+  }
+
+  // Now evaluate the call itself.
+  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
+  assert(MD && "not a CXXMethodDecl?");
+  evalMethodCall(MCE, MD, ObjArgExpr, Pred, AllargsEvaluated, Dst);
+}
+
+void ExprEngine::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *C,
+                                          ExplodedNode *Pred,
+                                          ExplodedNodeSet &Dst) {
+  const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(C->getCalleeDecl());
+  if (!MD) {
+    // If the operator doesn't represent a method call, treat it as a regular
+    // call.
+    VisitCall(C, Pred, C->arg_begin(), C->arg_end(), Dst);
+    return;
+  }
+
+  // Determine the type of function we're calling (if available).
+  const FunctionProtoType *Proto = NULL;
+  QualType FnType = C->getCallee()->IgnoreParens()->getType();
+  if (const PointerType *FnTypePtr = FnType->getAs<PointerType>())
+    Proto = FnTypePtr->getPointeeType()->getAs<FunctionProtoType>();
+
+  // Evaluate the arguments, treating the first one (the object the method
+  // is called on) as an lvalue.
+  ExplodedNodeSet argsEvaluated;
+  evalArguments(C->arg_begin(), C->arg_end(), Proto, Pred, argsEvaluated, true);
+
+  // Now evaluate the call itself.
+  evalMethodCall(C, MD, C->getArg(0), Pred, argsEvaluated, Dst);
+}
+
+void ExprEngine::evalMethodCall(const CallExpr *MCE, const CXXMethodDecl *MD,
+                                const Expr *ThisExpr, ExplodedNode *Pred,
+                                ExplodedNodeSet &Src, ExplodedNodeSet &Dst) {
+  // Allow checkers to pre-visit the member call.
+  ExplodedNodeSet PreVisitChecks;
+  CheckerVisit(MCE, PreVisitChecks, Src, PreVisitStmtCallback);
+
+  if (!(MD->isThisDeclarationADefinition() && AMgr.shouldInlineCall())) {
+    // FIXME: conservative method call evaluation.
+    CheckerVisit(MCE, Dst, PreVisitChecks, PostVisitStmtCallback);
+    return;
+  }
+
+  const StackFrameContext *SFC = AMgr.getStackFrame(MD,
+                                                    Pred->getLocationContext(),
+                                                    MCE,
+                                                    Builder->getBlock(),
+                                                    Builder->getIndex());
+  const CXXThisRegion *ThisR = getCXXThisRegion(MD, SFC);
+  CallEnter Loc(MCE, SFC, Pred->getLocationContext());
+  for (ExplodedNodeSet::iterator I = PreVisitChecks.begin(),
+       E = PreVisitChecks.end(); I != E; ++I) {
+    // Set up 'this' region.
+ const GRState *state = GetState(*I); + state = state->bindLoc(loc::MemRegionVal(ThisR), state->getSVal(ThisExpr)); + Dst.Add(Builder->generateNode(Loc, state, *I)); + } +} + +void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred, + ExplodedNodeSet &Dst) { + if (CNE->isArray()) { + // FIXME: allocating an array has not been handled. + return; + } + + unsigned Count = Builder->getCurrentBlockCount(); + DefinedOrUnknownSVal symVal = + svalBuilder.getConjuredSymbolVal(NULL, CNE, CNE->getType(), Count); + const MemRegion *NewReg = cast<loc::MemRegionVal>(symVal).getRegion(); + + QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType(); + + const ElementRegion *EleReg = + getStoreManager().GetElementZeroRegion(NewReg, ObjTy); + + // Evaluate constructor arguments. + const FunctionProtoType *FnType = NULL; + const CXXConstructorDecl *CD = CNE->getConstructor(); + if (CD) + FnType = CD->getType()->getAs<FunctionProtoType>(); + ExplodedNodeSet argsEvaluated; + evalArguments(CNE->constructor_arg_begin(), CNE->constructor_arg_end(), + FnType, Pred, argsEvaluated); + + // Initialize the object region and bind the 'new' expression. + for (ExplodedNodeSet::iterator I = argsEvaluated.begin(), + E = argsEvaluated.end(); I != E; ++I) { + const GRState *state = GetState(*I); + + if (ObjTy->isRecordType()) { + state = state->invalidateRegion(EleReg, CNE, Count); + } else { + if (CNE->hasInitializer()) { + SVal V = state->getSVal(*CNE->constructor_arg_begin()); + state = state->bindLoc(loc::MemRegionVal(EleReg), V); + } else { + // Explicitly set to undefined, because currently we retrieve symbolic + // value from symbolic region. + state = state->bindLoc(loc::MemRegionVal(EleReg), UndefinedVal()); + } + } + state = state->BindExpr(CNE, loc::MemRegionVal(EleReg)); + MakeNode(Dst, CNE, *I, state); + } +} + +void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE, + ExplodedNode *Pred,ExplodedNodeSet &Dst) { + // Should do more checking. + ExplodedNodeSet Argevaluated; + Visit(CDE->getArgument(), Pred, Argevaluated); + for (ExplodedNodeSet::iterator I = Argevaluated.begin(), + E = Argevaluated.end(); I != E; ++I) { + const GRState *state = GetState(*I); + MakeNode(Dst, CDE, *I, state); + } +} + +void ExprEngine::VisitCXXThisExpr(const CXXThisExpr *TE, ExplodedNode *Pred, + ExplodedNodeSet &Dst) { + // Get the this object region from StoreManager. + const MemRegion *R = + svalBuilder.getRegionManager().getCXXThisRegion( + getContext().getCanonicalType(TE->getType()), + Pred->getLocationContext()); + + const GRState *state = GetState(Pred); + SVal V = state->getSVal(loc::MemRegionVal(R)); + MakeNode(Dst, TE, Pred, state->BindExpr(TE, V)); +} diff --git a/lib/StaticAnalyzer/Core/Checker.cpp b/lib/StaticAnalyzer/Core/Checker.cpp new file mode 100644 index 0000000..a014eec --- /dev/null +++ b/lib/StaticAnalyzer/Core/Checker.cpp @@ -0,0 +1,35 @@ +//== Checker.h - Abstract interface for checkers -----------------*- C++ -*--=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines Checker and CheckerVisitor, classes used for creating +// domain-specific checks. 
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/Checker.h"
+using namespace clang;
+using namespace ento;
+
+Checker::~Checker() {}
+
+CheckerContext::~CheckerContext() {
+ // Do we need to autotransition? 'Dst' can get populated in a variety of
+ // ways, including 'addTransition()' adding the predecessor node to Dst
+ // without actually generating a new node. We also shouldn't autotransition
+ // if we are building sinks or we generated a node and decided not to
+ // add it as a transition.
+ if (Dst.size() == size && !B.BuildSinks && !B.hasGeneratedNode) {
+ if (ST && ST != B.GetState(Pred)) {
+ static int autoTransitionTag = 0;
+ addTransition(ST, &autoTransitionTag);
+ }
+ else
+ Dst.Add(Pred);
+ }
+}
diff --git a/lib/StaticAnalyzer/Core/CheckerHelpers.cpp b/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
new file mode 100644
index 0000000..28df695
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
@@ -0,0 +1,80 @@
+//===---- CheckerHelpers.cpp - Helper functions for checkers ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines several static functions for use in checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/AST/Expr.h"
+
+// Recursively find any substatements containing macros
+bool clang::ento::containsMacro(const Stmt *S) {
+ if (S->getLocStart().isMacroID())
+ return true;
+
+ if (S->getLocEnd().isMacroID())
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsMacro(child))
+ return true;
+
+ return false;
+}
+
+// Recursively find any substatements containing enum constants
+bool clang::ento::containsEnum(const Stmt *S) {
+ const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
+
+ if (DR && isa<EnumConstantDecl>(DR->getDecl()))
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsEnum(child))
+ return true;
+
+ return false;
+}
+
+// Recursively find any substatements containing static vars
+bool clang::ento::containsStaticLocal(const Stmt *S) {
+ const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
+
+ if (DR)
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl()))
+ if (VD->isStaticLocal())
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsStaticLocal(child))
+ return true;
+
+ return false;
+}
+
+// Recursively find any substatements containing __builtin_offsetof
+bool clang::ento::containsBuiltinOffsetOf(const Stmt *S) {
+ if (isa<OffsetOfExpr>(S))
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsBuiltinOffsetOf(child))
+ return true;
+
+ return false;
+}
diff --git a/lib/StaticAnalyzer/Core/CheckerManager.cpp b/lib/StaticAnalyzer/Core/CheckerManager.cpp
new file mode 100644
index 0000000..1989b82
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -0,0 +1,85 @@
+//===--- CheckerManager.cpp - Static Analyzer Checker Manager 
-------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Defines the Static Analyzer Checker Manager. +// +//===----------------------------------------------------------------------===// + +#include "clang/StaticAnalyzer/Core/CheckerManager.h" +#include "clang/StaticAnalyzer/Core/CheckerProvider.h" +#include "clang/AST/DeclBase.h" + +using namespace clang; +using namespace ento; + +void CheckerManager::runCheckersOnASTDecl(const Decl *D, AnalysisManager& mgr, + BugReporter &BR) { + assert(D); + + unsigned DeclKind = D->getKind(); + CachedDeclCheckers *checkers = 0; + CachedDeclCheckersMapTy::iterator CCI = CachedDeclCheckersMap.find(DeclKind); + if (CCI != CachedDeclCheckersMap.end()) { + checkers = &(CCI->second); + } else { + // Find the checkers that should run for this Decl and cache them. + checkers = &CachedDeclCheckersMap[DeclKind]; + for (unsigned i = 0, e = DeclCheckers.size(); i != e; ++i) { + DeclCheckerInfo &info = DeclCheckers[i]; + if (info.IsForDeclFn(D)) + checkers->push_back(std::make_pair(info.Checker, info.CheckFn)); + } + } + + assert(checkers); + for (CachedDeclCheckers::iterator + I = checkers->begin(), E = checkers->end(); I != E; ++I) { + CheckerRef checker = I->first; + CheckDeclFunc fn = I->second; + fn(checker, D, mgr, BR); + } +} + +void CheckerManager::runCheckersOnASTBody(const Decl *D, AnalysisManager& mgr, + BugReporter &BR) { + assert(D && D->hasBody()); + + for (unsigned i = 0, e = BodyCheckers.size(); i != e; ++i) { + CheckerRef checker = BodyCheckers[i].first; + CheckDeclFunc fn = BodyCheckers[i].second; + fn(checker, D, mgr, BR); + } +} + +void CheckerManager::_registerForDecl(CheckerRef checker, CheckDeclFunc checkfn, + HandlesDeclFunc isForDeclFn) { + DeclCheckerInfo info = { checker, checkfn, isForDeclFn }; + DeclCheckers.push_back(info); +} + +void CheckerManager::_registerForBody(CheckerRef checker, + CheckDeclFunc checkfn) { + BodyCheckers.push_back(std::make_pair(checker, checkfn)); +} + +void CheckerManager::registerCheckersToEngine(ExprEngine &eng) { + for (unsigned i = 0, e = Funcs.size(); i != e; ++i) + Funcs[i](eng); +} + +CheckerManager::~CheckerManager() { + for (unsigned i = 0, e = Checkers.size(); i != e; ++i) { + CheckerRef checker = Checkers[i].first; + Dtor dtor = Checkers[i].second; + dtor(checker); + } +} + +// Anchor for the vtable. +CheckerProvider::~CheckerProvider() { } diff --git a/lib/StaticAnalyzer/Core/CoreEngine.cpp b/lib/StaticAnalyzer/Core/CoreEngine.cpp new file mode 100644 index 0000000..070042a --- /dev/null +++ b/lib/StaticAnalyzer/Core/CoreEngine.cpp @@ -0,0 +1,848 @@ +//==- CoreEngine.cpp - Path-Sensitive Dataflow Engine ------------*- C++ -*-// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a generic engine for intraprocedural, path-sensitive, +// dataflow analysis via graph reachability engine. 
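+//
+// The worklist implementations below (DFS, BFS, and a hybrid that visits
+// blocks in BFS order but statements within a block depth-first) only change
+// the order in which reachable states are explored, not the set of states
+// that is eventually reached.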
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/Index/TranslationUnit.h"
+#include "clang/AST/Expr.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/ADT/DenseMap.h"
+#include <vector>
+#include <queue>
+
+using llvm::cast;
+using llvm::isa;
+using namespace clang;
+using namespace ento;
+
+// This should be removed in the future.
+namespace clang {
+namespace ento {
+TransferFuncs* MakeCFRefCountTF(ASTContext& Ctx, bool GCEnabled,
+ const LangOptions& lopts);
+}
+}
+
+//===----------------------------------------------------------------------===//
+// Worklist classes for exploration of reachable states.
+//===----------------------------------------------------------------------===//
+
+WorkList::Visitor::~Visitor() {}
+
+namespace {
+class DFS : public WorkList {
+ llvm::SmallVector<WorkListUnit,20> Stack;
+public:
+ virtual bool hasWork() const {
+ return !Stack.empty();
+ }
+
+ virtual void enqueue(const WorkListUnit& U) {
+ Stack.push_back(U);
+ }
+
+ virtual WorkListUnit dequeue() {
+ assert (!Stack.empty());
+ const WorkListUnit& U = Stack.back();
+ Stack.pop_back(); // This technically "invalidates" U, but we are fine.
+ return U;
+ }
+
+ virtual bool visitItemsInWorkList(Visitor &V) {
+ for (llvm::SmallVectorImpl<WorkListUnit>::iterator
+ I = Stack.begin(), E = Stack.end(); I != E; ++I) {
+ if (V.visit(*I))
+ return true;
+ }
+ return false;
+ }
+};
+
+class BFS : public WorkList {
+ std::deque<WorkListUnit> Queue;
+public:
+ virtual bool hasWork() const {
+ return !Queue.empty();
+ }
+
+ virtual void enqueue(const WorkListUnit& U) {
+ Queue.push_front(U);
+ }
+
+ virtual WorkListUnit dequeue() {
+ WorkListUnit U = Queue.front();
+ Queue.pop_front();
+ return U;
+ }
+
+ virtual bool visitItemsInWorkList(Visitor &V) {
+ for (std::deque<WorkListUnit>::iterator
+ I = Queue.begin(), E = Queue.end(); I != E; ++I) {
+ if (V.visit(*I))
+ return true;
+ }
+ return false;
+ }
+};
+
+} // end anonymous namespace
+
+// Place the dtor for WorkList here because it contains virtual member
+// functions, and we want the code for the dtor generated in one compilation
+// unit.
+WorkList::~WorkList() {}
+
+WorkList *WorkList::makeDFS() { return new DFS(); }
+WorkList *WorkList::makeBFS() { return new BFS(); }
+
+namespace {
+ class BFSBlockDFSContents : public WorkList {
+ std::deque<WorkListUnit> Queue;
+ llvm::SmallVector<WorkListUnit,20> Stack;
+ public:
+ virtual bool hasWork() const {
+ return !Queue.empty() || !Stack.empty();
+ }
+
+ virtual void enqueue(const WorkListUnit& U) {
+ if (isa<BlockEntrance>(U.getNode()->getLocation()))
+ Queue.push_front(U);
+ else
+ Stack.push_back(U);
+ }
+
+ virtual WorkListUnit dequeue() {
+ // Process all basic blocks to completion.
+ if (!Stack.empty()) {
+ const WorkListUnit& U = Stack.back();
+ Stack.pop_back(); // This technically "invalidates" U, but we are fine.
+ return U;
+ }
+
+ assert(!Queue.empty());
+ // Don't use a const reference. The subsequent pop_front() would make it
+ // unsafe.
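+ // (Queue.front() returns a reference into the deque; once pop_front()
+ // runs, such a reference would dangle, so the unit is copied out by value.)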
+ WorkListUnit U = Queue.front(); + Queue.pop_front(); + return U; + } + virtual bool visitItemsInWorkList(Visitor &V) { + for (llvm::SmallVectorImpl<WorkListUnit>::iterator + I = Stack.begin(), E = Stack.end(); I != E; ++I) { + if (V.visit(*I)) + return true; + } + for (std::deque<WorkListUnit>::iterator + I = Queue.begin(), E = Queue.end(); I != E; ++I) { + if (V.visit(*I)) + return true; + } + return false; + } + + }; +} // end anonymous namespace + +WorkList* WorkList::makeBFSBlockDFSContents() { + return new BFSBlockDFSContents(); +} + +//===----------------------------------------------------------------------===// +// Core analysis engine. +//===----------------------------------------------------------------------===// + +/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps. +bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps, + const GRState *InitState) { + + if (G->num_roots() == 0) { // Initialize the analysis by constructing + // the root if none exists. + + const CFGBlock* Entry = &(L->getCFG()->getEntry()); + + assert (Entry->empty() && + "Entry block must be empty."); + + assert (Entry->succ_size() == 1 && + "Entry block must have 1 successor."); + + // Get the solitary successor. + const CFGBlock* Succ = *(Entry->succ_begin()); + + // Construct an edge representing the + // starting location in the function. + BlockEdge StartLoc(Entry, Succ, L); + + // Set the current block counter to being empty. + WList->setBlockCounter(BCounterFactory.GetEmptyCounter()); + + if (!InitState) + // Generate the root. + generateNode(StartLoc, SubEng.getInitialState(L), 0); + else + generateNode(StartLoc, InitState, 0); + } + + // Check if we have a steps limit + bool UnlimitedSteps = Steps == 0; + + while (WList->hasWork()) { + if (!UnlimitedSteps) { + if (Steps == 0) + break; + --Steps; + } + + const WorkListUnit& WU = WList->dequeue(); + + // Set the current block counter. + WList->setBlockCounter(WU.getBlockCounter()); + + // Retrieve the node. + ExplodedNode* Node = WU.getNode(); + + // Dispatch on the location type. 
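+ // Block edges and entrances drive intraprocedural control flow, the
+ // CallEnter/CallExit points drive inlined calls, and everything else is
+ // treated as a statement-level program point.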
+ switch (Node->getLocation().getKind()) { + case ProgramPoint::BlockEdgeKind: + HandleBlockEdge(cast<BlockEdge>(Node->getLocation()), Node); + break; + + case ProgramPoint::BlockEntranceKind: + HandleBlockEntrance(cast<BlockEntrance>(Node->getLocation()), Node); + break; + + case ProgramPoint::BlockExitKind: + assert (false && "BlockExit location never occur in forward analysis."); + break; + + case ProgramPoint::CallEnterKind: + HandleCallEnter(cast<CallEnter>(Node->getLocation()), WU.getBlock(), + WU.getIndex(), Node); + break; + + case ProgramPoint::CallExitKind: + HandleCallExit(cast<CallExit>(Node->getLocation()), Node); + break; + + default: + assert(isa<PostStmt>(Node->getLocation()) || + isa<PostInitializer>(Node->getLocation())); + HandlePostStmt(WU.getBlock(), WU.getIndex(), Node); + break; + } + } + + SubEng.processEndWorklist(hasWorkRemaining()); + return WList->hasWork(); +} + +void CoreEngine::ExecuteWorkListWithInitialState(const LocationContext *L, + unsigned Steps, + const GRState *InitState, + ExplodedNodeSet &Dst) { + ExecuteWorkList(L, Steps, InitState); + for (llvm::SmallVectorImpl<ExplodedNode*>::iterator I = G->EndNodes.begin(), + E = G->EndNodes.end(); I != E; ++I) { + Dst.Add(*I); + } +} + +void CoreEngine::HandleCallEnter(const CallEnter &L, const CFGBlock *Block, + unsigned Index, ExplodedNode *Pred) { + CallEnterNodeBuilder Builder(*this, Pred, L.getCallExpr(), + L.getCalleeContext(), Block, Index); + SubEng.processCallEnter(Builder); +} + +void CoreEngine::HandleCallExit(const CallExit &L, ExplodedNode *Pred) { + CallExitNodeBuilder Builder(*this, Pred); + SubEng.processCallExit(Builder); +} + +void CoreEngine::HandleBlockEdge(const BlockEdge& L, ExplodedNode* Pred) { + + const CFGBlock* Blk = L.getDst(); + + // Check if we are entering the EXIT block. + if (Blk == &(L.getLocationContext()->getCFG()->getExit())) { + + assert (L.getLocationContext()->getCFG()->getExit().size() == 0 + && "EXIT block cannot contain Stmts."); + + // Process the final state transition. + EndOfFunctionNodeBuilder Builder(Blk, Pred, this); + SubEng.processEndOfFunction(Builder); + + // This path is done. Don't enqueue any more nodes. + return; + } + + // Call into the subengine to process entering the CFGBlock. + ExplodedNodeSet dstNodes; + BlockEntrance BE(Blk, Pred->getLocationContext()); + GenericNodeBuilder<BlockEntrance> nodeBuilder(*this, Pred, BE); + SubEng.processCFGBlockEntrance(dstNodes, nodeBuilder); + + if (dstNodes.empty()) { + if (!nodeBuilder.hasGeneratedNode) { + // Auto-generate a node and enqueue it to the worklist. + generateNode(BE, Pred->State, Pred); + } + } + else { + for (ExplodedNodeSet::iterator I = dstNodes.begin(), E = dstNodes.end(); + I != E; ++I) { + WList->enqueue(*I); + } + } + + for (llvm::SmallVectorImpl<ExplodedNode*>::const_iterator + I = nodeBuilder.sinks().begin(), E = nodeBuilder.sinks().end(); + I != E; ++I) { + blocksAborted.push_back(std::make_pair(L, *I)); + } +} + +void CoreEngine::HandleBlockEntrance(const BlockEntrance& L, + ExplodedNode* Pred) { + + // Increment the block counter. + BlockCounter Counter = WList->getBlockCounter(); + Counter = BCounterFactory.IncrementCount(Counter, + Pred->getLocationContext()->getCurrentStackFrame(), + L.getBlock()->getBlockID()); + WList->setBlockCounter(Counter); + + // Process the entrance of the block. 
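+ // An empty block has no first element; in that case control falls
+ // straight through to the terminator logic in HandleBlockExit.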
+ if (CFGElement E = L.getFirstElement()) { + StmtNodeBuilder Builder(L.getBlock(), 0, Pred, this, + SubEng.getStateManager()); + SubEng.processCFGElement(E, Builder); + } + else + HandleBlockExit(L.getBlock(), Pred); +} + +void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode* Pred) { + + if (const Stmt* Term = B->getTerminator()) { + switch (Term->getStmtClass()) { + default: + assert(false && "Analysis for this terminator not implemented."); + break; + + case Stmt::BinaryOperatorClass: // '&&' and '||' + HandleBranch(cast<BinaryOperator>(Term)->getLHS(), Term, B, Pred); + return; + + case Stmt::BinaryConditionalOperatorClass: + case Stmt::ConditionalOperatorClass: + HandleBranch(cast<AbstractConditionalOperator>(Term)->getCond(), + Term, B, Pred); + return; + + // FIXME: Use constant-folding in CFG construction to simplify this + // case. + + case Stmt::ChooseExprClass: + HandleBranch(cast<ChooseExpr>(Term)->getCond(), Term, B, Pred); + return; + + case Stmt::DoStmtClass: + HandleBranch(cast<DoStmt>(Term)->getCond(), Term, B, Pred); + return; + + case Stmt::ForStmtClass: + HandleBranch(cast<ForStmt>(Term)->getCond(), Term, B, Pred); + return; + + case Stmt::ContinueStmtClass: + case Stmt::BreakStmtClass: + case Stmt::GotoStmtClass: + break; + + case Stmt::IfStmtClass: + HandleBranch(cast<IfStmt>(Term)->getCond(), Term, B, Pred); + return; + + case Stmt::IndirectGotoStmtClass: { + // Only 1 successor: the indirect goto dispatch block. + assert (B->succ_size() == 1); + + IndirectGotoNodeBuilder + builder(Pred, B, cast<IndirectGotoStmt>(Term)->getTarget(), + *(B->succ_begin()), this); + + SubEng.processIndirectGoto(builder); + return; + } + + case Stmt::ObjCForCollectionStmtClass: { + // In the case of ObjCForCollectionStmt, it appears twice in a CFG: + // + // (1) inside a basic block, which represents the binding of the + // 'element' variable to a value. + // (2) in a terminator, which represents the branch. + // + // For (1), subengines will bind a value (i.e., 0 or 1) indicating + // whether or not collection contains any more elements. We cannot + // just test to see if the element is nil because a container can + // contain nil elements. + HandleBranch(Term, Term, B, Pred); + return; + } + + case Stmt::SwitchStmtClass: { + SwitchNodeBuilder builder(Pred, B, cast<SwitchStmt>(Term)->getCond(), + this); + + SubEng.processSwitch(builder); + return; + } + + case Stmt::WhileStmtClass: + HandleBranch(cast<WhileStmt>(Term)->getCond(), Term, B, Pred); + return; + } + } + + assert (B->succ_size() == 1 && + "Blocks with no terminator should have at most 1 successor."); + + generateNode(BlockEdge(B, *(B->succ_begin()), Pred->getLocationContext()), + Pred->State, Pred); +} + +void CoreEngine::HandleBranch(const Stmt* Cond, const Stmt* Term, + const CFGBlock * B, ExplodedNode* Pred) { + assert(B->succ_size() == 2); + BranchNodeBuilder Builder(B, *(B->succ_begin()), *(B->succ_begin()+1), + Pred, this); + SubEng.processBranch(Cond, Term, Builder); +} + +void CoreEngine::HandlePostStmt(const CFGBlock* B, unsigned StmtIdx, + ExplodedNode* Pred) { + assert (!B->empty()); + + if (StmtIdx == B->size()) + HandleBlockExit(B, Pred); + else { + StmtNodeBuilder Builder(B, StmtIdx, Pred, this, + SubEng.getStateManager()); + SubEng.processCFGElement((*B)[StmtIdx], Builder); + } +} + +/// generateNode - Utility method to generate nodes, hook up successors, +/// and add nodes to the worklist. 
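+/// Nodes are uniqued on their (ProgramPoint, state) pair; only a freshly
+/// created node is enqueued, which keeps cyclic paths from being re-explored
+/// endlessly.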
+void CoreEngine::generateNode(const ProgramPoint& Loc, + const GRState* State, ExplodedNode* Pred) { + + bool IsNew; + ExplodedNode* Node = G->getNode(Loc, State, &IsNew); + + if (Pred) + Node->addPredecessor(Pred, *G); // Link 'Node' with its predecessor. + else { + assert (IsNew); + G->addRoot(Node); // 'Node' has no predecessor. Make it a root. + } + + // Only add 'Node' to the worklist if it was freshly generated. + if (IsNew) WList->enqueue(Node); +} + +ExplodedNode * +GenericNodeBuilderImpl::generateNodeImpl(const GRState *state, + ExplodedNode *pred, + ProgramPoint programPoint, + bool asSink) { + + hasGeneratedNode = true; + bool isNew; + ExplodedNode *node = engine.getGraph().getNode(programPoint, state, &isNew); + if (pred) + node->addPredecessor(pred, engine.getGraph()); + if (isNew) { + if (asSink) { + node->markAsSink(); + sinksGenerated.push_back(node); + } + return node; + } + return 0; +} + +StmtNodeBuilder::StmtNodeBuilder(const CFGBlock* b, unsigned idx, + ExplodedNode* N, CoreEngine* e, + GRStateManager &mgr) + : Eng(*e), B(*b), Idx(idx), Pred(N), Mgr(mgr), + PurgingDeadSymbols(false), BuildSinks(false), hasGeneratedNode(false), + PointKind(ProgramPoint::PostStmtKind), Tag(0) { + Deferred.insert(N); + CleanedState = Pred->getState(); +} + +StmtNodeBuilder::~StmtNodeBuilder() { + for (DeferredTy::iterator I=Deferred.begin(), E=Deferred.end(); I!=E; ++I) + if (!(*I)->isSink()) + GenerateAutoTransition(*I); +} + +void StmtNodeBuilder::GenerateAutoTransition(ExplodedNode* N) { + assert (!N->isSink()); + + // Check if this node entered a callee. + if (isa<CallEnter>(N->getLocation())) { + // Still use the index of the CallExpr. It's needed to create the callee + // StackFrameContext. + Eng.WList->enqueue(N, &B, Idx); + return; + } + + // Do not create extra nodes. Move to the next CFG element. + if (isa<PostInitializer>(N->getLocation())) { + Eng.WList->enqueue(N, &B, Idx+1); + return; + } + + PostStmt Loc(getStmt(), N->getLocationContext()); + + if (Loc == N->getLocation()) { + // Note: 'N' should be a fresh node because otherwise it shouldn't be + // a member of Deferred. 
+ Eng.WList->enqueue(N, &B, Idx+1); + return; + } + + bool IsNew; + ExplodedNode* Succ = Eng.G->getNode(Loc, N->State, &IsNew); + Succ->addPredecessor(N, *Eng.G); + + if (IsNew) + Eng.WList->enqueue(Succ, &B, Idx+1); +} + +ExplodedNode* StmtNodeBuilder::MakeNode(ExplodedNodeSet& Dst, const Stmt* S, + ExplodedNode* Pred, const GRState* St, + ProgramPoint::Kind K) { + + ExplodedNode* N = generateNode(S, St, Pred, K); + + if (N) { + if (BuildSinks) + N->markAsSink(); + else + Dst.Add(N); + } + + return N; +} + +static ProgramPoint GetProgramPoint(const Stmt *S, ProgramPoint::Kind K, + const LocationContext *LC, const void *tag){ + switch (K) { + default: + assert(false && "Unhandled ProgramPoint kind"); + case ProgramPoint::PreStmtKind: + return PreStmt(S, LC, tag); + case ProgramPoint::PostStmtKind: + return PostStmt(S, LC, tag); + case ProgramPoint::PreLoadKind: + return PreLoad(S, LC, tag); + case ProgramPoint::PostLoadKind: + return PostLoad(S, LC, tag); + case ProgramPoint::PreStoreKind: + return PreStore(S, LC, tag); + case ProgramPoint::PostStoreKind: + return PostStore(S, LC, tag); + case ProgramPoint::PostLValueKind: + return PostLValue(S, LC, tag); + case ProgramPoint::PostPurgeDeadSymbolsKind: + return PostPurgeDeadSymbols(S, LC, tag); + } +} + +ExplodedNode* +StmtNodeBuilder::generateNodeInternal(const Stmt* S, const GRState* state, + ExplodedNode* Pred, + ProgramPoint::Kind K, + const void *tag) { + + const ProgramPoint &L = GetProgramPoint(S, K, Pred->getLocationContext(),tag); + return generateNodeInternal(L, state, Pred); +} + +ExplodedNode* +StmtNodeBuilder::generateNodeInternal(const ProgramPoint &Loc, + const GRState* State, + ExplodedNode* Pred) { + bool IsNew; + ExplodedNode* N = Eng.G->getNode(Loc, State, &IsNew); + N->addPredecessor(Pred, *Eng.G); + Deferred.erase(Pred); + + if (IsNew) { + Deferred.insert(N); + return N; + } + + return NULL; +} + +ExplodedNode* BranchNodeBuilder::generateNode(const GRState* State, + bool branch) { + + // If the branch has been marked infeasible we should not generate a node. + if (!isFeasible(branch)) + return NULL; + + bool IsNew; + + ExplodedNode* Succ = + Eng.G->getNode(BlockEdge(Src,branch ? 
DstT:DstF,Pred->getLocationContext()),
+ State, &IsNew);
+
+ Succ->addPredecessor(Pred, *Eng.G);
+
+ if (branch)
+ GeneratedTrue = true;
+ else
+ GeneratedFalse = true;
+
+ if (IsNew) {
+ Deferred.push_back(Succ);
+ return Succ;
+ }
+
+ return NULL;
+}
+
+BranchNodeBuilder::~BranchNodeBuilder() {
+ if (!GeneratedTrue) generateNode(Pred->State, true);
+ if (!GeneratedFalse) generateNode(Pred->State, false);
+
+ for (DeferredTy::iterator I=Deferred.begin(), E=Deferred.end(); I!=E; ++I)
+ if (!(*I)->isSink()) Eng.WList->enqueue(*I);
+}
+
+
+ExplodedNode*
+IndirectGotoNodeBuilder::generateNode(const iterator& I, const GRState* St,
+ bool isSink) {
+ bool IsNew;
+
+ ExplodedNode* Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
+ Pred->getLocationContext()), St, &IsNew);
+
+ Succ->addPredecessor(Pred, *Eng.G);
+
+ if (IsNew) {
+
+ if (isSink)
+ Succ->markAsSink();
+ else
+ Eng.WList->enqueue(Succ);
+
+ return Succ;
+ }
+
+ return NULL;
+}
+
+
+ExplodedNode*
+SwitchNodeBuilder::generateCaseStmtNode(const iterator& I, const GRState* St){
+
+ bool IsNew;
+
+ ExplodedNode* Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
+ Pred->getLocationContext()), St, &IsNew);
+ Succ->addPredecessor(Pred, *Eng.G);
+
+ if (IsNew) {
+ Eng.WList->enqueue(Succ);
+ return Succ;
+ }
+
+ return NULL;
+}
+
+
+ExplodedNode*
+SwitchNodeBuilder::generateDefaultCaseNode(const GRState* St, bool isSink) {
+
+ // Get the block for the default case.
+ assert (Src->succ_rbegin() != Src->succ_rend());
+ CFGBlock* DefaultBlock = *Src->succ_rbegin();
+
+ bool IsNew;
+
+ ExplodedNode* Succ = Eng.G->getNode(BlockEdge(Src, DefaultBlock,
+ Pred->getLocationContext()), St, &IsNew);
+ Succ->addPredecessor(Pred, *Eng.G);
+
+ if (IsNew) {
+ if (isSink)
+ Succ->markAsSink();
+ else
+ Eng.WList->enqueue(Succ);
+
+ return Succ;
+ }
+
+ return NULL;
+}
+
+EndOfFunctionNodeBuilder::~EndOfFunctionNodeBuilder() {
+ // Auto-generate an EOP node if one has not been generated.
+ if (!hasGeneratedNode) {
+ // If we are in an inlined call, generate a CallExit node.
+ if (Pred->getLocationContext()->getParent())
+ GenerateCallExitNode(Pred->State);
+ else
+ generateNode(Pred->State);
+ }
+}
+
+ExplodedNode*
+EndOfFunctionNodeBuilder::generateNode(const GRState* State, const void *tag,
+ ExplodedNode* P) {
+ hasGeneratedNode = true;
+ bool IsNew;
+
+ ExplodedNode* Node = Eng.G->getNode(BlockEntrance(&B,
+ Pred->getLocationContext(), tag), State, &IsNew);
+
+ Node->addPredecessor(P ? P : Pred, *Eng.G);
+
+ if (IsNew) {
+ Eng.G->addEndOfPath(Node);
+ return Node;
+ }
+
+ return NULL;
+}
+
+void EndOfFunctionNodeBuilder::GenerateCallExitNode(const GRState *state) {
+ hasGeneratedNode = true;
+ // Create a CallExit node and enqueue it.
+ const StackFrameContext *LocCtx
+ = cast<StackFrameContext>(Pred->getLocationContext());
+ const Stmt *CE = LocCtx->getCallSite();
+
+ // Use the callee location context.
+ CallExit Loc(CE, LocCtx);
+
+ bool isNew;
+ ExplodedNode *Node = Eng.G->getNode(Loc, state, &isNew);
+ Node->addPredecessor(Pred, *Eng.G);
+
+ if (isNew)
+ Eng.WList->enqueue(Node);
+}
+
+
+void CallEnterNodeBuilder::generateNode(const GRState *state) {
+ // Check if the callee is in the same translation unit.
+ if (CalleeCtx->getTranslationUnit() !=
+ Pred->getLocationContext()->getTranslationUnit()) {
+ // Create a new engine. We must be careful that the new engine should not
+ // reference data structures owned by the old engine.
+
+ AnalysisManager &OldMgr = Eng.SubEng.getAnalysisManager();
+
+ // Get the callee's translation unit.
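+ // Inlining across translation units cannot reuse this engine's managers,
+ // so a parallel AnalysisManager and ExprEngine are constructed for the
+ // callee's ASTContext below.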
+ idx::TranslationUnit *TU = CalleeCtx->getTranslationUnit(); + + // Create a new AnalysisManager with components of the callee's + // TranslationUnit. + // The Diagnostic is actually shared when we create ASTUnits from AST files. + AnalysisManager AMgr(TU->getASTContext(), TU->getDiagnostic(), + OldMgr.getLangOptions(), + OldMgr.getPathDiagnosticClient(), + OldMgr.getStoreManagerCreator(), + OldMgr.getConstraintManagerCreator(), + OldMgr.getCheckerManager(), + OldMgr.getIndexer(), + OldMgr.getMaxNodes(), OldMgr.getMaxVisit(), + OldMgr.shouldVisualizeGraphviz(), + OldMgr.shouldVisualizeUbigraph(), + OldMgr.shouldPurgeDead(), + OldMgr.shouldEagerlyAssume(), + OldMgr.shouldTrimGraph(), + OldMgr.shouldInlineCall(), + OldMgr.getAnalysisContextManager().getUseUnoptimizedCFG(), + OldMgr.getAnalysisContextManager().getAddImplicitDtors(), + OldMgr.getAnalysisContextManager().getAddInitializers(), + OldMgr.shouldEagerlyTrimExplodedGraph()); + llvm::OwningPtr<TransferFuncs> TF(MakeCFRefCountTF(AMgr.getASTContext(), + /* GCEnabled */ false, + AMgr.getLangOptions())); + // Create the new engine. + ExprEngine NewEng(AMgr, TF.take()); + + // Create the new LocationContext. + AnalysisContext *NewAnaCtx = AMgr.getAnalysisContext(CalleeCtx->getDecl(), + CalleeCtx->getTranslationUnit()); + const StackFrameContext *OldLocCtx = CalleeCtx; + const StackFrameContext *NewLocCtx = AMgr.getStackFrame(NewAnaCtx, + OldLocCtx->getParent(), + OldLocCtx->getCallSite(), + OldLocCtx->getCallSiteBlock(), + OldLocCtx->getIndex()); + + // Now create an initial state for the new engine. + const GRState *NewState = NewEng.getStateManager().MarshalState(state, + NewLocCtx); + ExplodedNodeSet ReturnNodes; + NewEng.ExecuteWorkListWithInitialState(NewLocCtx, AMgr.getMaxNodes(), + NewState, ReturnNodes); + return; + } + + // Get the callee entry block. + const CFGBlock *Entry = &(CalleeCtx->getCFG()->getEntry()); + assert(Entry->empty()); + assert(Entry->succ_size() == 1); + + // Get the solitary successor. + const CFGBlock *SuccB = *(Entry->succ_begin()); + + // Construct an edge representing the starting location in the callee. + BlockEdge Loc(Entry, SuccB, CalleeCtx); + + bool isNew; + ExplodedNode *Node = Eng.G->getNode(Loc, state, &isNew); + Node->addPredecessor(const_cast<ExplodedNode*>(Pred), *Eng.G); + + if (isNew) + Eng.WList->enqueue(Node); +} + +void CallExitNodeBuilder::generateNode(const GRState *state) { + // Get the callee's location context. + const StackFrameContext *LocCtx + = cast<StackFrameContext>(Pred->getLocationContext()); + // When exiting an implicit automatic obj dtor call, the callsite is the Stmt + // that triggers the dtor. + PostStmt Loc(LocCtx->getCallSite(), LocCtx->getParent()); + bool isNew; + ExplodedNode *Node = Eng.G->getNode(Loc, state, &isNew); + Node->addPredecessor(const_cast<ExplodedNode*>(Pred), *Eng.G); + if (isNew) + Eng.WList->enqueue(Node, LocCtx->getCallSiteBlock(), + LocCtx->getIndex() + 1); +} diff --git a/lib/StaticAnalyzer/Core/Environment.cpp b/lib/StaticAnalyzer/Core/Environment.cpp new file mode 100644 index 0000000..ecaff29 --- /dev/null +++ b/lib/StaticAnalyzer/Core/Environment.cpp @@ -0,0 +1,234 @@ +//== Environment.cpp - Map from Stmt* to Locations/Values -------*- C++ -*--==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Environment and EnvironmentManager classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+
+using namespace clang;
+using namespace ento;
+
+SVal Environment::lookupExpr(const Stmt* E) const {
+ const SVal* X = ExprBindings.lookup(E);
+ if (X) {
+ SVal V = *X;
+ return V;
+ }
+ return UnknownVal();
+}
+
+SVal Environment::getSVal(const Stmt *E, SValBuilder& svalBuilder) const {
+ for (;;) {
+ switch (E->getStmtClass()) {
+ case Stmt::AddrLabelExprClass:
+ return svalBuilder.makeLoc(cast<AddrLabelExpr>(E));
+ case Stmt::ParenExprClass:
+ // ParenExprs are no-ops.
+ E = cast<ParenExpr>(E)->getSubExpr();
+ continue;
+ case Stmt::CharacterLiteralClass: {
+ const CharacterLiteral* C = cast<CharacterLiteral>(E);
+ return svalBuilder.makeIntVal(C->getValue(), C->getType());
+ }
+ case Stmt::CXXBoolLiteralExprClass: {
+ const SVal *X = ExprBindings.lookup(E);
+ if (X)
+ return *X;
+ else
+ return svalBuilder.makeBoolVal(cast<CXXBoolLiteralExpr>(E));
+ }
+ case Stmt::IntegerLiteralClass: {
+ // In C++, this expression may have been bound to a temporary object.
+ SVal const *X = ExprBindings.lookup(E);
+ if (X)
+ return *X;
+ else
+ return svalBuilder.makeIntVal(cast<IntegerLiteral>(E));
+ }
+ case Stmt::ImplicitCastExprClass:
+ case Stmt::CXXFunctionalCastExprClass:
+ case Stmt::CStyleCastExprClass: {
+ // We blast through no-op casts to get the descendant
+ // subexpression that has a value.
+ const CastExpr* C = cast<CastExpr>(E);
+ QualType CT = C->getType();
+ if (CT->isVoidType())
+ return UnknownVal();
+ if (C->getCastKind() == CK_NoOp) {
+ E = C->getSubExpr();
+ continue;
+ }
+ break;
+ }
+ case Stmt::ExprWithCleanupsClass:
+ E = cast<ExprWithCleanups>(E)->getSubExpr();
+ continue;
+ case Stmt::CXXBindTemporaryExprClass:
+ E = cast<CXXBindTemporaryExpr>(E)->getSubExpr();
+ continue;
+ // Handle all other Stmt* using a lookup.
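+ // (i.e., fall through to the lookupExpr() call below)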
+ default: + break; + }; + break; + } + return lookupExpr(E); +} + +Environment EnvironmentManager::bindExpr(Environment Env, const Stmt *S, + SVal V, bool Invalidate) { + assert(S); + + if (V.isUnknown()) { + if (Invalidate) + return Environment(F.remove(Env.ExprBindings, S)); + else + return Env; + } + + return Environment(F.add(Env.ExprBindings, S, V)); +} + +static inline const Stmt *MakeLocation(const Stmt *S) { + return (const Stmt*) (((uintptr_t) S) | 0x1); +} + +Environment EnvironmentManager::bindExprAndLocation(Environment Env, + const Stmt *S, + SVal location, SVal V) { + return Environment(F.add(F.add(Env.ExprBindings, MakeLocation(S), location), + S, V)); +} + +namespace { +class MarkLiveCallback : public SymbolVisitor { + SymbolReaper &SymReaper; +public: + MarkLiveCallback(SymbolReaper &symreaper) : SymReaper(symreaper) {} + bool VisitSymbol(SymbolRef sym) { SymReaper.markLive(sym); return true; } +}; +} // end anonymous namespace + +static bool isBlockExprInCallers(const Stmt *E, const LocationContext *LC) { + const LocationContext *ParentLC = LC->getParent(); + while (ParentLC) { + CFG &C = *ParentLC->getCFG(); + if (C.isBlkExpr(E)) + return true; + ParentLC = ParentLC->getParent(); + } + + return false; +} + +// In addition to mapping from Stmt * - > SVals in the Environment, we also +// maintain a mapping from Stmt * -> SVals (locations) that were used during +// a load and store. +static inline bool IsLocation(const Stmt *S) { + return (bool) (((uintptr_t) S) & 0x1); +} + +// removeDeadBindings: +// - Remove subexpression bindings. +// - Remove dead block expression bindings. +// - Keep live block expression bindings: +// - Mark their reachable symbols live in SymbolReaper, +// see ScanReachableSymbols. +// - Mark the region in DRoots if the binding is a loc::MemRegionVal. +Environment +EnvironmentManager::removeDeadBindings(Environment Env, + SymbolReaper &SymReaper, + const GRState *ST, + llvm::SmallVectorImpl<const MemRegion*> &DRoots) { + + CFG &C = *SymReaper.getLocationContext()->getCFG(); + + // We construct a new Environment object entirely, as this is cheaper than + // individually removing all the subexpression bindings (which will greatly + // outnumber block-level expression bindings). + Environment NewEnv = getInitialEnvironment(); + + llvm::SmallVector<std::pair<const Stmt*, SVal>, 10> deferredLocations; + + // Iterate over the block-expr bindings. + for (Environment::iterator I = Env.begin(), E = Env.end(); + I != E; ++I) { + + const Stmt *BlkExpr = I.getKey(); + + // For recorded locations (used when evaluating loads and stores), we + // consider them live only when their associated normal expression is + // also live. + // NOTE: This assumes that loads/stores that evaluated to UnknownVal + // still have an entry in the map. + if (IsLocation(BlkExpr)) { + deferredLocations.push_back(std::make_pair(BlkExpr, I.getData())); + continue; + } + + const SVal &X = I.getData(); + + // Block-level expressions in callers are assumed always live. + if (isBlockExprInCallers(BlkExpr, SymReaper.getLocationContext())) { + NewEnv.ExprBindings = F.add(NewEnv.ExprBindings, BlkExpr, X); + + if (isa<loc::MemRegionVal>(X)) { + const MemRegion* R = cast<loc::MemRegionVal>(X).getRegion(); + DRoots.push_back(R); + } + + // Mark all symbols in the block expr's value live. + MarkLiveCallback cb(SymReaper); + ST->scanReachableSymbols(X, cb); + continue; + } + + // Not a block-level expression? 
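// Aside (editorial sketch, not clang API): MakeLocation/IsLocation above
// work because Stmt pointers are at least 2-byte aligned, leaving bit 0 free
// to mark "location" keys that share one map with plain expression keys.
// The trick in isolation, with invented names:
//
//   #include <cstdint>
//
//   template <typename T> const T *tagPtr(const T *P) {
//     return reinterpret_cast<const T *>(
//         reinterpret_cast<std::uintptr_t>(P) | 0x1);
//   }
//   template <typename T> bool isTagged(const T *P) {
//     return reinterpret_cast<std::uintptr_t>(P) & 0x1;
//   }
//   template <typename T> const T *stripTag(const T *P) {
//     return reinterpret_cast<const T *>(
//         reinterpret_cast<std::uintptr_t>(P) & ~std::uintptr_t(1));
//   }
//
// A tagged pointer must be stripped before being dereferenced; the
// deferred-locations loop below does exactly that with its '& ~0x1' mask.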
+ if (!C.isBlkExpr(BlkExpr))
+ continue;
+
+ if (SymReaper.isLive(BlkExpr)) {
+ // Copy the binding to the new map.
+ NewEnv.ExprBindings = F.add(NewEnv.ExprBindings, BlkExpr, X);
+
+ // If the block expr's value is a memory region, then mark that region.
+ if (isa<loc::MemRegionVal>(X)) {
+ const MemRegion* R = cast<loc::MemRegionVal>(X).getRegion();
+ DRoots.push_back(R);
+ }
+
+ // Mark all symbols in the block expr's value live.
+ MarkLiveCallback cb(SymReaper);
+ ST->scanReachableSymbols(X, cb);
+ continue;
+ }
+
+ // Otherwise the expression is dead, with a couple of exceptions.
+ // Do not misclean LogicalExpr or ConditionalOperator. It is dead at the
+ // beginning of itself, but we need its UndefinedVal to determine its
+ // SVal.
+ if (X.isUndef() && cast<UndefinedVal>(X).getData())
+ NewEnv.ExprBindings = F.add(NewEnv.ExprBindings, BlkExpr, X);
+ }
+
+ // Go through the deferred locations and add them to the new environment if
+ // the corresponding Stmt* is in the map as well.
+ for (llvm::SmallVectorImpl<std::pair<const Stmt*, SVal> >::iterator
+ I = deferredLocations.begin(), E = deferredLocations.end(); I != E; ++I) {
+ const Stmt *S = (Stmt*) (((uintptr_t) I->first) & (uintptr_t) ~0x1);
+ if (NewEnv.ExprBindings.lookup(S))
+ NewEnv.ExprBindings = F.add(NewEnv.ExprBindings, I->first, I->second);
+ }
+
+ return NewEnv;
+}
diff --git a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
new file mode 100644
index 0000000..2a8364d
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -0,0 +1,392 @@
+//=-- ExplodedGraph.cpp - Local, Path-Sens. "Exploded Graph" -*- C++ -*------=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the template classes ExplodedNode and ExplodedGraph,
+// which represent a path-sensitive, intra-procedural "exploded graph."
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/AST/Stmt.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include <vector>
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Node auditing.
+//===----------------------------------------------------------------------===//
+
+// An out-of-line virtual method to provide a home for the class vtable.
+ExplodedNode::Auditor::~Auditor() {}
+
+#ifndef NDEBUG
+static ExplodedNode::Auditor* NodeAuditor = 0;
+#endif
+
+void ExplodedNode::SetAuditor(ExplodedNode::Auditor* A) {
+#ifndef NDEBUG
+ NodeAuditor = A;
+#endif
+}
+
+//===----------------------------------------------------------------------===//
+// Cleanup.
+//===----------------------------------------------------------------------===//
+
+typedef std::vector<ExplodedNode*> NodeList;
+static inline NodeList*& getNodeList(void *&p) { return (NodeList*&) p; }
+
+ExplodedGraph::~ExplodedGraph() {
+ if (reclaimNodes) {
+ delete getNodeList(recentlyAllocatedNodes);
+ delete getNodeList(freeNodes);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Node reclamation.
+//===----------------------------------------------------------------------===//
+
+void ExplodedGraph::reclaimRecentlyAllocatedNodes() {
+ if (!recentlyAllocatedNodes)
+ return;
+ NodeList &nl = *getNodeList(recentlyAllocatedNodes);
+
+ // Reclaim all nodes that match *all* the following criteria:
+ //
+ // (1) 1 predecessor (that has one successor)
+ // (2) 1 successor (that has one predecessor)
+ // (3) The ProgramPoint is for a PostStmt.
+ // (4) There is no 'tag' for the ProgramPoint.
+ // (5) The 'store' is the same as the predecessor.
+ // (6) The 'GDM' is the same as the predecessor.
+ // (7) The LocationContext is the same as the predecessor.
+ // (8) The PostStmt is for a non-CFGElement expression.
+
+ for (NodeList::iterator i = nl.begin(), e = nl.end(); i != e; ++i) {
+ ExplodedNode *node = *i;
+
+ // Conditions 1 and 2.
+ if (node->pred_size() != 1 || node->succ_size() != 1)
+ continue;
+
+ ExplodedNode *pred = *(node->pred_begin());
+ if (pred->succ_size() != 1)
+ continue;
+
+ ExplodedNode *succ = *(node->succ_begin());
+ if (succ->pred_size() != 1)
+ continue;
+
+ // Condition 3.
+ ProgramPoint progPoint = node->getLocation();
+ if (!isa<PostStmt>(progPoint))
+ continue;
+
+ // Condition 4.
+ PostStmt ps = cast<PostStmt>(progPoint);
+ if (ps.getTag() || isa<PostStmtCustom>(ps))
+ continue;
+
+ if (isa<BinaryOperator>(ps.getStmt()))
+ continue;
+
+ // Conditions 5, 6, and 7.
+ const GRState *state = node->getState();
+ const GRState *pred_state = pred->getState();
+ if (state->store != pred_state->store || state->GDM != pred_state->GDM ||
+ progPoint.getLocationContext() != pred->getLocationContext())
+ continue;
+
+ // Condition 8.
+ if (node->getCFG().isBlkExpr(ps.getStmt()))
+ continue;
+
+ // If we reach here, we can remove the node. This means:
+ // (a) changing the predecessor's successor to the successor of this node
+ // (b) changing the successor's predecessor to the predecessor of this node
+ // (c) putting 'node' onto freeNodes.
+ pred->replaceSuccessor(succ);
+ succ->replacePredecessor(pred);
+ if (!freeNodes)
+ freeNodes = new NodeList();
+ getNodeList(freeNodes)->push_back(node);
+ Nodes.RemoveNode(node);
+ --NumNodes;
+ node->~ExplodedNode();
+ }
+
+ nl.clear();
+}
+
+//===----------------------------------------------------------------------===//
+// ExplodedNode.
+//===----------------------------------------------------------------------===// + +static inline BumpVector<ExplodedNode*>& getVector(void* P) { + return *reinterpret_cast<BumpVector<ExplodedNode*>*>(P); +} + +void ExplodedNode::addPredecessor(ExplodedNode* V, ExplodedGraph &G) { + assert (!V->isSink()); + Preds.addNode(V, G); + V->Succs.addNode(this, G); +#ifndef NDEBUG + if (NodeAuditor) NodeAuditor->AddEdge(V, this); +#endif +} + +void ExplodedNode::NodeGroup::replaceNode(ExplodedNode *node) { + assert(getKind() == Size1); + P = reinterpret_cast<uintptr_t>(node); + assert(getKind() == Size1); +} + +void ExplodedNode::NodeGroup::addNode(ExplodedNode* N, ExplodedGraph &G) { + assert((reinterpret_cast<uintptr_t>(N) & Mask) == 0x0); + assert(!getFlag()); + + if (getKind() == Size1) { + if (ExplodedNode* NOld = getNode()) { + BumpVectorContext &Ctx = G.getNodeAllocator(); + BumpVector<ExplodedNode*> *V = + G.getAllocator().Allocate<BumpVector<ExplodedNode*> >(); + new (V) BumpVector<ExplodedNode*>(Ctx, 4); + + assert((reinterpret_cast<uintptr_t>(V) & Mask) == 0x0); + V->push_back(NOld, Ctx); + V->push_back(N, Ctx); + P = reinterpret_cast<uintptr_t>(V) | SizeOther; + assert(getPtr() == (void*) V); + assert(getKind() == SizeOther); + } + else { + P = reinterpret_cast<uintptr_t>(N); + assert(getKind() == Size1); + } + } + else { + assert(getKind() == SizeOther); + getVector(getPtr()).push_back(N, G.getNodeAllocator()); + } +} + +unsigned ExplodedNode::NodeGroup::size() const { + if (getFlag()) + return 0; + + if (getKind() == Size1) + return getNode() ? 1 : 0; + else + return getVector(getPtr()).size(); +} + +ExplodedNode **ExplodedNode::NodeGroup::begin() const { + if (getFlag()) + return NULL; + + if (getKind() == Size1) + return (ExplodedNode**) (getPtr() ? &P : NULL); + else + return const_cast<ExplodedNode**>(&*(getVector(getPtr()).begin())); +} + +ExplodedNode** ExplodedNode::NodeGroup::end() const { + if (getFlag()) + return NULL; + + if (getKind() == Size1) + return (ExplodedNode**) (getPtr() ? &P+1 : NULL); + else { + // Dereferencing end() is undefined behaviour. The vector is not empty, so + // we can dereference the last elem and then add 1 to the result. + return const_cast<ExplodedNode**>(getVector(getPtr()).end()); + } +} + +ExplodedNode *ExplodedGraph::getNode(const ProgramPoint& L, + const GRState* State, bool* IsNew) { + // Profile 'State' to determine if we already have an existing node. + llvm::FoldingSetNodeID profile; + void* InsertPos = 0; + + NodeTy::Profile(profile, L, State); + NodeTy* V = Nodes.FindNodeOrInsertPos(profile, InsertPos); + + if (!V) { + if (freeNodes && !getNodeList(freeNodes)->empty()) { + NodeList *nl = getNodeList(freeNodes); + V = nl->back(); + nl->pop_back(); + } + else { + // Allocate a new node. + V = (NodeTy*) getAllocator().Allocate<NodeTy>(); + } + + new (V) NodeTy(L, State); + + if (reclaimNodes) { + if (!recentlyAllocatedNodes) + recentlyAllocatedNodes = new NodeList(); + getNodeList(recentlyAllocatedNodes)->push_back(V); + } + + // Insert the node into the node set and return it. 
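// Aside (editorial sketch, not clang API): getNode() memoizes nodes on their
// (ProgramPoint, State) profile and reports through IsNew whether the caller
// should enqueue the node. The same contract with plain STL types:
//
//   #include <map>
//   #include <utility>
//
//   struct Node { int Point; int State; };
//
//   Node *getNodeUniqued(std::map<std::pair<int, int>, Node> &Nodes,
//                        int Point, int State, bool *IsNew) {
//     std::pair<int, int> Key(Point, State);
//     std::map<std::pair<int, int>, Node>::iterator It = Nodes.find(Key);
//     if (It == Nodes.end()) {
//       It = Nodes.insert(std::make_pair(Key, Node())).first;
//       It->second.Point = Point;
//       It->second.State = State;
//       if (IsNew) *IsNew = true;   // freshly created: caller enqueues it
//     } else if (IsNew)
//       *IsNew = false;             // cache hit: paths merge on this node
//     return &It->second;
//   }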
+ Nodes.InsertNode(V, InsertPos);
+
+ ++NumNodes;
+
+ if (IsNew) *IsNew = true;
+ }
+ else
+ if (IsNew) *IsNew = false;
+
+ return V;
+}
+
+std::pair<ExplodedGraph*, InterExplodedGraphMap*>
+ExplodedGraph::Trim(const NodeTy* const* NBeg, const NodeTy* const* NEnd,
+ llvm::DenseMap<const void*, const void*> *InverseMap) const {
+
+ if (NBeg == NEnd)
+ return std::make_pair((ExplodedGraph*) 0,
+ (InterExplodedGraphMap*) 0);
+
+ assert (NBeg < NEnd);
+
+ llvm::OwningPtr<InterExplodedGraphMap> M(new InterExplodedGraphMap());
+
+ ExplodedGraph* G = TrimInternal(NBeg, NEnd, M.get(), InverseMap);
+
+ return std::make_pair(static_cast<ExplodedGraph*>(G), M.take());
+}
+
+ExplodedGraph*
+ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources,
+ const ExplodedNode* const* EndSources,
+ InterExplodedGraphMap* M,
+ llvm::DenseMap<const void*, const void*> *InverseMap) const {
+
+ typedef llvm::DenseSet<const ExplodedNode*> Pass1Ty;
+ Pass1Ty Pass1;
+
+ typedef llvm::DenseMap<const ExplodedNode*, ExplodedNode*> Pass2Ty;
+ Pass2Ty& Pass2 = M->M;
+
+ llvm::SmallVector<const ExplodedNode*, 10> WL1, WL2;
+
+ // ===- Pass 1 (reverse DFS) -===
+ for (const ExplodedNode* const* I = BeginSources; I != EndSources; ++I) {
+ assert(*I);
+ WL1.push_back(*I);
+ }
+
+ // Process the first worklist until it is empty. It is a SmallVector used
+ // as a stack, so the traversal is a reverse depth-first search.
+ while (!WL1.empty()) {
+ const ExplodedNode *N = WL1.back();
+ WL1.pop_back();
+
+ // Have we already visited this node? If so, continue to the next one.
+ if (Pass1.count(N))
+ continue;
+
+ // Otherwise, mark this node as visited.
+ Pass1.insert(N);
+
+ // If this is a root, enqueue it to the second worklist.
+ if (N->Preds.empty()) {
+ WL2.push_back(N);
+ continue;
+ }
+
+ // Visit our predecessors and enqueue them.
+ for (ExplodedNode** I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I)
+ WL1.push_back(*I);
+ }
+
+ // We didn't hit a root? Return with a null pointer for the new graph.
+ if (WL2.empty())
+ return 0;
+
+ // Create an empty graph.
+ ExplodedGraph* G = MakeEmptyGraph();
+
+ // ===- Pass 2 (forward DFS to construct the new graph) -===
+ while (!WL2.empty()) {
+ const ExplodedNode* N = WL2.back();
+ WL2.pop_back();
+
+ // Skip this node if we have already processed it.
+ if (Pass2.find(N) != Pass2.end())
+ continue;
+
+ // Create the corresponding node in the new graph and record the mapping
+ // from the old node to the new node.
+ ExplodedNode* NewN = G->getNode(N->getLocation(), N->State, NULL);
+ Pass2[N] = NewN;
+
+ // Also record the reverse mapping from the new node to the old node.
+ if (InverseMap) (*InverseMap)[NewN] = N;
+
+ // If this node is a root, designate it as such in the graph.
+ if (N->Preds.empty())
+ G->addRoot(NewN);
+
+ // In the case that some of the intended predecessors of NewN have already
+ // been created, we should hook them up as predecessors.
+
+ // Walk through the predecessors of 'N' and hook up their corresponding
+ // nodes in the new graph (if any) to the freshly created node.
+ for (ExplodedNode **I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I) {
+ Pass2Ty::iterator PI = Pass2.find(*I);
+ if (PI == Pass2.end())
+ continue;
+
+ NewN->addPredecessor(PI->second, *G);
+ }
+
+ // In the case that some of the intended successors of NewN have already
+ // been created, we should hook them up as successors. Otherwise, enqueue
+ // the new nodes from the original graph that should have nodes created
+ // in the new graph.
+ for (ExplodedNode **I=N->Succs.begin(), **E=N->Succs.end(); I!=E; ++I) {
+ Pass2Ty::iterator PI = Pass2.find(*I);
+ if (PI != Pass2.end()) {
+ PI->second->addPredecessor(NewN, *G);
+ continue;
+ }
+
+ // Enqueue nodes to the worklist that were marked during pass 1.
+ if (Pass1.count(*I))
+ WL2.push_back(*I);
+ }
+
+ // Finally, explicitly mark all nodes without any successors as sinks.
+ if (N->isSink())
+ NewN->markAsSink();
+ }
+
+ return G;
+}
+
+ExplodedNode*
+InterExplodedGraphMap::getMappedNode(const ExplodedNode* N) const {
+ llvm::DenseMap<const ExplodedNode*, ExplodedNode*>::const_iterator I =
+ M.find(N);
+
+ return I == M.end() ? 0 : I->second;
+}
+
diff --git a/lib/StaticAnalyzer/Core/FlatStore.cpp b/lib/StaticAnalyzer/Core/FlatStore.cpp
new file mode 100644
index 0000000..99a5ead
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/FlatStore.cpp
@@ -0,0 +1,204 @@
+//=== FlatStore.cpp - Flat region-based store model -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "llvm/ADT/ImmutableIntervalMap.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace clang;
+using namespace ento;
+using llvm::Interval;
+
+// The actual store type.
+typedef llvm::ImmutableIntervalMap<SVal> BindingVal;
+typedef llvm::ImmutableMap<const MemRegion *, BindingVal> RegionBindings;
+
+namespace {
+class FlatStoreManager : public StoreManager {
+ RegionBindings::Factory RBFactory;
+ BindingVal::Factory BVFactory;
+
+public:
+ FlatStoreManager(GRStateManager &mgr)
+ : StoreManager(mgr),
+ RBFactory(mgr.getAllocator()),
+ BVFactory(mgr.getAllocator()) {}
+
+ SVal Retrieve(Store store, Loc L, QualType T);
+ StoreRef Bind(Store store, Loc L, SVal val);
+ StoreRef Remove(Store St, Loc L);
+ StoreRef BindCompoundLiteral(Store store, const CompoundLiteralExpr* cl,
+ const LocationContext *LC, SVal v);
+
+ StoreRef getInitialStore(const LocationContext *InitLoc) {
+ return StoreRef(RBFactory.getEmptyMap().getRoot(), *this);
+ }
+
+ SubRegionMap *getSubRegionMap(Store store) {
+ return 0;
+ }
+
+ SVal ArrayToPointer(Loc Array);
+ StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper,
+ llvm::SmallVectorImpl<const MemRegion*>& RegionRoots){
+ return StoreRef(store, *this);
+ }
+
+ StoreRef BindDecl(Store store, const VarRegion *VR, SVal initVal);
+
+ StoreRef BindDeclWithNoInit(Store store, const VarRegion *VR);
+
+ typedef llvm::DenseSet<SymbolRef> InvalidatedSymbols;
+
+ StoreRef invalidateRegions(Store store, const MemRegion * const *I,
+ const MemRegion * const *E, const Expr *Ex,
+ unsigned Count, InvalidatedSymbols *IS,
+ bool invalidateGlobals,
+ InvalidatedRegions *Regions);
+
+ void print(Store store, llvm::raw_ostream& Out, const char* nl,
+ const char *sep);
+ void iterBindings(Store store, BindingsHandler& f);
+
+private:
+ static RegionBindings getRegionBindings(Store store) {
+ return RegionBindings(static_cast<const RegionBindings::TreeTy*>(store));
+ }
+
+ class RegionInterval {
+ public:
+ const MemRegion *R;
+ Interval I;
+ RegionInterval(const MemRegion *r, int64_t s, int64_t e) : R(r), I(s, e){}
+ };
+
+ RegionInterval RegionToInterval(const MemRegion *R);
+
+ SVal RetrieveRegionWithNoBinding(const MemRegion *R, QualType T);
+};
+} // end anonymous
namespace + +StoreManager *ento::CreateFlatStoreManager(GRStateManager &StMgr) { + return new FlatStoreManager(StMgr); +} + +SVal FlatStoreManager::Retrieve(Store store, Loc L, QualType T) { + const MemRegion *R = cast<loc::MemRegionVal>(L).getRegion(); + RegionInterval RI = RegionToInterval(R); + // FIXME: FlatStore should handle regions with unknown intervals. + if (!RI.R) + return UnknownVal(); + + RegionBindings B = getRegionBindings(store); + const BindingVal *BV = B.lookup(RI.R); + if (BV) { + const SVal *V = BVFactory.lookup(*BV, RI.I); + if (V) + return *V; + else + return RetrieveRegionWithNoBinding(R, T); + } + return RetrieveRegionWithNoBinding(R, T); +} + +SVal FlatStoreManager::RetrieveRegionWithNoBinding(const MemRegion *R, + QualType T) { + if (R->hasStackNonParametersStorage()) + return UndefinedVal(); + else + return svalBuilder.getRegionValueSymbolVal(cast<TypedRegion>(R)); +} + +StoreRef FlatStoreManager::Bind(Store store, Loc L, SVal val) { + const MemRegion *R = cast<loc::MemRegionVal>(L).getRegion(); + RegionBindings B = getRegionBindings(store); + const BindingVal *V = B.lookup(R); + + BindingVal BV = BVFactory.getEmptyMap(); + if (V) + BV = *V; + + RegionInterval RI = RegionToInterval(R); + // FIXME: FlatStore should handle regions with unknown intervals. + if (!RI.R) + return StoreRef(B.getRoot(), *this); + BV = BVFactory.add(BV, RI.I, val); + B = RBFactory.add(B, RI.R, BV); + return StoreRef(B.getRoot(), *this); +} + +StoreRef FlatStoreManager::Remove(Store store, Loc L) { + return StoreRef(store, *this); +} + +StoreRef FlatStoreManager::BindCompoundLiteral(Store store, + const CompoundLiteralExpr* cl, + const LocationContext *LC, + SVal v) { + return StoreRef(store, *this); +} + +SVal FlatStoreManager::ArrayToPointer(Loc Array) { + return Array; +} + +StoreRef FlatStoreManager::BindDecl(Store store, const VarRegion *VR, + SVal initVal) { + return Bind(store, svalBuilder.makeLoc(VR), initVal); +} + +StoreRef FlatStoreManager::BindDeclWithNoInit(Store store, const VarRegion *VR){ + return StoreRef(store, *this); +} + +StoreRef FlatStoreManager::invalidateRegions(Store store, + const MemRegion * const *I, + const MemRegion * const *E, + const Expr *Ex, unsigned Count, + InvalidatedSymbols *IS, + bool invalidateGlobals, + InvalidatedRegions *Regions) { + assert(false && "Not implemented"); + return StoreRef(store, *this); +} + +void FlatStoreManager::print(Store store, llvm::raw_ostream& Out, + const char* nl, const char *sep) { +} + +void FlatStoreManager::iterBindings(Store store, BindingsHandler& f) { +} + +FlatStoreManager::RegionInterval +FlatStoreManager::RegionToInterval(const MemRegion *R) { + switch (R->getKind()) { + case MemRegion::VarRegionKind: { + QualType T = cast<VarRegion>(R)->getValueType(); + int64_t Size = Ctx.getTypeSize(T); + return RegionInterval(R, 0, Size-1); + } + + case MemRegion::ElementRegionKind: + case MemRegion::FieldRegionKind: { + RegionOffset Offset = R->getAsOffset(); + // We cannot compute offset for all regions, for example, elements + // with symbolic offsets. 
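// Aside (editorial sketch, not clang API): FlatStore addresses each binding
// by a (base region, bit interval) pair. An exact-match analogue over a
// std::map, with no interval-overlap handling, unlike
// llvm::ImmutableIntervalMap:
//
//   #include <cstdint>
//   #include <map>
//   #include <utility>
//
//   typedef std::pair<int64_t, int64_t> Interval; // inclusive bit bounds
//   typedef std::map<Interval, int> FlatBindings; // interval -> bound value
//
//   void bind(FlatBindings &B, int64_t Start, int64_t Size, int V) {
//     B[Interval(Start, Start + Size - 1)] = V;
//   }
//   const int *lookup(const FlatBindings &B, int64_t Start, int64_t Size) {
//     FlatBindings::const_iterator It =
//         B.find(Interval(Start, Start + Size - 1));
//     return It == B.end() ? 0 : &It->second;
//   }
//
// Regions whose offset cannot be computed fall back to a null interval: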
+    if (!Offset.getRegion())
+      return RegionInterval(0, 0, 0);
+    int64_t Start = Offset.getOffset();
+    int64_t Size = Ctx.getTypeSize(cast<TypedRegion>(R)->getValueType());
+    return RegionInterval(Offset.getRegion(), Start, Start+Size);
+  }
+
+  default:
+    llvm_unreachable("Region kind unhandled.");
+    return RegionInterval(0, 0, 0);
+  }
+}
diff --git a/lib/StaticAnalyzer/Core/GRState.cpp b/lib/StaticAnalyzer/Core/GRState.cpp
new file mode 100644
index 0000000..7b21677
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/GRState.cpp
@@ -0,0 +1,606 @@
+//= GRState.cpp - Path-Sensitive "State" for tracking values -----*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements GRState and GRStateManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/CFG.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/TransferFuncs.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+// Give the vtable for ConstraintManager somewhere to live.
+// FIXME: Move this elsewhere.
+ConstraintManager::~ConstraintManager() {}
+
+GRState::GRState(GRStateManager *mgr, const Environment& env,
+                 StoreRef st, GenericDataMap gdm)
+  : stateMgr(mgr),
+    Env(env),
+    store(st.getStore()),
+    GDM(gdm),
+    refCount(0) {
+  stateMgr->getStoreManager().incrementReferenceCount(store);
+}
+
+GRState::GRState(const GRState& RHS)
+  : llvm::FoldingSetNode(),
+    stateMgr(RHS.stateMgr),
+    Env(RHS.Env),
+    store(RHS.store),
+    GDM(RHS.GDM),
+    refCount(0) {
+  stateMgr->getStoreManager().incrementReferenceCount(store);
+}
+
+GRState::~GRState() {
+  if (store)
+    stateMgr->getStoreManager().decrementReferenceCount(store);
+}
+
+GRStateManager::~GRStateManager() {
+  for (std::vector<GRState::Printer*>::iterator I=Printers.begin(),
+       E=Printers.end(); I!=E; ++I)
+    delete *I;
+
+  for (GDMContextsTy::iterator I=GDMContexts.begin(), E=GDMContexts.end();
+       I!=E; ++I)
+    I->second.second(I->second.first);
+}
+
+const GRState*
+GRStateManager::removeDeadBindings(const GRState* state,
+                                   const StackFrameContext *LCtx,
+                                   SymbolReaper& SymReaper) {
+
+  // This code essentially performs a "mark-and-sweep" of the VariableBindings.
+  // The roots are any Block-level exprs and Decls that our liveness algorithm
+  // tells us are live. We then see what Decls they may reference, and keep
+  // those around. This code more than likely can be made faster, and the
+  // frequency with which this method is called should be experimented with
+  // for optimum performance.
+  llvm::SmallVector<const MemRegion*, 10> RegionRoots;
+  GRState NewState = *state;
+
+  NewState.Env = EnvMgr.removeDeadBindings(NewState.Env, SymReaper,
+                                           state, RegionRoots);
+
+  // Clean up the store.
+  NewState.setStore(StoreMgr->removeDeadBindings(NewState.getStore(), LCtx,
+                                                 SymReaper, RegionRoots));
+  state = getPersistentState(NewState);
+  return ConstraintMgr->removeDeadBindings(state, SymReaper);
+}
+
+const GRState *GRStateManager::MarshalState(const GRState *state,
+                                            const StackFrameContext *InitLoc) {
+  // Make up an empty state for now.
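+  // That is, an empty Environment, the frame's initial store, and an empty
+  // generic data map, uniqued through getPersistentState().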
+  GRState State(this,
+                EnvMgr.getInitialEnvironment(),
+                StoreMgr->getInitialStore(InitLoc),
+                GDMFactory.getEmptyMap());
+
+  return getPersistentState(State);
+}
+
+const GRState *GRState::bindCompoundLiteral(const CompoundLiteralExpr* CL,
+                                            const LocationContext *LC,
+                                            SVal V) const {
+  const StoreRef &newStore =
+    getStateManager().StoreMgr->BindCompoundLiteral(getStore(), CL, LC, V);
+  return makeWithStore(newStore);
+}
+
+const GRState *GRState::bindDecl(const VarRegion* VR, SVal IVal) const {
+  const StoreRef &newStore =
+    getStateManager().StoreMgr->BindDecl(getStore(), VR, IVal);
+  return makeWithStore(newStore);
+}
+
+const GRState *GRState::bindDeclWithNoInit(const VarRegion* VR) const {
+  const StoreRef &newStore =
+    getStateManager().StoreMgr->BindDeclWithNoInit(getStore(), VR);
+  return makeWithStore(newStore);
+}
+
+const GRState *GRState::bindLoc(Loc LV, SVal V) const {
+  GRStateManager &Mgr = getStateManager();
+  const GRState *newState = makeWithStore(Mgr.StoreMgr->Bind(getStore(),
+                                                             LV, V));
+  const MemRegion *MR = LV.getAsRegion();
+  if (MR && Mgr.getOwningEngine())
+    return Mgr.getOwningEngine()->processRegionChange(newState, MR);
+
+  return newState;
+}
+
+const GRState *GRState::bindDefault(SVal loc, SVal V) const {
+  GRStateManager &Mgr = getStateManager();
+  const MemRegion *R = cast<loc::MemRegionVal>(loc).getRegion();
+  const StoreRef &newStore = Mgr.StoreMgr->BindDefault(getStore(), R, V);
+  const GRState *new_state = makeWithStore(newStore);
+  return Mgr.getOwningEngine() ?
+           Mgr.getOwningEngine()->processRegionChange(new_state, R) :
+           new_state;
+}
+
+const GRState *GRState::invalidateRegions(const MemRegion * const *Begin,
+                                          const MemRegion * const *End,
+                                          const Expr *E, unsigned Count,
+                                          StoreManager::InvalidatedSymbols *IS,
+                                          bool invalidateGlobals) const {
+  GRStateManager &Mgr = getStateManager();
+  SubEngine* Eng = Mgr.getOwningEngine();
+
+  if (Eng && Eng->wantsRegionChangeUpdate(this)) {
+    StoreManager::InvalidatedRegions Regions;
+    const StoreRef &newStore
+      = Mgr.StoreMgr->invalidateRegions(getStore(), Begin, End, E, Count, IS,
+                                        invalidateGlobals, &Regions);
+    const GRState *newState = makeWithStore(newStore);
+    return Eng->processRegionChanges(newState,
+                                     &Regions.front(),
+                                     &Regions.back()+1);
+  }
+
+  const StoreRef &newStore =
+    Mgr.StoreMgr->invalidateRegions(getStore(), Begin, End, E, Count, IS,
+                                    invalidateGlobals, NULL);
+  return makeWithStore(newStore);
+}
+
+const GRState *GRState::unbindLoc(Loc LV) const {
+  assert(!isa<loc::MemRegionVal>(LV) && "Use invalidateRegion instead.");
+
+  Store OldStore = getStore();
+  const StoreRef &newStore = getStateManager().StoreMgr->Remove(OldStore, LV);
+
+  if (newStore.getStore() == OldStore)
+    return this;
+
+  return makeWithStore(newStore);
+}
+
+const GRState *GRState::enterStackFrame(const StackFrameContext *frame) const {
+  const StoreRef &new_store =
+    getStateManager().StoreMgr->enterStackFrame(this, frame);
+  return makeWithStore(new_store);
+}
+
+SVal GRState::getSValAsScalarOrLoc(const MemRegion *R) const {
+  // We only want to do fetches from regions to which we can actually bind
+  // values. For example, SymbolicRegions of type 'id<...>' cannot
+  // have direct bindings (but there can be bindings on their subregions).
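+  // Unbindable regions never have a direct value; for everything else,
+  // only scalar (integer) and location values are fetched below.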
+  if (!R->isBoundable())
+    return UnknownVal();
+
+  if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
+    QualType T = TR->getValueType();
+    if (Loc::isLocType(T) || T->isIntegerType())
+      return getSVal(R);
+  }
+
+  return UnknownVal();
+}
+
+SVal GRState::getSVal(Loc location, QualType T) const {
+  SVal V = getRawSVal(cast<Loc>(location), T);
+
+  // If 'V' is a symbolic value that is *perfectly* constrained to
+  // be a constant value, use that value instead to lessen the burden
+  // on later analysis stages (so we have fewer symbolic values to reason
+  // about).
+  if (!T.isNull()) {
+    if (SymbolRef sym = V.getAsSymbol()) {
+      if (const llvm::APSInt *Int = getSymVal(sym)) {
+        // FIXME: Because we don't correctly model (yet) sign-extension
+        // and truncation of symbolic values, we need to convert
+        // the integer value to the correct signedness and bitwidth.
+        //
+        // This shows up in the following:
+        //
+        //   char foo();
+        //   unsigned x = foo();
+        //   if (x == 54)
+        //     ...
+        //
+        // The symbolic value stored to 'x' is actually the conjured
+        // symbol for the call to foo(); the type of that symbol is 'char',
+        // not unsigned.
+        const llvm::APSInt &NewV = getBasicVals().Convert(T, *Int);
+
+        if (isa<Loc>(V))
+          return loc::ConcreteInt(NewV);
+        else
+          return nonloc::ConcreteInt(NewV);
+      }
+    }
+  }
+
+  return V;
+}
+
+const GRState *GRState::BindExpr(const Stmt* S, SVal V, bool Invalidate) const{
+  Environment NewEnv = getStateManager().EnvMgr.bindExpr(Env, S, V,
+                                                         Invalidate);
+  if (NewEnv == Env)
+    return this;
+
+  GRState NewSt = *this;
+  NewSt.Env = NewEnv;
+  return getStateManager().getPersistentState(NewSt);
+}
+
+const GRState *GRState::bindExprAndLocation(const Stmt *S, SVal location,
+                                            SVal V) const {
+  Environment NewEnv =
+    getStateManager().EnvMgr.bindExprAndLocation(Env, S, location, V);
+
+  if (NewEnv == Env)
+    return this;
+
+  GRState NewSt = *this;
+  NewSt.Env = NewEnv;
+  return getStateManager().getPersistentState(NewSt);
+}
+
+const GRState *GRState::assumeInBound(DefinedOrUnknownSVal Idx,
+                                      DefinedOrUnknownSVal UpperBound,
+                                      bool Assumption) const {
+  if (Idx.isUnknown() || UpperBound.isUnknown())
+    return this;
+
+  // Build an expression for 0 <= Idx < UpperBound.
+  // This is the same as Idx + MIN < UpperBound + MIN, if overflow is allowed.
+  // FIXME: This should probably be part of SValBuilder.
+  GRStateManager &SM = getStateManager();
+  SValBuilder &svalBuilder = SM.getSValBuilder();
+  ASTContext &Ctx = svalBuilder.getContext();
+
+  // Get the offset: the minimum value of the array index type.
+  BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
+  // FIXME: This should be using ValueManager::ArrayindexTy...somehow.
+  QualType indexTy = Ctx.IntTy;
+  nonloc::ConcreteInt Min(BVF.getMinValue(indexTy));
+
+  // Adjust the index.
+  SVal newIdx = svalBuilder.evalBinOpNN(this, BO_Add,
+                                        cast<NonLoc>(Idx), Min, indexTy);
+  if (newIdx.isUnknownOrUndef())
+    return this;
+
+  // Adjust the upper bound.
+  SVal newBound =
+    svalBuilder.evalBinOpNN(this, BO_Add, cast<NonLoc>(UpperBound),
+                            Min, indexTy);
+
+  if (newBound.isUnknownOrUndef())
+    return this;
+
+  // Build the actual comparison.
+  SVal inBound = svalBuilder.evalBinOpNN(this, BO_LT,
+                                         cast<NonLoc>(newIdx),
+                                         cast<NonLoc>(newBound),
+                                         Ctx.IntTy);
+  if (inBound.isUnknownOrUndef())
+    return this;
+
+  // Finally, let the constraint manager take care of it.
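+  // For illustration: with Ctx.IntTy as the index type, Min is INT_MIN, so
+  // 'inBound' is (Idx + INT_MIN) < (UpperBound + INT_MIN), which under
+  // wraparound holds exactly when 0 <= Idx < UpperBound.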
+ ConstraintManager &CM = SM.getConstraintManager(); + return CM.assume(this, cast<DefinedSVal>(inBound), Assumption); +} + +const GRState* GRStateManager::getInitialState(const LocationContext *InitLoc) { + GRState State(this, + EnvMgr.getInitialEnvironment(), + StoreMgr->getInitialStore(InitLoc), + GDMFactory.getEmptyMap()); + + return getPersistentState(State); +} + +void GRStateManager::recycleUnusedStates() { + for (std::vector<GRState*>::iterator i = recentlyAllocatedStates.begin(), + e = recentlyAllocatedStates.end(); i != e; ++i) { + GRState *state = *i; + if (state->referencedByExplodedNode()) + continue; + StateSet.RemoveNode(state); + freeStates.push_back(state); + state->~GRState(); + } + recentlyAllocatedStates.clear(); +} + +const GRState* GRStateManager::getPersistentState(GRState& State) { + + llvm::FoldingSetNodeID ID; + State.Profile(ID); + void* InsertPos; + + if (GRState* I = StateSet.FindNodeOrInsertPos(ID, InsertPos)) + return I; + + GRState *newState = 0; + if (!freeStates.empty()) { + newState = freeStates.back(); + freeStates.pop_back(); + } + else { + newState = (GRState*) Alloc.Allocate<GRState>(); + } + new (newState) GRState(State); + StateSet.InsertNode(newState, InsertPos); + recentlyAllocatedStates.push_back(newState); + return newState; +} + +const GRState* GRState::makeWithStore(const StoreRef &store) const { + GRState NewSt = *this; + NewSt.setStore(store); + return getStateManager().getPersistentState(NewSt); +} + +void GRState::setStore(const StoreRef &newStore) { + Store newStoreStore = newStore.getStore(); + if (newStoreStore) + stateMgr->getStoreManager().incrementReferenceCount(newStoreStore); + if (store) + stateMgr->getStoreManager().decrementReferenceCount(store); + store = newStoreStore; +} + +//===----------------------------------------------------------------------===// +// State pretty-printing. +//===----------------------------------------------------------------------===// + +static bool IsEnvLoc(const Stmt *S) { + // FIXME: This is a layering violation. Should be in environment. + return (bool) (((uintptr_t) S) & 0x1); +} + +void GRState::print(llvm::raw_ostream& Out, CFG &C, const char* nl, + const char* sep) const { + // Print the store. + GRStateManager &Mgr = getStateManager(); + Mgr.getStoreManager().print(getStore(), Out, nl, sep); + + // Print Subexpression bindings. + bool isFirst = true; + + // FIXME: All environment printing should be moved inside Environment. + for (Environment::iterator I = Env.begin(), E = Env.end(); I != E; ++I) { + if (C.isBlkExpr(I.getKey()) || IsEnvLoc(I.getKey())) + continue; + + if (isFirst) { + Out << nl << nl << "Sub-Expressions:" << nl; + isFirst = false; + } + else { Out << nl; } + + Out << " (" << (void*) I.getKey() << ") "; + LangOptions LO; // FIXME. + I.getKey()->printPretty(Out, 0, PrintingPolicy(LO)); + Out << " : " << I.getData(); + } + + // Print block-expression bindings. + isFirst = true; + + for (Environment::iterator I = Env.begin(), E = Env.end(); I != E; ++I) { + if (!C.isBlkExpr(I.getKey())) + continue; + + if (isFirst) { + Out << nl << nl << "Block-level Expressions:" << nl; + isFirst = false; + } + else { Out << nl; } + + Out << " (" << (void*) I.getKey() << ") "; + LangOptions LO; // FIXME. + I.getKey()->printPretty(Out, 0, PrintingPolicy(LO)); + Out << " : " << I.getData(); + } + + // Print locations. 
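+  // Load/store location keys in the Environment carry a tag in the low bit
+  // of the Stmt pointer (see IsEnvLoc above); the bit is masked off below
+  // to recover the actual statement.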
+ isFirst = true; + + for (Environment::iterator I = Env.begin(), E = Env.end(); I != E; ++I) { + if (!IsEnvLoc(I.getKey())) + continue; + + if (isFirst) { + Out << nl << nl << "Load/store locations:" << nl; + isFirst = false; + } + else { Out << nl; } + + const Stmt *S = (Stmt*) (((uintptr_t) I.getKey()) & ((uintptr_t) ~0x1)); + + Out << " (" << (void*) S << ") "; + LangOptions LO; // FIXME. + S->printPretty(Out, 0, PrintingPolicy(LO)); + Out << " : " << I.getData(); + } + + Mgr.getConstraintManager().print(this, Out, nl, sep); + + // Print checker-specific data. + for (std::vector<Printer*>::iterator I = Mgr.Printers.begin(), + E = Mgr.Printers.end(); I != E; ++I) { + (*I)->Print(Out, this, nl, sep); + } +} + +void GRState::printDOT(llvm::raw_ostream& Out, CFG &C) const { + print(Out, C, "\\l", "\\|"); +} + +void GRState::printStdErr(CFG &C) const { + print(llvm::errs(), C); +} + +//===----------------------------------------------------------------------===// +// Generic Data Map. +//===----------------------------------------------------------------------===// + +void* const* GRState::FindGDM(void* K) const { + return GDM.lookup(K); +} + +void* +GRStateManager::FindGDMContext(void* K, + void* (*CreateContext)(llvm::BumpPtrAllocator&), + void (*DeleteContext)(void*)) { + + std::pair<void*, void (*)(void*)>& p = GDMContexts[K]; + if (!p.first) { + p.first = CreateContext(Alloc); + p.second = DeleteContext; + } + + return p.first; +} + +const GRState* GRStateManager::addGDM(const GRState* St, void* Key, void* Data){ + GRState::GenericDataMap M1 = St->getGDM(); + GRState::GenericDataMap M2 = GDMFactory.add(M1, Key, Data); + + if (M1 == M2) + return St; + + GRState NewSt = *St; + NewSt.GDM = M2; + return getPersistentState(NewSt); +} + +const GRState *GRStateManager::removeGDM(const GRState *state, void *Key) { + GRState::GenericDataMap OldM = state->getGDM(); + GRState::GenericDataMap NewM = GDMFactory.remove(OldM, Key); + + if (NewM == OldM) + return state; + + GRState NewState = *state; + NewState.GDM = NewM; + return getPersistentState(NewState); +} + +//===----------------------------------------------------------------------===// +// Utility. +//===----------------------------------------------------------------------===// + +namespace { +class ScanReachableSymbols : public SubRegionMap::Visitor { + typedef llvm::DenseSet<const MemRegion*> VisitedRegionsTy; + + VisitedRegionsTy visited; + const GRState *state; + SymbolVisitor &visitor; + llvm::OwningPtr<SubRegionMap> SRM; +public: + + ScanReachableSymbols(const GRState *st, SymbolVisitor& v) + : state(st), visitor(v) {} + + bool scan(nonloc::CompoundVal val); + bool scan(SVal val); + bool scan(const MemRegion *R); + + // From SubRegionMap::Visitor. 
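+  // Invoked for each (parent, subregion) pair; returning false stops the
+  // traversal, matching the early-exit convention of the scan() methods.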
+ bool Visit(const MemRegion* Parent, const MemRegion* SubRegion) { + return scan(SubRegion); + } +}; +} + +bool ScanReachableSymbols::scan(nonloc::CompoundVal val) { + for (nonloc::CompoundVal::iterator I=val.begin(), E=val.end(); I!=E; ++I) + if (!scan(*I)) + return false; + + return true; +} + +bool ScanReachableSymbols::scan(SVal val) { + if (loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(&val)) + return scan(X->getRegion()); + + if (nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(&val)) + return scan(X->getLoc()); + + if (SymbolRef Sym = val.getAsSymbol()) + return visitor.VisitSymbol(Sym); + + if (nonloc::CompoundVal *X = dyn_cast<nonloc::CompoundVal>(&val)) + return scan(*X); + + return true; +} + +bool ScanReachableSymbols::scan(const MemRegion *R) { + if (isa<MemSpaceRegion>(R) || visited.count(R)) + return true; + + visited.insert(R); + + // If this is a symbolic region, visit the symbol for the region. + if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) + if (!visitor.VisitSymbol(SR->getSymbol())) + return false; + + // If this is a subregion, also visit the parent regions. + if (const SubRegion *SR = dyn_cast<SubRegion>(R)) + if (!scan(SR->getSuperRegion())) + return false; + + // Now look at the binding to this region (if any). + if (!scan(state->getSValAsScalarOrLoc(R))) + return false; + + // Now look at the subregions. + if (!SRM.get()) + SRM.reset(state->getStateManager().getStoreManager(). + getSubRegionMap(state->getStore())); + + return SRM->iterSubRegions(R, *this); +} + +bool GRState::scanReachableSymbols(SVal val, SymbolVisitor& visitor) const { + ScanReachableSymbols S(this, visitor); + return S.scan(val); +} + +bool GRState::scanReachableSymbols(const SVal *I, const SVal *E, + SymbolVisitor &visitor) const { + ScanReachableSymbols S(this, visitor); + for ( ; I != E; ++I) { + if (!S.scan(*I)) + return false; + } + return true; +} + +bool GRState::scanReachableSymbols(const MemRegion * const *I, + const MemRegion * const *E, + SymbolVisitor &visitor) const { + ScanReachableSymbols S(this, visitor); + for ( ; I != E; ++I) { + if (!S.scan(*I)) + return false; + } + return true; +} diff --git a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp new file mode 100644 index 0000000..1ebc28c --- /dev/null +++ b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp @@ -0,0 +1,581 @@ +//===--- HTMLDiagnostics.cpp - HTML Diagnostics for Paths ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the HTMLDiagnostics object. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/StaticAnalyzer/Core/PathDiagnosticClients.h" +#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/FileManager.h" +#include "clang/Rewrite/Rewriter.h" +#include "clang/Rewrite/HTMLRewrite.h" +#include "clang/Lex/Lexer.h" +#include "clang/Lex/Preprocessor.h" +#include "llvm/Support/FileSystem.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Support/Path.h" + +using namespace clang; +using namespace ento; + +//===----------------------------------------------------------------------===// +// Boilerplate. +//===----------------------------------------------------------------------===// + +namespace { + +class HTMLDiagnostics : public PathDiagnosticClient { + llvm::sys::Path Directory, FilePrefix; + bool createdDir, noDir; + const Preprocessor &PP; + std::vector<const PathDiagnostic*> BatchedDiags; +public: + HTMLDiagnostics(const std::string& prefix, const Preprocessor &pp); + + virtual ~HTMLDiagnostics() { FlushDiagnostics(NULL); } + + virtual void FlushDiagnostics(llvm::SmallVectorImpl<std::string> *FilesMade); + + virtual void HandlePathDiagnostic(const PathDiagnostic* D); + + virtual llvm::StringRef getName() const { + return "HTMLDiagnostics"; + } + + unsigned ProcessMacroPiece(llvm::raw_ostream& os, + const PathDiagnosticMacroPiece& P, + unsigned num); + + void HandlePiece(Rewriter& R, FileID BugFileID, + const PathDiagnosticPiece& P, unsigned num, unsigned max); + + void HighlightRange(Rewriter& R, FileID BugFileID, SourceRange Range, + const char *HighlightStart = "<span class=\"mrange\">", + const char *HighlightEnd = "</span>"); + + void ReportDiag(const PathDiagnostic& D, + llvm::SmallVectorImpl<std::string> *FilesMade); +}; + +} // end anonymous namespace + +HTMLDiagnostics::HTMLDiagnostics(const std::string& prefix, + const Preprocessor &pp) + : Directory(prefix), FilePrefix(prefix), createdDir(false), noDir(false), + PP(pp) { + // All html files begin with "report" + FilePrefix.appendComponent("report"); +} + +PathDiagnosticClient* +ento::createHTMLDiagnosticClient(const std::string& prefix, + const Preprocessor &PP) { + return new HTMLDiagnostics(prefix, PP); +} + +//===----------------------------------------------------------------------===// +// Report processing. +//===----------------------------------------------------------------------===// + +void HTMLDiagnostics::HandlePathDiagnostic(const PathDiagnostic* D) { + if (!D) + return; + + if (D->empty()) { + delete D; + return; + } + + const_cast<PathDiagnostic*>(D)->flattenLocations(); + BatchedDiags.push_back(D); +} + +void +HTMLDiagnostics::FlushDiagnostics(llvm::SmallVectorImpl<std::string> *FilesMade) +{ + while (!BatchedDiags.empty()) { + const PathDiagnostic* D = BatchedDiags.back(); + BatchedDiags.pop_back(); + ReportDiag(*D, FilesMade); + delete D; + } + + BatchedDiags.clear(); +} + +void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D, + llvm::SmallVectorImpl<std::string> *FilesMade){ + // Create the HTML directory if it is missing. 
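+  // The directory is created lazily on the first report; if creation fails,
+  // 'noDir' is set and every subsequent report is silently dropped.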
+  if (!createdDir) {
+    createdDir = true;
+    std::string ErrorMsg;
+    Directory.createDirectoryOnDisk(true, &ErrorMsg);
+
+    bool IsDirectory;
+    if (llvm::sys::fs::is_directory(Directory.str(), IsDirectory) ||
+        !IsDirectory) {
+      llvm::errs() << "warning: could not create directory '"
+                   << Directory.str() << "'\n"
+                   << "reason: " << ErrorMsg << '\n';
+
+      noDir = true;
+
+      return;
+    }
+  }
+
+  if (noDir)
+    return;
+
+  const SourceManager &SMgr = D.begin()->getLocation().getManager();
+  FileID FID;
+
+  // Verify that the entire path is from the same FileID.
+  for (PathDiagnostic::const_iterator I = D.begin(), E = D.end(); I != E; ++I) {
+    FullSourceLoc L = I->getLocation().asLocation().getInstantiationLoc();
+
+    if (FID.isInvalid()) {
+      FID = SMgr.getFileID(L);
+    } else if (SMgr.getFileID(L) != FID)
+      return; // FIXME: Emit a warning?
+
+    // Check the source ranges.
+    for (PathDiagnosticPiece::range_iterator RI=I->ranges_begin(),
+         RE=I->ranges_end(); RI!=RE; ++RI) {
+
+      SourceLocation L = SMgr.getInstantiationLoc(RI->getBegin());
+
+      if (!L.isFileID() || SMgr.getFileID(L) != FID)
+        return; // FIXME: Emit a warning?
+
+      L = SMgr.getInstantiationLoc(RI->getEnd());
+
+      if (!L.isFileID() || SMgr.getFileID(L) != FID)
+        return; // FIXME: Emit a warning?
+    }
+  }
+
+  if (FID.isInvalid())
+    return; // FIXME: Emit a warning?
+
+  // Create a new rewriter to generate HTML.
+  Rewriter R(const_cast<SourceManager&>(SMgr), PP.getLangOptions());
+
+  // Process the path.
+  unsigned n = D.size();
+  unsigned max = n;
+
+  for (PathDiagnostic::const_reverse_iterator I=D.rbegin(), E=D.rend();
+       I!=E; ++I, --n)
+    HandlePiece(R, FID, *I, n, max);
+
+  // Add line numbers, header, footer, etc.
+
+  // unsigned FID = R.getSourceMgr().getMainFileID();
+  html::EscapeText(R, FID);
+  html::AddLineNumbers(R, FID);
+
+  // If we have a preprocessor, relex the file and syntax highlight.
+  // We might not have a preprocessor if we come from a deserialized AST file,
+  // for example.
+
+  html::SyntaxHighlight(R, FID, PP);
+  html::HighlightMacros(R, FID, PP);
+
+  // Get the full directory name of the analyzed file.
+
+  const FileEntry* Entry = SMgr.getFileEntryForID(FID);
+
+  // This is a kludge; basically we want to prepend the full working
+  // directory when the analyzed file has only a relative path. This is
+  // a work in progress.
+
+  std::string DirName = "";
+
+  if (llvm::sys::path::is_relative(Entry->getName())) {
+    llvm::sys::Path P = llvm::sys::Path::GetCurrentDirectory();
+    DirName = P.str() + "/";
+  }
+
+  // Add the bug summary table (file, location, description) at the top.
+
+  {
+    std::string s;
+    llvm::raw_string_ostream os(s);
+
+    os << "<!-- REPORTHEADER -->\n"
+       << "<h3>Bug Summary</h3>\n<table class=\"simpletable\">\n"
+          "<tr><td class=\"rowname\">File:</td><td>"
+       << html::EscapeText(DirName)
+       << html::EscapeText(Entry->getName())
+       << "</td></tr>\n<tr><td class=\"rowname\">Location:</td><td>"
+          "<a href=\"#EndPath\">line "
+       << (*D.rbegin()).getLocation().asLocation().getInstantiationLineNumber()
+       << ", column "
+       << (*D.rbegin()).getLocation().asLocation().getInstantiationColumnNumber()
+       << "</a></td></tr>\n"
+          "<tr><td class=\"rowname\">Description:</td><td>"
+       << D.getDescription() << "</td></tr>\n";
+
+    // Output any other metadata.
+ + for (PathDiagnostic::meta_iterator I=D.meta_begin(), E=D.meta_end(); + I!=E; ++I) { + os << "<tr><td></td><td>" << html::EscapeText(*I) << "</td></tr>\n"; + } + + os << "</table>\n<!-- REPORTSUMMARYEXTRA -->\n" + "<h3>Annotated Source Code</h3>\n"; + + R.InsertTextBefore(SMgr.getLocForStartOfFile(FID), os.str()); + } + + // Embed meta-data tags. + { + std::string s; + llvm::raw_string_ostream os(s); + + const std::string& BugDesc = D.getDescription(); + if (!BugDesc.empty()) + os << "\n<!-- BUGDESC " << BugDesc << " -->\n"; + + const std::string& BugType = D.getBugType(); + if (!BugType.empty()) + os << "\n<!-- BUGTYPE " << BugType << " -->\n"; + + const std::string& BugCategory = D.getCategory(); + if (!BugCategory.empty()) + os << "\n<!-- BUGCATEGORY " << BugCategory << " -->\n"; + + os << "\n<!-- BUGFILE " << DirName << Entry->getName() << " -->\n"; + + os << "\n<!-- BUGLINE " + << D.back()->getLocation().asLocation().getInstantiationLineNumber() + << " -->\n"; + + os << "\n<!-- BUGPATHLENGTH " << D.size() << " -->\n"; + + // Mark the end of the tags. + os << "\n<!-- BUGMETAEND -->\n"; + + // Insert the text. + R.InsertTextBefore(SMgr.getLocForStartOfFile(FID), os.str()); + } + + // Add CSS, header, and footer. + + html::AddHeaderFooterInternalBuiltinCSS(R, FID, Entry->getName()); + + // Get the rewrite buffer. + const RewriteBuffer *Buf = R.getRewriteBufferFor(FID); + + if (!Buf) { + llvm::errs() << "warning: no diagnostics generated for main file.\n"; + return; + } + + // Create a path for the target HTML file. + llvm::sys::Path F(FilePrefix); + F.makeUnique(false, NULL); + + // Rename the file with an HTML extension. + llvm::sys::Path H(F); + H.appendSuffix("html"); + F.renamePathOnDisk(H, NULL); + + std::string ErrorMsg; + llvm::raw_fd_ostream os(H.c_str(), ErrorMsg); + + if (!ErrorMsg.empty()) { + llvm::errs() << "warning: could not create file '" << F.str() + << "'\n"; + return; + } + + if (FilesMade) + FilesMade->push_back(llvm::sys::path::filename(H.str())); + + // Emit the HTML to disk. + for (RewriteBuffer::iterator I = Buf->begin(), E = Buf->end(); I!=E; ++I) + os << *I; +} + +void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID, + const PathDiagnosticPiece& P, + unsigned num, unsigned max) { + + // For now, just draw a box above the line in question, and emit the + // warning. + FullSourceLoc Pos = P.getLocation().asLocation(); + + if (!Pos.isValid()) + return; + + SourceManager &SM = R.getSourceMgr(); + assert(&Pos.getManager() == &SM && "SourceManagers are different!"); + std::pair<FileID, unsigned> LPosInfo = SM.getDecomposedInstantiationLoc(Pos); + + if (LPosInfo.first != BugFileID) + return; + + const llvm::MemoryBuffer *Buf = SM.getBuffer(LPosInfo.first); + const char* FileStart = Buf->getBufferStart(); + + // Compute the column number. Rewind from the current position to the start + // of the line. + unsigned ColNo = SM.getColumnNumber(LPosInfo.first, LPosInfo.second); + const char *TokInstantiationPtr =Pos.getInstantiationLoc().getCharacterData(); + const char *LineStart = TokInstantiationPtr-ColNo; + + // Compute LineEnd. + const char *LineEnd = TokInstantiationPtr; + const char* FileEnd = Buf->getBufferEnd(); + while (*LineEnd != '\n' && LineEnd != FileEnd) + ++LineEnd; + + // Compute the margin offset by counting tabs and non-tabs. + unsigned PosNo = 0; + for (const char* c = LineStart; c != TokInstantiationPtr; ++c) + PosNo += *c == '\t' ? 8 : 1; + + // Create the html for the message. 
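+  // Each piece becomes a floating <div> bubble inserted at the end of its
+  // source line; the final piece is given the id "EndPath" so that the
+  // summary table's "Location:" link can jump to it.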
+
+  const char *Kind = 0;
+  switch (P.getKind()) {
+  case PathDiagnosticPiece::Event: Kind = "Event"; break;
+  case PathDiagnosticPiece::ControlFlow: Kind = "Control"; break;
+    // Setting Kind to "Control" is intentional.
+  case PathDiagnosticPiece::Macro: Kind = "Control"; break;
+  }
+
+  std::string sbuf;
+  llvm::raw_string_ostream os(sbuf);
+
+  os << "\n<tr><td class=\"num\"></td><td class=\"line\"><div id=\"";
+
+  if (num == max)
+    os << "EndPath";
+  else
+    os << "Path" << num;
+
+  os << "\" class=\"msg";
+  if (Kind)
+    os << " msg" << Kind;
+  os << "\" style=\"margin-left:" << PosNo << "ex";
+
+  // Output a maximum size.
+  if (!isa<PathDiagnosticMacroPiece>(P)) {
+    // Get the string and determine the length of its longest
+    // whitespace-delimited token.
+    const std::string& Msg = P.getString();
+    unsigned max_token = 0;
+    unsigned cnt = 0;
+    unsigned len = Msg.size();
+
+    for (std::string::const_iterator I=Msg.begin(), E=Msg.end(); I!=E; ++I)
+      switch (*I) {
+      default:
+        ++cnt;
+        continue;
+      case ' ':
+      case '\t':
+      case '\n':
+        if (cnt > max_token) max_token = cnt;
+        cnt = 0;
+      }
+
+    if (cnt > max_token)
+      max_token = cnt;
+
+    // Determine the approximate size of the message bubble in em.
+    unsigned em;
+    const unsigned max_line = 120;
+
+    if (max_token >= max_line)
+      em = max_token / 2;
+    else {
+      unsigned characters = max_line;
+      unsigned lines = len / max_line;
+
+      if (lines > 0) {
+        for (; characters > max_token; --characters)
+          if (len / characters > lines) {
+            ++characters;
+            break;
+          }
+      }
+
+      em = characters / 2;
+    }
+
+    if (em < max_line/2)
+      os << "; max-width:" << em << "em";
+  }
+  else
+    os << "; max-width:100em";
+
+  os << "\">";
+
+  if (max > 1) {
+    os << "<table class=\"msgT\"><tr><td valign=\"top\">";
+    os << "<div class=\"PathIndex";
+    if (Kind) os << " PathIndex" << Kind;
+    os << "\">" << num << "</div>";
+    os << "</td><td>";
+  }
+
+  if (const PathDiagnosticMacroPiece *MP =
+        dyn_cast<PathDiagnosticMacroPiece>(&P)) {
+
+    os << "Within the expansion of the macro '";
+
+    // Get the name of the macro by relexing it.
+    {
+      FullSourceLoc L = MP->getLocation().asLocation().getInstantiationLoc();
+      assert(L.isFileID());
+      llvm::StringRef BufferInfo = L.getBufferData();
+      const char* MacroName = L.getDecomposedLoc().second + BufferInfo.data();
+      Lexer rawLexer(L, PP.getLangOptions(), BufferInfo.begin(),
+                     MacroName, BufferInfo.end());
+
+      Token TheTok;
+      rawLexer.LexFromRawLexer(TheTok);
+      for (unsigned i = 0, n = TheTok.getLength(); i < n; ++i)
+        os << MacroName[i];
+    }
+
+    os << "':\n";
+
+    if (max > 1)
+      os << "</td></tr></table>";
+
+    // Within a macro piece. Write out each event.
+    ProcessMacroPiece(os, *MP, 0);
+  }
+  else {
+    os << html::EscapeText(P.getString());
+
+    if (max > 1)
+      os << "</td></tr></table>";
+  }
+
+  os << "</div></td></tr>";
+
+  // Insert the new html.
+  unsigned DisplayPos = LineEnd - FileStart;
+  SourceLocation Loc =
+    SM.getLocForStartOfFile(LPosInfo.first).getFileLocWithOffset(DisplayPos);
+
+  R.InsertTextBefore(Loc, os.str());
+
+  // Now highlight the ranges.
+  for (const SourceRange *I = P.ranges_begin(), *E = P.ranges_end();
+       I != E; ++I)
+    HighlightRange(R, LPosInfo.first, *I);
+
+#if 0
+  // If there is a code insertion hint, insert that code.
+  // FIXME: This code is disabled because it seems to mangle the HTML
+  // output. I'm leaving it here because it's generally the right idea,
+  // but needs some help from someone more familiar with the rewriter.
+  for (const FixItHint *Hint = P.fixit_begin(), *HintEnd = P.fixit_end();
+       Hint != HintEnd; ++Hint) {
+    if (Hint->RemoveRange.isValid()) {
+      HighlightRange(R, LPosInfo.first, Hint->RemoveRange,
+                     "<span class=\"CodeRemovalHint\">", "</span>");
+    }
+    if (Hint->InsertionLoc.isValid()) {
+      std::string EscapedCode = html::EscapeText(Hint->CodeToInsert, true);
+      EscapedCode = "<span class=\"CodeInsertionHint\">" + EscapedCode
+                      + "</span>";
+      R.InsertTextBefore(Hint->InsertionLoc, EscapedCode);
+    }
+  }
+#endif
+}
+
+static void EmitAlphaCounter(llvm::raw_ostream& os, unsigned n) {
+  // Emit 'n' as a base-26 counter rendered with the letters 'a'..'z'.
+  unsigned x = n % 26;
+  n /= 26;
+
+  if (n > 0)
+    EmitAlphaCounter(os, n);
+
+  os << char('a' + x);
+}
+
+unsigned HTMLDiagnostics::ProcessMacroPiece(llvm::raw_ostream& os,
+                                            const PathDiagnosticMacroPiece& P,
+                                            unsigned num) {
+
+  for (PathDiagnosticMacroPiece::const_iterator I=P.begin(), E=P.end();
+       I!=E; ++I) {
+
+    if (const PathDiagnosticMacroPiece *MP =
+          dyn_cast<PathDiagnosticMacroPiece>(*I)) {
+      num = ProcessMacroPiece(os, *MP, num);
+      continue;
+    }
+
+    if (PathDiagnosticEventPiece *EP = dyn_cast<PathDiagnosticEventPiece>(*I)) {
+      os << "<div class=\"msg msgEvent\" style=\"width:94%; "
+            "margin-left:5px\">"
+            "<table class=\"msgT\"><tr>"
+            "<td valign=\"top\"><div class=\"PathIndex PathIndexEvent\">";
+      EmitAlphaCounter(os, num++);
+      os << "</div></td><td valign=\"top\">"
+         << html::EscapeText(EP->getString())
+         << "</td></tr></table></div>\n";
+    }
+  }
+
+  return num;
+}
+
+void HTMLDiagnostics::HighlightRange(Rewriter& R, FileID BugFileID,
+                                     SourceRange Range,
+                                     const char *HighlightStart,
+                                     const char *HighlightEnd) {
+  SourceManager &SM = R.getSourceMgr();
+  const LangOptions &LangOpts = R.getLangOpts();
+
+  SourceLocation InstantiationStart = SM.getInstantiationLoc(Range.getBegin());
+  unsigned StartLineNo = SM.getInstantiationLineNumber(InstantiationStart);
+
+  SourceLocation InstantiationEnd = SM.getInstantiationLoc(Range.getEnd());
+  unsigned EndLineNo = SM.getInstantiationLineNumber(InstantiationEnd);
+
+  if (EndLineNo < StartLineNo)
+    return;
+
+  if (SM.getFileID(InstantiationStart) != BugFileID ||
+      SM.getFileID(InstantiationEnd) != BugFileID)
+    return;
+
+  // Compute the column number of the end.
+  unsigned EndColNo = SM.getInstantiationColumnNumber(InstantiationEnd);
+  unsigned OldEndColNo = EndColNo;
+
+  if (EndColNo) {
+    // Add in the length of the token, so that we cover multi-char tokens.
+    EndColNo += Lexer::MeasureTokenLength(Range.getEnd(), SM, LangOpts)-1;
+  }
+
+  // Highlight the range. Make the span tag the outermost tag for the
+  // selected range.
+
+  SourceLocation E =
+    InstantiationEnd.getFileLocWithOffset(EndColNo - OldEndColNo);
+
+  html::HighlightRange(R, InstantiationStart, E, HighlightStart, HighlightEnd);
+}
diff --git a/lib/StaticAnalyzer/Core/Makefile b/lib/StaticAnalyzer/Core/Makefile
new file mode 100644
index 0000000..4aebc16
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/Makefile
@@ -0,0 +1,17 @@
+##===- clang/lib/StaticAnalyzer/Core/Makefile --------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements analyses built on top of source-level CFGs.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../../..
+LIBRARYNAME := clangStaticAnalyzerCore + +include $(CLANG_LEVEL)/Makefile diff --git a/lib/StaticAnalyzer/Core/MemRegion.cpp b/lib/StaticAnalyzer/Core/MemRegion.cpp new file mode 100644 index 0000000..d9e884a --- /dev/null +++ b/lib/StaticAnalyzer/Core/MemRegion.cpp @@ -0,0 +1,988 @@ +//== MemRegion.cpp - Abstract memory regions for static analysis --*- C++ -*--// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines MemRegion and its subclasses. MemRegion defines a +// partially-typed abstraction of memory useful for path-sensitive dataflow +// analyses. +// +//===----------------------------------------------------------------------===// + +#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h" +#include "clang/Analysis/AnalysisContext.h" +#include "clang/Analysis/Support/BumpVector.h" +#include "clang/AST/CharUnits.h" +#include "clang/AST/RecordLayout.h" +#include "llvm/Support/raw_ostream.h" + +using namespace clang; +using namespace ento; + +//===----------------------------------------------------------------------===// +// MemRegion Construction. +//===----------------------------------------------------------------------===// + +template<typename RegionTy> struct MemRegionManagerTrait; + +template <typename RegionTy, typename A1> +RegionTy* MemRegionManager::getRegion(const A1 a1) { + + const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion = + MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1); + + llvm::FoldingSetNodeID ID; + RegionTy::ProfileRegion(ID, a1, superRegion); + void* InsertPos; + RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, + InsertPos)); + + if (!R) { + R = (RegionTy*) A.Allocate<RegionTy>(); + new (R) RegionTy(a1, superRegion); + Regions.InsertNode(R, InsertPos); + } + + return R; +} + +template <typename RegionTy, typename A1> +RegionTy* MemRegionManager::getSubRegion(const A1 a1, + const MemRegion *superRegion) { + llvm::FoldingSetNodeID ID; + RegionTy::ProfileRegion(ID, a1, superRegion); + void* InsertPos; + RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, + InsertPos)); + + if (!R) { + R = (RegionTy*) A.Allocate<RegionTy>(); + new (R) RegionTy(a1, superRegion); + Regions.InsertNode(R, InsertPos); + } + + return R; +} + +template <typename RegionTy, typename A1, typename A2> +RegionTy* MemRegionManager::getRegion(const A1 a1, const A2 a2) { + + const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion = + MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1, a2); + + llvm::FoldingSetNodeID ID; + RegionTy::ProfileRegion(ID, a1, a2, superRegion); + void* InsertPos; + RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, + InsertPos)); + + if (!R) { + R = (RegionTy*) A.Allocate<RegionTy>(); + new (R) RegionTy(a1, a2, superRegion); + Regions.InsertNode(R, InsertPos); + } + + return R; +} + +template <typename RegionTy, typename A1, typename A2> +RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2, + const MemRegion *superRegion) { + + llvm::FoldingSetNodeID ID; + RegionTy::ProfileRegion(ID, a1, a2, superRegion); + void* InsertPos; + RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, + InsertPos)); + + if (!R) { + R = (RegionTy*) 
A.Allocate<RegionTy>(); + new (R) RegionTy(a1, a2, superRegion); + Regions.InsertNode(R, InsertPos); + } + + return R; +} + +template <typename RegionTy, typename A1, typename A2, typename A3> +RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2, const A3 a3, + const MemRegion *superRegion) { + + llvm::FoldingSetNodeID ID; + RegionTy::ProfileRegion(ID, a1, a2, a3, superRegion); + void* InsertPos; + RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, + InsertPos)); + + if (!R) { + R = (RegionTy*) A.Allocate<RegionTy>(); + new (R) RegionTy(a1, a2, a3, superRegion); + Regions.InsertNode(R, InsertPos); + } + + return R; +} + +//===----------------------------------------------------------------------===// +// Object destruction. +//===----------------------------------------------------------------------===// + +MemRegion::~MemRegion() {} + +MemRegionManager::~MemRegionManager() { + // All regions and their data are BumpPtrAllocated. No need to call + // their destructors. +} + +//===----------------------------------------------------------------------===// +// Basic methods. +//===----------------------------------------------------------------------===// + +bool SubRegion::isSubRegionOf(const MemRegion* R) const { + const MemRegion* r = getSuperRegion(); + while (r != 0) { + if (r == R) + return true; + if (const SubRegion* sr = dyn_cast<SubRegion>(r)) + r = sr->getSuperRegion(); + else + break; + } + return false; +} + +MemRegionManager* SubRegion::getMemRegionManager() const { + const SubRegion* r = this; + do { + const MemRegion *superRegion = r->getSuperRegion(); + if (const SubRegion *sr = dyn_cast<SubRegion>(superRegion)) { + r = sr; + continue; + } + return superRegion->getMemRegionManager(); + } while (1); +} + +const StackFrameContext *VarRegion::getStackFrame() const { + const StackSpaceRegion *SSR = dyn_cast<StackSpaceRegion>(getMemorySpace()); + return SSR ? SSR->getStackFrame() : NULL; +} + +//===----------------------------------------------------------------------===// +// Region extents. +//===----------------------------------------------------------------------===// + +DefinedOrUnknownSVal DeclRegion::getExtent(SValBuilder &svalBuilder) const { + ASTContext& Ctx = svalBuilder.getContext(); + QualType T = getDesugaredValueType(Ctx); + + if (isa<VariableArrayType>(T)) + return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this)); + if (isa<IncompleteArrayType>(T)) + return UnknownVal(); + + CharUnits size = Ctx.getTypeSizeInChars(T); + QualType sizeTy = svalBuilder.getArrayIndexType(); + return svalBuilder.makeIntVal(size.getQuantity(), sizeTy); +} + +DefinedOrUnknownSVal FieldRegion::getExtent(SValBuilder &svalBuilder) const { + DefinedOrUnknownSVal Extent = DeclRegion::getExtent(svalBuilder); + + // A zero-length array at the end of a struct often stands for dynamically- + // allocated extra memory. 
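+  // Illustration (hypothetical struct): in
+  //   struct PackedString { unsigned len; char data[0]; };
+  // 'data' has a constant extent of zero bytes, yet typically names
+  // trailing heap-allocated storage, so its extent is reported as unknown.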
+ if (Extent.isZeroConstant()) { + QualType T = getDesugaredValueType(svalBuilder.getContext()); + + if (isa<ConstantArrayType>(T)) + return UnknownVal(); + } + + return Extent; +} + +DefinedOrUnknownSVal AllocaRegion::getExtent(SValBuilder &svalBuilder) const { + return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this)); +} + +DefinedOrUnknownSVal SymbolicRegion::getExtent(SValBuilder &svalBuilder) const { + return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this)); +} + +DefinedOrUnknownSVal StringRegion::getExtent(SValBuilder &svalBuilder) const { + return svalBuilder.makeIntVal(getStringLiteral()->getByteLength()+1, + svalBuilder.getArrayIndexType()); +} + +QualType CXXBaseObjectRegion::getValueType() const { + return QualType(decl->getTypeForDecl(), 0); +} + +//===----------------------------------------------------------------------===// +// FoldingSet profiling. +//===----------------------------------------------------------------------===// + +void MemSpaceRegion::Profile(llvm::FoldingSetNodeID& ID) const { + ID.AddInteger((unsigned)getKind()); +} + +void StackSpaceRegion::Profile(llvm::FoldingSetNodeID &ID) const { + ID.AddInteger((unsigned)getKind()); + ID.AddPointer(getStackFrame()); +} + +void StaticGlobalSpaceRegion::Profile(llvm::FoldingSetNodeID &ID) const { + ID.AddInteger((unsigned)getKind()); + ID.AddPointer(getCodeRegion()); +} + +void StringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, + const StringLiteral* Str, + const MemRegion* superRegion) { + ID.AddInteger((unsigned) StringRegionKind); + ID.AddPointer(Str); + ID.AddPointer(superRegion); +} + +void AllocaRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, + const Expr* Ex, unsigned cnt, + const MemRegion *) { + ID.AddInteger((unsigned) AllocaRegionKind); + ID.AddPointer(Ex); + ID.AddInteger(cnt); +} + +void AllocaRegion::Profile(llvm::FoldingSetNodeID& ID) const { + ProfileRegion(ID, Ex, Cnt, superRegion); +} + +void CompoundLiteralRegion::Profile(llvm::FoldingSetNodeID& ID) const { + CompoundLiteralRegion::ProfileRegion(ID, CL, superRegion); +} + +void CompoundLiteralRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, + const CompoundLiteralExpr* CL, + const MemRegion* superRegion) { + ID.AddInteger((unsigned) CompoundLiteralRegionKind); + ID.AddPointer(CL); + ID.AddPointer(superRegion); +} + +void CXXThisRegion::ProfileRegion(llvm::FoldingSetNodeID &ID, + const PointerType *PT, + const MemRegion *sRegion) { + ID.AddInteger((unsigned) CXXThisRegionKind); + ID.AddPointer(PT); + ID.AddPointer(sRegion); +} + +void CXXThisRegion::Profile(llvm::FoldingSetNodeID &ID) const { + CXXThisRegion::ProfileRegion(ID, ThisPointerTy, superRegion); +} + +void DeclRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, const Decl* D, + const MemRegion* superRegion, Kind k) { + ID.AddInteger((unsigned) k); + ID.AddPointer(D); + ID.AddPointer(superRegion); +} + +void DeclRegion::Profile(llvm::FoldingSetNodeID& ID) const { + DeclRegion::ProfileRegion(ID, D, superRegion, getKind()); +} + +void VarRegion::Profile(llvm::FoldingSetNodeID &ID) const { + VarRegion::ProfileRegion(ID, getDecl(), superRegion); +} + +void SymbolicRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, SymbolRef sym, + const MemRegion *sreg) { + ID.AddInteger((unsigned) MemRegion::SymbolicRegionKind); + ID.Add(sym); + ID.AddPointer(sreg); +} + +void SymbolicRegion::Profile(llvm::FoldingSetNodeID& ID) const { + SymbolicRegion::ProfileRegion(ID, sym, getSuperRegion()); +} + +void ElementRegion::ProfileRegion(llvm::FoldingSetNodeID& 
ID, + QualType ElementType, SVal Idx, + const MemRegion* superRegion) { + ID.AddInteger(MemRegion::ElementRegionKind); + ID.Add(ElementType); + ID.AddPointer(superRegion); + Idx.Profile(ID); +} + +void ElementRegion::Profile(llvm::FoldingSetNodeID& ID) const { + ElementRegion::ProfileRegion(ID, ElementType, Index, superRegion); +} + +void FunctionTextRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, + const FunctionDecl *FD, + const MemRegion*) { + ID.AddInteger(MemRegion::FunctionTextRegionKind); + ID.AddPointer(FD); +} + +void FunctionTextRegion::Profile(llvm::FoldingSetNodeID& ID) const { + FunctionTextRegion::ProfileRegion(ID, FD, superRegion); +} + +void BlockTextRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, + const BlockDecl *BD, CanQualType, + const AnalysisContext *AC, + const MemRegion*) { + ID.AddInteger(MemRegion::BlockTextRegionKind); + ID.AddPointer(BD); +} + +void BlockTextRegion::Profile(llvm::FoldingSetNodeID& ID) const { + BlockTextRegion::ProfileRegion(ID, BD, locTy, AC, superRegion); +} + +void BlockDataRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, + const BlockTextRegion *BC, + const LocationContext *LC, + const MemRegion *sReg) { + ID.AddInteger(MemRegion::BlockDataRegionKind); + ID.AddPointer(BC); + ID.AddPointer(LC); + ID.AddPointer(sReg); +} + +void BlockDataRegion::Profile(llvm::FoldingSetNodeID& ID) const { + BlockDataRegion::ProfileRegion(ID, BC, LC, getSuperRegion()); +} + +void CXXTempObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID, + Expr const *Ex, + const MemRegion *sReg) { + ID.AddPointer(Ex); + ID.AddPointer(sReg); +} + +void CXXTempObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const { + ProfileRegion(ID, Ex, getSuperRegion()); +} + +void CXXBaseObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID, + const CXXRecordDecl *decl, + const MemRegion *sReg) { + ID.AddPointer(decl); + ID.AddPointer(sReg); +} + +void CXXBaseObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const { + ProfileRegion(ID, decl, superRegion); +} + +//===----------------------------------------------------------------------===// +// Region pretty-printing. +//===----------------------------------------------------------------------===// + +void MemRegion::dump() const { + dumpToStream(llvm::errs()); +} + +std::string MemRegion::getString() const { + std::string s; + llvm::raw_string_ostream os(s); + dumpToStream(os); + return os.str(); +} + +void MemRegion::dumpToStream(llvm::raw_ostream& os) const { + os << "<Unknown Region>"; +} + +void AllocaRegion::dumpToStream(llvm::raw_ostream& os) const { + os << "alloca{" << (void*) Ex << ',' << Cnt << '}'; +} + +void FunctionTextRegion::dumpToStream(llvm::raw_ostream& os) const { + os << "code{" << getDecl()->getDeclName().getAsString() << '}'; +} + +void BlockTextRegion::dumpToStream(llvm::raw_ostream& os) const { + os << "block_code{" << (void*) this << '}'; +} + +void BlockDataRegion::dumpToStream(llvm::raw_ostream& os) const { + os << "block_data{" << BC << '}'; +} + +void CompoundLiteralRegion::dumpToStream(llvm::raw_ostream& os) const { + // FIXME: More elaborate pretty-printing. 
+ os << "{ " << (void*) CL << " }"; +} + +void CXXTempObjectRegion::dumpToStream(llvm::raw_ostream &os) const { + os << "temp_object"; +} + +void CXXBaseObjectRegion::dumpToStream(llvm::raw_ostream &os) const { + os << "base " << decl->getName(); +} + +void CXXThisRegion::dumpToStream(llvm::raw_ostream &os) const { + os << "this"; +} + +void ElementRegion::dumpToStream(llvm::raw_ostream& os) const { + os << "element{" << superRegion << ',' + << Index << ',' << getElementType().getAsString() << '}'; +} + +void FieldRegion::dumpToStream(llvm::raw_ostream& os) const { + os << superRegion << "->" << getDecl(); +} + +void NonStaticGlobalSpaceRegion::dumpToStream(llvm::raw_ostream &os) const { + os << "NonStaticGlobalSpaceRegion"; +} + +void ObjCIvarRegion::dumpToStream(llvm::raw_ostream& os) const { + os << "ivar{" << superRegion << ',' << getDecl() << '}'; +} + +void StringRegion::dumpToStream(llvm::raw_ostream& os) const { + Str->printPretty(os, 0, PrintingPolicy(getContext().getLangOptions())); +} + +void SymbolicRegion::dumpToStream(llvm::raw_ostream& os) const { + os << "SymRegion{" << sym << '}'; +} + +void VarRegion::dumpToStream(llvm::raw_ostream& os) const { + os << cast<VarDecl>(D); +} + +void RegionRawOffset::dump() const { + dumpToStream(llvm::errs()); +} + +void RegionRawOffset::dumpToStream(llvm::raw_ostream& os) const { + os << "raw_offset{" << getRegion() << ',' << getOffset().getQuantity() << '}'; +} + +void StaticGlobalSpaceRegion::dumpToStream(llvm::raw_ostream &os) const { + os << "StaticGlobalsMemSpace{" << CR << '}'; +} + +//===----------------------------------------------------------------------===// +// MemRegionManager methods. +//===----------------------------------------------------------------------===// + +template <typename REG> +const REG *MemRegionManager::LazyAllocate(REG*& region) { + if (!region) { + region = (REG*) A.Allocate<REG>(); + new (region) REG(this); + } + + return region; +} + +template <typename REG, typename ARG> +const REG *MemRegionManager::LazyAllocate(REG*& region, ARG a) { + if (!region) { + region = (REG*) A.Allocate<REG>(); + new (region) REG(this, a); + } + + return region; +} + +const StackLocalsSpaceRegion* +MemRegionManager::getStackLocalsRegion(const StackFrameContext *STC) { + assert(STC); + StackLocalsSpaceRegion *&R = StackLocalsSpaceRegions[STC]; + + if (R) + return R; + + R = A.Allocate<StackLocalsSpaceRegion>(); + new (R) StackLocalsSpaceRegion(this, STC); + return R; +} + +const StackArgumentsSpaceRegion * +MemRegionManager::getStackArgumentsRegion(const StackFrameContext *STC) { + assert(STC); + StackArgumentsSpaceRegion *&R = StackArgumentsSpaceRegions[STC]; + + if (R) + return R; + + R = A.Allocate<StackArgumentsSpaceRegion>(); + new (R) StackArgumentsSpaceRegion(this, STC); + return R; +} + +const GlobalsSpaceRegion +*MemRegionManager::getGlobalsRegion(const CodeTextRegion *CR) { + if (!CR) + return LazyAllocate(globals); + + StaticGlobalSpaceRegion *&R = StaticsGlobalSpaceRegions[CR]; + if (R) + return R; + + R = A.Allocate<StaticGlobalSpaceRegion>(); + new (R) StaticGlobalSpaceRegion(this, CR); + return R; +} + +const HeapSpaceRegion *MemRegionManager::getHeapRegion() { + return LazyAllocate(heap); +} + +const MemSpaceRegion *MemRegionManager::getUnknownRegion() { + return LazyAllocate(unknown); +} + +const MemSpaceRegion *MemRegionManager::getCodeRegion() { + return LazyAllocate(code); +} + +//===----------------------------------------------------------------------===// +// Constructing regions. 
+//===----------------------------------------------------------------------===//
+
+const StringRegion* MemRegionManager::getStringRegion(const StringLiteral* Str){
+  return getSubRegion<StringRegion>(Str, getGlobalsRegion());
+}
+
+const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
+                                                const LocationContext *LC) {
+  const MemRegion *sReg = 0;
+
+  if (D->hasGlobalStorage() && !D->isStaticLocal())
+    sReg = getGlobalsRegion();
+  else {
+    // FIXME: Once we implement scope handling, we will need to properly look
+    // up 'D' in the proper LocationContext.
+    const DeclContext *DC = D->getDeclContext();
+    const StackFrameContext *STC = LC->getStackFrameForDeclContext(DC);
+
+    if (!STC)
+      sReg = getUnknownRegion();
+    else {
+      if (D->hasLocalStorage()) {
+        sReg = isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)
+               ? static_cast<const MemRegion*>(getStackArgumentsRegion(STC))
+               : static_cast<const MemRegion*>(getStackLocalsRegion(STC));
+      }
+      else {
+        assert(D->isStaticLocal());
+        const Decl *D = STC->getDecl();
+        if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+          sReg = getGlobalsRegion(getFunctionTextRegion(FD));
+        else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+          const BlockTextRegion *BTR =
+            getBlockTextRegion(BD,
+                    C.getCanonicalType(BD->getSignatureAsWritten()->getType()),
+                    STC->getAnalysisContext());
+          sReg = getGlobalsRegion(BTR);
+        }
+        else {
+          // FIXME: For ObjC-methods, we need a new CodeTextRegion. For now
+          // just use the main global memspace.
+          sReg = getGlobalsRegion();
+        }
+      }
+    }
+  }
+
+  return getSubRegion<VarRegion>(D, sReg);
+}
+
+const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
+                                                const MemRegion *superR) {
+  return getSubRegion<VarRegion>(D, superR);
+}
+
+const BlockDataRegion *
+MemRegionManager::getBlockDataRegion(const BlockTextRegion *BC,
+                                     const LocationContext *LC) {
+  const MemRegion *sReg = 0;
+
+  if (LC) {
+    // FIXME: Once we implement scope handling, we want the parent region
+    // to be the scope.
+    const StackFrameContext *STC = LC->getCurrentStackFrame();
+    assert(STC);
+    sReg = getStackLocalsRegion(STC);
+  }
+  else {
+    // We allow 'LC' to be NULL for cases where we want BlockDataRegions
+    // without context-sensitivity.
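+    // Without a LocationContext there is no stack frame to parent the
+    // region on, so fall back to the unknown memory space.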
+ sReg = getUnknownRegion(); + } + + return getSubRegion<BlockDataRegion>(BC, LC, sReg); +} + +const CompoundLiteralRegion* +MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr* CL, + const LocationContext *LC) { + + const MemRegion *sReg = 0; + + if (CL->isFileScope()) + sReg = getGlobalsRegion(); + else { + const StackFrameContext *STC = LC->getCurrentStackFrame(); + assert(STC); + sReg = getStackLocalsRegion(STC); + } + + return getSubRegion<CompoundLiteralRegion>(CL, sReg); +} + +const ElementRegion* +MemRegionManager::getElementRegion(QualType elementType, NonLoc Idx, + const MemRegion* superRegion, + ASTContext& Ctx){ + + QualType T = Ctx.getCanonicalType(elementType).getUnqualifiedType(); + + llvm::FoldingSetNodeID ID; + ElementRegion::ProfileRegion(ID, T, Idx, superRegion); + + void* InsertPos; + MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos); + ElementRegion* R = cast_or_null<ElementRegion>(data); + + if (!R) { + R = (ElementRegion*) A.Allocate<ElementRegion>(); + new (R) ElementRegion(T, Idx, superRegion); + Regions.InsertNode(R, InsertPos); + } + + return R; +} + +const FunctionTextRegion * +MemRegionManager::getFunctionTextRegion(const FunctionDecl *FD) { + return getSubRegion<FunctionTextRegion>(FD, getCodeRegion()); +} + +const BlockTextRegion * +MemRegionManager::getBlockTextRegion(const BlockDecl *BD, CanQualType locTy, + AnalysisContext *AC) { + return getSubRegion<BlockTextRegion>(BD, locTy, AC, getCodeRegion()); +} + + +/// getSymbolicRegion - Retrieve or create a "symbolic" memory region. +const SymbolicRegion *MemRegionManager::getSymbolicRegion(SymbolRef sym) { + return getSubRegion<SymbolicRegion>(sym, getUnknownRegion()); +} + +const FieldRegion* +MemRegionManager::getFieldRegion(const FieldDecl* d, + const MemRegion* superRegion){ + return getSubRegion<FieldRegion>(d, superRegion); +} + +const ObjCIvarRegion* +MemRegionManager::getObjCIvarRegion(const ObjCIvarDecl* d, + const MemRegion* superRegion) { + return getSubRegion<ObjCIvarRegion>(d, superRegion); +} + +const CXXTempObjectRegion* +MemRegionManager::getCXXTempObjectRegion(Expr const *E, + LocationContext const *LC) { + const StackFrameContext *SFC = LC->getCurrentStackFrame(); + assert(SFC); + return getSubRegion<CXXTempObjectRegion>(E, getStackLocalsRegion(SFC)); +} + +const CXXBaseObjectRegion * +MemRegionManager::getCXXBaseObjectRegion(const CXXRecordDecl *decl, + const MemRegion *superRegion) { + return getSubRegion<CXXBaseObjectRegion>(decl, superRegion); +} + +const CXXThisRegion* +MemRegionManager::getCXXThisRegion(QualType thisPointerTy, + const LocationContext *LC) { + const StackFrameContext *STC = LC->getCurrentStackFrame(); + assert(STC); + const PointerType *PT = thisPointerTy->getAs<PointerType>(); + assert(PT); + return getSubRegion<CXXThisRegion>(PT, getStackArgumentsRegion(STC)); +} + +const AllocaRegion* +MemRegionManager::getAllocaRegion(const Expr* E, unsigned cnt, + const LocationContext *LC) { + const StackFrameContext *STC = LC->getCurrentStackFrame(); + assert(STC); + return getSubRegion<AllocaRegion>(E, cnt, getStackLocalsRegion(STC)); +} + +const MemSpaceRegion *MemRegion::getMemorySpace() const { + const MemRegion *R = this; + const SubRegion* SR = dyn_cast<SubRegion>(this); + + while (SR) { + R = SR->getSuperRegion(); + SR = dyn_cast<SubRegion>(R); + } + + return dyn_cast<MemSpaceRegion>(R); +} + +bool MemRegion::hasStackStorage() const { + return isa<StackSpaceRegion>(getMemorySpace()); +} + +bool MemRegion::hasStackNonParametersStorage() const { + 
+  return isa<StackLocalsSpaceRegion>(getMemorySpace());
+}
+
+bool MemRegion::hasStackParametersStorage() const {
+  return isa<StackArgumentsSpaceRegion>(getMemorySpace());
+}
+
+bool MemRegion::hasGlobalsOrParametersStorage() const {
+  const MemSpaceRegion *MS = getMemorySpace();
+  return isa<StackArgumentsSpaceRegion>(MS) ||
+         isa<GlobalsSpaceRegion>(MS);
+}
+
+// getBaseRegion strips away all elements and fields and returns the base
+// region underneath them.
+const MemRegion *MemRegion::getBaseRegion() const {
+  const MemRegion *R = this;
+  while (true) {
+    switch (R->getKind()) {
+      case MemRegion::ElementRegionKind:
+      case MemRegion::FieldRegionKind:
+      case MemRegion::ObjCIvarRegionKind:
+      case MemRegion::CXXBaseObjectRegionKind:
+        R = cast<SubRegion>(R)->getSuperRegion();
+        continue;
+      default:
+        break;
+    }
+    break;
+  }
+  return R;
+}
+
+//===----------------------------------------------------------------------===//
+// View handling.
+//===----------------------------------------------------------------------===//
+
+const MemRegion *MemRegion::StripCasts() const {
+  const MemRegion *R = this;
+  while (true) {
+    if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+      // FIXME: generalize. Essentially we want to strip away ElementRegions
+      // that were layered on a symbolic region because of casts. We only
+      // want to strip away ElementRegions, however, where the index is 0.
+      SVal index = ER->getIndex();
+      if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&index)) {
+        if (CI->getValue().getSExtValue() == 0) {
+          R = ER->getSuperRegion();
+          continue;
+        }
+      }
+    }
+    break;
+  }
+  return R;
+}
+
+// FIXME: Merge with the implementation of the same method in Store.cpp
+static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
+  if (const RecordType *RT = Ty->getAs<RecordType>()) {
+    const RecordDecl *D = RT->getDecl();
+    if (!D->getDefinition())
+      return false;
+  }
+
+  return true;
+}
+
+RegionRawOffset ElementRegion::getAsArrayOffset() const {
+  CharUnits offset = CharUnits::Zero();
+  const ElementRegion *ER = this;
+  const MemRegion *superR = NULL;
+  ASTContext &C = getContext();
+
+  // FIXME: Handle multi-dimensional arrays.
+
+  while (ER) {
+    superR = ER->getSuperRegion();
+
+    // FIXME: generalize to symbolic offsets.
+    SVal index = ER->getIndex();
+    if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&index)) {
+      // Update the offset.
+      int64_t i = CI->getValue().getSExtValue();
+
+      if (i != 0) {
+        QualType elemType = ER->getElementType();
+
+        // If we are pointing to an incomplete type, go no further.
+        if (!IsCompleteType(C, elemType)) {
+          superR = ER;
+          break;
+        }
+
+        CharUnits size = C.getTypeSizeInChars(elemType);
+        offset += (i * size);
+      }
+
+      // Go to the next ElementRegion (if any).
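+      // (For a multi-dimensional access such as buf[1][2] the ElementRegions
+      // nest, so this walk repeats once per index, each round adding its
+      // i * sizeof(element) bytes to 'offset'.)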
+ ER = dyn_cast<ElementRegion>(superR); + continue; + } + + return NULL; + } + + assert(superR && "super region cannot be NULL"); + return RegionRawOffset(superR, offset); +} + +RegionOffset MemRegion::getAsOffset() const { + const MemRegion *R = this; + int64_t Offset = 0; + + while (1) { + switch (R->getKind()) { + default: + return RegionOffset(0); + case SymbolicRegionKind: + case AllocaRegionKind: + case CompoundLiteralRegionKind: + case CXXThisRegionKind: + case StringRegionKind: + case VarRegionKind: + case CXXTempObjectRegionKind: + goto Finish; + case ElementRegionKind: { + const ElementRegion *ER = cast<ElementRegion>(R); + QualType EleTy = ER->getValueType(); + + if (!IsCompleteType(getContext(), EleTy)) + return RegionOffset(0); + + SVal Index = ER->getIndex(); + if (const nonloc::ConcreteInt *CI=dyn_cast<nonloc::ConcreteInt>(&Index)) { + int64_t i = CI->getValue().getSExtValue(); + CharUnits Size = getContext().getTypeSizeInChars(EleTy); + Offset += i * Size.getQuantity() * 8; + } else { + // We cannot compute offset for non-concrete index. + return RegionOffset(0); + } + R = ER->getSuperRegion(); + break; + } + case FieldRegionKind: { + const FieldRegion *FR = cast<FieldRegion>(R); + const RecordDecl *RD = FR->getDecl()->getParent(); + if (!RD->isDefinition()) + // We cannot compute offset for incomplete type. + return RegionOffset(0); + // Get the field number. + unsigned idx = 0; + for (RecordDecl::field_iterator FI = RD->field_begin(), + FE = RD->field_end(); FI != FE; ++FI, ++idx) + if (FR->getDecl() == *FI) + break; + + const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); + // This is offset in bits. + Offset += Layout.getFieldOffset(idx); + R = FR->getSuperRegion(); + break; + } + } + } + + Finish: + return RegionOffset(R, Offset); +} + +//===----------------------------------------------------------------------===// +// BlockDataRegion +//===----------------------------------------------------------------------===// + +void BlockDataRegion::LazyInitializeReferencedVars() { + if (ReferencedVars) + return; + + AnalysisContext *AC = getCodeRegion()->getAnalysisContext(); + AnalysisContext::referenced_decls_iterator I, E; + llvm::tie(I, E) = AC->getReferencedBlockVars(BC->getDecl()); + + if (I == E) { + ReferencedVars = (void*) 0x1; + return; + } + + MemRegionManager &MemMgr = *getMemRegionManager(); + llvm::BumpPtrAllocator &A = MemMgr.getAllocator(); + BumpVectorContext BC(A); + + typedef BumpVector<const MemRegion*> VarVec; + VarVec *BV = (VarVec*) A.Allocate<VarVec>(); + new (BV) VarVec(BC, E - I); + + for ( ; I != E; ++I) { + const VarDecl *VD = *I; + const VarRegion *VR = 0; + + if (!VD->getAttr<BlocksAttr>() && VD->hasLocalStorage()) + VR = MemMgr.getVarRegion(VD, this); + else { + if (LC) + VR = MemMgr.getVarRegion(VD, LC); + else { + VR = MemMgr.getVarRegion(VD, MemMgr.getUnknownRegion()); + } + } + + assert(VR); + BV->push_back(VR, BC); + } + + ReferencedVars = BV; +} + +BlockDataRegion::referenced_vars_iterator +BlockDataRegion::referenced_vars_begin() const { + const_cast<BlockDataRegion*>(this)->LazyInitializeReferencedVars(); + + BumpVector<const MemRegion*> *Vec = + static_cast<BumpVector<const MemRegion*>*>(ReferencedVars); + + return BlockDataRegion::referenced_vars_iterator(Vec == (void*) 0x1 ? 
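+      // ((void*) 0x1 is the sentinel LazyInitializeReferencedVars stores
+      // when the block references no variables.)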
+ NULL : Vec->begin()); +} + +BlockDataRegion::referenced_vars_iterator +BlockDataRegion::referenced_vars_end() const { + const_cast<BlockDataRegion*>(this)->LazyInitializeReferencedVars(); + + BumpVector<const MemRegion*> *Vec = + static_cast<BumpVector<const MemRegion*>*>(ReferencedVars); + + return BlockDataRegion::referenced_vars_iterator(Vec == (void*) 0x1 ? + NULL : Vec->end()); +} diff --git a/lib/StaticAnalyzer/Core/ObjCMessage.cpp b/lib/StaticAnalyzer/Core/ObjCMessage.cpp new file mode 100644 index 0000000..2e370d6 --- /dev/null +++ b/lib/StaticAnalyzer/Core/ObjCMessage.cpp @@ -0,0 +1,99 @@ +//===- ObjCMessage.cpp - Wrapper for ObjC messages and dot syntax -*- C++ -*--// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines ObjCMessage which serves as a common wrapper for ObjC +// message expressions or implicit messages for loading/storing ObjC properties. +// +//===----------------------------------------------------------------------===// + +#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h" + +using namespace clang; +using namespace ento; + +QualType ObjCMessage::getType(ASTContext &ctx) const { + assert(isValid() && "This ObjCMessage is uninitialized!"); + if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE)) + return msgE->getType(); + const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE); + if (isPropertySetter()) + return ctx.VoidTy; + return propE->getType(); +} + +Selector ObjCMessage::getSelector() const { + assert(isValid() && "This ObjCMessage is uninitialized!"); + if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE)) + return msgE->getSelector(); + const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE); + if (isPropertySetter()) + return propE->getSetterSelector(); + return propE->getGetterSelector(); +} + +const ObjCMethodDecl *ObjCMessage::getMethodDecl() const { + assert(isValid() && "This ObjCMessage is uninitialized!"); + if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE)) + return msgE->getMethodDecl(); + const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE); + if (propE->isImplicitProperty()) + return isPropertySetter() ? 
propE->getImplicitPropertySetter() + : propE->getImplicitPropertyGetter(); + return 0; +} + +const ObjCInterfaceDecl *ObjCMessage::getReceiverInterface() const { + assert(isValid() && "This ObjCMessage is uninitialized!"); + if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE)) + return msgE->getReceiverInterface(); + const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE); + if (propE->isClassReceiver()) + return propE->getClassReceiver(); + QualType recT; + if (const Expr *recE = getInstanceReceiver()) + recT = recE->getType(); + else { + assert(propE->isSuperReceiver()); + recT = propE->getSuperReceiverType(); + } + if (const ObjCObjectPointerType *Ptr = recT->getAs<ObjCObjectPointerType>()) + return Ptr->getInterfaceDecl(); + return 0; +} + +const Expr *ObjCMessage::getArgExpr(unsigned i) const { + assert(isValid() && "This ObjCMessage is uninitialized!"); + assert(i < getNumArgs() && "Invalid index for argument"); + if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE)) + return msgE->getArg(i); + assert(isPropertySetter()); + if (const BinaryOperator *bop = dyn_cast<BinaryOperator>(OriginE)) + if (bop->isAssignmentOp()) + return bop->getRHS(); + return 0; +} + +QualType CallOrObjCMessage::getResultType(ASTContext &ctx) const { + if (CallE) { + const Expr *Callee = CallE->getCallee(); + if (const FunctionDecl *FD = State->getSVal(Callee).getAsFunctionDecl()) + return FD->getResultType(); + return CallE->getType(); + } + return Msg.getResultType(ctx); +} + +SVal CallOrObjCMessage::getArgSValAsScalarOrLoc(unsigned i) const { + assert(i < getNumArgs()); + if (CallE) return State->getSValAsScalarOrLoc(CallE->getArg(i)); + QualType argT = Msg.getArgType(i); + if (Loc::isLocType(argT) || argT->isIntegerType()) + return Msg.getArgSVal(i, State); + return UnknownVal(); +} diff --git a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp new file mode 100644 index 0000000..872bbfe --- /dev/null +++ b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp @@ -0,0 +1,280 @@ +//===--- PathDiagnostic.cpp - Path-Specific Diagnostic Handling -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the PathDiagnostic-related interfaces. 
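+// A PathDiagnostic is an ordered sequence of PathDiagnosticPieces (events,
+// control-flow edges, and macro expansions) describing a single bug path.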
+// +//===----------------------------------------------------------------------===// + +#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h" +#include "clang/AST/Expr.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/StmtCXX.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/Support/Casting.h" + +using namespace clang; +using namespace ento; +using llvm::dyn_cast; +using llvm::isa; + +bool PathDiagnosticMacroPiece::containsEvent() const { + for (const_iterator I = begin(), E = end(); I!=E; ++I) { + if (isa<PathDiagnosticEventPiece>(*I)) + return true; + + if (PathDiagnosticMacroPiece *MP = dyn_cast<PathDiagnosticMacroPiece>(*I)) + if (MP->containsEvent()) + return true; + } + + return false; +} + +static llvm::StringRef StripTrailingDots(llvm::StringRef s) { + for (llvm::StringRef::size_type i = s.size(); i != 0; --i) + if (s[i - 1] != '.') + return s.substr(0, i); + return ""; +} + +PathDiagnosticPiece::PathDiagnosticPiece(llvm::StringRef s, + Kind k, DisplayHint hint) + : str(StripTrailingDots(s)), kind(k), Hint(hint) {} + +PathDiagnosticPiece::PathDiagnosticPiece(Kind k, DisplayHint hint) + : kind(k), Hint(hint) {} + +PathDiagnosticPiece::~PathDiagnosticPiece() {} +PathDiagnosticEventPiece::~PathDiagnosticEventPiece() {} +PathDiagnosticControlFlowPiece::~PathDiagnosticControlFlowPiece() {} + +PathDiagnosticMacroPiece::~PathDiagnosticMacroPiece() { + for (iterator I = begin(), E = end(); I != E; ++I) delete *I; +} + +PathDiagnostic::PathDiagnostic() : Size(0) {} + +PathDiagnostic::~PathDiagnostic() { + for (iterator I = begin(), E = end(); I != E; ++I) delete &*I; +} + +void PathDiagnostic::resetPath(bool deletePieces) { + Size = 0; + + if (deletePieces) + for (iterator I=begin(), E=end(); I!=E; ++I) + delete &*I; + + path.clear(); +} + + +PathDiagnostic::PathDiagnostic(llvm::StringRef bugtype, llvm::StringRef desc, + llvm::StringRef category) + : Size(0), + BugType(StripTrailingDots(bugtype)), + Desc(StripTrailingDots(desc)), + Category(StripTrailingDots(category)) {} + +void PathDiagnosticClient::HandleDiagnostic(Diagnostic::Level DiagLevel, + const DiagnosticInfo &Info) { + // Default implementation (Warnings/errors count). + DiagnosticClient::HandleDiagnostic(DiagLevel, Info); + + // Create a PathDiagnostic with a single piece. + + PathDiagnostic* D = new PathDiagnostic(); + + const char *LevelStr; + switch (DiagLevel) { + default: + case Diagnostic::Ignored: assert(0 && "Invalid diagnostic type"); + case Diagnostic::Note: LevelStr = "note: "; break; + case Diagnostic::Warning: LevelStr = "warning: "; break; + case Diagnostic::Error: LevelStr = "error: "; break; + case Diagnostic::Fatal: LevelStr = "fatal error: "; break; + } + + llvm::SmallString<100> StrC; + StrC += LevelStr; + Info.FormatDiagnostic(StrC); + + PathDiagnosticPiece *P = + new PathDiagnosticEventPiece(FullSourceLoc(Info.getLocation(), + Info.getSourceManager()), + StrC.str()); + + for (unsigned i = 0, e = Info.getNumRanges(); i != e; ++i) + P->addRange(Info.getRange(i).getAsRange()); + for (unsigned i = 0, e = Info.getNumFixItHints(); i != e; ++i) + P->addFixItHint(Info.getFixItHint(i)); + D->push_front(P); + + HandlePathDiagnostic(D); +} + +//===----------------------------------------------------------------------===// +// PathDiagnosticLocation methods. 
+//===----------------------------------------------------------------------===// + +FullSourceLoc PathDiagnosticLocation::asLocation() const { + assert(isValid()); + // Note that we want a 'switch' here so that the compiler can warn us in + // case we add more cases. + switch (K) { + case SingleLocK: + case RangeK: + break; + case StmtK: + return FullSourceLoc(S->getLocStart(), const_cast<SourceManager&>(*SM)); + case DeclK: + return FullSourceLoc(D->getLocation(), const_cast<SourceManager&>(*SM)); + } + + return FullSourceLoc(R.getBegin(), const_cast<SourceManager&>(*SM)); +} + +PathDiagnosticRange PathDiagnosticLocation::asRange() const { + assert(isValid()); + // Note that we want a 'switch' here so that the compiler can warn us in + // case we add more cases. + switch (K) { + case SingleLocK: + return PathDiagnosticRange(R, true); + case RangeK: + break; + case StmtK: { + const Stmt *S = asStmt(); + switch (S->getStmtClass()) { + default: + break; + case Stmt::DeclStmtClass: { + const DeclStmt *DS = cast<DeclStmt>(S); + if (DS->isSingleDecl()) { + // Should always be the case, but we'll be defensive. + return SourceRange(DS->getLocStart(), + DS->getSingleDecl()->getLocation()); + } + break; + } + // FIXME: Provide better range information for different + // terminators. + case Stmt::IfStmtClass: + case Stmt::WhileStmtClass: + case Stmt::DoStmtClass: + case Stmt::ForStmtClass: + case Stmt::ChooseExprClass: + case Stmt::IndirectGotoStmtClass: + case Stmt::SwitchStmtClass: + case Stmt::BinaryConditionalOperatorClass: + case Stmt::ConditionalOperatorClass: + case Stmt::ObjCForCollectionStmtClass: { + SourceLocation L = S->getLocStart(); + return SourceRange(L, L); + } + } + + return S->getSourceRange(); + } + case DeclK: + if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) + return MD->getSourceRange(); + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { + if (Stmt *Body = FD->getBody()) + return Body->getSourceRange(); + } + else { + SourceLocation L = D->getLocation(); + return PathDiagnosticRange(SourceRange(L, L), true); + } + } + + return R; +} + +void PathDiagnosticLocation::flatten() { + if (K == StmtK) { + R = asRange(); + K = RangeK; + S = 0; + D = 0; + } + else if (K == DeclK) { + SourceLocation L = D->getLocation(); + R = SourceRange(L, L); + K = SingleLocK; + S = 0; + D = 0; + } +} + +//===----------------------------------------------------------------------===// +// FoldingSet profiling methods. +//===----------------------------------------------------------------------===// + +void PathDiagnosticLocation::Profile(llvm::FoldingSetNodeID &ID) const { + ID.AddInteger((unsigned) K); + switch (K) { + case RangeK: + ID.AddInteger(R.getBegin().getRawEncoding()); + ID.AddInteger(R.getEnd().getRawEncoding()); + break; + case SingleLocK: + ID.AddInteger(R.getBegin().getRawEncoding()); + break; + case StmtK: + ID.Add(S); + break; + case DeclK: + ID.Add(D); + break; + } + return; +} + +void PathDiagnosticPiece::Profile(llvm::FoldingSetNodeID &ID) const { + ID.AddInteger((unsigned) getKind()); + ID.AddString(str); + // FIXME: Add profiling support for code hints. 
+ ID.AddInteger((unsigned) getDisplayHint()); + for (range_iterator I = ranges_begin(), E = ranges_end(); I != E; ++I) { + ID.AddInteger(I->getBegin().getRawEncoding()); + ID.AddInteger(I->getEnd().getRawEncoding()); + } +} + +void PathDiagnosticSpotPiece::Profile(llvm::FoldingSetNodeID &ID) const { + PathDiagnosticPiece::Profile(ID); + ID.Add(Pos); +} + +void PathDiagnosticControlFlowPiece::Profile(llvm::FoldingSetNodeID &ID) const { + PathDiagnosticPiece::Profile(ID); + for (const_iterator I = begin(), E = end(); I != E; ++I) + ID.Add(*I); +} + +void PathDiagnosticMacroPiece::Profile(llvm::FoldingSetNodeID &ID) const { + PathDiagnosticSpotPiece::Profile(ID); + for (const_iterator I = begin(), E = end(); I != E; ++I) + ID.Add(**I); +} + +void PathDiagnostic::Profile(llvm::FoldingSetNodeID &ID) const { + ID.AddInteger(Size); + ID.AddString(BugType); + ID.AddString(Desc); + ID.AddString(Category); + for (const_iterator I = begin(), E = end(); I != E; ++I) + ID.Add(*I); + + for (meta_iterator I = meta_begin(), E = meta_end(); I != E; ++I) + ID.AddString(*I); +} diff --git a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp new file mode 100644 index 0000000..fbbbd46 --- /dev/null +++ b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp @@ -0,0 +1,472 @@ +//===--- PlistDiagnostics.cpp - Plist Diagnostics for Paths -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the PlistDiagnostics object. +// +//===----------------------------------------------------------------------===// + +#include "clang/StaticAnalyzer/Core/PathDiagnosticClients.h" +#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/FileManager.h" +#include "clang/Lex/Preprocessor.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Support/Casting.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallVector.h" +using namespace clang; +using namespace ento; +using llvm::cast; + +typedef llvm::DenseMap<FileID, unsigned> FIDMap; + +namespace clang { + class Preprocessor; +} + +namespace { +struct CompareDiagnostics { + // Compare if 'X' is "<" than 'Y'. + bool operator()(const PathDiagnostic *X, const PathDiagnostic *Y) const { + // First compare by location + const FullSourceLoc &XLoc = X->getLocation().asLocation(); + const FullSourceLoc &YLoc = Y->getLocation().asLocation(); + if (XLoc < YLoc) + return true; + if (XLoc != YLoc) + return false; + + // Next, compare by bug type. + llvm::StringRef XBugType = X->getBugType(); + llvm::StringRef YBugType = Y->getBugType(); + if (XBugType < YBugType) + return true; + if (XBugType != YBugType) + return false; + + // Next, compare by bug description. + llvm::StringRef XDesc = X->getDescription(); + llvm::StringRef YDesc = Y->getDescription(); + if (XDesc < YDesc) + return true; + if (XDesc != YDesc) + return false; + + // FIXME: Further refine by comparing PathDiagnosticPieces? 
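+    // Any refinement added here must preserve the strict weak ordering that
+    // std::sort (see FlushDiagnostics below) requires of this comparator.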
+ return false; + } +}; +} + +namespace { + class PlistDiagnostics : public PathDiagnosticClient { + std::vector<const PathDiagnostic*> BatchedDiags; + const std::string OutputFile; + const LangOptions &LangOpts; + llvm::OwningPtr<PathDiagnosticClient> SubPD; + bool flushed; + public: + PlistDiagnostics(const std::string& prefix, const LangOptions &LangOpts, + PathDiagnosticClient *subPD); + + ~PlistDiagnostics() { FlushDiagnostics(NULL); } + + void FlushDiagnostics(llvm::SmallVectorImpl<std::string> *FilesMade); + + void HandlePathDiagnostic(const PathDiagnostic* D); + + virtual llvm::StringRef getName() const { + return "PlistDiagnostics"; + } + + PathGenerationScheme getGenerationScheme() const; + bool supportsLogicalOpControlFlow() const { return true; } + bool supportsAllBlockEdges() const { return true; } + virtual bool useVerboseDescription() const { return false; } + }; +} // end anonymous namespace + +PlistDiagnostics::PlistDiagnostics(const std::string& output, + const LangOptions &LO, + PathDiagnosticClient *subPD) + : OutputFile(output), LangOpts(LO), SubPD(subPD), flushed(false) {} + +PathDiagnosticClient* +ento::createPlistDiagnosticClient(const std::string& s, const Preprocessor &PP, + PathDiagnosticClient *subPD) { + return new PlistDiagnostics(s, PP.getLangOptions(), subPD); +} + +PathDiagnosticClient::PathGenerationScheme +PlistDiagnostics::getGenerationScheme() const { + if (const PathDiagnosticClient *PD = SubPD.get()) + return PD->getGenerationScheme(); + + return Extensive; +} + +static void AddFID(FIDMap &FIDs, llvm::SmallVectorImpl<FileID> &V, + const SourceManager* SM, SourceLocation L) { + + FileID FID = SM->getFileID(SM->getInstantiationLoc(L)); + FIDMap::iterator I = FIDs.find(FID); + if (I != FIDs.end()) return; + FIDs[FID] = V.size(); + V.push_back(FID); +} + +static unsigned GetFID(const FIDMap& FIDs, const SourceManager &SM, + SourceLocation L) { + FileID FID = SM.getFileID(SM.getInstantiationLoc(L)); + FIDMap::const_iterator I = FIDs.find(FID); + assert(I != FIDs.end()); + return I->second; +} + +static llvm::raw_ostream& Indent(llvm::raw_ostream& o, const unsigned indent) { + for (unsigned i = 0; i < indent; ++i) o << ' '; + return o; +} + +static void EmitLocation(llvm::raw_ostream& o, const SourceManager &SM, + const LangOptions &LangOpts, + SourceLocation L, const FIDMap &FM, + unsigned indent, bool extend = false) { + + FullSourceLoc Loc(SM.getInstantiationLoc(L), const_cast<SourceManager&>(SM)); + + // Add in the length of the token, so that we cover multi-char tokens. + unsigned offset = + extend ? 
+             Lexer::MeasureTokenLength(Loc, SM, LangOpts) - 1 : 0;
+
+  Indent(o, indent) << "<dict>\n";
+  Indent(o, indent) << " <key>line</key><integer>"
+                    << Loc.getInstantiationLineNumber() << "</integer>\n";
+  Indent(o, indent) << " <key>col</key><integer>"
+                    << Loc.getInstantiationColumnNumber() + offset << "</integer>\n";
+  Indent(o, indent) << " <key>file</key><integer>"
+                    << GetFID(FM, SM, Loc) << "</integer>\n";
+  Indent(o, indent) << "</dict>\n";
+}
+
+static void EmitLocation(llvm::raw_ostream& o, const SourceManager &SM,
+                         const LangOptions &LangOpts,
+                         const PathDiagnosticLocation &L, const FIDMap& FM,
+                         unsigned indent, bool extend = false) {
+  EmitLocation(o, SM, LangOpts, L.asLocation(), FM, indent, extend);
+}
+
+static void EmitRange(llvm::raw_ostream& o, const SourceManager &SM,
+                      const LangOptions &LangOpts,
+                      PathDiagnosticRange R, const FIDMap &FM,
+                      unsigned indent) {
+  Indent(o, indent) << "<array>\n";
+  EmitLocation(o, SM, LangOpts, R.getBegin(), FM, indent+1);
+  EmitLocation(o, SM, LangOpts, R.getEnd(), FM, indent+1, !R.isPoint);
+  Indent(o, indent) << "</array>\n";
+}
+
+static llvm::raw_ostream& EmitString(llvm::raw_ostream& o,
+                                     const std::string& s) {
+  o << "<string>";
+  for (std::string::const_iterator I=s.begin(), E=s.end(); I!=E; ++I) {
+    char c = *I;
+    switch (c) {
+    default:   o << c; break;
+    case '&':  o << "&amp;"; break;
+    case '<':  o << "&lt;"; break;
+    case '>':  o << "&gt;"; break;
+    case '\'': o << "&apos;"; break;
+    case '\"': o << "&quot;"; break;
+    }
+  }
+  o << "</string>";
+  return o;
+}
+
+static void ReportControlFlow(llvm::raw_ostream& o,
+                              const PathDiagnosticControlFlowPiece& P,
+                              const FIDMap& FM,
+                              const SourceManager &SM,
+                              const LangOptions &LangOpts,
+                              unsigned indent) {
+
+  Indent(o, indent) << "<dict>\n";
+  ++indent;
+
+  Indent(o, indent) << "<key>kind</key><string>control</string>\n";
+
+  // Emit edges.
+  Indent(o, indent) << "<key>edges</key>\n";
+  ++indent;
+  Indent(o, indent) << "<array>\n";
+  ++indent;
+  for (PathDiagnosticControlFlowPiece::const_iterator I=P.begin(), E=P.end();
+       I!=E; ++I) {
+    Indent(o, indent) << "<dict>\n";
+    ++indent;
+    Indent(o, indent) << "<key>start</key>\n";
+    EmitRange(o, SM, LangOpts, I->getStart().asRange(), FM, indent+1);
+    Indent(o, indent) << "<key>end</key>\n";
+    EmitRange(o, SM, LangOpts, I->getEnd().asRange(), FM, indent+1);
+    --indent;
+    Indent(o, indent) << "</dict>\n";
+  }
+  --indent;
+  Indent(o, indent) << "</array>\n";
+  --indent;
+
+  // Output any helper text.
+  const std::string& s = P.getString();
+  if (!s.empty()) {
+    Indent(o, indent) << "<key>alternate</key>";
+    EmitString(o, s) << '\n';
+  }
+
+  --indent;
+  Indent(o, indent) << "</dict>\n";
+}
+
+static void ReportEvent(llvm::raw_ostream& o, const PathDiagnosticPiece& P,
+                        const FIDMap& FM,
+                        const SourceManager &SM,
+                        const LangOptions &LangOpts,
+                        unsigned indent) {
+
+  Indent(o, indent) << "<dict>\n";
+  ++indent;
+
+  Indent(o, indent) << "<key>kind</key><string>event</string>\n";
+
+  // Output the location.
+  FullSourceLoc L = P.getLocation().asLocation();
+
+  Indent(o, indent) << "<key>location</key>\n";
+  EmitLocation(o, SM, LangOpts, L, FM, indent);
+
+  // Output the ranges (if any).
+  PathDiagnosticPiece::range_iterator RI = P.ranges_begin(),
+                                      RE = P.ranges_end();
+
+  if (RI != RE) {
+    Indent(o, indent) << "<key>ranges</key>\n";
+    Indent(o, indent) << "<array>\n";
+    ++indent;
+    for (; RI != RE; ++RI)
+      EmitRange(o, SM, LangOpts, *RI, FM, indent+1);
+    --indent;
+    Indent(o, indent) << "</array>\n";
+  }
+
+  // Output the text.
+  assert(!P.getString().empty());
+  Indent(o, indent) << "<key>extended_message</key>\n";
+  Indent(o, indent);
+  EmitString(o, P.getString()) << '\n';
+
+  // Output the short text.
+  // FIXME: Really use a short string.
+  Indent(o, indent) << "<key>message</key>\n";
+  EmitString(o, P.getString()) << '\n';
+
+  // Finish up.
+  --indent;
+  Indent(o, indent); o << "</dict>\n";
+}
+
+static void ReportMacro(llvm::raw_ostream& o,
+                        const PathDiagnosticMacroPiece& P,
+                        const FIDMap& FM, const SourceManager &SM,
+                        const LangOptions &LangOpts,
+                        unsigned indent) {
+
+  for (PathDiagnosticMacroPiece::const_iterator I=P.begin(), E=P.end();
+       I!=E; ++I) {
+
+    switch ((*I)->getKind()) {
+    default:
+      break;
+    case PathDiagnosticPiece::Event:
+      ReportEvent(o, cast<PathDiagnosticEventPiece>(**I), FM, SM, LangOpts,
+                  indent);
+      break;
+    case PathDiagnosticPiece::Macro:
+      ReportMacro(o, cast<PathDiagnosticMacroPiece>(**I), FM, SM, LangOpts,
+                  indent);
+      break;
+    }
+  }
+}
+
+static void ReportDiag(llvm::raw_ostream& o, const PathDiagnosticPiece& P,
+                       const FIDMap& FM, const SourceManager &SM,
+                       const LangOptions &LangOpts) {
+
+  unsigned indent = 4;
+
+  switch (P.getKind()) {
+  case PathDiagnosticPiece::ControlFlow:
+    ReportControlFlow(o, cast<PathDiagnosticControlFlowPiece>(P), FM, SM,
+                      LangOpts, indent);
+    break;
+  case PathDiagnosticPiece::Event:
+    ReportEvent(o, cast<PathDiagnosticEventPiece>(P), FM, SM, LangOpts,
+                indent);
+    break;
+  case PathDiagnosticPiece::Macro:
+    ReportMacro(o, cast<PathDiagnosticMacroPiece>(P), FM, SM, LangOpts,
+                indent);
+    break;
+  }
+}
+
+void PlistDiagnostics::HandlePathDiagnostic(const PathDiagnostic* D) {
+  if (!D)
+    return;
+
+  if (D->empty()) {
+    delete D;
+    return;
+  }
+
+  // We need to flatten the locations (convert Stmt* to locations) because
+  // the referenced statements may be freed by the time the diagnostics
+  // are emitted.
+  const_cast<PathDiagnostic*>(D)->flattenLocations();
+  BatchedDiags.push_back(D);
+}
+
+void PlistDiagnostics::FlushDiagnostics(llvm::SmallVectorImpl<std::string>
+                                        *FilesMade) {
+
+  if (flushed)
+    return;
+
+  flushed = true;
+
+  // Sort the diagnostics so that they are always emitted in a deterministic
+  // order.
+  if (!BatchedDiags.empty())
+    std::sort(BatchedDiags.begin(), BatchedDiags.end(), CompareDiagnostics());
+
+  // Build up a set of FIDs that we use by scanning the locations and
+  // ranges of the diagnostics.
+  FIDMap FM;
+  llvm::SmallVector<FileID, 10> Fids;
+  const SourceManager* SM = 0;
+
+  if (!BatchedDiags.empty())
+    SM = &(*BatchedDiags.begin())->begin()->getLocation().getManager();
+
+  for (std::vector<const PathDiagnostic*>::iterator DI = BatchedDiags.begin(),
+       DE = BatchedDiags.end(); DI != DE; ++DI) {
+
+    const PathDiagnostic *D = *DI;
+
+    for (PathDiagnostic::const_iterator I=D->begin(), E=D->end(); I!=E; ++I) {
+      AddFID(FM, Fids, SM, I->getLocation().asLocation());
+
+      for (PathDiagnosticPiece::range_iterator RI=I->ranges_begin(),
+           RE=I->ranges_end(); RI!=RE; ++RI) {
+        AddFID(FM, Fids, SM, RI->getBegin());
+        AddFID(FM, Fids, SM, RI->getEnd());
+      }
+    }
+  }
+
+  // Open the file.
+  std::string ErrMsg;
+  llvm::raw_fd_ostream o(OutputFile.c_str(), ErrMsg);
+  if (!ErrMsg.empty()) {
+    llvm::errs() << "warning: could not create file: " << OutputFile << '\n';
+    return;
+  }
+
+  // Write the plist header.
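+  // The layout produced below is, schematically:
+  //   <plist><dict>
+  //     <key>files</key>       <array of file paths, indexed by FID>
+  //     <key>diagnostics</key> <array of dicts: path, description,
+  //                             category, type, location>
+  //   </dict></plist>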
+ o << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + "<!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\" " + "\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n" + "<plist version=\"1.0\">\n"; + + // Write the root object: a <dict> containing... + // - "files", an <array> mapping from FIDs to file names + // - "diagnostics", an <array> containing the path diagnostics + o << "<dict>\n" + " <key>files</key>\n" + " <array>\n"; + + for (llvm::SmallVectorImpl<FileID>::iterator I=Fids.begin(), E=Fids.end(); + I!=E; ++I) { + o << " "; + EmitString(o, SM->getFileEntryForID(*I)->getName()) << '\n'; + } + + o << " </array>\n" + " <key>diagnostics</key>\n" + " <array>\n"; + + for (std::vector<const PathDiagnostic*>::iterator DI=BatchedDiags.begin(), + DE = BatchedDiags.end(); DI!=DE; ++DI) { + + o << " <dict>\n" + " <key>path</key>\n"; + + const PathDiagnostic *D = *DI; + // Create an owning smart pointer for 'D' just so that we auto-free it + // when we exit this method. + llvm::OwningPtr<PathDiagnostic> OwnedD(const_cast<PathDiagnostic*>(D)); + + o << " <array>\n"; + + for (PathDiagnostic::const_iterator I=D->begin(), E=D->end(); I != E; ++I) + ReportDiag(o, *I, FM, *SM, LangOpts); + + o << " </array>\n"; + + // Output the bug type and bug category. + o << " <key>description</key>"; + EmitString(o, D->getDescription()) << '\n'; + o << " <key>category</key>"; + EmitString(o, D->getCategory()) << '\n'; + o << " <key>type</key>"; + EmitString(o, D->getBugType()) << '\n'; + + // Output the location of the bug. + o << " <key>location</key>\n"; + EmitLocation(o, *SM, LangOpts, D->getLocation(), FM, 2); + + // Output the diagnostic to the sub-diagnostic client, if any. + if (SubPD) { + SubPD->HandlePathDiagnostic(OwnedD.take()); + llvm::SmallVector<std::string, 1> SubFilesMade; + SubPD->FlushDiagnostics(SubFilesMade); + + if (!SubFilesMade.empty()) { + o << " <key>" << SubPD->getName() << "_files</key>\n"; + o << " <array>\n"; + for (size_t i = 0, n = SubFilesMade.size(); i < n ; ++i) + o << " <string>" << SubFilesMade[i] << "</string>\n"; + o << " </array>\n"; + } + } + + // Close up the entry. + o << " </dict>\n"; + } + + o << " </array>\n"; + + // Finish. + o << "</dict>\n</plist>"; + + if (FilesMade) + FilesMade->push_back(OutputFile); + + BatchedDiags.clear(); +} diff --git a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp new file mode 100644 index 0000000..389fff5 --- /dev/null +++ b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp @@ -0,0 +1,441 @@ +//== RangeConstraintManager.cpp - Manage range constraints.------*- C++ -*--==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines RangeConstraintManager, a class that tracks simple +// equality and inequality constraints on symbolic values of GRState. 
+//
+//===----------------------------------------------------------------------===//
+
+#include "SimpleConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/TransferFuncs.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableSet.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace { class ConstraintRange {}; }
+static int ConstraintRangeIndex = 0;
+
+/// A Range represents the closed range [from, to]. The caller must
+/// guarantee that from <= to. Note that Range is immutable, so as not
+/// to subvert RangeSet's immutability.
+namespace {
+class Range : public std::pair<const llvm::APSInt*,
+                               const llvm::APSInt*> {
+public:
+  Range(const llvm::APSInt &from, const llvm::APSInt &to)
+    : std::pair<const llvm::APSInt*, const llvm::APSInt*>(&from, &to) {
+    assert(from <= to);
+  }
+  bool Includes(const llvm::APSInt &v) const {
+    return *first <= v && v <= *second;
+  }
+  const llvm::APSInt &From() const {
+    return *first;
+  }
+  const llvm::APSInt &To() const {
+    return *second;
+  }
+  const llvm::APSInt *getConcreteValue() const {
+    return &From() == &To() ? &From() : NULL;
+  }
+
+  void Profile(llvm::FoldingSetNodeID &ID) const {
+    ID.AddPointer(&From());
+    ID.AddPointer(&To());
+  }
+};
+
+
+class RangeTrait : public llvm::ImutContainerInfo<Range> {
+public:
+  // When comparing if one Range is less than another, we should compare
+  // the actual APSInt values instead of their pointers. This keeps the order
+  // consistent (instead of comparing by pointer values) and can potentially
+  // be used to speed up some of the operations in RangeSet.
+  static inline bool isLess(key_type_ref lhs, key_type_ref rhs) {
+    return *lhs.first < *rhs.first || (!(*rhs.first < *lhs.first) &&
+                                       *lhs.second < *rhs.second);
+  }
+};
+
+/// RangeSet contains a set of ranges. If the set is empty, then the value
+/// of a symbol is overly constrained and there are no possible values for
+/// that symbol.
+class RangeSet {
+  typedef llvm::ImmutableSet<Range, RangeTrait> PrimRangeSet;
+  PrimRangeSet ranges; // no need to make const, since it is an
+                       // ImmutableSet - this allows default operator=
+                       // to work.
+public:
+  typedef PrimRangeSet::Factory Factory;
+  typedef PrimRangeSet::iterator iterator;
+
+  RangeSet(PrimRangeSet RS) : ranges(RS) {}
+
+  iterator begin() const { return ranges.begin(); }
+  iterator end() const { return ranges.end(); }
+
+  bool isEmpty() const { return ranges.isEmpty(); }
+
+  /// Construct a new RangeSet representing '{ [from, to] }'.
+  RangeSet(Factory &F, const llvm::APSInt &from, const llvm::APSInt &to)
+    : ranges(F.add(F.getEmptySet(), Range(from, to))) {}
+
+  /// Profile - Generates a hash profile of this RangeSet for use
+  /// by FoldingSet.
+  void Profile(llvm::FoldingSetNodeID &ID) const { ranges.Profile(ID); }
+
+  /// getConcreteValue - If a symbol is constrained to equal a specific integer
+  /// constant then this method returns that value. Otherwise, it returns
+  /// NULL.
+  const llvm::APSInt* getConcreteValue() const {
+    return ranges.isSingleton() ?
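+           // (a singleton set {[v, v]} pins the symbol to exactly v)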
+           ranges.begin()->getConcreteValue() : 0;
+  }
+
+private:
+  void IntersectInRange(BasicValueFactory &BV, Factory &F,
+                        const llvm::APSInt &Lower,
+                        const llvm::APSInt &Upper,
+                        PrimRangeSet &newRanges,
+                        PrimRangeSet::iterator &i,
+                        PrimRangeSet::iterator &e) const {
+    // There are six cases for each range R in the set:
+    //   1. R is entirely before the intersection range.
+    //   2. R is entirely after the intersection range.
+    //   3. R contains the entire intersection range.
+    //   4. R starts before the intersection range and ends in the middle.
+    //   5. R starts in the middle of the intersection range and ends after it.
+    //   6. R is entirely contained in the intersection range.
+    // These correspond to each of the conditions below.
+    for (/* i = begin(), e = end() */; i != e; ++i) {
+      if (i->To() < Lower) {
+        continue;
+      }
+      if (i->From() > Upper) {
+        break;
+      }
+
+      if (i->Includes(Lower)) {
+        if (i->Includes(Upper)) {
+          newRanges = F.add(newRanges, Range(BV.getValue(Lower),
+                                             BV.getValue(Upper)));
+          break;
+        } else
+          newRanges = F.add(newRanges, Range(BV.getValue(Lower), i->To()));
+      } else {
+        if (i->Includes(Upper)) {
+          newRanges = F.add(newRanges, Range(i->From(), BV.getValue(Upper)));
+          break;
+        } else
+          newRanges = F.add(newRanges, *i);
+      }
+    }
+  }
+
+public:
+  // Returns a set containing the values in the receiving set, intersected with
+  // the closed range [Lower, Upper]. Unlike the Range type, this range uses
+  // modular arithmetic, corresponding to the common treatment of C integer
+  // overflow. Thus, if the Lower bound is greater than the Upper bound, the
+  // range is taken to wrap around. This is equivalent to taking the
+  // intersection with the two ranges [Min, Upper] and [Lower, Max],
+  // or, alternatively, /removing/ all integers between Upper and Lower.
+  RangeSet Intersect(BasicValueFactory &BV, Factory &F,
+                     const llvm::APSInt &Lower,
+                     const llvm::APSInt &Upper) const {
+    PrimRangeSet newRanges = F.getEmptySet();
+
+    PrimRangeSet::iterator i = begin(), e = end();
+    if (Lower <= Upper)
+      IntersectInRange(BV, F, Lower, Upper, newRanges, i, e);
+    else {
+      // The order of the next two statements is important!
+      // IntersectInRange() does not reset the iteration state for i and e.
+      // Therefore, the lower range must be handled first.
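+      // E.g. a wraparound request [UINT_MAX-1, 2] is processed as [Min, 2]
+      // first and then [UINT_MAX-1, Max], so i and e only ever move forward.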
+ IntersectInRange(BV, F, BV.getMinValue(Upper), Upper, newRanges, i, e); + IntersectInRange(BV, F, Lower, BV.getMaxValue(Lower), newRanges, i, e); + } + return newRanges; + } + + void print(llvm::raw_ostream &os) const { + bool isFirst = true; + os << "{ "; + for (iterator i = begin(), e = end(); i != e; ++i) { + if (isFirst) + isFirst = false; + else + os << ", "; + + os << '[' << i->From().toString(10) << ", " << i->To().toString(10) + << ']'; + } + os << " }"; + } + + bool operator==(const RangeSet &other) const { + return ranges == other.ranges; + } +}; +} // end anonymous namespace + +typedef llvm::ImmutableMap<SymbolRef,RangeSet> ConstraintRangeTy; + +namespace clang { +namespace ento { +template<> +struct GRStateTrait<ConstraintRange> + : public GRStatePartialTrait<ConstraintRangeTy> { + static inline void* GDMIndex() { return &ConstraintRangeIndex; } +}; +} +} + +namespace { +class RangeConstraintManager : public SimpleConstraintManager{ + RangeSet GetRange(const GRState *state, SymbolRef sym); +public: + RangeConstraintManager(SubEngine &subengine) + : SimpleConstraintManager(subengine) {} + + const GRState *assumeSymNE(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment); + + const GRState *assumeSymEQ(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment); + + const GRState *assumeSymLT(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment); + + const GRState *assumeSymGT(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment); + + const GRState *assumeSymGE(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment); + + const GRState *assumeSymLE(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment); + + const llvm::APSInt* getSymVal(const GRState* St, SymbolRef sym) const; + + // FIXME: Refactor into SimpleConstraintManager? + bool isEqual(const GRState* St, SymbolRef sym, const llvm::APSInt& V) const { + const llvm::APSInt *i = getSymVal(St, sym); + return i ? *i == V : false; + } + + const GRState* removeDeadBindings(const GRState* St, SymbolReaper& SymReaper); + + void print(const GRState* St, llvm::raw_ostream& Out, + const char* nl, const char *sep); + +private: + RangeSet::Factory F; +}; + +} // end anonymous namespace + +ConstraintManager* ento::CreateRangeConstraintManager(GRStateManager&, + SubEngine &subeng) { + return new RangeConstraintManager(subeng); +} + +const llvm::APSInt* RangeConstraintManager::getSymVal(const GRState* St, + SymbolRef sym) const { + const ConstraintRangeTy::data_type *T = St->get<ConstraintRange>(sym); + return T ? T->getConcreteValue() : NULL; +} + +/// Scan all symbols referenced by the constraints. If the symbol is not alive +/// as marked in LSymbols, mark it as dead in DSymbols. 
+const GRState* +RangeConstraintManager::removeDeadBindings(const GRState* state, + SymbolReaper& SymReaper) { + + ConstraintRangeTy CR = state->get<ConstraintRange>(); + ConstraintRangeTy::Factory& CRFactory = state->get_context<ConstraintRange>(); + + for (ConstraintRangeTy::iterator I = CR.begin(), E = CR.end(); I != E; ++I) { + SymbolRef sym = I.getKey(); + if (SymReaper.maybeDead(sym)) + CR = CRFactory.remove(CR, sym); + } + + return state->set<ConstraintRange>(CR); +} + +RangeSet +RangeConstraintManager::GetRange(const GRState *state, SymbolRef sym) { + if (ConstraintRangeTy::data_type* V = state->get<ConstraintRange>(sym)) + return *V; + + // Lazily generate a new RangeSet representing all possible values for the + // given symbol type. + QualType T = state->getSymbolManager().getType(sym); + BasicValueFactory& BV = state->getBasicVals(); + return RangeSet(F, BV.getMinValue(T), BV.getMaxValue(T)); +} + +//===------------------------------------------------------------------------=== +// assumeSymX methods: public interface for RangeConstraintManager. +//===------------------------------------------------------------------------===/ + +// The syntax for ranges below is mathematical, using [x, y] for closed ranges +// and (x, y) for open ranges. These ranges are modular, corresponding with +// a common treatment of C integer overflow. This means that these methods +// do not have to worry about overflow; RangeSet::Intersect can handle such a +// "wraparound" range. +// As an example, the range [UINT_MAX-1, 3) contains five values: UINT_MAX-1, +// UINT_MAX, 0, 1, and 2. + +const GRState* +RangeConstraintManager::assumeSymNE(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment) { + BasicValueFactory &BV = state->getBasicVals(); + + llvm::APSInt Lower = Int-Adjustment; + llvm::APSInt Upper = Lower; + --Lower; + ++Upper; + + // [Int-Adjustment+1, Int-Adjustment-1] + // Notice that the lower bound is greater than the upper bound. + RangeSet New = GetRange(state, sym).Intersect(BV, F, Upper, Lower); + return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New); +} + +const GRState* +RangeConstraintManager::assumeSymEQ(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment) { + // [Int-Adjustment, Int-Adjustment] + BasicValueFactory &BV = state->getBasicVals(); + llvm::APSInt AdjInt = Int-Adjustment; + RangeSet New = GetRange(state, sym).Intersect(BV, F, AdjInt, AdjInt); + return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New); +} + +const GRState* +RangeConstraintManager::assumeSymLT(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment) { + BasicValueFactory &BV = state->getBasicVals(); + + QualType T = state->getSymbolManager().getType(sym); + const llvm::APSInt &Min = BV.getMinValue(T); + + // Special case for Int == Min. This is always false. + if (Int == Min) + return NULL; + + llvm::APSInt Lower = Min-Adjustment; + llvm::APSInt Upper = Int-Adjustment; + --Upper; + + RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper); + return New.isEmpty() ? 
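+         // (an empty intersection means this assumption is infeasible)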
+              NULL : state->set<ConstraintRange>(sym, New);
+}
+
+const GRState*
+RangeConstraintManager::assumeSymGT(const GRState* state, SymbolRef sym,
+                                    const llvm::APSInt& Int,
+                                    const llvm::APSInt& Adjustment) {
+  BasicValueFactory &BV = state->getBasicVals();
+
+  QualType T = state->getSymbolManager().getType(sym);
+  const llvm::APSInt &Max = BV.getMaxValue(T);
+
+  // Special case for Int == Max. This is always false.
+  if (Int == Max)
+    return NULL;
+
+  llvm::APSInt Lower = Int-Adjustment;
+  llvm::APSInt Upper = Max-Adjustment;
+  ++Lower;
+
+  RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
+  return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
+
+const GRState*
+RangeConstraintManager::assumeSymGE(const GRState* state, SymbolRef sym,
+                                    const llvm::APSInt& Int,
+                                    const llvm::APSInt& Adjustment) {
+  BasicValueFactory &BV = state->getBasicVals();
+
+  QualType T = state->getSymbolManager().getType(sym);
+  const llvm::APSInt &Min = BV.getMinValue(T);
+
+  // Special case for Int == Min. This is always feasible.
+  if (Int == Min)
+    return state;
+
+  const llvm::APSInt &Max = BV.getMaxValue(T);
+
+  llvm::APSInt Lower = Int-Adjustment;
+  llvm::APSInt Upper = Max-Adjustment;
+
+  RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
+  return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
+
+const GRState*
+RangeConstraintManager::assumeSymLE(const GRState* state, SymbolRef sym,
+                                    const llvm::APSInt& Int,
+                                    const llvm::APSInt& Adjustment) {
+  BasicValueFactory &BV = state->getBasicVals();
+
+  QualType T = state->getSymbolManager().getType(sym);
+  const llvm::APSInt &Max = BV.getMaxValue(T);
+
+  // Special case for Int == Max. This is always feasible.
+  if (Int == Max)
+    return state;
+
+  const llvm::APSInt &Min = BV.getMinValue(T);
+
+  llvm::APSInt Lower = Min-Adjustment;
+  llvm::APSInt Upper = Int-Adjustment;
+
+  RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
+  return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
+
+//===------------------------------------------------------------------------===
+// Pretty-printing.
+//===------------------------------------------------------------------------===/
+
+void RangeConstraintManager::print(const GRState* St, llvm::raw_ostream& Out,
+                                   const char* nl, const char *sep) {
+
+  ConstraintRangeTy Ranges = St->get<ConstraintRange>();
+
+  if (Ranges.isEmpty())
+    return;
+
+  Out << nl << sep << "ranges of symbol values:";
+
+  for (ConstraintRangeTy::iterator I=Ranges.begin(), E=Ranges.end(); I!=E; ++I){
+    Out << nl << ' ' << I.getKey() << " : ";
+    I.getData().print(Out);
+  }
+}
diff --git a/lib/StaticAnalyzer/Core/RegionStore.cpp b/lib/StaticAnalyzer/Core/RegionStore.cpp
new file mode 100644
index 0000000..19e0e12
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -0,0 +1,1813 @@
+//== RegionStore.cpp - Field-sensitive store model --------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a basic region store model. In this model, we do have
+// field sensitivity. But we assume nothing about the heap shape. So recursive
+// data structures are largely ignored. Basically we do 1-limiting analysis.
+// Parameter pointers are assumed not to alias. Pointee objects of
+// parameters are created lazily.
+// +//===----------------------------------------------------------------------===// +#include "clang/AST/CharUnits.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/ExprCXX.h" +#include "clang/Analysis/Analyses/LiveVariables.h" +#include "clang/Analysis/AnalysisContext.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h" +#include "llvm/ADT/ImmutableList.h" +#include "llvm/ADT/ImmutableMap.h" +#include "llvm/ADT/Optional.h" +#include "llvm/Support/raw_ostream.h" + +using namespace clang; +using namespace ento; +using llvm::Optional; + +//===----------------------------------------------------------------------===// +// Representation of binding keys. +//===----------------------------------------------------------------------===// + +namespace { +class BindingKey { +public: + enum Kind { Direct = 0x0, Default = 0x1 }; +private: + llvm ::PointerIntPair<const MemRegion*, 1> P; + uint64_t Offset; + + explicit BindingKey(const MemRegion *r, uint64_t offset, Kind k) + : P(r, (unsigned) k), Offset(offset) {} +public: + + bool isDirect() const { return P.getInt() == Direct; } + + const MemRegion *getRegion() const { return P.getPointer(); } + uint64_t getOffset() const { return Offset; } + + void Profile(llvm::FoldingSetNodeID& ID) const { + ID.AddPointer(P.getOpaqueValue()); + ID.AddInteger(Offset); + } + + static BindingKey Make(const MemRegion *R, Kind k); + + bool operator<(const BindingKey &X) const { + if (P.getOpaqueValue() < X.P.getOpaqueValue()) + return true; + if (P.getOpaqueValue() > X.P.getOpaqueValue()) + return false; + return Offset < X.Offset; + } + + bool operator==(const BindingKey &X) const { + return P.getOpaqueValue() == X.P.getOpaqueValue() && + Offset == X.Offset; + } + + bool isValid() const { + return getRegion() != NULL; + } +}; +} // end anonymous namespace + +BindingKey BindingKey::Make(const MemRegion *R, Kind k) { + if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) { + const RegionRawOffset &O = ER->getAsArrayOffset(); + + // FIXME: There are some ElementRegions for which we cannot compute + // raw offsets yet, including regions with symbolic offsets. These will be + // ignored by the store. + return BindingKey(O.getRegion(), O.getOffset().getQuantity(), k); + } + + return BindingKey(R, 0, k); +} + +namespace llvm { + static inline + llvm::raw_ostream& operator<<(llvm::raw_ostream& os, BindingKey K) { + os << '(' << K.getRegion() << ',' << K.getOffset() + << ',' << (K.isDirect() ? "direct" : "default") + << ')'; + return os; + } +} // end llvm namespace + +//===----------------------------------------------------------------------===// +// Actual Store type. +//===----------------------------------------------------------------------===// + +typedef llvm::ImmutableMap<BindingKey, SVal> RegionBindings; + +//===----------------------------------------------------------------------===// +// Fine-grained control of RegionStoreManager. 
+//===----------------------------------------------------------------------===// + +namespace { +struct minimal_features_tag {}; +struct maximal_features_tag {}; + +class RegionStoreFeatures { + bool SupportsFields; +public: + RegionStoreFeatures(minimal_features_tag) : + SupportsFields(false) {} + + RegionStoreFeatures(maximal_features_tag) : + SupportsFields(true) {} + + void enableFields(bool t) { SupportsFields = t; } + + bool supportsFields() const { return SupportsFields; } +}; +} + +//===----------------------------------------------------------------------===// +// Main RegionStore logic. +//===----------------------------------------------------------------------===// + +namespace { + +class RegionStoreSubRegionMap : public SubRegionMap { +public: + typedef llvm::ImmutableSet<const MemRegion*> Set; + typedef llvm::DenseMap<const MemRegion*, Set> Map; +private: + Set::Factory F; + Map M; +public: + bool add(const MemRegion* Parent, const MemRegion* SubRegion) { + Map::iterator I = M.find(Parent); + + if (I == M.end()) { + M.insert(std::make_pair(Parent, F.add(F.getEmptySet(), SubRegion))); + return true; + } + + I->second = F.add(I->second, SubRegion); + return false; + } + + void process(llvm::SmallVectorImpl<const SubRegion*> &WL, const SubRegion *R); + + ~RegionStoreSubRegionMap() {} + + const Set *getSubRegions(const MemRegion *Parent) const { + Map::const_iterator I = M.find(Parent); + return I == M.end() ? NULL : &I->second; + } + + bool iterSubRegions(const MemRegion* Parent, Visitor& V) const { + Map::const_iterator I = M.find(Parent); + + if (I == M.end()) + return true; + + Set S = I->second; + for (Set::iterator SI=S.begin(),SE=S.end(); SI != SE; ++SI) { + if (!V.Visit(Parent, *SI)) + return false; + } + + return true; + } +}; + +void +RegionStoreSubRegionMap::process(llvm::SmallVectorImpl<const SubRegion*> &WL, + const SubRegion *R) { + const MemRegion *superR = R->getSuperRegion(); + if (add(superR, R)) + if (const SubRegion *sr = dyn_cast<SubRegion>(superR)) + WL.push_back(sr); +} + +class RegionStoreManager : public StoreManager { + const RegionStoreFeatures Features; + RegionBindings::Factory RBFactory; + +public: + RegionStoreManager(GRStateManager& mgr, const RegionStoreFeatures &f) + : StoreManager(mgr), + Features(f), + RBFactory(mgr.getAllocator()) {} + + SubRegionMap *getSubRegionMap(Store store) { + return getRegionStoreSubRegionMap(store); + } + + RegionStoreSubRegionMap *getRegionStoreSubRegionMap(Store store); + + Optional<SVal> getDirectBinding(RegionBindings B, const MemRegion *R); + /// getDefaultBinding - Returns an SVal* representing an optional default + /// binding associated with a region and its subregions. + Optional<SVal> getDefaultBinding(RegionBindings B, const MemRegion *R); + + /// setImplicitDefaultValue - Set the default binding for the provided + /// MemRegion to the value implicitly defined for compound literals when + /// the value is not specified. + StoreRef setImplicitDefaultValue(Store store, const MemRegion *R, QualType T); + + /// ArrayToPointer - Emulates the "decay" of an array to a pointer + /// type. 'Array' represents the lvalue of the array being decayed + /// to a pointer, and the returned SVal represents the decayed + /// version of that lvalue (i.e., a pointer to the first element of + /// the array). This is called by ExprEngine when evaluating + /// casts from arrays to pointers. + SVal ArrayToPointer(Loc Array); + + /// For DerivedToBase casts, create a CXXBaseObjectRegion and return it. 
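+  /// (E.g. an implicit Derived* -> Base* conversion wraps the pointee region
+  /// in a CXXBaseObjectRegion for the Base subobject.)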
+ virtual SVal evalDerivedToBase(SVal derived, QualType basePtrType); + + StoreRef getInitialStore(const LocationContext *InitLoc) { + return StoreRef(RBFactory.getEmptyMap().getRootWithoutRetain(), *this); + } + + //===-------------------------------------------------------------------===// + // Binding values to regions. + //===-------------------------------------------------------------------===// + + StoreRef invalidateRegions(Store store, + const MemRegion * const *Begin, + const MemRegion * const *End, + const Expr *E, unsigned Count, + InvalidatedSymbols *IS, + bool invalidateGlobals, + InvalidatedRegions *Regions); + +public: // Made public for helper classes. + + void RemoveSubRegionBindings(RegionBindings &B, const MemRegion *R, + RegionStoreSubRegionMap &M); + + RegionBindings addBinding(RegionBindings B, BindingKey K, SVal V); + + RegionBindings addBinding(RegionBindings B, const MemRegion *R, + BindingKey::Kind k, SVal V); + + const SVal *lookup(RegionBindings B, BindingKey K); + const SVal *lookup(RegionBindings B, const MemRegion *R, BindingKey::Kind k); + + RegionBindings removeBinding(RegionBindings B, BindingKey K); + RegionBindings removeBinding(RegionBindings B, const MemRegion *R, + BindingKey::Kind k); + + RegionBindings removeBinding(RegionBindings B, const MemRegion *R) { + return removeBinding(removeBinding(B, R, BindingKey::Direct), R, + BindingKey::Default); + } + +public: // Part of public interface to class. + + StoreRef Bind(Store store, Loc LV, SVal V); + + // BindDefault is only used to initialize a region with a default value. + StoreRef BindDefault(Store store, const MemRegion *R, SVal V) { + RegionBindings B = GetRegionBindings(store); + assert(!lookup(B, R, BindingKey::Default)); + assert(!lookup(B, R, BindingKey::Direct)); + return StoreRef(addBinding(B, R, BindingKey::Default, V).getRootWithoutRetain(), *this); + } + + StoreRef BindCompoundLiteral(Store store, const CompoundLiteralExpr* CL, + const LocationContext *LC, SVal V); + + StoreRef BindDecl(Store store, const VarRegion *VR, SVal InitVal); + + StoreRef BindDeclWithNoInit(Store store, const VarRegion *) { + return StoreRef(store, *this); + } + + /// BindStruct - Bind a compound value to a structure. + StoreRef BindStruct(Store store, const TypedRegion* R, SVal V); + + StoreRef BindArray(Store store, const TypedRegion* R, SVal V); + + /// KillStruct - Set the entire struct to unknown. + StoreRef KillStruct(Store store, const TypedRegion* R, SVal DefaultVal); + + StoreRef Remove(Store store, Loc LV); + + void incrementReferenceCount(Store store) { + GetRegionBindings(store).manualRetain(); + } + + /// If the StoreManager supports it, decrement the reference count of + /// the specified Store object. If the reference count hits 0, the memory + /// associated with the object is recycled. + void decrementReferenceCount(Store store) { + GetRegionBindings(store).manualRelease(); + } + + //===------------------------------------------------------------------===// + // Loading values from regions. 
+ //===------------------------------------------------------------------===// + + /// The high level logic for this method is this: + /// Retrieve (L) + /// if L has binding + /// return L's binding + /// else if L is in killset + /// return unknown + /// else + /// if L is on stack or heap + /// return undefined + /// else + /// return symbolic + SVal Retrieve(Store store, Loc L, QualType T = QualType()); + + SVal RetrieveElement(Store store, const ElementRegion *R); + + SVal RetrieveField(Store store, const FieldRegion *R); + + SVal RetrieveObjCIvar(Store store, const ObjCIvarRegion *R); + + SVal RetrieveVar(Store store, const VarRegion *R); + + SVal RetrieveLazySymbol(const TypedRegion *R); + + SVal RetrieveFieldOrElementCommon(Store store, const TypedRegion *R, + QualType Ty, const MemRegion *superR); + + /// Retrieve the values in a struct and return a CompoundVal, used when doing + /// struct copy: + /// struct s x, y; + /// x = y; + /// y's value is retrieved by this method. + SVal RetrieveStruct(Store store, const TypedRegion* R); + + SVal RetrieveArray(Store store, const TypedRegion* R); + + /// Used to lazily generate derived symbols for bindings that are defined + /// implicitly by default bindings in a super region. + Optional<SVal> RetrieveDerivedDefaultValue(RegionBindings B, + const MemRegion *superR, + const TypedRegion *R, QualType Ty); + + /// Get the state and region whose binding this region R corresponds to. + std::pair<Store, const MemRegion*> + GetLazyBinding(RegionBindings B, const MemRegion *R); + + StoreRef CopyLazyBindings(nonloc::LazyCompoundVal V, Store store, + const TypedRegion *R); + + //===------------------------------------------------------------------===// + // State pruning. + //===------------------------------------------------------------------===// + + /// removeDeadBindings - Scans the RegionStore of 'state' for dead values. + /// It returns a new Store with these values removed. + StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx, + SymbolReaper& SymReaper, + llvm::SmallVectorImpl<const MemRegion*>& RegionRoots); + + StoreRef enterStackFrame(const GRState *state, const StackFrameContext *frame); + + //===------------------------------------------------------------------===// + // Region "extents". + //===------------------------------------------------------------------===// + + // FIXME: This method will soon be eliminated; see the note in Store.h. + DefinedOrUnknownSVal getSizeInElements(const GRState *state, + const MemRegion* R, QualType EleTy); + + //===------------------------------------------------------------------===// + // Utility methods. + //===------------------------------------------------------------------===// + + static inline RegionBindings GetRegionBindings(Store store) { + return RegionBindings(static_cast<const RegionBindings::TreeTy*>(store)); + } + + void print(Store store, llvm::raw_ostream& Out, const char* nl, + const char *sep); + + void iterBindings(Store store, BindingsHandler& f) { + RegionBindings B = GetRegionBindings(store); + for (RegionBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) { + const BindingKey &K = I.getKey(); + if (!K.isDirect()) + continue; + if (const SubRegion *R = dyn_cast<SubRegion>(I.getKey().getRegion())) { + // FIXME: Possibly incorporate the offset? 
+ if (!f.HandleBinding(*this, store, R, I.getData())) + return; + } + } + } +}; + +} // end anonymous namespace + +//===----------------------------------------------------------------------===// +// RegionStore creation. +//===----------------------------------------------------------------------===// + +StoreManager *ento::CreateRegionStoreManager(GRStateManager& StMgr) { + RegionStoreFeatures F = maximal_features_tag(); + return new RegionStoreManager(StMgr, F); +} + +StoreManager *ento::CreateFieldsOnlyRegionStoreManager(GRStateManager &StMgr) { + RegionStoreFeatures F = minimal_features_tag(); + F.enableFields(true); + return new RegionStoreManager(StMgr, F); +} + + +RegionStoreSubRegionMap* +RegionStoreManager::getRegionStoreSubRegionMap(Store store) { + RegionBindings B = GetRegionBindings(store); + RegionStoreSubRegionMap *M = new RegionStoreSubRegionMap(); + + llvm::SmallVector<const SubRegion*, 10> WL; + + for (RegionBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) + if (const SubRegion *R = dyn_cast<SubRegion>(I.getKey().getRegion())) + M->process(WL, R); + + // We also need to record in the subregion map "intermediate" regions that + // don't have direct bindings but are super regions of those that do. + while (!WL.empty()) { + const SubRegion *R = WL.back(); + WL.pop_back(); + M->process(WL, R); + } + + return M; +} + +//===----------------------------------------------------------------------===// +// Region Cluster analysis. +//===----------------------------------------------------------------------===// + +namespace { +template <typename DERIVED> +class ClusterAnalysis { +protected: + typedef BumpVector<BindingKey> RegionCluster; + typedef llvm::DenseMap<const MemRegion *, RegionCluster *> ClusterMap; + llvm::DenseMap<const RegionCluster*, unsigned> Visited; + typedef llvm::SmallVector<std::pair<const MemRegion *, RegionCluster*>, 10> + WorkList; + + BumpVectorContext BVC; + ClusterMap ClusterM; + WorkList WL; + + RegionStoreManager &RM; + ASTContext &Ctx; + SValBuilder &svalBuilder; + + RegionBindings B; + + const bool includeGlobals; + +public: + ClusterAnalysis(RegionStoreManager &rm, GRStateManager &StateMgr, + RegionBindings b, const bool includeGlobals) + : RM(rm), Ctx(StateMgr.getContext()), + svalBuilder(StateMgr.getSValBuilder()), + B(b), includeGlobals(includeGlobals) {} + + RegionBindings getRegionBindings() const { return B; } + + RegionCluster &AddToCluster(BindingKey K) { + const MemRegion *R = K.getRegion(); + const MemRegion *baseR = R->getBaseRegion(); + RegionCluster &C = getCluster(baseR); + C.push_back(K, BVC); + static_cast<DERIVED*>(this)->VisitAddedToCluster(baseR, C); + return C; + } + + bool isVisited(const MemRegion *R) { + return (bool) Visited[&getCluster(R->getBaseRegion())]; + } + + RegionCluster& getCluster(const MemRegion *R) { + RegionCluster *&CRef = ClusterM[R]; + if (!CRef) { + void *Mem = BVC.getAllocator().template Allocate<RegionCluster>(); + CRef = new (Mem) RegionCluster(BVC, 10); + } + return *CRef; + } + + void GenerateClusters() { + // Scan the entire set of bindings and make the region clusters. + for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){ + RegionCluster &C = AddToCluster(RI.getKey()); + if (const MemRegion *R = RI.getData().getAsRegion()) { + // Generate a cluster, but don't add the region to the cluster + // if there aren't any bindings. 
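+        // For example (illustrative): after 'int x; int *p = &x;' the value
+        // bound to 'p' refers to the region for 'x', so 'x' gets a cluster
+        // here even though nothing is bound to 'x' itself.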
+ getCluster(R->getBaseRegion()); + } + if (includeGlobals) { + const MemRegion *R = RI.getKey().getRegion(); + if (isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace())) + AddToWorkList(R, C); + } + } + } + + bool AddToWorkList(const MemRegion *R, RegionCluster &C) { + if (unsigned &visited = Visited[&C]) + return false; + else + visited = 1; + + WL.push_back(std::make_pair(R, &C)); + return true; + } + + bool AddToWorkList(BindingKey K) { + return AddToWorkList(K.getRegion()); + } + + bool AddToWorkList(const MemRegion *R) { + const MemRegion *baseR = R->getBaseRegion(); + return AddToWorkList(baseR, getCluster(baseR)); + } + + void RunWorkList() { + while (!WL.empty()) { + const MemRegion *baseR; + RegionCluster *C; + llvm::tie(baseR, C) = WL.back(); + WL.pop_back(); + + // First visit the cluster. + static_cast<DERIVED*>(this)->VisitCluster(baseR, C->begin(), C->end()); + + // Next, visit the base region. + static_cast<DERIVED*>(this)->VisitBaseRegion(baseR); + } + } + +public: + void VisitAddedToCluster(const MemRegion *baseR, RegionCluster &C) {} + void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E) {} + void VisitBaseRegion(const MemRegion *baseR) {} +}; +} + +//===----------------------------------------------------------------------===// +// Binding invalidation. +//===----------------------------------------------------------------------===// + +void RegionStoreManager::RemoveSubRegionBindings(RegionBindings &B, + const MemRegion *R, + RegionStoreSubRegionMap &M) { + + if (const RegionStoreSubRegionMap::Set *S = M.getSubRegions(R)) + for (RegionStoreSubRegionMap::Set::iterator I = S->begin(), E = S->end(); + I != E; ++I) + RemoveSubRegionBindings(B, *I, M); + + B = removeBinding(B, R); +} + +namespace { +class invalidateRegionsWorker : public ClusterAnalysis<invalidateRegionsWorker> +{ + const Expr *Ex; + unsigned Count; + StoreManager::InvalidatedSymbols *IS; + StoreManager::InvalidatedRegions *Regions; +public: + invalidateRegionsWorker(RegionStoreManager &rm, + GRStateManager &stateMgr, + RegionBindings b, + const Expr *ex, unsigned count, + StoreManager::InvalidatedSymbols *is, + StoreManager::InvalidatedRegions *r, + bool includeGlobals) + : ClusterAnalysis<invalidateRegionsWorker>(rm, stateMgr, b, includeGlobals), + Ex(ex), Count(count), IS(is), Regions(r) {} + + void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E); + void VisitBaseRegion(const MemRegion *baseR); + +private: + void VisitBinding(SVal V); +}; +} + +void invalidateRegionsWorker::VisitBinding(SVal V) { + // A symbol? Mark it touched by the invalidation. + if (IS) + if (SymbolRef Sym = V.getAsSymbol()) + IS->insert(Sym); + + if (const MemRegion *R = V.getAsRegion()) { + AddToWorkList(R); + return; + } + + // Is it a LazyCompoundVal? All references get invalidated as well. + if (const nonloc::LazyCompoundVal *LCS = + dyn_cast<nonloc::LazyCompoundVal>(&V)) { + + const MemRegion *LazyR = LCS->getRegion(); + RegionBindings B = RegionStoreManager::GetRegionBindings(LCS->getStore()); + + for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){ + const SubRegion *baseR = dyn_cast<SubRegion>(RI.getKey().getRegion()); + if (baseR && baseR->isSubRegionOf(LazyR)) + VisitBinding(RI.getData()); + } + + return; + } +} + +void invalidateRegionsWorker::VisitCluster(const MemRegion *baseR, + BindingKey *I, BindingKey *E) { + for ( ; I != E; ++I) { + // Get the old binding. Is it a region? If so, add it to the worklist. 
+    const BindingKey &K = *I;
+    if (const SVal *V = RM.lookup(B, K))
+      VisitBinding(*V);
+
+    B = RM.removeBinding(B, K);
+  }
+}
+
+void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
+  if (IS) {
+    // Symbolic region? Mark that symbol touched by the invalidation.
+    if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR))
+      IS->insert(SR->getSymbol());
+  }
+
+  // BlockDataRegion? If so, invalidate captured variables that are passed
+  // by reference.
+  if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(baseR)) {
+    for (BlockDataRegion::referenced_vars_iterator
+         BI = BR->referenced_vars_begin(), BE = BR->referenced_vars_end() ;
+         BI != BE; ++BI) {
+      const VarRegion *VR = *BI;
+      const VarDecl *VD = VR->getDecl();
+      if (VD->getAttr<BlocksAttr>() || !VD->hasLocalStorage())
+        AddToWorkList(VR);
+    }
+    return;
+  }
+
+  // Otherwise, we have a normal data region. Record that we touched the region.
+  if (Regions)
+    Regions->push_back(baseR);
+
+  if (isa<AllocaRegion>(baseR) || isa<SymbolicRegion>(baseR)) {
+    // Invalidate the region by setting its default value to
+    // conjured symbol. The type of the symbol is irrelevant.
+    DefinedOrUnknownSVal V =
+      svalBuilder.getConjuredSymbolVal(baseR, Ex, Ctx.IntTy, Count);
+    B = RM.addBinding(B, baseR, BindingKey::Default, V);
+    return;
+  }
+
+  if (!baseR->isBoundable())
+    return;
+
+  const TypedRegion *TR = cast<TypedRegion>(baseR);
+  QualType T = TR->getValueType();
+
+  // Invalidate the binding.
+  if (T->isStructureType()) {
+    // Invalidate the region by setting its default value to
+    // conjured symbol. The type of the symbol is irrelevant.
+    DefinedOrUnknownSVal V = svalBuilder.getConjuredSymbolVal(baseR, Ex, Ctx.IntTy,
+                                                          Count);
+    B = RM.addBinding(B, baseR, BindingKey::Default, V);
+    return;
+  }
+
+  if (const ArrayType *AT = Ctx.getAsArrayType(T)) {
+    // Set the default value of the array to conjured symbol.
+    DefinedOrUnknownSVal V =
+    svalBuilder.getConjuredSymbolVal(baseR, Ex, AT->getElementType(), Count);
+    B = RM.addBinding(B, baseR, BindingKey::Default, V);
+    return;
+  }
+
+  if (includeGlobals &&
+      isa<NonStaticGlobalSpaceRegion>(baseR->getMemorySpace())) {
+    // If the region is a global and we are invalidating all globals,
+    // just erase the entry. This causes all globals to be lazily
+    // symbolicated from the same base symbol.
+    B = RM.removeBinding(B, baseR);
+    return;
+  }
+
+
+  DefinedOrUnknownSVal V = svalBuilder.getConjuredSymbolVal(baseR, Ex, T, Count);
+  assert(SymbolManager::canSymbolicate(T) || V.isUnknown());
+  B = RM.addBinding(B, baseR, BindingKey::Direct, V);
+}
+
+StoreRef RegionStoreManager::invalidateRegions(Store store,
+                                               const MemRegion * const *I,
+                                               const MemRegion * const *E,
+                                               const Expr *Ex, unsigned Count,
+                                               InvalidatedSymbols *IS,
+                                               bool invalidateGlobals,
+                                               InvalidatedRegions *Regions) {
+  invalidateRegionsWorker W(*this, StateMgr,
+                            RegionStoreManager::GetRegionBindings(store),
+                            Ex, Count, IS, Regions, invalidateGlobals);
+
+  // Scan the bindings and generate the clusters.
+  W.GenerateClusters();
+
+  // Add I .. E to the worklist.
+  for ( ; I != E; ++I)
+    W.AddToWorkList(*I);
+
+  W.RunWorkList();
+
+  // Return the new bindings.
+  RegionBindings B = W.getRegionBindings();
+
+  if (invalidateGlobals) {
+    // Bind the non-static globals memory space to a new symbol that we will
+    // use to derive the bindings for all non-static globals.
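+    // For example (illustrative): after an opaque call 'extern void foo();
+    // foo();' with global invalidation, each non-static global 'g' is later
+    // read as a value derived from this single fresh symbol.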
+ const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion(); + SVal V = + svalBuilder.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, Ex, + /* symbol type, doesn't matter */ Ctx.IntTy, + Count); + B = addBinding(B, BindingKey::Make(GS, BindingKey::Default), V); + + // Even if there are no bindings in the global scope, we still need to + // record that we touched it. + if (Regions) + Regions->push_back(GS); + } + + return StoreRef(B.getRootWithoutRetain(), *this); +} + +//===----------------------------------------------------------------------===// +// Extents for regions. +//===----------------------------------------------------------------------===// + +DefinedOrUnknownSVal RegionStoreManager::getSizeInElements(const GRState *state, + const MemRegion *R, + QualType EleTy) { + SVal Size = cast<SubRegion>(R)->getExtent(svalBuilder); + const llvm::APSInt *SizeInt = svalBuilder.getKnownValue(state, Size); + if (!SizeInt) + return UnknownVal(); + + CharUnits RegionSize = CharUnits::fromQuantity(SizeInt->getSExtValue()); + + if (Ctx.getAsVariableArrayType(EleTy)) { + // FIXME: We need to track extra state to properly record the size + // of VLAs. Returning UnknownVal here, however, is a stop-gap so that + // we don't have a divide-by-zero below. + return UnknownVal(); + } + + CharUnits EleSize = Ctx.getTypeSizeInChars(EleTy); + + // If a variable is reinterpreted as a type that doesn't fit into a larger + // type evenly, round it down. + // This is a signed value, since it's used in arithmetic with signed indices. + return svalBuilder.makeIntVal(RegionSize / EleSize, false); +} + +//===----------------------------------------------------------------------===// +// Location and region casting. +//===----------------------------------------------------------------------===// + +/// ArrayToPointer - Emulates the "decay" of an array to a pointer +/// type. 'Array' represents the lvalue of the array being decayed +/// to a pointer, and the returned SVal represents the decayed +/// version of that lvalue (i.e., a pointer to the first element of +/// the array). This is called by ExprEngine when evaluating casts +/// from arrays to pointers. +SVal RegionStoreManager::ArrayToPointer(Loc Array) { + if (!isa<loc::MemRegionVal>(Array)) + return UnknownVal(); + + const MemRegion* R = cast<loc::MemRegionVal>(&Array)->getRegion(); + const TypedRegion* ArrayR = dyn_cast<TypedRegion>(R); + + if (!ArrayR) + return UnknownVal(); + + // Strip off typedefs from the ArrayRegion's ValueType. + QualType T = ArrayR->getValueType().getDesugaredType(Ctx); + const ArrayType *AT = cast<ArrayType>(T); + T = AT->getElementType(); + + NonLoc ZeroIdx = svalBuilder.makeZeroArrayIndex(); + return loc::MemRegionVal(MRMgr.getElementRegion(T, ZeroIdx, ArrayR, Ctx)); +} + +SVal RegionStoreManager::evalDerivedToBase(SVal derived, QualType baseType) { + const CXXRecordDecl *baseDecl; + if (baseType->isPointerType()) + baseDecl = baseType->getCXXRecordDeclForPointerType(); + else + baseDecl = baseType->getAsCXXRecordDecl(); + + assert(baseDecl && "not a CXXRecordDecl?"); + + loc::MemRegionVal *derivedRegVal = dyn_cast<loc::MemRegionVal>(&derived); + if (!derivedRegVal) + return derived; + + const MemRegion *baseReg = + MRMgr.getCXXBaseObjectRegion(baseDecl, derivedRegVal->getRegion()); + + return loc::MemRegionVal(baseReg); +} + +//===----------------------------------------------------------------------===// +// Loading values from regions. 
+//===----------------------------------------------------------------------===//
+
+Optional<SVal> RegionStoreManager::getDirectBinding(RegionBindings B,
+                                                    const MemRegion *R) {
+
+  if (const SVal *V = lookup(B, R, BindingKey::Direct))
+    return *V;
+
+  return Optional<SVal>();
+}
+
+Optional<SVal> RegionStoreManager::getDefaultBinding(RegionBindings B,
+                                                     const MemRegion *R) {
+  if (R->isBoundable())
+    if (const TypedRegion *TR = dyn_cast<TypedRegion>(R))
+      if (TR->getValueType()->isUnionType())
+        return UnknownVal();
+
+  if (const SVal *V = lookup(B, R, BindingKey::Default))
+    return *V;
+
+  return Optional<SVal>();
+}
+
+SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) {
+  assert(!isa<UnknownVal>(L) && "location unknown");
+  assert(!isa<UndefinedVal>(L) && "location undefined");
+
+  // For access to concrete addresses, return UnknownVal. Checks
+  // for null dereferences (and similar errors) are done by checkers, not
+  // the Store.
+  // FIXME: We can consider lazily symbolicating such memory, but we really
+  // should defer this when we can reason easily about symbolicating arrays
+  // of bytes.
+  if (isa<loc::ConcreteInt>(L)) {
+    return UnknownVal();
+  }
+  if (!isa<loc::MemRegionVal>(L)) {
+    return UnknownVal();
+  }
+
+  const MemRegion *MR = cast<loc::MemRegionVal>(L).getRegion();
+
+  if (isa<AllocaRegion>(MR) || isa<SymbolicRegion>(MR)) {
+    if (T.isNull()) {
+      const SymbolicRegion *SR = cast<SymbolicRegion>(MR);
+      T = SR->getSymbol()->getType(Ctx);
+    }
+    MR = GetElementZeroRegion(MR, T);
+  }
+
+  if (isa<CodeTextRegion>(MR)) {
+    assert(0 && "Why load from a code text region?");
+    return UnknownVal();
+  }
+
+  // FIXME: Perhaps this method should just take a 'const MemRegion*' argument
+  // instead of 'Loc', and have the other Loc cases handled at a higher level.
+  const TypedRegion *R = cast<TypedRegion>(MR);
+  QualType RTy = R->getValueType();
+
+  // FIXME: We should eventually handle funny addressing. e.g.:
+  //
+  //   int x = ...;
+  //   int *p = &x;
+  //   char *q = (char*) p;
+  //   char c = *q; // returns the first byte of 'x'.
+  //
+  // Such funny addressing will occur due to layering of regions.
+
+  if (RTy->isStructureOrClassType())
+    return RetrieveStruct(store, R);
+
+  // FIXME: Handle unions.
+  if (RTy->isUnionType())
+    return UnknownVal();
+
+  if (RTy->isArrayType())
+    return RetrieveArray(store, R);
+
+  // FIXME: handle Vector types.
+  if (RTy->isVectorType())
+    return UnknownVal();
+
+  if (const FieldRegion* FR = dyn_cast<FieldRegion>(R))
+    return CastRetrievedVal(RetrieveField(store, FR), FR, T, false);
+
+  if (const ElementRegion* ER = dyn_cast<ElementRegion>(R)) {
+    // FIXME: Here we actually perform an implicit conversion from the loaded
+    // value to the element type. Eventually we want to compose these values
+    // more intelligently. For example, an 'element' can encompass multiple
+    // bound regions (e.g., several bound bytes), or could be a subset of
+    // a larger value.
+    return CastRetrievedVal(RetrieveElement(store, ER), ER, T, false);
+  }
+
+  if (const ObjCIvarRegion *IVR = dyn_cast<ObjCIvarRegion>(R)) {
+    // FIXME: Here we actually perform an implicit conversion from the loaded
+    // value to the ivar type. What we should model is stores to ivars
+    // that blow past the extent of the ivar. If the address of the ivar is
+    // reinterpreted, it is possible we stored a different value that could
+    // fit within the ivar. Either we need to cast these when storing them
+    // or reinterpret them lazily (as we do here).
+    return CastRetrievedVal(RetrieveObjCIvar(store, IVR), IVR, T, false);
+  }
+
+  if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+    // FIXME: Here we actually perform an implicit conversion from the loaded
+    // value to the variable type. What we should model is stores to variables
+    // that blow past the extent of the variable. If the address of the
+    // variable is reinterpreted, it is possible we stored a different value
+    // that could fit within the variable. Either we need to cast these when
+    // storing them or reinterpret them lazily (as we do here).
+    return CastRetrievedVal(RetrieveVar(store, VR), VR, T, false);
+  }
+
+  RegionBindings B = GetRegionBindings(store);
+  const SVal *V = lookup(B, R, BindingKey::Direct);
+
+  // Check if the region has a binding.
+  if (V)
+    return *V;
+
+  // The location does not have a bound value. This means that it has
+  // the value it had upon its creation and/or entry to the analyzed
+  // function/method. These are either symbolic values or 'undefined'.
+  if (R->hasStackNonParametersStorage()) {
+    // All stack variables are considered to have undefined values
+    // upon creation. All heap allocated blocks are considered to
+    // have undefined values as well unless they are explicitly bound
+    // to specific values.
+    return UndefinedVal();
+  }
+
+  // All other values are symbolic.
+  return svalBuilder.getRegionValueSymbolVal(R);
+}
+
+std::pair<Store, const MemRegion *>
+RegionStoreManager::GetLazyBinding(RegionBindings B, const MemRegion *R) {
+  if (Optional<SVal> OV = getDirectBinding(B, R))
+    if (const nonloc::LazyCompoundVal *V =
+        dyn_cast<nonloc::LazyCompoundVal>(OV.getPointer()))
+      return std::make_pair(V->getStore(), V->getRegion());
+
+  if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+    const std::pair<Store, const MemRegion *> &X =
+      GetLazyBinding(B, ER->getSuperRegion());
+
+    if (X.second)
+      return std::make_pair(X.first,
+                            MRMgr.getElementRegionWithSuper(ER, X.second));
+  }
+  else if (const FieldRegion *FR = dyn_cast<FieldRegion>(R)) {
+    const std::pair<Store, const MemRegion *> &X =
+      GetLazyBinding(B, FR->getSuperRegion());
+
+    if (X.second)
+      return std::make_pair(X.first,
+                            MRMgr.getFieldRegionWithSuper(FR, X.second));
+  }
+  // C++ base object region is another kind of region that we should blast
+  // through to look for a lazy compound value. It is like a field region.
+  else if (const CXXBaseObjectRegion *baseReg =
+            dyn_cast<CXXBaseObjectRegion>(R)) {
+    const std::pair<Store, const MemRegion *> &X =
+      GetLazyBinding(B, baseReg->getSuperRegion());
+
+    if (X.second)
+      return std::make_pair(X.first,
+                      MRMgr.getCXXBaseObjectRegionWithSuper(baseReg, X.second));
+  }
+  // The NULL MemRegion indicates a non-existent lazy binding. A NULL Store is
+  // possible for a valid lazy binding.
+  return std::make_pair((Store) 0, (const MemRegion *) 0);
+}
+
+SVal RegionStoreManager::RetrieveElement(Store store,
+                                         const ElementRegion* R) {
+  // Check if the region has a binding.
+  RegionBindings B = GetRegionBindings(store);
+  if (const Optional<SVal> &V = getDirectBinding(B, R))
+    return *V;
+
+  const MemRegion* superR = R->getSuperRegion();
+
+  // Check if the region is an element region of a string literal.
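+  // For example (illustrative): 'char c = "abc"[1];' is answered directly
+  // from the literal below, yielding the concrete value 'b'.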
+ if (const StringRegion *StrR=dyn_cast<StringRegion>(superR)) { + // FIXME: Handle loads from strings where the literal is treated as + // an integer, e.g., *((unsigned int*)"hello") + QualType T = Ctx.getAsArrayType(StrR->getValueType())->getElementType(); + if (T != Ctx.getCanonicalType(R->getElementType())) + return UnknownVal(); + + const StringLiteral *Str = StrR->getStringLiteral(); + SVal Idx = R->getIndex(); + if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&Idx)) { + int64_t i = CI->getValue().getSExtValue(); + int64_t byteLength = Str->getByteLength(); + // Technically, only i == byteLength is guaranteed to be null. + // However, such overflows should be caught before reaching this point; + // the only time such an access would be made is if a string literal was + // used to initialize a larger array. + char c = (i >= byteLength) ? '\0' : Str->getString()[i]; + return svalBuilder.makeIntVal(c, T); + } + } + + // Check for loads from a code text region. For such loads, just give up. + if (isa<CodeTextRegion>(superR)) + return UnknownVal(); + + // Handle the case where we are indexing into a larger scalar object. + // For example, this handles: + // int x = ... + // char *y = &x; + // return *y; + // FIXME: This is a hack, and doesn't do anything really intelligent yet. + const RegionRawOffset &O = R->getAsArrayOffset(); + if (const TypedRegion *baseR = dyn_cast_or_null<TypedRegion>(O.getRegion())) { + QualType baseT = baseR->getValueType(); + if (baseT->isScalarType()) { + QualType elemT = R->getElementType(); + if (elemT->isScalarType()) { + if (Ctx.getTypeSizeInChars(baseT) >= Ctx.getTypeSizeInChars(elemT)) { + if (const Optional<SVal> &V = getDirectBinding(B, superR)) { + if (SymbolRef parentSym = V->getAsSymbol()) + return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R); + + if (V->isUnknownOrUndef()) + return *V; + // Other cases: give up. We are indexing into a larger object + // that has some value, but we don't know how to handle that yet. + return UnknownVal(); + } + } + } + } + } + return RetrieveFieldOrElementCommon(store, R, R->getElementType(), superR); +} + +SVal RegionStoreManager::RetrieveField(Store store, + const FieldRegion* R) { + + // Check if the region has a binding. + RegionBindings B = GetRegionBindings(store); + if (const Optional<SVal> &V = getDirectBinding(B, R)) + return *V; + + QualType Ty = R->getValueType(); + return RetrieveFieldOrElementCommon(store, R, Ty, R->getSuperRegion()); +} + +Optional<SVal> +RegionStoreManager::RetrieveDerivedDefaultValue(RegionBindings B, + const MemRegion *superR, + const TypedRegion *R, + QualType Ty) { + + if (const Optional<SVal> &D = getDefaultBinding(B, superR)) { + if (SymbolRef parentSym = D->getAsSymbol()) + return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R); + + if (D->isZeroConstant()) + return svalBuilder.makeZeroVal(Ty); + + if (D->isUnknownOrUndef()) + return *D; + + assert(0 && "Unknown default value"); + } + + return Optional<SVal>(); +} + +SVal RegionStoreManager::RetrieveFieldOrElementCommon(Store store, + const TypedRegion *R, + QualType Ty, + const MemRegion *superR) { + + // At this point we have already checked in either RetrieveElement or + // RetrieveField if 'R' has a direct binding. 
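+  // What remains is to look for a default binding on an enclosing region.
+  // For example (illustrative): after 'struct S { int x, y; } s = { 1 };'
+  // a read of 's.y' has no direct binding, but finds the zero default
+  // binding installed on the region for 's'.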
+ + RegionBindings B = GetRegionBindings(store); + + while (superR) { + if (const Optional<SVal> &D = + RetrieveDerivedDefaultValue(B, superR, R, Ty)) + return *D; + + // If our super region is a field or element itself, walk up the region + // hierarchy to see if there is a default value installed in an ancestor. + if (const SubRegion *SR = dyn_cast<SubRegion>(superR)) { + superR = SR->getSuperRegion(); + continue; + } + break; + } + + // Lazy binding? + Store lazyBindingStore = NULL; + const MemRegion *lazyBindingRegion = NULL; + llvm::tie(lazyBindingStore, lazyBindingRegion) = GetLazyBinding(B, R); + + if (lazyBindingRegion) { + if (const ElementRegion *ER = dyn_cast<ElementRegion>(lazyBindingRegion)) + return RetrieveElement(lazyBindingStore, ER); + return RetrieveField(lazyBindingStore, + cast<FieldRegion>(lazyBindingRegion)); + } + + if (R->hasStackNonParametersStorage()) { + if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) { + // Currently we don't reason specially about Clang-style vectors. Check + // if superR is a vector and if so return Unknown. + if (const TypedRegion *typedSuperR = dyn_cast<TypedRegion>(superR)) { + if (typedSuperR->getValueType()->isVectorType()) + return UnknownVal(); + } + + // FIXME: We also need to take ElementRegions with symbolic indexes into + // account. + if (!ER->getIndex().isConstant()) + return UnknownVal(); + } + + return UndefinedVal(); + } + + // All other values are symbolic. + return svalBuilder.getRegionValueSymbolVal(R); +} + +SVal RegionStoreManager::RetrieveObjCIvar(Store store, const ObjCIvarRegion* R){ + + // Check if the region has a binding. + RegionBindings B = GetRegionBindings(store); + + if (const Optional<SVal> &V = getDirectBinding(B, R)) + return *V; + + const MemRegion *superR = R->getSuperRegion(); + + // Check if the super region has a default binding. + if (const Optional<SVal> &V = getDefaultBinding(B, superR)) { + if (SymbolRef parentSym = V->getAsSymbol()) + return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R); + + // Other cases: give up. + return UnknownVal(); + } + + return RetrieveLazySymbol(R); +} + +SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) { + + // Check if the region has a binding. + RegionBindings B = GetRegionBindings(store); + + if (const Optional<SVal> &V = getDirectBinding(B, R)) + return *V; + + // Lazily derive a value for the VarRegion. + const VarDecl *VD = R->getDecl(); + QualType T = VD->getType(); + const MemSpaceRegion *MS = R->getMemorySpace(); + + if (isa<UnknownSpaceRegion>(MS) || + isa<StackArgumentsSpaceRegion>(MS)) + return svalBuilder.getRegionValueSymbolVal(R); + + if (isa<GlobalsSpaceRegion>(MS)) { + if (isa<NonStaticGlobalSpaceRegion>(MS)) { + // Is 'VD' declared constant? If so, retrieve the constant value. + QualType CT = Ctx.getCanonicalType(T); + if (CT.isConstQualified()) { + const Expr *Init = VD->getInit(); + // Do the null check first, as we want to call 'IgnoreParenCasts'. 
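+        // For example (illustrative): for 'const int N = (3);' the
+        // initializer strips to an IntegerLiteral and the literal's value
+        // is returned directly.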
+ if (Init) + if (const IntegerLiteral *IL = + dyn_cast<IntegerLiteral>(Init->IgnoreParenCasts())) { + const nonloc::ConcreteInt &V = svalBuilder.makeIntVal(IL); + return svalBuilder.evalCast(V, Init->getType(), IL->getType()); + } + } + + if (const Optional<SVal> &V = RetrieveDerivedDefaultValue(B, MS, R, CT)) + return V.getValue(); + + return svalBuilder.getRegionValueSymbolVal(R); + } + + if (T->isIntegerType()) + return svalBuilder.makeIntVal(0, T); + if (T->isPointerType()) + return svalBuilder.makeNull(); + + return UnknownVal(); + } + + return UndefinedVal(); +} + +SVal RegionStoreManager::RetrieveLazySymbol(const TypedRegion *R) { + // All other values are symbolic. + return svalBuilder.getRegionValueSymbolVal(R); +} + +SVal RegionStoreManager::RetrieveStruct(Store store, const TypedRegion* R) { + QualType T = R->getValueType(); + assert(T->isStructureOrClassType()); + return svalBuilder.makeLazyCompoundVal(store, R); +} + +SVal RegionStoreManager::RetrieveArray(Store store, const TypedRegion * R) { + assert(Ctx.getAsConstantArrayType(R->getValueType())); + return svalBuilder.makeLazyCompoundVal(store, R); +} + +//===----------------------------------------------------------------------===// +// Binding values to regions. +//===----------------------------------------------------------------------===// + +StoreRef RegionStoreManager::Remove(Store store, Loc L) { + if (isa<loc::MemRegionVal>(L)) + if (const MemRegion* R = cast<loc::MemRegionVal>(L).getRegion()) + return StoreRef(removeBinding(GetRegionBindings(store), + R).getRootWithoutRetain(), + *this); + + return StoreRef(store, *this); +} + +StoreRef RegionStoreManager::Bind(Store store, Loc L, SVal V) { + if (isa<loc::ConcreteInt>(L)) + return StoreRef(store, *this); + + // If we get here, the location should be a region. + const MemRegion *R = cast<loc::MemRegionVal>(L).getRegion(); + + // Check if the region is a struct region. + if (const TypedRegion* TR = dyn_cast<TypedRegion>(R)) + if (TR->getValueType()->isStructureOrClassType()) + return BindStruct(store, TR, V); + + if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) { + if (ER->getIndex().isZeroConstant()) { + if (const TypedRegion *superR = + dyn_cast<TypedRegion>(ER->getSuperRegion())) { + QualType superTy = superR->getValueType(); + // For now, just invalidate the fields of the struct/union/class. + // This is for test rdar_test_7185607 in misc-ps-region-store.m. + // FIXME: Precisely handle the fields of the record. + if (superTy->isStructureOrClassType()) + return KillStruct(store, superR, UnknownVal()); + } + } + } + else if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) { + // Binding directly to a symbolic region should be treated as binding + // to element 0. + QualType T = SR->getSymbol()->getType(Ctx); + + // FIXME: Is this the right way to handle symbols that are references? + if (const PointerType *PT = T->getAs<PointerType>()) + T = PT->getPointeeType(); + else + T = T->getAs<ReferenceType>()->getPointeeType(); + + R = GetElementZeroRegion(SR, T); + } + + // Perform the binding. 
+ RegionBindings B = GetRegionBindings(store); + return StoreRef(addBinding(B, R, BindingKey::Direct, + V).getRootWithoutRetain(), *this); +} + +StoreRef RegionStoreManager::BindDecl(Store store, const VarRegion *VR, + SVal InitVal) { + + QualType T = VR->getDecl()->getType(); + + if (T->isArrayType()) + return BindArray(store, VR, InitVal); + if (T->isStructureOrClassType()) + return BindStruct(store, VR, InitVal); + + return Bind(store, svalBuilder.makeLoc(VR), InitVal); +} + +// FIXME: this method should be merged into Bind(). +StoreRef RegionStoreManager::BindCompoundLiteral(Store store, + const CompoundLiteralExpr *CL, + const LocationContext *LC, + SVal V) { + return Bind(store, loc::MemRegionVal(MRMgr.getCompoundLiteralRegion(CL, LC)), + V); +} + +StoreRef RegionStoreManager::setImplicitDefaultValue(Store store, + const MemRegion *R, + QualType T) { + RegionBindings B = GetRegionBindings(store); + SVal V; + + if (Loc::isLocType(T)) + V = svalBuilder.makeNull(); + else if (T->isIntegerType()) + V = svalBuilder.makeZeroVal(T); + else if (T->isStructureOrClassType() || T->isArrayType()) { + // Set the default value to a zero constant when it is a structure + // or array. The type doesn't really matter. + V = svalBuilder.makeZeroVal(Ctx.IntTy); + } + else { + return StoreRef(store, *this); + } + + return StoreRef(addBinding(B, R, BindingKey::Default, + V).getRootWithoutRetain(), *this); +} + +StoreRef RegionStoreManager::BindArray(Store store, const TypedRegion* R, + SVal Init) { + + const ArrayType *AT =cast<ArrayType>(Ctx.getCanonicalType(R->getValueType())); + QualType ElementTy = AT->getElementType(); + Optional<uint64_t> Size; + + if (const ConstantArrayType* CAT = dyn_cast<ConstantArrayType>(AT)) + Size = CAT->getSize().getZExtValue(); + + // Check if the init expr is a string literal. + if (loc::MemRegionVal *MRV = dyn_cast<loc::MemRegionVal>(&Init)) { + const StringRegion *S = cast<StringRegion>(MRV->getRegion()); + + // Treat the string as a lazy compound value. + nonloc::LazyCompoundVal LCV = + cast<nonloc::LazyCompoundVal>(svalBuilder.makeLazyCompoundVal(store, S)); + return CopyLazyBindings(LCV, store, R); + } + + // Handle lazy compound values. + if (nonloc::LazyCompoundVal *LCV = dyn_cast<nonloc::LazyCompoundVal>(&Init)) + return CopyLazyBindings(*LCV, store, R); + + // Remaining case: explicit compound values. + + if (Init.isUnknown()) + return setImplicitDefaultValue(store, R, ElementTy); + + nonloc::CompoundVal& CV = cast<nonloc::CompoundVal>(Init); + nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end(); + uint64_t i = 0; + + StoreRef newStore(store, *this); + for (; Size.hasValue() ? i < Size.getValue() : true ; ++i, ++VI) { + // The init list might be shorter than the array length. + if (VI == VE) + break; + + const NonLoc &Idx = svalBuilder.makeArrayIndex(i); + const ElementRegion *ER = MRMgr.getElementRegion(ElementTy, Idx, R, Ctx); + + if (ElementTy->isStructureOrClassType()) + newStore = BindStruct(newStore.getStore(), ER, *VI); + else if (ElementTy->isArrayType()) + newStore = BindArray(newStore.getStore(), ER, *VI); + else + newStore = Bind(newStore.getStore(), svalBuilder.makeLoc(ER), *VI); + } + + // If the init list is shorter than the array length, set the + // array default value. 
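+  // For example (illustrative): after 'int a[4] = { 1, 2 };' reads of
+  // 'a[2]' and 'a[3]' see the implicit zero default binding set here.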
+  if (Size.hasValue() && i < Size.getValue())
+    newStore = setImplicitDefaultValue(newStore.getStore(), R, ElementTy);
+
+  return newStore;
+}
+
+StoreRef RegionStoreManager::BindStruct(Store store, const TypedRegion* R,
+                                        SVal V) {
+
+  if (!Features.supportsFields())
+    return StoreRef(store, *this);
+
+  QualType T = R->getValueType();
+  assert(T->isStructureOrClassType());
+
+  const RecordType* RT = T->getAs<RecordType>();
+  RecordDecl* RD = RT->getDecl();
+
+  if (!RD->isDefinition())
+    return StoreRef(store, *this);
+
+  // Handle lazy compound values.
+  if (const nonloc::LazyCompoundVal *LCV=dyn_cast<nonloc::LazyCompoundVal>(&V))
+    return CopyLazyBindings(*LCV, store, R);
+
+  // We may get a non-CompoundVal accidentally due to imprecise cast logic or
+  // because we are binding a symbolic struct value. Kill the field values,
+  // and if the value is symbolic go and bind it as a "default" binding.
+  if (V.isUnknown() || !isa<nonloc::CompoundVal>(V)) {
+    SVal SV = isa<nonloc::SymbolVal>(V) ? V : UnknownVal();
+    return KillStruct(store, R, SV);
+  }
+
+  nonloc::CompoundVal& CV = cast<nonloc::CompoundVal>(V);
+  nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
+
+  RecordDecl::field_iterator FI, FE;
+  StoreRef newStore(store, *this);
+
+  for (FI = RD->field_begin(), FE = RD->field_end(); FI != FE; ++FI, ++VI) {
+
+    if (VI == VE)
+      break;
+
+    QualType FTy = (*FI)->getType();
+    const FieldRegion* FR = MRMgr.getFieldRegion(*FI, R);
+
+    if (FTy->isArrayType())
+      newStore = BindArray(newStore.getStore(), FR, *VI);
+    else if (FTy->isStructureOrClassType())
+      newStore = BindStruct(newStore.getStore(), FR, *VI);
+    else
+      newStore = Bind(newStore.getStore(), svalBuilder.makeLoc(FR), *VI);
+  }
+
+  // There may be fewer values in the initializer list than fields in the struct.
+  if (FI != FE) {
+    RegionBindings B = GetRegionBindings(newStore.getStore());
+    B = addBinding(B, R, BindingKey::Default, svalBuilder.makeIntVal(0, false));
+    newStore = StoreRef(B.getRootWithoutRetain(), *this);
+  }
+
+  return newStore;
+}
+
+StoreRef RegionStoreManager::KillStruct(Store store, const TypedRegion* R,
+                                        SVal DefaultVal) {
+  BindingKey key = BindingKey::Make(R, BindingKey::Default);
+
+  // The BindingKey may be "invalid" if we cannot handle the region binding
+  // explicitly. One example is something like array[index], where index
+  // is a symbolic value. In such cases, we want to invalidate the entire
+  // array, as the index assignment could have been to any element. In
+  // the case of nested symbolic indices, we need to march up the region
+  // hierarchy until we reach a region whose binding we can reason about.
+  const SubRegion *subReg = R;
+
+  while (!key.isValid()) {
+    if (const SubRegion *tmp = dyn_cast<SubRegion>(subReg->getSuperRegion())) {
+      subReg = tmp;
+      key = BindingKey::Make(tmp, BindingKey::Default);
+    }
+    else
+      break;
+  }
+
+  // Remove the old bindings, using 'subReg' as the root of all regions
+  // we will invalidate.
+  RegionBindings B = GetRegionBindings(store);
+  llvm::OwningPtr<RegionStoreSubRegionMap>
+    SubRegions(getRegionStoreSubRegionMap(store));
+  RemoveSubRegionBindings(B, subReg, *SubRegions);
+
+  // Set the default value of the struct region to "unknown".
+  if (!key.isValid())
+    return StoreRef(B.getRootWithoutRetain(), *this);
+
+  return StoreRef(addBinding(B, key, DefaultVal).getRootWithoutRetain(), *this);
+}
+
+StoreRef RegionStoreManager::CopyLazyBindings(nonloc::LazyCompoundVal V,
+                                              Store store,
+                                              const TypedRegion *R) {
+
+  // Nuke the old bindings stemming from R.
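+  // For example (illustrative): in 'struct S x = y;' any stale bindings to
+  // the fields of 'x' are removed before 'x' is bound, below, to a single
+  // LazyCompoundVal that captures y's bindings.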
+  RegionBindings B = GetRegionBindings(store);
+
+  llvm::OwningPtr<RegionStoreSubRegionMap>
+    SubRegions(getRegionStoreSubRegionMap(store));
+
+  // B is updated in place by the call to RemoveSubRegionBindings.
+  RemoveSubRegionBindings(B, R, *SubRegions.get());
+
+  // Now copy the bindings. This amounts to just binding 'V' to 'R'. This
+  // results in a zero-copy algorithm.
+  return StoreRef(addBinding(B, R, BindingKey::Direct,
+                             V).getRootWithoutRetain(), *this);
+}
+
+//===----------------------------------------------------------------------===//
+// "Raw" retrievals and bindings.
+//===----------------------------------------------------------------------===//
+
+
+RegionBindings RegionStoreManager::addBinding(RegionBindings B, BindingKey K,
+                                              SVal V) {
+  if (!K.isValid())
+    return B;
+  return RBFactory.add(B, K, V);
+}
+
+RegionBindings RegionStoreManager::addBinding(RegionBindings B,
+                                              const MemRegion *R,
+                                              BindingKey::Kind k, SVal V) {
+  return addBinding(B, BindingKey::Make(R, k), V);
+}
+
+const SVal *RegionStoreManager::lookup(RegionBindings B, BindingKey K) {
+  if (!K.isValid())
+    return NULL;
+  return B.lookup(K);
+}
+
+const SVal *RegionStoreManager::lookup(RegionBindings B,
+                                       const MemRegion *R,
+                                       BindingKey::Kind k) {
+  return lookup(B, BindingKey::Make(R, k));
+}
+
+RegionBindings RegionStoreManager::removeBinding(RegionBindings B,
+                                                 BindingKey K) {
+  if (!K.isValid())
+    return B;
+  return RBFactory.remove(B, K);
+}
+
+RegionBindings RegionStoreManager::removeBinding(RegionBindings B,
+                                                 const MemRegion *R,
+                                                BindingKey::Kind k){
+  return removeBinding(B, BindingKey::Make(R, k));
+}
+
+//===----------------------------------------------------------------------===//
+// State pruning.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class removeDeadBindingsWorker :
+  public ClusterAnalysis<removeDeadBindingsWorker> {
+  llvm::SmallVector<const SymbolicRegion*, 12> Postponed;
+  SymbolReaper &SymReaper;
+  const StackFrameContext *CurrentLCtx;
+
+public:
+  removeDeadBindingsWorker(RegionStoreManager &rm, GRStateManager &stateMgr,
+                           RegionBindings b, SymbolReaper &symReaper,
+                           const StackFrameContext *LCtx)
+    : ClusterAnalysis<removeDeadBindingsWorker>(rm, stateMgr, b,
+                                                /* includeGlobals = */ false),
+      SymReaper(symReaper), CurrentLCtx(LCtx) {}
+
+  // Called by ClusterAnalysis.
+  void VisitAddedToCluster(const MemRegion *baseR, RegionCluster &C);
+  void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E);
+
+  void VisitBindingKey(BindingKey K);
+  bool UpdatePostponed();
+  void VisitBinding(SVal V);
+};
+}
+
+void removeDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
+                                                   RegionCluster &C) {
+
+  if (const VarRegion *VR = dyn_cast<VarRegion>(baseR)) {
+    if (SymReaper.isLive(VR))
+      AddToWorkList(baseR, C);
+
+    return;
+  }
+
+  if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR)) {
+    if (SymReaper.isLive(SR->getSymbol()))
+      AddToWorkList(SR, C);
+    else
+      Postponed.push_back(SR);
+
+    return;
+  }
+
+  if (isa<NonStaticGlobalSpaceRegion>(baseR)) {
+    AddToWorkList(baseR, C);
+    return;
+  }
+
+  // CXXThisRegion in the current or parent location context is live.
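+  // For example (illustrative): while analyzing a method inlined from a
+  // caller's frame, the caller's 'this' bindings must survive the removal
+  // of dead bindings.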
+  if (const CXXThisRegion *TR = dyn_cast<CXXThisRegion>(baseR)) {
+    const StackArgumentsSpaceRegion *StackReg =
+      cast<StackArgumentsSpaceRegion>(TR->getSuperRegion());
+    const StackFrameContext *RegCtx = StackReg->getStackFrame();
+    if (RegCtx == CurrentLCtx || RegCtx->isParentOf(CurrentLCtx))
+      AddToWorkList(TR, C);
+  }
+}
+
+void removeDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
+                                            BindingKey *I, BindingKey *E) {
+  for ( ; I != E; ++I)
+    VisitBindingKey(*I);
+}
+
+void removeDeadBindingsWorker::VisitBinding(SVal V) {
+  // Is it a LazyCompoundVal? All referenced regions are live as well.
+  if (const nonloc::LazyCompoundVal *LCS =
+      dyn_cast<nonloc::LazyCompoundVal>(&V)) {
+
+    const MemRegion *LazyR = LCS->getRegion();
+    RegionBindings B = RegionStoreManager::GetRegionBindings(LCS->getStore());
+    for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
+      const SubRegion *baseR = dyn_cast<SubRegion>(RI.getKey().getRegion());
+      if (baseR && baseR->isSubRegionOf(LazyR))
+        VisitBinding(RI.getData());
+    }
+    return;
+  }
+
+  // If V is a region, then add it to the worklist.
+  if (const MemRegion *R = V.getAsRegion())
+    AddToWorkList(R);
+
+  // Update the set of live symbols.
+  for (SVal::symbol_iterator SI=V.symbol_begin(), SE=V.symbol_end();
+       SI!=SE;++SI)
+    SymReaper.markLive(*SI);
+}
+
+void removeDeadBindingsWorker::VisitBindingKey(BindingKey K) {
+  const MemRegion *R = K.getRegion();
+
+  // Mark this region "live" by adding it to the worklist. This will cause
+  // us to visit all regions in the cluster (if we haven't visited them
+  // already).
+  if (AddToWorkList(R)) {
+    // Mark the symbol for any live SymbolicRegion as "live". This means we
+    // should continue to track that symbol.
+    if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(R))
+      SymReaper.markLive(SymR->getSymbol());
+
+    // For BlockDataRegions, enqueue the VarRegions for variables marked
+    // with __block (passed-by-reference) via BlockDeclRefExprs.
+    if (const BlockDataRegion *BD = dyn_cast<BlockDataRegion>(R)) {
+      for (BlockDataRegion::referenced_vars_iterator
+           RI = BD->referenced_vars_begin(), RE = BD->referenced_vars_end();
+           RI != RE; ++RI) {
+        if ((*RI)->getDecl()->getAttr<BlocksAttr>())
+          AddToWorkList(*RI);
+      }
+
+      // No possible data bindings on a BlockDataRegion.
+      return;
+    }
+  }
+
+  // Visit the data binding for K.
+  if (const SVal *V = RM.lookup(B, K))
+    VisitBinding(*V);
+}
+
+bool removeDeadBindingsWorker::UpdatePostponed() {
+  // See if any postponed SymbolicRegions are actually live now, after
+  // having done a scan.
+  bool changed = false;
+
+  for (llvm::SmallVectorImpl<const SymbolicRegion*>::iterator
+        I = Postponed.begin(), E = Postponed.end() ; I != E ; ++I) {
+    if (const SymbolicRegion *SR = cast_or_null<SymbolicRegion>(*I)) {
+      if (SymReaper.isLive(SR->getSymbol())) {
+        changed |= AddToWorkList(SR);
+        *I = NULL;
+      }
+    }
+  }
+
+  return changed;
+}
+
+StoreRef RegionStoreManager::removeDeadBindings(Store store,
+                                                const StackFrameContext *LCtx,
+                                                SymbolReaper& SymReaper,
+                           llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
+{
+  RegionBindings B = GetRegionBindings(store);
+  removeDeadBindingsWorker W(*this, StateMgr, B, SymReaper, LCtx);
+  W.GenerateClusters();
+
+  // Enqueue the region roots onto the worklist.
+  for (llvm::SmallVectorImpl<const MemRegion*>::iterator I=RegionRoots.begin(),
+       E=RegionRoots.end(); I!=E; ++I)
+    W.AddToWorkList(*I);
+
+  do W.RunWorkList(); while (W.UpdatePostponed());
+
+  // We have now scanned the store, marking reachable regions and symbols
+  // as live. We now remove all the regions that are dead from the store
+  // and notify the SymbolReaper of the symbols that may now be dead.
+  for (RegionBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+    const BindingKey &K = I.getKey();
+
+    // If the cluster has been visited, we know the region has been marked.
+    if (W.isVisited(K.getRegion()))
+      continue;
+
+    // Remove the dead entry.
+    B = removeBinding(B, K);
+
+    // Mark all non-live symbols that this binding references as dead.
+    if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(K.getRegion()))
+      SymReaper.maybeDead(SymR->getSymbol());
+
+    SVal X = I.getData();
+    SVal::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
+    for (; SI != SE; ++SI)
+      SymReaper.maybeDead(*SI);
+  }
+
+  return StoreRef(B.getRootWithoutRetain(), *this);
+}
+
+
+StoreRef RegionStoreManager::enterStackFrame(const GRState *state,
+                                             const StackFrameContext *frame) {
+  FunctionDecl const *FD = cast<FunctionDecl>(frame->getDecl());
+  FunctionDecl::param_const_iterator PI = FD->param_begin(),
+                                     PE = FD->param_end();
+  StoreRef store = StoreRef(state->getStore(), *this);
+
+  if (CallExpr const *CE = dyn_cast<CallExpr>(frame->getCallSite())) {
+    CallExpr::const_arg_iterator AI = CE->arg_begin(), AE = CE->arg_end();
+
+    // Copy the arg expression value to the arg variables. We check that
+    // PI != PE because the actual number of arguments may differ from the
+    // number of parameters in the function declaration.
+    for (; AI != AE && PI != PE; ++AI, ++PI) {
+      SVal ArgVal = state->getSVal(*AI);
+      store = Bind(store.getStore(),
+                   svalBuilder.makeLoc(MRMgr.getVarRegion(*PI, frame)), ArgVal);
+    }
+  } else if (const CXXConstructExpr *CE =
+              dyn_cast<CXXConstructExpr>(frame->getCallSite())) {
+    CXXConstructExpr::const_arg_iterator AI = CE->arg_begin(),
+                                         AE = CE->arg_end();
+
+    // Copy the arg expression value to the arg variables.
+    for (; AI != AE; ++AI, ++PI) {
+      SVal ArgVal = state->getSVal(*AI);
+      store = Bind(store.getStore(),
+                   svalBuilder.makeLoc(MRMgr.getVarRegion(*PI,frame)), ArgVal);
+    }
+  } else
+    assert(isa<CXXDestructorDecl>(frame->getDecl()));
+
+  return store;
+}
+
+//===----------------------------------------------------------------------===//
+// Utility methods.
+//===----------------------------------------------------------------------===//
+
+void RegionStoreManager::print(Store store, llvm::raw_ostream& OS,
+                               const char* nl, const char *sep) {
+  RegionBindings B = GetRegionBindings(store);
+  OS << "Store (direct and default bindings):" << nl;
+
+  for (RegionBindings::iterator I = B.begin(), E = B.end(); I != E; ++I)
+    OS << ' ' << I.getKey() << " : " << I.getData() << nl;
+}
diff --git a/lib/StaticAnalyzer/Core/SValBuilder.cpp b/lib/StaticAnalyzer/Core/SValBuilder.cpp
new file mode 100644
index 0000000..b0fd497
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -0,0 +1,310 @@
+// SValBuilder.cpp - Basic class for all SValBuilder implementations -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+// +//===----------------------------------------------------------------------===// +// +// This file defines SValBuilder, the base class for all (complete) SValBuilder +// implementations. +// +//===----------------------------------------------------------------------===// + +#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h" + +using namespace clang; +using namespace ento; + +//===----------------------------------------------------------------------===// +// Basic SVal creation. +//===----------------------------------------------------------------------===// + +DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType T) { + if (Loc::isLocType(T)) + return makeNull(); + + if (T->isIntegerType()) + return makeIntVal(0, T); + + // FIXME: Handle floats. + // FIXME: Handle structs. + return UnknownVal(); +} + + +NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op, + const llvm::APSInt& v, QualType T) { + // The Environment ensures we always get a persistent APSInt in + // BasicValueFactory, so we don't need to get the APSInt from + // BasicValueFactory again. + assert(!Loc::isLocType(T)); + return nonloc::SymExprVal(SymMgr.getSymIntExpr(lhs, op, v, T)); +} + +NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op, + const SymExpr *rhs, QualType T) { + assert(SymMgr.getType(lhs) == SymMgr.getType(rhs)); + assert(!Loc::isLocType(T)); + return nonloc::SymExprVal(SymMgr.getSymSymExpr(lhs, op, rhs, T)); +} + + +SVal SValBuilder::convertToArrayIndex(SVal V) { + if (V.isUnknownOrUndef()) + return V; + + // Common case: we have an appropriately sized integer. 
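+  // For example (illustrative): a concrete 'unsigned char' subscript, as in
+  // 'buf[c]', fails the width/signedness check and is cast below to the
+  // signed array-index type.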
+ if (nonloc::ConcreteInt* CI = dyn_cast<nonloc::ConcreteInt>(&V)) { + const llvm::APSInt& I = CI->getValue(); + if (I.getBitWidth() == ArrayIndexWidth && I.isSigned()) + return V; + } + + return evalCastNL(cast<NonLoc>(V), ArrayIndexTy); +} + +DefinedOrUnknownSVal +SValBuilder::getRegionValueSymbolVal(const TypedRegion* R) { + QualType T = R->getValueType(); + + if (!SymbolManager::canSymbolicate(T)) + return UnknownVal(); + + SymbolRef sym = SymMgr.getRegionValueSymbol(R); + + if (Loc::isLocType(T)) + return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym)); + + return nonloc::SymbolVal(sym); +} + +DefinedOrUnknownSVal SValBuilder::getConjuredSymbolVal(const void *SymbolTag, + const Expr *E, + unsigned Count) { + QualType T = E->getType(); + + if (!SymbolManager::canSymbolicate(T)) + return UnknownVal(); + + SymbolRef sym = SymMgr.getConjuredSymbol(E, Count, SymbolTag); + + if (Loc::isLocType(T)) + return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym)); + + return nonloc::SymbolVal(sym); +} + +DefinedOrUnknownSVal SValBuilder::getConjuredSymbolVal(const void *SymbolTag, + const Expr *E, + QualType T, + unsigned Count) { + + if (!SymbolManager::canSymbolicate(T)) + return UnknownVal(); + + SymbolRef sym = SymMgr.getConjuredSymbol(E, T, Count, SymbolTag); + + if (Loc::isLocType(T)) + return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym)); + + return nonloc::SymbolVal(sym); +} + +DefinedSVal SValBuilder::getMetadataSymbolVal(const void *SymbolTag, + const MemRegion *MR, + const Expr *E, QualType T, + unsigned Count) { + assert(SymbolManager::canSymbolicate(T) && "Invalid metadata symbol type"); + + SymbolRef sym = SymMgr.getMetadataSymbol(MR, E, T, Count, SymbolTag); + + if (Loc::isLocType(T)) + return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym)); + + return nonloc::SymbolVal(sym); +} + +DefinedOrUnknownSVal +SValBuilder::getDerivedRegionValueSymbolVal(SymbolRef parentSymbol, + const TypedRegion *R) { + QualType T = R->getValueType(); + + if (!SymbolManager::canSymbolicate(T)) + return UnknownVal(); + + SymbolRef sym = SymMgr.getDerivedSymbol(parentSymbol, R); + + if (Loc::isLocType(T)) + return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym)); + + return nonloc::SymbolVal(sym); +} + +DefinedSVal SValBuilder::getFunctionPointer(const FunctionDecl* FD) { + return loc::MemRegionVal(MemMgr.getFunctionTextRegion(FD)); +} + +DefinedSVal SValBuilder::getBlockPointer(const BlockDecl *D, + CanQualType locTy, + const LocationContext *LC) { + const BlockTextRegion *BC = + MemMgr.getBlockTextRegion(D, locTy, LC->getAnalysisContext()); + const BlockDataRegion *BD = MemMgr.getBlockDataRegion(BC, LC); + return loc::MemRegionVal(BD); +} + +//===----------------------------------------------------------------------===// + +SVal SValBuilder::evalBinOp(const GRState *ST, BinaryOperator::Opcode Op, + SVal L, SVal R, QualType T) { + + if (L.isUndef() || R.isUndef()) + return UndefinedVal(); + + if (L.isUnknown() || R.isUnknown()) + return UnknownVal(); + + if (isa<Loc>(L)) { + if (isa<Loc>(R)) + return evalBinOpLL(ST, Op, cast<Loc>(L), cast<Loc>(R), T); + + return evalBinOpLN(ST, Op, cast<Loc>(L), cast<NonLoc>(R), T); + } + + if (isa<Loc>(R)) { + // Support pointer arithmetic where the addend is on the left + // and the pointer on the right. + assert(Op == BO_Add); + + // Commute the operands. 
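+      // For example (illustrative): '4 + p' is evaluated as 'p + 4'.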
+ return evalBinOpLN(ST, Op, cast<Loc>(R), cast<NonLoc>(L), T); + } + + return evalBinOpNN(ST, Op, cast<NonLoc>(L), cast<NonLoc>(R), T); +} + +DefinedOrUnknownSVal SValBuilder::evalEQ(const GRState *ST, + DefinedOrUnknownSVal L, + DefinedOrUnknownSVal R) { + return cast<DefinedOrUnknownSVal>(evalBinOp(ST, BO_EQ, L, R, + Context.IntTy)); +} + +// FIXME: should rewrite according to the cast kind. +SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) { + if (val.isUnknownOrUndef() || castTy == originalTy) + return val; + + // For const casts, just propagate the value. + if (!castTy->isVariableArrayType() && !originalTy->isVariableArrayType()) + if (Context.hasSameUnqualifiedType(castTy, originalTy)) + return val; + + // Check for casts to real or complex numbers. We don't handle these at all + // right now. + if (castTy->isFloatingType() || castTy->isAnyComplexType()) + return UnknownVal(); + + // Check for casts from integers to integers. + if (castTy->isIntegerType() && originalTy->isIntegerType()) + return evalCastNL(cast<NonLoc>(val), castTy); + + // Check for casts from pointers to integers. + if (castTy->isIntegerType() && Loc::isLocType(originalTy)) + return evalCastL(cast<Loc>(val), castTy); + + // Check for casts from integers to pointers. + if (Loc::isLocType(castTy) && originalTy->isIntegerType()) { + if (nonloc::LocAsInteger *LV = dyn_cast<nonloc::LocAsInteger>(&val)) { + if (const MemRegion *R = LV->getLoc().getAsRegion()) { + StoreManager &storeMgr = StateMgr.getStoreManager(); + R = storeMgr.castRegion(R, castTy); + return R ? SVal(loc::MemRegionVal(R)) : UnknownVal(); + } + return LV->getLoc(); + } + goto DispatchCast; + } + + // Just pass through function and block pointers. + if (originalTy->isBlockPointerType() || originalTy->isFunctionPointerType()) { + assert(Loc::isLocType(castTy)); + return val; + } + + // Check for casts from array type to another type. + if (originalTy->isArrayType()) { + // We will always decay to a pointer. + val = StateMgr.ArrayToPointer(cast<Loc>(val)); + + // Are we casting from an array to a pointer? If so just pass on + // the decayed value. + if (castTy->isPointerType()) + return val; + + // Are we casting from an array to an integer? If so, cast the decayed + // pointer value to an integer. + assert(castTy->isIntegerType()); + + // FIXME: Keep these here for now in case we decide soon that we + // need the original decayed type. + // QualType elemTy = cast<ArrayType>(originalTy)->getElementType(); + // QualType pointerTy = C.getPointerType(elemTy); + return evalCastL(cast<Loc>(val), castTy); + } + + // Check for casts from a region to a specific type. + if (const MemRegion *R = val.getAsRegion()) { + // FIXME: We should handle the case where we strip off view layers to get + // to a desugared type. + + if (!Loc::isLocType(castTy)) { + // FIXME: There can be gross cases where one casts the result of a function + // (that returns a pointer) to some other value that happens to fit + // within that pointer value. We currently have no good way to + // model such operations. When this happens, the underlying operation + // is that the caller is reasoning about bits. Conceptually we are + // layering a "view" of a location on top of those bits. Perhaps + // we need to be more lazy about mutual possible views, even on an + // SVal? This may be necessary for bit-level reasoning as well. + return UnknownVal(); + } + + // We get a symbolic function pointer for a dereference of a function + // pointer, but it is of function type. 
Example:
+
+  //  struct FPRec {
+  //    void (*my_func)(int * x);
+  //  };
+  //
+  //  int bar(int x);
+  //
+  //  int f1_a(struct FPRec* foo) {
+  //    int x;
+  //    (*foo->my_func)(&x);
+  //    return bar(x)+1; // no-warning
+  //  }
+
+  assert(Loc::isLocType(originalTy) || originalTy->isFunctionType() ||
+         originalTy->isBlockPointerType() || castTy->isReferenceType());
+
+  StoreManager &storeMgr = StateMgr.getStoreManager();
+
+  // Delegate to store manager to get the result of casting a region to a
+  // different type. If the MemRegion* returned is NULL, this expression
+  // evaluates to UnknownVal.
+  R = storeMgr.castRegion(R, castTy);
+  return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
+  }
+
+DispatchCast:
+  // All other cases.
+  return isa<Loc>(val) ? evalCastL(cast<Loc>(val), castTy)
+                       : evalCastNL(cast<NonLoc>(val), castTy);
+}
diff --git a/lib/StaticAnalyzer/Core/SVals.cpp b/lib/StaticAnalyzer/Core/SVals.cpp
new file mode 100644
index 0000000..4614e34
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/SVals.cpp
@@ -0,0 +1,378 @@
+//= SVals.cpp - Abstract RValues for Path-Sens. Value Tracking -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SVal, Loc, and NonLoc, classes that represent
+// abstract r-values for use with path-sensitive value tracking.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Basic/IdentifierTable.h"
+
+using namespace clang;
+using namespace ento;
+using llvm::dyn_cast;
+using llvm::cast;
+using llvm::APSInt;
+
+//===----------------------------------------------------------------------===//
+// Symbol iteration within an SVal.
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// Utility methods.
+//===----------------------------------------------------------------------===//
+
+bool SVal::hasConjuredSymbol() const {
+  if (const nonloc::SymbolVal* SV = dyn_cast<nonloc::SymbolVal>(this)) {
+    SymbolRef sym = SV->getSymbol();
+    if (isa<SymbolConjured>(sym))
+      return true;
+  }
+
+  if (const loc::MemRegionVal *RV = dyn_cast<loc::MemRegionVal>(this)) {
+    const MemRegion *R = RV->getRegion();
+    if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+      SymbolRef sym = SR->getSymbol();
+      if (isa<SymbolConjured>(sym))
+        return true;
+    }
+  }
+
+  return false;
+}
+
+const FunctionDecl *SVal::getAsFunctionDecl() const {
+  if (const loc::MemRegionVal* X = dyn_cast<loc::MemRegionVal>(this)) {
+    const MemRegion* R = X->getRegion();
+    if (const FunctionTextRegion *CTR = R->getAs<FunctionTextRegion>())
+      return CTR->getDecl();
+  }
+
+  return NULL;
+}
+
+/// getAsLocSymbol - If this SVal is a location (subclasses Loc) and
+/// wraps a symbol, return that SymbolRef. Otherwise return 0.
+// FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
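+// For example, a Loc wrapping a SymbolicRegion (or such a location cast to
+// an integer) yields that region's underlying symbol.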
+SymbolRef SVal::getAsLocSymbol() const {
+  if (const nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(this))
+    return X->getLoc().getAsLocSymbol();
+
+  if (const loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(this)) {
+    const MemRegion *R = X->stripCasts();
+    if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(R))
+      return SymR->getSymbol();
+  }
+  return NULL;
+}
+
+/// Get the symbol in the SVal or its base region.
+SymbolRef SVal::getLocSymbolInBase() const {
+  const loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(this);
+
+  if (!X)
+    return 0;
+
+  const MemRegion *R = X->getRegion();
+
+  while (const SubRegion *SR = dyn_cast<SubRegion>(R)) {
+    if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SR))
+      return SymR->getSymbol();
+    else
+      R = SR->getSuperRegion();
+  }
+
+  return 0;
+}
+
+/// getAsSymbol - If this SVal wraps a symbol return that SymbolRef.
+/// Otherwise return 0.
+// FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
+SymbolRef SVal::getAsSymbol() const {
+  if (const nonloc::SymbolVal *X = dyn_cast<nonloc::SymbolVal>(this))
+    return X->getSymbol();
+
+  if (const nonloc::SymExprVal *X = dyn_cast<nonloc::SymExprVal>(this))
+    if (SymbolRef Y = dyn_cast<SymbolData>(X->getSymbolicExpression()))
+      return Y;
+
+  return getAsLocSymbol();
+}
+
+/// getAsSymbolicExpression - If this SVal wraps a symbolic expression then
+/// return that expression. Otherwise return NULL.
+const SymExpr *SVal::getAsSymbolicExpression() const {
+  if (const nonloc::SymExprVal *X = dyn_cast<nonloc::SymExprVal>(this))
+    return X->getSymbolicExpression();
+
+  return getAsSymbol();
+}
+
+const MemRegion *SVal::getAsRegion() const {
+  if (const loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(this))
+    return X->getRegion();
+
+  if (const nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(this)) {
+    return X->getLoc().getAsRegion();
+  }
+
+  return 0;
+}
+
+const MemRegion *loc::MemRegionVal::stripCasts() const {
+  const MemRegion *R = getRegion();
+  return R ?
R->StripCasts() : NULL; +} + +bool SVal::symbol_iterator::operator==(const symbol_iterator &X) const { + return itr == X.itr; +} + +bool SVal::symbol_iterator::operator!=(const symbol_iterator &X) const { + return itr != X.itr; +} + +SVal::symbol_iterator::symbol_iterator(const SymExpr *SE) { + itr.push_back(SE); + while (!isa<SymbolData>(itr.back())) expand(); +} + +SVal::symbol_iterator& SVal::symbol_iterator::operator++() { + assert(!itr.empty() && "attempting to iterate on an 'end' iterator"); + assert(isa<SymbolData>(itr.back())); + itr.pop_back(); + if (!itr.empty()) + while (!isa<SymbolData>(itr.back())) expand(); + return *this; +} + +SymbolRef SVal::symbol_iterator::operator*() { + assert(!itr.empty() && "attempting to dereference an 'end' iterator"); + return cast<SymbolData>(itr.back()); +} + +void SVal::symbol_iterator::expand() { + const SymExpr *SE = itr.back(); + itr.pop_back(); + + if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SE)) { + itr.push_back(SIE->getLHS()); + return; + } + else if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(SE)) { + itr.push_back(SSE->getLHS()); + itr.push_back(SSE->getRHS()); + return; + } + + assert(false && "unhandled expansion case"); +} + +const void *nonloc::LazyCompoundVal::getStore() const { + return static_cast<const LazyCompoundValData*>(Data)->getStore(); +} + +const TypedRegion *nonloc::LazyCompoundVal::getRegion() const { + return static_cast<const LazyCompoundValData*>(Data)->getRegion(); +} + +//===----------------------------------------------------------------------===// +// Other Iterators. +//===----------------------------------------------------------------------===// + +nonloc::CompoundVal::iterator nonloc::CompoundVal::begin() const { + return getValue()->begin(); +} + +nonloc::CompoundVal::iterator nonloc::CompoundVal::end() const { + return getValue()->end(); +} + +//===----------------------------------------------------------------------===// +// Useful predicates. +//===----------------------------------------------------------------------===// + +bool SVal::isConstant() const { + return isa<nonloc::ConcreteInt>(this) || isa<loc::ConcreteInt>(this); +} + +bool SVal::isConstant(int I) const { + if (isa<loc::ConcreteInt>(*this)) + return cast<loc::ConcreteInt>(*this).getValue() == I; + else if (isa<nonloc::ConcreteInt>(*this)) + return cast<nonloc::ConcreteInt>(*this).getValue() == I; + else + return false; +} + +bool SVal::isZeroConstant() const { + return isConstant(0); +} + + +//===----------------------------------------------------------------------===// +// Transfer function dispatch for Non-Locs. +//===----------------------------------------------------------------------===// + +SVal nonloc::ConcreteInt::evalBinOp(SValBuilder &svalBuilder, + BinaryOperator::Opcode Op, + const nonloc::ConcreteInt& R) const { + const llvm::APSInt* X = + svalBuilder.getBasicValueFactory().evalAPSInt(Op, getValue(), R.getValue()); + + if (X) + return nonloc::ConcreteInt(*X); + else + return UndefinedVal(); +} + +nonloc::ConcreteInt +nonloc::ConcreteInt::evalComplement(SValBuilder &svalBuilder) const { + return svalBuilder.makeIntVal(~getValue()); +} + +nonloc::ConcreteInt +nonloc::ConcreteInt::evalMinus(SValBuilder &svalBuilder) const { + return svalBuilder.makeIntVal(-getValue()); +} + +//===----------------------------------------------------------------------===// +// Transfer function dispatch for Locs. 
+//===----------------------------------------------------------------------===//
+
+SVal loc::ConcreteInt::evalBinOp(BasicValueFactory& BasicVals,
+                                 BinaryOperator::Opcode Op,
+                                 const loc::ConcreteInt& R) const {
+
+  assert (Op == BO_Add || Op == BO_Sub ||
+          (Op >= BO_LT && Op <= BO_NE));
+
+  const llvm::APSInt* X = BasicVals.evalAPSInt(Op, getValue(), R.getValue());
+
+  if (X)
+    return loc::ConcreteInt(*X);
+  else
+    return UndefinedVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Pretty-Printing.
+//===----------------------------------------------------------------------===//
+
+void SVal::dump() const { dumpToStream(llvm::errs()); }
+
+void SVal::dumpToStream(llvm::raw_ostream& os) const {
+  switch (getBaseKind()) {
+  case UnknownKind:
+    os << "Unknown";
+    break;
+  case NonLocKind:
+    cast<NonLoc>(this)->dumpToStream(os);
+    break;
+  case LocKind:
+    cast<Loc>(this)->dumpToStream(os);
+    break;
+  case UndefinedKind:
+    os << "Undefined";
+    break;
+  default:
+    assert (false && "Invalid SVal.");
+  }
+}
+
+void NonLoc::dumpToStream(llvm::raw_ostream& os) const {
+  switch (getSubKind()) {
+  case nonloc::ConcreteIntKind: {
+    const nonloc::ConcreteInt& C = *cast<nonloc::ConcreteInt>(this);
+    if (C.getValue().isUnsigned())
+      os << C.getValue().getZExtValue();
+    else
+      os << C.getValue().getSExtValue();
+    os << ' ' << (C.getValue().isUnsigned() ? 'U' : 'S')
+       << C.getValue().getBitWidth() << 'b';
+    break;
+  }
+  case nonloc::SymbolValKind:
+    os << '$' << cast<nonloc::SymbolVal>(this)->getSymbol();
+    break;
+  case nonloc::SymExprValKind: {
+    const nonloc::SymExprVal& C = *cast<nonloc::SymExprVal>(this);
+    const SymExpr *SE = C.getSymbolicExpression();
+    os << SE;
+    break;
+  }
+  case nonloc::LocAsIntegerKind: {
+    const nonloc::LocAsInteger& C = *cast<nonloc::LocAsInteger>(this);
+    os << C.getLoc() << " [as " << C.getNumBits() << " bit integer]";
+    break;
+  }
+  case nonloc::CompoundValKind: {
+    const nonloc::CompoundVal& C = *cast<nonloc::CompoundVal>(this);
+    os << "compoundVal{";
+    bool first = true;
+    for (nonloc::CompoundVal::iterator I=C.begin(), E=C.end(); I!=E; ++I) {
+      if (first) {
+        os << ' '; first = false;
+      }
+      else
+        os << ", ";
+
+      (*I).dumpToStream(os);
+    }
+    os << "}";
+    break;
+  }
+  case nonloc::LazyCompoundValKind: {
+    const nonloc::LazyCompoundVal &C = *cast<nonloc::LazyCompoundVal>(this);
+    os << "lazyCompoundVal{" << const_cast<void *>(C.getStore())
+       << ',' << C.getRegion()
+       << '}';
+    break;
+  }
+  default:
+    assert (false && "Pretty-printing not implemented for this NonLoc.");
+    break;
+  }
+}
+
+void Loc::dumpToStream(llvm::raw_ostream& os) const {
+  switch (getSubKind()) {
+  case loc::ConcreteIntKind:
+    os << cast<loc::ConcreteInt>(this)->getValue().getZExtValue() << " (Loc)";
+    break;
+  case loc::GotoLabelKind:
+    os << "&&" << cast<loc::GotoLabel>(this)->getLabel()->getName();
+    break;
+  case loc::MemRegionKind:
+    os << '&' << cast<loc::MemRegionVal>(this)->getRegion()->getString();
+    break;
+  case loc::ObjCPropRefKind: {
+    const ObjCPropertyRefExpr *E = cast<loc::ObjCPropRef>(this)->getPropRefExpr();
+    os << "objc-prop{";
+    if (E->isSuperReceiver())
+      os << "super.";
+    else if (E->getBase())
+      os << "<base>.";
+
+    if (E->isImplicitProperty())
+      os << E->getImplicitPropertyGetter()->getSelector().getAsString();
+    else
+      os << E->getExplicitProperty()->getName();
+
+    os << "}";
+    break;
+  }
+  default:
+    assert(false && "Pretty-printing not implemented for this Loc.");
+    break;
+  }
+}
diff --git
a/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp new file mode 100644 index 0000000..e0b61ab --- /dev/null +++ b/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp @@ -0,0 +1,303 @@ +//== SimpleConstraintManager.cpp --------------------------------*- C++ -*--==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines SimpleConstraintManager, a class that holds code shared +// between BasicConstraintManager and RangeConstraintManager. +// +//===----------------------------------------------------------------------===// + +#include "SimpleConstraintManager.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/Checker.h" + +namespace clang { + +namespace ento { + +SimpleConstraintManager::~SimpleConstraintManager() {} + +bool SimpleConstraintManager::canReasonAbout(SVal X) const { + if (nonloc::SymExprVal *SymVal = dyn_cast<nonloc::SymExprVal>(&X)) { + const SymExpr *SE = SymVal->getSymbolicExpression(); + + if (isa<SymbolData>(SE)) + return true; + + if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SE)) { + switch (SIE->getOpcode()) { + // We don't reason yet about bitwise-constraints on symbolic values. + case BO_And: + case BO_Or: + case BO_Xor: + return false; + // We don't reason yet about these arithmetic constraints on + // symbolic values. + case BO_Mul: + case BO_Div: + case BO_Rem: + case BO_Shl: + case BO_Shr: + return false; + // All other cases. + default: + return true; + } + } + + return false; + } + + return true; +} + +const GRState *SimpleConstraintManager::assume(const GRState *state, + DefinedSVal Cond, + bool Assumption) { + if (isa<NonLoc>(Cond)) + return assume(state, cast<NonLoc>(Cond), Assumption); + else + return assume(state, cast<Loc>(Cond), Assumption); +} + +const GRState *SimpleConstraintManager::assume(const GRState *state, Loc cond, + bool assumption) { + state = assumeAux(state, cond, assumption); + return SU.processAssume(state, cond, assumption); +} + +const GRState *SimpleConstraintManager::assumeAux(const GRState *state, + Loc Cond, bool Assumption) { + + BasicValueFactory &BasicVals = state->getBasicVals(); + + switch (Cond.getSubKind()) { + default: + assert (false && "'Assume' not implemented for this Loc."); + return state; + + case loc::MemRegionKind: { + // FIXME: Should this go into the storemanager? + + const MemRegion *R = cast<loc::MemRegionVal>(Cond).getRegion(); + const SubRegion *SubR = dyn_cast<SubRegion>(R); + + while (SubR) { + // FIXME: now we only find the first symbolic region. + if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SubR)) { + const llvm::APSInt &zero = BasicVals.getZeroWithPtrWidth(); + if (Assumption) + return assumeSymNE(state, SymR->getSymbol(), zero, zero); + else + return assumeSymEQ(state, SymR->getSymbol(), zero, zero); + } + SubR = dyn_cast<SubRegion>(SubR->getSuperRegion()); + } + + // FALL-THROUGH. + } + + case loc::GotoLabelKind: + return Assumption ? state : NULL; + + case loc::ConcreteIntKind: { + bool b = cast<loc::ConcreteInt>(Cond).getValue() != 0; + bool isFeasible = b ? Assumption : !Assumption; + return isFeasible ? 
state : NULL; + } + } // end switch +} + +const GRState *SimpleConstraintManager::assume(const GRState *state, + NonLoc cond, + bool assumption) { + state = assumeAux(state, cond, assumption); + return SU.processAssume(state, cond, assumption); +} + +static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) { + // FIXME: This should probably be part of BinaryOperator, since this isn't + // the only place it's used. (This code was copied from SimpleSValBuilder.cpp.) + switch (op) { + default: + assert(false && "Invalid opcode."); + case BO_LT: return BO_GE; + case BO_GT: return BO_LE; + case BO_LE: return BO_GT; + case BO_GE: return BO_LT; + case BO_EQ: return BO_NE; + case BO_NE: return BO_EQ; + } +} + +const GRState *SimpleConstraintManager::assumeAux(const GRState *state, + NonLoc Cond, + bool Assumption) { + + // We cannot reason about SymSymExprs, + // and can only reason about some SymIntExprs. + if (!canReasonAbout(Cond)) { + // Just return the current state indicating that the path is feasible. + // This may be an over-approximation of what is possible. + return state; + } + + BasicValueFactory &BasicVals = state->getBasicVals(); + SymbolManager &SymMgr = state->getSymbolManager(); + + switch (Cond.getSubKind()) { + default: + assert(false && "'Assume' not implemented for this NonLoc"); + + case nonloc::SymbolValKind: { + nonloc::SymbolVal& SV = cast<nonloc::SymbolVal>(Cond); + SymbolRef sym = SV.getSymbol(); + QualType T = SymMgr.getType(sym); + const llvm::APSInt &zero = BasicVals.getValue(0, T); + if (Assumption) + return assumeSymNE(state, sym, zero, zero); + else + return assumeSymEQ(state, sym, zero, zero); + } + + case nonloc::SymExprValKind: { + nonloc::SymExprVal V = cast<nonloc::SymExprVal>(Cond); + + // For now, we only handle expressions whose RHS is an integer. + // All other expressions are assumed to be feasible. + const SymIntExpr *SE = dyn_cast<SymIntExpr>(V.getSymbolicExpression()); + if (!SE) + return state; + + BinaryOperator::Opcode op = SE->getOpcode(); + // Implicitly compare non-comparison expressions to 0. + if (!BinaryOperator::isComparisonOp(op)) { + QualType T = SymMgr.getType(SE); + const llvm::APSInt &zero = BasicVals.getValue(0, T); + op = (Assumption ? BO_NE : BO_EQ); + return assumeSymRel(state, SE, op, zero); + } + + // From here on out, op is the real comparison we'll be testing. + if (!Assumption) + op = NegateComparison(op); + + return assumeSymRel(state, SE->getLHS(), op, SE->getRHS()); + } + + case nonloc::ConcreteIntKind: { + bool b = cast<nonloc::ConcreteInt>(Cond).getValue() != 0; + bool isFeasible = b ? Assumption : !Assumption; + return isFeasible ? state : NULL; + } + + case nonloc::LocAsIntegerKind: + return assumeAux(state, cast<nonloc::LocAsInteger>(Cond).getLoc(), + Assumption); + } // end switch +} + +const GRState *SimpleConstraintManager::assumeSymRel(const GRState *state, + const SymExpr *LHS, + BinaryOperator::Opcode op, + const llvm::APSInt& Int) { + assert(BinaryOperator::isComparisonOp(op) && + "Non-comparison ops should be rewritten as comparisons to zero."); + + // We only handle simple comparisons of the form "$sym == constant" + // or "($sym+constant1) == constant2". + // The adjustment is "constant1" in the above expression. It's used to + // "slide" the solution range around for modular arithmetic. For example, + // x < 4 has the solution [0, 3]. x+2 < 4 has the solution [0-2, 3-2], which + // in modular arithmetic is [0, 1] U [UINT_MAX-1, UINT_MAX]. 
It's up to
+  // the subclasses of SimpleConstraintManager to handle the adjustment.
+  llvm::APSInt Adjustment;
+
+  // First check if the LHS is a simple symbol reference.
+  SymbolRef Sym = dyn_cast<SymbolData>(LHS);
+  if (Sym) {
+    Adjustment = 0;
+  } else {
+    // Next, see if it's a "($sym+constant1)" expression.
+    const SymIntExpr *SE = dyn_cast<SymIntExpr>(LHS);
+
+    // We don't handle "($sym1+$sym2)".
+    // Give up and assume the constraint is feasible.
+    if (!SE)
+      return state;
+
+    // We don't handle "(<expr>+constant1)".
+    // Give up and assume the constraint is feasible.
+    Sym = dyn_cast<SymbolData>(SE->getLHS());
+    if (!Sym)
+      return state;
+
+    // Get the constant out of the expression "($sym+constant1)".
+    switch (SE->getOpcode()) {
+    case BO_Add:
+      Adjustment = SE->getRHS();
+      break;
+    case BO_Sub:
+      Adjustment = -SE->getRHS();
+      break;
+    default:
+      // We don't handle non-additive operators.
+      // Give up and assume the constraint is feasible.
+      return state;
+    }
+  }
+
+  // FIXME: This next section is a hack. It silently converts the integers to
+  // be of the same type as the symbol, which is not always correct. Really the
+  // comparisons should be performed using the Int's type, then mapped back to
+  // the symbol's range of values.
+  GRStateManager &StateMgr = state->getStateManager();
+  ASTContext &Ctx = StateMgr.getContext();
+
+  QualType T = Sym->getType(Ctx);
+  assert(T->isIntegerType() || Loc::isLocType(T));
+  unsigned bitwidth = Ctx.getTypeSize(T);
+  bool isSymUnsigned = T->isUnsignedIntegerType() || Loc::isLocType(T);
+
+  // Convert the adjustment.
+  Adjustment.setIsUnsigned(isSymUnsigned);
+  Adjustment = Adjustment.extOrTrunc(bitwidth);
+
+  // Convert the right-hand side integer.
+  llvm::APSInt ConvertedInt(Int, isSymUnsigned);
+  ConvertedInt = ConvertedInt.extOrTrunc(bitwidth);
+
+  switch (op) {
+  default:
+    // No logic yet for other operators. Assume the constraint is feasible.
+    return state;
+
+  case BO_EQ:
+    return assumeSymEQ(state, Sym, ConvertedInt, Adjustment);
+
+  case BO_NE:
+    return assumeSymNE(state, Sym, ConvertedInt, Adjustment);
+
+  case BO_GT:
+    return assumeSymGT(state, Sym, ConvertedInt, Adjustment);
+
+  case BO_GE:
+    return assumeSymGE(state, Sym, ConvertedInt, Adjustment);
+
+  case BO_LT:
+    return assumeSymLT(state, Sym, ConvertedInt, Adjustment);
+
+  case BO_LE:
+    return assumeSymLE(state, Sym, ConvertedInt, Adjustment);
+  } // end switch
+}
+
+} // end of namespace ento
+
+} // end of namespace clang
diff --git a/lib/StaticAnalyzer/Core/SimpleConstraintManager.h b/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
new file mode 100644
index 0000000..a2952af
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
@@ -0,0 +1,93 @@
+//== SimpleConstraintManager.h ----------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Code shared between BasicConstraintManager and RangeConstraintManager.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_SIMPLE_CONSTRAINT_MANAGER_H
+#define LLVM_CLANG_GR_SIMPLE_CONSTRAINT_MANAGER_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+
+namespace clang {
+
+namespace ento {
+
+class SimpleConstraintManager : public ConstraintManager {
+  SubEngine &SU;
+public:
+  SimpleConstraintManager(SubEngine &subengine) : SU(subengine) {}
+  virtual ~SimpleConstraintManager();
+
+  //===------------------------------------------------------------------===//
+  // Common implementation for the interface provided by ConstraintManager.
+  //===------------------------------------------------------------------===//
+
+  bool canReasonAbout(SVal X) const;
+
+  const GRState *assume(const GRState *state, DefinedSVal Cond,
+                        bool Assumption);
+
+  const GRState *assume(const GRState *state, Loc Cond, bool Assumption);
+
+  const GRState *assume(const GRState *state, NonLoc Cond, bool Assumption);
+
+  const GRState *assumeSymRel(const GRState *state,
+                              const SymExpr *LHS,
+                              BinaryOperator::Opcode op,
+                              const llvm::APSInt& Int);
+
+protected:
+
+  //===------------------------------------------------------------------===//
+  // Interface that subclasses must implement.
+  //===------------------------------------------------------------------===//
+
+  // Each of these is of the form "$sym+Adj <> V", where "<>" is the comparison
+  // operation for the method being invoked.
+  virtual const GRState *assumeSymNE(const GRState *state, SymbolRef sym,
+                                     const llvm::APSInt& V,
+                                     const llvm::APSInt& Adjustment) = 0;
+
+  virtual const GRState *assumeSymEQ(const GRState *state, SymbolRef sym,
+                                     const llvm::APSInt& V,
+                                     const llvm::APSInt& Adjustment) = 0;
+
+  virtual const GRState *assumeSymLT(const GRState *state, SymbolRef sym,
+                                     const llvm::APSInt& V,
+                                     const llvm::APSInt& Adjustment) = 0;
+
+  virtual const GRState *assumeSymGT(const GRState *state, SymbolRef sym,
+                                     const llvm::APSInt& V,
+                                     const llvm::APSInt& Adjustment) = 0;
+
+  virtual const GRState *assumeSymLE(const GRState *state, SymbolRef sym,
+                                     const llvm::APSInt& V,
+                                     const llvm::APSInt& Adjustment) = 0;
+
+  virtual const GRState *assumeSymGE(const GRState *state, SymbolRef sym,
+                                     const llvm::APSInt& V,
+                                     const llvm::APSInt& Adjustment) = 0;
+
+  //===------------------------------------------------------------------===//
+  // Internal implementation.
+  //===------------------------------------------------------------------===//
+
+  const GRState *assumeAux(const GRState *state, Loc Cond, bool Assumption);
+
+  const GRState *assumeAux(const GRState *state, NonLoc Cond, bool Assumption);
+};
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
diff --git a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
new file mode 100644
index 0000000..9a46bd6
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -0,0 +1,917 @@
+// SimpleSValBuilder.cpp - A basic SValBuilder -----------------------*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SimpleSValBuilder, a basic implementation of SValBuilder.
+// +//===----------------------------------------------------------------------===// + +#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h" + +using namespace clang; +using namespace ento; + +namespace { +class SimpleSValBuilder : public SValBuilder { +protected: + virtual SVal evalCastNL(NonLoc val, QualType castTy); + virtual SVal evalCastL(Loc val, QualType castTy); + +public: + SimpleSValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context, + GRStateManager &stateMgr) + : SValBuilder(alloc, context, stateMgr) {} + virtual ~SimpleSValBuilder() {} + + virtual SVal evalMinus(NonLoc val); + virtual SVal evalComplement(NonLoc val); + virtual SVal evalBinOpNN(const GRState *state, BinaryOperator::Opcode op, + NonLoc lhs, NonLoc rhs, QualType resultTy); + virtual SVal evalBinOpLL(const GRState *state, BinaryOperator::Opcode op, + Loc lhs, Loc rhs, QualType resultTy); + virtual SVal evalBinOpLN(const GRState *state, BinaryOperator::Opcode op, + Loc lhs, NonLoc rhs, QualType resultTy); + + /// getKnownValue - evaluates a given SVal. If the SVal has only one possible + /// (integer) value, that value is returned. Otherwise, returns NULL. + virtual const llvm::APSInt *getKnownValue(const GRState *state, SVal V); + + SVal MakeSymIntVal(const SymExpr *LHS, BinaryOperator::Opcode op, + const llvm::APSInt &RHS, QualType resultTy); +}; +} // end anonymous namespace + +SValBuilder *ento::createSimpleSValBuilder(llvm::BumpPtrAllocator &alloc, + ASTContext &context, + GRStateManager &stateMgr) { + return new SimpleSValBuilder(alloc, context, stateMgr); +} + +//===----------------------------------------------------------------------===// +// Transfer function for Casts. +//===----------------------------------------------------------------------===// + +SVal SimpleSValBuilder::evalCastNL(NonLoc val, QualType castTy) { + + bool isLocType = Loc::isLocType(castTy); + + if (nonloc::LocAsInteger *LI = dyn_cast<nonloc::LocAsInteger>(&val)) { + if (isLocType) + return LI->getLoc(); + + // FIXME: Correctly support promotions/truncations. + unsigned castSize = Context.getTypeSize(castTy); + if (castSize == LI->getNumBits()) + return val; + return makeLocAsInteger(LI->getLoc(), castSize); + } + + if (const SymExpr *se = val.getAsSymbolicExpression()) { + QualType T = Context.getCanonicalType(se->getType(Context)); + if (T == Context.getCanonicalType(castTy)) + return val; + + // FIXME: Remove this hack when we support symbolic truncation/extension. + // HACK: If both castTy and T are integers, ignore the cast. This is + // not a permanent solution. Eventually we want to precisely handle + // extension/truncation of symbolic integers. This prevents us from losing + // precision when we assign 'x = y' and 'y' is symbolic and x and y are + // different integer types. + if (T->isIntegerType() && castTy->isIntegerType()) + return val; + + return UnknownVal(); + } + + if (!isa<nonloc::ConcreteInt>(val)) + return UnknownVal(); + + // Only handle casts from integers to integers. + if (!isLocType && !castTy->isIntegerType()) + return UnknownVal(); + + llvm::APSInt i = cast<nonloc::ConcreteInt>(val).getValue(); + i.setIsUnsigned(castTy->isUnsignedIntegerType() || Loc::isLocType(castTy)); + i = i.extOrTrunc(Context.getTypeSize(castTy)); + + if (isLocType) + return makeIntLocVal(i); + else + return makeIntVal(i); +} + +SVal SimpleSValBuilder::evalCastL(Loc val, QualType castTy) { + + // Casts from pointers -> pointers, just return the lval. 
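+  // For example, a 'char *' value cast to 'void *' keeps the same region
+  // value.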
+  //
+  // Casts from pointers -> references, just return the lval. These
+  // can be introduced by the frontend for corner cases, e.g.
+  // casting from va_list* to __builtin_va_list&.
+  //
+  if (Loc::isLocType(castTy) || castTy->isReferenceType())
+    return val;
+
+  // FIXME: Handle transparent unions where a value can be "transparently"
+  // lifted into a union type.
+  if (castTy->isUnionType())
+    return UnknownVal();
+
+  if (castTy->isIntegerType()) {
+    unsigned BitWidth = Context.getTypeSize(castTy);
+
+    if (!isa<loc::ConcreteInt>(val))
+      return makeLocAsInteger(val, BitWidth);
+
+    llvm::APSInt i = cast<loc::ConcreteInt>(val).getValue();
+    i.setIsUnsigned(castTy->isUnsignedIntegerType() || Loc::isLocType(castTy));
+    i = i.extOrTrunc(BitWidth);
+    return makeIntVal(i);
+  }
+
+  // All other cases: return 'UnknownVal'. This includes casting pointers
+  // to floats, which is probably badness in itself, but this is a good
+  // intermediate solution until we do something better.
+  return UnknownVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for unary operators.
+//===----------------------------------------------------------------------===//
+
+SVal SimpleSValBuilder::evalMinus(NonLoc val) {
+  switch (val.getSubKind()) {
+  case nonloc::ConcreteIntKind:
+    return cast<nonloc::ConcreteInt>(val).evalMinus(*this);
+  default:
+    return UnknownVal();
+  }
+}
+
+SVal SimpleSValBuilder::evalComplement(NonLoc X) {
+  switch (X.getSubKind()) {
+  case nonloc::ConcreteIntKind:
+    return cast<nonloc::ConcreteInt>(X).evalComplement(*this);
+  default:
+    return UnknownVal();
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for binary operators.
+//===----------------------------------------------------------------------===//
+
+static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) {
+  switch (op) {
+  default:
+    assert(false && "Invalid opcode.");
+  case BO_LT: return BO_GE;
+  case BO_GT: return BO_LE;
+  case BO_LE: return BO_GT;
+  case BO_GE: return BO_LT;
+  case BO_EQ: return BO_NE;
+  case BO_NE: return BO_EQ;
+  }
+}
+
+static BinaryOperator::Opcode ReverseComparison(BinaryOperator::Opcode op) {
+  switch (op) {
+  default:
+    assert(false && "Invalid opcode.");
+  case BO_LT: return BO_GT;
+  case BO_GT: return BO_LT;
+  case BO_LE: return BO_GE;
+  case BO_GE: return BO_LE;
+  case BO_EQ:
+  case BO_NE:
+    return op;
+  }
+}
+
+SVal SimpleSValBuilder::MakeSymIntVal(const SymExpr *LHS,
+                                      BinaryOperator::Opcode op,
+                                      const llvm::APSInt &RHS,
+                                      QualType resultTy) {
+  bool isIdempotent = false;
+
+  // Check for a few special cases with known reductions first.
+  switch (op) {
+  default:
+    // We can't reduce this case; just treat it normally.
+    break;
+  case BO_Mul:
+    // a*0 and a*1
+    if (RHS == 0)
+      return makeIntVal(0, resultTy);
+    else if (RHS == 1)
+      isIdempotent = true;
+    break;
+  case BO_Div:
+    // a/0 and a/1
+    if (RHS == 0)
+      // This is also handled elsewhere.
+      return UndefinedVal();
+    else if (RHS == 1)
+      isIdempotent = true;
+    break;
+  case BO_Rem:
+    // a%0 and a%1
+    if (RHS == 0)
+      // This is also handled elsewhere.
+ return UndefinedVal(); + else if (RHS == 1) + return makeIntVal(0, resultTy); + break; + case BO_Add: + case BO_Sub: + case BO_Shl: + case BO_Shr: + case BO_Xor: + // a+0, a-0, a<<0, a>>0, a^0 + if (RHS == 0) + isIdempotent = true; + break; + case BO_And: + // a&0 and a&(~0) + if (RHS == 0) + return makeIntVal(0, resultTy); + else if (RHS.isAllOnesValue()) + isIdempotent = true; + break; + case BO_Or: + // a|0 and a|(~0) + if (RHS == 0) + isIdempotent = true; + else if (RHS.isAllOnesValue()) { + const llvm::APSInt &Result = BasicVals.Convert(resultTy, RHS); + return nonloc::ConcreteInt(Result); + } + break; + } + + // Idempotent ops (like a*1) can still change the type of an expression. + // Wrap the LHS up in a NonLoc again and let evalCastNL do the dirty work. + if (isIdempotent) { + if (SymbolRef LHSSym = dyn_cast<SymbolData>(LHS)) + return evalCastNL(nonloc::SymbolVal(LHSSym), resultTy); + return evalCastNL(nonloc::SymExprVal(LHS), resultTy); + } + + // If we reach this point, the expression cannot be simplified. + // Make a SymExprVal for the entire thing. + return makeNonLoc(LHS, op, RHS, resultTy); +} + +SVal SimpleSValBuilder::evalBinOpNN(const GRState *state, + BinaryOperator::Opcode op, + NonLoc lhs, NonLoc rhs, + QualType resultTy) { + // Handle trivial case where left-side and right-side are the same. + if (lhs == rhs) + switch (op) { + default: + break; + case BO_EQ: + case BO_LE: + case BO_GE: + return makeTruthVal(true, resultTy); + case BO_LT: + case BO_GT: + case BO_NE: + return makeTruthVal(false, resultTy); + case BO_Xor: + case BO_Sub: + return makeIntVal(0, resultTy); + case BO_Or: + case BO_And: + return evalCastNL(lhs, resultTy); + } + + while (1) { + switch (lhs.getSubKind()) { + default: + return UnknownVal(); + case nonloc::LocAsIntegerKind: { + Loc lhsL = cast<nonloc::LocAsInteger>(lhs).getLoc(); + switch (rhs.getSubKind()) { + case nonloc::LocAsIntegerKind: + return evalBinOpLL(state, op, lhsL, + cast<nonloc::LocAsInteger>(rhs).getLoc(), + resultTy); + case nonloc::ConcreteIntKind: { + // Transform the integer into a location and compare. + llvm::APSInt i = cast<nonloc::ConcreteInt>(rhs).getValue(); + i.setIsUnsigned(true); + i = i.extOrTrunc(Context.getTypeSize(Context.VoidPtrTy)); + return evalBinOpLL(state, op, lhsL, makeLoc(i), resultTy); + } + default: + switch (op) { + case BO_EQ: + return makeTruthVal(false, resultTy); + case BO_NE: + return makeTruthVal(true, resultTy); + default: + // This case also handles pointer arithmetic. + return UnknownVal(); + } + } + } + case nonloc::SymExprValKind: { + nonloc::SymExprVal *selhs = cast<nonloc::SymExprVal>(&lhs); + + // Only handle LHS of the form "$sym op constant", at least for now. + const SymIntExpr *symIntExpr = + dyn_cast<SymIntExpr>(selhs->getSymbolicExpression()); + + if (!symIntExpr) + return UnknownVal(); + + // Is this a logical not? (!x is represented as x == 0.) + if (op == BO_EQ && rhs.isZeroConstant()) { + // We know how to negate certain expressions. Simplify them here. + + BinaryOperator::Opcode opc = symIntExpr->getOpcode(); + switch (opc) { + default: + // We don't know how to negate this operation. + // Just handle it as if it were a normal comparison to 0. 
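+          // For example, '!(x & 1)' is kept in the form '(x & 1) == 0'.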
+ break; + case BO_LAnd: + case BO_LOr: + assert(false && "Logical operators handled by branching logic."); + return UnknownVal(); + case BO_Assign: + case BO_MulAssign: + case BO_DivAssign: + case BO_RemAssign: + case BO_AddAssign: + case BO_SubAssign: + case BO_ShlAssign: + case BO_ShrAssign: + case BO_AndAssign: + case BO_XorAssign: + case BO_OrAssign: + case BO_Comma: + assert(false && "'=' and ',' operators handled by ExprEngine."); + return UnknownVal(); + case BO_PtrMemD: + case BO_PtrMemI: + assert(false && "Pointer arithmetic not handled here."); + return UnknownVal(); + case BO_LT: + case BO_GT: + case BO_LE: + case BO_GE: + case BO_EQ: + case BO_NE: + // Negate the comparison and make a value. + opc = NegateComparison(opc); + assert(symIntExpr->getType(Context) == resultTy); + return makeNonLoc(symIntExpr->getLHS(), opc, + symIntExpr->getRHS(), resultTy); + } + } + + // For now, only handle expressions whose RHS is a constant. + const nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs); + if (!rhsInt) + return UnknownVal(); + + // If both the LHS and the current expression are additive, + // fold their constants. + if (BinaryOperator::isAdditiveOp(op)) { + BinaryOperator::Opcode lop = symIntExpr->getOpcode(); + if (BinaryOperator::isAdditiveOp(lop)) { + // resultTy may not be the best type to convert to, but it's + // probably the best choice in expressions with mixed type + // (such as x+1U+2LL). The rules for implicit conversions should + // choose a reasonable type to preserve the expression, and will + // at least match how the value is going to be used. + const llvm::APSInt &first = + BasicVals.Convert(resultTy, symIntExpr->getRHS()); + const llvm::APSInt &second = + BasicVals.Convert(resultTy, rhsInt->getValue()); + const llvm::APSInt *newRHS; + if (lop == op) + newRHS = BasicVals.evalAPSInt(BO_Add, first, second); + else + newRHS = BasicVals.evalAPSInt(BO_Sub, first, second); + return MakeSymIntVal(symIntExpr->getLHS(), lop, *newRHS, resultTy); + } + } + + // Otherwise, make a SymExprVal out of the expression. + return MakeSymIntVal(symIntExpr, op, rhsInt->getValue(), resultTy); + } + case nonloc::ConcreteIntKind: { + const nonloc::ConcreteInt& lhsInt = cast<nonloc::ConcreteInt>(lhs); + + if (isa<nonloc::ConcreteInt>(rhs)) { + return lhsInt.evalBinOp(*this, op, cast<nonloc::ConcreteInt>(rhs)); + } else { + const llvm::APSInt& lhsValue = lhsInt.getValue(); + + // Swap the left and right sides and flip the operator if doing so + // allows us to better reason about the expression (this is a form + // of expression canonicalization). + // While we're at it, catch some special cases for non-commutative ops. + NonLoc tmp = rhs; + rhs = lhs; + lhs = tmp; + + switch (op) { + case BO_LT: + case BO_GT: + case BO_LE: + case BO_GE: + op = ReverseComparison(op); + continue; + case BO_EQ: + case BO_NE: + case BO_Add: + case BO_Mul: + case BO_And: + case BO_Xor: + case BO_Or: + continue; + case BO_Shr: + if (lhsValue.isAllOnesValue() && lhsValue.isSigned()) + // At this point lhs and rhs have been swapped. + return rhs; + // FALL-THROUGH + case BO_Shl: + if (lhsValue == 0) + // At this point lhs and rhs have been swapped. + return rhs; + return UnknownVal(); + default: + return UnknownVal(); + } + } + } + case nonloc::SymbolValKind: { + nonloc::SymbolVal *slhs = cast<nonloc::SymbolVal>(&lhs); + SymbolRef Sym = slhs->getSymbol(); + // Does the symbol simplify to a constant? If so, "fold" the constant + // by setting 'lhs' to a ConcreteInt and try again. 
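+      // For example, if '$x' is known to be 3, '$x + 5' is re-evaluated
+      // below as '3 + 5'.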
+ if (Sym->getType(Context)->isIntegerType()) + if (const llvm::APSInt *Constant = state->getSymVal(Sym)) { + // The symbol evaluates to a constant. If necessary, promote the + // folded constant (LHS) to the result type. + const llvm::APSInt &lhs_I = BasicVals.Convert(resultTy, *Constant); + lhs = nonloc::ConcreteInt(lhs_I); + + // Also promote the RHS (if necessary). + + // For shifts, it is not necessary to promote the RHS. + if (BinaryOperator::isShiftOp(op)) + continue; + + // Other operators: do an implicit conversion. This shouldn't be + // necessary once we support truncation/extension of symbolic values. + if (nonloc::ConcreteInt *rhs_I = dyn_cast<nonloc::ConcreteInt>(&rhs)){ + rhs = nonloc::ConcreteInt(BasicVals.Convert(resultTy, + rhs_I->getValue())); + } + + continue; + } + + // Is the RHS a symbol we can simplify? + if (const nonloc::SymbolVal *srhs = dyn_cast<nonloc::SymbolVal>(&rhs)) { + SymbolRef RSym = srhs->getSymbol(); + if (RSym->getType(Context)->isIntegerType()) { + if (const llvm::APSInt *Constant = state->getSymVal(RSym)) { + // The symbol evaluates to a constant. + const llvm::APSInt &rhs_I = BasicVals.Convert(resultTy, *Constant); + rhs = nonloc::ConcreteInt(rhs_I); + } + } + } + + if (isa<nonloc::ConcreteInt>(rhs)) { + return MakeSymIntVal(slhs->getSymbol(), op, + cast<nonloc::ConcreteInt>(rhs).getValue(), + resultTy); + } + + return UnknownVal(); + } + } + } +} + +// FIXME: all this logic will change if/when we have MemRegion::getLocation(). +SVal SimpleSValBuilder::evalBinOpLL(const GRState *state, + BinaryOperator::Opcode op, + Loc lhs, Loc rhs, + QualType resultTy) { + // Only comparisons and subtractions are valid operations on two pointers. + // See [C99 6.5.5 through 6.5.14] or [C++0x 5.6 through 5.15]. + // However, if a pointer is casted to an integer, evalBinOpNN may end up + // calling this function with another operation (PR7527). We don't attempt to + // model this for now, but it could be useful, particularly when the + // "location" is actually an integer value that's been passed through a void*. + if (!(BinaryOperator::isComparisonOp(op) || op == BO_Sub)) + return UnknownVal(); + + // Special cases for when both sides are identical. + if (lhs == rhs) { + switch (op) { + default: + assert(false && "Unimplemented operation for two identical values"); + return UnknownVal(); + case BO_Sub: + return makeZeroVal(resultTy); + case BO_EQ: + case BO_LE: + case BO_GE: + return makeTruthVal(true, resultTy); + case BO_NE: + case BO_LT: + case BO_GT: + return makeTruthVal(false, resultTy); + } + } + + switch (lhs.getSubKind()) { + default: + assert(false && "Ordering not implemented for this Loc."); + return UnknownVal(); + + case loc::GotoLabelKind: + // The only thing we know about labels is that they're non-null. + if (rhs.isZeroConstant()) { + switch (op) { + default: + break; + case BO_Sub: + return evalCastL(lhs, resultTy); + case BO_EQ: + case BO_LE: + case BO_LT: + return makeTruthVal(false, resultTy); + case BO_NE: + case BO_GT: + case BO_GE: + return makeTruthVal(true, resultTy); + } + } + // There may be two labels for the same location, and a function region may + // have the same address as a label at the start of the function (depending + // on the ABI). + // FIXME: we can probably do a comparison against other MemRegions, though. + // FIXME: is there a way to tell if two labels refer to the same location? 
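+    // For example, '&&l1 == &&l2' (two distinct labels) falls through to
+    // UnknownVal below.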
+ return UnknownVal(); + + case loc::ConcreteIntKind: { + // If one of the operands is a symbol and the other is a constant, + // build an expression for use by the constraint manager. + if (SymbolRef rSym = rhs.getAsLocSymbol()) { + // We can only build expressions with symbols on the left, + // so we need a reversible operator. + if (!BinaryOperator::isComparisonOp(op)) + return UnknownVal(); + + const llvm::APSInt &lVal = cast<loc::ConcreteInt>(lhs).getValue(); + return makeNonLoc(rSym, ReverseComparison(op), lVal, resultTy); + } + + // If both operands are constants, just perform the operation. + if (loc::ConcreteInt *rInt = dyn_cast<loc::ConcreteInt>(&rhs)) { + SVal ResultVal = cast<loc::ConcreteInt>(lhs).evalBinOp(BasicVals, op, + *rInt); + if (Loc *Result = dyn_cast<Loc>(&ResultVal)) + return evalCastL(*Result, resultTy); + else + return UnknownVal(); + } + + // Special case comparisons against NULL. + // This must come after the test if the RHS is a symbol, which is used to + // build constraints. The address of any non-symbolic region is guaranteed + // to be non-NULL, as is any label. + assert(isa<loc::MemRegionVal>(rhs) || isa<loc::GotoLabel>(rhs)); + if (lhs.isZeroConstant()) { + switch (op) { + default: + break; + case BO_EQ: + case BO_GT: + case BO_GE: + return makeTruthVal(false, resultTy); + case BO_NE: + case BO_LT: + case BO_LE: + return makeTruthVal(true, resultTy); + } + } + + // Comparing an arbitrary integer to a region or label address is + // completely unknowable. + return UnknownVal(); + } + case loc::MemRegionKind: { + if (loc::ConcreteInt *rInt = dyn_cast<loc::ConcreteInt>(&rhs)) { + // If one of the operands is a symbol and the other is a constant, + // build an expression for use by the constraint manager. + if (SymbolRef lSym = lhs.getAsLocSymbol()) + return MakeSymIntVal(lSym, op, rInt->getValue(), resultTy); + + // Special case comparisons to NULL. + // This must come after the test if the LHS is a symbol, which is used to + // build constraints. The address of any non-symbolic region is guaranteed + // to be non-NULL. + if (rInt->isZeroConstant()) { + switch (op) { + default: + break; + case BO_Sub: + return evalCastL(lhs, resultTy); + case BO_EQ: + case BO_LT: + case BO_LE: + return makeTruthVal(false, resultTy); + case BO_NE: + case BO_GT: + case BO_GE: + return makeTruthVal(true, resultTy); + } + } + + // Comparing a region to an arbitrary integer is completely unknowable. + return UnknownVal(); + } + + // Get both values as regions, if possible. + const MemRegion *LeftMR = lhs.getAsRegion(); + assert(LeftMR && "MemRegionKind SVal doesn't have a region!"); + + const MemRegion *RightMR = rhs.getAsRegion(); + if (!RightMR) + // The RHS is probably a label, which in theory could address a region. + // FIXME: we can probably make a more useful statement about non-code + // regions, though. + return UnknownVal(); + + // If both values wrap regions, see if they're from different base regions. + const MemRegion *LeftBase = LeftMR->getBaseRegion(); + const MemRegion *RightBase = RightMR->getBaseRegion(); + if (LeftBase != RightBase && + !isa<SymbolicRegion>(LeftBase) && !isa<SymbolicRegion>(RightBase)) { + switch (op) { + default: + return UnknownVal(); + case BO_EQ: + return makeTruthVal(false, resultTy); + case BO_NE: + return makeTruthVal(true, resultTy); + } + } + + // The two regions are from the same base region. See if they're both a + // type of region we know how to compare. 
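+    // For example, '&a[1]' and '&a[3]' are ElementRegions sharing the same
+    // base array region, so they can be ordered by index below.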
+ + // FIXME: If/when there is a getAsRawOffset() for FieldRegions, this + // ElementRegion path and the FieldRegion path below should be unified. + if (const ElementRegion *LeftER = dyn_cast<ElementRegion>(LeftMR)) { + // First see if the right region is also an ElementRegion. + const ElementRegion *RightER = dyn_cast<ElementRegion>(RightMR); + if (!RightER) + return UnknownVal(); + + // Next, see if the two ERs have the same super-region and matching types. + // FIXME: This should do something useful even if the types don't match, + // though if both indexes are constant the RegionRawOffset path will + // give the correct answer. + if (LeftER->getSuperRegion() == RightER->getSuperRegion() && + LeftER->getElementType() == RightER->getElementType()) { + // Get the left index and cast it to the correct type. + // If the index is unknown or undefined, bail out here. + SVal LeftIndexVal = LeftER->getIndex(); + NonLoc *LeftIndex = dyn_cast<NonLoc>(&LeftIndexVal); + if (!LeftIndex) + return UnknownVal(); + LeftIndexVal = evalCastNL(*LeftIndex, resultTy); + LeftIndex = dyn_cast<NonLoc>(&LeftIndexVal); + if (!LeftIndex) + return UnknownVal(); + + // Do the same for the right index. + SVal RightIndexVal = RightER->getIndex(); + NonLoc *RightIndex = dyn_cast<NonLoc>(&RightIndexVal); + if (!RightIndex) + return UnknownVal(); + RightIndexVal = evalCastNL(*RightIndex, resultTy); + RightIndex = dyn_cast<NonLoc>(&RightIndexVal); + if (!RightIndex) + return UnknownVal(); + + // Actually perform the operation. + // evalBinOpNN expects the two indexes to already be the right type. + return evalBinOpNN(state, op, *LeftIndex, *RightIndex, resultTy); + } + + // If the element indexes aren't comparable, see if the raw offsets are. + RegionRawOffset LeftOffset = LeftER->getAsArrayOffset(); + RegionRawOffset RightOffset = RightER->getAsArrayOffset(); + + if (LeftOffset.getRegion() != NULL && + LeftOffset.getRegion() == RightOffset.getRegion()) { + CharUnits left = LeftOffset.getOffset(); + CharUnits right = RightOffset.getOffset(); + + switch (op) { + default: + return UnknownVal(); + case BO_LT: + return makeTruthVal(left < right, resultTy); + case BO_GT: + return makeTruthVal(left > right, resultTy); + case BO_LE: + return makeTruthVal(left <= right, resultTy); + case BO_GE: + return makeTruthVal(left >= right, resultTy); + case BO_EQ: + return makeTruthVal(left == right, resultTy); + case BO_NE: + return makeTruthVal(left != right, resultTy); + } + } + + // If we get here, we have no way of comparing the ElementRegions. + return UnknownVal(); + } + + // See if both regions are fields of the same structure. + // FIXME: This doesn't handle nesting, inheritance, or Objective-C ivars. + if (const FieldRegion *LeftFR = dyn_cast<FieldRegion>(LeftMR)) { + // Only comparisons are meaningful here! + if (!BinaryOperator::isComparisonOp(op)) + return UnknownVal(); + + // First see if the right region is also a FieldRegion. + const FieldRegion *RightFR = dyn_cast<FieldRegion>(RightMR); + if (!RightFR) + return UnknownVal(); + + // Next, see if the two FRs have the same super-region. + // FIXME: This doesn't handle casts yet, and simply stripping the casts + // doesn't help. + if (LeftFR->getSuperRegion() != RightFR->getSuperRegion()) + return UnknownVal(); + + const FieldDecl *LeftFD = LeftFR->getDecl(); + const FieldDecl *RightFD = RightFR->getDecl(); + const RecordDecl *RD = LeftFD->getParent(); + + // Make sure the two FRs are from the same kind of record. Just in case! 
+      // FIXME: This is probably where inheritance would be a problem.
+      if (RD != RightFD->getParent())
+        return UnknownVal();
+
+      // We know for sure that the two fields are not the same, since that
+      // would have given us the same SVal.
+      if (op == BO_EQ)
+        return makeTruthVal(false, resultTy);
+      if (op == BO_NE)
+        return makeTruthVal(true, resultTy);
+
+      // Iterate through the fields and see which one comes first.
+      // [C99 6.7.2.1.13] "Within a structure object, the non-bit-field
+      // members and the units in which bit-fields reside have addresses that
+      // increase in the order in which they are declared."
+      bool leftFirst = (op == BO_LT || op == BO_LE);
+      for (RecordDecl::field_iterator I = RD->field_begin(),
+           E = RD->field_end(); I!=E; ++I) {
+        if (*I == LeftFD)
+          return makeTruthVal(leftFirst, resultTy);
+        if (*I == RightFD)
+          return makeTruthVal(!leftFirst, resultTy);
+      }
+
+      assert(false && "Fields not found in parent record's definition");
+    }
+
+    // If we get here, we have no way of comparing the regions.
+    return UnknownVal();
+  }
+  }
+}
+
+SVal SimpleSValBuilder::evalBinOpLN(const GRState *state,
+                                    BinaryOperator::Opcode op,
+                                    Loc lhs, NonLoc rhs, QualType resultTy) {
+
+  // Special case: rhs is a zero constant.
+  if (rhs.isZeroConstant())
+    return lhs;
+
+  // Special case: 'rhs' is an integer that has the same width as a pointer and
+  // we are using the integer location in a comparison. Normally this cannot be
+  // triggered, but transfer functions like those for OSCompareAndSwapBarrier32
+  // can generate comparisons that trigger this code.
+  // FIXME: Are all locations guaranteed to have pointer width?
+  if (BinaryOperator::isComparisonOp(op)) {
+    if (nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs)) {
+      const llvm::APSInt *x = &rhsInt->getValue();
+      ASTContext &ctx = Context;
+      if (ctx.getTypeSize(ctx.VoidPtrTy) == x->getBitWidth()) {
+        // Convert the signedness of the integer (if necessary).
+        if (x->isSigned())
+          x = &getBasicValueFactory().getValue(*x, true);
+
+        return evalBinOpLL(state, op, lhs, loc::ConcreteInt(*x), resultTy);
+      }
+    }
+  }
+
+  // We are dealing with pointer arithmetic.
+
+  // Handle pointer arithmetic on constant values.
+  if (nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs)) {
+    if (loc::ConcreteInt *lhsInt = dyn_cast<loc::ConcreteInt>(&lhs)) {
+      const llvm::APSInt &leftI = lhsInt->getValue();
+      assert(leftI.isUnsigned());
+      llvm::APSInt rightI(rhsInt->getValue(), /* isUnsigned */ true);
+
+      // Convert the bitwidth of rightI. This should deal with overflow
+      // since we are dealing with concrete values.
+      rightI = rightI.extOrTrunc(leftI.getBitWidth());
+
+      // Offset the increment by the size of the pointee type.
+      QualType pointeeType = resultTy->getPointeeType();
+      llvm::APSInt Multiplicand(rightI.getBitWidth(), /* isUnsigned */ true);
+      Multiplicand = Context.getTypeSizeInChars(pointeeType).getQuantity();
+      rightI *= Multiplicand;
+
+      // Compute the adjusted pointer.
+      switch (op) {
+      case BO_Add:
+        rightI = leftI + rightI;
+        break;
+      case BO_Sub:
+        rightI = leftI - rightI;
+        break;
+      default:
+        llvm_unreachable("Invalid pointer arithmetic operation");
+      }
+      return loc::ConcreteInt(getBasicValueFactory().getValue(rightI));
+    }
+  }
+
+  // Handle cases where 'lhs' is a region.
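+  // For example, 'p + 3' where 'p' refers to element a[i] yields the
+  // element region for a[i+3]; for any other subregion the result is an
+  // element region indexed off that region.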
+  if (const MemRegion *region = lhs.getAsRegion()) {
+    rhs = cast<NonLoc>(convertToArrayIndex(rhs));
+    SVal index = UnknownVal();
+    const MemRegion *superR = 0;
+    QualType elementType;
+
+    if (const ElementRegion *elemReg = dyn_cast<ElementRegion>(region)) {
+      index = evalBinOpNN(state, BO_Add, elemReg->getIndex(), rhs,
+                          getArrayIndexType());
+      superR = elemReg->getSuperRegion();
+      elementType = elemReg->getElementType();
+    }
+    else if (isa<SubRegion>(region)) {
+      superR = region;
+      index = rhs;
+      if (const PointerType *PT = resultTy->getAs<PointerType>()) {
+        elementType = PT->getPointeeType();
+      }
+      else {
+        const ObjCObjectPointerType *OT =
+          resultTy->getAs<ObjCObjectPointerType>();
+        elementType = OT->getPointeeType();
+      }
+    }
+
+    if (NonLoc *indexV = dyn_cast<NonLoc>(&index)) {
+      return loc::MemRegionVal(MemMgr.getElementRegion(elementType, *indexV,
+                                                       superR, getContext()));
+    }
+  }
+  return UnknownVal();
+}
+
+const llvm::APSInt *SimpleSValBuilder::getKnownValue(const GRState *state,
+                                                     SVal V) {
+  if (V.isUnknownOrUndef())
+    return NULL;
+
+  if (loc::ConcreteInt* X = dyn_cast<loc::ConcreteInt>(&V))
+    return &X->getValue();
+
+  if (nonloc::ConcreteInt* X = dyn_cast<nonloc::ConcreteInt>(&V))
+    return &X->getValue();
+
+  if (SymbolRef Sym = V.getAsSymbol())
+    return state->getSymVal(Sym);
+
+  // FIXME: Add support for SymExprs.
+  return NULL;
+}
diff --git a/lib/StaticAnalyzer/Core/Store.cpp b/lib/StaticAnalyzer/Core/Store.cpp
new file mode 100644
index 0000000..7225170
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/Store.cpp
@@ -0,0 +1,338 @@
+//== Store.cpp - Interface for maps from Locations to Values ----*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the types Store and StoreManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/AST/CharUnits.h"
+
+using namespace clang;
+using namespace ento;
+
+StoreManager::StoreManager(GRStateManager &stateMgr)
+  : svalBuilder(stateMgr.getSValBuilder()), StateMgr(stateMgr),
+    MRMgr(svalBuilder.getRegionManager()), Ctx(stateMgr.getContext()) {}
+
+StoreRef StoreManager::enterStackFrame(const GRState *state,
+                                       const StackFrameContext *frame) {
+  return StoreRef(state->getStore(), *this);
+}
+
+const MemRegion *StoreManager::MakeElementRegion(const MemRegion *Base,
+                                                 QualType EleTy, uint64_t index) {
+  NonLoc idx = svalBuilder.makeArrayIndex(index);
+  return MRMgr.getElementRegion(EleTy, idx, Base, svalBuilder.getContext());
+}
+
+// FIXME: Merge with the implementation of the same method in MemRegion.cpp
+static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
+  if (const RecordType *RT = Ty->getAs<RecordType>()) {
+    const RecordDecl *D = RT->getDecl();
+    if (!D->getDefinition())
+      return false;
+  }
+
+  return true;
+}
+
+StoreRef StoreManager::BindDefault(Store store, const MemRegion *R, SVal V) {
+  return StoreRef(store, *this);
+}
+
+const ElementRegion *StoreManager::GetElementZeroRegion(const MemRegion *R,
+                                                        QualType T) {
+  NonLoc idx = svalBuilder.makeZeroArrayIndex();
+  assert(!T.isNull());
+  return MRMgr.getElementRegion(T, idx, R, Ctx);
+}
+
+const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy) {
+
+  ASTContext& Ctx = StateMgr.getContext();
+
+  // Handle casts to Objective-C objects.
+  if (CastToTy->isObjCObjectPointerType())
+    return R->StripCasts();
+
+  if (CastToTy->isBlockPointerType()) {
+    // FIXME: We may need different solutions, depending on the symbol
+    // involved. Blocks can be cast to/from 'id', as they can be treated
+    // as Objective-C objects. This could possibly be handled by enhancing
+    // our reasoning of downcasts of symbolic objects.
+    if (isa<CodeTextRegion>(R) || isa<SymbolicRegion>(R))
+      return R;
+
+    // We don't know what to make of it. Return a NULL region, which
+    // will be interpreted as UnknownVal.
+    return NULL;
+  }
+
+  // Now assume we are casting from pointer to pointer. Other cases should
+  // already be handled.
+  QualType PointeeTy = CastToTy->getPointeeType();
+  QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
+
+  // Handle casts to void*. We just pass the region through.
+  if (CanonPointeeTy.getLocalUnqualifiedType() == Ctx.VoidTy)
+    return R;
+
+  // Handle casts from compatible types.
+  if (R->isBoundable())
+    if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
+      QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
+      if (CanonPointeeTy == ObjTy)
+        return R;
+    }
+
+  // Process region cast according to the kind of the region being cast.
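+  // For example, a VarRegion for 'int x' cast to 'char *' takes the
+  // MakeElementRegion path below, producing a char element region at
+  // index 0 on top of 'x'.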
+  switch (R->getKind()) {
+  case MemRegion::CXXThisRegionKind:
+  case MemRegion::GenericMemSpaceRegionKind:
+  case MemRegion::StackLocalsSpaceRegionKind:
+  case MemRegion::StackArgumentsSpaceRegionKind:
+  case MemRegion::HeapSpaceRegionKind:
+  case MemRegion::UnknownSpaceRegionKind:
+  case MemRegion::NonStaticGlobalSpaceRegionKind:
+  case MemRegion::StaticGlobalSpaceRegionKind: {
+    assert(0 && "Invalid region cast");
+    break;
+  }
+
+  case MemRegion::FunctionTextRegionKind:
+  case MemRegion::BlockTextRegionKind:
+  case MemRegion::BlockDataRegionKind:
+  case MemRegion::StringRegionKind:
+    // FIXME: Need to handle arbitrary downcasts.
+  case MemRegion::SymbolicRegionKind:
+  case MemRegion::AllocaRegionKind:
+  case MemRegion::CompoundLiteralRegionKind:
+  case MemRegion::FieldRegionKind:
+  case MemRegion::ObjCIvarRegionKind:
+  case MemRegion::VarRegionKind:
+  case MemRegion::CXXTempObjectRegionKind:
+  case MemRegion::CXXBaseObjectRegionKind:
+    return MakeElementRegion(R, PointeeTy);
+
+  case MemRegion::ElementRegionKind: {
+    // If we are casting from an ElementRegion to another type, the
+    // algorithm is as follows:
+    //
+    // (1) Compute the "raw offset" of the ElementRegion from the
+    //     base region. This is done by calling 'getAsRawOffset()'.
+    //
+    // (2a) If we get a 'RegionRawOffset' after calling
+    //      'getAsRawOffset()', determine if the absolute offset
+    //      can be exactly divided into chunks of the size of the
+    //      pointee type of the cast. If so, create a new ElementRegion with
+    //      that pointee type as the new ElementType and the index
+    //      being the offset divided by the chunk size. If not, create
+    //      a new ElementRegion at offset 0 off the raw offset region.
+    //
+    // (2b) If we don't get a 'RegionRawOffset' after calling
+    //      'getAsRawOffset()', it means that we are at offset 0.
+    //
+    // FIXME: Handle symbolic raw offsets.
+
+    const ElementRegion *elementR = cast<ElementRegion>(R);
+    const RegionRawOffset &rawOff = elementR->getAsArrayOffset();
+    const MemRegion *baseR = rawOff.getRegion();
+
+    // If we cannot compute a raw offset, throw up our hands and return
+    // a NULL MemRegion*.
+    if (!baseR)
+      return NULL;
+
+    CharUnits off = rawOff.getOffset();
+
+    if (off.isZero()) {
+      // Edge case: we are at 0 bytes off the beginning of baseR. We
+      // check to see if the type we are casting to is the same as the base
+      // region. If so, just return the base region.
+      if (const TypedRegion *TR = dyn_cast<TypedRegion>(baseR)) {
+        QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
+        QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
+        if (CanonPointeeTy == ObjTy)
+          return baseR;
+      }
+
+      // Otherwise, create a new ElementRegion at offset 0.
+      return MakeElementRegion(baseR, PointeeTy);
+    }
+
+    // We have a non-zero offset from the base region. We want to determine
+    // if the offset can be evenly divided by sizeof(PointeeTy). If so,
+    // we create an ElementRegion whose index is that value. Otherwise, we
+    // create two ElementRegions, one that reflects a raw offset and the other
+    // that reflects the cast.
+
+    // Compute the index for the new ElementRegion.
+    int64_t newIndex = 0;
+    const MemRegion *newSuperR = 0;
+
+    // We can only compute sizeof(PointeeTy) if it is a complete type.
+    if (IsCompleteType(Ctx, PointeeTy)) {
+      // Compute the size in **bytes**.
+      CharUnits pointeeTySize = Ctx.getTypeSizeInChars(PointeeTy);
+      if (!pointeeTySize.isZero()) {
+        // Is the offset a multiple of the size? If so, we can layer the
+        // ElementRegion (with elementType == PointeeTy) directly on top of
+        // the base region.
+        if (off % pointeeTySize == 0) {
+          newIndex = off / pointeeTySize;
+          newSuperR = baseR;
+        }
+      }
+    }
+
+    if (!newSuperR) {
+      // Create an intermediate ElementRegion to represent the raw byte
+      // offset. This will be the super region of the final ElementRegion.
+      newSuperR = MakeElementRegion(baseR, Ctx.CharTy, off.getQuantity());
+    }
+
+    return MakeElementRegion(newSuperR, PointeeTy, newIndex);
+  }
+  }
+
+  assert(0 && "unreachable");
+  return 0;
+}
+
+
+/// CastRetrievedVal - Used by subclasses of StoreManager to implement
+/// implicit casts that arise from loads from regions that are reinterpreted
+/// as another region.
+SVal StoreManager::CastRetrievedVal(SVal V, const TypedRegion *R,
+                                    QualType castTy, bool performTestOnly) {
+
+  if (castTy.isNull())
+    return V;
+
+  ASTContext &Ctx = svalBuilder.getContext();
+
+  if (performTestOnly) {
+    // Automatically translate references to pointers.
+    QualType T = R->getValueType();
+    if (const ReferenceType *RT = T->getAs<ReferenceType>())
+      T = Ctx.getPointerType(RT->getPointeeType());
+
+    assert(svalBuilder.getContext().hasSameUnqualifiedType(castTy, T));
+    return V;
+  }
+
+  if (const Loc *L = dyn_cast<Loc>(&V))
+    return svalBuilder.evalCastL(*L, castTy);
+  else if (const NonLoc *NL = dyn_cast<NonLoc>(&V))
+    return svalBuilder.evalCastNL(*NL, castTy);
+
+  return V;
+}
+
+SVal StoreManager::getLValueFieldOrIvar(const Decl* D, SVal Base) {
+  if (Base.isUnknownOrUndef())
+    return Base;
+
+  Loc BaseL = cast<Loc>(Base);
+  const MemRegion* BaseR = 0;
+
+  switch (BaseL.getSubKind()) {
+  case loc::MemRegionKind:
+    BaseR = cast<loc::MemRegionVal>(BaseL).getRegion();
+    break;
+
+  case loc::GotoLabelKind:
+    // These are abnormal cases. Flag an undefined value.
+    return UndefinedVal();
+
+  case loc::ConcreteIntKind:
+    // While these seem funny, this can happen through casts.
+    // FIXME: What we should return is the field offset. For example,
+    //  add the field offset to the integer value. That way funny things
+    //  like this work properly: &(((struct foo *) 0xa)->f)
+    return Base;
+
+  default:
+    assert(0 && "Unhandled Base.");
+    return Base;
+  }
+
+  // NOTE: We must have this check first because ObjCIvarDecl is a subclass
+  // of FieldDecl.
+  if (const ObjCIvarDecl *ID = dyn_cast<ObjCIvarDecl>(D))
+    return loc::MemRegionVal(MRMgr.getObjCIvarRegion(ID, BaseR));
+
+  return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
+}
+
+SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
+                                    SVal Base) {
+
+  // If the base is an unknown or undefined value, just return it back.
+  // FIXME: For absolute pointer addresses, we just return that value back as
+  //  well, although in reality we should return the offset added to that
+  //  value.
+  if (Base.isUnknownOrUndef() || isa<loc::ConcreteInt>(Base))
+    return Base;
+
+  const MemRegion* BaseRegion = cast<loc::MemRegionVal>(Base).getRegion();
+
+  // A pointer of any type can be cast and used as an array base.
+  const ElementRegion *ElemR = dyn_cast<ElementRegion>(BaseRegion);
+
+  // Convert the offset to the appropriate size and signedness.
+  Offset = cast<NonLoc>(svalBuilder.convertToArrayIndex(Offset));
+
+  if (!ElemR) {
+    //
+    // If the base region is not an ElementRegion, create one.
+    // This can happen in the following example:
+    //
+    //   char *p = __builtin_alloca(10);
+    //   p[1] = 8;
+    //
+    //  Observe that 'p' binds to an AllocaRegion.
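+    //  In that (illustrative) example, the ElementRegion returned below
+    //  represents p[1]: element type 'char', index 1, with the AllocaRegion
+    //  as its super region.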
+    //
+    return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
+                                                    BaseRegion, Ctx));
+  }
+
+  SVal BaseIdx = ElemR->getIndex();
+
+  if (!isa<nonloc::ConcreteInt>(BaseIdx))
+    return UnknownVal();
+
+  const llvm::APSInt& BaseIdxI = cast<nonloc::ConcreteInt>(BaseIdx).getValue();
+
+  // Only allow non-integer offsets if the base region has no offset itself.
+  // FIXME: This is a somewhat arbitrary restriction. We should be using
+  // SValBuilder here to add the two offsets without checking their types.
+  if (!isa<nonloc::ConcreteInt>(Offset)) {
+    if (isa<ElementRegion>(BaseRegion->StripCasts()))
+      return UnknownVal();
+
+    return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
+                                                    ElemR->getSuperRegion(),
+                                                    Ctx));
+  }
+
+  const llvm::APSInt& OffI = cast<nonloc::ConcreteInt>(Offset).getValue();
+  assert(BaseIdxI.isSigned());
+
+  // Compute the new index.
+  nonloc::ConcreteInt NewIdx(svalBuilder.getBasicValueFactory().getValue(BaseIdxI +
+                                                                         OffI));
+
+  // Construct the new ElementRegion.
+  const MemRegion *ArrayR = ElemR->getSuperRegion();
+  return loc::MemRegionVal(MRMgr.getElementRegion(elementType, NewIdx, ArrayR,
+                                                  Ctx));
+}
diff --git a/lib/StaticAnalyzer/Core/SymbolManager.cpp b/lib/StaticAnalyzer/Core/SymbolManager.cpp
new file mode 100644
index 0000000..c1ca1cf
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -0,0 +1,345 @@
+//== SymbolManager.cpp - Management of Symbolic Values ----------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SymbolManager, a class that manages symbolic values
+// created for use by ExprEngine and related classes.
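+// For example (with illustrative symbol IDs), a conjured symbol of type
+// 'int' prints as "conj_$1{int}" and a region-value symbol as "reg_$2<r>";
+// see the dumpToStream() implementations below.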
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+void SymExpr::dump() const {
+  dumpToStream(llvm::errs());
+}
+
+static void print(llvm::raw_ostream& os, BinaryOperator::Opcode Op) {
+  switch (Op) {
+  default:
+    assert(false && "operator printing not implemented");
+    break;
+  case BO_Mul: os << '*'  ; break;
+  case BO_Div: os << '/'  ; break;
+  case BO_Rem: os << '%'  ; break;
+  case BO_Add: os << '+'  ; break;
+  case BO_Sub: os << '-'  ; break;
+  case BO_Shl: os << "<<" ; break;
+  case BO_Shr: os << ">>" ; break;
+  case BO_LT:  os << "<"  ; break;
+  case BO_GT:  os << '>'  ; break;
+  case BO_LE:  os << "<=" ; break;
+  case BO_GE:  os << ">=" ; break;
+  case BO_EQ:  os << "==" ; break;
+  case BO_NE:  os << "!=" ; break;
+  case BO_And: os << '&'  ; break;
+  case BO_Xor: os << '^'  ; break;
+  case BO_Or:  os << '|'  ; break;
+  }
+}
+
+void SymIntExpr::dumpToStream(llvm::raw_ostream& os) const {
+  os << '(';
+  getLHS()->dumpToStream(os);
+  os << ") ";
+  print(os, getOpcode());
+  os << ' ' << getRHS().getZExtValue();
+  if (getRHS().isUnsigned()) os << 'U';
+}
+
+void SymSymExpr::dumpToStream(llvm::raw_ostream& os) const {
+  os << '(';
+  getLHS()->dumpToStream(os);
+  os << ") ";
+  os << '(';
+  getRHS()->dumpToStream(os);
+  os << ')';
+}
+
+void SymbolConjured::dumpToStream(llvm::raw_ostream& os) const {
+  os << "conj_$" << getSymbolID() << '{' << T.getAsString() << '}';
+}
+
+void SymbolDerived::dumpToStream(llvm::raw_ostream& os) const {
+  os << "derived_$" << getSymbolID() << '{'
+     << getParentSymbol() << ',' << getRegion() << '}';
+}
+
+void SymbolExtent::dumpToStream(llvm::raw_ostream& os) const {
+  os << "extent_$" << getSymbolID() << '{' << getRegion() << '}';
+}
+
+void SymbolMetadata::dumpToStream(llvm::raw_ostream& os) const {
+  os << "meta_$" << getSymbolID() << '{'
+     << getRegion() << ',' << T.getAsString() << '}';
+}
+
+void SymbolRegionValue::dumpToStream(llvm::raw_ostream& os) const {
+  os << "reg_$" << getSymbolID() << "<" << R << ">";
+}
+
+const SymbolRegionValue*
+SymbolManager::getRegionValueSymbol(const TypedRegion* R) {
+  llvm::FoldingSetNodeID profile;
+  SymbolRegionValue::Profile(profile, R);
+  void* InsertPos;
+  SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+  if (!SD) {
+    SD = (SymExpr*) BPAlloc.Allocate<SymbolRegionValue>();
+    new (SD) SymbolRegionValue(SymbolCounter, R);
+    DataSet.InsertNode(SD, InsertPos);
+    ++SymbolCounter;
+  }
+
+  return cast<SymbolRegionValue>(SD);
+}
+
+const SymbolConjured*
+SymbolManager::getConjuredSymbol(const Stmt* E, QualType T, unsigned Count,
+                                 const void* SymbolTag) {
+
+  llvm::FoldingSetNodeID profile;
+  SymbolConjured::Profile(profile, E, T, Count, SymbolTag);
+  void* InsertPos;
+  SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+  if (!SD) {
+    SD = (SymExpr*) BPAlloc.Allocate<SymbolConjured>();
+    new (SD) SymbolConjured(SymbolCounter, E, T, Count, SymbolTag);
+    DataSet.InsertNode(SD, InsertPos);
+    ++SymbolCounter;
+  }
+
+  return cast<SymbolConjured>(SD);
+}
+
+const SymbolDerived*
+SymbolManager::getDerivedSymbol(SymbolRef parentSymbol,
+                                const TypedRegion *R) {
+
+  llvm::FoldingSetNodeID profile;
+  SymbolDerived::Profile(profile, parentSymbol, R);
+  void* InsertPos;
+  SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+  if (!SD) {
+    SD = (SymExpr*) BPAlloc.Allocate<SymbolDerived>();
+    new (SD) SymbolDerived(SymbolCounter, parentSymbol, R);
+    DataSet.InsertNode(SD, InsertPos);
+    ++SymbolCounter;
+  }
+
+  return cast<SymbolDerived>(SD);
+}
+
+const SymbolExtent*
+SymbolManager::getExtentSymbol(const SubRegion *R) {
+  llvm::FoldingSetNodeID profile;
+  SymbolExtent::Profile(profile, R);
+  void* InsertPos;
+  SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+  if (!SD) {
+    SD = (SymExpr*) BPAlloc.Allocate<SymbolExtent>();
+    new (SD) SymbolExtent(SymbolCounter, R);
+    DataSet.InsertNode(SD, InsertPos);
+    ++SymbolCounter;
+  }
+
+  return cast<SymbolExtent>(SD);
+}
+
+const SymbolMetadata*
+SymbolManager::getMetadataSymbol(const MemRegion* R, const Stmt* S, QualType T,
+                                 unsigned Count, const void* SymbolTag) {
+
+  llvm::FoldingSetNodeID profile;
+  SymbolMetadata::Profile(profile, R, S, T, Count, SymbolTag);
+  void* InsertPos;
+  SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+  if (!SD) {
+    SD = (SymExpr*) BPAlloc.Allocate<SymbolMetadata>();
+    new (SD) SymbolMetadata(SymbolCounter, R, S, T, Count, SymbolTag);
+    DataSet.InsertNode(SD, InsertPos);
+    ++SymbolCounter;
+  }
+
+  return cast<SymbolMetadata>(SD);
+}
+
+const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs,
+                                               BinaryOperator::Opcode op,
+                                               const llvm::APSInt& v,
+                                               QualType t) {
+  llvm::FoldingSetNodeID ID;
+  SymIntExpr::Profile(ID, lhs, op, v, t);
+  void *InsertPos;
+  SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+  if (!data) {
+    data = (SymIntExpr*) BPAlloc.Allocate<SymIntExpr>();
+    new (data) SymIntExpr(lhs, op, v, t);
+    DataSet.InsertNode(data, InsertPos);
+  }
+
+  return cast<SymIntExpr>(data);
+}
+
+const SymSymExpr *SymbolManager::getSymSymExpr(const SymExpr *lhs,
+                                               BinaryOperator::Opcode op,
+                                               const SymExpr *rhs,
+                                               QualType t) {
+  llvm::FoldingSetNodeID ID;
+  SymSymExpr::Profile(ID, lhs, op, rhs, t);
+  void *InsertPos;
+  SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+  if (!data) {
+    data = (SymSymExpr*) BPAlloc.Allocate<SymSymExpr>();
+    new (data) SymSymExpr(lhs, op, rhs, t);
+    DataSet.InsertNode(data, InsertPos);
+  }
+
+  return cast<SymSymExpr>(data);
+}
+
+QualType SymbolConjured::getType(ASTContext&) const {
+  return T;
+}
+
+QualType SymbolDerived::getType(ASTContext& Ctx) const {
+  return R->getValueType();
+}
+
+QualType SymbolExtent::getType(ASTContext& Ctx) const {
+  return Ctx.getSizeType();
+}
+
+QualType SymbolMetadata::getType(ASTContext&) const {
+  return T;
+}
+
+QualType SymbolRegionValue::getType(ASTContext& C) const {
+  return R->getValueType();
+}
+
+SymbolManager::~SymbolManager() {}
+
+bool SymbolManager::canSymbolicate(QualType T) {
+  T = T.getCanonicalType();
+
+  if (Loc::isLocType(T))
+    return true;
+
+  if (T->isIntegerType())
+    return T->isScalarType();
+
+  if (T->isRecordType() && !T->isUnionType())
+    return true;
+
+  return false;
+}
+
+void SymbolReaper::markLive(SymbolRef sym) {
+  TheLiving.insert(sym);
+  TheDead.erase(sym);
+}
+
+void SymbolReaper::markInUse(SymbolRef sym) {
+  if (isa<SymbolMetadata>(sym))
+    MetadataInUse.insert(sym);
+}
+
+bool SymbolReaper::maybeDead(SymbolRef sym) {
+  if (isLive(sym))
+    return false;
+
+  TheDead.insert(sym);
+  return true;
+}
+
+static bool IsLiveRegion(SymbolReaper &Reaper, const MemRegion *MR) {
+  MR = MR->getBaseRegion();
+
+  if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
+    return Reaper.isLive(SR->getSymbol());
+
+  if (const VarRegion *VR = dyn_cast<VarRegion>(MR))
+    return Reaper.isLive(VR);
+
+  // FIXME: This is a gross over-approximation. What we really need is a way to
+  // tell if anything still refers to this region. Unlike SymbolicRegions,
+  // AllocaRegions don't have associated symbols, though, so we don't actually
+  // have a way to track their liveness.
+  if (isa<AllocaRegion>(MR))
+    return true;
+
+  if (isa<CXXThisRegion>(MR))
+    return true;
+
+  if (isa<MemSpaceRegion>(MR))
+    return true;
+
+  return false;
+}
+
+bool SymbolReaper::isLive(SymbolRef sym) {
+  if (TheLiving.count(sym))
+    return true;
+
+  if (const SymbolDerived *derived = dyn_cast<SymbolDerived>(sym)) {
+    if (isLive(derived->getParentSymbol())) {
+      markLive(sym);
+      return true;
+    }
+    return false;
+  }
+
+  if (const SymbolExtent *extent = dyn_cast<SymbolExtent>(sym)) {
+    if (IsLiveRegion(*this, extent->getRegion())) {
+      markLive(sym);
+      return true;
+    }
+    return false;
+  }
+
+  if (const SymbolMetadata *metadata = dyn_cast<SymbolMetadata>(sym)) {
+    if (MetadataInUse.count(sym)) {
+      if (IsLiveRegion(*this, metadata->getRegion())) {
+        markLive(sym);
+        MetadataInUse.erase(sym);
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // Interrogate the symbol. It may derive from an input value to
+  // the analyzed function/method.
+  return isa<SymbolRegionValue>(sym);
+}
+
+bool SymbolReaper::isLive(const Stmt* ExprVal) const {
+  return LCtx->getAnalysisContext()->getRelaxedLiveVariables()->
+      isLive(Loc, ExprVal);
+}
+
+bool SymbolReaper::isLive(const VarRegion *VR) const {
+  const StackFrameContext *VarContext = VR->getStackFrame();
+  const StackFrameContext *CurrentContext = LCtx->getCurrentStackFrame();
+
+  if (VarContext == CurrentContext)
+    return LCtx->getAnalysisContext()->getRelaxedLiveVariables()->
+        isLive(Loc, VR->getDecl());
+
+  return VarContext->isParentOf(CurrentContext);
+}
+
+SymbolVisitor::~SymbolVisitor() {}
diff --git a/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp b/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
new file mode 100644
index 0000000..230b6a10
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
@@ -0,0 +1,70 @@
+//===--- TextPathDiagnostics.cpp - Text Diagnostics for Paths ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TextPathDiagnostics object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathDiagnosticClients.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace ento;
+using namespace llvm;
+
+namespace {
+
+/// \brief Simple path diagnostic client used for outputting the sequence of
+/// events as diagnostic notes.
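+/// Each event in a path is emitted as a custom note diagnostic through
+/// Diagnostic::Report; see HandlePathDiagnostic below.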
+class TextPathDiagnostics : public PathDiagnosticClient {
+  const std::string OutputFile;
+  Diagnostic &Diag;
+
+public:
+  TextPathDiagnostics(const std::string& output, Diagnostic &diag)
+    : OutputFile(output), Diag(diag) {}
+
+  void HandlePathDiagnostic(const PathDiagnostic* D);
+
+  void FlushDiagnostics(llvm::SmallVectorImpl<std::string> *FilesMade) { }
+
+  virtual llvm::StringRef getName() const {
+    return "TextPathDiagnostics";
+  }
+
+  PathGenerationScheme getGenerationScheme() const { return Minimal; }
+  bool supportsLogicalOpControlFlow() const { return true; }
+  bool supportsAllBlockEdges() const { return true; }
+  virtual bool useVerboseDescription() const { return true; }
+};
+
+} // end anonymous namespace
+
+PathDiagnosticClient*
+ento::createTextPathDiagnosticClient(const std::string& out,
+                                     const Preprocessor &PP) {
+  return new TextPathDiagnostics(out, PP.getDiagnostics());
+}
+
+void TextPathDiagnostics::HandlePathDiagnostic(const PathDiagnostic* D) {
+  if (!D)
+    return;
+
+  if (D->empty()) {
+    delete D;
+    return;
+  }
+
+  for (PathDiagnostic::const_iterator I=D->begin(), E=D->end(); I != E; ++I) {
+    unsigned diagID = Diag.getDiagnosticIDs()->getCustomDiagID(
+        DiagnosticIDs::Note, I->getString());
+    Diag.Report(I->getLocation().asLocation(), diagID);
+  }
+}