Diffstat (limited to 'lib/Analysis')
-rw-r--r--  lib/Analysis/AnalysisContext.cpp  138
-rw-r--r--  lib/Analysis/AnalysisManager.cpp  35
-rw-r--r--  lib/Analysis/BasicConstraintManager.cpp  62
-rw-r--r--  lib/Analysis/BasicObjCFoundationChecks.cpp  329
-rw-r--r--  lib/Analysis/BasicObjCFoundationChecks.h  21
-rw-r--r--  lib/Analysis/BasicStore.cpp  380
-rw-r--r--  lib/Analysis/BasicValueFactory.cpp  138
-rw-r--r--  lib/Analysis/BugReporter.cpp  994
-rw-r--r--  lib/Analysis/BugReporterVisitors.cpp  349
-rw-r--r--  lib/Analysis/CFG.cpp  2084
-rw-r--r--  lib/Analysis/CFRefCount.cpp  1882
-rw-r--r--  lib/Analysis/CMakeLists.txt  10
-rw-r--r--  lib/Analysis/CallGraph.cpp  150
-rw-r--r--  lib/Analysis/CallInliner.cpp  75
-rw-r--r--  lib/Analysis/CheckDeadStores.cpp  99
-rw-r--r--  lib/Analysis/CheckNSError.cpp  166
-rw-r--r--  lib/Analysis/CheckObjCDealloc.cpp  134
-rw-r--r--  lib/Analysis/CheckObjCInstMethSignature.cpp  47
-rw-r--r--  lib/Analysis/CheckObjCUnusedIVars.cpp  64
-rw-r--r--  lib/Analysis/CheckSecuritySyntaxOnly.cpp  409
-rw-r--r--  lib/Analysis/Environment.cpp  141
-rw-r--r--  lib/Analysis/ExplodedGraph.cpp  200
-rw-r--r--  lib/Analysis/GRBlockCounter.cpp  2
-rw-r--r--  lib/Analysis/GRCoreEngine.cpp  439
-rw-r--r--  lib/Analysis/GRExprEngine.cpp  2541
-rw-r--r--  lib/Analysis/GRExprEngineInternalChecks.cpp  844
-rw-r--r--  lib/Analysis/GRState.cpp  158
-rw-r--r--  lib/Analysis/LiveVariables.cpp  122
-rw-r--r--  lib/Analysis/MemRegion.cpp  212
-rw-r--r--  lib/Analysis/PathDiagnostic.cpp  96
-rw-r--r--  lib/Analysis/RangeConstraintManager.cpp  60
-rw-r--r--  lib/Analysis/RegionStore.cpp  1809
-rw-r--r--  lib/Analysis/SVals.cpp  163
-rw-r--r--  lib/Analysis/SValuator.cpp  160
-rw-r--r--  lib/Analysis/SimpleConstraintManager.cpp  64
-rw-r--r--  lib/Analysis/SimpleConstraintManager.h  24
-rw-r--r--  lib/Analysis/SimpleSValuator.cpp  158
-rw-r--r--  lib/Analysis/Store.cpp  246
-rw-r--r--  lib/Analysis/SymbolManager.cpp  139
-rw-r--r--  lib/Analysis/UninitializedValues.cpp  84
-rw-r--r--  lib/Analysis/ValueManager.cpp  114
41 files changed, 9728 insertions, 5614 deletions
diff --git a/lib/Analysis/AnalysisContext.cpp b/lib/Analysis/AnalysisContext.cpp
new file mode 100644
index 000000000000..a4cb66be04b3
--- /dev/null
+++ b/lib/Analysis/AnalysisContext.cpp
@@ -0,0 +1,138 @@
+//== AnalysisContext.cpp - Analysis context for Path Sens analysis -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines AnalysisContext, a class that manages the analysis context
+// data for path sensitive analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/AnalysisContext.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ParentMap.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace clang;
+
+AnalysisContext::~AnalysisContext() {
+ delete cfg;
+ delete liveness;
+ delete PM;
+}
+
+AnalysisContextManager::~AnalysisContextManager() {
+ for (ContextMap::iterator I = Contexts.begin(), E = Contexts.end(); I!=E; ++I)
+ delete I->second;
+}
+
+Stmt *AnalysisContext::getBody() {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ return FD->getBody();
+ else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->getBody();
+
+ llvm::llvm_unreachable("unknown code decl");
+}
+
+const ImplicitParamDecl *AnalysisContext::getSelfDecl() const {
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->getSelfDecl();
+
+ return NULL;
+}
+
+CFG *AnalysisContext::getCFG() {
+ if (!cfg)
+ cfg = CFG::buildCFG(getBody(), &D->getASTContext());
+ return cfg;
+}
+
+ParentMap &AnalysisContext::getParentMap() {
+ if (!PM)
+ PM = new ParentMap(getBody());
+ return *PM;
+}
+
+LiveVariables *AnalysisContext::getLiveVariables() {
+ if (!liveness) {
+ CFG *c = getCFG();
+ if (!c)
+ return 0;
+
+ liveness = new LiveVariables(D->getASTContext(), *c);
+ liveness->runOnCFG(*c);
+ liveness->runOnAllBlocks(*c, 0, true);
+ }
+
+ return liveness;
+}
+
+AnalysisContext *AnalysisContextManager::getContext(const Decl *D) {
+ AnalysisContext *&AC = Contexts[D];
+ if (!AC)
+ AC = new AnalysisContext(D);
+
+ return AC;
+}
+
+void LocationContext::Profile(llvm::FoldingSetNodeID &ID, ContextKind k,
+ AnalysisContext *ctx,
+ const LocationContext *parent) {
+ ID.AddInteger(k);
+ ID.AddPointer(ctx);
+ ID.AddPointer(parent);
+}
+
+void StackFrameContext::Profile(llvm::FoldingSetNodeID &ID,AnalysisContext *ctx,
+ const LocationContext *parent, const Stmt *s) {
+ LocationContext::Profile(ID, StackFrame, ctx, parent);
+ ID.AddPointer(s);
+}
+
+void ScopeContext::Profile(llvm::FoldingSetNodeID &ID, AnalysisContext *ctx,
+ const LocationContext *parent, const Stmt *s) {
+ LocationContext::Profile(ID, Scope, ctx, parent);
+ ID.AddPointer(s);
+}
+
+StackFrameContext*
+LocationContextManager::getStackFrame(AnalysisContext *ctx,
+ const LocationContext *parent,
+ const Stmt *s) {
+ llvm::FoldingSetNodeID ID;
+ StackFrameContext::Profile(ID, ctx, parent, s);
+ void *InsertPos;
+
+ StackFrameContext *f =
+ cast_or_null<StackFrameContext>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
+ if (!f) {
+ f = new StackFrameContext(ctx, parent, s);
+ Contexts.InsertNode(f, InsertPos);
+ }
+ return f;
+}
+
+ScopeContext *LocationContextManager::getScope(AnalysisContext *ctx,
+ const LocationContext *parent,
+ const Stmt *s) {
+ llvm::FoldingSetNodeID ID;
+ ScopeContext::Profile(ID, ctx, parent, s);
+ void *InsertPos;
+
+ ScopeContext *scope =
+ cast_or_null<ScopeContext>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
+
+ if (!scope) {
+ scope = new ScopeContext(ctx, parent, s);
+ Contexts.InsertNode(scope, InsertPos);
+ }
+ return scope;
+}
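
For orientation, a minimal usage sketch of the interface introduced above (hypothetical caller code, not part of the patch; the function name analyzeDecl is invented for illustration). An AnalysisContextManager hands out one AnalysisContext per Decl, and the CFG, ParentMap, and LiveVariables objects are built lazily from getBody() on first access:

  #include "clang/Analysis/PathSensitive/AnalysisContext.h"
  using namespace clang;

  void analyzeDecl(AnalysisContextManager &Mgr, const Decl *D) {
    AnalysisContext *AC = Mgr.getContext(D);    // created on first request, reused afterwards
    CFG *C = AC->getCFG();                      // built from AC->getBody(); may be null
    LiveVariables *L = AC->getLiveVariables();  // runs liveness over the CFG above
    ParentMap &PM = AC->getParentMap();         // statement-to-parent map for getBody()
    (void)C; (void)L; (void)PM;
  }
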
diff --git a/lib/Analysis/AnalysisManager.cpp b/lib/Analysis/AnalysisManager.cpp
new file mode 100644
index 000000000000..c2733faa683c
--- /dev/null
+++ b/lib/Analysis/AnalysisManager.cpp
@@ -0,0 +1,35 @@
+//== AnalysisManager.cpp - Path sensitive analysis data manager ----*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the AnalysisManager class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/AnalysisManager.h"
+#include "clang/Basic/SourceManager.h"
+
+using namespace clang;
+
+void AnalysisManager::DisplayFunction(Decl *D) {
+
+ if (DisplayedFunction)
+ return;
+
+ DisplayedFunction = true;
+
+ // FIXME: Is getCodeDecl() always a named decl?
+ if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
+ const NamedDecl *ND = cast<NamedDecl>(D);
+ SourceManager &SM = getASTContext().getSourceManager();
+ (llvm::errs() << "ANALYZE: "
+ << SM.getPresumedLoc(ND->getLocation()).getFilename()
+ << ' ' << ND->getNameAsString() << '\n').flush();
+ }
+}
+
diff --git a/lib/Analysis/BasicConstraintManager.cpp b/lib/Analysis/BasicConstraintManager.cpp
index cb89d3065107..d0b828952854 100644
--- a/lib/Analysis/BasicConstraintManager.cpp
+++ b/lib/Analysis/BasicConstraintManager.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines BasicConstraintManager, a class that tracks simple
+// This file defines BasicConstraintManager, a class that tracks simple
// equality and inequality constraints on symbolic values of GRState.
//
//===----------------------------------------------------------------------===//
@@ -27,22 +27,22 @@ namespace { class VISIBILITY_HIDDEN ConstEq {}; }
typedef llvm::ImmutableMap<SymbolRef,GRState::IntSetTy> ConstNotEqTy;
typedef llvm::ImmutableMap<SymbolRef,const llvm::APSInt*> ConstEqTy;
-
+
static int ConstEqIndex = 0;
static int ConstNotEqIndex = 0;
namespace clang {
template<>
struct GRStateTrait<ConstNotEq> : public GRStatePartialTrait<ConstNotEqTy> {
- static inline void* GDMIndex() { return &ConstNotEqIndex; }
+ static inline void* GDMIndex() { return &ConstNotEqIndex; }
};
template<>
struct GRStateTrait<ConstEq> : public GRStatePartialTrait<ConstEqTy> {
- static inline void* GDMIndex() { return &ConstEqIndex; }
+ static inline void* GDMIndex() { return &ConstEqIndex; }
};
-}
-
+}
+
namespace {
// BasicConstraintManager only tracks equality and inequality constraints of
// constants and integer variables.
@@ -50,7 +50,7 @@ class VISIBILITY_HIDDEN BasicConstraintManager
: public SimpleConstraintManager {
GRState::IntSetTy::Factory ISetFactory;
public:
- BasicConstraintManager(GRStateManager& statemgr)
+ BasicConstraintManager(GRStateManager& statemgr)
: ISetFactory(statemgr.getAllocator()) {}
const GRState* AssumeSymNE(const GRState* state, SymbolRef sym,
@@ -83,7 +83,7 @@ public:
const GRState* RemoveDeadBindings(const GRState* state, SymbolReaper& SymReaper);
- void print(const GRState* state, llvm::raw_ostream& Out,
+ void print(const GRState* state, llvm::raw_ostream& Out,
const char* nl, const char *sep);
};
@@ -133,7 +133,7 @@ const GRState *BasicConstraintManager::AssumeSymEQ(const GRState *state,
// These logic will be handled in another ConstraintManager.
const GRState *BasicConstraintManager::AssumeSymLT(const GRState *state,
SymbolRef sym,
- const llvm::APSInt& V) {
+ const llvm::APSInt& V) {
// Is 'V' the smallest possible value?
if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) {
// sym cannot be any value less than 'V'. This path is infeasible.
@@ -167,14 +167,14 @@ const GRState *BasicConstraintManager::AssumeSymGE(const GRState *state,
bool isFeasible = *X >= V;
return isFeasible ? state : NULL;
}
-
+
// Sym is not a constant, but it is worth looking to see if V is the
// maximum integer value.
if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) {
// If we know that sym != V, then this condition is infeasible since
- // there is no other value greater than V.
+ // there is no other value greater than V.
bool isFeasible = !isNotEqual(state, sym, V);
-
+
// If the path is still feasible then as a consequence we know that
// 'sym == V' because we cannot have 'sym > V' (no larger values).
// Add this constraint.
@@ -193,20 +193,20 @@ BasicConstraintManager::AssumeSymLE(const GRState* state, SymbolRef sym,
bool isFeasible = *X <= V;
return isFeasible ? state : NULL;
}
-
+
// Sym is not a constant, but it is worth looking to see if V is the
// minimum integer value.
if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) {
// If we know that sym != V, then this condition is infeasible since
- // there is no other value less than V.
+ // there is no other value less than V.
bool isFeasible = !isNotEqual(state, sym, V);
-
+
// If the path is still feasible then as a consequence we know that
// 'sym == V' because we cannot have 'sym < V' (no smaller values).
// Add this constraint.
return isFeasible ? AddEQ(state, sym, V) : NULL;
}
-
+
return state;
}
@@ -222,10 +222,10 @@ const GRState* BasicConstraintManager::AddNE(const GRState* state, SymbolRef sym
// First, retrieve the NE-set associated with the given symbol.
ConstNotEqTy::data_type* T = state->get<ConstNotEq>(sym);
GRState::IntSetTy S = T ? *T : ISetFactory.GetEmptySet();
-
+
// Now add V to the NE set.
S = ISetFactory.Add(S, &V);
-
+
// Create a new state with the old binding replaced.
return state->set<ConstNotEq>(sym, S);
}
@@ -236,7 +236,7 @@ const llvm::APSInt* BasicConstraintManager::getSymVal(const GRState* state,
return T ? *T : NULL;
}
-bool BasicConstraintManager::isNotEqual(const GRState* state, SymbolRef sym,
+bool BasicConstraintManager::isNotEqual(const GRState* state, SymbolRef sym,
const llvm::APSInt& V) const {
// Retrieve the NE-set associated with the given symbol.
@@ -273,14 +273,14 @@ BasicConstraintManager::RemoveDeadBindings(const GRState* state,
ConstNotEqTy::Factory& CNEFactory = state->get_context<ConstNotEq>();
for (ConstNotEqTy::iterator I = CNE.begin(), E = CNE.end(); I != E; ++I) {
- SymbolRef sym = I.getKey();
+ SymbolRef sym = I.getKey();
if (SymReaper.maybeDead(sym)) CNE = CNEFactory.Remove(CNE, sym);
}
-
+
return state->set<ConstNotEq>(CNE);
}
-void BasicConstraintManager::print(const GRState* state, llvm::raw_ostream& Out,
+void BasicConstraintManager::print(const GRState* state, llvm::raw_ostream& Out,
const char* nl, const char *sep) {
// Print equality constraints.
@@ -293,23 +293,23 @@ void BasicConstraintManager::print(const GRState* state, llvm::raw_ostream& Out,
}
// Print != constraints.
-
+
ConstNotEqTy CNE = state->get<ConstNotEq>();
-
+
if (!CNE.isEmpty()) {
Out << nl << sep << "'!=' constraints:";
-
+
for (ConstNotEqTy::iterator I = CNE.begin(), EI = CNE.end(); I!=EI; ++I) {
Out << nl << " $" << I.getKey() << " : ";
bool isFirst = true;
-
- GRState::IntSetTy::iterator J = I.getData().begin(),
- EJ = I.getData().end();
-
- for ( ; J != EJ; ++J) {
+
+ GRState::IntSetTy::iterator J = I.getData().begin(),
+ EJ = I.getData().end();
+
+ for ( ; J != EJ; ++J) {
if (isFirst) isFirst = false;
else Out << ", ";
-
+
Out << (*J)->getSExtValue(); // Hack: should print to raw_ostream.
}
}
diff --git a/lib/Analysis/BasicObjCFoundationChecks.cpp b/lib/Analysis/BasicObjCFoundationChecks.cpp
index aa85769157e7..af300f36fa72 100644
--- a/lib/Analysis/BasicObjCFoundationChecks.cpp
+++ b/lib/Analysis/BasicObjCFoundationChecks.cpp
@@ -31,26 +31,21 @@
using namespace clang;
-static ObjCInterfaceType* GetReceiverType(ObjCMessageExpr* ME) {
- Expr* Receiver = ME->getReceiver();
-
+static const ObjCInterfaceType* GetReceiverType(const ObjCMessageExpr* ME) {
+ const Expr* Receiver = ME->getReceiver();
+
if (!Receiver)
return NULL;
-
- QualType X = Receiver->getType();
-
- if (X->isPointerType()) {
- Type* TP = X.getTypePtr();
- const PointerType* T = TP->getAsPointerType();
- return dyn_cast<ObjCInterfaceType>(T->getPointeeType().getTypePtr());
- }
- // FIXME: Support ObjCQualifiedIdType?
+ if (const ObjCObjectPointerType *PT =
+ Receiver->getType()->getAs<ObjCObjectPointerType>())
+ return PT->getInterfaceType();
+
return NULL;
}
-static const char* GetReceiverNameType(ObjCMessageExpr* ME) {
- ObjCInterfaceType* ReceiverType = GetReceiverType(ME);
+static const char* GetReceiverNameType(const ObjCMessageExpr* ME) {
+ const ObjCInterfaceType *ReceiverType = GetReceiverType(ME);
return ReceiverType ? ReceiverType->getDecl()->getIdentifier()->getName()
: NULL;
}
@@ -61,76 +56,75 @@ class VISIBILITY_HIDDEN APIMisuse : public BugType {
public:
APIMisuse(const char* name) : BugType(name, "API Misuse (Apple)") {}
};
-
+
class VISIBILITY_HIDDEN BasicObjCFoundationChecks : public GRSimpleAPICheck {
APIMisuse *BT;
BugReporter& BR;
ASTContext &Ctx;
-
- bool isNSString(ObjCInterfaceType* T, const char* suffix);
- bool AuditNSString(NodeTy* N, ObjCMessageExpr* ME);
-
- void Warn(NodeTy* N, Expr* E, const std::string& s);
- void WarnNilArg(NodeTy* N, Expr* E);
-
- bool CheckNilArg(NodeTy* N, unsigned Arg);
+
+ bool isNSString(const ObjCInterfaceType *T, const char* suffix);
+ bool AuditNSString(ExplodedNode* N, const ObjCMessageExpr* ME);
+
+ void Warn(ExplodedNode* N, const Expr* E, const std::string& s);
+ void WarnNilArg(ExplodedNode* N, const Expr* E);
+
+ bool CheckNilArg(ExplodedNode* N, unsigned Arg);
public:
- BasicObjCFoundationChecks(ASTContext& ctx, BugReporter& br)
+ BasicObjCFoundationChecks(ASTContext& ctx, BugReporter& br)
: BT(0), BR(br), Ctx(ctx) {}
-
- bool Audit(ExplodedNode<GRState>* N, GRStateManager&);
-
-private:
- void WarnNilArg(NodeTy* N, ObjCMessageExpr* ME, unsigned Arg) {
+
+ bool Audit(ExplodedNode* N, GRStateManager&);
+
+private:
+ void WarnNilArg(ExplodedNode* N, const ObjCMessageExpr* ME, unsigned Arg) {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
os << "Argument to '" << GetReceiverNameType(ME) << "' method '"
<< ME->getSelector().getAsString() << "' cannot be nil.";
-
+
// Lazily create the BugType object for NilArg. This will be owned
// by the BugReporter object 'BR' once we call BR.EmitWarning.
if (!BT) BT = new APIMisuse("nil argument");
-
+
RangedBugReport *R = new RangedBugReport(*BT, os.str().c_str(), N);
R->addRange(ME->getArg(Arg)->getSourceRange());
BR.EmitReport(R);
}
};
-
+
} // end anonymous namespace
GRSimpleAPICheck*
clang::CreateBasicObjCFoundationChecks(ASTContext& Ctx, BugReporter& BR) {
- return new BasicObjCFoundationChecks(Ctx, BR);
+ return new BasicObjCFoundationChecks(Ctx, BR);
}
-bool BasicObjCFoundationChecks::Audit(ExplodedNode<GRState>* N,
+bool BasicObjCFoundationChecks::Audit(ExplodedNode* N,
GRStateManager&) {
-
- ObjCMessageExpr* ME =
+
+ const ObjCMessageExpr* ME =
cast<ObjCMessageExpr>(cast<PostStmt>(N->getLocation()).getStmt());
- ObjCInterfaceType* ReceiverType = GetReceiverType(ME);
-
+ const ObjCInterfaceType *ReceiverType = GetReceiverType(ME);
+
if (!ReceiverType)
return false;
-
+
const char* name = ReceiverType->getDecl()->getIdentifier()->getName();
-
+
if (!name)
return false;
if (name[0] != 'N' || name[1] != 'S')
return false;
-
+
name += 2;
-
+
// FIXME: Make all of this faster.
-
if (isNSString(ReceiverType, name))
return AuditNSString(N, ME);
@@ -138,24 +132,24 @@ bool BasicObjCFoundationChecks::Audit(ExplodedNode<GRState>* N,
}
static inline bool isNil(SVal X) {
- return isa<loc::ConcreteInt>(X);
+ return isa<loc::ConcreteInt>(X);
}
//===----------------------------------------------------------------------===//
// Error reporting.
//===----------------------------------------------------------------------===//
-bool BasicObjCFoundationChecks::CheckNilArg(NodeTy* N, unsigned Arg) {
- ObjCMessageExpr* ME =
+bool BasicObjCFoundationChecks::CheckNilArg(ExplodedNode* N, unsigned Arg) {
+ const ObjCMessageExpr* ME =
cast<ObjCMessageExpr>(cast<PostStmt>(N->getLocation()).getStmt());
-
- Expr * E = ME->getArg(Arg);
-
+
+ const Expr * E = ME->getArg(Arg);
+
if (isNil(N->getState()->getSVal(E))) {
WarnNilArg(N, ME, Arg);
return true;
}
-
+
return false;
}
@@ -163,37 +157,36 @@ bool BasicObjCFoundationChecks::CheckNilArg(NodeTy* N, unsigned Arg) {
// NSString checking.
//===----------------------------------------------------------------------===//
-bool BasicObjCFoundationChecks::isNSString(ObjCInterfaceType* T,
+bool BasicObjCFoundationChecks::isNSString(const ObjCInterfaceType *T,
const char* suffix) {
-
return !strcmp("String", suffix) || !strcmp("MutableString", suffix);
}
-bool BasicObjCFoundationChecks::AuditNSString(NodeTy* N,
- ObjCMessageExpr* ME) {
-
+bool BasicObjCFoundationChecks::AuditNSString(ExplodedNode* N,
+ const ObjCMessageExpr* ME) {
+
Selector S = ME->getSelector();
-
+
if (S.isUnarySelector())
return false;
// FIXME: This is going to be really slow doing these checks with
// lexical comparisons.
-
+
std::string name = S.getAsString();
assert (!name.empty());
const char* cstr = &name[0];
unsigned len = name.size();
-
+
switch (len) {
default:
break;
- case 8:
+ case 8:
if (!strcmp(cstr, "compare:"))
return CheckNilArg(N, 0);
-
+
break;
-
+
case 15:
// FIXME: Checking for initWithFormat: will not work in most cases
// yet because [NSString alloc] returns id, not NSString*. We will
@@ -201,41 +194,41 @@ bool BasicObjCFoundationChecks::AuditNSString(NodeTy* N,
// to find these errors.
if (!strcmp(cstr, "initWithFormat:"))
return CheckNilArg(N, 0);
-
+
break;
-
+
case 16:
if (!strcmp(cstr, "compare:options:"))
return CheckNilArg(N, 0);
-
+
break;
-
+
case 22:
if (!strcmp(cstr, "compare:options:range:"))
return CheckNilArg(N, 0);
-
+
break;
-
+
case 23:
-
+
if (!strcmp(cstr, "caseInsensitiveCompare:"))
return CheckNilArg(N, 0);
-
+
break;
case 29:
if (!strcmp(cstr, "compare:options:range:locale:"))
return CheckNilArg(N, 0);
-
- break;
-
+
+ break;
+
case 37:
if (!strcmp(cstr, "componentsSeparatedByCharactersInSet:"))
return CheckNilArg(N, 0);
-
- break;
+
+ break;
}
-
+
return false;
}
@@ -247,7 +240,7 @@ namespace {
class VISIBILITY_HIDDEN AuditCFNumberCreate : public GRSimpleAPICheck {
APIMisuse* BT;
-
+
// FIXME: Either this should be refactored into GRSimpleAPICheck, or
// it should always be passed with a call to Audit. The latter
// approach makes this class more stateless.
@@ -256,16 +249,16 @@ class VISIBILITY_HIDDEN AuditCFNumberCreate : public GRSimpleAPICheck {
BugReporter& BR;
public:
- AuditCFNumberCreate(ASTContext& ctx, BugReporter& br)
+ AuditCFNumberCreate(ASTContext& ctx, BugReporter& br)
: BT(0), Ctx(ctx), II(&Ctx.Idents.get("CFNumberCreate")), BR(br){}
-
+
~AuditCFNumberCreate() {}
-
- bool Audit(ExplodedNode<GRState>* N, GRStateManager&);
-
+
+ bool Audit(ExplodedNode* N, GRStateManager&);
+
private:
- void AddError(const TypedRegion* R, Expr* Ex, ExplodedNode<GRState> *N,
- uint64_t SourceSize, uint64_t TargetSize, uint64_t NumberKind);
+ void AddError(const TypedRegion* R, const Expr* Ex, ExplodedNode *N,
+ uint64_t SourceSize, uint64_t TargetSize, uint64_t NumberKind);
};
} // end anonymous namespace
@@ -296,7 +289,7 @@ namespace {
public:
Optional() : IsKnown(false), Val(0) {}
Optional(const T& val) : IsKnown(true), Val(val) {}
-
+
bool isKnown() const { return IsKnown; }
const T& getValue() const {
@@ -312,12 +305,12 @@ namespace {
static Optional<uint64_t> GetCFNumberSize(ASTContext& Ctx, uint64_t i) {
static unsigned char FixedSize[] = { 8, 16, 32, 64, 32, 64 };
-
+
if (i < kCFNumberCharType)
return FixedSize[i-1];
-
+
QualType T;
-
+
switch (i) {
case kCFNumberCharType: T = Ctx.CharTy; break;
case kCFNumberShortType: T = Ctx.ShortTy; break;
@@ -329,11 +322,11 @@ static Optional<uint64_t> GetCFNumberSize(ASTContext& Ctx, uint64_t i) {
case kCFNumberCFIndexType:
case kCFNumberNSIntegerType:
case kCFNumberCGFloatType:
- // FIXME: We need a way to map from names to Type*.
+ // FIXME: We need a way to map from names to Type*.
default:
return Optional<uint64_t>();
}
-
+
return Ctx.getTypeSize(T);
}
@@ -357,100 +350,98 @@ static const char* GetCFNumberTypeStr(uint64_t i) {
"kCFNumberNSIntegerType",
"kCFNumberCGFloatType"
};
-
+
return i <= kCFNumberCGFloatType ? Names[i-1] : "Invalid CFNumberType";
}
#endif
-bool AuditCFNumberCreate::Audit(ExplodedNode<GRState>* N,GRStateManager&){
- CallExpr* CE = cast<CallExpr>(cast<PostStmt>(N->getLocation()).getStmt());
- Expr* Callee = CE->getCallee();
- SVal CallV = N->getState()->getSVal(Callee);
+bool AuditCFNumberCreate::Audit(ExplodedNode* N,GRStateManager&){
+ const CallExpr* CE =
+ cast<CallExpr>(cast<PostStmt>(N->getLocation()).getStmt());
+ const Expr* Callee = CE->getCallee();
+ SVal CallV = N->getState()->getSVal(Callee);
const FunctionDecl* FD = CallV.getAsFunctionDecl();
if (!FD || FD->getIdentifier() != II || CE->getNumArgs()!=3)
return false;
-
+
// Get the value of the "theType" argument.
SVal TheTypeVal = N->getState()->getSVal(CE->getArg(1));
-
+
// FIXME: We really should allow ranges of valid theType values, and
// bifurcate the state appropriately.
nonloc::ConcreteInt* V = dyn_cast<nonloc::ConcreteInt>(&TheTypeVal);
-
+
if (!V)
return false;
-
+
uint64_t NumberKind = V->getValue().getLimitedValue();
Optional<uint64_t> TargetSize = GetCFNumberSize(Ctx, NumberKind);
-
+
// FIXME: In some cases we can emit an error.
if (!TargetSize.isKnown())
return false;
-
+
// Look at the value of the integer being passed by reference. Essentially
// we want to catch cases where the value passed in is not equal to the
// size of the type being created.
SVal TheValueExpr = N->getState()->getSVal(CE->getArg(2));
-
+
// FIXME: Eventually we should handle arbitrary locations. We can do this
// by having an enhanced memory model that does low-level typing.
loc::MemRegionVal* LV = dyn_cast<loc::MemRegionVal>(&TheValueExpr);
if (!LV)
return false;
-
- const TypedRegion* R = dyn_cast<TypedRegion>(LV->getRegion());
- if (!R) return false;
-
- while (const TypedViewRegion* ATR = dyn_cast<TypedViewRegion>(R)) {
- R = dyn_cast<TypedRegion>(ATR->getSuperRegion());
- if (!R) return false;
- }
-
+
+ const TypedRegion* R = dyn_cast<TypedRegion>(LV->getBaseRegion());
+
+ if (!R)
+ return false;
+
QualType T = Ctx.getCanonicalType(R->getValueType(Ctx));
-
+
// FIXME: If the pointee isn't an integer type, should we flag a warning?
// People can do weird stuff with pointers.
-
- if (!T->isIntegerType())
+
+ if (!T->isIntegerType())
return false;
-
+
uint64_t SourceSize = Ctx.getTypeSize(T);
-
+
// CHECK: is SourceSize == TargetSize
-
+
if (SourceSize == TargetSize)
return false;
-
+
AddError(R, CE->getArg(2), N, SourceSize, TargetSize, NumberKind);
-
+
// FIXME: We can actually create an abstract "CFNumber" object that has
// the bits initialized to the provided values.
return SourceSize < TargetSize;
}
-void AuditCFNumberCreate::AddError(const TypedRegion* R, Expr* Ex,
- ExplodedNode<GRState> *N,
+void AuditCFNumberCreate::AddError(const TypedRegion* R, const Expr* Ex,
+ ExplodedNode *N,
uint64_t SourceSize, uint64_t TargetSize,
uint64_t NumberKind) {
-
+
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
os << (SourceSize == 8 ? "An " : "A ")
<< SourceSize << " bit integer is used to initialize a CFNumber "
"object that represents "
<< (TargetSize == 8 ? "an " : "a ")
- << TargetSize << " bit integer. ";
+ << TargetSize << " bit integer. ";
if (SourceSize < TargetSize)
os << (TargetSize - SourceSize)
- << " bits of the CFNumber value will be garbage." ;
+ << " bits of the CFNumber value will be garbage." ;
else
os << (SourceSize - TargetSize)
<< " bits of the input integer will be lost.";
-
+
// Lazily create the BugType object. This will be owned
// by the BugReporter object 'BR' once we call BR.EmitWarning.
if (!BT) BT = new APIMisuse("Bad use of CFNumberCreate");
@@ -460,22 +451,98 @@ void AuditCFNumberCreate::AddError(const TypedRegion* R, Expr* Ex,
}
GRSimpleAPICheck*
-clang::CreateAuditCFNumberCreate(ASTContext& Ctx, BugReporter& BR) {
+clang::CreateAuditCFNumberCreate(ASTContext& Ctx, BugReporter& BR) {
return new AuditCFNumberCreate(Ctx, BR);
}
//===----------------------------------------------------------------------===//
+// CFRetain/CFRelease auditing for null arguments.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VISIBILITY_HIDDEN AuditCFRetainRelease : public GRSimpleAPICheck {
+ APIMisuse *BT;
+
+ // FIXME: Either this should be refactored into GRSimpleAPICheck, or
+ // it should always be passed with a call to Audit. The latter
+ // approach makes this class more stateless.
+ ASTContext& Ctx;
+ IdentifierInfo *Retain, *Release;
+ BugReporter& BR;
+
+public:
+ AuditCFRetainRelease(ASTContext& ctx, BugReporter& br)
+ : BT(0), Ctx(ctx),
+ Retain(&Ctx.Idents.get("CFRetain")), Release(&Ctx.Idents.get("CFRelease")),
+ BR(br){}
+
+ ~AuditCFRetainRelease() {}
+
+ bool Audit(ExplodedNode* N, GRStateManager&);
+};
+} // end anonymous namespace
+
+
+bool AuditCFRetainRelease::Audit(ExplodedNode* N, GRStateManager&) {
+ const CallExpr* CE = cast<CallExpr>(cast<PostStmt>(N->getLocation()).getStmt());
+
+ // If the CallExpr doesn't have exactly 1 argument just give up checking.
+ if (CE->getNumArgs() != 1)
+ return false;
+
+ // Check if we called CFRetain/CFRelease.
+ const GRState* state = N->getState();
+ SVal X = state->getSVal(CE->getCallee());
+ const FunctionDecl* FD = X.getAsFunctionDecl();
+
+ if (!FD)
+ return false;
+
+ const IdentifierInfo *FuncII = FD->getIdentifier();
+ if (!(FuncII == Retain || FuncII == Release))
+ return false;
+
+ // Finally, check if the argument is NULL.
+ // FIXME: We should be able to bifurcate the state here, as a successful
+ // check will result in the value not being NULL afterwards.
+ // FIXME: Need a way to register vistors for the BugReporter. Would like
+ // to benefit from the same diagnostics that regular null dereference
+ // reporting has.
+ if (state->getStateManager().isEqual(state, CE->getArg(0), 0)) {
+ if (!BT)
+ BT = new APIMisuse("null passed to CFRetain/CFRelease");
+
+ const char *description = (FuncII == Retain)
+ ? "Null pointer argument in call to CFRetain"
+ : "Null pointer argument in call to CFRelease";
+
+ RangedBugReport *report = new RangedBugReport(*BT, description, N);
+ report->addRange(CE->getArg(0)->getSourceRange());
+ BR.EmitReport(report);
+ return true;
+ }
+
+ return false;
+}
+
+
+GRSimpleAPICheck*
+clang::CreateAuditCFRetainRelease(ASTContext& Ctx, BugReporter& BR) {
+ return new AuditCFRetainRelease(Ctx, BR);
+}
+
+//===----------------------------------------------------------------------===//
// Check registration.
+//===----------------------------------------------------------------------===//
-void clang::RegisterAppleChecks(GRExprEngine& Eng) {
+void clang::RegisterAppleChecks(GRExprEngine& Eng, const Decl &D) {
ASTContext& Ctx = Eng.getContext();
BugReporter &BR = Eng.getBugReporter();
Eng.AddCheck(CreateBasicObjCFoundationChecks(Ctx, BR),
Stmt::ObjCMessageExprClass);
+ Eng.AddCheck(CreateAuditCFNumberCreate(Ctx, BR), Stmt::CallExprClass);
+ Eng.AddCheck(CreateAuditCFRetainRelease(Ctx, BR), Stmt::CallExprClass);
- Eng.AddCheck(CreateAuditCFNumberCreate(Ctx, BR),
- Stmt::CallExprClass);
-
- RegisterNSErrorChecks(BR, Eng);
+ RegisterNSErrorChecks(BR, Eng, D);
}
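
As a concrete illustration of what the new AuditCFRetainRelease check reports, here is a hypothetical test case (not part of the patch) in which the analyzer can prove the argument is null:

  #include <CoreFoundation/CoreFoundation.h>

  void test_null_retain(void) {
    CFTypeRef obj = 0;   // argument is known to be null on this path
    CFRetain(obj);       // warning: Null pointer argument in call to CFRetain
  }

  void test_null_release(void) {
    CFTypeRef obj = 0;
    CFRelease(obj);      // warning: Null pointer argument in call to CFRelease
  }
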
diff --git a/lib/Analysis/BasicObjCFoundationChecks.h b/lib/Analysis/BasicObjCFoundationChecks.h
index 5c9701ecdd36..1271ae4ab1c0 100644
--- a/lib/Analysis/BasicObjCFoundationChecks.h
+++ b/lib/Analysis/BasicObjCFoundationChecks.h
@@ -25,21 +25,24 @@
#define LLVM_CLANG_ANALYSIS_BASICOBJCFOUNDATIONCHECKS
namespace clang {
-
+
class GRSimpleAPICheck;
class ASTContext;
-class GRStateManager;
+class GRStateManager;
class BugReporter;
class GRExprEngine;
-
-GRSimpleAPICheck* CreateBasicObjCFoundationChecks(ASTContext& Ctx,
+
+GRSimpleAPICheck *CreateBasicObjCFoundationChecks(ASTContext& Ctx,
BugReporter& BR);
-
-GRSimpleAPICheck* CreateAuditCFNumberCreate(ASTContext& Ctx,
+
+GRSimpleAPICheck *CreateAuditCFNumberCreate(ASTContext& Ctx,
BugReporter& BR);
-
-void RegisterNSErrorChecks(BugReporter& BR, GRExprEngine &Eng);
-
+
+GRSimpleAPICheck *CreateAuditCFRetainRelease(ASTContext& Ctx,
+ BugReporter& BR);
+
+void RegisterNSErrorChecks(BugReporter& BR, GRExprEngine &Eng, const Decl &D);
+
} // end clang namespace
#endif
diff --git a/lib/Analysis/BasicStore.cpp b/lib/Analysis/BasicStore.cpp
index 19d641ee9753..a4f451f36490 100644
--- a/lib/Analysis/BasicStore.cpp
+++ b/lib/Analysis/BasicStore.cpp
@@ -13,17 +13,17 @@
#include "clang/AST/ExprObjC.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/PathSensitive/AnalysisContext.h"
#include "clang/Analysis/PathSensitive/GRState.h"
-#include "llvm/ADT/ImmutableMap.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Support/Streams.h"
+#include "llvm/ADT/ImmutableMap.h"
using namespace clang;
-typedef llvm::ImmutableMap<const MemRegion*,SVal> BindingsTy;
+typedef llvm::ImmutableMap<const MemRegion*,SVal> BindingsTy;
namespace {
-
+
class VISIBILITY_HIDDEN BasicStoreSubRegionMap : public SubRegionMap {
public:
BasicStoreSubRegionMap() {}
@@ -32,81 +32,78 @@ public:
return true; // Do nothing. No subregions.
}
};
-
+
class VISIBILITY_HIDDEN BasicStoreManager : public StoreManager {
BindingsTy::Factory VBFactory;
- const MemRegion* SelfRegion;
-
public:
BasicStoreManager(GRStateManager& mgr)
- : StoreManager(mgr),
- VBFactory(mgr.getAllocator()),
- SelfRegion(0) {}
-
+ : StoreManager(mgr), VBFactory(mgr.getAllocator()) {}
+
~BasicStoreManager() {}
SubRegionMap *getSubRegionMap(const GRState *state) {
return new BasicStoreSubRegionMap();
}
- SVal Retrieve(const GRState *state, Loc loc, QualType T = QualType());
+ SValuator::CastResult Retrieve(const GRState *state, Loc loc,
+ QualType T = QualType());
+
+ const GRState *InvalidateRegion(const GRState *state, const MemRegion *R,
+ const Expr *E, unsigned Count);
const GRState *Bind(const GRState *state, Loc L, SVal V) {
return state->makeWithStore(BindInternal(state->getStore(), L, V));
}
- Store scanForIvars(Stmt *B, const Decl* SelfDecl, Store St);
-
- Store BindInternal(Store St, Loc loc, SVal V);
+ Store scanForIvars(Stmt *B, const Decl* SelfDecl,
+ const MemRegion *SelfRegion, Store St);
+
+ Store BindInternal(Store St, Loc loc, SVal V);
Store Remove(Store St, Loc loc);
- Store getInitialStore();
+ Store getInitialStore(const LocationContext *InitLoc);
// FIXME: Investigate what is using this. This method should be removed.
- virtual Loc getLoc(const VarDecl* VD) {
- return ValMgr.makeLoc(MRMgr.getVarRegion(VD));
+ virtual Loc getLoc(const VarDecl* VD, const LocationContext *LC) {
+ return ValMgr.makeLoc(MRMgr.getVarRegion(VD, LC));
}
-
+
const GRState *BindCompoundLiteral(const GRState *state,
const CompoundLiteralExpr* cl,
SVal val) {
return state;
}
-
- SVal getLValueVar(const GRState *state, const VarDecl* VD);
- SVal getLValueString(const GRState *state, const StringLiteral* S);
- SVal getLValueCompoundLiteral(const GRState *state,
- const CompoundLiteralExpr* CL);
- SVal getLValueIvar(const GRState *state, const ObjCIvarDecl* D, SVal Base);
- SVal getLValueField(const GRState *state, SVal Base, const FieldDecl* D);
- SVal getLValueElement(const GRState *state, QualType elementType,
- SVal Base, SVal Offset);
+
+ SVal getLValueVar(const VarDecl *VD, const LocationContext *LC);
+ SVal getLValueString(const StringLiteral *S);
+ SVal getLValueCompoundLiteral(const CompoundLiteralExpr *CL);
+ SVal getLValueIvar(const ObjCIvarDecl* D, SVal Base);
+ SVal getLValueField(const FieldDecl *D, SVal Base);
+ SVal getLValueElement(QualType elementType, SVal Offset, SVal Base);
/// ArrayToPointer - Used by GRExprEngine::VistCast to handle implicit
/// conversions between arrays and pointers.
SVal ArrayToPointer(Loc Array) { return Array; }
- /// getSelfRegion - Returns the region for the 'self' (Objective-C) or
- /// 'this' object (C++). When used when analyzing a normal function this
- /// method returns NULL.
- const MemRegion* getSelfRegion(Store) { return SelfRegion; }
-
/// RemoveDeadBindings - Scans a BasicStore of 'state' for dead values.
- /// It returns a new Store with these values removed.
- Store RemoveDeadBindings(const GRState *state, Stmt* Loc,
- SymbolReaper& SymReaper,
- llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
+ /// It updatees the GRState object in place with the values removed.
+ void RemoveDeadBindings(GRState &state, Stmt* Loc, SymbolReaper& SymReaper,
+ llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
void iterBindings(Store store, BindingsHandler& f);
- const GRState *BindDecl(const GRState *state, const VarDecl* VD, SVal InitVal) {
- return state->makeWithStore(BindDeclInternal(state->getStore(),VD, &InitVal));
+ const GRState *BindDecl(const GRState *state, const VarDecl *VD,
+ const LocationContext *LC, SVal InitVal) {
+ return state->makeWithStore(BindDeclInternal(state->getStore(),VD, LC,
+ &InitVal));
}
- const GRState *BindDeclWithNoInit(const GRState *state, const VarDecl* VD) {
- return state->makeWithStore(BindDeclInternal(state->getStore(), VD, 0));
+ const GRState *BindDeclWithNoInit(const GRState *state, const VarDecl *VD,
+ const LocationContext *LC) {
+ return state->makeWithStore(BindDeclInternal(state->getStore(), VD, LC, 0));
}
- Store BindDeclInternal(Store store, const VarDecl* VD, SVal* InitVal);
+ Store BindDeclInternal(Store store, const VarDecl *VD,
+ const LocationContext *LC, SVal *InitVal);
static inline BindingsTy GetBindings(Store store) {
return BindingsTy(static_cast<const BindingsTy::TreeTy*>(store));
@@ -118,7 +115,7 @@ public:
private:
ASTContext& getContext() { return StateMgr.getContext(); }
};
-
+
} // end anonymous namespace
@@ -126,23 +123,21 @@ StoreManager* clang::CreateBasicStoreManager(GRStateManager& StMgr) {
return new BasicStoreManager(StMgr);
}
-SVal BasicStoreManager::getLValueVar(const GRState *state, const VarDecl* VD) {
- return ValMgr.makeLoc(MRMgr.getVarRegion(VD));
+SVal BasicStoreManager::getLValueVar(const VarDecl* VD,
+ const LocationContext *LC) {
+ return ValMgr.makeLoc(MRMgr.getVarRegion(VD, LC));
}
-SVal BasicStoreManager::getLValueString(const GRState *state,
- const StringLiteral* S) {
+SVal BasicStoreManager::getLValueString(const StringLiteral* S) {
return ValMgr.makeLoc(MRMgr.getStringRegion(S));
}
-SVal BasicStoreManager::getLValueCompoundLiteral(const GRState *state,
- const CompoundLiteralExpr* CL){
+SVal BasicStoreManager::getLValueCompoundLiteral(const CompoundLiteralExpr* CL){
return ValMgr.makeLoc(MRMgr.getCompoundLiteralRegion(CL));
}
-SVal BasicStoreManager::getLValueIvar(const GRState *state, const ObjCIvarDecl* D,
- SVal Base) {
-
+SVal BasicStoreManager::getLValueIvar(const ObjCIvarDecl* D, SVal Base) {
+
if (Base.isUnknownOrUndef())
return Base;
@@ -150,23 +145,20 @@ SVal BasicStoreManager::getLValueIvar(const GRState *state, const ObjCIvarDecl*
if (isa<loc::MemRegionVal>(BaseL)) {
const MemRegion *BaseR = cast<loc::MemRegionVal>(BaseL).getRegion();
-
- if (BaseR == SelfRegion)
- return ValMgr.makeLoc(MRMgr.getObjCIvarRegion(D, BaseR));
+ return ValMgr.makeLoc(MRMgr.getObjCIvarRegion(D, BaseR));
}
-
+
return UnknownVal();
}
-SVal BasicStoreManager::getLValueField(const GRState *state, SVal Base,
- const FieldDecl* D) {
+SVal BasicStoreManager::getLValueField(const FieldDecl* D, SVal Base) {
if (Base.isUnknownOrUndef())
return Base;
-
- Loc BaseL = cast<Loc>(Base);
+
+ Loc BaseL = cast<Loc>(Base);
const MemRegion* BaseR = 0;
-
+
switch(BaseL.getSubKind()) {
case loc::GotoLabelKind:
return UndefinedVal();
@@ -174,7 +166,7 @@ SVal BasicStoreManager::getLValueField(const GRState *state, SVal Base,
case loc::MemRegionKind:
BaseR = cast<loc::MemRegionVal>(BaseL).getRegion();
break;
-
+
case loc::ConcreteIntKind:
// While these seem funny, this can happen through casts.
// FIXME: What we should return is the field offset. For example,
@@ -186,28 +178,27 @@ SVal BasicStoreManager::getLValueField(const GRState *state, SVal Base,
assert ("Unhandled Base.");
return Base;
}
-
+
return ValMgr.makeLoc(MRMgr.getFieldRegion(D, BaseR));
}
-SVal BasicStoreManager::getLValueElement(const GRState *state,
- QualType elementType,
- SVal Base, SVal Offset) {
+SVal BasicStoreManager::getLValueElement(QualType elementType,
+ SVal Offset, SVal Base) {
if (Base.isUnknownOrUndef())
return Base;
-
- Loc BaseL = cast<Loc>(Base);
+
+ Loc BaseL = cast<Loc>(Base);
const MemRegion* BaseR = 0;
-
+
switch(BaseL.getSubKind()) {
case loc::GotoLabelKind:
// Technically we can get here if people do funny things with casts.
return UndefinedVal();
-
+
case loc::MemRegionKind: {
const MemRegion *R = cast<loc::MemRegionVal>(BaseL).getRegion();
-
+
if (isa<ElementRegion>(R)) {
// int x;
// char* y = (char*) &x;
@@ -215,12 +206,12 @@ SVal BasicStoreManager::getLValueElement(const GRState *state,
// y[0] = 'a';
return Base;
}
-
+
if (isa<TypedRegion>(R) || isa<SymbolicRegion>(R)) {
BaseR = R;
break;
}
-
+
break;
}
@@ -230,13 +221,13 @@ SVal BasicStoreManager::getLValueElement(const GRState *state,
// add the field offset to the integer value. That way funny things
// like this work properly: &(((struct foo *) 0xa)->f)
return Base;
-
+
default:
assert ("Unhandled Base.");
return Base;
}
-
- if (BaseR) {
+
+ if (BaseR) {
return ValMgr.makeLoc(MRMgr.getElementRegion(elementType, UnknownVal(),
BaseR, getContext()));
}
@@ -246,37 +237,38 @@ SVal BasicStoreManager::getLValueElement(const GRState *state,
static bool isHigherOrderRawPtr(QualType T, ASTContext &C) {
bool foundPointer = false;
- while (1) {
- const PointerType *PT = T->getAsPointerType();
+ while (1) {
+ const PointerType *PT = T->getAs<PointerType>();
if (!PT) {
if (!foundPointer)
return false;
-
+
// intptr_t* or intptr_t**, etc?
if (T->isIntegerType() && C.getTypeSize(T) == C.getTypeSize(C.VoidPtrTy))
return true;
-
+
QualType X = C.getCanonicalType(T).getUnqualifiedType();
return X == C.VoidTy;
}
-
+
foundPointer = true;
T = PT->getPointeeType();
- }
+ }
}
-
-SVal BasicStoreManager::Retrieve(const GRState *state, Loc loc, QualType T) {
-
+
+SValuator::CastResult BasicStoreManager::Retrieve(const GRState *state,
+ Loc loc, QualType T) {
+
if (isa<UnknownVal>(loc))
- return UnknownVal();
-
- assert (!isa<UndefinedVal>(loc));
-
+ return SValuator::CastResult(state, UnknownVal());
+
+ assert(!isa<UndefinedVal>(loc));
+
switch (loc.getSubKind()) {
case loc::MemRegionKind: {
const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
-
+
if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
// Just support void**, void***, intptr_t*, intptr_t**, etc., for now.
// This is needed to handle OSCompareAndSwapPtr() and friends.
@@ -284,42 +276,46 @@ SVal BasicStoreManager::Retrieve(const GRState *state, Loc loc, QualType T) {
QualType T = ER->getLocationType(Ctx);
if (!isHigherOrderRawPtr(T, Ctx))
- return UnknownVal();
-
+ return SValuator::CastResult(state, UnknownVal());
+
// FIXME: Should check for element 0.
// Otherwise, strip the element region.
R = ER->getSuperRegion();
}
-
+
if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
- return UnknownVal();
-
+ return SValuator::CastResult(state, UnknownVal());
+
BindingsTy B = GetBindings(state->getStore());
- BindingsTy::data_type* T = B.lookup(R);
- return T ? *T : UnknownVal();
+ BindingsTy::data_type *Val = B.lookup(R);
+
+ if (!Val)
+ break;
+
+ return CastRetrievedVal(*Val, state, cast<TypedRegion>(R), T);
}
-
+
case loc::ConcreteIntKind:
// Some clients may call GetSVal with such an option simply because
// they are doing a quick scan through their Locs (potentially to
// invalidate their bindings). Just return Undefined.
- return UndefinedVal();
-
+ return SValuator::CastResult(state, UndefinedVal());
+
default:
assert (false && "Invalid Loc.");
break;
}
-
- return UnknownVal();
+
+ return SValuator::CastResult(state, UnknownVal());
}
-
-Store BasicStoreManager::BindInternal(Store store, Loc loc, SVal V) {
+
+Store BasicStoreManager::BindInternal(Store store, Loc loc, SVal V) {
if (isa<loc::ConcreteInt>(loc))
return store;
const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
ASTContext &C = StateMgr.getContext();
-
+
// Special case: handle store of pointer values (Loc) to pointers via
// a cast to intXX_t*, void*, etc. This is needed to handle
// OSCompareAndSwap32Barrier/OSCompareAndSwap64Barrier.
@@ -327,19 +323,21 @@ Store BasicStoreManager::BindInternal(Store store, Loc loc, SVal V) {
if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
// FIXME: Should check for index 0.
QualType T = ER->getLocationType(C);
-
+
if (isHigherOrderRawPtr(T, C))
R = ER->getSuperRegion();
- }
-
+ }
+
if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
return store;
-
- // We only track bindings to self.ivar.
- if (const ObjCIvarRegion *IVR = dyn_cast<ObjCIvarRegion>(R))
- if (IVR->getSuperRegion() != SelfRegion)
- return store;
-
+
+ const TypedRegion *TyR = cast<TypedRegion>(R);
+
+ // Do not bind to arrays. We need to explicitly check for this so that
+ // we do not encounter any weirdness of trying to load/store from arrays.
+ if (TyR->isBoundable() && TyR->getValueType(C)->isArrayType())
+ return store;
+
if (nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(&V)) {
// Only convert 'V' to a location iff the underlying region type
// is a location as well.
@@ -347,11 +345,8 @@ Store BasicStoreManager::BindInternal(Store store, Loc loc, SVal V) {
// a pointer. We may wish to flag a type error here if the types
// are incompatible. This may also cause lots of breakage
// elsewhere. Food for thought.
- if (const TypedRegion *TyR = dyn_cast<TypedRegion>(R)) {
- if (TyR->isBoundable() &&
- Loc::IsLocType(TyR->getValueType(C)))
- V = X->getLoc();
- }
+ if (TyR->isBoundable() && Loc::IsLocType(TyR->getValueType(C)))
+ V = X->getLoc();
}
BindingsTy B = GetBindings(store);
@@ -364,10 +359,10 @@ Store BasicStoreManager::Remove(Store store, Loc loc) {
switch (loc.getSubKind()) {
case loc::MemRegionKind: {
const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
-
+
if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
return store;
-
+
return VBFactory.Remove(GetBindings(store), R).getRoot();
}
default:
@@ -376,16 +371,15 @@ Store BasicStoreManager::Remove(Store store, Loc loc) {
}
}
-Store
-BasicStoreManager::RemoveDeadBindings(const GRState *state, Stmt* Loc,
+void
+BasicStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
SymbolReaper& SymReaper,
- llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
+ llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
{
-
- Store store = state->getStore();
+ Store store = state.getStore();
BindingsTy B = GetBindings(store);
typedef SVal::symbol_iterator symbol_iterator;
-
+
// Iterate over the variable bindings.
for (BindingsTy::iterator I=B.begin(), E=B.end(); I!=E ; ++I) {
if (const VarRegion *VR = dyn_cast<VarRegion>(I.getKey())) {
@@ -399,20 +393,20 @@ BasicStoreManager::RemoveDeadBindings(const GRState *state, Stmt* Loc,
}
else
continue;
-
+
// Mark the bindings in the data as live.
SVal X = I.getData();
for (symbol_iterator SI=X.symbol_begin(), SE=X.symbol_end(); SI!=SE; ++SI)
SymReaper.markLive(*SI);
}
-
+
// Scan for live variables and live symbols.
llvm::SmallPtrSet<const MemRegion*, 10> Marked;
-
+
while (!RegionRoots.empty()) {
const MemRegion* MR = RegionRoots.back();
RegionRoots.pop_back();
-
+
while (MR) {
if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(MR)) {
SymReaper.markLive(SymR->getSymbol());
@@ -421,17 +415,17 @@ BasicStoreManager::RemoveDeadBindings(const GRState *state, Stmt* Loc,
else if (isa<VarRegion>(MR) || isa<ObjCIvarRegion>(MR)) {
if (Marked.count(MR))
break;
-
+
Marked.insert(MR);
- SVal X = Retrieve(state, loc::MemRegionVal(MR));
-
+ SVal X = Retrieve(&state, loc::MemRegionVal(MR)).getSVal();
+
// FIXME: We need to handle symbols nested in region definitions.
for (symbol_iterator SI=X.symbol_begin(),SE=X.symbol_end();SI!=SE;++SI)
SymReaper.markLive(*SI);
-
+
if (!isa<loc::MemRegionVal>(X))
break;
-
+
const loc::MemRegionVal& LVD = cast<loc::MemRegionVal>(X);
RegionRoots.push_back(LVD.getRegion());
break;
@@ -442,30 +436,32 @@ BasicStoreManager::RemoveDeadBindings(const GRState *state, Stmt* Loc,
break;
}
}
-
- // Remove dead variable bindings.
+
+ // Remove dead variable bindings.
for (BindingsTy::iterator I=B.begin(), E=B.end(); I!=E ; ++I) {
const MemRegion* R = I.getKey();
-
+
if (!Marked.count(R)) {
store = Remove(store, ValMgr.makeLoc(R));
SVal X = I.getData();
-
+
for (symbol_iterator SI=X.symbol_begin(), SE=X.symbol_end(); SI!=SE; ++SI)
SymReaper.maybeDead(*SI);
}
}
- return store;
+ // Write the store back.
+ state.setStore(store);
}
-Store BasicStoreManager::scanForIvars(Stmt *B, const Decl* SelfDecl, Store St) {
+Store BasicStoreManager::scanForIvars(Stmt *B, const Decl* SelfDecl,
+ const MemRegion *SelfRegion, Store St) {
for (Stmt::child_iterator CI=B->child_begin(), CE=B->child_end();
CI != CE; ++CI) {
-
+
if (!*CI)
continue;
-
+
// Check if the statement is an ivar reference. We only
// care about self.ivar.
if (ObjCIvarRefExpr *IV = dyn_cast<ObjCIvarRefExpr>(*CI)) {
@@ -473,25 +469,25 @@ Store BasicStoreManager::scanForIvars(Stmt *B, const Decl* SelfDecl, Store St) {
if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Base)) {
if (DR->getDecl() == SelfDecl) {
const MemRegion *IVR = MRMgr.getObjCIvarRegion(IV->getDecl(),
- SelfRegion);
- SVal X = ValMgr.getRegionValueSymbolVal(IVR);
+ SelfRegion);
+ SVal X = ValMgr.getRegionValueSymbolVal(IVR);
St = BindInternal(St, ValMgr.makeLoc(IVR), X);
}
}
}
else
- St = scanForIvars(*CI, SelfDecl, St);
+ St = scanForIvars(*CI, SelfDecl, SelfRegion, St);
}
-
+
return St;
}
-Store BasicStoreManager::getInitialStore() {
+Store BasicStoreManager::getInitialStore(const LocationContext *InitLoc) {
// The LiveVariables information already has a compilation of all VarDecls
// used in the function. Iterate through this set, and "symbolicate"
// any VarDecl whose value originally comes from outside the function.
typedef LiveVariables::AnalysisDataTy LVDataTy;
- LVDataTy& D = StateMgr.getLiveVariables().getAnalysisData();
+ LVDataTy& D = InitLoc->getLiveVariables()->getAnalysisData();
Store St = VBFactory.GetEmptyMap().getRoot();
for (LVDataTy::decl_iterator I=D.begin_decl(), E=D.end_decl(); I != E; ++I) {
@@ -499,38 +495,35 @@ Store BasicStoreManager::getInitialStore() {
// Handle implicit parameters.
if (ImplicitParamDecl* PD = dyn_cast<ImplicitParamDecl>(ND)) {
- const Decl& CD = StateMgr.getCodeDecl();
+ const Decl& CD = *InitLoc->getDecl();
if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(&CD)) {
if (MD->getSelfDecl() == PD) {
- // Create a region for "self".
- assert (SelfRegion == 0);
- SelfRegion = MRMgr.getObjCObjectRegion(MD->getClassInterface(),
- MRMgr.getHeapRegion());
-
- St = BindInternal(St, ValMgr.makeLoc(MRMgr.getVarRegion(PD)),
+ // FIXME: Just use a symbolic region, and remove ObjCObjectRegion
+ // entirely.
+ const ObjCObjectRegion *SelfRegion =
+ MRMgr.getObjCObjectRegion(MD->getClassInterface(),
+ MRMgr.getHeapRegion());
+
+ St = BindInternal(St, ValMgr.makeLoc(MRMgr.getVarRegion(PD, InitLoc)),
ValMgr.makeLoc(SelfRegion));
-
+
// Scan the method for ivar references. While this requires an
// entire AST scan, the cost should not be high in practice.
- St = scanForIvars(MD->getBody(), PD, St);
+ St = scanForIvars(MD->getBody(), PD, SelfRegion, St);
}
}
}
else if (VarDecl* VD = dyn_cast<VarDecl>(ND)) {
- // Punt on static variables for now.
- if (VD->getStorageClass() == VarDecl::Static)
- continue;
-
// Only handle simple types that we can symbolicate.
if (!SymbolManager::canSymbolicate(VD->getType()))
continue;
// Initialize globals and parameters to symbolic values.
// Initialize local variables to undefined.
- const MemRegion *R = ValMgr.getRegionManager().getVarRegion(VD);
- SVal X = R->hasGlobalsOrParametersStorage()
- ? ValMgr.getRegionValueSymbolVal(R)
- : UndefinedVal();
+ const MemRegion *R = ValMgr.getRegionManager().getVarRegion(VD, InitLoc);
+ SVal X = UndefinedVal();
+ if (R->hasGlobalsOrParametersStorage())
+ X = ValMgr.getRegionValueSymbolVal(R);
St = BindInternal(St, ValMgr.makeLoc(R), X);
}
@@ -539,10 +532,11 @@ Store BasicStoreManager::getInitialStore() {
}
Store BasicStoreManager::BindDeclInternal(Store store, const VarDecl* VD,
+ const LocationContext *LC,
SVal* InitVal) {
-
+
BasicValueFactory& BasicVals = StateMgr.getBasicVals();
-
+
// BasicStore does not model arrays and structs.
if (VD->getType()->isArrayType() || VD->getType()->isStructureType())
return store;
@@ -557,28 +551,28 @@ Store BasicStoreManager::BindDeclInternal(Store store, const VarDecl* VD,
// Static global variables should not be visited here.
assert(!(VD->getStorageClass() == VarDecl::Static &&
VD->isFileVarDecl()));
-
+
// Process static variables.
if (VD->getStorageClass() == VarDecl::Static) {
// C99: 6.7.8 Initialization
// If an object that has static storage duration is not initialized
- // explicitly, then:
- // —if it has pointer type, it is initialized to a null pointer;
- // —if it has arithmetic type, it is initialized to (positive or
+ // explicitly, then:
+ // —if it has pointer type, it is initialized to a null pointer;
+ // —if it has arithmetic type, it is initialized to (positive or
// unsigned) zero;
if (!InitVal) {
QualType T = VD->getType();
if (Loc::IsLocType(T))
- store = BindInternal(store, getLoc(VD),
+ store = BindInternal(store, getLoc(VD, LC),
loc::ConcreteInt(BasicVals.getValue(0, T)));
else if (T->isIntegerType())
- store = BindInternal(store, getLoc(VD),
+ store = BindInternal(store, getLoc(VD, LC),
nonloc::ConcreteInt(BasicVals.getValue(0, T)));
else {
// assert(0 && "ignore other types of variables");
}
} else {
- store = BindInternal(store, getLoc(VD), *InitVal);
+ store = BindInternal(store, getLoc(VD, LC), *InitVal);
}
}
} else {
@@ -586,7 +580,7 @@ Store BasicStoreManager::BindDeclInternal(Store store, const VarDecl* VD,
QualType T = VD->getType();
if (ValMgr.getSymbolManager().canSymbolicate(T)) {
SVal V = InitVal ? *InitVal : UndefinedVal();
- store = BindInternal(store, getLoc(VD), V);
+ store = BindInternal(store, getLoc(VD, LC), V);
}
}
@@ -595,30 +589,48 @@ Store BasicStoreManager::BindDeclInternal(Store store, const VarDecl* VD,
void BasicStoreManager::print(Store store, llvm::raw_ostream& Out,
const char* nl, const char *sep) {
-
+
BindingsTy B = GetBindings(store);
Out << "Variables:" << nl;
-
+
bool isFirst = true;
-
+
for (BindingsTy::iterator I=B.begin(), E=B.end(); I != E; ++I) {
if (isFirst)
isFirst = false;
else
Out << nl;
-
- Out << ' ' << I.getKey() << " : ";
- I.getData().print(Out);
+
+ Out << ' ' << I.getKey() << " : " << I.getData();
}
}
void BasicStoreManager::iterBindings(Store store, BindingsHandler& f) {
BindingsTy B = GetBindings(store);
-
+
for (BindingsTy::iterator I=B.begin(), E=B.end(); I != E; ++I)
f.HandleBinding(*this, store, I.getKey(), I.getData());
}
StoreManager::BindingsHandler::~BindingsHandler() {}
+
+//===----------------------------------------------------------------------===//
+// Binding invalidation.
+//===----------------------------------------------------------------------===//
+
+const GRState *BasicStoreManager::InvalidateRegion(const GRState *state,
+ const MemRegion *R,
+ const Expr *E,
+ unsigned Count) {
+ R = R->getBaseRegion();
+
+ if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
+ return state;
+
+ QualType T = cast<TypedRegion>(R)->getValueType(R->getContext());
+ SVal V = ValMgr.getConjuredSymbolVal(R, E, T, Count);
+ return Bind(state, loc::MemRegionVal(R), V);
+}
+
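
The newly added BasicStoreManager::InvalidateRegion conjures a fresh symbolic value for a region and rebinds it, which is how the store forgets what it knew about a variable once something outside the analyzer's view may have written to it. A hypothetical example of the situation this models, assuming the engine invalidates regions passed by pointer to functions it cannot see (that wiring is not part of this file):

  void unknown_function(int *p);   // definition not visible to the analyzer

  int test_invalidation(void) {
    int x = 1;
    unknown_function(&x);          // x's binding is invalidated: a conjured
                                   // symbol replaces the constant 1
    return x;                      // the analyzer no longer assumes x == 1
  }
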
diff --git a/lib/Analysis/BasicValueFactory.cpp b/lib/Analysis/BasicValueFactory.cpp
index 72ad0a5ed8f1..b33c277f86f9 100644
--- a/lib/Analysis/BasicValueFactory.cpp
+++ b/lib/Analysis/BasicValueFactory.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
// This file defines BasicValueFactory, a class that manages the lifetime
-// of APSInt objects and symbolic constraints used by GRExprEngine
+// of APSInt objects and symbolic constraints used by GRExprEngine
// and related classes.
//
//===----------------------------------------------------------------------===//
@@ -17,12 +17,19 @@
using namespace clang;
-void CompoundValData::Profile(llvm::FoldingSetNodeID& ID, QualType T,
+void CompoundValData::Profile(llvm::FoldingSetNodeID& ID, QualType T,
llvm::ImmutableList<SVal> L) {
T.Profile(ID);
ID.AddPointer(L.getInternalPointer());
}
+void LazyCompoundValData::Profile(llvm::FoldingSetNodeID& ID,
+ const GRState *state,
+ const TypedRegion *region) {
+ ID.AddPointer(state);
+ ID.AddPointer(region);
+}
+
typedef std::pair<SVal, uintptr_t> SValData;
typedef std::pair<SVal, SVal> SValPair;
@@ -33,7 +40,7 @@ template<> struct FoldingSetTrait<SValData> {
ID.AddPointer( (void*) X.second);
}
};
-
+
template<> struct FoldingSetTrait<SValPair> {
static inline void Profile(const SValPair& X, llvm::FoldingSetNodeID& ID) {
X.first.Profile(ID);
@@ -54,8 +61,8 @@ BasicValueFactory::~BasicValueFactory() {
// frees an aux. memory allocated to represent very large constants.
for (APSIntSetTy::iterator I=APSIntSet.begin(), E=APSIntSet.end(); I!=E; ++I)
I->getValue().~APSInt();
-
- delete (PersistentSValsTy*) PersistentSVals;
+
+ delete (PersistentSValsTy*) PersistentSVals;
delete (PersistentSValPairsTy*) PersistentSValPairs;
}
@@ -63,16 +70,16 @@ const llvm::APSInt& BasicValueFactory::getValue(const llvm::APSInt& X) {
llvm::FoldingSetNodeID ID;
void* InsertPos;
typedef llvm::FoldingSetNodeWrapper<llvm::APSInt> FoldNodeTy;
-
+
X.Profile(ID);
FoldNodeTy* P = APSIntSet.FindNodeOrInsertPos(ID, InsertPos);
-
- if (!P) {
+
+ if (!P) {
P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
new (P) FoldNodeTy(X);
APSIntSet.InsertNode(P, InsertPos);
}
-
+
return *P;
}
@@ -85,22 +92,22 @@ const llvm::APSInt& BasicValueFactory::getValue(const llvm::APInt& X,
const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, unsigned BitWidth,
bool isUnsigned) {
llvm::APSInt V(BitWidth, isUnsigned);
- V = X;
+ V = X;
return getValue(V);
}
const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, QualType T) {
-
+
unsigned bits = Ctx.getTypeSize(T);
llvm::APSInt V(bits, T->isUnsignedIntegerType() || Loc::IsLocType(T));
V = X;
return getValue(V);
}
-const CompoundValData*
+const CompoundValData*
BasicValueFactory::getCompoundValData(QualType T,
llvm::ImmutableList<SVal> Vals) {
-
+
llvm::FoldingSetNodeID ID;
CompoundValData::Profile(ID, T, Vals);
void* InsertPos;
@@ -116,91 +123,110 @@ BasicValueFactory::getCompoundValData(QualType T,
return D;
}
+const LazyCompoundValData*
+BasicValueFactory::getLazyCompoundValData(const GRState *state,
+ const TypedRegion *region) {
+ llvm::FoldingSetNodeID ID;
+ LazyCompoundValData::Profile(ID, state, region);
+ void* InsertPos;
+
+ LazyCompoundValData *D =
+ LazyCompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!D) {
+ D = (LazyCompoundValData*) BPAlloc.Allocate<LazyCompoundValData>();
+ new (D) LazyCompoundValData(state, region);
+ LazyCompoundValDataSet.InsertNode(D, InsertPos);
+ }
+
+ return D;
+}
+
const llvm::APSInt*
BasicValueFactory::EvaluateAPSInt(BinaryOperator::Opcode Op,
const llvm::APSInt& V1, const llvm::APSInt& V2) {
-
+
switch (Op) {
default:
assert (false && "Invalid Opcode.");
-
+
case BinaryOperator::Mul:
return &getValue( V1 * V2 );
-
+
case BinaryOperator::Div:
return &getValue( V1 / V2 );
-
+
case BinaryOperator::Rem:
return &getValue( V1 % V2 );
-
+
case BinaryOperator::Add:
return &getValue( V1 + V2 );
-
+
case BinaryOperator::Sub:
return &getValue( V1 - V2 );
-
+
case BinaryOperator::Shl: {
// FIXME: This logic should probably go higher up, where we can
// test these conditions symbolically.
-
+
// FIXME: Expand these checks to include all undefined behavior.
-
+
if (V2.isSigned() && V2.isNegative())
return NULL;
-
+
uint64_t Amt = V2.getZExtValue();
-
+
if (Amt > V1.getBitWidth())
return NULL;
-
+
return &getValue( V1.operator<<( (unsigned) Amt ));
}
-
+
case BinaryOperator::Shr: {
-
+
// FIXME: This logic should probably go higher up, where we can
// test these conditions symbolically.
-
+
// FIXME: Expand these checks to include all undefined behavior.
-
+
if (V2.isSigned() && V2.isNegative())
return NULL;
-
+
uint64_t Amt = V2.getZExtValue();
-
+
if (Amt > V1.getBitWidth())
return NULL;
-
+
return &getValue( V1.operator>>( (unsigned) Amt ));
}
-
+
case BinaryOperator::LT:
return &getTruthValue( V1 < V2 );
-
+
case BinaryOperator::GT:
return &getTruthValue( V1 > V2 );
-
+
case BinaryOperator::LE:
return &getTruthValue( V1 <= V2 );
-
+
case BinaryOperator::GE:
return &getTruthValue( V1 >= V2 );
-
+
case BinaryOperator::EQ:
return &getTruthValue( V1 == V2 );
-
+
case BinaryOperator::NE:
return &getTruthValue( V1 != V2 );
-
+
// Note: LAnd, LOr, Comma are handled specially by higher-level logic.
-
+
case BinaryOperator::And:
return &getValue( V1 & V2 );
-
+
case BinaryOperator::Or:
return &getValue( V1 | V2 );
-
+
case BinaryOperator::Xor:
return &getValue( V1 ^ V2 );
}
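The Shl and Shr cases above deliberately refuse to fold when the shift amount is negative or wider than the value being shifted, returning NULL so the caller keeps the value symbolic instead of committing to an undefined result. A minimal plain-C++ sketch of that guard, using a hypothetical foldShl helper (note that APSInt tolerates shifting by exactly the bit width, while built-in integers do not, so the sketch rejects that case as well):

#include <cstdint>
#include <cstdio>
#include <optional>

// Hypothetical foldShl: fold V1 << Amt only when the amount is in range,
// mirroring the NULL returns above. Built-in integers, unlike APSInt, make
// a shift by the full bit width undefined, so that case is rejected too.
std::optional<uint64_t> foldShl(uint64_t V1, int64_t Amt) {
  const unsigned BitWidth = 64;
  if (Amt < 0)                                  // negative shift amount
    return std::nullopt;
  if (static_cast<uint64_t>(Amt) >= BitWidth)   // shift wider than the value
    return std::nullopt;
  return V1 << static_cast<unsigned>(Amt);
}

int main() {
  if (auto R = foldShl(1, 3))
    std::printf("1 << 3 folds to %llu\n", (unsigned long long) *R);
  if (!foldShl(1, -2))
    std::printf("1 << -2 is left symbolic\n");
  return 0;
}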
@@ -209,21 +235,21 @@ BasicValueFactory::EvaluateAPSInt(BinaryOperator::Opcode Op,
const std::pair<SVal, uintptr_t>&
BasicValueFactory::getPersistentSValWithData(const SVal& V, uintptr_t Data) {
-
+
// Lazily create the folding set.
if (!PersistentSVals) PersistentSVals = new PersistentSValsTy();
-
+
llvm::FoldingSetNodeID ID;
void* InsertPos;
V.Profile(ID);
ID.AddPointer((void*) Data);
-
+
PersistentSValsTy& Map = *((PersistentSValsTy*) PersistentSVals);
-
+
typedef llvm::FoldingSetNodeWrapper<SValData> FoldNodeTy;
FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
-
- if (!P) {
+
+ if (!P) {
P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
new (P) FoldNodeTy(std::make_pair(V, Data));
Map.InsertNode(P, InsertPos);
@@ -234,31 +260,31 @@ BasicValueFactory::getPersistentSValWithData(const SVal& V, uintptr_t Data) {
const std::pair<SVal, SVal>&
BasicValueFactory::getPersistentSValPair(const SVal& V1, const SVal& V2) {
-
+
// Lazily create the folding set.
if (!PersistentSValPairs) PersistentSValPairs = new PersistentSValPairsTy();
-
+
llvm::FoldingSetNodeID ID;
void* InsertPos;
V1.Profile(ID);
V2.Profile(ID);
-
+
PersistentSValPairsTy& Map = *((PersistentSValPairsTy*) PersistentSValPairs);
-
+
typedef llvm::FoldingSetNodeWrapper<SValPair> FoldNodeTy;
FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
-
- if (!P) {
+
+ if (!P) {
P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
new (P) FoldNodeTy(std::make_pair(V1, V2));
Map.InsertNode(P, InsertPos);
}
-
+
return P->getValue();
}
const SVal* BasicValueFactory::getPersistentSVal(SVal X) {
return &getPersistentSValWithData(X, 0).first;
-}
+}
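Each factory method in this file follows the same FoldingSet uniquing pattern: profile the identity of the requested object into a FoldingSetNodeID, probe the set with FindNodeOrInsertPos, and only when no equivalent node exists allocate one from the BumpPtrAllocator and insert it. A minimal sketch of that pattern, assuming the LLVM headers are available; the PairData and PairDataFactory names are invented stand-ins for CompoundValData, LazyCompoundValData, and BasicValueFactory:

#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/Allocator.h"
#include <new>

namespace {
// Hypothetical uniqued data class, standing in for CompoundValData and
// LazyCompoundValData.
class PairData : public llvm::FoldingSetNode {
  const void *First, *Second;
public:
  PairData(const void *F, const void *S) : First(F), Second(S) {}

  static void Profile(llvm::FoldingSetNodeID &ID, const void *F,
                      const void *S) {
    ID.AddPointer(F);
    ID.AddPointer(S);
  }
  // FoldingSet re-profiles nodes already in the set through this overload.
  void Profile(llvm::FoldingSetNodeID &ID) const {
    Profile(ID, First, Second);
  }
};

// Hypothetical factory, standing in for BasicValueFactory.
class PairDataFactory {
  llvm::FoldingSet<PairData> Set;
  llvm::BumpPtrAllocator BPAlloc;
public:
  // Returns the unique PairData for (F, S), creating it on first request.
  const PairData *get(const void *F, const void *S) {
    llvm::FoldingSetNodeID ID;
    PairData::Profile(ID, F, S);
    void *InsertPos;

    PairData *D = Set.FindNodeOrInsertPos(ID, InsertPos);
    if (!D) {
      D = (PairData *) BPAlloc.Allocate<PairData>();
      new (D) PairData(F, S);
      Set.InsertNode(D, InsertPos);
    }
    return D;
  }
};
} // end anonymous namespace

Because every distinct key maps to exactly one node for the lifetime of the factory, callers can compare the returned pointers directly, which is what keeps SVal comparison and hashing cheap.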
diff --git a/lib/Analysis/BugReporter.cpp b/lib/Analysis/BugReporter.cpp
index 3db96ca9eacb..8235f4acb179 100644
--- a/lib/Analysis/BugReporter.cpp
+++ b/lib/Analysis/BugReporter.cpp
@@ -15,7 +15,7 @@
#include "clang/Analysis/PathSensitive/BugReporter.h"
#include "clang/Analysis/PathSensitive/GRExprEngine.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/CFG.h"
+#include "clang/Analysis/CFG.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/StmtObjC.h"
@@ -40,36 +40,36 @@ BugReporterContext::~BugReporterContext() {
// Helper routines for walking the ExplodedGraph and fetching statements.
//===----------------------------------------------------------------------===//
-static inline Stmt* GetStmt(ProgramPoint P) {
- if (const PostStmt* PS = dyn_cast<PostStmt>(&P))
- return PS->getStmt();
+static inline const Stmt* GetStmt(ProgramPoint P) {
+ if (const StmtPoint* SP = dyn_cast<StmtPoint>(&P))
+ return SP->getStmt();
else if (const BlockEdge* BE = dyn_cast<BlockEdge>(&P))
return BE->getSrc()->getTerminator();
-
+
return 0;
}
-static inline const ExplodedNode<GRState>*
-GetPredecessorNode(const ExplodedNode<GRState>* N) {
+static inline const ExplodedNode*
+GetPredecessorNode(const ExplodedNode* N) {
return N->pred_empty() ? NULL : *(N->pred_begin());
}
-static inline const ExplodedNode<GRState>*
-GetSuccessorNode(const ExplodedNode<GRState>* N) {
+static inline const ExplodedNode*
+GetSuccessorNode(const ExplodedNode* N) {
return N->succ_empty() ? NULL : *(N->succ_begin());
}
-static Stmt* GetPreviousStmt(const ExplodedNode<GRState>* N) {
+static const Stmt* GetPreviousStmt(const ExplodedNode* N) {
for (N = GetPredecessorNode(N); N; N = GetPredecessorNode(N))
- if (Stmt *S = GetStmt(N->getLocation()))
+ if (const Stmt *S = GetStmt(N->getLocation()))
return S;
-
+
return 0;
}
-static Stmt* GetNextStmt(const ExplodedNode<GRState>* N) {
+static const Stmt* GetNextStmt(const ExplodedNode* N) {
for (N = GetSuccessorNode(N); N; N = GetSuccessorNode(N))
- if (Stmt *S = GetStmt(N->getLocation())) {
+ if (const Stmt *S = GetStmt(N->getLocation())) {
// Check if the statement is '?' or '&&'/'||'. These are "merges",
// not actual statement points.
switch (S->getStmtClass()) {
@@ -84,23 +84,30 @@ static Stmt* GetNextStmt(const ExplodedNode<GRState>* N) {
default:
break;
}
+
+ // Some expressions don't have locations.
+ if (S->getLocStart().isInvalid())
+ continue;
+
return S;
}
-
+
return 0;
}
-static inline Stmt* GetCurrentOrPreviousStmt(const ExplodedNode<GRState>* N) {
- if (Stmt *S = GetStmt(N->getLocation()))
+static inline const Stmt*
+GetCurrentOrPreviousStmt(const ExplodedNode* N) {
+ if (const Stmt *S = GetStmt(N->getLocation()))
return S;
-
+
return GetPreviousStmt(N);
}
-
-static inline Stmt* GetCurrentOrNextStmt(const ExplodedNode<GRState>* N) {
- if (Stmt *S = GetStmt(N->getLocation()))
+
+static inline const Stmt*
+GetCurrentOrNextStmt(const ExplodedNode* N) {
+ if (const Stmt *S = GetStmt(N->getLocation()))
return S;
-
+
return GetNextStmt(N);
}
@@ -108,8 +115,8 @@ static inline Stmt* GetCurrentOrNextStmt(const ExplodedNode<GRState>* N) {
// PathDiagnosticBuilder and its associated routines and helper objects.
//===----------------------------------------------------------------------===//
-typedef llvm::DenseMap<const ExplodedNode<GRState>*,
-const ExplodedNode<GRState>*> NodeBackMap;
+typedef llvm::DenseMap<const ExplodedNode*,
+const ExplodedNode*> NodeBackMap;
namespace {
class VISIBILITY_HIDDEN NodeMapClosure : public BugReport::NodeResolver {
@@ -117,101 +124,100 @@ class VISIBILITY_HIDDEN NodeMapClosure : public BugReport::NodeResolver {
public:
NodeMapClosure(NodeBackMap *m) : M(*m) {}
~NodeMapClosure() {}
-
- const ExplodedNode<GRState>* getOriginalNode(const ExplodedNode<GRState>* N) {
+
+ const ExplodedNode* getOriginalNode(const ExplodedNode* N) {
NodeBackMap::iterator I = M.find(N);
return I == M.end() ? 0 : I->second;
}
};
-
+
class VISIBILITY_HIDDEN PathDiagnosticBuilder : public BugReporterContext {
BugReport *R;
PathDiagnosticClient *PDC;
llvm::OwningPtr<ParentMap> PM;
NodeMapClosure NMC;
-public:
+public:
PathDiagnosticBuilder(GRBugReporter &br,
- BugReport *r, NodeBackMap *Backmap,
+ BugReport *r, NodeBackMap *Backmap,
PathDiagnosticClient *pdc)
: BugReporterContext(br),
- R(r), PDC(pdc), NMC(Backmap)
- {
+ R(r), PDC(pdc), NMC(Backmap) {
addVisitor(R);
}
-
- PathDiagnosticLocation ExecutionContinues(const ExplodedNode<GRState>* N);
-
+
+ PathDiagnosticLocation ExecutionContinues(const ExplodedNode* N);
+
PathDiagnosticLocation ExecutionContinues(llvm::raw_string_ostream& os,
- const ExplodedNode<GRState>* N);
-
- ParentMap& getParentMap() {
- if (PM.get() == 0)
- PM.reset(new ParentMap(getCodeDecl().getBody()));
- return *PM.get();
- }
-
+ const ExplodedNode* N);
+
+ Decl const &getCodeDecl() { return R->getEndNode()->getCodeDecl(); }
+
+ ParentMap& getParentMap() { return R->getEndNode()->getParentMap(); }
+
const Stmt *getParent(const Stmt *S) {
return getParentMap().getParent(S);
}
-
+
virtual NodeMapClosure& getNodeResolver() { return NMC; }
BugReport& getReport() { return *R; }
PathDiagnosticLocation getEnclosingStmtLocation(const Stmt *S);
-
+
PathDiagnosticLocation
getEnclosingStmtLocation(const PathDiagnosticLocation &L) {
if (const Stmt *S = L.asStmt())
return getEnclosingStmtLocation(S);
-
+
return L;
}
-
+
PathDiagnosticClient::PathGenerationScheme getGenerationScheme() const {
return PDC ? PDC->getGenerationScheme() : PathDiagnosticClient::Extensive;
}
bool supportsLogicalOpControlFlow() const {
return PDC ? PDC->supportsLogicalOpControlFlow() : true;
- }
+ }
};
} // end anonymous namespace
PathDiagnosticLocation
-PathDiagnosticBuilder::ExecutionContinues(const ExplodedNode<GRState>* N) {
- if (Stmt *S = GetNextStmt(N))
+PathDiagnosticBuilder::ExecutionContinues(const ExplodedNode* N) {
+ if (const Stmt *S = GetNextStmt(N))
return PathDiagnosticLocation(S, getSourceManager());
- return FullSourceLoc(getCodeDecl().getBodyRBrace(), getSourceManager());
+ return FullSourceLoc(N->getLocationContext()->getDecl()->getBodyRBrace(),
+ getSourceManager());
}
-
+
PathDiagnosticLocation
PathDiagnosticBuilder::ExecutionContinues(llvm::raw_string_ostream& os,
- const ExplodedNode<GRState>* N) {
+ const ExplodedNode* N) {
// Slow, but probably doesn't matter.
if (os.str().empty())
os << ' ';
-
+
const PathDiagnosticLocation &Loc = ExecutionContinues(N);
-
+
if (Loc.asStmt())
os << "Execution continues on line "
<< getSourceManager().getInstantiationLineNumber(Loc.asLocation())
<< '.';
else
os << "Execution jumps to the end of the "
- << (isa<ObjCMethodDecl>(getCodeDecl()) ? "method" : "function") << '.';
-
+ << (isa<ObjCMethodDecl>(N->getLocationContext()->getDecl()) ?
+ "method" : "function") << '.';
+
return Loc;
}
static bool IsNested(const Stmt *S, ParentMap &PM) {
if (isa<Expr>(S) && PM.isConsumedExpr(cast<Expr>(S)))
return true;
-
+
const Stmt *Parent = PM.getParentIgnoreParens(S);
-
+
if (Parent)
switch (Parent->getStmtClass()) {
case Stmt::ForStmtClass:
@@ -221,29 +227,29 @@ static bool IsNested(const Stmt *S, ParentMap &PM) {
default:
break;
}
-
- return false;
+
+ return false;
}
PathDiagnosticLocation
PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
assert(S && "Null Stmt* passed to getEnclosingStmtLocation");
- ParentMap &P = getParentMap();
+ ParentMap &P = getParentMap();
SourceManager &SMgr = getSourceManager();
while (IsNested(S, P)) {
const Stmt *Parent = P.getParentIgnoreParens(S);
-
+
if (!Parent)
break;
-
+
switch (Parent->getStmtClass()) {
case Stmt::BinaryOperatorClass: {
const BinaryOperator *B = cast<BinaryOperator>(Parent);
if (B->isLogicalOp())
return PathDiagnosticLocation(S, SMgr);
break;
- }
+ }
case Stmt::CompoundStmtClass:
case Stmt::StmtExprClass:
return PathDiagnosticLocation(S, SMgr);
@@ -253,20 +259,20 @@ PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
if (cast<ChooseExpr>(Parent)->getCond() == S)
return PathDiagnosticLocation(Parent, SMgr);
else
- return PathDiagnosticLocation(S, SMgr);
+ return PathDiagnosticLocation(S, SMgr);
case Stmt::ConditionalOperatorClass:
// For '?', if we are referring to condition, just have the edge point
// to the entire '?' expression.
if (cast<ConditionalOperator>(Parent)->getCond() == S)
return PathDiagnosticLocation(Parent, SMgr);
else
- return PathDiagnosticLocation(S, SMgr);
+ return PathDiagnosticLocation(S, SMgr);
case Stmt::DoStmtClass:
- return PathDiagnosticLocation(S, SMgr);
+ return PathDiagnosticLocation(S, SMgr);
case Stmt::ForStmtClass:
if (cast<ForStmt>(Parent)->getBody() == S)
- return PathDiagnosticLocation(S, SMgr);
- break;
+ return PathDiagnosticLocation(S, SMgr);
+ break;
case Stmt::IfStmtClass:
if (cast<IfStmt>(Parent)->getCond() != S)
return PathDiagnosticLocation(S, SMgr);
@@ -285,7 +291,7 @@ PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
S = Parent;
}
-
+
assert(S && "Cannot have null Stmt for PathDiagnosticLocation");
// Special case: DeclStmts can appear in for statement declarations, in which
@@ -298,8 +304,8 @@ PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
return PathDiagnosticLocation(Parent, SMgr);
default:
break;
- }
- }
+ }
+ }
}
else if (isa<BinaryOperator>(S)) {
// Special case: the binary operator represents the initialization
@@ -320,86 +326,86 @@ PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
//===----------------------------------------------------------------------===//
static const VarDecl*
-GetMostRecentVarDeclBinding(const ExplodedNode<GRState>* N,
+GetMostRecentVarDeclBinding(const ExplodedNode* N,
GRStateManager& VMgr, SVal X) {
-
+
for ( ; N ; N = N->pred_empty() ? 0 : *N->pred_begin()) {
-
+
ProgramPoint P = N->getLocation();
-
+
if (!isa<PostStmt>(P))
continue;
-
- DeclRefExpr* DR = dyn_cast<DeclRefExpr>(cast<PostStmt>(P).getStmt());
-
+
+ const DeclRefExpr* DR = dyn_cast<DeclRefExpr>(cast<PostStmt>(P).getStmt());
+
if (!DR)
continue;
-
+
SVal Y = N->getState()->getSVal(DR);
-
+
if (X != Y)
continue;
-
- VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl());
-
+
+ const VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl());
+
if (!VD)
continue;
-
+
return VD;
}
-
+
return 0;
}
namespace {
-class VISIBILITY_HIDDEN NotableSymbolHandler
+class VISIBILITY_HIDDEN NotableSymbolHandler
: public StoreManager::BindingsHandler {
-
+
SymbolRef Sym;
const GRState* PrevSt;
const Stmt* S;
GRStateManager& VMgr;
- const ExplodedNode<GRState>* Pred;
- PathDiagnostic& PD;
+ const ExplodedNode* Pred;
+ PathDiagnostic& PD;
BugReporter& BR;
-
+
public:
-
+
NotableSymbolHandler(SymbolRef sym, const GRState* prevst, const Stmt* s,
- GRStateManager& vmgr, const ExplodedNode<GRState>* pred,
+ GRStateManager& vmgr, const ExplodedNode* pred,
PathDiagnostic& pd, BugReporter& br)
: Sym(sym), PrevSt(prevst), S(s), VMgr(vmgr), Pred(pred), PD(pd), BR(br) {}
-
+
bool HandleBinding(StoreManager& SMgr, Store store, const MemRegion* R,
SVal V) {
-
+
SymbolRef ScanSym = V.getAsSymbol();
-
+
if (ScanSym != Sym)
return true;
-
- // Check if the previous state has this binding.
+
+ // Check if the previous state has this binding.
SVal X = PrevSt->getSVal(loc::MemRegionVal(R));
-
+
if (X == V) // Same binding?
return true;
-
+
// Different binding. Only handle assignments for now. We don't pull
- // this check out of the loop because we will eventually handle other
+ // this check out of the loop because we will eventually handle other
// cases.
-
+
VarDecl *VD = 0;
-
+
if (const BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
if (!B->isAssignmentOp())
return true;
-
+
// What variable did we assign to?
DeclRefExpr* DR = dyn_cast<DeclRefExpr>(B->getLHS()->IgnoreParenCasts());
-
+
if (!DR)
return true;
-
+
VD = dyn_cast<VarDecl>(DR->getDecl());
}
else if (const DeclStmt* DS = dyn_cast<DeclStmt>(S)) {
@@ -408,42 +414,42 @@ public:
// holds by contruction in the CFG.
VD = dyn_cast<VarDecl>(*DS->decl_begin());
}
-
+
if (!VD)
return true;
-
+
// What is the most recently referenced variable with this binding?
const VarDecl* MostRecent = GetMostRecentVarDeclBinding(Pred, VMgr, V);
-
+
if (!MostRecent)
return true;
-
+
// Create the diagnostic.
FullSourceLoc L(S->getLocStart(), BR.getSourceManager());
-
+
if (Loc::IsLocType(VD->getType())) {
std::string msg = "'" + std::string(VD->getNameAsString()) +
"' now aliases '" + MostRecent->getNameAsString() + "'";
-
+
PD.push_front(new PathDiagnosticEventPiece(L, msg));
}
-
+
return true;
- }
+ }
};
}
-static void HandleNotableSymbol(const ExplodedNode<GRState>* N,
+static void HandleNotableSymbol(const ExplodedNode* N,
const Stmt* S,
SymbolRef Sym, BugReporter& BR,
PathDiagnostic& PD) {
-
- const ExplodedNode<GRState>* Pred = N->pred_empty() ? 0 : *N->pred_begin();
+
+ const ExplodedNode* Pred = N->pred_empty() ? 0 : *N->pred_begin();
const GRState* PrevSt = Pred ? Pred->getState() : 0;
-
+
if (!PrevSt)
return;
-
+
// Look at the region bindings of the current state that map to the
// specified symbol. Are any of them not in the previous state?
GRStateManager& VMgr = cast<GRBugReporter>(BR).getStateManager();
@@ -454,34 +460,34 @@ static void HandleNotableSymbol(const ExplodedNode<GRState>* N,
namespace {
class VISIBILITY_HIDDEN ScanNotableSymbols
: public StoreManager::BindingsHandler {
-
+
llvm::SmallSet<SymbolRef, 10> AlreadyProcessed;
- const ExplodedNode<GRState>* N;
- Stmt* S;
+ const ExplodedNode* N;
+ const Stmt* S;
GRBugReporter& BR;
PathDiagnostic& PD;
-
+
public:
- ScanNotableSymbols(const ExplodedNode<GRState>* n, Stmt* s, GRBugReporter& br,
- PathDiagnostic& pd)
+ ScanNotableSymbols(const ExplodedNode* n, const Stmt* s,
+ GRBugReporter& br, PathDiagnostic& pd)
: N(n), S(s), BR(br), PD(pd) {}
-
+
bool HandleBinding(StoreManager& SMgr, Store store,
const MemRegion* R, SVal V) {
-
+
SymbolRef ScanSym = V.getAsSymbol();
-
+
if (!ScanSym)
return true;
-
+
if (!BR.isNotable(ScanSym))
return true;
-
+
if (AlreadyProcessed.count(ScanSym))
return true;
-
+
AlreadyProcessed.insert(ScanSym);
-
+
HandleNotableSymbol(N, S, ScanSym, BR, PD);
return true;
}
@@ -496,57 +502,57 @@ static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM);
static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
PathDiagnosticBuilder &PDB,
- const ExplodedNode<GRState> *N) {
+ const ExplodedNode *N) {
SourceManager& SMgr = PDB.getSourceManager();
- const ExplodedNode<GRState>* NextNode = N->pred_empty()
+ const ExplodedNode* NextNode = N->pred_empty()
? NULL : *(N->pred_begin());
while (NextNode) {
- N = NextNode;
+ N = NextNode;
NextNode = GetPredecessorNode(N);
-
+
ProgramPoint P = N->getLocation();
-
+
if (const BlockEdge* BE = dyn_cast<BlockEdge>(&P)) {
CFGBlock* Src = BE->getSrc();
CFGBlock* Dst = BE->getDst();
Stmt* T = Src->getTerminator();
-
+
if (!T)
continue;
-
+
FullSourceLoc Start(T->getLocStart(), SMgr);
-
+
switch (T->getStmtClass()) {
default:
break;
-
+
case Stmt::GotoStmtClass:
- case Stmt::IndirectGotoStmtClass: {
- Stmt* S = GetNextStmt(N);
-
+ case Stmt::IndirectGotoStmtClass: {
+ const Stmt* S = GetNextStmt(N);
+
if (!S)
continue;
-
+
std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
+ llvm::raw_string_ostream os(sbuf);
const PathDiagnosticLocation &End = PDB.getEnclosingStmtLocation(S);
-
+
os << "Control jumps to line "
<< End.asLocation().getInstantiationLineNumber();
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
break;
}
-
- case Stmt::SwitchStmtClass: {
+
+ case Stmt::SwitchStmtClass: {
// Figure out what case arm we took.
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
if (Stmt* S = Dst->getLabel()) {
PathDiagnosticLocation End(S, SMgr);
-
+
switch (S->getStmtClass()) {
default:
os << "No cases match in the switch statement. "
@@ -557,21 +563,21 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
os << "Control jumps to the 'default' case at line "
<< End.asLocation().getInstantiationLineNumber();
break;
-
+
case Stmt::CaseStmtClass: {
- os << "Control jumps to 'case ";
- CaseStmt* Case = cast<CaseStmt>(S);
+ os << "Control jumps to 'case ";
+ CaseStmt* Case = cast<CaseStmt>(S);
Expr* LHS = Case->getLHS()->IgnoreParenCasts();
-
- // Determine if it is an enum.
+
+ // Determine if it is an enum.
bool GetRawInt = true;
-
+
if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(LHS)) {
// FIXME: Maybe this should be an assertion. Are there cases
// were it is not an EnumConstantDecl?
EnumConstantDecl* D =
dyn_cast<EnumConstantDecl>(DR->getDecl());
-
+
if (D) {
GetRawInt = false;
os << D->getNameAsString();
@@ -591,14 +597,14 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
}
else {
os << "'Default' branch taken. ";
- const PathDiagnosticLocation &End = PDB.ExecutionContinues(os, N);
+ const PathDiagnosticLocation &End = PDB.ExecutionContinues(os, N);
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
}
-
+
break;
}
-
+
case Stmt::BreakStmtClass:
case Stmt::ContinueStmtClass: {
std::string sbuf;
@@ -608,117 +614,117 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
os.str()));
break;
}
-
+
// Determine control-flow for ternary '?'.
case Stmt::ConditionalOperatorClass: {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
os << "'?' condition is ";
-
+
if (*(Src->succ_begin()+1) == Dst)
os << "false";
else
os << "true";
-
+
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
-
+
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
-
+
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
break;
}
-
+
// Determine control-flow for short-circuited '&&' and '||'.
case Stmt::BinaryOperatorClass: {
if (!PDB.supportsLogicalOpControlFlow())
break;
-
+
BinaryOperator *B = cast<BinaryOperator>(T);
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
os << "Left side of '";
-
+
if (B->getOpcode() == BinaryOperator::LAnd) {
os << "&&" << "' is ";
-
+
if (*(Src->succ_begin()+1) == Dst) {
os << "false";
PathDiagnosticLocation End(B->getLHS(), SMgr);
PathDiagnosticLocation Start(B->getOperatorLoc(), SMgr);
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
- }
+ }
else {
os << "true";
PathDiagnosticLocation Start(B->getLHS(), SMgr);
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
- }
+ }
}
else {
assert(B->getOpcode() == BinaryOperator::LOr);
os << "||" << "' is ";
-
+
if (*(Src->succ_begin()+1) == Dst) {
os << "false";
PathDiagnosticLocation Start(B->getLHS(), SMgr);
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
- os.str()));
+ os.str()));
}
else {
os << "true";
PathDiagnosticLocation End(B->getLHS(), SMgr);
PathDiagnosticLocation Start(B->getOperatorLoc(), SMgr);
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
- os.str()));
+ os.str()));
}
}
-
+
break;
}
-
- case Stmt::DoStmtClass: {
+
+ case Stmt::DoStmtClass: {
if (*(Src->succ_begin()) == Dst) {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
os << "Loop condition is true. ";
PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
-
+
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
-
+
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
}
else {
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
-
+
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
-
+
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
"Loop condition is false. Exiting loop"));
}
-
+
break;
}
-
+
case Stmt::WhileStmtClass:
- case Stmt::ForStmtClass: {
+ case Stmt::ForStmtClass: {
if (*(Src->succ_begin()+1) == Dst) {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
os << "Loop condition is false. ";
PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
-
+
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
}
@@ -726,32 +732,32 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
-
+
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
"Loop condition is true. Entering loop body"));
}
-
+
break;
}
-
+
case Stmt::IfStmtClass: {
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
-
+
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
-
+
if (*(Src->succ_begin()+1) == Dst)
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
"Taking false branch"));
- else
+ else
PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
"Taking true branch"));
-
+
break;
}
}
}
-
+
if (NextNode) {
for (BugReporterContext::visitor_iterator I = PDB.visitor_begin(),
E = PDB.visitor_end(); I!=E; ++I) {
@@ -759,15 +765,15 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
PD.push_front(p);
}
}
-
- if (const PostStmt* PS = dyn_cast<PostStmt>(&P)) {
+
+ if (const PostStmt* PS = dyn_cast<PostStmt>(&P)) {
// Scan the region bindings, and see if a "notable" symbol has a new
// lval binding.
ScanNotableSymbols SNS(N, PS->getStmt(), PDB.getBugReporter(), PD);
PDB.getStateManager().iterBindings(N->getState(), SNS);
}
}
-
+
// After constructing the full PathDiagnostic, do a pass over it to compact
// PathDiagnosticPieces that occur within a macro.
CompactPathDiagnostic(PD, PDB.getSourceManager());
@@ -779,20 +785,20 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
static bool IsControlFlowExpr(const Stmt *S) {
const Expr *E = dyn_cast<Expr>(S);
-
+
if (!E)
return false;
-
- E = E->IgnoreParenCasts();
-
+
+ E = E->IgnoreParenCasts();
+
if (isa<ConditionalOperator>(E))
return true;
-
+
if (const BinaryOperator *B = dyn_cast<BinaryOperator>(E))
if (B->isLogicalOp())
return true;
-
- return false;
+
+ return false;
}
namespace {
@@ -801,25 +807,25 @@ class VISIBILITY_HIDDEN ContextLocation : public PathDiagnosticLocation {
public:
ContextLocation(const PathDiagnosticLocation &L, bool isdead = false)
: PathDiagnosticLocation(L), IsDead(isdead) {}
-
- void markDead() { IsDead = true; }
+
+ void markDead() { IsDead = true; }
bool isDead() const { return IsDead; }
};
-
+
class VISIBILITY_HIDDEN EdgeBuilder {
std::vector<ContextLocation> CLocs;
typedef std::vector<ContextLocation>::iterator iterator;
PathDiagnostic &PD;
PathDiagnosticBuilder &PDB;
PathDiagnosticLocation PrevLoc;
-
+
bool IsConsumedExpr(const PathDiagnosticLocation &L);
-
+
bool containsLocation(const PathDiagnosticLocation &Container,
const PathDiagnosticLocation &Containee);
-
+
PathDiagnosticLocation getContextLocation(const PathDiagnosticLocation &L);
-
+
PathDiagnosticLocation cleanUpLocation(PathDiagnosticLocation L,
bool firstCharOnly = false) {
if (const Stmt *S = L.asStmt()) {
@@ -847,20 +853,20 @@ class VISIBILITY_HIDDEN EdgeBuilder {
firstCharOnly = true;
continue;
}
-
+
break;
}
-
+
if (S != Original)
L = PathDiagnosticLocation(S, L.getManager());
}
-
+
if (firstCharOnly)
L = PathDiagnosticLocation(L.asLocation());
return L;
}
-
+
void popLocation() {
if (!CLocs.back().isDead() && CLocs.back().asLocation().isFileID()) {
// For contexts, we only one the first character as the range.
@@ -868,18 +874,18 @@ class VISIBILITY_HIDDEN EdgeBuilder {
}
CLocs.pop_back();
}
-
- PathDiagnosticLocation IgnoreParens(const PathDiagnosticLocation &L);
+
+ PathDiagnosticLocation IgnoreParens(const PathDiagnosticLocation &L);
public:
EdgeBuilder(PathDiagnostic &pd, PathDiagnosticBuilder &pdb)
: PD(pd), PDB(pdb) {
-
+
// If the PathDiagnostic already has pieces, add the enclosing statement
// of the first piece as a context as well.
if (!PD.empty()) {
PrevLoc = PD.begin()->getLocation();
-
+
if (const Stmt *S = PrevLoc.asStmt())
addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
}
@@ -887,7 +893,7 @@ public:
~EdgeBuilder() {
while (!CLocs.empty()) popLocation();
-
+
// Finally, add an initial edge from the start location of the first
// statement (if it doesn't already exist).
// FIXME: Should handle CXXTryStmt if analyser starts supporting C++.
@@ -897,20 +903,20 @@ public:
SourceLocation Loc = (*CS->body_begin())->getLocStart();
rawAddEdge(PathDiagnosticLocation(Loc, PDB.getSourceManager()));
}
-
+
}
void addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd = false);
-
+
void addEdge(const Stmt *S, bool alwaysAdd = false) {
addEdge(PathDiagnosticLocation(S, PDB.getSourceManager()), alwaysAdd);
}
-
+
void rawAddEdge(PathDiagnosticLocation NewLoc);
-
+
void addContext(const Stmt *S);
void addExtendedContext(const Stmt *S);
-};
+};
} // end anonymous namespace
@@ -919,10 +925,10 @@ EdgeBuilder::getContextLocation(const PathDiagnosticLocation &L) {
if (const Stmt *S = L.asStmt()) {
if (IsControlFlowExpr(S))
return L;
-
- return PDB.getEnclosingStmtLocation(S);
+
+ return PDB.getEnclosingStmtLocation(S);
}
-
+
return L;
}
@@ -931,10 +937,10 @@ bool EdgeBuilder::containsLocation(const PathDiagnosticLocation &Container,
if (Container == Containee)
return true;
-
+
if (Container.asDecl())
return true;
-
+
if (const Stmt *S = Containee.asStmt())
if (const Stmt *ContainerS = Container.asStmt()) {
while (S) {
@@ -948,25 +954,25 @@ bool EdgeBuilder::containsLocation(const PathDiagnosticLocation &Container,
// Less accurate: compare using source ranges.
SourceRange ContainerR = Container.asRange();
SourceRange ContaineeR = Containee.asRange();
-
+
SourceManager &SM = PDB.getSourceManager();
SourceLocation ContainerRBeg = SM.getInstantiationLoc(ContainerR.getBegin());
SourceLocation ContainerREnd = SM.getInstantiationLoc(ContainerR.getEnd());
SourceLocation ContaineeRBeg = SM.getInstantiationLoc(ContaineeR.getBegin());
SourceLocation ContaineeREnd = SM.getInstantiationLoc(ContaineeR.getEnd());
-
+
unsigned ContainerBegLine = SM.getInstantiationLineNumber(ContainerRBeg);
unsigned ContainerEndLine = SM.getInstantiationLineNumber(ContainerREnd);
unsigned ContaineeBegLine = SM.getInstantiationLineNumber(ContaineeRBeg);
unsigned ContaineeEndLine = SM.getInstantiationLineNumber(ContaineeREnd);
-
+
assert(ContainerBegLine <= ContainerEndLine);
- assert(ContaineeBegLine <= ContaineeEndLine);
-
+ assert(ContaineeBegLine <= ContaineeEndLine);
+
return (ContainerBegLine <= ContaineeBegLine &&
ContainerEndLine >= ContaineeEndLine &&
(ContainerBegLine != ContaineeBegLine ||
- SM.getInstantiationColumnNumber(ContainerRBeg) <=
+ SM.getInstantiationColumnNumber(ContainerRBeg) <=
SM.getInstantiationColumnNumber(ContaineeRBeg)) &&
(ContainerEndLine != ContaineeEndLine ||
SM.getInstantiationColumnNumber(ContainerREnd) >=
@@ -986,13 +992,13 @@ void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) {
PrevLoc = NewLoc;
return;
}
-
+
const PathDiagnosticLocation &NewLocClean = cleanUpLocation(NewLoc);
const PathDiagnosticLocation &PrevLocClean = cleanUpLocation(PrevLoc);
-
+
if (NewLocClean.asLocation() == PrevLocClean.asLocation())
return;
-
+
// FIXME: Ignore intra-macro edges for now.
if (NewLocClean.asLocation().getInstantiationLoc() ==
PrevLocClean.asLocation().getInstantiationLoc())
@@ -1003,15 +1009,15 @@ void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) {
}
void EdgeBuilder::addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd) {
-
+
if (!alwaysAdd && NewLoc.asLocation().isMacroID())
return;
-
+
const PathDiagnosticLocation &CLoc = getContextLocation(NewLoc);
while (!CLocs.empty()) {
ContextLocation &TopContextLoc = CLocs.back();
-
+
// Is the top location context the same as the one for the new location?
if (TopContextLoc == CLoc) {
if (alwaysAdd) {
@@ -1028,21 +1034,21 @@ void EdgeBuilder::addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd) {
if (containsLocation(TopContextLoc, CLoc)) {
if (alwaysAdd) {
rawAddEdge(NewLoc);
-
+
if (IsConsumedExpr(CLoc) && !IsControlFlowExpr(CLoc.asStmt())) {
CLocs.push_back(ContextLocation(CLoc, true));
return;
}
}
-
+
CLocs.push_back(CLoc);
- return;
+ return;
}
// Context does not contain the location. Flush it.
popLocation();
}
-
+
// If we reach here, there is no enclosing context. Just add the edge.
rawAddEdge(NewLoc);
}
@@ -1050,15 +1056,15 @@ void EdgeBuilder::addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd) {
bool EdgeBuilder::IsConsumedExpr(const PathDiagnosticLocation &L) {
if (const Expr *X = dyn_cast_or_null<Expr>(L.asStmt()))
return PDB.getParentMap().isConsumedExpr(X) && !IsControlFlowExpr(X);
-
+
return false;
}
-
+
void EdgeBuilder::addExtendedContext(const Stmt *S) {
if (!S)
return;
-
- const Stmt *Parent = PDB.getParent(S);
+
+ const Stmt *Parent = PDB.getParent(S);
while (Parent) {
if (isa<CompoundStmt>(Parent))
Parent = PDB.getParent(Parent);
@@ -1075,16 +1081,16 @@ void EdgeBuilder::addExtendedContext(const Stmt *S) {
break;
}
}
-
+
addContext(S);
}
-
+
void EdgeBuilder::addContext(const Stmt *S) {
if (!S)
return;
PathDiagnosticLocation L(S, PDB.getSourceManager());
-
+
while (!CLocs.empty()) {
const PathDiagnosticLocation &TopContextLoc = CLocs.back();
@@ -1094,7 +1100,7 @@ void EdgeBuilder::addContext(const Stmt *S) {
if (containsLocation(TopContextLoc, L)) {
CLocs.push_back(L);
- return;
+ return;
}
// Context does not contain the location. Flush it.
@@ -1106,12 +1112,12 @@ void EdgeBuilder::addContext(const Stmt *S) {
static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
PathDiagnosticBuilder &PDB,
- const ExplodedNode<GRState> *N) {
-
-
+ const ExplodedNode *N) {
+
+
EdgeBuilder EB(PD, PDB);
- const ExplodedNode<GRState>* NextNode = N->pred_empty()
+ const ExplodedNode* NextNode = N->pred_empty()
? NULL : *(N->pred_begin());
while (NextNode) {
N = NextNode;
@@ -1123,26 +1129,26 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
const CFGBlock &Blk = *BE->getSrc();
const Stmt *Term = Blk.getTerminator();
-
+
// Are we jumping to the head of a loop? Add a special diagnostic.
if (const Stmt *Loop = BE->getDst()->getLoopTarget()) {
PathDiagnosticLocation L(Loop, PDB.getSourceManager());
const CompoundStmt *CS = NULL;
-
+
if (!Term) {
if (const ForStmt *FS = dyn_cast<ForStmt>(Loop))
CS = dyn_cast<CompoundStmt>(FS->getBody());
else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop))
- CS = dyn_cast<CompoundStmt>(WS->getBody());
+ CS = dyn_cast<CompoundStmt>(WS->getBody());
}
-
+
PathDiagnosticEventPiece *p =
new PathDiagnosticEventPiece(L,
"Looping back to the head of the loop");
-
+
EB.addEdge(p->getLocation(), true);
PD.push_front(p);
-
+
if (CS) {
PathDiagnosticLocation BL(CS->getRBracLoc(),
PDB.getSourceManager());
@@ -1150,14 +1156,14 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
EB.addEdge(BL);
}
}
-
+
if (Term)
EB.addContext(Term);
-
+
break;
}
- if (const BlockEntrance *BE = dyn_cast<BlockEntrance>(&P)) {
+ if (const BlockEntrance *BE = dyn_cast<BlockEntrance>(&P)) {
if (const Stmt* S = BE->getFirstStmt()) {
if (IsControlFlowExpr(S)) {
// Add the proper context for '&&', '||', and '?'.
@@ -1170,10 +1176,10 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
break;
}
} while (0);
-
+
if (!NextNode)
continue;
-
+
for (BugReporterContext::visitor_iterator I = PDB.visitor_begin(),
E = PDB.visitor_end(); I!=E; ++I) {
if (PathDiagnosticPiece* p = (*I)->VisitNode(N, NextNode, PDB)) {
@@ -1181,16 +1187,25 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
EB.addEdge(Loc, true);
PD.push_front(p);
if (const Stmt *S = Loc.asStmt())
- EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
+ EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
}
- }
+ }
}
}
//===----------------------------------------------------------------------===//
// Methods for BugType and subclasses.
//===----------------------------------------------------------------------===//
-BugType::~BugType() {}
+BugType::~BugType() {
+ // Free up the equivalence class objects. Observe that we get a pointer to
+ // the object first before incrementing the iterator, as destroying the
+ // node before doing so means we will read from freed memory.
+ for (iterator I = begin(), E = end(); I !=E; ) {
+ BugReportEquivClass *EQ = &*I;
+ ++I;
+ delete EQ;
+ }
+}
void BugType::FlushReports(BugReporter &BR) {}
//===----------------------------------------------------------------------===//
@@ -1199,46 +1214,47 @@ void BugType::FlushReports(BugReporter &BR) {}
BugReport::~BugReport() {}
RangedBugReport::~RangedBugReport() {}
-Stmt* BugReport::getStmt(BugReporter& BR) const {
- ProgramPoint ProgP = EndNode->getLocation();
- Stmt *S = NULL;
-
+const Stmt* BugReport::getStmt() const {
+ ProgramPoint ProgP = EndNode->getLocation();
+ const Stmt *S = NULL;
+
if (BlockEntrance* BE = dyn_cast<BlockEntrance>(&ProgP)) {
- if (BE->getBlock() == &BR.getCFG()->getExit()) S = GetPreviousStmt(EndNode);
+ CFGBlock &Exit = ProgP.getLocationContext()->getCFG()->getExit();
+ if (BE->getBlock() == &Exit)
+ S = GetPreviousStmt(EndNode);
}
- if (!S) S = GetStmt(ProgP);
-
- return S;
+ if (!S)
+ S = GetStmt(ProgP);
+
+ return S;
}
PathDiagnosticPiece*
BugReport::getEndPath(BugReporterContext& BRC,
- const ExplodedNode<GRState>* EndPathNode) {
-
- Stmt* S = getStmt(BRC.getBugReporter());
-
+ const ExplodedNode* EndPathNode) {
+
+ const Stmt* S = getStmt();
+
if (!S)
return NULL;
const SourceRange *Beg, *End;
- getRanges(BRC.getBugReporter(), Beg, End);
+ getRanges(Beg, End);
PathDiagnosticLocation L(S, BRC.getSourceManager());
-
+
// Only add the statement itself as a range if we didn't specify any
// special ranges for this report.
PathDiagnosticPiece* P = new PathDiagnosticEventPiece(L, getDescription(),
Beg == End);
-
+
for (; Beg != End; ++Beg)
P->addRange(*Beg);
-
+
return P;
}
-void BugReport::getRanges(BugReporter& BR, const SourceRange*& beg,
- const SourceRange*& end) {
-
- if (Expr* E = dyn_cast_or_null<Expr>(getStmt(BR))) {
+void BugReport::getRanges(const SourceRange*& beg, const SourceRange*& end) {
+ if (const Expr* E = dyn_cast_or_null<Expr>(getStmt())) {
R = E->getSourceRange();
assert(R.isValid());
beg = &R;
@@ -1248,12 +1264,15 @@ void BugReport::getRanges(BugReporter& BR, const SourceRange*& beg,
beg = end = 0;
}
-SourceLocation BugReport::getLocation() const {
+SourceLocation BugReport::getLocation() const {
if (EndNode)
- if (Stmt* S = GetCurrentOrPreviousStmt(EndNode)) {
+ if (const Stmt* S = GetCurrentOrPreviousStmt(EndNode)) {
// For member expressions, return the location of the '.' or '->'.
- if (MemberExpr* ME = dyn_cast<MemberExpr>(S))
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(S))
return ME->getMemberLoc();
+ // For binary operators, return the location of the operator.
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(S))
+ return B->getOperatorLoc();
return S->getLocStart();
}
@@ -1261,8 +1280,8 @@ SourceLocation BugReport::getLocation() const {
return FullSourceLoc();
}
-PathDiagnosticPiece* BugReport::VisitNode(const ExplodedNode<GRState>* N,
- const ExplodedNode<GRState>* PrevN,
+PathDiagnosticPiece* BugReport::VisitNode(const ExplodedNode* N,
+ const ExplodedNode* PrevN,
BugReporterContext &BRC) {
return NULL;
}
@@ -1275,11 +1294,10 @@ BugReportEquivClass::~BugReportEquivClass() {
for (iterator I=begin(), E=end(); I!=E; ++I) delete *I;
}
-GRBugReporter::~GRBugReporter() { FlushReports(); }
+GRBugReporter::~GRBugReporter() { }
BugReporterData::~BugReporterData() {}
-ExplodedGraph<GRState>&
-GRBugReporter::getGraph() { return Eng.getGraph(); }
+ExplodedGraph &GRBugReporter::getGraph() { return Eng.getGraph(); }
GRStateManager&
GRBugReporter::getStateManager() { return Eng.getStateManager(); }
@@ -1308,9 +1326,8 @@ void BugReporter::FlushReports() {
BugReportEquivClass& EQ = *EI;
FlushReport(EQ);
}
-
- // Delete the BugType object. This will also delete the equivalence
- // classes.
+
+ // Delete the BugType object.
delete BT;
}
@@ -1322,137 +1339,134 @@ void BugReporter::FlushReports() {
// PathDiagnostics generation.
//===----------------------------------------------------------------------===//
-static std::pair<std::pair<ExplodedGraph<GRState>*, NodeBackMap*>,
- std::pair<ExplodedNode<GRState>*, unsigned> >
-MakeReportGraph(const ExplodedGraph<GRState>* G,
- const ExplodedNode<GRState>** NStart,
- const ExplodedNode<GRState>** NEnd) {
-
+static std::pair<std::pair<ExplodedGraph*, NodeBackMap*>,
+ std::pair<ExplodedNode*, unsigned> >
+MakeReportGraph(const ExplodedGraph* G,
+ const ExplodedNode** NStart,
+ const ExplodedNode** NEnd) {
+
// Create the trimmed graph. It will contain the shortest paths from the
- // error nodes to the root. In the new graph we should only have one
+ // error nodes to the root. In the new graph we should only have one
// error node unless there are two or more error nodes with the same minimum
// path length.
- ExplodedGraph<GRState>* GTrim;
- InterExplodedGraphMap<GRState>* NMap;
+ ExplodedGraph* GTrim;
+ InterExplodedGraphMap* NMap;
llvm::DenseMap<const void*, const void*> InverseMap;
llvm::tie(GTrim, NMap) = G->Trim(NStart, NEnd, &InverseMap);
-
+
// Create owning pointers for GTrim and NMap just to ensure that they are
// released when this function exists.
- llvm::OwningPtr<ExplodedGraph<GRState> > AutoReleaseGTrim(GTrim);
- llvm::OwningPtr<InterExplodedGraphMap<GRState> > AutoReleaseNMap(NMap);
-
+ llvm::OwningPtr<ExplodedGraph> AutoReleaseGTrim(GTrim);
+ llvm::OwningPtr<InterExplodedGraphMap> AutoReleaseNMap(NMap);
+
// Find the (first) error node in the trimmed graph. We just need to consult
// the node map (NMap) which maps from nodes in the original graph to nodes
// in the new graph.
- std::queue<const ExplodedNode<GRState>*> WS;
- typedef llvm::DenseMap<const ExplodedNode<GRState>*,unsigned> IndexMapTy;
+ std::queue<const ExplodedNode*> WS;
+ typedef llvm::DenseMap<const ExplodedNode*, unsigned> IndexMapTy;
IndexMapTy IndexMap;
- for (const ExplodedNode<GRState>** I = NStart; I != NEnd; ++I)
- if (const ExplodedNode<GRState> *N = NMap->getMappedNode(*I)) {
+ for (const ExplodedNode** I = NStart; I != NEnd; ++I)
+ if (const ExplodedNode *N = NMap->getMappedNode(*I)) {
unsigned NodeIndex = (I - NStart) / sizeof(*I);
WS.push(N);
IndexMap[*I] = NodeIndex;
}
-
+
assert(!WS.empty() && "No error node found in the trimmed graph.");
// Create a new (third!) graph with a single path. This is the graph
// that will be returned to the caller.
- ExplodedGraph<GRState> *GNew =
- new ExplodedGraph<GRState>(GTrim->getCFG(), GTrim->getCodeDecl(),
- GTrim->getContext());
-
+ ExplodedGraph *GNew = new ExplodedGraph(GTrim->getContext());
+
// Sometimes the trimmed graph can contain a cycle. Perform a reverse BFS
// to the root node, and then construct a new graph that contains only
// a single path.
llvm::DenseMap<const void*,unsigned> Visited;
-
+
unsigned cnt = 0;
- const ExplodedNode<GRState>* Root = 0;
-
+ const ExplodedNode* Root = 0;
+
while (!WS.empty()) {
- const ExplodedNode<GRState>* Node = WS.front();
+ const ExplodedNode* Node = WS.front();
WS.pop();
-
+
if (Visited.find(Node) != Visited.end())
continue;
-
+
Visited[Node] = cnt++;
-
+
if (Node->pred_empty()) {
Root = Node;
break;
}
-
- for (ExplodedNode<GRState>::const_pred_iterator I=Node->pred_begin(),
+
+ for (ExplodedNode::const_pred_iterator I=Node->pred_begin(),
E=Node->pred_end(); I!=E; ++I)
WS.push(*I);
}
-
+
assert(Root);
-
+
// Now walk from the root down the BFS path, always taking the successor
// with the lowest number.
- ExplodedNode<GRState> *Last = 0, *First = 0;
+ ExplodedNode *Last = 0, *First = 0;
NodeBackMap *BM = new NodeBackMap();
unsigned NodeIndex = 0;
-
- for ( const ExplodedNode<GRState> *N = Root ;;) {
+
+ for ( const ExplodedNode *N = Root ;;) {
// Lookup the number associated with the current node.
llvm::DenseMap<const void*,unsigned>::iterator I = Visited.find(N);
assert(I != Visited.end());
-
+
// Create the equivalent node in the new graph with the same state
// and location.
- ExplodedNode<GRState>* NewN =
- GNew->getNode(N->getLocation(), N->getState());
-
+ ExplodedNode* NewN = GNew->getNode(N->getLocation(), N->getState());
+
// Store the mapping to the original node.
llvm::DenseMap<const void*, const void*>::iterator IMitr=InverseMap.find(N);
assert(IMitr != InverseMap.end() && "No mapping to original node.");
- (*BM)[NewN] = (const ExplodedNode<GRState>*) IMitr->second;
-
+ (*BM)[NewN] = (const ExplodedNode*) IMitr->second;
+
// Link up the new node with the previous node.
if (Last)
- NewN->addPredecessor(Last);
-
+ NewN->addPredecessor(Last, *GNew);
+
Last = NewN;
-
+
// Are we at the final node?
IndexMapTy::iterator IMI =
- IndexMap.find((const ExplodedNode<GRState>*)(IMitr->second));
+ IndexMap.find((const ExplodedNode*)(IMitr->second));
if (IMI != IndexMap.end()) {
First = NewN;
NodeIndex = IMI->second;
break;
}
-
+
// Find the next successor node. We choose the node that is marked
// with the lowest DFS number.
- ExplodedNode<GRState>::const_succ_iterator SI = N->succ_begin();
- ExplodedNode<GRState>::const_succ_iterator SE = N->succ_end();
+ ExplodedNode::const_succ_iterator SI = N->succ_begin();
+ ExplodedNode::const_succ_iterator SE = N->succ_end();
N = 0;
-
+
for (unsigned MinVal = 0; SI != SE; ++SI) {
-
+
I = Visited.find(*SI);
-
+
if (I == Visited.end())
continue;
-
+
if (!N || I->second < MinVal) {
N = *SI;
MinVal = I->second;
}
}
-
+
assert(N);
}
-
+
assert(First);
return std::make_pair(std::make_pair(GNew, BM),
@@ -1464,23 +1478,23 @@ MakeReportGraph(const ExplodedGraph<GRState>* G,
static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM) {
typedef std::vector<std::pair<PathDiagnosticMacroPiece*, SourceLocation> >
MacroStackTy;
-
+
typedef std::vector<PathDiagnosticPiece*>
PiecesTy;
-
+
MacroStackTy MacroStack;
PiecesTy Pieces;
-
+
for (PathDiagnostic::iterator I = PD.begin(), E = PD.end(); I!=E; ++I) {
// Get the location of the PathDiagnosticPiece.
- const FullSourceLoc Loc = I->getLocation().asLocation();
-
+ const FullSourceLoc Loc = I->getLocation().asLocation();
+
// Determine the instantiation location, which is the location we group
// related PathDiagnosticPieces.
- SourceLocation InstantiationLoc = Loc.isMacroID() ?
+ SourceLocation InstantiationLoc = Loc.isMacroID() ?
SM.getInstantiationLoc(Loc) :
SourceLocation();
-
+
if (Loc.isFileID()) {
MacroStack.clear();
Pieces.push_back(&*I);
@@ -1488,7 +1502,7 @@ static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM) {
}
assert(Loc.isMacroID());
-
+
// Is the PathDiagnosticPiece within the same macro group?
if (!MacroStack.empty() && InstantiationLoc == MacroStack.back().second) {
MacroStack.back().first->push_back(&*I);
@@ -1502,22 +1516,22 @@ static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM) {
SourceLocation ParentInstantiationLoc = InstantiationLoc.isMacroID() ?
SM.getInstantiationLoc(Loc) :
SourceLocation();
-
+
// Walk the entire macro stack.
while (!MacroStack.empty()) {
if (InstantiationLoc == MacroStack.back().second) {
MacroGroup = MacroStack.back().first;
break;
}
-
+
if (ParentInstantiationLoc == MacroStack.back().second) {
MacroGroup = MacroStack.back().first;
break;
}
-
+
MacroStack.pop_back();
}
-
+
if (!MacroGroup || ParentInstantiationLoc == MacroStack.back().second) {
// Create a new macro group and add it to the stack.
PathDiagnosticMacroPiece *NewGroup = new PathDiagnosticMacroPiece(Loc);
@@ -1528,7 +1542,7 @@ static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM) {
assert(InstantiationLoc.isFileID());
Pieces.push_back(NewGroup);
}
-
+
MacroGroup = NewGroup;
MacroStack.push_back(std::make_pair(MacroGroup, InstantiationLoc));
}
@@ -1536,62 +1550,62 @@ static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM) {
// Finally, add the PathDiagnosticPiece to the group.
MacroGroup->push_back(&*I);
}
-
+
// Now take the pieces and construct a new PathDiagnostic.
PD.resetPath(false);
-
+
for (PiecesTy::iterator I=Pieces.begin(), E=Pieces.end(); I!=E; ++I) {
if (PathDiagnosticMacroPiece *MP=dyn_cast<PathDiagnosticMacroPiece>(*I))
if (!MP->containsEvent()) {
delete MP;
continue;
}
-
+
PD.push_back(*I);
}
}
void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
BugReportEquivClass& EQ) {
-
- std::vector<const ExplodedNode<GRState>*> Nodes;
-
+
+ std::vector<const ExplodedNode*> Nodes;
+
for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I) {
- const ExplodedNode<GRState>* N = I->getEndNode();
+ const ExplodedNode* N = I->getEndNode();
if (N) Nodes.push_back(N);
}
-
+
if (Nodes.empty())
return;
-
+
// Construct a new graph that contains only a single path from the error
- // node to a root.
- const std::pair<std::pair<ExplodedGraph<GRState>*, NodeBackMap*>,
- std::pair<ExplodedNode<GRState>*, unsigned> >&
+ // node to a root.
+ const std::pair<std::pair<ExplodedGraph*, NodeBackMap*>,
+ std::pair<ExplodedNode*, unsigned> >&
GPair = MakeReportGraph(&getGraph(), &Nodes[0], &Nodes[0] + Nodes.size());
-
+
// Find the BugReport with the original location.
BugReport *R = 0;
unsigned i = 0;
for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I, ++i)
if (i == GPair.second.second) { R = *I; break; }
-
+
assert(R && "No original report found for sliced graph.");
-
- llvm::OwningPtr<ExplodedGraph<GRState> > ReportGraph(GPair.first.first);
+
+ llvm::OwningPtr<ExplodedGraph> ReportGraph(GPair.first.first);
llvm::OwningPtr<NodeBackMap> BackMap(GPair.first.second);
- const ExplodedNode<GRState> *N = GPair.second.first;
-
- // Start building the path diagnostic...
+ const ExplodedNode *N = GPair.second.first;
+
+ // Start building the path diagnostic...
PathDiagnosticBuilder PDB(*this, R, BackMap.get(), getPathDiagnosticClient());
-
+
if (PathDiagnosticPiece* Piece = R->getEndPath(PDB, N))
PD.push_back(Piece);
else
return;
-
+
R->registerInitialVisitors(PDB, N);
-
+
switch (PDB.getGenerationScheme()) {
case PathDiagnosticClient::Extensive:
GenerateExtensivePathDiagnostic(PD, PDB, N);
@@ -1606,17 +1620,17 @@ void BugReporter::Register(BugType *BT) {
BugTypes = F.Add(BugTypes, BT);
}
-void BugReporter::EmitReport(BugReport* R) {
+void BugReporter::EmitReport(BugReport* R) {
// Compute the bug report's hash to determine its equivalence class.
llvm::FoldingSetNodeID ID;
R->Profile(ID);
-
- // Lookup the equivance class. If there isn't one, create it.
+
+ // Lookup the equivalence class. If there isn't one, create it.
BugType& BT = R->getBugType();
Register(&BT);
void *InsertPos;
- BugReportEquivClass* EQ = BT.EQClasses.FindNodeOrInsertPos(ID, InsertPos);
-
+ BugReportEquivClass* EQ = BT.EQClasses.FindNodeOrInsertPos(ID, InsertPos);
+
if (!EQ) {
EQ = new BugReportEquivClass(R);
BT.EQClasses.InsertNode(EQ, InsertPos);
@@ -1625,34 +1639,178 @@ void BugReporter::EmitReport(BugReport* R) {
EQ->AddReport(R);
}
+
+//===----------------------------------------------------------------------===//
+// Emitting reports in equivalence classes.
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct VISIBILITY_HIDDEN FRIEC_WLItem {
+ const ExplodedNode *N;
+ ExplodedNode::const_succ_iterator I, E;
+
+ FRIEC_WLItem(const ExplodedNode *n)
+ : N(n), I(N->succ_begin()), E(N->succ_end()) {}
+};
+}
+
+static BugReport *FindReportInEquivalenceClass(BugReportEquivClass& EQ) {
+ BugReportEquivClass::iterator I = EQ.begin(), E = EQ.end();
+ assert(I != E);
+ BugReport *R = *I;
+ BugType& BT = R->getBugType();
+
+ if (!BT.isSuppressOnSink())
+ return R;
+
+ // For bug reports that should be suppressed when all paths are post-dominated
+ // by a sink node, iterate through the reports in the equivalence class
+ // until we find one that isn't post-dominated (if one exists). We use a
+ // DFS traversal of the ExplodedGraph to find a non-sink node. We could write
+ // this as a recursive function, but we don't want to risk blowing out the
+ // stack for very long paths.
+ for (; I != E; ++I) {
+ R = *I;
+ const ExplodedNode *N = R->getEndNode();
+
+ if (!N)
+ continue;
+
+ if (N->isSink()) {
+ assert(false &&
+ "BugType::isSuppressSink() should not be 'true' for sink end nodes");
+ return R;
+ }
+
+ if (N->succ_empty())
+ return R;
+
+ // At this point we know that 'N' is not a sink and it has at least one
+ // successor. Use a DFS worklist to find a non-sink end-of-path node.
+ typedef FRIEC_WLItem WLItem;
+ typedef llvm::SmallVector<WLItem, 10> DFSWorkList;
+ llvm::DenseMap<const ExplodedNode *, unsigned> Visited;
+
+ DFSWorkList WL;
+ WL.push_back(N);
+ Visited[N] = 1;
+
+ while (!WL.empty()) {
+ WLItem &WI = WL.back();
+ assert(!WI.N->succ_empty());
+
+ for (; WI.I != WI.E; ++WI.I) {
+ const ExplodedNode *Succ = *WI.I;
+ // End-of-path node?
+ if (Succ->succ_empty()) {
+ // If we found an end-of-path node that is not a sink, then return
+ // this report.
+ if (!Succ->isSink())
+ return R;
+
+ // Found a sink? Continue on to the next successor.
+ continue;
+ }
+
+ // Mark the successor as visited. If it hasn't been explored,
+ // enqueue it to the DFS worklist.
+ unsigned &mark = Visited[Succ];
+ if (!mark) {
+ mark = 1;
+ WL.push_back(Succ);
+ break;
+ }
+ }
+
+ if (&WL.back() == &WI)
+ WL.pop_back();
+ }
+ }
+
+ // If we reach here, the end nodes for all reports in the equivalence
+ // class are post-dominated by a sink node.
+ return NULL;
+}
+
+
+//===----------------------------------------------------------------------===//
+// DiagnosticCache. This is a hack to cache analyzer diagnostics. It
+// uses global state, which eventually should go elsewhere.
+//===----------------------------------------------------------------------===//
+namespace {
+class VISIBILITY_HIDDEN DiagCacheItem : public llvm::FoldingSetNode {
+ llvm::FoldingSetNodeID ID;
+public:
+ DiagCacheItem(BugReport *R, PathDiagnostic *PD) {
+ ID.AddString(R->getBugType().getName());
+ ID.AddString(R->getBugType().getCategory());
+ ID.AddString(R->getDescription());
+ ID.AddInteger(R->getLocation().getRawEncoding());
+ PD->Profile(ID);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &id) {
+ id = ID;
+ }
+
+ llvm::FoldingSetNodeID &getID() { return ID; }
+};
+}
+
+static bool IsCachedDiagnostic(BugReport *R, PathDiagnostic *PD) {
+ // FIXME: Eventually this diagnostic cache should reside in something
+ // like AnalysisManager instead of being a static variable. This is
+ // really unsafe in the long term.
+ typedef llvm::FoldingSet<DiagCacheItem> DiagnosticCache;
+ static DiagnosticCache DC;
+
+ void *InsertPos;
+ DiagCacheItem *Item = new DiagCacheItem(R, PD);
+
+ if (DC.FindNodeOrInsertPos(Item->getID(), InsertPos)) {
+ delete Item;
+ return true;
+ }
+
+ DC.InsertNode(Item, InsertPos);
+ return false;
+}
+
void BugReporter::FlushReport(BugReportEquivClass& EQ) {
- assert(!EQ.Reports.empty());
- BugReport &R = **EQ.begin();
- PathDiagnosticClient* PD = getPathDiagnosticClient();
+ BugReport *R = FindReportInEquivalenceClass(EQ);
+
+ if (!R)
+ return;
+ PathDiagnosticClient* PD = getPathDiagnosticClient();
+
// FIXME: Make sure we use the 'R' for the path that was actually used.
- // Probably doesn't make a difference in practice.
- BugType& BT = R.getBugType();
-
+ // Probably doesn't make a difference in practice.
+ BugType& BT = R->getBugType();
+
llvm::OwningPtr<PathDiagnostic>
- D(new PathDiagnostic(R.getBugType().getName(),
+ D(new PathDiagnostic(R->getBugType().getName(),
!PD || PD->useVerboseDescription()
- ? R.getDescription() : R.getShortDescription(),
+ ? R->getDescription() : R->getShortDescription(),
BT.getCategory()));
GeneratePathDiagnostic(*D.get(), EQ);
+
+ if (IsCachedDiagnostic(R, D.get()))
+ return;
// Get the meta data.
- std::pair<const char**, const char**> Meta = R.getExtraDescriptiveText();
- for (const char** s = Meta.first; s != Meta.second; ++s) D->addMeta(*s);
+ std::pair<const char**, const char**> Meta = R->getExtraDescriptiveText();
+ for (const char** s = Meta.first; s != Meta.second; ++s)
+ D->addMeta(*s);
// Emit a summary diagnostic to the regular Diagnostics engine.
const SourceRange *Beg = 0, *End = 0;
- R.getRanges(*this, Beg, End);
+ R->getRanges(Beg, End);
Diagnostic& Diag = getDiagnostic();
- FullSourceLoc L(R.getLocation(), getSourceManager());
+ FullSourceLoc L(R->getLocation(), getSourceManager());
unsigned ErrorDiag = Diag.getCustomDiagID(Diagnostic::Warning,
- R.getShortDescription().c_str());
+ R->getShortDescription().c_str());
switch (End-Beg) {
default: assert(0 && "Don't handle this many ranges yet!");
@@ -1665,15 +1823,15 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
// Emit a full diagnostic for the path if we have a PathDiagnosticClient.
if (!PD)
return;
-
- if (D->empty()) {
+
+ if (D->empty()) {
PathDiagnosticPiece* piece =
- new PathDiagnosticEventPiece(L, R.getDescription());
+ new PathDiagnosticEventPiece(L, R->getDescription());
for ( ; Beg != End; ++Beg) piece->addRange(*Beg);
D->push_back(piece);
}
-
+
PD->HandlePathDiagnostic(D.take());
}
@@ -1686,7 +1844,7 @@ void BugReporter::EmitBasicReport(const char* name, const char* str,
void BugReporter::EmitBasicReport(const char* name, const char* category,
const char* str, SourceLocation Loc,
SourceRange* RBeg, unsigned NumRanges) {
-
+
// 'BT' will be owned by BugReporter as soon as we call 'EmitReport'.
BugType *BT = new BugType(name, category);
FullSourceLoc L = getContext().getFullLoc(Loc);
diff --git a/lib/Analysis/BugReporterVisitors.cpp b/lib/Analysis/BugReporterVisitors.cpp
new file mode 100644
index 000000000000..89c9ca10ec57
--- /dev/null
+++ b/lib/Analysis/BugReporterVisitors.cpp
@@ -0,0 +1,349 @@
+// BugReporterVisitors.cpp - Helpers for reporting bugs -----------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of BugReporter "visitors" which can be used to
+// enhance the diagnostics reported for a bug.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Analysis/PathSensitive/BugReporter.h"
+#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/Analysis/PathSensitive/GRState.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+const Stmt *clang::bugreporter::GetDerefExpr(const ExplodedNode *N) {
+ // Pattern match for a few useful cases (do something smarter later):
+ // a[0], p->f, *p
+ const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+
+ if (const UnaryOperator *U = dyn_cast<UnaryOperator>(S)) {
+ if (U->getOpcode() == UnaryOperator::Deref)
+ return U->getSubExpr()->IgnoreParenCasts();
+ }
+ else if (const MemberExpr *ME = dyn_cast<MemberExpr>(S)) {
+ return ME->getBase()->IgnoreParenCasts();
+ }
+ else if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(S)) {
+ // Retrieve the base for arrays since BasicStoreManager doesn't know how
+ // to reason about them.
+ return AE->getBase();
+ }
+
+ return NULL;
+}
+
+const Stmt*
+clang::bugreporter::GetReceiverExpr(const ExplodedNode *N){
+ const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+ if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S))
+ return ME->getReceiver();
+ return NULL;
+}
+
+const Stmt*
+clang::bugreporter::GetDenomExpr(const ExplodedNode *N) {
+ const Stmt *S = N->getLocationAs<PreStmt>()->getStmt();
+ if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(S))
+ return BE->getRHS();
+ return NULL;
+}
+
+const Stmt*
+clang::bugreporter::GetCalleeExpr(const ExplodedNode *N) {
+ // Callee is checked as a PreVisit to the CallExpr.
+ const Stmt *S = N->getLocationAs<PreStmt>()->getStmt();
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S))
+ return CE->getCallee();
+ return NULL;
+}
+
+const Stmt*
+clang::bugreporter::GetRetValExpr(const ExplodedNode *N) {
+ const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+ if (const ReturnStmt *RS = dyn_cast<ReturnStmt>(S))
+ return RS->getRetValue();
+ return NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// Definitions for bug reporter visitors.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VISIBILITY_HIDDEN FindLastStoreBRVisitor : public BugReporterVisitor {
+ const MemRegion *R;
+ SVal V;
+ bool satisfied;
+ const ExplodedNode *StoreSite;
+public:
+ FindLastStoreBRVisitor(SVal v, const MemRegion *r)
+ : R(r), V(v), satisfied(false), StoreSite(0) {}
+
+ PathDiagnosticPiece* VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext& BRC) {
+
+ if (satisfied)
+ return NULL;
+
+ if (!StoreSite) {
+ const ExplodedNode *Node = N, *Last = NULL;
+
+ for ( ; Node ; Last = Node, Node = Node->getFirstPred()) {
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ if (const PostStmt *P = Node->getLocationAs<PostStmt>())
+ if (const DeclStmt *DS = P->getStmtAs<DeclStmt>())
+ if (DS->getSingleDecl() == VR->getDecl()) {
+ Last = Node;
+ break;
+ }
+ }
+
+ if (Node->getState()->getSVal(R) != V)
+ break;
+ }
+
+ if (!Node || !Last) {
+ satisfied = true;
+ return NULL;
+ }
+
+ StoreSite = Last;
+ }
+
+ if (StoreSite != N)
+ return NULL;
+
+ satisfied = true;
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ if (const PostStmt *PS = N->getLocationAs<PostStmt>()) {
+ if (const DeclStmt *DS = PS->getStmtAs<DeclStmt>()) {
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ os << "Variable '" << VR->getDecl()->getNameAsString() << "' ";
+ }
+ else
+ return NULL;
+
+ if (isa<loc::ConcreteInt>(V)) {
+ bool b = false;
+ ASTContext &C = BRC.getASTContext();
+ if (R->isBoundable()) {
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
+ if (TR->getValueType(C)->isObjCObjectPointerType()) {
+ os << "initialized to nil";
+ b = true;
+ }
+ }
+ }
+
+ if (!b)
+ os << "initialized to a null pointer value";
+ }
+ else if (isa<nonloc::ConcreteInt>(V)) {
+ os << "initialized to " << cast<nonloc::ConcreteInt>(V).getValue();
+ }
+ else if (V.isUndef()) {
+ if (isa<VarRegion>(R)) {
+ const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
+ if (VD->getInit())
+ os << "initialized to a garbage value";
+ else
+ os << "declared without an initial value";
+ }
+ }
+ }
+ }
+
+ if (os.str().empty()) {
+ if (isa<loc::ConcreteInt>(V)) {
+ bool b = false;
+ ASTContext &C = BRC.getASTContext();
+ if (R->isBoundable()) {
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
+ if (TR->getValueType(C)->isObjCObjectPointerType()) {
+ os << "nil object reference stored to ";
+ b = true;
+ }
+ }
+ }
+
+ if (!b)
+ os << "Null pointer value stored to ";
+ }
+ else if (V.isUndef()) {
+ os << "Uninitialized value stored to ";
+ }
+ else if (isa<nonloc::ConcreteInt>(V)) {
+ os << "The value " << cast<nonloc::ConcreteInt>(V).getValue()
+ << " is assigned to ";
+ }
+ else
+ return NULL;
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ os << '\'' << VR->getDecl()->getNameAsString() << '\'';
+ }
+ else
+ return NULL;
+ }
+
+ // FIXME: Refactor this into BugReporterContext.
+ const Stmt *S = 0;
+ ProgramPoint P = N->getLocation();
+
+ if (BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ CFGBlock *BSrc = BE->getSrc();
+ S = BSrc->getTerminatorCondition();
+ }
+ else if (PostStmt *PS = dyn_cast<PostStmt>(&P)) {
+ S = PS->getStmt();
+ }
+
+ if (!S)
+ return NULL;
+
+ // Construct a new PathDiagnosticPiece.
+ PathDiagnosticLocation L(S, BRC.getSourceManager());
+ return new PathDiagnosticEventPiece(L, os.str());
+ }
+};
+
+
+static void registerFindLastStore(BugReporterContext& BRC, const MemRegion *R,
+ SVal V) {
+ BRC.addVisitor(new FindLastStoreBRVisitor(V, R));
+}
+
+class VISIBILITY_HIDDEN TrackConstraintBRVisitor : public BugReporterVisitor {
+ DefinedSVal Constraint;
+ const bool Assumption;
+ bool isSatisfied;
+public:
+ TrackConstraintBRVisitor(DefinedSVal constraint, bool assumption)
+ : Constraint(constraint), Assumption(assumption), isSatisfied(false) {}
+
+ PathDiagnosticPiece* VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext& BRC) {
+ if (isSatisfied)
+ return NULL;
+
+ // Check if in the previous state it was feasible for this constraint
+ // to *not* be true.
+ if (PrevN->getState()->Assume(Constraint, !Assumption)) {
+
+ isSatisfied = true;
+
+ // As a sanity check, make sure that the negation of the constraint
+ // was infeasible in the current state. If it is feasible, we somehow
+ // missed the transition point.
+ if (N->getState()->Assume(Constraint, !Assumption))
+ return NULL;
+
+ // We found the transition point for the constraint. We now need to
+ // pretty-print the constraint. (work-in-progress)
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ if (isa<Loc>(Constraint)) {
+ os << "Assuming pointer value is ";
+ os << (Assumption ? "non-null" : "null");
+ }
+
+ if (os.str().empty())
+ return NULL;
+
+ // FIXME: Refactor this into BugReporterContext.
+ const Stmt *S = 0;
+ ProgramPoint P = N->getLocation();
+
+ if (BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ CFGBlock *BSrc = BE->getSrc();
+ S = BSrc->getTerminatorCondition();
+ }
+ else if (PostStmt *PS = dyn_cast<PostStmt>(&P)) {
+ S = PS->getStmt();
+ }
+
+ if (!S)
+ return NULL;
+
+ // Construct a new PathDiagnosticPiece.
+ PathDiagnosticLocation L(S, BRC.getSourceManager());
+ return new PathDiagnosticEventPiece(L, os.str());
+ }
+
+ return NULL;
+ }
+};
+} // end anonymous namespace
+
+static void registerTrackConstraint(BugReporterContext& BRC,
+ DefinedSVal Constraint,
+ bool Assumption) {
+ BRC.addVisitor(new TrackConstraintBRVisitor(Constraint, Assumption));
+}
+
+void clang::bugreporter::registerTrackNullOrUndefValue(BugReporterContext& BRC,
+ const void *data,
+ const ExplodedNode* N) {
+
+ const Stmt *S = static_cast<const Stmt*>(data);
+
+ if (!S)
+ return;
+
+ GRStateManager &StateMgr = BRC.getStateManager();
+ const GRState *state = N->getState();
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ const VarRegion *R =
+ StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext());
+
+ // What did we load?
+ SVal V = state->getSVal(S);
+
+ if (isa<loc::ConcreteInt>(V) || isa<nonloc::ConcreteInt>(V)
+ || V.isUndef()) {
+ registerFindLastStore(BRC, R, V);
+ }
+ }
+ }
+
+ SVal V = state->getSValAsScalarOrLoc(S);
+
+ // Uncomment this to find cases where we aren't properly getting the
+ // base value that was dereferenced.
+ // assert(!V.isUnknownOrUndef());
+
+ // Is it a symbolic value?
+ if (loc::MemRegionVal *L = dyn_cast<loc::MemRegionVal>(&V)) {
+ const SubRegion *R = cast<SubRegion>(L->getRegion());
+ while (R && !isa<SymbolicRegion>(R)) {
+ R = dyn_cast<SubRegion>(R->getSuperRegion());
+ }
+
+ if (R) {
+ assert(isa<SymbolicRegion>(R));
+ registerTrackConstraint(BRC, loc::MemRegionVal(R), false);
+ }
+ }
+}
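
As a rough usage sketch (not taken from this patch), a caller that already has a BugReporterContext and the error node could combine the helpers above as below. The function name addDerefDiagnostics is invented, and having 'BRC' and 'N' in hand is an assumption about the caller; the includes at the top of this file are assumed to be in scope.

    // Hedged sketch: wire the dereferenced subexpression (if one can be
    // pattern-matched) into the null/undef tracking visitor defined above.
    static void addDerefDiagnostics(clang::BugReporterContext &BRC,
                                    const clang::ExplodedNode *N) {
      using namespace clang::bugreporter;
      // GetDerefExpr recognizes *p, p->f, and a[i]; if it finds the base
      // expression, register the visitor that tracks where it became
      // null or undefined along the path.
      if (const clang::Stmt *Deref = GetDerefExpr(N))
        registerTrackNullOrUndefValue(BRC, Deref, N);
    }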
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
new file mode 100644
index 000000000000..7b1d50cb3aee
--- /dev/null
+++ b/lib/Analysis/CFG.cpp
@@ -0,0 +1,2084 @@
+//===--- CFG.cpp - Classes for representing and building CFGs----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CFG and CFGBuilder classes for representing and
+// building Control-Flow Graphs (CFGs) from ASTs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Support/SaveAndRestore.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "llvm/Support/GraphWriter.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Format.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+
+using namespace clang;
+
+namespace {
+
+static SourceLocation GetEndLoc(Decl* D) {
+ if (VarDecl* VD = dyn_cast<VarDecl>(D))
+ if (Expr* Ex = VD->getInit())
+ return Ex->getSourceRange().getEnd();
+
+ return D->getLocation();
+}
+
+/// CFGBuilder - This class implements CFG construction from an AST.
+/// The builder is stateful: an instance of the builder should be used to
+/// construct only a single CFG.
+///
+/// Example usage:
+///
+/// CFGBuilder builder;
+///   CFG* cfg = builder.buildCFG(stmt1, &astContext);
+///
+/// CFG construction is done via a recursive walk of an AST. We visit
+/// the AST in reverse order so that the successor of a basic block is
+/// constructed prior to its predecessor. This allows us to nicely capture
+/// implicit fall-throughs without extra basic blocks.
+///
+class VISIBILITY_HIDDEN CFGBuilder {
+ ASTContext *Context;
+ CFG* cfg;
+
+ CFGBlock* Block;
+ CFGBlock* Succ;
+ CFGBlock* ContinueTargetBlock;
+ CFGBlock* BreakTargetBlock;
+ CFGBlock* SwitchTerminatedBlock;
+ CFGBlock* DefaultCaseBlock;
+
+ // LabelMap records the mapping from Label expressions to their blocks.
+ typedef llvm::DenseMap<LabelStmt*,CFGBlock*> LabelMapTy;
+ LabelMapTy LabelMap;
+
+ // A list of blocks that end with a "goto" that must be backpatched to their
+ // resolved targets upon completion of CFG construction.
+ typedef std::vector<CFGBlock*> BackpatchBlocksTy;
+ BackpatchBlocksTy BackpatchBlocks;
+
+ // A list of labels whose address has been taken (for indirect gotos).
+ typedef llvm::SmallPtrSet<LabelStmt*,5> LabelSetTy;
+ LabelSetTy AddressTakenLabels;
+
+public:
+  explicit CFGBuilder() : cfg(new CFG()), // create a new CFG
+ Block(NULL), Succ(NULL),
+ ContinueTargetBlock(NULL), BreakTargetBlock(NULL),
+ SwitchTerminatedBlock(NULL), DefaultCaseBlock(NULL) {}
+
+ ~CFGBuilder() { delete cfg; }
+
+ // buildCFG - Used by external clients to construct the CFG.
+ CFG* buildCFG(Stmt *Statement, ASTContext *C);
+
+private:
+ // Visitors to walk an AST and construct the CFG.
+ CFGBlock *VisitAddrLabelExpr(AddrLabelExpr *A, bool alwaysAdd);
+ CFGBlock *VisitBinaryOperator(BinaryOperator *B, bool alwaysAdd);
+ CFGBlock *VisitBlockExpr(BlockExpr* E, bool alwaysAdd);
+ CFGBlock *VisitBlockDeclRefExpr(BlockDeclRefExpr* E, bool alwaysAdd);
+ CFGBlock *VisitBreakStmt(BreakStmt *B);
+ CFGBlock *VisitCallExpr(CallExpr *C, bool alwaysAdd);
+ CFGBlock *VisitCaseStmt(CaseStmt *C);
+ CFGBlock *VisitChooseExpr(ChooseExpr *C);
+ CFGBlock *VisitCompoundStmt(CompoundStmt *C);
+ CFGBlock *VisitConditionalOperator(ConditionalOperator *C);
+ CFGBlock *VisitContinueStmt(ContinueStmt *C);
+ CFGBlock *VisitCXXThrowExpr(CXXThrowExpr *T);
+ CFGBlock *VisitDeclStmt(DeclStmt *DS);
+ CFGBlock *VisitDeclSubExpr(Decl* D);
+ CFGBlock *VisitDefaultStmt(DefaultStmt *D);
+ CFGBlock *VisitDoStmt(DoStmt *D);
+ CFGBlock *VisitForStmt(ForStmt *F);
+ CFGBlock *VisitGotoStmt(GotoStmt* G);
+ CFGBlock *VisitIfStmt(IfStmt *I);
+ CFGBlock *VisitIndirectGotoStmt(IndirectGotoStmt *I);
+ CFGBlock *VisitLabelStmt(LabelStmt *L);
+ CFGBlock *VisitObjCAtCatchStmt(ObjCAtCatchStmt *S);
+ CFGBlock *VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S);
+ CFGBlock *VisitObjCAtThrowStmt(ObjCAtThrowStmt *S);
+ CFGBlock *VisitObjCAtTryStmt(ObjCAtTryStmt *S);
+ CFGBlock *VisitObjCForCollectionStmt(ObjCForCollectionStmt *S);
+ CFGBlock *VisitReturnStmt(ReturnStmt* R);
+ CFGBlock *VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E, bool alwaysAdd);
+ CFGBlock *VisitStmtExpr(StmtExpr *S, bool alwaysAdd);
+ CFGBlock *VisitSwitchStmt(SwitchStmt *S);
+ CFGBlock *VisitWhileStmt(WhileStmt *W);
+
+ CFGBlock *Visit(Stmt *S, bool alwaysAdd = false);
+ CFGBlock *VisitStmt(Stmt *S, bool alwaysAdd);
+ CFGBlock *VisitChildren(Stmt* S);
+
+ // NYS == Not Yet Supported
+ CFGBlock* NYS() {
+ badCFG = true;
+ return Block;
+ }
+
+ void autoCreateBlock() { if (!Block) Block = createBlock(); }
+ CFGBlock *createBlock(bool add_successor = true);
+ bool FinishBlock(CFGBlock* B);
+ CFGBlock *addStmt(Stmt *S) { return Visit(S, true); }
+
+ void AppendStmt(CFGBlock *B, Stmt *S) {
+ B->appendStmt(S, cfg->getBumpVectorContext());
+ }
+
+ void AddSuccessor(CFGBlock *B, CFGBlock *S) {
+ B->addSuccessor(S, cfg->getBumpVectorContext());
+ }
+
+ /// TryResult - a class representing a variant over the values
+ /// 'true', 'false', or 'unknown'. This is returned by TryEvaluateBool,
+ /// and is used by the CFGBuilder to decide if a branch condition
+ /// can be decided up front during CFG construction.
+ class TryResult {
+ int X;
+ public:
+ TryResult(bool b) : X(b ? 1 : 0) {}
+ TryResult() : X(-1) {}
+
+ bool isTrue() const { return X == 1; }
+ bool isFalse() const { return X == 0; }
+ bool isKnown() const { return X >= 0; }
+ void negate() {
+ assert(isKnown());
+ X ^= 0x1;
+ }
+ };
+
+  /// TryEvaluateBool - Try to evaluate the Expr and return a known
+  /// true/false TryResult if possible; otherwise return an unknown TryResult.
+ TryResult TryEvaluateBool(Expr *S) {
+ Expr::EvalResult Result;
+ if (!S->isTypeDependent() && !S->isValueDependent() &&
+ S->Evaluate(Result, *Context) && Result.Val.isInt())
+ return Result.Val.getInt().getBoolValue();
+
+ return TryResult();
+ }
+
+ bool badCFG;
+};
+
+// FIXME: Add support for dependent-sized array types in C++?
+// Does it even make sense to build a CFG for an uninstantiated template?
+static VariableArrayType* FindVA(Type* t) {
+ while (ArrayType* vt = dyn_cast<ArrayType>(t)) {
+ if (VariableArrayType* vat = dyn_cast<VariableArrayType>(vt))
+ if (vat->getSizeExpr())
+ return vat;
+
+ t = vt->getElementType().getTypePtr();
+ }
+
+ return 0;
+}
+
+/// buildCFG - Constructs a CFG from an AST (a Stmt*). The AST can represent an
+/// arbitrary statement. Examples include a single expression or a function
+/// body (compound statement). The ownership of the returned CFG is
+/// transferred to the caller. If CFG construction fails, this method returns
+/// NULL.
+CFG* CFGBuilder::buildCFG(Stmt* Statement, ASTContext* C) {
+ Context = C;
+ assert(cfg);
+ if (!Statement)
+ return NULL;
+
+ badCFG = false;
+
+ // Create an empty block that will serve as the exit block for the CFG. Since
+ // this is the first block added to the CFG, it will be implicitly registered
+ // as the exit block.
+ Succ = createBlock();
+ assert(Succ == &cfg->getExit());
+ Block = NULL; // the EXIT block is empty. Create all other blocks lazily.
+
+ // Visit the statements and create the CFG.
+ CFGBlock* B = addStmt(Statement);
+ if (!B) B = Succ;
+
+ if (B) {
+ // Finalize the last constructed block. This usually involves reversing the
+ // order of the statements in the block.
+ if (Block) FinishBlock(B);
+
+ // Backpatch the gotos whose label -> block mappings we didn't know when we
+ // encountered them.
+ for (BackpatchBlocksTy::iterator I = BackpatchBlocks.begin(),
+ E = BackpatchBlocks.end(); I != E; ++I ) {
+
+ CFGBlock* B = *I;
+ GotoStmt* G = cast<GotoStmt>(B->getTerminator());
+ LabelMapTy::iterator LI = LabelMap.find(G->getLabel());
+
+ // If there is no target for the goto, then we are looking at an
+ // incomplete AST. Handle this by not registering a successor.
+ if (LI == LabelMap.end()) continue;
+
+ AddSuccessor(B, LI->second);
+ }
+
+ // Add successors to the Indirect Goto Dispatch block (if we have one).
+ if (CFGBlock* B = cfg->getIndirectGotoBlock())
+ for (LabelSetTy::iterator I = AddressTakenLabels.begin(),
+ E = AddressTakenLabels.end(); I != E; ++I ) {
+
+ // Lookup the target block.
+ LabelMapTy::iterator LI = LabelMap.find(*I);
+
+ // If there is no target block that contains label, then we are looking
+ // at an incomplete AST. Handle this by not registering a successor.
+ if (LI == LabelMap.end()) continue;
+
+ AddSuccessor(B, LI->second);
+ }
+
+ Succ = B;
+ }
+
+ // Create an empty entry block that has no predecessors.
+ cfg->setEntry(createBlock());
+
+ if (badCFG) {
+ delete cfg;
+ cfg = NULL;
+ return NULL;
+ }
+
+ // NULL out cfg so that repeated calls to the builder will fail and that the
+ // ownership of the constructed CFG is passed to the caller.
+ CFG* t = cfg;
+ cfg = NULL;
+ return t;
+}
+
+/// createBlock - Used to lazily create blocks that are connected
+/// to the current (global) successor.
+CFGBlock* CFGBuilder::createBlock(bool add_successor) {
+ CFGBlock* B = cfg->createBlock();
+ if (add_successor && Succ)
+ AddSuccessor(B, Succ);
+ return B;
+}
+
+/// FinishBlock - "Finalize" the block by checking if we have a bad CFG.
+bool CFGBuilder::FinishBlock(CFGBlock* B) {
+ if (badCFG)
+ return false;
+
+ assert(B);
+ return true;
+}
+
+/// Visit - Walk the subtree of a statement and add extra
+/// blocks for ternary operators, &&, and ||. We also process "," and
+/// DeclStmts (which may contain nested control-flow).
+CFGBlock* CFGBuilder::Visit(Stmt * S, bool alwaysAdd) {
+tryAgain:
+ switch (S->getStmtClass()) {
+ default:
+ return VisitStmt(S, alwaysAdd);
+
+ case Stmt::AddrLabelExprClass:
+ return VisitAddrLabelExpr(cast<AddrLabelExpr>(S), alwaysAdd);
+
+ case Stmt::BinaryOperatorClass:
+ return VisitBinaryOperator(cast<BinaryOperator>(S), alwaysAdd);
+
+ case Stmt::BlockExprClass:
+ return VisitBlockExpr(cast<BlockExpr>(S), alwaysAdd);
+
+ case Stmt::BlockDeclRefExprClass:
+ return VisitBlockDeclRefExpr(cast<BlockDeclRefExpr>(S), alwaysAdd);
+
+ case Stmt::BreakStmtClass:
+ return VisitBreakStmt(cast<BreakStmt>(S));
+
+ case Stmt::CallExprClass:
+ return VisitCallExpr(cast<CallExpr>(S), alwaysAdd);
+
+ case Stmt::CaseStmtClass:
+ return VisitCaseStmt(cast<CaseStmt>(S));
+
+ case Stmt::ChooseExprClass:
+ return VisitChooseExpr(cast<ChooseExpr>(S));
+
+ case Stmt::CompoundStmtClass:
+ return VisitCompoundStmt(cast<CompoundStmt>(S));
+
+ case Stmt::ConditionalOperatorClass:
+ return VisitConditionalOperator(cast<ConditionalOperator>(S));
+
+ case Stmt::ContinueStmtClass:
+ return VisitContinueStmt(cast<ContinueStmt>(S));
+
+ case Stmt::DeclStmtClass:
+ return VisitDeclStmt(cast<DeclStmt>(S));
+
+ case Stmt::DefaultStmtClass:
+ return VisitDefaultStmt(cast<DefaultStmt>(S));
+
+ case Stmt::DoStmtClass:
+ return VisitDoStmt(cast<DoStmt>(S));
+
+ case Stmt::ForStmtClass:
+ return VisitForStmt(cast<ForStmt>(S));
+
+ case Stmt::GotoStmtClass:
+ return VisitGotoStmt(cast<GotoStmt>(S));
+
+ case Stmt::IfStmtClass:
+ return VisitIfStmt(cast<IfStmt>(S));
+
+ case Stmt::IndirectGotoStmtClass:
+ return VisitIndirectGotoStmt(cast<IndirectGotoStmt>(S));
+
+ case Stmt::LabelStmtClass:
+ return VisitLabelStmt(cast<LabelStmt>(S));
+
+ case Stmt::ObjCAtCatchStmtClass:
+ return VisitObjCAtCatchStmt(cast<ObjCAtCatchStmt>(S));
+
+ case Stmt::CXXThrowExprClass:
+ return VisitCXXThrowExpr(cast<CXXThrowExpr>(S));
+
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ return VisitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(S));
+
+ case Stmt::ObjCAtThrowStmtClass:
+ return VisitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(S));
+
+ case Stmt::ObjCAtTryStmtClass:
+ return VisitObjCAtTryStmt(cast<ObjCAtTryStmt>(S));
+
+ case Stmt::ObjCForCollectionStmtClass:
+ return VisitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(S));
+
+ case Stmt::ParenExprClass:
+ S = cast<ParenExpr>(S)->getSubExpr();
+ goto tryAgain;
+
+ case Stmt::NullStmtClass:
+ return Block;
+
+ case Stmt::ReturnStmtClass:
+ return VisitReturnStmt(cast<ReturnStmt>(S));
+
+ case Stmt::SizeOfAlignOfExprClass:
+ return VisitSizeOfAlignOfExpr(cast<SizeOfAlignOfExpr>(S), alwaysAdd);
+
+ case Stmt::StmtExprClass:
+ return VisitStmtExpr(cast<StmtExpr>(S), alwaysAdd);
+
+ case Stmt::SwitchStmtClass:
+ return VisitSwitchStmt(cast<SwitchStmt>(S));
+
+ case Stmt::WhileStmtClass:
+ return VisitWhileStmt(cast<WhileStmt>(S));
+ }
+}
+
+CFGBlock *CFGBuilder::VisitStmt(Stmt *S, bool alwaysAdd) {
+ if (alwaysAdd) {
+ autoCreateBlock();
+ AppendStmt(Block, S);
+ }
+
+ return VisitChildren(S);
+}
+
+/// VisitChildren - Visit the children of a Stmt.
+CFGBlock *CFGBuilder::VisitChildren(Stmt* Terminator) {
+ CFGBlock *B = Block;
+ for (Stmt::child_iterator I = Terminator->child_begin(),
+ E = Terminator->child_end(); I != E; ++I) {
+ if (*I) B = Visit(*I);
+ }
+ return B;
+}
+
+CFGBlock *CFGBuilder::VisitAddrLabelExpr(AddrLabelExpr *A, bool alwaysAdd) {
+ AddressTakenLabels.insert(A->getLabel());
+
+ if (alwaysAdd) {
+ autoCreateBlock();
+ AppendStmt(Block, A);
+ }
+
+ return Block;
+}
+
+CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B, bool alwaysAdd) {
+ if (B->isLogicalOp()) { // && or ||
+ CFGBlock* ConfluenceBlock = Block ? Block : createBlock();
+ AppendStmt(ConfluenceBlock, B);
+
+ if (!FinishBlock(ConfluenceBlock))
+ return 0;
+
+ // create the block evaluating the LHS
+ CFGBlock* LHSBlock = createBlock(false);
+ LHSBlock->setTerminator(B);
+
+ // create the block evaluating the RHS
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ CFGBlock* RHSBlock = addStmt(B->getRHS());
+ if (!FinishBlock(RHSBlock))
+ return 0;
+
+ // See if this is a known constant.
+ TryResult KnownVal = TryEvaluateBool(B->getLHS());
+ if (KnownVal.isKnown() && (B->getOpcode() == BinaryOperator::LOr))
+ KnownVal.negate();
+
+ // Now link the LHSBlock with RHSBlock.
+ if (B->getOpcode() == BinaryOperator::LOr) {
+ AddSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
+ AddSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
+ } else {
+ assert (B->getOpcode() == BinaryOperator::LAnd);
+ AddSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
+ AddSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
+ }
+
+ // Generate the blocks for evaluating the LHS.
+ Block = LHSBlock;
+ return addStmt(B->getLHS());
+ }
+ else if (B->getOpcode() == BinaryOperator::Comma) { // ,
+ autoCreateBlock();
+ AppendStmt(Block, B);
+ addStmt(B->getRHS());
+ return addStmt(B->getLHS());
+ }
+
+ return VisitStmt(B, alwaysAdd);
+}
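
A small illustration of the short-circuit shape built above (a sketch only, not part of the patch; the block names are descriptive, not clang's actual block numbering):

    // Self-contained example input:
    int bothPositive(int a, int b) {
      return a > 0 && b > 0;
    }
    // Blocks created by VisitBinaryOperator for the '&&':
    //   [lhs: a > 0] --true--> [rhs: b > 0] --> [confluence: whole && expr]
    //   [lhs: a > 0] --false-------------------> [confluence]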
+
+CFGBlock *CFGBuilder::VisitBlockExpr(BlockExpr* E, bool alwaysAdd) {
+ // FIXME
+ return NYS();
+}
+
+CFGBlock *CFGBuilder::VisitBlockDeclRefExpr(BlockDeclRefExpr* E,
+ bool alwaysAdd) {
+ // FIXME
+ return NYS();
+}
+
+CFGBlock *CFGBuilder::VisitBreakStmt(BreakStmt *B) {
+ // "break" is a control-flow statement. Thus we stop processing the current
+ // block.
+ if (Block && !FinishBlock(Block))
+ return 0;
+
+ // Now create a new block that ends with the break statement.
+ Block = createBlock(false);
+ Block->setTerminator(B);
+
+ // If there is no target for the break, then we are looking at an incomplete
+ // AST. This means that the CFG cannot be constructed.
+ if (BreakTargetBlock)
+ AddSuccessor(Block, BreakTargetBlock);
+ else
+ badCFG = true;
+
+ return Block;
+}
+
+CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, bool alwaysAdd) {
+ // If this is a call to a no-return function, this stops the block here.
+ bool NoReturn = false;
+ if (C->getCallee()->getType().getNoReturnAttr()) {
+ NoReturn = true;
+ }
+
+ if (FunctionDecl *FD = C->getDirectCallee())
+ if (FD->hasAttr<NoReturnAttr>())
+ NoReturn = true;
+
+ if (!NoReturn)
+ return VisitStmt(C, alwaysAdd);
+
+ if (Block && !FinishBlock(Block))
+ return 0;
+
+ // Create new block with no successor for the remaining pieces.
+ Block = createBlock(false);
+ AppendStmt(Block, C);
+
+ // Wire this to the exit block directly.
+ AddSuccessor(Block, &cfg->getExit());
+
+ return VisitChildren(C);
+}
+
+CFGBlock *CFGBuilder::VisitChooseExpr(ChooseExpr *C) {
+ CFGBlock* ConfluenceBlock = Block ? Block : createBlock();
+ AppendStmt(ConfluenceBlock, C);
+ if (!FinishBlock(ConfluenceBlock))
+ return 0;
+
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ CFGBlock* LHSBlock = addStmt(C->getLHS());
+ if (!FinishBlock(LHSBlock))
+ return 0;
+
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ CFGBlock* RHSBlock = addStmt(C->getRHS());
+ if (!FinishBlock(RHSBlock))
+ return 0;
+
+ Block = createBlock(false);
+ // See if this is a known constant.
+ const TryResult& KnownVal = TryEvaluateBool(C->getCond());
+ AddSuccessor(Block, KnownVal.isFalse() ? NULL : LHSBlock);
+ AddSuccessor(Block, KnownVal.isTrue() ? NULL : RHSBlock);
+ Block->setTerminator(C);
+ return addStmt(C->getCond());
+}
+
+
+CFGBlock* CFGBuilder::VisitCompoundStmt(CompoundStmt* C) {
+ CFGBlock* LastBlock = Block;
+
+ for (CompoundStmt::reverse_body_iterator I=C->body_rbegin(), E=C->body_rend();
+ I != E; ++I ) {
+ LastBlock = addStmt(*I);
+
+ if (badCFG)
+ return NULL;
+ }
+ return LastBlock;
+}
+
+CFGBlock *CFGBuilder::VisitConditionalOperator(ConditionalOperator *C) {
+ // Create the confluence block that will "merge" the results of the ternary
+ // expression.
+ CFGBlock* ConfluenceBlock = Block ? Block : createBlock();
+ AppendStmt(ConfluenceBlock, C);
+ if (!FinishBlock(ConfluenceBlock))
+ return 0;
+
+ // Create a block for the LHS expression if there is an LHS expression. A
+ // GCC extension allows LHS to be NULL, causing the condition to be the
+ // value that is returned instead.
+  // e.g.: x ?: y is shorthand for: x ? x : y;
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ CFGBlock* LHSBlock = NULL;
+ if (C->getLHS()) {
+ LHSBlock = addStmt(C->getLHS());
+ if (!FinishBlock(LHSBlock))
+ return 0;
+ Block = NULL;
+ }
+
+ // Create the block for the RHS expression.
+ Succ = ConfluenceBlock;
+ CFGBlock* RHSBlock = addStmt(C->getRHS());
+ if (!FinishBlock(RHSBlock))
+ return 0;
+
+ // Create the block that will contain the condition.
+ Block = createBlock(false);
+
+ // See if this is a known constant.
+ const TryResult& KnownVal = TryEvaluateBool(C->getCond());
+ if (LHSBlock) {
+ AddSuccessor(Block, KnownVal.isFalse() ? NULL : LHSBlock);
+ } else {
+ if (KnownVal.isFalse()) {
+ // If we know the condition is false, add NULL as the successor for
+ // the block containing the condition. In this case, the confluence
+ // block will have just one predecessor.
+ AddSuccessor(Block, 0);
+ assert(ConfluenceBlock->pred_size() == 1);
+ } else {
+ // If we have no LHS expression, add the ConfluenceBlock as a direct
+ // successor for the block containing the condition. Moreover, we need to
+ // reverse the order of the predecessors in the ConfluenceBlock because
+      // the RHSBlock will have been added to the successors already, and we
+      // want the first predecessor to be the block containing the expression
+ // for the case when the ternary expression evaluates to true.
+ AddSuccessor(Block, ConfluenceBlock);
+ assert(ConfluenceBlock->pred_size() == 2);
+ std::reverse(ConfluenceBlock->pred_begin(),
+ ConfluenceBlock->pred_end());
+ }
+ }
+
+ AddSuccessor(Block, KnownVal.isTrue() ? NULL : RHSBlock);
+ Block->setTerminator(C);
+ return addStmt(C->getCond());
+}
+
+CFGBlock *CFGBuilder::VisitDeclStmt(DeclStmt *DS) {
+ autoCreateBlock();
+
+ if (DS->isSingleDecl()) {
+ AppendStmt(Block, DS);
+ return VisitDeclSubExpr(DS->getSingleDecl());
+ }
+
+ CFGBlock *B = 0;
+
+ // FIXME: Add a reverse iterator for DeclStmt to avoid this extra copy.
+ typedef llvm::SmallVector<Decl*,10> BufTy;
+ BufTy Buf(DS->decl_begin(), DS->decl_end());
+
+ for (BufTy::reverse_iterator I = Buf.rbegin(), E = Buf.rend(); I != E; ++I) {
+ // Get the alignment of the new DeclStmt, padding out to >=8 bytes.
+ unsigned A = llvm::AlignOf<DeclStmt>::Alignment < 8
+ ? 8 : llvm::AlignOf<DeclStmt>::Alignment;
+
+ // Allocate the DeclStmt using the BumpPtrAllocator. It will get
+ // automatically freed with the CFG.
+ DeclGroupRef DG(*I);
+ Decl *D = *I;
+ void *Mem = cfg->getAllocator().Allocate(sizeof(DeclStmt), A);
+ DeclStmt *DSNew = new (Mem) DeclStmt(DG, D->getLocation(), GetEndLoc(D));
+
+ // Append the fake DeclStmt to block.
+ AppendStmt(Block, DSNew);
+ B = VisitDeclSubExpr(D);
+ }
+
+ return B;
+}
+
+/// VisitDeclSubExpr - Utility method to add block-level expressions for
+/// initializers in Decls.
+CFGBlock *CFGBuilder::VisitDeclSubExpr(Decl* D) {
+ assert(Block);
+
+ VarDecl *VD = dyn_cast<VarDecl>(D);
+
+ if (!VD)
+ return Block;
+
+ Expr *Init = VD->getInit();
+
+ if (Init) {
+ // Optimization: Don't create separate block-level statements for literals.
+ switch (Init->getStmtClass()) {
+ case Stmt::IntegerLiteralClass:
+ case Stmt::CharacterLiteralClass:
+ case Stmt::StringLiteralClass:
+ break;
+ default:
+ Block = addStmt(Init);
+ }
+ }
+
+ // If the type of VD is a VLA, then we must process its size expressions.
+ for (VariableArrayType* VA = FindVA(VD->getType().getTypePtr()); VA != 0;
+ VA = FindVA(VA->getElementType().getTypePtr()))
+ Block = addStmt(VA->getSizeExpr());
+
+ return Block;
+}
+
+CFGBlock* CFGBuilder::VisitIfStmt(IfStmt* I) {
+ // We may see an if statement in the middle of a basic block, or it may be the
+ // first statement we are processing. In either case, we create a new basic
+ // block. First, we create the blocks for the then...else statements, and
+ // then we create the block containing the if statement. If we were in the
+ // middle of a block, we stop processing that block. That block is then the
+ // implicit successor for the "then" and "else" clauses.
+
+  // The block we were processing is now finished. Make it the successor
+ // block.
+ if (Block) {
+ Succ = Block;
+ if (!FinishBlock(Block))
+ return 0;
+ }
+
+ // Process the false branch.
+ CFGBlock* ElseBlock = Succ;
+
+ if (Stmt* Else = I->getElse()) {
+ SaveAndRestore<CFGBlock*> sv(Succ);
+
+ // NULL out Block so that the recursive call to Visit will
+ // create a new basic block.
+ Block = NULL;
+ ElseBlock = addStmt(Else);
+
+ if (!ElseBlock) // Can occur when the Else body has all NullStmts.
+ ElseBlock = sv.get();
+ else if (Block) {
+ if (!FinishBlock(ElseBlock))
+ return 0;
+ }
+ }
+
+ // Process the true branch.
+ CFGBlock* ThenBlock;
+ {
+ Stmt* Then = I->getThen();
+ assert (Then);
+ SaveAndRestore<CFGBlock*> sv(Succ);
+ Block = NULL;
+ ThenBlock = addStmt(Then);
+
+ if (!ThenBlock) {
+ // We can reach here if the "then" body has all NullStmts.
+ // Create an empty block so we can distinguish between true and false
+ // branches in path-sensitive analyses.
+ ThenBlock = createBlock(false);
+ AddSuccessor(ThenBlock, sv.get());
+ } else if (Block) {
+ if (!FinishBlock(ThenBlock))
+ return 0;
+ }
+ }
+
+ // Now create a new block containing the if statement.
+ Block = createBlock(false);
+
+ // Set the terminator of the new block to the If statement.
+ Block->setTerminator(I);
+
+ // See if this is a known constant.
+ const TryResult &KnownVal = TryEvaluateBool(I->getCond());
+
+ // Now add the successors.
+ AddSuccessor(Block, KnownVal.isFalse() ? NULL : ThenBlock);
+ AddSuccessor(Block, KnownVal.isTrue()? NULL : ElseBlock);
+
+ // Add the condition as the last statement in the new block. This may create
+ // new blocks as the condition may contain control-flow. Any newly created
+  // blocks will be pointed to by "Block".
+ return addStmt(I->getCond());
+}
+
+
+CFGBlock* CFGBuilder::VisitReturnStmt(ReturnStmt* R) {
+ // If we were in the middle of a block we stop processing that block.
+ //
+ // NOTE: If a "return" appears in the middle of a block, this means that the
+ // code afterwards is DEAD (unreachable). We still keep a basic block
+ // for that code; a simple "mark-and-sweep" from the entry block will be
+ // able to report such dead blocks.
+ if (Block)
+ FinishBlock(Block);
+
+ // Create the new block.
+ Block = createBlock(false);
+
+ // The Exit block is the only successor.
+ AddSuccessor(Block, &cfg->getExit());
+
+ // Add the return statement to the block. This may create new blocks if R
+ // contains control-flow (short-circuit operations).
+ return VisitStmt(R, true);
+}
+
+CFGBlock* CFGBuilder::VisitLabelStmt(LabelStmt* L) {
+ // Get the block of the labeled statement. Add it to our map.
+ addStmt(L->getSubStmt());
+ CFGBlock* LabelBlock = Block;
+
+ if (!LabelBlock) // This can happen when the body is empty, i.e.
+    LabelBlock = createBlock(); // scopes that only contain NullStmts.
+
+ assert(LabelMap.find(L) == LabelMap.end() && "label already in map");
+ LabelMap[ L ] = LabelBlock;
+
+ // Labels partition blocks, so this is the end of the basic block we were
+  // processing (L is the block's label). Because this is a label (and we have
+ // already processed the substatement) there is no extra control-flow to worry
+ // about.
+ LabelBlock->setLabel(L);
+ if (!FinishBlock(LabelBlock))
+ return 0;
+
+ // We set Block to NULL to allow lazy creation of a new block (if necessary);
+ Block = NULL;
+
+ // This block is now the implicit successor of other blocks.
+ Succ = LabelBlock;
+
+ return LabelBlock;
+}
+
+CFGBlock* CFGBuilder::VisitGotoStmt(GotoStmt* G) {
+ // Goto is a control-flow statement. Thus we stop processing the current
+ // block and create a new one.
+ if (Block)
+ FinishBlock(Block);
+
+ Block = createBlock(false);
+ Block->setTerminator(G);
+
+ // If we already know the mapping to the label block add the successor now.
+ LabelMapTy::iterator I = LabelMap.find(G->getLabel());
+
+ if (I == LabelMap.end())
+ // We will need to backpatch this block later.
+ BackpatchBlocks.push_back(Block);
+ else
+ AddSuccessor(Block, I->second);
+
+ return Block;
+}
+
+CFGBlock* CFGBuilder::VisitForStmt(ForStmt* F) {
+ CFGBlock* LoopSuccessor = NULL;
+
+ // "for" is a control-flow statement. Thus we stop processing the current
+ // block.
+ if (Block) {
+ if (!FinishBlock(Block))
+ return 0;
+ LoopSuccessor = Block;
+ } else
+ LoopSuccessor = Succ;
+
+ // Because of short-circuit evaluation, the condition of the loop can span
+ // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
+ // evaluate the condition.
+ CFGBlock* ExitConditionBlock = createBlock(false);
+ CFGBlock* EntryConditionBlock = ExitConditionBlock;
+
+ // Set the terminator for the "exit" condition block.
+ ExitConditionBlock->setTerminator(F);
+
+ // Now add the actual condition to the condition block. Because the condition
+ // itself may contain control-flow, new blocks may be created.
+ if (Stmt* C = F->getCond()) {
+ Block = ExitConditionBlock;
+ EntryConditionBlock = addStmt(C);
+ if (Block) {
+ if (!FinishBlock(EntryConditionBlock))
+ return 0;
+ }
+ }
+
+ // The condition block is the implicit successor for the loop body as well as
+ // any code above the loop.
+ Succ = EntryConditionBlock;
+
+ // See if this is a known constant.
+ TryResult KnownVal(true);
+
+ if (F->getCond())
+ KnownVal = TryEvaluateBool(F->getCond());
+
+ // Now create the loop body.
+ {
+ assert (F->getBody());
+
+ // Save the current values for Block, Succ, and continue and break targets
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ),
+ save_continue(ContinueTargetBlock),
+ save_break(BreakTargetBlock);
+
+ // Create a new block to contain the (bottom) of the loop body.
+ Block = NULL;
+
+ if (Stmt* I = F->getInc()) {
+ // Generate increment code in its own basic block. This is the target of
+ // continue statements.
+ Succ = addStmt(I);
+ } else {
+ // No increment code. Create a special, empty, block that is used as the
+ // target block for "looping back" to the start of the loop.
+ assert(Succ == EntryConditionBlock);
+ Succ = createBlock();
+ }
+
+ // Finish up the increment (or empty) block if it hasn't been already.
+ if (Block) {
+ assert(Block == Succ);
+ if (!FinishBlock(Block))
+ return 0;
+ Block = 0;
+ }
+
+ ContinueTargetBlock = Succ;
+
+ // The starting block for the loop increment is the block that should
+ // represent the 'loop target' for looping back to the start of the loop.
+ ContinueTargetBlock->setLoopTarget(F);
+
+ // All breaks should go to the code following the loop.
+ BreakTargetBlock = LoopSuccessor;
+
+ // Now populate the body block, and in the process create new blocks as we
+ // walk the body of the loop.
+ CFGBlock* BodyBlock = addStmt(F->getBody());
+
+ if (!BodyBlock)
+ BodyBlock = ContinueTargetBlock; // can happen for "for (...;...;...) ;"
+ else if (Block && !FinishBlock(BodyBlock))
+ return 0;
+
+ // This new body block is a successor to our "exit" condition block.
+ AddSuccessor(ExitConditionBlock, KnownVal.isFalse() ? NULL : BodyBlock);
+ }
+
+ // Link up the condition block with the code that follows the loop. (the
+ // false branch).
+ AddSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
+
+ // If the loop contains initialization, create a new block for those
+ // statements. This block can also contain statements that precede the loop.
+ if (Stmt* I = F->getInit()) {
+ Block = createBlock();
+ return addStmt(I);
+ } else {
+ // There is no loop initialization. We are thus basically a while loop.
+ // NULL out Block to force lazy block construction.
+ Block = NULL;
+ Succ = EntryConditionBlock;
+ return EntryConditionBlock;
+ }
+}
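
As a concrete illustration of what the routine above builds (a sketch only, not part of the patch; block labels are descriptive and the exact numbering depends on the surrounding code), a simple counted loop yields the edges listed in the comments:

    // Self-contained example input:
    int sumTo(int n) {
      int sum = 0;
      for (int i = 0; i < n; ++i)
        sum += i;
      return sum;
    }
    // Edges wired up by VisitForStmt:
    //   [init: i = 0] -> [cond: i < n]
    //   [cond] --true--> [body: sum += i]
    //   [body] -> [inc: ++i]        (the continue target / loop target)
    //   [inc]  -> [cond]            (loop back)
    //   [cond] --false--> [code after the loop]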
+
+CFGBlock* CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
+ // Objective-C fast enumeration 'for' statements:
+ // http://developer.apple.com/documentation/Cocoa/Conceptual/ObjectiveC
+ //
+ // for ( Type newVariable in collection_expression ) { statements }
+ //
+ // becomes:
+ //
+ // prologue:
+ // 1. collection_expression
+ // T. jump to loop_entry
+ // loop_entry:
+ // 1. side-effects of element expression
+ // 1. ObjCForCollectionStmt [performs binding to newVariable]
+ // T. ObjCForCollectionStmt TB, FB [jumps to TB if newVariable != nil]
+ // TB:
+ // statements
+ // T. jump to loop_entry
+ // FB:
+ // what comes after
+ //
+ // and
+ //
+ // Type existingItem;
+ // for ( existingItem in expression ) { statements }
+ //
+ // becomes:
+ //
+ // the same with newVariable replaced with existingItem; the binding works
+ // the same except that for one ObjCForCollectionStmt::getElement() returns
+ // a DeclStmt and the other returns a DeclRefExpr.
+ //
+
+ CFGBlock* LoopSuccessor = 0;
+
+ if (Block) {
+ if (!FinishBlock(Block))
+ return 0;
+ LoopSuccessor = Block;
+ Block = 0;
+ } else
+ LoopSuccessor = Succ;
+
+ // Build the condition blocks.
+ CFGBlock* ExitConditionBlock = createBlock(false);
+ CFGBlock* EntryConditionBlock = ExitConditionBlock;
+
+ // Set the terminator for the "exit" condition block.
+ ExitConditionBlock->setTerminator(S);
+
+ // The last statement in the block should be the ObjCForCollectionStmt, which
+ // performs the actual binding to 'element' and determines if there are any
+ // more items in the collection.
+ AppendStmt(ExitConditionBlock, S);
+ Block = ExitConditionBlock;
+
+ // Walk the 'element' expression to see if there are any side-effects. We
+  // generate new blocks as necessary. We DON'T add the statement by default to
+ // the CFG unless it contains control-flow.
+ EntryConditionBlock = Visit(S->getElement(), false);
+ if (Block) {
+ if (!FinishBlock(EntryConditionBlock))
+ return 0;
+ Block = 0;
+ }
+
+ // The condition block is the implicit successor for the loop body as well as
+ // any code above the loop.
+ Succ = EntryConditionBlock;
+
+ // Now create the true branch.
+ {
+ // Save the current values for Succ, continue and break targets.
+ SaveAndRestore<CFGBlock*> save_Succ(Succ),
+ save_continue(ContinueTargetBlock), save_break(BreakTargetBlock);
+
+ BreakTargetBlock = LoopSuccessor;
+ ContinueTargetBlock = EntryConditionBlock;
+
+ CFGBlock* BodyBlock = addStmt(S->getBody());
+
+ if (!BodyBlock)
+ BodyBlock = EntryConditionBlock; // can happen for "for (X in Y) ;"
+ else if (Block) {
+ if (!FinishBlock(BodyBlock))
+ return 0;
+ }
+
+ // This new body block is a successor to our "exit" condition block.
+ AddSuccessor(ExitConditionBlock, BodyBlock);
+ }
+
+ // Link up the condition block with the code that follows the loop.
+ // (the false branch).
+ AddSuccessor(ExitConditionBlock, LoopSuccessor);
+
+ // Now create a prologue block to contain the collection expression.
+ Block = createBlock();
+ return addStmt(S->getCollection());
+}
+
+CFGBlock* CFGBuilder::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt* S) {
+ // FIXME: Add locking 'primitives' to CFG for @synchronized.
+
+ // Inline the body.
+ CFGBlock *SyncBlock = addStmt(S->getSynchBody());
+
+ // The sync body starts its own basic block. This makes it a little easier
+ // for diagnostic clients.
+ if (SyncBlock) {
+ if (!FinishBlock(SyncBlock))
+ return 0;
+
+ Block = 0;
+ }
+
+ Succ = SyncBlock;
+
+ // Inline the sync expression.
+ return addStmt(S->getSynchExpr());
+}
+
+CFGBlock* CFGBuilder::VisitObjCAtTryStmt(ObjCAtTryStmt* S) {
+ // FIXME
+ return NYS();
+}
+
+CFGBlock* CFGBuilder::VisitWhileStmt(WhileStmt* W) {
+ CFGBlock* LoopSuccessor = NULL;
+
+ // "while" is a control-flow statement. Thus we stop processing the current
+ // block.
+ if (Block) {
+ if (!FinishBlock(Block))
+ return 0;
+ LoopSuccessor = Block;
+ } else
+ LoopSuccessor = Succ;
+
+ // Because of short-circuit evaluation, the condition of the loop can span
+ // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
+ // evaluate the condition.
+ CFGBlock* ExitConditionBlock = createBlock(false);
+ CFGBlock* EntryConditionBlock = ExitConditionBlock;
+
+ // Set the terminator for the "exit" condition block.
+ ExitConditionBlock->setTerminator(W);
+
+ // Now add the actual condition to the condition block. Because the condition
+ // itself may contain control-flow, new blocks may be created. Thus we update
+ // "Succ" after adding the condition.
+ if (Stmt* C = W->getCond()) {
+ Block = ExitConditionBlock;
+ EntryConditionBlock = addStmt(C);
+ assert(Block == EntryConditionBlock);
+ if (Block) {
+ if (!FinishBlock(EntryConditionBlock))
+ return 0;
+ }
+ }
+
+ // The condition block is the implicit successor for the loop body as well as
+ // any code above the loop.
+ Succ = EntryConditionBlock;
+
+ // See if this is a known constant.
+ const TryResult& KnownVal = TryEvaluateBool(W->getCond());
+
+ // Process the loop body.
+ {
+ assert(W->getBody());
+
+ // Save the current values for Block, Succ, and continue and break targets
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ),
+ save_continue(ContinueTargetBlock),
+ save_break(BreakTargetBlock);
+
+ // Create an empty block to represent the transition block for looping back
+ // to the head of the loop.
+ Block = 0;
+ assert(Succ == EntryConditionBlock);
+ Succ = createBlock();
+ Succ->setLoopTarget(W);
+ ContinueTargetBlock = Succ;
+
+ // All breaks should go to the code following the loop.
+ BreakTargetBlock = LoopSuccessor;
+
+ // NULL out Block to force lazy instantiation of blocks for the body.
+ Block = NULL;
+
+ // Create the body. The returned block is the entry to the loop body.
+ CFGBlock* BodyBlock = addStmt(W->getBody());
+
+ if (!BodyBlock)
+ BodyBlock = ContinueTargetBlock; // can happen for "while(...) ;"
+ else if (Block) {
+ if (!FinishBlock(BodyBlock))
+ return 0;
+ }
+
+ // Add the loop body entry as a successor to the condition.
+ AddSuccessor(ExitConditionBlock, KnownVal.isFalse() ? NULL : BodyBlock);
+ }
+
+ // Link up the condition block with the code that follows the loop. (the
+ // false branch).
+ AddSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
+
+ // There can be no more statements in the condition block since we loop back
+ // to this block. NULL out Block to force lazy creation of another block.
+ Block = NULL;
+
+ // Return the condition block, which is the dominating block for the loop.
+ Succ = EntryConditionBlock;
+ return EntryConditionBlock;
+}
+
+
+CFGBlock *CFGBuilder::VisitObjCAtCatchStmt(ObjCAtCatchStmt* S) {
+  // FIXME: For now we pretend that @catch and the code it contains do not
+  // exist.
+ return Block;
+}
+
+CFGBlock* CFGBuilder::VisitObjCAtThrowStmt(ObjCAtThrowStmt* S) {
+ // FIXME: This isn't complete. We basically treat @throw like a return
+ // statement.
+
+ // If we were in the middle of a block we stop processing that block.
+ if (Block && !FinishBlock(Block))
+ return 0;
+
+ // Create the new block.
+ Block = createBlock(false);
+
+ // The Exit block is the only successor.
+ AddSuccessor(Block, &cfg->getExit());
+
+ // Add the statement to the block. This may create new blocks if S contains
+ // control-flow (short-circuit operations).
+ return VisitStmt(S, true);
+}
+
+CFGBlock* CFGBuilder::VisitCXXThrowExpr(CXXThrowExpr* T) {
+ // If we were in the middle of a block we stop processing that block.
+ if (Block && !FinishBlock(Block))
+ return 0;
+
+ // Create the new block.
+ Block = createBlock(false);
+
+ // The Exit block is the only successor.
+ AddSuccessor(Block, &cfg->getExit());
+
+  // Add the statement to the block. This may create new blocks if T contains
+ // control-flow (short-circuit operations).
+ return VisitStmt(T, true);
+}
+
+CFGBlock *CFGBuilder::VisitDoStmt(DoStmt* D) {
+ CFGBlock* LoopSuccessor = NULL;
+
+ // "do...while" is a control-flow statement. Thus we stop processing the
+ // current block.
+ if (Block) {
+ if (!FinishBlock(Block))
+ return 0;
+ LoopSuccessor = Block;
+ } else
+ LoopSuccessor = Succ;
+
+ // Because of short-circuit evaluation, the condition of the loop can span
+ // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
+ // evaluate the condition.
+ CFGBlock* ExitConditionBlock = createBlock(false);
+ CFGBlock* EntryConditionBlock = ExitConditionBlock;
+
+ // Set the terminator for the "exit" condition block.
+ ExitConditionBlock->setTerminator(D);
+
+ // Now add the actual condition to the condition block. Because the condition
+ // itself may contain control-flow, new blocks may be created.
+ if (Stmt* C = D->getCond()) {
+ Block = ExitConditionBlock;
+ EntryConditionBlock = addStmt(C);
+ if (Block) {
+ if (!FinishBlock(EntryConditionBlock))
+ return 0;
+ }
+ }
+
+ // The condition block is the implicit successor for the loop body.
+ Succ = EntryConditionBlock;
+
+ // See if this is a known constant.
+ const TryResult &KnownVal = TryEvaluateBool(D->getCond());
+
+ // Process the loop body.
+ CFGBlock* BodyBlock = NULL;
+ {
+ assert (D->getBody());
+
+ // Save the current values for Block, Succ, and continue and break targets
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ),
+ save_continue(ContinueTargetBlock),
+ save_break(BreakTargetBlock);
+
+ // All continues within this loop should go to the condition block
+ ContinueTargetBlock = EntryConditionBlock;
+
+ // All breaks should go to the code following the loop.
+ BreakTargetBlock = LoopSuccessor;
+
+ // NULL out Block to force lazy instantiation of blocks for the body.
+ Block = NULL;
+
+ // Create the body. The returned block is the entry to the loop body.
+ BodyBlock = addStmt(D->getBody());
+
+ if (!BodyBlock)
+ BodyBlock = EntryConditionBlock; // can happen for "do ; while(...)"
+ else if (Block) {
+ if (!FinishBlock(BodyBlock))
+ return 0;
+ }
+
+ // Add an intermediate block between the BodyBlock and the
+ // ExitConditionBlock to represent the "loop back" transition. Create an
+ // empty block to represent the transition block for looping back to the
+ // head of the loop.
+ // FIXME: Can we do this more efficiently without adding another block?
+ Block = NULL;
+ Succ = BodyBlock;
+ CFGBlock *LoopBackBlock = createBlock();
+ LoopBackBlock->setLoopTarget(D);
+
+ // Add the loop body entry as a successor to the condition.
+ AddSuccessor(ExitConditionBlock, KnownVal.isFalse() ? NULL : LoopBackBlock);
+ }
+
+ // Link up the condition block with the code that follows the loop.
+ // (the false branch).
+ AddSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
+
+ // There can be no more statements in the body block(s) since we loop back to
+ // the body. NULL out Block to force lazy creation of another block.
+ Block = NULL;
+
+ // Return the loop body, which is the dominating block for the loop.
+ Succ = BodyBlock;
+ return BodyBlock;
+}
+
+CFGBlock* CFGBuilder::VisitContinueStmt(ContinueStmt* C) {
+ // "continue" is a control-flow statement. Thus we stop processing the
+ // current block.
+ if (Block && !FinishBlock(Block))
+ return 0;
+
+ // Now create a new block that ends with the continue statement.
+ Block = createBlock(false);
+ Block->setTerminator(C);
+
+ // If there is no target for the continue, then we are looking at an
+ // incomplete AST. This means the CFG cannot be constructed.
+ if (ContinueTargetBlock)
+ AddSuccessor(Block, ContinueTargetBlock);
+ else
+ badCFG = true;
+
+ return Block;
+}
+
+CFGBlock *CFGBuilder::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E,
+ bool alwaysAdd) {
+
+ if (alwaysAdd) {
+ autoCreateBlock();
+ AppendStmt(Block, E);
+ }
+
+ // VLA types have expressions that must be evaluated.
+ if (E->isArgumentType()) {
+ for (VariableArrayType* VA = FindVA(E->getArgumentType().getTypePtr());
+ VA != 0; VA = FindVA(VA->getElementType().getTypePtr()))
+ addStmt(VA->getSizeExpr());
+ }
+
+ return Block;
+}
+
+/// VisitStmtExpr - Utility method to handle (nested) statement
+/// expressions (a GCC extension).
+CFGBlock* CFGBuilder::VisitStmtExpr(StmtExpr *SE, bool alwaysAdd) {
+ if (alwaysAdd) {
+ autoCreateBlock();
+ AppendStmt(Block, SE);
+ }
+ return VisitCompoundStmt(SE->getSubStmt());
+}
+
+CFGBlock* CFGBuilder::VisitSwitchStmt(SwitchStmt* Terminator) {
+ // "switch" is a control-flow statement. Thus we stop processing the current
+ // block.
+ CFGBlock* SwitchSuccessor = NULL;
+
+ if (Block) {
+ if (!FinishBlock(Block))
+ return 0;
+ SwitchSuccessor = Block;
+ } else SwitchSuccessor = Succ;
+
+ // Save the current "switch" context.
+ SaveAndRestore<CFGBlock*> save_switch(SwitchTerminatedBlock),
+ save_break(BreakTargetBlock),
+ save_default(DefaultCaseBlock);
+
+ // Set the "default" case to be the block after the switch statement. If the
+ // switch statement contains a "default:", this value will be overwritten with
+ // the block for that code.
+ DefaultCaseBlock = SwitchSuccessor;
+
+ // Create a new block that will contain the switch statement.
+ SwitchTerminatedBlock = createBlock(false);
+
+ // Now process the switch body. The code after the switch is the implicit
+ // successor.
+ Succ = SwitchSuccessor;
+ BreakTargetBlock = SwitchSuccessor;
+
+ // When visiting the body, the case statements should automatically get linked
+ // up to the switch. We also don't keep a pointer to the body, since all
+ // control-flow from the switch goes to case/default statements.
+ assert (Terminator->getBody() && "switch must contain a non-NULL body");
+ Block = NULL;
+ CFGBlock *BodyBlock = addStmt(Terminator->getBody());
+ if (Block) {
+ if (!FinishBlock(BodyBlock))
+ return 0;
+ }
+
+ // If we have no "default:" case, the default transition is to the code
+ // following the switch body.
+ AddSuccessor(SwitchTerminatedBlock, DefaultCaseBlock);
+
+ // Add the terminator and condition in the switch block.
+ SwitchTerminatedBlock->setTerminator(Terminator);
+ assert (Terminator->getCond() && "switch condition must be non-NULL");
+ Block = SwitchTerminatedBlock;
+
+ return addStmt(Terminator->getCond());
+}
+
+CFGBlock* CFGBuilder::VisitCaseStmt(CaseStmt* CS) {
+ // CaseStmts are essentially labels, so they are the first statement in a
+ // block.
+
+ if (CS->getSubStmt())
+ addStmt(CS->getSubStmt());
+
+ CFGBlock* CaseBlock = Block;
+ if (!CaseBlock)
+ CaseBlock = createBlock();
+
+  // Case statements partition blocks, so this is the top of the basic block we
+ // were processing (the "case XXX:" is the label).
+ CaseBlock->setLabel(CS);
+
+ if (!FinishBlock(CaseBlock))
+ return 0;
+
+ // Add this block to the list of successors for the block with the switch
+ // statement.
+ assert(SwitchTerminatedBlock);
+ AddSuccessor(SwitchTerminatedBlock, CaseBlock);
+
+ // We set Block to NULL to allow lazy creation of a new block (if necessary)
+ Block = NULL;
+
+ // This block is now the implicit successor of other blocks.
+ Succ = CaseBlock;
+
+ return CaseBlock;
+}
+
+CFGBlock* CFGBuilder::VisitDefaultStmt(DefaultStmt* Terminator) {
+ if (Terminator->getSubStmt())
+ addStmt(Terminator->getSubStmt());
+
+ DefaultCaseBlock = Block;
+
+ if (!DefaultCaseBlock)
+ DefaultCaseBlock = createBlock();
+
+ // Default statements partition blocks, so this is the top of the basic block
+ // we were processing (the "default:" is the label).
+ DefaultCaseBlock->setLabel(Terminator);
+
+ if (!FinishBlock(DefaultCaseBlock))
+ return 0;
+
+ // Unlike case statements, we don't add the default block to the successors
+ // for the switch statement immediately. This is done when we finish
+ // processing the switch statement. This allows for the default case
+ // (including a fall-through to the code after the switch statement) to always
+ // be the last successor of a switch-terminated block.
+
+ // We set Block to NULL to allow lazy creation of a new block (if necessary)
+ Block = NULL;
+
+ // This block is now the implicit successor of other blocks.
+ Succ = DefaultCaseBlock;
+
+ return DefaultCaseBlock;
+}
+
+CFGBlock* CFGBuilder::VisitIndirectGotoStmt(IndirectGotoStmt* I) {
+ // Lazily create the indirect-goto dispatch block if there isn't one already.
+ CFGBlock* IBlock = cfg->getIndirectGotoBlock();
+
+ if (!IBlock) {
+ IBlock = createBlock(false);
+ cfg->setIndirectGotoBlock(IBlock);
+ }
+
+ // IndirectGoto is a control-flow statement. Thus we stop processing the
+ // current block and create a new one.
+ if (Block && !FinishBlock(Block))
+ return 0;
+
+ Block = createBlock(false);
+ Block->setTerminator(I);
+ AddSuccessor(Block, IBlock);
+ return addStmt(I->getTarget());
+}
+
+} // end anonymous namespace
+
+/// createBlock - Constructs and adds a new CFGBlock to the CFG. The block has
+/// no successors or predecessors. If this is the first block created in the
+/// CFG, it is automatically set to be the Entry and Exit of the CFG.
+CFGBlock* CFG::createBlock() {
+ bool first_block = begin() == end();
+
+ // Create the block.
+ CFGBlock *Mem = getAllocator().Allocate<CFGBlock>();
+ new (Mem) CFGBlock(NumBlockIDs++, BlkBVC);
+ Blocks.push_back(Mem, BlkBVC);
+
+ // If this is the first block, set it as the Entry and Exit.
+ if (first_block)
+ Entry = Exit = &back();
+
+ // Return the block.
+ return &back();
+}
+
+/// buildCFG - Constructs a CFG from an AST. Ownership of the returned
+///  CFG is transferred to the caller.
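+///
+///  Typical use (an illustrative sketch; FD, Ctx and LO are assumed to be a
+///  FunctionDecl*, an ASTContext and a LangOptions already in scope):
+///
+///    if (CFG* cfg = CFG::buildCFG(FD->getBody(), &Ctx)) {
+///      cfg->print(llvm::errs(), LO);  // or cfg->viewCFG(LO) in debug builds
+///      delete cfg;                    // the caller owns the returned CFG
+///    }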
+CFG* CFG::buildCFG(Stmt* Statement, ASTContext *C) {
+ CFGBuilder Builder;
+ return Builder.buildCFG(Statement, C);
+}
+
+//===----------------------------------------------------------------------===//
+// CFG: Queries for BlkExprs.
+//===----------------------------------------------------------------------===//
+
+namespace {
+ typedef llvm::DenseMap<const Stmt*,unsigned> BlkExprMapTy;
+}
+
+static void FindSubExprAssignments(Stmt* Terminator,
+                                   llvm::SmallPtrSet<Expr*,50>& Set) {
+ if (!Terminator)
+ return;
+
+  for (Stmt::child_iterator I = Terminator->child_begin(),
+                            E = Terminator->child_end(); I != E; ++I) {
+ if (!*I) continue;
+
+ if (BinaryOperator* B = dyn_cast<BinaryOperator>(*I))
+ if (B->isAssignmentOp()) Set.insert(B);
+
+ FindSubExprAssignments(*I, Set);
+ }
+}
+
+static BlkExprMapTy* PopulateBlkExprMap(CFG& cfg) {
+ BlkExprMapTy* M = new BlkExprMapTy();
+
+ // Look for assignments that are used as subexpressions. These are the only
+ // assignments that we want to *possibly* register as a block-level
+ // expression. Basically, if an assignment occurs both in a subexpression and
+ // at the block-level, it is a block-level expression.
+ llvm::SmallPtrSet<Expr*,50> SubExprAssignments;
+
+ for (CFG::iterator I=cfg.begin(), E=cfg.end(); I != E; ++I)
+ for (CFGBlock::iterator BI=(*I)->begin(), EI=(*I)->end(); BI != EI; ++BI)
+ FindSubExprAssignments(*BI, SubExprAssignments);
+
+ for (CFG::iterator I=cfg.begin(), E=cfg.end(); I != E; ++I) {
+
+    // Iterate over the statements again to identify the Expr* and Stmt* at
+    // the block-level that are block-level expressions.
+
+ for (CFGBlock::iterator BI=(*I)->begin(), EI=(*I)->end(); BI != EI; ++BI)
+ if (Expr* Exp = dyn_cast<Expr>(*BI)) {
+
+ if (BinaryOperator* B = dyn_cast<BinaryOperator>(Exp)) {
+ // Assignment expressions that are not nested within another
+ // expression are really "statements" whose value is never used by
+ // another expression.
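+          // For instance (illustrative): a bare "x = f();" whose value is
+          // never used as part of a larger expression is skipped here and
+          // gets no block-expression number.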
+ if (B->isAssignmentOp() && !SubExprAssignments.count(Exp))
+ continue;
+ } else if (const StmtExpr* Terminator = dyn_cast<StmtExpr>(Exp)) {
+ // Special handling for statement expressions. The last statement in
+ // the statement expression is also a block-level expr.
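+          // For example (illustrative): if "({ f(); g(); })" is itself a
+          // block-level expression, the trailing call "g()" is also given a
+          // block-expression number here.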
+ const CompoundStmt* C = Terminator->getSubStmt();
+ if (!C->body_empty()) {
+ unsigned x = M->size();
+ (*M)[C->body_back()] = x;
+ }
+ }
+
+ unsigned x = M->size();
+ (*M)[Exp] = x;
+ }
+
+ // Look at terminators. The condition is a block-level expression.
+
+ Stmt* S = (*I)->getTerminatorCondition();
+
+ if (S && M->find(S) == M->end()) {
+ unsigned x = M->size();
+ (*M)[S] = x;
+ }
+ }
+
+ return M;
+}
+
+CFG::BlkExprNumTy CFG::getBlkExprNum(const Stmt* S) {
+ assert(S != NULL);
+ if (!BlkExprMap) { BlkExprMap = (void*) PopulateBlkExprMap(*this); }
+
+ BlkExprMapTy* M = reinterpret_cast<BlkExprMapTy*>(BlkExprMap);
+ BlkExprMapTy::iterator I = M->find(S);
+
+ if (I == M->end()) return CFG::BlkExprNumTy();
+ else return CFG::BlkExprNumTy(I->second);
+}
+
+unsigned CFG::getNumBlkExprs() {
+ if (const BlkExprMapTy* M = reinterpret_cast<const BlkExprMapTy*>(BlkExprMap))
+ return M->size();
+ else {
+ // We assume callers interested in the number of BlkExprs will want
+ // the map constructed if it doesn't already exist.
+ BlkExprMap = (void*) PopulateBlkExprMap(*this);
+ return reinterpret_cast<BlkExprMapTy*>(BlkExprMap)->size();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Cleanup: CFG dtor.
+//===----------------------------------------------------------------------===//
+
+CFG::~CFG() {
+ delete reinterpret_cast<const BlkExprMapTy*>(BlkExprMap);
+}
+
+//===----------------------------------------------------------------------===//
+// CFG pretty printing
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class VISIBILITY_HIDDEN StmtPrinterHelper : public PrinterHelper {
+
+ typedef llvm::DenseMap<Stmt*,std::pair<unsigned,unsigned> > StmtMapTy;
+ StmtMapTy StmtMap;
+ signed CurrentBlock;
+ unsigned CurrentStmt;
+ const LangOptions &LangOpts;
+public:
+
+ StmtPrinterHelper(const CFG* cfg, const LangOptions &LO)
+ : CurrentBlock(0), CurrentStmt(0), LangOpts(LO) {
+ for (CFG::const_iterator I = cfg->begin(), E = cfg->end(); I != E; ++I ) {
+ unsigned j = 1;
+ for (CFGBlock::const_iterator BI = (*I)->begin(), BEnd = (*I)->end() ;
+ BI != BEnd; ++BI, ++j )
+ StmtMap[*BI] = std::make_pair((*I)->getBlockID(),j);
+ }
+ }
+
+ virtual ~StmtPrinterHelper() {}
+
+ const LangOptions &getLangOpts() const { return LangOpts; }
+ void setBlockID(signed i) { CurrentBlock = i; }
+ void setStmtID(unsigned i) { CurrentStmt = i; }
+
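+  // handledStmt - If the statement appears as an entry in some CFGBlock and
+  // it is not the very statement currently being printed, emit a
+  // cross-reference of the form "[B<block>.<index>]" and return true;
+  // otherwise return false so that the caller prints the statement itself.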
+ virtual bool handledStmt(Stmt* Terminator, llvm::raw_ostream& OS) {
+
+ StmtMapTy::iterator I = StmtMap.find(Terminator);
+
+ if (I == StmtMap.end())
+ return false;
+
+ if (CurrentBlock >= 0 && I->second.first == (unsigned) CurrentBlock
+ && I->second.second == CurrentStmt)
+ return false;
+
+ OS << "[B" << I->second.first << "." << I->second.second << "]";
+ return true;
+ }
+};
+} // end anonymous namespace
+
+
+namespace {
+class VISIBILITY_HIDDEN CFGBlockTerminatorPrint
+ : public StmtVisitor<CFGBlockTerminatorPrint,void> {
+
+ llvm::raw_ostream& OS;
+ StmtPrinterHelper* Helper;
+ PrintingPolicy Policy;
+
+public:
+ CFGBlockTerminatorPrint(llvm::raw_ostream& os, StmtPrinterHelper* helper,
+ const PrintingPolicy &Policy)
+ : OS(os), Helper(helper), Policy(Policy) {}
+
+ void VisitIfStmt(IfStmt* I) {
+ OS << "if ";
+ I->getCond()->printPretty(OS,Helper,Policy);
+ }
+
+ // Default case.
+ void VisitStmt(Stmt* Terminator) {
+ Terminator->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitForStmt(ForStmt* F) {
+ OS << "for (" ;
+ if (F->getInit()) OS << "...";
+ OS << "; ";
+ if (Stmt* C = F->getCond()) C->printPretty(OS, Helper, Policy);
+ OS << "; ";
+ if (F->getInc()) OS << "...";
+ OS << ")";
+ }
+
+ void VisitWhileStmt(WhileStmt* W) {
+ OS << "while " ;
+ if (Stmt* C = W->getCond()) C->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitDoStmt(DoStmt* D) {
+ OS << "do ... while ";
+ if (Stmt* C = D->getCond()) C->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitSwitchStmt(SwitchStmt* Terminator) {
+ OS << "switch ";
+ Terminator->getCond()->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitConditionalOperator(ConditionalOperator* C) {
+ C->getCond()->printPretty(OS, Helper, Policy);
+ OS << " ? ... : ...";
+ }
+
+ void VisitChooseExpr(ChooseExpr* C) {
+ OS << "__builtin_choose_expr( ";
+ C->getCond()->printPretty(OS, Helper, Policy);
+ OS << " )";
+ }
+
+ void VisitIndirectGotoStmt(IndirectGotoStmt* I) {
+ OS << "goto *";
+ I->getTarget()->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitBinaryOperator(BinaryOperator* B) {
+ if (!B->isLogicalOp()) {
+ VisitExpr(B);
+ return;
+ }
+
+ B->getLHS()->printPretty(OS, Helper, Policy);
+
+ switch (B->getOpcode()) {
+ case BinaryOperator::LOr:
+ OS << " || ...";
+ return;
+ case BinaryOperator::LAnd:
+ OS << " && ...";
+ return;
+ default:
+ assert(false && "Invalid logical operator.");
+ }
+ }
+
+ void VisitExpr(Expr* E) {
+ E->printPretty(OS, Helper, Policy);
+ }
+};
+} // end anonymous namespace
+
+
+static void print_stmt(llvm::raw_ostream &OS, StmtPrinterHelper* Helper,
+ Stmt* Terminator) {
+ if (Helper) {
+ // special printing for statement-expressions.
+ if (StmtExpr* SE = dyn_cast<StmtExpr>(Terminator)) {
+ CompoundStmt* Sub = SE->getSubStmt();
+
+ if (Sub->child_begin() != Sub->child_end()) {
+ OS << "({ ... ; ";
+ Helper->handledStmt(*SE->getSubStmt()->body_rbegin(),OS);
+ OS << " })\n";
+ return;
+ }
+ }
+
+ // special printing for comma expressions.
+ if (BinaryOperator* B = dyn_cast<BinaryOperator>(Terminator)) {
+ if (B->getOpcode() == BinaryOperator::Comma) {
+ OS << "... , ";
+ Helper->handledStmt(B->getRHS(),OS);
+ OS << '\n';
+ return;
+ }
+ }
+ }
+
+ Terminator->printPretty(OS, Helper, PrintingPolicy(Helper->getLangOpts()));
+
+ // Expressions need a newline.
+ if (isa<Expr>(Terminator)) OS << '\n';
+}
+
+static void print_block(llvm::raw_ostream& OS, const CFG* cfg,
+ const CFGBlock& B,
+ StmtPrinterHelper* Helper, bool print_edges) {
+
+ if (Helper) Helper->setBlockID(B.getBlockID());
+
+ // Print the header.
+ OS << "\n [ B" << B.getBlockID();
+
+ if (&B == &cfg->getEntry())
+ OS << " (ENTRY) ]\n";
+ else if (&B == &cfg->getExit())
+ OS << " (EXIT) ]\n";
+ else if (&B == cfg->getIndirectGotoBlock())
+ OS << " (INDIRECT GOTO DISPATCH) ]\n";
+ else
+ OS << " ]\n";
+
+ // Print the label of this block.
+ if (Stmt* Terminator = const_cast<Stmt*>(B.getLabel())) {
+
+ if (print_edges)
+ OS << " ";
+
+ if (LabelStmt* L = dyn_cast<LabelStmt>(Terminator))
+ OS << L->getName();
+ else if (CaseStmt* C = dyn_cast<CaseStmt>(Terminator)) {
+ OS << "case ";
+ C->getLHS()->printPretty(OS, Helper,
+ PrintingPolicy(Helper->getLangOpts()));
+ if (C->getRHS()) {
+ OS << " ... ";
+ C->getRHS()->printPretty(OS, Helper,
+ PrintingPolicy(Helper->getLangOpts()));
+ }
+ } else if (isa<DefaultStmt>(Terminator))
+ OS << "default";
+ else
+ assert(false && "Invalid label statement in CFGBlock.");
+
+ OS << ":\n";
+ }
+
+ // Iterate through the statements in the block and print them.
+ unsigned j = 1;
+
+ for (CFGBlock::const_iterator I = B.begin(), E = B.end() ;
+ I != E ; ++I, ++j ) {
+
+ // Print the statement # in the basic block and the statement itself.
+ if (print_edges)
+ OS << " ";
+
+ OS << llvm::format("%3d", j) << ": ";
+
+ if (Helper)
+ Helper->setStmtID(j);
+
+ print_stmt(OS,Helper,*I);
+ }
+
+ // Print the terminator of this block.
+ if (B.getTerminator()) {
+ if (print_edges)
+ OS << " ";
+
+ OS << " T: ";
+
+ if (Helper) Helper->setBlockID(-1);
+
+ CFGBlockTerminatorPrint TPrinter(OS, Helper,
+ PrintingPolicy(Helper->getLangOpts()));
+ TPrinter.Visit(const_cast<Stmt*>(B.getTerminator()));
+ OS << '\n';
+ }
+
+ if (print_edges) {
+ // Print the predecessors of this block.
+ OS << " Predecessors (" << B.pred_size() << "):";
+ unsigned i = 0;
+
+ for (CFGBlock::const_pred_iterator I = B.pred_begin(), E = B.pred_end();
+ I != E; ++I, ++i) {
+
+      if (i == 8 || (i-8) % 10 == 0)
+ OS << "\n ";
+
+ OS << " B" << (*I)->getBlockID();
+ }
+
+ OS << '\n';
+
+ // Print the successors of this block.
+ OS << " Successors (" << B.succ_size() << "):";
+ i = 0;
+
+ for (CFGBlock::const_succ_iterator I = B.succ_begin(), E = B.succ_end();
+ I != E; ++I, ++i) {
+
+ if (i == 8 || (i-8) % 10 == 0)
+ OS << "\n ";
+
+ if (*I)
+ OS << " B" << (*I)->getBlockID();
+ else
+ OS << " NULL";
+ }
+
+ OS << '\n';
+ }
+}
+
+
+/// dump - A simple pretty printer of a CFG that outputs to stderr.
+void CFG::dump(const LangOptions &LO) const { print(llvm::errs(), LO); }
+
+/// print - A simple pretty printer of a CFG that outputs to an ostream.
+void CFG::print(llvm::raw_ostream &OS, const LangOptions &LO) const {
+ StmtPrinterHelper Helper(this, LO);
+
+ // Print the entry block.
+ print_block(OS, this, getEntry(), &Helper, true);
+
+ // Iterate through the CFGBlocks and print them one by one.
+ for (const_iterator I = Blocks.begin(), E = Blocks.end() ; I != E ; ++I) {
+ // Skip the entry block, because we already printed it.
+ if (&(**I) == &getEntry() || &(**I) == &getExit())
+ continue;
+
+ print_block(OS, this, **I, &Helper, true);
+ }
+
+ // Print the exit block.
+ print_block(OS, this, getExit(), &Helper, true);
+ OS.flush();
+}
+
+/// dump - A simple pretty printer of a CFGBlock that outputs to stderr.
+void CFGBlock::dump(const CFG* cfg, const LangOptions &LO) const {
+ print(llvm::errs(), cfg, LO);
+}
+
+/// print - A simple pretty printer of a CFGBlock that outputs to an ostream.
+/// Generally this will only be called from CFG::print.
+void CFGBlock::print(llvm::raw_ostream& OS, const CFG* cfg,
+ const LangOptions &LO) const {
+ StmtPrinterHelper Helper(cfg, LO);
+ print_block(OS, cfg, *this, &Helper, true);
+}
+
+/// printTerminator - A simple pretty printer of the terminator of a CFGBlock.
+void CFGBlock::printTerminator(llvm::raw_ostream &OS,
+ const LangOptions &LO) const {
+ CFGBlockTerminatorPrint TPrinter(OS, NULL, PrintingPolicy(LO));
+ TPrinter.Visit(const_cast<Stmt*>(getTerminator()));
+}
+
+Stmt* CFGBlock::getTerminatorCondition() {
+
+ if (!Terminator)
+ return NULL;
+
+ Expr* E = NULL;
+
+ switch (Terminator->getStmtClass()) {
+ default:
+ break;
+
+ case Stmt::ForStmtClass:
+ E = cast<ForStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::WhileStmtClass:
+ E = cast<WhileStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::DoStmtClass:
+ E = cast<DoStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::IfStmtClass:
+ E = cast<IfStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::ChooseExprClass:
+ E = cast<ChooseExpr>(Terminator)->getCond();
+ break;
+
+ case Stmt::IndirectGotoStmtClass:
+ E = cast<IndirectGotoStmt>(Terminator)->getTarget();
+ break;
+
+ case Stmt::SwitchStmtClass:
+ E = cast<SwitchStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::ConditionalOperatorClass:
+ E = cast<ConditionalOperator>(Terminator)->getCond();
+ break;
+
+ case Stmt::BinaryOperatorClass: // '&&' and '||'
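+      // For the short-circuit operators the branch is taken on the value of
+      // the left-hand side; the right-hand side is evaluated in a successor
+      // block.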
+ E = cast<BinaryOperator>(Terminator)->getLHS();
+ break;
+
+ case Stmt::ObjCForCollectionStmtClass:
+ return Terminator;
+ }
+
+ return E ? E->IgnoreParens() : NULL;
+}
+
+bool CFGBlock::hasBinaryBranchTerminator() const {
+
+ if (!Terminator)
+ return false;
+
+ switch (Terminator->getStmtClass()) {
+ default:
+ return false;
+
+ case Stmt::ForStmtClass:
+ case Stmt::WhileStmtClass:
+ case Stmt::DoStmtClass:
+ case Stmt::IfStmtClass:
+ case Stmt::ChooseExprClass:
+ case Stmt::ConditionalOperatorClass:
+ case Stmt::BinaryOperatorClass:
+ return true;
+ }
+
+  return false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// CFG Graphviz Visualization
+//===----------------------------------------------------------------------===//
+
+
+#ifndef NDEBUG
+static StmtPrinterHelper* GraphHelper;
+#endif
+
+void CFG::viewCFG(const LangOptions &LO) const {
+#ifndef NDEBUG
+ StmtPrinterHelper H(this, LO);
+ GraphHelper = &H;
+ llvm::ViewGraph(this,"CFG");
+ GraphHelper = NULL;
+#endif
+}
+
+namespace llvm {
+template<>
+struct DOTGraphTraits<const CFG*> : public DefaultDOTGraphTraits {
+ static std::string getNodeLabel(const CFGBlock* Node, const CFG* Graph,
+ bool ShortNames) {
+
+#ifndef NDEBUG
+ std::string OutSStr;
+ llvm::raw_string_ostream Out(OutSStr);
+ print_block(Out,Graph, *Node, GraphHelper, false);
+ std::string& OutStr = Out.str();
+
+ if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
+
+ // Process string output to make it nicer...
+ for (unsigned i = 0; i != OutStr.length(); ++i)
+ if (OutStr[i] == '\n') { // Left justify
+ OutStr[i] = '\\';
+ OutStr.insert(OutStr.begin()+i+1, 'l');
+ }
+
+ return OutStr;
+#else
+ return "";
+#endif
+ }
+};
+} // end namespace llvm
diff --git a/lib/Analysis/CFRefCount.cpp b/lib/Analysis/CFRefCount.cpp
index 3cca482633ca..9b6125705d9a 100644
--- a/lib/Analysis/CFRefCount.cpp
+++ b/lib/Analysis/CFRefCount.cpp
@@ -22,7 +22,7 @@
#include "clang/Analysis/PathSensitive/BugReporter.h"
#include "clang/Analysis/PathSensitive/SymbolManager.h"
#include "clang/Analysis/PathSensitive/GRTransferFuncs.h"
-#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclObjC.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
@@ -44,7 +44,7 @@ using namespace clang;
// MemoryMgmt/Tasks/MemoryManagementRules.html
//
// "You take ownership of an object if you create it using a method whose name
-// begins with “alloc” or “new” or contains “copy” (for example, alloc,
+// begins with "alloc" or "new" or contains "copy" (for example, alloc,
// newObject, or mutableCopy), or if you send it a retain message. You are
// responsible for relinquishing ownership of objects you own using release
// or autorelease. Any other time you receive an object, you must
@@ -62,8 +62,8 @@ static inline bool isWordEnd(char ch, char prev, char next) {
|| (isupper(prev) && isupper(ch) && islower(next)) // XXCreate
|| !isalpha(ch);
}
-
-static inline const char* parseWord(const char* s) {
+
+static inline const char* parseWord(const char* s) {
char ch = *s, prev = '\0';
assert(ch != '\0');
char next = *(s+1);
@@ -77,18 +77,18 @@ static inline const char* parseWord(const char* s) {
static NamingConvention deriveNamingConvention(Selector S) {
IdentifierInfo *II = S.getIdentifierInfoForSlot(0);
-
+
if (!II)
return NoConvention;
-
+
const char *s = II->getName();
-
+
// A method/function name may contain a prefix. We don't know if it is there,
// however, until we encounter the first '_'.
bool InPossiblePrefix = true;
bool AtBeginning = true;
NamingConvention C = NoConvention;
-
+
while (*s != '\0') {
// Skip '_'.
if (*s == '_') {
@@ -103,24 +103,24 @@ static NamingConvention deriveNamingConvention(Selector S) {
++s;
continue;
}
-
+
// Skip numbers, ':', etc.
if (!isalpha(*s)) {
++s;
continue;
}
-
+
const char *wordEnd = parseWord(s);
assert(wordEnd > s);
unsigned len = wordEnd - s;
-
+
switch (len) {
default:
break;
case 3:
// Methods starting with 'new' follow the create rule.
if (AtBeginning && StringsEqualNoCase("new", s, len))
- C = CreateRule;
+ C = CreateRule;
break;
case 4:
// Methods starting with 'alloc' or contain 'copy' follow the
@@ -136,7 +136,7 @@ static NamingConvention deriveNamingConvention(Selector S) {
C = CreateRule;
break;
}
-
+
// If we aren't in the prefix and have a derived convention then just
// return it now.
if (!InPossiblePrefix && C != NoConvention)
@@ -156,10 +156,10 @@ static bool followsFundamentalRule(Selector S) {
}
static const ObjCMethodDecl*
-ResolveToInterfaceMethodDecl(const ObjCMethodDecl *MD) {
+ResolveToInterfaceMethodDecl(const ObjCMethodDecl *MD) {
ObjCInterfaceDecl *ID =
const_cast<ObjCInterfaceDecl*>(MD->getClassInterface());
-
+
return MD->isInstanceMethod()
? ID->lookupInstanceMethod(MD->getSelector())
: ID->lookupClassMethod(MD->getSelector());
@@ -167,22 +167,23 @@ ResolveToInterfaceMethodDecl(const ObjCMethodDecl *MD) {
namespace {
class VISIBILITY_HIDDEN GenericNodeBuilder {
- GRStmtNodeBuilder<GRState> *SNB;
+ GRStmtNodeBuilder *SNB;
Stmt *S;
const void *tag;
- GREndPathNodeBuilder<GRState> *ENB;
+ GREndPathNodeBuilder *ENB;
public:
- GenericNodeBuilder(GRStmtNodeBuilder<GRState> &snb, Stmt *s,
+ GenericNodeBuilder(GRStmtNodeBuilder &snb, Stmt *s,
const void *t)
: SNB(&snb), S(s), tag(t), ENB(0) {}
- GenericNodeBuilder(GREndPathNodeBuilder<GRState> &enb)
+
+ GenericNodeBuilder(GREndPathNodeBuilder &enb)
: SNB(0), S(0), tag(0), ENB(&enb) {}
-
- ExplodedNode<GRState> *MakeNode(const GRState *state,
- ExplodedNode<GRState> *Pred) {
+
+ ExplodedNode *MakeNode(const GRState *state, ExplodedNode *Pred) {
if (SNB)
- return SNB->generateNode(PostStmt(S, tag), state, Pred);
-
+ return SNB->generateNode(PostStmt(S, Pred->getLocationContext(), tag),
+ state, Pred);
+
assert(ENB);
return ENB->generateNode(state, Pred);
}
@@ -210,16 +211,16 @@ static inline Selector GetUnarySelector(const char* name, ASTContext& Ctx) {
static bool hasPrefix(const char* s, const char* prefix) {
if (!prefix)
return true;
-
+
char c = *s;
char cP = *prefix;
-
+
while (c != '\0' && cP != '\0') {
if (c != cP) break;
c = *(++s);
cP = *(++prefix);
}
-
+
return cP == '\0';
}
@@ -230,14 +231,14 @@ static bool hasSuffix(const char* s, const char* suffix) {
static bool isRefType(QualType RetTy, const char* prefix,
ASTContext* Ctx = 0, const char* name = 0) {
-
+
// Recursively walk the typedef stack, allowing typedefs of reference types.
while (1) {
if (TypedefType* TD = dyn_cast<TypedefType>(RetTy.getTypePtr())) {
const char* TDName = TD->getDecl()->getIdentifier()->getName();
if (hasPrefix(TDName, prefix) && hasSuffix(TDName, "Ref"))
return true;
-
+
RetTy = TD->getDecl()->getUnderlyingType();
continue;
}
@@ -248,7 +249,7 @@ static bool isRefType(QualType RetTy, const char* prefix,
return false;
// Is the type void*?
- const PointerType* PT = RetTy->getAsPointerType();
+ const PointerType* PT = RetTy->getAs<PointerType>();
if (!(PT->getPointeeType().getUnqualifiedType() == Ctx->VoidTy))
return false;
@@ -281,14 +282,14 @@ typedef llvm::ImmutableMap<unsigned,ArgEffect> ArgEffects;
namespace {
/// RetEffect is used to summarize a function/method call's behavior with
-/// respect to its return value.
+/// respect to its return value.
class VISIBILITY_HIDDEN RetEffect {
public:
enum Kind { NoRet, Alias, OwnedSymbol, OwnedAllocatedSymbol,
NotOwnedSymbol, GCNotOwnedSymbol, ReceiverAlias,
OwnedWhenTrackedReceiver };
-
- enum ObjKind { CF, ObjC, AnyObj };
+
+ enum ObjKind { CF, ObjC, AnyObj };
private:
Kind K;
@@ -297,124 +298,124 @@ private:
RetEffect(Kind k, unsigned idx = 0) : K(k), O(AnyObj), index(idx) {}
RetEffect(Kind k, ObjKind o) : K(k), O(o), index(0) {}
-
+
public:
Kind getKind() const { return K; }
ObjKind getObjKind() const { return O; }
-
- unsigned getIndex() const {
+
+ unsigned getIndex() const {
assert(getKind() == Alias);
return index;
}
-
+
bool isOwned() const {
return K == OwnedSymbol || K == OwnedAllocatedSymbol ||
K == OwnedWhenTrackedReceiver;
}
-
+
static RetEffect MakeOwnedWhenTrackedReceiver() {
return RetEffect(OwnedWhenTrackedReceiver, ObjC);
}
-
+
static RetEffect MakeAlias(unsigned Idx) {
return RetEffect(Alias, Idx);
}
static RetEffect MakeReceiverAlias() {
return RetEffect(ReceiverAlias);
- }
+ }
static RetEffect MakeOwned(ObjKind o, bool isAllocated = false) {
return RetEffect(isAllocated ? OwnedAllocatedSymbol : OwnedSymbol, o);
- }
+ }
static RetEffect MakeNotOwned(ObjKind o) {
return RetEffect(NotOwnedSymbol, o);
}
static RetEffect MakeGCNotOwned() {
return RetEffect(GCNotOwnedSymbol, ObjC);
}
-
+
static RetEffect MakeNoRet() {
return RetEffect(NoRet);
}
-
+
void Profile(llvm::FoldingSetNodeID& ID) const {
ID.AddInteger((unsigned)K);
ID.AddInteger((unsigned)O);
ID.AddInteger(index);
}
};
-
-
+
+
class VISIBILITY_HIDDEN RetainSummary {
/// Args - an ordered vector of (index, ArgEffect) pairs, where index
/// specifies the argument (starting from 0). This can be sparsely
/// populated; arguments with no entry in Args use 'DefaultArgEffect'.
ArgEffects Args;
-
+
/// DefaultArgEffect - The default ArgEffect to apply to arguments that
/// do not have an entry in Args.
ArgEffect DefaultArgEffect;
-
+
/// Receiver - If this summary applies to an Objective-C message expression,
/// this is the effect applied to the state of the receiver.
ArgEffect Receiver;
-
+
/// Ret - The effect on the return value. Used to indicate if the
/// function/method call returns a new tracked symbol, returns an
/// alias of one of the arguments in the call, and so on.
RetEffect Ret;
-
+
/// EndPath - Indicates that execution of this method/function should
/// terminate the simulation of a path.
bool EndPath;
-
+
public:
RetainSummary(ArgEffects A, RetEffect R, ArgEffect defaultEff,
ArgEffect ReceiverEff, bool endpath = false)
: Args(A), DefaultArgEffect(defaultEff), Receiver(ReceiverEff), Ret(R),
- EndPath(endpath) {}
-
+ EndPath(endpath) {}
+
/// getArg - Return the argument effect on the argument specified by
/// idx (starting from 0).
ArgEffect getArg(unsigned idx) const {
if (const ArgEffect *AE = Args.lookup(idx))
return *AE;
-
+
return DefaultArgEffect;
}
-
+
/// setDefaultArgEffect - Set the default argument effect.
void setDefaultArgEffect(ArgEffect E) {
DefaultArgEffect = E;
}
-
+
/// setArg - Set the argument effect on the argument specified by idx.
void setArgEffect(ArgEffects::Factory& AF, unsigned idx, ArgEffect E) {
Args = AF.Add(Args, idx, E);
}
-
+
/// getRetEffect - Returns the effect on the return value of the call.
RetEffect getRetEffect() const { return Ret; }
-
+
/// setRetEffect - Set the effect of the return value of the call.
void setRetEffect(RetEffect E) { Ret = E; }
-
+
/// isEndPath - Returns true if executing the given method/function should
/// terminate the path.
bool isEndPath() const { return EndPath; }
-
+
/// getReceiverEffect - Returns the effect on the receiver of the call.
/// This is only meaningful if the summary applies to an ObjCMessageExpr*.
ArgEffect getReceiverEffect() const { return Receiver; }
-
+
/// setReceiverEffect - Set the effect on the receiver of the call.
void setReceiverEffect(ArgEffect E) { Receiver = E; }
-
+
typedef ArgEffects::iterator ExprIterator;
-
+
ExprIterator begin_args() const { return Args.begin(); }
ExprIterator end_args() const { return Args.end(); }
-
+
static void Profile(llvm::FoldingSetNodeID& ID, ArgEffects A,
RetEffect RetEff, ArgEffect DefaultEff,
ArgEffect ReceiverEff, bool EndPath) {
@@ -424,7 +425,7 @@ public:
ID.AddInteger((unsigned) ReceiverEff);
ID.AddInteger((unsigned) EndPath);
}
-
+
void Profile(llvm::FoldingSetNodeID& ID) const {
Profile(ID, Args, Ret, DefaultArgEffect, Receiver, EndPath);
}
@@ -439,7 +440,7 @@ namespace {
class VISIBILITY_HIDDEN ObjCSummaryKey {
IdentifierInfo* II;
Selector S;
-public:
+public:
ObjCSummaryKey(IdentifierInfo* ii, Selector s)
: II(ii), S(s) {}
@@ -448,10 +449,10 @@ public:
ObjCSummaryKey(const ObjCInterfaceDecl* d, IdentifierInfo *ii, Selector s)
: II(d ? d->getIdentifier() : ii), S(s) {}
-
+
ObjCSummaryKey(Selector s)
: II(0), S(s) {}
-
+
IdentifierInfo* getIdentifier() const { return II; }
Selector getSelector() const { return S; }
};
@@ -463,58 +464,56 @@ template <> struct DenseMapInfo<ObjCSummaryKey> {
return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getEmptyKey(),
DenseMapInfo<Selector>::getEmptyKey());
}
-
+
static inline ObjCSummaryKey getTombstoneKey() {
return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getTombstoneKey(),
- DenseMapInfo<Selector>::getTombstoneKey());
+ DenseMapInfo<Selector>::getTombstoneKey());
}
-
+
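+  // The two masks below select disjoint bits, so the identifier and the
+  // selector contribute to different bit positions of the combined hash.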
static unsigned getHashValue(const ObjCSummaryKey &V) {
return (DenseMapInfo<IdentifierInfo*>::getHashValue(V.getIdentifier())
- & 0x88888888)
+ & 0x88888888)
| (DenseMapInfo<Selector>::getHashValue(V.getSelector())
& 0x55555555);
}
-
+
static bool isEqual(const ObjCSummaryKey& LHS, const ObjCSummaryKey& RHS) {
return DenseMapInfo<IdentifierInfo*>::isEqual(LHS.getIdentifier(),
RHS.getIdentifier()) &&
DenseMapInfo<Selector>::isEqual(LHS.getSelector(),
RHS.getSelector());
}
-
+
static bool isPod() {
return DenseMapInfo<ObjCInterfaceDecl*>::isPod() &&
DenseMapInfo<Selector>::isPod();
}
};
} // end llvm namespace
-
+
namespace {
class VISIBILITY_HIDDEN ObjCSummaryCache {
typedef llvm::DenseMap<ObjCSummaryKey, RetainSummary*> MapTy;
MapTy M;
public:
ObjCSummaryCache() {}
-
- typedef MapTy::iterator iterator;
-
- iterator find(const ObjCInterfaceDecl* D, IdentifierInfo *ClsName,
+
+ RetainSummary* find(const ObjCInterfaceDecl* D, IdentifierInfo *ClsName,
Selector S) {
// Lookup the method using the decl for the class @interface. If we
// have no decl, lookup using the class name.
return D ? find(D, S) : find(ClsName, S);
}
-
- iterator find(const ObjCInterfaceDecl* D, Selector S) {
+
+ RetainSummary* find(const ObjCInterfaceDecl* D, Selector S) {
// Do a lookup with the (D,S) pair. If we find a match return
// the summary.
ObjCSummaryKey K(D, S);
MapTy::iterator I = M.find(K);
-
+
if (I != M.end() || !D)
- return I;
-
+      return I == M.end() ? NULL : I->second;
+
// Walk the super chain. If we find a hit with a parent, we'll end
// up returning that summary. We actually allow that key (null,S), as
// we cache summaries for the null ObjCInterfaceDecl* to allow us to
@@ -524,62 +523,62 @@ public:
for (ObjCInterfaceDecl* C=D->getSuperClass() ;; C=C->getSuperClass()) {
if ((I = M.find(ObjCSummaryKey(C, S))) != M.end())
break;
-
+
if (!C)
- return I;
+ return NULL;
}
-
- // Cache the summary with original key to make the next lookup faster
+
+ // Cache the summary with original key to make the next lookup faster
// and return the iterator.
- M[K] = I->second;
- return I;
+ RetainSummary *Summ = I->second;
+ M[K] = Summ;
+ return Summ;
}
-
- iterator find(Expr* Receiver, Selector S) {
+
+ RetainSummary* find(Expr* Receiver, Selector S) {
return find(getReceiverDecl(Receiver), S);
}
-
- iterator find(IdentifierInfo* II, Selector S) {
+
+ RetainSummary* find(IdentifierInfo* II, Selector S) {
// FIXME: Class method lookup.  Right now we don't have a good way
// of going between IdentifierInfo* and the class hierarchy.
- iterator I = M.find(ObjCSummaryKey(II, S));
- return I == M.end() ? M.find(ObjCSummaryKey(S)) : I;
+ MapTy::iterator I = M.find(ObjCSummaryKey(II, S));
+
+ if (I == M.end())
+ I = M.find(ObjCSummaryKey(S));
+
+ return I == M.end() ? NULL : I->second;
}
-
- ObjCInterfaceDecl* getReceiverDecl(Expr* E) {
-
- const PointerType* PT = E->getType()->getAsPointerType();
- if (!PT) return 0;
-
- ObjCInterfaceType* OI = dyn_cast<ObjCInterfaceType>(PT->getPointeeType());
- if (!OI) return 0;
-
- return OI ? OI->getDecl() : 0;
+
+ const ObjCInterfaceDecl* getReceiverDecl(Expr* E) {
+ if (const ObjCObjectPointerType* PT =
+ E->getType()->getAs<ObjCObjectPointerType>())
+ return PT->getInterfaceDecl();
+
+ return NULL;
}
-
- iterator end() { return M.end(); }
-
+
RetainSummary*& operator[](ObjCMessageExpr* ME) {
-
+
Selector S = ME->getSelector();
-
+
if (Expr* Receiver = ME->getReceiver()) {
- ObjCInterfaceDecl* OD = getReceiverDecl(Receiver);
+ const ObjCInterfaceDecl* OD = getReceiverDecl(Receiver);
return OD ? M[ObjCSummaryKey(OD->getIdentifier(), S)] : M[S];
}
-
+
return M[ObjCSummaryKey(ME->getClassName(), S)];
}
-
+
RetainSummary*& operator[](ObjCSummaryKey K) {
return M[K];
}
-
+
RetainSummary*& operator[](Selector S) {
return M[ ObjCSummaryKey(S) ];
}
-};
+};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
@@ -592,29 +591,29 @@ class VISIBILITY_HIDDEN RetainSummaryManager {
//==-----------------------------------------------------------------==//
// Typedefs.
//==-----------------------------------------------------------------==//
-
+
typedef llvm::DenseMap<FunctionDecl*, RetainSummary*>
FuncSummariesTy;
-
+
typedef ObjCSummaryCache ObjCMethodSummariesTy;
-
+
//==-----------------------------------------------------------------==//
// Data.
//==-----------------------------------------------------------------==//
-
+
/// Ctx - The ASTContext object for the analyzed ASTs.
ASTContext& Ctx;
/// CFDictionaryCreateII - An IdentifierInfo* representing the identifier
/// "CFDictionaryCreate".
IdentifierInfo* CFDictionaryCreateII;
-
+
/// GCEnabled - Records whether or not the analyzed code runs in GC mode.
const bool GCEnabled;
-
+
/// FuncSummaries - A map from FunctionDecls to summaries.
- FuncSummariesTy FuncSummaries;
-
+ FuncSummariesTy FuncSummaries;
+
/// ObjCClassMethodSummaries - A map from selectors (for instance methods)
/// to summaries.
ObjCMethodSummariesTy ObjCClassMethodSummaries;
@@ -625,34 +624,34 @@ class VISIBILITY_HIDDEN RetainSummaryManager {
/// BPAlloc - A BumpPtrAllocator used for allocating summaries, ArgEffects,
/// and all other data used by the checker.
llvm::BumpPtrAllocator BPAlloc;
-
+
/// AF - A factory for ArgEffects objects.
- ArgEffects::Factory AF;
-
+ ArgEffects::Factory AF;
+
/// ScratchArgs - A holding buffer for constructing ArgEffects.
ArgEffects ScratchArgs;
-
+
/// ObjCAllocRetE - Default return effect for methods returning Objective-C
/// objects.
RetEffect ObjCAllocRetE;
- /// ObjCInitRetE - Default return effect for init methods returning Objective-C
- /// objects.
+ /// ObjCInitRetE - Default return effect for init methods returning
+ /// Objective-C objects.
RetEffect ObjCInitRetE;
-
+
RetainSummary DefaultSummary;
RetainSummary* StopSummary;
-
+
//==-----------------------------------------------------------------==//
// Methods.
//==-----------------------------------------------------------------==//
-
+
/// getArgEffects - Returns a persistent ArgEffects object based on the
/// data in ScratchArgs.
ArgEffects getArgEffects();
- enum UnaryFuncKind { cfretain, cfrelease, cfmakecollectable };
-
+ enum UnaryFuncKind { cfretain, cfrelease, cfmakecollectable };
+
public:
RetEffect getObjAllocRetEffect() const { return ObjCAllocRetE; }
@@ -660,13 +659,13 @@ public:
RetainSummary *Summ = (RetainSummary*) BPAlloc.Allocate<RetainSummary>();
return new (Summ) RetainSummary(DefaultSummary);
}
-
+
RetainSummary* getUnarySummary(const FunctionType* FT, UnaryFuncKind func);
-
+
RetainSummary* getCFSummaryCreateRule(FunctionDecl* FD);
- RetainSummary* getCFSummaryGetRule(FunctionDecl* FD);
+ RetainSummary* getCFSummaryGetRule(FunctionDecl* FD);
RetainSummary* getCFCreateGetRuleSummary(FunctionDecl* FD, const char* FName);
-
+
RetainSummary* getPersistentSummary(ArgEffects AE, RetEffect RetEff,
ArgEffect ReceiverEff = DoNothing,
ArgEffect DefaultEff = MayEscape,
@@ -677,36 +676,36 @@ public:
ArgEffect DefaultEff = MayEscape) {
return getPersistentSummary(getArgEffects(), RE, ReceiverEff, DefaultEff);
}
-
+
RetainSummary *getPersistentStopSummary() {
if (StopSummary)
return StopSummary;
-
+
StopSummary = getPersistentSummary(RetEffect::MakeNoRet(),
StopTracking, StopTracking);
return StopSummary;
- }
+ }
RetainSummary *getInitMethodSummary(QualType RetTy);
void InitializeClassMethodSummaries();
void InitializeMethodSummaries();
-
+
bool isTrackedObjCObjectType(QualType T);
bool isTrackedCFObjectType(QualType T);
-
+
private:
-
+
void addClsMethSummary(IdentifierInfo* ClsII, Selector S,
RetainSummary* Summ) {
ObjCClassMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
}
-
+
void addNSObjectClsMethSummary(Selector S, RetainSummary *Summ) {
ObjCClassMethodSummaries[S] = Summ;
}
-
+
void addNSObjectMethSummary(Selector S, RetainSummary *Summ) {
ObjCMethodSummaries[S] = Summ;
}
@@ -717,43 +716,43 @@ private:
Selector S = GetNullarySelector(nullaryName, Ctx);
ObjCClassMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
}
-
+
void addInstMethSummary(const char* Cls, const char* nullaryName,
RetainSummary *Summ) {
IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
Selector S = GetNullarySelector(nullaryName, Ctx);
ObjCMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
}
-
+
Selector generateSelector(va_list argp) {
llvm::SmallVector<IdentifierInfo*, 10> II;
while (const char* s = va_arg(argp, const char*))
II.push_back(&Ctx.Idents.get(s));
- return Ctx.Selectors.getSelector(II.size(), &II[0]);
+ return Ctx.Selectors.getSelector(II.size(), &II[0]);
}
-
+
void addMethodSummary(IdentifierInfo *ClsII, ObjCMethodSummariesTy& Summaries,
RetainSummary* Summ, va_list argp) {
Selector S = generateSelector(argp);
Summaries[ObjCSummaryKey(ClsII, S)] = Summ;
}
-
+
void addInstMethSummary(const char* Cls, RetainSummary* Summ, ...) {
va_list argp;
va_start(argp, Summ);
addMethodSummary(&Ctx.Idents.get(Cls), ObjCMethodSummaries, Summ, argp);
- va_end(argp);
+ va_end(argp);
}
-
+
void addClsMethSummary(const char* Cls, RetainSummary* Summ, ...) {
va_list argp;
va_start(argp, Summ);
addMethodSummary(&Ctx.Idents.get(Cls),ObjCClassMethodSummaries, Summ, argp);
va_end(argp);
}
-
+
void addClsMethSummary(IdentifierInfo *II, RetainSummary* Summ, ...) {
va_list argp;
va_start(argp, Summ);
@@ -770,9 +769,9 @@ private:
addMethodSummary(&Ctx.Idents.get(Cls), ObjCMethodSummaries, Summ, argp);
va_end(argp);
}
-
+
public:
-
+
RetainSummaryManager(ASTContext& ctx, bool gcenabled)
: Ctx(ctx),
CFDictionaryCreateII(&ctx.Idents.get("CFDictionaryCreate")),
@@ -790,17 +789,17 @@ public:
InitializeClassMethodSummaries();
InitializeMethodSummaries();
}
-
+
~RetainSummaryManager();
-
- RetainSummary* getSummary(FunctionDecl* FD);
-
+
+ RetainSummary* getSummary(FunctionDecl* FD);
+
RetainSummary* getInstanceMethodSummary(ObjCMessageExpr* ME,
const ObjCInterfaceDecl* ID) {
return getInstanceMethodSummary(ME->getSelector(), ME->getClassName(),
- ID, ME->getMethodDecl(), ME->getType());
+ ID, ME->getMethodDecl(), ME->getType());
}
-
+
RetainSummary* getInstanceMethodSummary(Selector S, IdentifierInfo *ClsName,
const ObjCInterfaceDecl* ID,
const ObjCMethodDecl *MD,
@@ -810,7 +809,7 @@ public:
const ObjCInterfaceDecl *ID,
const ObjCMethodDecl *MD,
QualType RetTy);
-
+
RetainSummary *getClassMethodSummary(ObjCMessageExpr *ME) {
return getClassMethodSummary(ME->getSelector(), ME->getClassName(),
ME->getClassInfo().first,
@@ -825,17 +824,17 @@ public:
Selector S = MD->getSelector();
IdentifierInfo *ClsName = ID->getIdentifier();
QualType ResultTy = MD->getResultType();
-
- // Resolve the method decl last.
+
+ // Resolve the method decl last.
if (const ObjCMethodDecl *InterfaceMD = ResolveToInterfaceMethodDecl(MD))
MD = InterfaceMD;
-
+
if (MD->isInstanceMethod())
return getInstanceMethodSummary(S, ClsName, ID, MD, ResultTy);
else
return getClassMethodSummary(S, ClsName, ID, MD, ResultTy);
}
-
+
RetainSummary* getCommonMethodSummary(const ObjCMethodDecl* MD,
Selector S, QualType RetTy);
@@ -846,14 +845,14 @@ public:
const FunctionDecl *FD);
bool isGCEnabled() const { return GCEnabled; }
-
+
RetainSummary *copySummary(RetainSummary *OldSumm) {
RetainSummary *Summ = (RetainSummary*) BPAlloc.Allocate<RetainSummary>();
new (Summ) RetainSummary(*OldSumm);
return Summ;
- }
+ }
};
-
+
} // end anonymous namespace
//===----------------------------------------------------------------------===//
@@ -872,7 +871,7 @@ RetainSummary*
RetainSummaryManager::getPersistentSummary(ArgEffects AE, RetEffect RetEff,
ArgEffect ReceiverEff,
ArgEffect DefaultEff,
- bool isEndPath) {
+ bool isEndPath) {
// Create the summary and return it.
RetainSummary *Summ = (RetainSummary*) BPAlloc.Allocate<RetainSummary>();
new (Summ) RetainSummary(AE, RetEff, DefaultEff, ReceiverEff, isEndPath);
@@ -884,36 +883,35 @@ RetainSummaryManager::getPersistentSummary(ArgEffects AE, RetEffect RetEff,
//===----------------------------------------------------------------------===//
bool RetainSummaryManager::isTrackedObjCObjectType(QualType Ty) {
- if (!Ctx.isObjCObjectPointerType(Ty))
+ if (!Ty->isObjCObjectPointerType())
return false;
- // We assume that id<..>, id, and "Class" all represent tracked objects.
- const PointerType *PT = Ty->getAsPointerType();
- if (PT == 0)
+ const ObjCObjectPointerType *PT = Ty->getAs<ObjCObjectPointerType>();
+
+ // Can be true for objects with the 'NSObject' attribute.
+ if (!PT)
return true;
-
- const ObjCInterfaceType *OT = PT->getPointeeType()->getAsObjCInterfaceType();
// We assume that id<..>, id, and "Class" all represent tracked objects.
- if (!OT)
+ if (PT->isObjCIdType() || PT->isObjCQualifiedIdType() ||
+ PT->isObjCClassType())
return true;
-
- // Does the interface subclass NSObject?
- // FIXME: We can memoize here if this gets too expensive.
- ObjCInterfaceDecl* ID = OT->getDecl();
+
+ // Does the interface subclass NSObject?
+ // FIXME: We can memoize here if this gets too expensive.
+ const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
// Assume that anything declared with a forward declaration and no
// @interface subclasses NSObject.
if (ID->isForwardDecl())
return true;
-
- IdentifierInfo* NSObjectII = &Ctx.Idents.get("NSObject");
+ IdentifierInfo* NSObjectII = &Ctx.Idents.get("NSObject");
for ( ; ID ; ID = ID->getSuperClass())
if (ID->getIdentifier() == NSObjectII)
return true;
-
+
return false;
}
@@ -947,38 +945,44 @@ RetainSummary* RetainSummaryManager::getSummary(FunctionDecl* FD) {
// No summary? Generate one.
RetainSummary *S = 0;
-
+
do {
// We generate "stop" summaries for implicitly defined functions.
if (FD->isImplicit()) {
S = getPersistentStopSummary();
break;
}
-
- // [PR 3337] Use 'getAsFunctionType' to strip away any typedefs on the
+
+ // [PR 3337] Use 'getAs<FunctionType>' to strip away any typedefs on the
// function's type.
- const FunctionType* FT = FD->getType()->getAsFunctionType();
+ const FunctionType* FT = FD->getType()->getAs<FunctionType>();
const char* FName = FD->getIdentifier()->getName();
-
+
// Strip away preceding '_'.  Doing this here will affect all the checks
// down below.
while (*FName == '_') ++FName;
-
+
// Inspect the result type.
QualType RetTy = FT->getResultType();
-
+
// FIXME: This should all be refactored into a chain of "summary lookup"
// filters.
- assert (ScratchArgs.isEmpty());
+ assert(ScratchArgs.isEmpty());
switch (strlen(FName)) {
default: break;
-
+ case 14:
+ if (!memcmp(FName, "pthread_create", 14)) {
+ // Part of: <rdar://problem/7299394>. This will be addressed
+ // better with IPA.
+ S = getPersistentStopSummary();
+ }
+ break;
case 17:
// Handle: id NSMakeCollectable(CFTypeRef)
if (!memcmp(FName, "NSMakeCollectable", 17)) {
- S = (RetTy == Ctx.getObjCIdType())
+ S = (RetTy->isObjCIdType())
? getUnarySummary(FT, cfmakecollectable)
: getPersistentStopSummary();
}
@@ -1005,10 +1009,10 @@ RetainSummary* RetainSummaryManager::getSummary(FunctionDecl* FD) {
// Part of <rdar://problem/6961230>. (IOKit)
// This should be addressed using an API table.
ScratchArgs = AF.Add(ScratchArgs, 2, DecRef);
- S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
}
break;
-
+
case 25:
if (!memcmp(FName, "IORegistryEntryIDMatching", 25)) {
// Part of <rdar://problem/6961230>. (IOKit)
@@ -1017,13 +1021,13 @@ RetainSummary* RetainSummaryManager::getSummary(FunctionDecl* FD) {
DoNothing, DoNothing);
}
break;
-
+
case 26:
if (!memcmp(FName, "IOOpenFirmwarePathMatching", 26)) {
// Part of <rdar://problem/6961230>. (IOKit)
// This should be addressed using an API table.
S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
- DoNothing, DoNothing);
+ DoNothing, DoNothing);
}
break;
@@ -1032,7 +1036,7 @@ RetainSummary* RetainSummaryManager::getSummary(FunctionDecl* FD) {
// Part of <rdar://problem/6961230>.
// This should be addressed using an API table.
ScratchArgs = AF.Add(ScratchArgs, 1, DecRef);
- S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
}
break;
@@ -1042,20 +1046,43 @@ RetainSummary* RetainSummaryManager::getSummary(FunctionDecl* FD) {
// This should be addressed using an API table.  This strcmp is also
// a little gross, but there is no need to super optimize here.
ScratchArgs = AF.Add(ScratchArgs, 1, DecRef);
- S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing,
+ DoNothing);
+ }
+ else if (!memcmp(FName, "CVPixelBufferCreateWithBytes", 28)) {
+ // FIXES: <rdar://problem/7283567>
+ // Eventually this can be improved by recognizing that the pixel
+ // buffer passed to CVPixelBufferCreateWithBytes is released via
+ // a callback and doing full IPA to make sure this is done correctly.
+ ScratchArgs = AF.Add(ScratchArgs, 7, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing,
+ DoNothing);
}
break;
-
+
case 32:
if (!memcmp(FName, "IOServiceAddMatchingNotification", 32)) {
// Part of <rdar://problem/6961230>.
// This should be addressed using an API table.
ScratchArgs = AF.Add(ScratchArgs, 2, DecRef);
- S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ }
+ break;
+
+ case 34:
+ if (!memcmp(FName, "CVPixelBufferCreateWithPlanarBytes", 34)) {
+ // FIXES: <rdar://problem/7283567>
+ // Eventually this can be improved by recognizing that the pixel
+ // buffer passed to CVPixelBufferCreateWithPlanarBytes is released
+ // via a callback and doing full IPA to make sure this is done
+ // correctly.
+ ScratchArgs = AF.Add(ScratchArgs, 12, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing,
+ DoNothing);
}
break;
}
-
+
// Did we get a summary?
if (S)
break;
@@ -1065,7 +1092,7 @@ RetainSummary* RetainSummaryManager::getSummary(FunctionDecl* FD) {
#if 0
// Handle: NSDeallocateObject(id anObject);
// This method does allow 'nil' (although we don't check it now).
- if (strcmp(FName, "NSDeallocateObject") == 0) {
+ if (strcmp(FName, "NSDeallocateObject") == 0) {
return RetTy == Ctx.VoidTy
? getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, Dealloc)
: getPersistentStopSummary();
@@ -1079,7 +1106,7 @@ RetainSummary* RetainSummaryManager::getSummary(FunctionDecl* FD) {
S = getUnarySummary(FT, cfretain);
else if (strstr(FName, "MakeCollectable"))
S = getUnarySummary(FT, cfmakecollectable);
- else
+ else
S = getCFCreateGetRuleSummary(FD, FName);
break;
@@ -1102,7 +1129,7 @@ RetainSummary* RetainSummaryManager::getSummary(FunctionDecl* FD) {
S = getCFCreateGetRuleSummary(FD, FName);
break;
}
-
+
break;
}
@@ -1114,7 +1141,7 @@ RetainSummary* RetainSummaryManager::getSummary(FunctionDecl* FD) {
FName += 4;
else
FName += 2;
-
+
if (isRelease(FD, FName))
S = getUnarySummary(FT, cfrelease);
else {
@@ -1124,48 +1151,50 @@ RetainSummary* RetainSummaryManager::getSummary(FunctionDecl* FD) {
// and that ownership cannot be transferred. While this is technically
// correct, many methods allow a tracked object to escape. For example:
//
- // CFMutableDictionaryRef x = CFDictionaryCreateMutable(...);
+ // CFMutableDictionaryRef x = CFDictionaryCreateMutable(...);
// CFDictionaryAddValue(y, key, x);
- // CFRelease(x);
+ // CFRelease(x);
// ... it is okay to use 'x' since 'y' has a reference to it
//
// We handle this and similar cases with the following heuristic.  If the
- // function name contains "InsertValue", "SetValue" or "AddValue" then
- // we assume that arguments may "escape."
- //
+ // function name contains "InsertValue", "SetValue", "AddValue",
+ // "AppendValue", or "SetAttribute", then we assume that arguments may
+ // "escape." This means that something else holds on to the object,
+  // allowing it to be used even after its local retain count drops to 0.
ArgEffect E = (CStrInCStrNoCase(FName, "InsertValue") ||
CStrInCStrNoCase(FName, "AddValue") ||
CStrInCStrNoCase(FName, "SetValue") ||
- CStrInCStrNoCase(FName, "AppendValue"))
+ CStrInCStrNoCase(FName, "AppendValue") ||
+ CStrInCStrNoCase(FName, "SetAttribute"))
? MayEscape : DoNothing;
-
+
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, E);
}
}
}
while (0);
-
+
if (!S)
S = getDefaultSummary();
// Annotations override defaults.
assert(S);
updateSummaryFromAnnotations(*S, FD);
-
+
FuncSummaries[FD] = S;
- return S;
+ return S;
}
RetainSummary*
RetainSummaryManager::getCFCreateGetRuleSummary(FunctionDecl* FD,
const char* FName) {
-
+
if (strstr(FName, "Create") || strstr(FName, "Copy"))
return getCFSummaryCreateRule(FD);
-
+
if (strstr(FName, "Get"))
return getCFSummaryGetRule(FD);
-
+
return getDefaultSummary();
}
@@ -1178,27 +1207,27 @@ RetainSummaryManager::getUnarySummary(const FunctionType* FT,
const FunctionProtoType* FTP = dyn_cast<FunctionProtoType>(FT);
if (!FTP || FTP->getNumArgs() != 1)
return getPersistentStopSummary();
-
+
assert (ScratchArgs.isEmpty());
-
+
switch (func) {
case cfretain: {
ScratchArgs = AF.Add(ScratchArgs, 0, IncRef);
return getPersistentSummary(RetEffect::MakeAlias(0),
DoNothing, DoNothing);
}
-
+
case cfrelease: {
ScratchArgs = AF.Add(ScratchArgs, 0, DecRef);
return getPersistentSummary(RetEffect::MakeNoRet(),
DoNothing, DoNothing);
}
-
+
case cfmakecollectable: {
ScratchArgs = AF.Add(ScratchArgs, 0, MakeCollectable);
- return getPersistentSummary(RetEffect::MakeAlias(0),DoNothing, DoNothing);
+ return getPersistentSummary(RetEffect::MakeAlias(0),DoNothing, DoNothing);
}
-
+
default:
assert (false && "Not a supported unary function.");
return getDefaultSummary();
@@ -1207,17 +1236,17 @@ RetainSummaryManager::getUnarySummary(const FunctionType* FT,
RetainSummary* RetainSummaryManager::getCFSummaryCreateRule(FunctionDecl* FD) {
assert (ScratchArgs.isEmpty());
-
+
if (FD->getIdentifier() == CFDictionaryCreateII) {
ScratchArgs = AF.Add(ScratchArgs, 1, DoNothingByRef);
ScratchArgs = AF.Add(ScratchArgs, 2, DoNothingByRef);
}
-
+
return getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
}
RetainSummary* RetainSummaryManager::getCFSummaryGetRule(FunctionDecl* FD) {
- assert (ScratchArgs.isEmpty());
+ assert (ScratchArgs.isEmpty());
return getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::CF),
DoNothing, DoNothing);
}
@@ -1228,12 +1257,12 @@ RetainSummary* RetainSummaryManager::getCFSummaryGetRule(FunctionDecl* FD) {
RetainSummary*
RetainSummaryManager::getInitMethodSummary(QualType RetTy) {
- assert(ScratchArgs.isEmpty());
+ assert(ScratchArgs.isEmpty());
// 'init' methods conceptually return a newly allocated object and claim
- // the receiver.
+ // the receiver.
if (isTrackedObjCObjectType(RetTy) || isTrackedCFObjectType(RetTy))
return getPersistentSummary(ObjCInitRetE, DecRefMsg);
-
+
return getDefaultSummary();
}
@@ -1244,7 +1273,7 @@ RetainSummaryManager::updateSummaryFromAnnotations(RetainSummary &Summ,
return;
QualType RetTy = FD->getResultType();
-
+
// Determine if there is a special return effect for this method.
if (isTrackedObjCObjectType(RetTy)) {
if (FD->getAttr<NSReturnsRetainedAttr>()) {
@@ -1254,7 +1283,7 @@ RetainSummaryManager::updateSummaryFromAnnotations(RetainSummary &Summ,
Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
}
}
- else if (RetTy->getAsPointerType()) {
+ else if (RetTy->getAs<PointerType>()) {
if (FD->getAttr<CFReturnsRetainedAttr>()) {
Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
}
@@ -1267,15 +1296,23 @@ RetainSummaryManager::updateSummaryFromAnnotations(RetainSummary &Summ,
if (!MD)
return;
+ bool isTrackedLoc = false;
+
// Determine if there is a special return effect for this method.
if (isTrackedObjCObjectType(MD->getResultType())) {
if (MD->getAttr<NSReturnsRetainedAttr>()) {
Summ.setRetEffect(ObjCAllocRetE);
+ return;
}
- else if (MD->getAttr<CFReturnsRetainedAttr>()) {
- Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
- }
+
+ isTrackedLoc = true;
}
+
+ if (!isTrackedLoc)
+ isTrackedLoc = MD->getResultType()->getAs<PointerType>() != NULL;
+
+ if (isTrackedLoc && MD->getAttr<CFReturnsRetainedAttr>())
+ Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
}
RetainSummary*
@@ -1296,10 +1333,10 @@ RetainSummaryManager::getCommonMethodSummary(const ObjCMethodDecl* MD,
ScratchArgs = AF.Add(ScratchArgs, i, StopTracking);
}
}
-
+
// Any special effect for the receiver?
ArgEffect ReceiverEff = DoNothing;
-
+
// If one of the arguments in the selector has the keyword 'delegate' we
// should stop tracking the reference count for the receiver. This is
// because the reference count is quite possibly handled by a delegate
@@ -1309,29 +1346,29 @@ RetainSummaryManager::getCommonMethodSummary(const ObjCMethodDecl* MD,
assert(!str.empty());
if (CStrInCStrNoCase(&str[0], "delegate:")) ReceiverEff = StopTracking;
}
-
+
// Look for methods that return an owned object.
- if (isTrackedObjCObjectType(RetTy)) {
+ if (isTrackedObjCObjectType(RetTy)) {
// EXPERIMENTAL: Assume the Cocoa conventions for all objects returned
// by instance methods.
RetEffect E = followsFundamentalRule(S)
? ObjCAllocRetE : RetEffect::MakeNotOwned(RetEffect::ObjC);
-
- return getPersistentSummary(E, ReceiverEff, MayEscape);
+
+ return getPersistentSummary(E, ReceiverEff, MayEscape);
}
-
+
// Look for methods that return an owned core foundation object.
if (isTrackedCFObjectType(RetTy)) {
RetEffect E = followsFundamentalRule(S)
? RetEffect::MakeOwned(RetEffect::CF, true)
: RetEffect::MakeNotOwned(RetEffect::CF);
-
+
return getPersistentSummary(E, ReceiverEff, MayEscape);
}
-
+
if (ScratchArgs.isEmpty() && ReceiverEff == DoNothing)
return getDefaultSummary();
-
+
return getPersistentSummary(RetEffect::MakeNoRet(), ReceiverEff, MayEscape);
}
@@ -1343,25 +1380,24 @@ RetainSummaryManager::getInstanceMethodSummary(Selector S,
QualType RetTy) {
// Look up a summary in our summary cache.
- ObjCMethodSummariesTy::iterator I = ObjCMethodSummaries.find(ID, ClsName, S);
-
- if (I != ObjCMethodSummaries.end())
- return I->second;
+ RetainSummary *Summ = ObjCMethodSummaries.find(ID, ClsName, S);
+
+ if (!Summ) {
+ assert(ScratchArgs.isEmpty());
+
+ // "initXXX": pass-through for receiver.
+ if (deriveNamingConvention(S) == InitRule)
+ Summ = getInitMethodSummary(RetTy);
+ else
+ Summ = getCommonMethodSummary(MD, S, RetTy);
+
+ // Annotations override defaults.
+ updateSummaryFromAnnotations(*Summ, MD);
+
+ // Memoize the summary.
+ ObjCMethodSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
+ }
- assert(ScratchArgs.isEmpty());
- RetainSummary *Summ = 0;
-
- // "initXXX": pass-through for receiver.
- if (deriveNamingConvention(S) == InitRule)
- Summ = getInitMethodSummary(RetTy);
- else
- Summ = getCommonMethodSummary(MD, S, RetTy);
-
- // Annotations override defaults.
- updateSummaryFromAnnotations(*Summ, MD);
-
- // Memoize the summary.
- ObjCMethodSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
return Summ;
}
@@ -1372,44 +1408,41 @@ RetainSummaryManager::getClassMethodSummary(Selector S, IdentifierInfo *ClsName,
QualType RetTy) {
assert(ClsName && "Class name must be specified.");
- ObjCMethodSummariesTy::iterator I =
- ObjCClassMethodSummaries.find(ID, ClsName, S);
-
- if (I != ObjCClassMethodSummaries.end())
- return I->second;
-
- RetainSummary *Summ = getCommonMethodSummary(MD, S, RetTy);
-
- // Annotations override defaults.
- updateSummaryFromAnnotations(*Summ, MD);
+ RetainSummary *Summ = ObjCClassMethodSummaries.find(ID, ClsName, S);
+
+ if (!Summ) {
+ Summ = getCommonMethodSummary(MD, S, RetTy);
+ // Annotations override defaults.
+ updateSummaryFromAnnotations(*Summ, MD);
+ // Memoize the summary.
+ ObjCClassMethodSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
+ }
- // Memoize the summary.
- ObjCClassMethodSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
return Summ;
}
-void RetainSummaryManager::InitializeClassMethodSummaries() {
+void RetainSummaryManager::InitializeClassMethodSummaries() {
assert(ScratchArgs.isEmpty());
RetainSummary* Summ = getPersistentSummary(ObjCAllocRetE);
-
+
// Create the summaries for "alloc", "new", and "allocWithZone:" for
// NSObject and its derivatives.
addNSObjectClsMethSummary(GetNullarySelector("alloc", Ctx), Summ);
addNSObjectClsMethSummary(GetNullarySelector("new", Ctx), Summ);
addNSObjectClsMethSummary(GetUnarySelector("allocWithZone", Ctx), Summ);
-
- // Create the [NSAssertionHandler currentHander] summary.
+
+  // Create the [NSAssertionHandler currentHandler] summary.
addClsMethSummary(&Ctx.Idents.get("NSAssertionHandler"),
GetNullarySelector("currentHandler", Ctx),
getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::ObjC)));
-
+
// Create the [NSAutoreleasePool addObject:] summary.
ScratchArgs = AF.Add(ScratchArgs, 0, Autorelease);
addClsMethSummary(&Ctx.Idents.get("NSAutoreleasePool"),
GetUnarySelector("addObject", Ctx),
getPersistentSummary(RetEffect::MakeNoRet(),
DoNothing, Autorelease));
-
+
// Create the summaries for [NSObject performSelector...]. We treat
// these as 'stop tracking' for the arguments because they are often
// used for delegates that can release the object. When we have better
@@ -1431,7 +1464,7 @@ void RetainSummaryManager::InitializeClassMethodSummaries() {
"withObject", "waitUntilDone", "modes", NULL);
addClsMethSummary(NSObjectII, Summ, "performSelectorInBackground",
"withObject", NULL);
-
+
// Specially handle NSData.
RetainSummary *dataWithBytesNoCopySumm =
getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::ObjC), DoNothing,
@@ -1443,36 +1476,43 @@ void RetainSummaryManager::InitializeClassMethodSummaries() {
}
void RetainSummaryManager::InitializeMethodSummaries() {
-
- assert (ScratchArgs.isEmpty());
-
+
+ assert (ScratchArgs.isEmpty());
+
// Create the "init" selector. It just acts as a pass-through for the
// receiver.
- addNSObjectMethSummary(GetNullarySelector("init", Ctx),
- getPersistentSummary(ObjCInitRetE, DecRefMsg));
-
+ RetainSummary *InitSumm = getPersistentSummary(ObjCInitRetE, DecRefMsg);
+ addNSObjectMethSummary(GetNullarySelector("init", Ctx), InitSumm);
+
+ // awakeAfterUsingCoder: behaves basically like an 'init' method. It
+ // claims the receiver and returns a retained object.
+ addNSObjectMethSummary(GetUnarySelector("awakeAfterUsingCoder", Ctx),
+ InitSumm);
+
// The next methods are allocators.
- RetainSummary *AllocSumm = getPersistentSummary(ObjCAllocRetE);
-
- // Create the "copy" selector.
- addNSObjectMethSummary(GetNullarySelector("copy", Ctx), AllocSumm);
+ RetainSummary *AllocSumm = getPersistentSummary(ObjCAllocRetE);
+ RetainSummary *CFAllocSumm =
+ getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
+
+ // Create the "copy" selector.
+ addNSObjectMethSummary(GetNullarySelector("copy", Ctx), AllocSumm);
// Create the "mutableCopy" selector.
addNSObjectMethSummary(GetNullarySelector("mutableCopy", Ctx), AllocSumm);
-
+
// Create the "retain" selector.
RetEffect E = RetEffect::MakeReceiverAlias();
RetainSummary *Summ = getPersistentSummary(E, IncRefMsg);
addNSObjectMethSummary(GetNullarySelector("retain", Ctx), Summ);
-
+
// Create the "release" selector.
Summ = getPersistentSummary(E, DecRefMsg);
addNSObjectMethSummary(GetNullarySelector("release", Ctx), Summ);
-
+
// Create the "drain" selector.
Summ = getPersistentSummary(E, isGCEnabled() ? DoNothing : DecRef);
addNSObjectMethSummary(GetNullarySelector("drain", Ctx), Summ);
-
+
// Create the -dealloc summary.
Summ = getPersistentSummary(RetEffect::MakeNoRet(), Dealloc);
addNSObjectMethSummary(GetNullarySelector("dealloc", Ctx), Summ);
@@ -1480,13 +1520,13 @@ void RetainSummaryManager::InitializeMethodSummaries() {
// Create the "autorelease" selector.
Summ = getPersistentSummary(E, Autorelease);
addNSObjectMethSummary(GetNullarySelector("autorelease", Ctx), Summ);
-
+
// Specially handle NSAutoreleasePool.
addInstMethSummary("NSAutoreleasePool", "init",
getPersistentSummary(RetEffect::MakeReceiverAlias(),
NewAutoreleasePool));
-
- // For NSWindow, allocated objects are (initially) self-owned.
+
+ // For NSWindow, allocated objects are (initially) self-owned.
// FIXME: For now we opt for false negatives with NSWindow, as these objects
// self-own themselves. However, they only do this once they are displayed.
// Thus, we need to track an NSWindow's display status.
@@ -1495,42 +1535,42 @@ void RetainSummaryManager::InitializeMethodSummaries() {
RetainSummary *NoTrackYet = getPersistentSummary(RetEffect::MakeNoRet(),
StopTracking,
StopTracking);
-
+
addClassMethSummary("NSWindow", "alloc", NoTrackYet);
#if 0
addInstMethSummary("NSWindow", NoTrackYet, "initWithContentRect",
"styleMask", "backing", "defer", NULL);
-
+
addInstMethSummary("NSWindow", NoTrackYet, "initWithContentRect",
"styleMask", "backing", "defer", "screen", NULL);
#endif
-
+
// For NSPanel (which subclasses NSWindow), allocated objects are not
// self-owned.
  // FIXME: For now we don't track NSPanel objects for the same reason
// as for NSWindow objects.
addClassMethSummary("NSPanel", "alloc", NoTrackYet);
-
+
#if 0
addInstMethSummary("NSPanel", NoTrackYet, "initWithContentRect",
"styleMask", "backing", "defer", NULL);
-
+
addInstMethSummary("NSPanel", NoTrackYet, "initWithContentRect",
"styleMask", "backing", "defer", "screen", NULL);
#endif
-
+
// Don't track allocated autorelease pools yet, as it is okay to prematurely
// exit a method.
addClassMethSummary("NSAutoreleasePool", "alloc", NoTrackYet);
// Create NSAssertionHandler summaries.
addPanicSummary("NSAssertionHandler", "handleFailureInFunction", "file",
- "lineNumber", "description", NULL);
-
+ "lineNumber", "description", NULL);
+
addPanicSummary("NSAssertionHandler", "handleFailureInMethod", "object",
"file", "lineNumber", "description", NULL);
-
+
// Create summaries QCRenderer/QCView -createSnapShotImageOfType:
addInstMethSummary("QCRenderer", AllocSumm,
"createSnapshotImageOfType", NULL);
@@ -1538,12 +1578,13 @@ void RetainSummaryManager::InitializeMethodSummaries() {
"createSnapshotImageOfType", NULL);
// Create summaries for CIContext, 'createCGImage' and
- // 'createCGLayerWithSize'.
- addInstMethSummary("CIContext", AllocSumm,
+ // 'createCGLayerWithSize'. These objects are CF objects, and are not
+ // automatically garbage collected.
+ addInstMethSummary("CIContext", CFAllocSumm,
"createCGImage", "fromRect", NULL);
- addInstMethSummary("CIContext", AllocSumm,
- "createCGImage", "fromRect", "format", "colorSpace", NULL);
- addInstMethSummary("CIContext", AllocSumm, "createCGLayerWithSize",
+ addInstMethSummary("CIContext", CFAllocSumm,
+ "createCGImage", "fromRect", "format", "colorSpace", NULL);
+ addInstMethSummary("CIContext", CFAllocSumm, "createCGLayerWithSize",
"info", NULL);
}
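// The instance-method summaries installed above encode the usual Cocoa naming
// conventions: "init" consumes the receiver and returns it owned, "copy" and
// "mutableCopy" return a +1 object, and "retain"/"release"/"autorelease"
// alias the receiver while adjusting its counts. A rough sketch of that
// convention table; SketchEffect and effectForSelector are hypothetical
// simplifications of the RetEffect/ArgEffect pairs used by the real summaries.
#include <string>

enum SketchEffect { ReturnsOwned, AliasAndIncRef, AliasAndDecRef,
                    AliasAndAutorelease, NoEffect };

static SketchEffect effectForSelector(const std::string &Sel) {
  if (Sel == "init" || Sel == "copy" || Sel == "mutableCopy")
    return ReturnsOwned;            // caller becomes responsible for one release
  if (Sel == "retain")      return AliasAndIncRef;
  if (Sel == "release")     return AliasAndDecRef;
  if (Sel == "autorelease") return AliasAndAutorelease;
  return NoEffect;
}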
@@ -1552,19 +1593,19 @@ void RetainSummaryManager::InitializeMethodSummaries() {
//===----------------------------------------------------------------------===//
namespace {
-
+
class VISIBILITY_HIDDEN RefVal {
-public:
+public:
enum Kind {
- Owned = 0, // Owning reference.
- NotOwned, // Reference is not owned by still valid (not freed).
+ Owned = 0, // Owning reference.
+ NotOwned, // Reference is not owned but still valid (not freed).
Released, // Object has been released.
ReturnedOwned, // Returned object passes ownership to caller.
  ReturnedNotOwned, // Returned object does not pass ownership to caller.
ERROR_START,
ErrorDeallocNotOwned, // -dealloc called on non-owned object.
ErrorDeallocGC, // Calling -dealloc with GC enabled.
- ErrorUseAfterRelease, // Object used after released.
+ ErrorUseAfterRelease, // Object used after released.
ErrorReleaseNotOwned, // Release of an object that was not owned.
ERROR_LEAK_START,
ErrorLeak, // A memory leak due to excessive reference counts.
@@ -1575,7 +1616,7 @@ public:
ErrorReturnedNotOwned
};
-private:
+private:
Kind kind;
RetEffect::ObjKind okind;
unsigned Cnt;
@@ -1588,9 +1629,9 @@ private:
RefVal(Kind k, unsigned cnt = 0)
: kind(k), okind(RetEffect::AnyObj), Cnt(cnt), ACnt(0) {}
-public:
+public:
Kind getKind() const { return kind; }
-
+
RetEffect::ObjKind getObjKind() const { return okind; }
unsigned getCount() const { return Cnt; }
@@ -1599,72 +1640,72 @@ public:
void clearCounts() { Cnt = 0; ACnt = 0; }
void setCount(unsigned i) { Cnt = i; }
void setAutoreleaseCount(unsigned i) { ACnt = i; }
-
+
QualType getType() const { return T; }
-
+
// Useful predicates.
-
+
static bool isError(Kind k) { return k >= ERROR_START; }
-
+
static bool isLeak(Kind k) { return k >= ERROR_LEAK_START; }
-
+
bool isOwned() const {
return getKind() == Owned;
}
-
+
bool isNotOwned() const {
return getKind() == NotOwned;
}
-
+
bool isReturnedOwned() const {
return getKind() == ReturnedOwned;
}
-
+
bool isReturnedNotOwned() const {
return getKind() == ReturnedNotOwned;
}
-
+
bool isNonLeakError() const {
Kind k = getKind();
return isError(k) && !isLeak(k);
}
-
+
static RefVal makeOwned(RetEffect::ObjKind o, QualType t,
unsigned Count = 1) {
return RefVal(Owned, o, Count, 0, t);
}
-
+
static RefVal makeNotOwned(RetEffect::ObjKind o, QualType t,
unsigned Count = 0) {
return RefVal(NotOwned, o, Count, 0, t);
}
-
+
// Comparison, profiling, and pretty-printing.
-
+
bool operator==(const RefVal& X) const {
return kind == X.kind && Cnt == X.Cnt && T == X.T && ACnt == X.ACnt;
}
-
+
RefVal operator-(size_t i) const {
return RefVal(getKind(), getObjKind(), getCount() - i,
getAutoreleaseCount(), getType());
}
-
+
RefVal operator+(size_t i) const {
return RefVal(getKind(), getObjKind(), getCount() + i,
getAutoreleaseCount(), getType());
}
-
+
RefVal operator^(Kind k) const {
return RefVal(k, getObjKind(), getCount(), getAutoreleaseCount(),
getType());
}
-
+
RefVal autorelease() const {
return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount()+1,
getType());
}
-
+
void Profile(llvm::FoldingSetNodeID& ID) const {
ID.AddInteger((unsigned) kind);
ID.AddInteger(Cnt);
@@ -1674,41 +1715,41 @@ public:
void print(llvm::raw_ostream& Out) const;
};
-
+
void RefVal::print(llvm::raw_ostream& Out) const {
if (!T.isNull())
Out << "Tracked Type:" << T.getAsString() << '\n';
-
+
switch (getKind()) {
default: assert(false);
- case Owned: {
+ case Owned: {
Out << "Owned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
-
+
case NotOwned: {
Out << "NotOwned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
-
- case ReturnedOwned: {
+
+ case ReturnedOwned: {
Out << "ReturnedOwned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
-
+
case ReturnedNotOwned: {
Out << "ReturnedNotOwned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
-
+
case Released:
Out << "Released";
break;
@@ -1716,19 +1757,19 @@ void RefVal::print(llvm::raw_ostream& Out) const {
case ErrorDeallocGC:
Out << "-dealloc (GC)";
break;
-
+
case ErrorDeallocNotOwned:
Out << "-dealloc (not-owned)";
break;
-
+
case ErrorLeak:
Out << "Leaked";
- break;
-
+ break;
+
case ErrorLeakReturned:
Out << "Leaked (Bad naming)";
break;
-
+
case ErrorGCLeakReturned:
Out << "Leaked (GC-ed at return)";
break;
@@ -1736,38 +1777,38 @@ void RefVal::print(llvm::raw_ostream& Out) const {
case ErrorUseAfterRelease:
Out << "Use-After-Release [ERROR]";
break;
-
+
case ErrorReleaseNotOwned:
Out << "Release of Not-Owned [ERROR]";
break;
-
+
case RefVal::ErrorOverAutorelease:
Out << "Over autoreleased";
break;
-
+
case RefVal::ErrorReturnedNotOwned:
Out << "Non-owned object returned instead of owned";
break;
}
-
+
if (ACnt) {
Out << " [ARC +" << ACnt << ']';
}
}
-
+
} // end anonymous namespace
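// RefVal is effectively a per-symbol state machine: retains bump the count,
// releases drop it, releasing the last owning reference moves the value to
// Released, and any further use is an error. A compact sketch of the release
// transition, assuming counts model the extra retains beyond the implicit +1;
// TrackState and applyRelease are simplified stand-ins for the logic that
// CFRefCount::Update performs on RefVal.
struct TrackState {
  enum Kind { Owned, NotOwned, Released,
              ErrorUseAfterRelease, ErrorReleaseNotOwned } kind;
  unsigned count;
};

static TrackState applyRelease(TrackState S) {
  if (S.kind == TrackState::Released)
    S.kind = TrackState::ErrorUseAfterRelease;   // message sent after release
  else if (S.count > 0)
    --S.count;                                   // drop one extra retain
  else if (S.kind == TrackState::Owned)
    S.kind = TrackState::Released;               // last (+1) reference given up
  else
    S.kind = TrackState::ErrorReleaseNotOwned;   // released a non-owned object
  return S;
}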
//===----------------------------------------------------------------------===//
// RefBindings - State used to track object reference counts.
//===----------------------------------------------------------------------===//
-
+
typedef llvm::ImmutableMap<SymbolRef, RefVal> RefBindings;
static int RefBIndex = 0;
namespace clang {
template<>
struct GRStateTrait<RefBindings> : public GRStatePartialTrait<RefBindings> {
- static inline void* GDMIndex() { return &RefBIndex; }
+ static inline void* GDMIndex() { return &RefBIndex; }
};
}
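// The GDMIndex idiom above keys the state's generic data map (GDM) off the
// address of a file-static int, which is guaranteed unique per trait without
// any registration step. The same idiom in miniature; SketchGDM is a
// hypothetical stand-in for the real (immutable) generic data map.
#include <map>

typedef std::map<const void*, const void*> SketchGDM; // tag address -> data

static int RefBindingsTag;   // only the address matters, the value is unused

static void setRefBindings(SketchGDM &GDM, const void *Data) {
  GDM[&RefBindingsTag] = Data;       // each trait owns its own slot
}

static const void *getRefBindings(const SketchGDM &GDM) {
  SketchGDM::const_iterator I = GDM.find(&RefBindingsTag);
  return I == GDM.end() ? 0 : I->second;
}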
@@ -1788,12 +1829,12 @@ namespace { class VISIBILITY_HIDDEN AutoreleaseStack {}; }
namespace clang {
template<> struct GRStateTrait<AutoreleaseStack>
: public GRStatePartialTrait<ARStack> {
- static inline void* GDMIndex() { return &AutoRBIndex; }
+ static inline void* GDMIndex() { return &AutoRBIndex; }
};
template<> struct GRStateTrait<AutoreleasePoolContents>
: public GRStatePartialTrait<ARPoolContents> {
- static inline void* GDMIndex() { return &AutoRCIndex; }
+ static inline void* GDMIndex() { return &AutoRCIndex; }
};
} // end clang namespace
@@ -1808,14 +1849,14 @@ static const GRState * SendAutorelease(const GRState *state,
SymbolRef pool = GetCurrentAutoreleasePool(state);
const ARCounts *cnts = state->get<AutoreleasePoolContents>(pool);
ARCounts newCnts(0);
-
+
if (cnts) {
const unsigned *cnt = (*cnts).lookup(sym);
newCnts = F.Add(*cnts, sym, cnt ? *cnt + 1 : 1);
}
else
newCnts = F.Add(F.GetEmptyMap(), sym, 1);
-
+
return state->set<AutoreleasePoolContents>(pool, newCnts);
}
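// SendAutorelease records each -autorelease by bumping a per-symbol counter
// in the map attached to the current autorelease pool; because the map is an
// ImmutableMap, a fresh map is produced instead of mutating in place. The
// same counting idea with a plain mutable map, where SymbolId is a
// hypothetical stand-in for SymbolRef.
#include <map>

typedef int SymbolId;
typedef std::map<SymbolId, unsigned> PoolCounts;

static void sendAutorelease(PoolCounts &CurrentPool, SymbolId Sym) {
  ++CurrentPool[Sym];   // missing entries start at zero, so first sends work too
}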
@@ -1824,7 +1865,7 @@ static const GRState * SendAutorelease(const GRState *state,
//===----------------------------------------------------------------------===//
namespace {
-
+
class VISIBILITY_HIDDEN CFRefCount : public GRTransferFuncs {
public:
class BindingsPrinter : public GRState::Printer {
@@ -1834,10 +1875,10 @@ public:
};
private:
- typedef llvm::DenseMap<const GRExprEngine::NodeTy*, const RetainSummary*>
- SummaryLogTy;
+ typedef llvm::DenseMap<const ExplodedNode*, const RetainSummary*>
+ SummaryLogTy;
- RetainSummaryManager Summaries;
+ RetainSummaryManager Summaries;
SummaryLogTy SummaryLog;
const LangOptions& LOpts;
ARCounts::Factory ARCountFactory;
@@ -1848,106 +1889,106 @@ private:
BugType *overAutorelease;
BugType *returnNotOwnedForOwned;
BugReporter *BR;
-
+
const GRState * Update(const GRState * state, SymbolRef sym, RefVal V, ArgEffect E,
RefVal::Kind& hasErr);
- void ProcessNonLeakError(ExplodedNodeSet<GRState>& Dst,
- GRStmtNodeBuilder<GRState>& Builder,
+ void ProcessNonLeakError(ExplodedNodeSet& Dst,
+ GRStmtNodeBuilder& Builder,
Expr* NodeExpr, Expr* ErrorExpr,
- ExplodedNode<GRState>* Pred,
+ ExplodedNode* Pred,
const GRState* St,
RefVal::Kind hasErr, SymbolRef Sym);
-
+
const GRState * HandleSymbolDeath(const GRState * state, SymbolRef sid, RefVal V,
llvm::SmallVectorImpl<SymbolRef> &Leaked);
-
- ExplodedNode<GRState>* ProcessLeaks(const GRState * state,
+
+ ExplodedNode* ProcessLeaks(const GRState * state,
llvm::SmallVectorImpl<SymbolRef> &Leaked,
GenericNodeBuilder &Builder,
GRExprEngine &Eng,
- ExplodedNode<GRState> *Pred = 0);
-
-public:
+ ExplodedNode *Pred = 0);
+
+public:
CFRefCount(ASTContext& Ctx, bool gcenabled, const LangOptions& lopts)
: Summaries(Ctx, gcenabled),
LOpts(lopts), useAfterRelease(0), releaseNotOwned(0),
deallocGC(0), deallocNotOwned(0),
leakWithinFunction(0), leakAtReturn(0), overAutorelease(0),
returnNotOwnedForOwned(0), BR(0) {}
-
+
virtual ~CFRefCount() {}
-
+
void RegisterChecks(BugReporter &BR);
-
+
virtual void RegisterPrinters(std::vector<GRState::Printer*>& Printers) {
Printers.push_back(new BindingsPrinter());
}
-
+
bool isGCEnabled() const { return Summaries.isGCEnabled(); }
const LangOptions& getLangOptions() const { return LOpts; }
-
- const RetainSummary *getSummaryOfNode(const ExplodedNode<GRState> *N) const {
+
+ const RetainSummary *getSummaryOfNode(const ExplodedNode *N) const {
SummaryLogTy::const_iterator I = SummaryLog.find(N);
return I == SummaryLog.end() ? 0 : I->second;
}
-
+
// Calls.
- void EvalSummary(ExplodedNodeSet<GRState>& Dst,
+ void EvalSummary(ExplodedNodeSet& Dst,
GRExprEngine& Eng,
- GRStmtNodeBuilder<GRState>& Builder,
+ GRStmtNodeBuilder& Builder,
Expr* Ex,
Expr* Receiver,
const RetainSummary& Summ,
ExprIterator arg_beg, ExprIterator arg_end,
- ExplodedNode<GRState>* Pred);
-
- virtual void EvalCall(ExplodedNodeSet<GRState>& Dst,
+ ExplodedNode* Pred);
+
+ virtual void EvalCall(ExplodedNodeSet& Dst,
GRExprEngine& Eng,
- GRStmtNodeBuilder<GRState>& Builder,
+ GRStmtNodeBuilder& Builder,
CallExpr* CE, SVal L,
- ExplodedNode<GRState>* Pred);
-
-
- virtual void EvalObjCMessageExpr(ExplodedNodeSet<GRState>& Dst,
+ ExplodedNode* Pred);
+
+
+ virtual void EvalObjCMessageExpr(ExplodedNodeSet& Dst,
GRExprEngine& Engine,
- GRStmtNodeBuilder<GRState>& Builder,
+ GRStmtNodeBuilder& Builder,
ObjCMessageExpr* ME,
- ExplodedNode<GRState>* Pred);
-
- bool EvalObjCMessageExprAux(ExplodedNodeSet<GRState>& Dst,
+ ExplodedNode* Pred);
+
+ bool EvalObjCMessageExprAux(ExplodedNodeSet& Dst,
GRExprEngine& Engine,
- GRStmtNodeBuilder<GRState>& Builder,
+ GRStmtNodeBuilder& Builder,
ObjCMessageExpr* ME,
- ExplodedNode<GRState>* Pred);
+ ExplodedNode* Pred);
- // Stores.
+ // Stores.
virtual void EvalBind(GRStmtNodeBuilderRef& B, SVal location, SVal val);
// End-of-path.
-
+
virtual void EvalEndPath(GRExprEngine& Engine,
- GREndPathNodeBuilder<GRState>& Builder);
-
- virtual void EvalDeadSymbols(ExplodedNodeSet<GRState>& Dst,
+ GREndPathNodeBuilder& Builder);
+
+ virtual void EvalDeadSymbols(ExplodedNodeSet& Dst,
GRExprEngine& Engine,
- GRStmtNodeBuilder<GRState>& Builder,
- ExplodedNode<GRState>* Pred,
+ GRStmtNodeBuilder& Builder,
+ ExplodedNode* Pred,
Stmt* S, const GRState* state,
SymbolReaper& SymReaper);
-
- std::pair<ExplodedNode<GRState>*, const GRState *>
+
+ std::pair<ExplodedNode*, const GRState *>
HandleAutoreleaseCounts(const GRState * state, GenericNodeBuilder Bd,
- ExplodedNode<GRState>* Pred, GRExprEngine &Eng,
+ ExplodedNode* Pred, GRExprEngine &Eng,
SymbolRef Sym, RefVal V, bool &stop);
// Return statements.
-
- virtual void EvalReturn(ExplodedNodeSet<GRState>& Dst,
+
+ virtual void EvalReturn(ExplodedNodeSet& Dst,
GRExprEngine& Engine,
- GRStmtNodeBuilder<GRState>& Builder,
+ GRStmtNodeBuilder& Builder,
ReturnStmt* S,
- ExplodedNode<GRState>* Pred);
+ ExplodedNode* Pred);
// Assumptions.
@@ -1965,34 +2006,34 @@ static void PrintPool(llvm::raw_ostream &Out, SymbolRef Sym,
else
Out << "<pool>";
Out << ":{";
-
+
// Get the contents of the pool.
if (const ARCounts *cnts = state->get<AutoreleasePoolContents>(Sym))
for (ARCounts::iterator J=cnts->begin(), EJ=cnts->end(); J != EJ; ++J)
Out << '(' << J.getKey() << ',' << J.getData() << ')';
- Out << '}';
+ Out << '}';
}
void CFRefCount::BindingsPrinter::Print(llvm::raw_ostream& Out,
const GRState* state,
const char* nl, const char* sep) {
-
+
RefBindings B = state->get<RefBindings>();
-
+
if (!B.isEmpty())
Out << sep << nl;
-
+
for (RefBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
Out << (*I).first << " : ";
(*I).second.print(Out);
Out << nl;
}
-
+
// Print the autorelease stack.
Out << sep << nl << "AR pool stack:";
ARStack stack = state->get<AutoreleaseStack>();
-
+
PrintPool(Out, SymbolRef(), state); // Print the caller's pool.
for (ARStack::iterator I=stack.begin(), E=stack.end(); I!=E; ++I)
PrintPool(Out, *I, state);
@@ -2005,157 +2046,155 @@ void CFRefCount::BindingsPrinter::Print(llvm::raw_ostream& Out,
//===----------------------------------------------------------------------===//
namespace {
-
+
//===-------------===//
// Bug Descriptions. //
- //===-------------===//
-
+ //===-------------===//
+
class VISIBILITY_HIDDEN CFRefBug : public BugType {
protected:
CFRefCount& TF;
-
- CFRefBug(CFRefCount* tf, const char* name)
- : BugType(name, "Memory (Core Foundation/Objective-C)"), TF(*tf) {}
+
+ CFRefBug(CFRefCount* tf, const char* name)
+ : BugType(name, "Memory (Core Foundation/Objective-C)"), TF(*tf) {}
public:
-
+
CFRefCount& getTF() { return TF; }
const CFRefCount& getTF() const { return TF; }
-
+
// FIXME: Eventually remove.
virtual const char* getDescription() const = 0;
-
+
virtual bool isLeak() const { return false; }
};
-
+
class VISIBILITY_HIDDEN UseAfterRelease : public CFRefBug {
public:
UseAfterRelease(CFRefCount* tf)
: CFRefBug(tf, "Use-after-release") {}
-
+
const char* getDescription() const {
return "Reference-counted object is used after it is released";
- }
+ }
};
-
+
class VISIBILITY_HIDDEN BadRelease : public CFRefBug {
public:
BadRelease(CFRefCount* tf) : CFRefBug(tf, "Bad release") {}
-
+
const char* getDescription() const {
- return "Incorrect decrement of the reference count of an "
- "object is not owned at this point by the caller";
+ return "Incorrect decrement of the reference count of an object that is "
+ "not owned at this point by the caller";
}
};
-
+
class VISIBILITY_HIDDEN DeallocGC : public CFRefBug {
public:
DeallocGC(CFRefCount *tf)
: CFRefBug(tf, "-dealloc called while using garbage collection") {}
-
+
const char *getDescription() const {
return "-dealloc called while using garbage collection";
}
};
-
+
class VISIBILITY_HIDDEN DeallocNotOwned : public CFRefBug {
public:
DeallocNotOwned(CFRefCount *tf)
: CFRefBug(tf, "-dealloc sent to non-exclusively owned object") {}
-
+
const char *getDescription() const {
return "-dealloc sent to object that may be referenced elsewhere";
}
- };
-
+ };
+
class VISIBILITY_HIDDEN OverAutorelease : public CFRefBug {
public:
- OverAutorelease(CFRefCount *tf) :
+ OverAutorelease(CFRefCount *tf) :
CFRefBug(tf, "Object sent -autorelease too many times") {}
-
+
const char *getDescription() const {
return "Object sent -autorelease too many times";
}
};
-
+
class VISIBILITY_HIDDEN ReturnedNotOwnedForOwned : public CFRefBug {
public:
ReturnedNotOwnedForOwned(CFRefCount *tf) :
CFRefBug(tf, "Method should return an owned object") {}
-
+
const char *getDescription() const {
return "Object with +0 retain counts returned to caller where a +1 "
"(owning) retain count is expected";
}
};
-
+
class VISIBILITY_HIDDEN Leak : public CFRefBug {
const bool isReturn;
protected:
Leak(CFRefCount* tf, const char* name, bool isRet)
: CFRefBug(tf, name), isReturn(isRet) {}
public:
-
+
const char* getDescription() const { return ""; }
-
+
bool isLeak() const { return true; }
};
-
+
class VISIBILITY_HIDDEN LeakAtReturn : public Leak {
public:
LeakAtReturn(CFRefCount* tf, const char* name)
: Leak(tf, name, true) {}
};
-
+
class VISIBILITY_HIDDEN LeakWithinFunction : public Leak {
public:
LeakWithinFunction(CFRefCount* tf, const char* name)
: Leak(tf, name, false) {}
- };
-
+ };
+
//===---------===//
// Bug Reports. //
//===---------===//
-
+
class VISIBILITY_HIDDEN CFRefReport : public RangedBugReport {
protected:
SymbolRef Sym;
const CFRefCount &TF;
public:
CFRefReport(CFRefBug& D, const CFRefCount &tf,
- ExplodedNode<GRState> *n, SymbolRef sym)
+ ExplodedNode *n, SymbolRef sym)
: RangedBugReport(D, D.getDescription(), n), Sym(sym), TF(tf) {}
CFRefReport(CFRefBug& D, const CFRefCount &tf,
- ExplodedNode<GRState> *n, SymbolRef sym, const char* endText)
+ ExplodedNode *n, SymbolRef sym, const char* endText)
: RangedBugReport(D, D.getDescription(), endText, n), Sym(sym), TF(tf) {}
-
+
virtual ~CFRefReport() {}
-
+
CFRefBug& getBugType() {
return (CFRefBug&) RangedBugReport::getBugType();
}
const CFRefBug& getBugType() const {
return (const CFRefBug&) RangedBugReport::getBugType();
}
-
- virtual void getRanges(BugReporter& BR, const SourceRange*& beg,
- const SourceRange*& end) {
-
+
+ virtual void getRanges(const SourceRange*& beg, const SourceRange*& end) {
if (!getBugType().isLeak())
- RangedBugReport::getRanges(BR, beg, end);
+ RangedBugReport::getRanges(beg, end);
else
beg = end = 0;
}
-
+
SymbolRef getSymbol() const { return Sym; }
-
+
PathDiagnosticPiece* getEndPath(BugReporterContext& BRC,
- const ExplodedNode<GRState>* N);
-
+ const ExplodedNode* N);
+
std::pair<const char**,const char**> getExtraDescriptiveText();
-
- PathDiagnosticPiece* VisitNode(const ExplodedNode<GRState>* N,
- const ExplodedNode<GRState>* PrevN,
+
+ PathDiagnosticPiece* VisitNode(const ExplodedNode* N,
+ const ExplodedNode* PrevN,
BugReporterContext& BRC);
};
@@ -2164,38 +2203,38 @@ namespace {
const MemRegion* AllocBinding;
public:
CFRefLeakReport(CFRefBug& D, const CFRefCount &tf,
- ExplodedNode<GRState> *n, SymbolRef sym,
+ ExplodedNode *n, SymbolRef sym,
GRExprEngine& Eng);
-
+
PathDiagnosticPiece* getEndPath(BugReporterContext& BRC,
- const ExplodedNode<GRState>* N);
-
+ const ExplodedNode* N);
+
SourceLocation getLocation() const { return AllocSite; }
- };
+ };
} // end anonymous namespace
void CFRefCount::RegisterChecks(BugReporter& BR) {
useAfterRelease = new UseAfterRelease(this);
BR.Register(useAfterRelease);
-
+
releaseNotOwned = new BadRelease(this);
BR.Register(releaseNotOwned);
-
+
deallocGC = new DeallocGC(this);
BR.Register(deallocGC);
-
+
deallocNotOwned = new DeallocNotOwned(this);
BR.Register(deallocNotOwned);
-
+
overAutorelease = new OverAutorelease(this);
BR.Register(overAutorelease);
-
+
returnNotOwnedForOwned = new ReturnedNotOwnedForOwned(this);
BR.Register(returnNotOwnedForOwned);
-
+
// First register "return" leaks.
const char* name = 0;
-
+
if (isGCEnabled())
name = "Leak of returned object when using garbage collection";
else if (getLangOptions().getGCMode() == LangOptions::HybridGC)
@@ -2205,13 +2244,15 @@ void CFRefCount::RegisterChecks(BugReporter& BR) {
assert(getLangOptions().getGCMode() == LangOptions::NonGC);
name = "Leak of returned object";
}
-
+
+ // Leaks should not be reported if they are post-dominated by a sink.
leakAtReturn = new LeakAtReturn(this, name);
+ leakAtReturn->setSuppressOnSink(true);
BR.Register(leakAtReturn);
-
+
// Second, register leaks within a function/method.
if (isGCEnabled())
- name = "Leak of object when using garbage collection";
+ name = "Leak of object when using garbage collection";
else if (getLangOptions().getGCMode() == LangOptions::HybridGC)
name = "Leak of object when not using garbage collection (GC) in "
"dual GC/non-GC code";
@@ -2219,22 +2260,24 @@ void CFRefCount::RegisterChecks(BugReporter& BR) {
assert(getLangOptions().getGCMode() == LangOptions::NonGC);
name = "Leak";
}
-
+
+ // Leaks should not be reported if they are post-dominated by sinks.
leakWithinFunction = new LeakWithinFunction(this, name);
+ leakWithinFunction->setSuppressOnSink(true);
BR.Register(leakWithinFunction);
-
+
// Save the reference to the BugReporter.
this->BR = &BR;
}
static const char* Msgs[] = {
// GC only
- "Code is compiled to only use garbage collection",
+ "Code is compiled to only use garbage collection",
// No GC.
"Code is compiled to use reference counts",
// Hybrid, with GC.
"Code is compiled to use either garbage collection (GC) or reference counts"
- " (non-GC). The bug occurs with GC enabled",
+ " (non-GC). The bug occurs with GC enabled",
// Hybrid, without GC
"Code is compiled to use either garbage collection (GC) or reference counts"
" (non-GC). The bug occurs in non-GC mode"
@@ -2242,19 +2285,19 @@ static const char* Msgs[] = {
std::pair<const char**,const char**> CFRefReport::getExtraDescriptiveText() {
CFRefCount& TF = static_cast<CFRefBug&>(getBugType()).getTF();
-
+
switch (TF.getLangOptions().getGCMode()) {
default:
assert(false);
-
+
case LangOptions::GCOnly:
assert (TF.isGCEnabled());
- return std::make_pair(&Msgs[0], &Msgs[0]+1);
-
+ return std::make_pair(&Msgs[0], &Msgs[0]+1);
+
case LangOptions::NonGC:
assert (!TF.isGCEnabled());
return std::make_pair(&Msgs[1], &Msgs[1]+1);
-
+
case LangOptions::HybridGC:
if (TF.isGCEnabled())
return std::make_pair(&Msgs[2], &Msgs[2]+1);
@@ -2268,50 +2311,50 @@ static inline bool contains(const llvm::SmallVectorImpl<ArgEffect>& V,
for (llvm::SmallVectorImpl<ArgEffect>::const_iterator I=V.begin(), E=V.end();
I!=E; ++I)
if (*I == X) return true;
-
+
return false;
}
-PathDiagnosticPiece* CFRefReport::VisitNode(const ExplodedNode<GRState>* N,
- const ExplodedNode<GRState>* PrevN,
+PathDiagnosticPiece* CFRefReport::VisitNode(const ExplodedNode* N,
+ const ExplodedNode* PrevN,
BugReporterContext& BRC) {
-
+
if (!isa<PostStmt>(N->getLocation()))
return NULL;
-
+
// Check if the type state has changed.
const GRState *PrevSt = PrevN->getState();
const GRState *CurrSt = N->getState();
-
- const RefVal* CurrT = CurrSt->get<RefBindings>(Sym);
+
+ const RefVal* CurrT = CurrSt->get<RefBindings>(Sym);
if (!CurrT) return NULL;
-
+
const RefVal &CurrV = *CurrT;
const RefVal *PrevT = PrevSt->get<RefBindings>(Sym);
-
+
  // Create a string buffer to contain all the useful things we want
// to tell the user.
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
// This is the allocation site since the previous node had no bindings
// for this symbol.
if (!PrevT) {
- Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
-
- if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
+
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
// Get the name of the callee (if it is available).
SVal X = CurrSt->getSValAsScalarOrLoc(CE->getCallee());
if (const FunctionDecl* FD = X.getAsFunctionDecl())
os << "Call to function '" << FD->getNameAsString() <<'\'';
else
- os << "function call";
- }
+ os << "function call";
+ }
else {
assert (isa<ObjCMessageExpr>(S));
os << "Method";
}
-
+
if (CurrV.getObjKind() == RetEffect::CF) {
os << " returns a Core Foundation object with a ";
}
@@ -2319,10 +2362,10 @@ PathDiagnosticPiece* CFRefReport::VisitNode(const ExplodedNode<GRState>* N,
assert (CurrV.getObjKind() == RetEffect::ObjC);
os << " returns an Objective-C object with a ";
}
-
+
if (CurrV.isOwned()) {
os << "+1 retain count (owning reference).";
-
+
if (static_cast<CFRefBug&>(getBugType()).getTF().isGCEnabled()) {
assert(CurrV.getObjKind() == RetEffect::CF);
os << " "
@@ -2333,51 +2376,51 @@ PathDiagnosticPiece* CFRefReport::VisitNode(const ExplodedNode<GRState>* N,
assert (CurrV.isNotOwned());
os << "+0 retain count (non-owning reference).";
}
-
+
PathDiagnosticLocation Pos(S, BRC.getSourceManager());
return new PathDiagnosticEventPiece(Pos, os.str());
}
-
+
// Gather up the effects that were performed on the object at this
// program point
llvm::SmallVector<ArgEffect, 2> AEffects;
-
+
if (const RetainSummary *Summ =
TF.getSummaryOfNode(BRC.getNodeResolver().getOriginalNode(N))) {
// We only have summaries attached to nodes after evaluating CallExpr and
// ObjCMessageExprs.
- Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
-
- if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
+
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
// Iterate through the parameter expressions and see if the symbol
// was ever passed as an argument.
unsigned i = 0;
-
- for (CallExpr::arg_iterator AI=CE->arg_begin(), AE=CE->arg_end();
+
+ for (CallExpr::const_arg_iterator AI=CE->arg_begin(), AE=CE->arg_end();
AI!=AE; ++AI, ++i) {
-
+
// Retrieve the value of the argument. Is it the symbol
// we are interested in?
if (CurrSt->getSValAsScalarOrLoc(*AI).getAsLocSymbol() != Sym)
continue;
-
+
// We have an argument. Get the effect!
AEffects.push_back(Summ->getArg(i));
}
}
- else if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
- if (Expr *receiver = ME->getReceiver())
+ else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
+ if (const Expr *receiver = ME->getReceiver())
if (CurrSt->getSValAsScalarOrLoc(receiver).getAsLocSymbol() == Sym) {
// The symbol we are tracking is the receiver.
AEffects.push_back(Summ->getReceiverEffect());
}
}
}
-
+
do {
// Get the previous type state.
RefVal PrevV = *PrevT;
-
+
// Specially handle -dealloc.
if (!TF.isGCEnabled() && contains(AEffects, Dealloc)) {
// Determine if the object's reference count was pushed to zero.
@@ -2390,23 +2433,23 @@ PathDiagnosticPiece* CFRefReport::VisitNode(const ExplodedNode<GRState>* N,
break;
}
}
-
+
// Specially handle CFMakeCollectable and friends.
if (contains(AEffects, MakeCollectable)) {
// Get the name of the function.
- Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
+ const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
SVal X = CurrSt->getSValAsScalarOrLoc(cast<CallExpr>(S)->getCallee());
const FunctionDecl* FD = X.getAsFunctionDecl();
const std::string& FName = FD->getNameAsString();
-
+
if (TF.isGCEnabled()) {
// Determine if the object's reference count was pushed to zero.
assert(!(PrevV == CurrV) && "The typestate *must* have changed.");
-
+
os << "In GC mode a call to '" << FName
<< "' decrements an object's retain count and registers the "
"object with the garbage collector. ";
-
+
if (CurrV.getKind() == RefVal::Released) {
assert(CurrV.getCount() == 0);
os << "Since it now has a 0 retain count the object can be "
@@ -2417,67 +2460,67 @@ PathDiagnosticPiece* CFRefReport::VisitNode(const ExplodedNode<GRState>* N,
"After this call its retain count is +" << CurrV.getCount()
<< '.';
}
- else
+ else
os << "When GC is not enabled a call to '" << FName
<< "' has no effect on its argument.";
-
+
// Nothing more to say.
break;
}
-
- // Determine if the typestate has changed.
+
+ // Determine if the typestate has changed.
if (!(PrevV == CurrV))
switch (CurrV.getKind()) {
case RefVal::Owned:
case RefVal::NotOwned:
-
+
if (PrevV.getCount() == CurrV.getCount()) {
// Did an autorelease message get sent?
if (PrevV.getAutoreleaseCount() == CurrV.getAutoreleaseCount())
return 0;
-
+
assert(PrevV.getAutoreleaseCount() < CurrV.getAutoreleaseCount());
os << "Object sent -autorelease message";
break;
}
-
+
if (PrevV.getCount() > CurrV.getCount())
os << "Reference count decremented.";
else
os << "Reference count incremented.";
-
+
if (unsigned Count = CurrV.getCount())
os << " The object now has a +" << Count << " retain count.";
-
+
if (PrevV.getKind() == RefVal::Released) {
assert(TF.isGCEnabled() && CurrV.getCount() > 0);
os << " The object is not eligible for garbage collection until the "
"retain count reaches 0 again.";
}
-
+
break;
-
+
case RefVal::Released:
os << "Object released.";
break;
-
+
case RefVal::ReturnedOwned:
os << "Object returned to caller as an owning reference (single retain "
"count transferred to caller).";
break;
-
+
case RefVal::ReturnedNotOwned:
os << "Object returned to caller with a +0 (non-owning) retain count.";
break;
-
+
default:
return NULL;
}
-
+
// Emit any remaining diagnostics for the argument effects (if any).
for (llvm::SmallVectorImpl<ArgEffect>::iterator I=AEffects.begin(),
E=AEffects.end(); I != E; ++I) {
-
+
// A bunch of things have alternate behavior under GC.
if (TF.isGCEnabled())
switch (*I) {
@@ -2493,24 +2536,25 @@ PathDiagnosticPiece* CFRefReport::VisitNode(const ExplodedNode<GRState>* N,
continue;
}
}
- } while(0);
-
+ } while (0);
+
if (os.str().empty())
return 0; // We have nothing to say!
- Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
+ const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
PathDiagnosticLocation Pos(S, BRC.getSourceManager());
PathDiagnosticPiece* P = new PathDiagnosticEventPiece(Pos, os.str());
-
+
// Add the range by scanning the children of the statement for any bindings
// to Sym.
- for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
- if (Expr* Exp = dyn_cast_or_null<Expr>(*I))
+ for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+ I!=E; ++I)
+ if (const Expr* Exp = dyn_cast_or_null<Expr>(*I))
if (CurrSt->getSValAsScalarOrLoc(Exp).getAsLocSymbol() == Sym) {
P->addRange(Exp->getSourceRange());
break;
}
-
+
return P;
}
@@ -2520,62 +2564,62 @@ namespace {
SymbolRef Sym;
const MemRegion* Binding;
bool First;
-
+
public:
FindUniqueBinding(SymbolRef sym) : Sym(sym), Binding(0), First(true) {}
-
+
bool HandleBinding(StoreManager& SMgr, Store store, const MemRegion* R,
SVal val) {
-
- SymbolRef SymV = val.getAsSymbol();
+
+ SymbolRef SymV = val.getAsSymbol();
if (!SymV || SymV != Sym)
return true;
-
+
if (Binding) {
First = false;
return false;
}
else
Binding = R;
-
- return true;
+
+ return true;
}
-
+
operator bool() { return First && Binding; }
const MemRegion* getRegion() { return Binding; }
- };
+ };
}
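// FindUniqueBinding scans every (region, value) binding in the store and
// accepts a region only if it is the *single* region holding the symbol; a
// second hit invalidates the result. The same "unique or nothing" pattern in
// miniature, using plain ints as hypothetical stand-ins for regions and
// symbols.
#include <cstddef>
#include <utility>
#include <vector>

static const int *findUniqueBinding(
    const std::vector<std::pair<int, int> > &Bindings, int Sym) {
  const int *Found = 0;               // the unique region, if any
  for (std::size_t i = 0, e = Bindings.size(); i != e; ++i) {
    if (Bindings[i].second != Sym)
      continue;
    if (Found)
      return 0;                       // more than one binding -> not unique
    Found = &Bindings[i].first;
  }
  return Found;
}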
-static std::pair<const ExplodedNode<GRState>*,const MemRegion*>
-GetAllocationSite(GRStateManager& StateMgr, const ExplodedNode<GRState>* N,
+static std::pair<const ExplodedNode*,const MemRegion*>
+GetAllocationSite(GRStateManager& StateMgr, const ExplodedNode* N,
SymbolRef Sym) {
-
+
  // Find both the first node that referred to the tracked symbol and the
  // memory location that the value was stored to.
- const ExplodedNode<GRState>* Last = N;
- const MemRegion* FirstBinding = 0;
-
+ const ExplodedNode* Last = N;
+ const MemRegion* FirstBinding = 0;
+
while (N) {
const GRState* St = N->getState();
RefBindings B = St->get<RefBindings>();
-
+
if (!B.lookup(Sym))
break;
-
+
FindUniqueBinding FB(Sym);
- StateMgr.iterBindings(St, FB);
- if (FB) FirstBinding = FB.getRegion();
-
+ StateMgr.iterBindings(St, FB);
+ if (FB) FirstBinding = FB.getRegion();
+
Last = N;
- N = N->pred_empty() ? NULL : *(N->pred_begin());
+ N = N->pred_empty() ? NULL : *(N->pred_begin());
}
-
+
return std::make_pair(Last, FirstBinding);
}
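// GetAllocationSite walks backwards through each node's (single) predecessor
// until the symbol is no longer tracked; the last node that still had a
// RefBindings entry is reported as the allocation site. The shape of that
// walk, with SketchNode and TracksSym as hypothetical stand-ins for
// ExplodedNode and the RefBindings lookup.
struct SketchNode {
  const SketchNode *Pred;   // single predecessor, as assumed by the code above
  bool TracksSym;           // does this node's state still bind the symbol?
};

static const SketchNode *findAllocationNode(const SketchNode *N) {
  const SketchNode *Last = N;
  while (N && N->TracksSym) {
    Last = N;               // remember the earliest node that still tracks it
    N = N->Pred;            // keep walking toward the root of the graph
  }
  return Last;              // the node where the symbol first appeared
}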
PathDiagnosticPiece*
CFRefReport::getEndPath(BugReporterContext& BRC,
- const ExplodedNode<GRState>* EndN) {
+ const ExplodedNode* EndN) {
// Tell the BugReporterContext to report cases when the tracked symbol is
// assigned to different variables, etc.
BRC.addNotableSymbol(Sym);
@@ -2584,37 +2628,37 @@ CFRefReport::getEndPath(BugReporterContext& BRC,
PathDiagnosticPiece*
CFRefLeakReport::getEndPath(BugReporterContext& BRC,
- const ExplodedNode<GRState>* EndN){
-
+ const ExplodedNode* EndN){
+
// Tell the BugReporterContext to report cases when the tracked symbol is
// assigned to different variables, etc.
BRC.addNotableSymbol(Sym);
-
+
// We are reporting a leak. Walk up the graph to get to the first node where
  // the symbol appeared, and also get the first VarDecl that the tracked object
// is stored to.
- const ExplodedNode<GRState>* AllocNode = 0;
+ const ExplodedNode* AllocNode = 0;
const MemRegion* FirstBinding = 0;
-
+
llvm::tie(AllocNode, FirstBinding) =
GetAllocationSite(BRC.getStateManager(), EndN, Sym);
-
- // Get the allocate site.
+
+ // Get the allocation site.
assert(AllocNode);
- Stmt* FirstStmt = cast<PostStmt>(AllocNode->getLocation()).getStmt();
-
+ const Stmt* FirstStmt = cast<PostStmt>(AllocNode->getLocation()).getStmt();
+
SourceManager& SMgr = BRC.getSourceManager();
unsigned AllocLine =SMgr.getInstantiationLineNumber(FirstStmt->getLocStart());
-
+
// Compute an actual location for the leak. Sometimes a leak doesn't
// occur at an actual statement (e.g., transition between blocks; end
// of function) so we need to walk the graph and compute a real location.
- const ExplodedNode<GRState>* LeakN = EndN;
+ const ExplodedNode* LeakN = EndN;
PathDiagnosticLocation L;
-
+
while (LeakN) {
ProgramPoint P = LeakN->getLocation();
-
+
if (const PostStmt *PS = dyn_cast<PostStmt>(&P)) {
L = PathDiagnosticLocation(PS->getStmt()->getLocStart(), SMgr);
break;
@@ -2625,31 +2669,31 @@ CFRefLeakReport::getEndPath(BugReporterContext& BRC,
break;
}
}
-
+
LeakN = LeakN->succ_empty() ? 0 : *(LeakN->succ_begin());
}
-
+
if (!L.isValid()) {
- const Decl &D = BRC.getCodeDecl();
+ const Decl &D = EndN->getCodeDecl();
L = PathDiagnosticLocation(D.getBodyRBrace(), SMgr);
}
-
+
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
os << "Object allocated on line " << AllocLine;
-
+
if (FirstBinding)
- os << " and stored into '" << FirstBinding->getString() << '\'';
-
+ os << " and stored into '" << FirstBinding->getString() << '\'';
+
// Get the retain count.
const RefVal* RV = EndN->getState()->get<RefBindings>(Sym);
-
+
if (RV->getKind() == RefVal::ErrorLeakReturned) {
// FIXME: Per comments in rdar://6320065, "create" only applies to CF
    // objects. Only "copy", "alloc", "retain" and "new" transfer ownership
// to the caller for NS objects.
- ObjCMethodDecl& MD = cast<ObjCMethodDecl>(BRC.getCodeDecl());
+ ObjCMethodDecl& MD = cast<ObjCMethodDecl>(EndN->getCodeDecl());
os << " is returned from a method whose name ('"
<< MD.getSelector().getAsString()
<< "') does not contain 'copy' or otherwise starts with"
@@ -2657,7 +2701,7 @@ CFRefLeakReport::getEndPath(BugReporterContext& BRC,
" in the Memory Management Guide for Cocoa (object leaked)";
}
else if (RV->getKind() == RefVal::ErrorGCLeakReturned) {
- ObjCMethodDecl& MD = cast<ObjCMethodDecl>(BRC.getCodeDecl());
+ ObjCMethodDecl& MD = cast<ObjCMethodDecl>(EndN->getCodeDecl());
os << " and returned from method '" << MD.getSelector().getAsString()
<< "' is potentially leaked when using garbage collection. Callers "
"of this method do not expect a returned object with a +1 retain "
@@ -2667,16 +2711,15 @@ CFRefLeakReport::getEndPath(BugReporterContext& BRC,
else
os << " is no longer referenced after this point and has a retain count of"
" +" << RV->getCount() << " (object leaked)";
-
+
return new PathDiagnosticEventPiece(L, os.str());
}
CFRefLeakReport::CFRefLeakReport(CFRefBug& D, const CFRefCount &tf,
- ExplodedNode<GRState> *n,
+ ExplodedNode *n,
SymbolRef sym, GRExprEngine& Eng)
-: CFRefReport(D, tf, n, sym)
-{
-
+: CFRefReport(D, tf, n, sym) {
+
  // Most bug reports are cached at the location where they occurred.
// With leaks, we want to unique them by the location where they were
// allocated, and only report a single path. To do this, we need to find
@@ -2685,15 +2728,15 @@ CFRefLeakReport::CFRefLeakReport(CFRefBug& D, const CFRefCount &tf,
// Note that this is *not* the trimmed graph; we are guaranteed, however,
// that all ancestor nodes that represent the allocation site have the
// same SourceLocation.
- const ExplodedNode<GRState>* AllocNode = 0;
-
+ const ExplodedNode* AllocNode = 0;
+
llvm::tie(AllocNode, AllocBinding) = // Set AllocBinding.
GetAllocationSite(Eng.getStateManager(), getEndNode(), getSymbol());
-
+
// Get the SourceLocation for the allocation site.
ProgramPoint P = AllocNode->getLocation();
AllocSite = cast<PostStmt>(P).getStmt()->getLocStart();
-
+
// Fill in the description of the bug.
Description.clear();
llvm::raw_string_ostream os(Description);
@@ -2702,9 +2745,9 @@ CFRefLeakReport::CFRefLeakReport(CFRefBug& D, const CFRefCount &tf,
os << "Potential leak ";
if (tf.isGCEnabled()) {
os << "(when using garbage collection) ";
- }
+ }
os << "of an object allocated on line " << AllocLine;
-
+
// FIXME: AllocBinding doesn't get populated for RegionStore yet.
if (AllocBinding)
os << " and stored into '" << AllocBinding->getString() << '\'';
@@ -2719,57 +2762,46 @@ CFRefLeakReport::CFRefLeakReport(CFRefBug& D, const CFRefCount &tf,
 /// While the return type can be queried directly from RetE, when
 /// invoking class methods we augment the return type to be that of
 /// a pointer to the class (as opposed to it just being id).
-static QualType GetReturnType(Expr* RetE, ASTContext& Ctx) {
-
+static QualType GetReturnType(const Expr* RetE, ASTContext& Ctx) {
QualType RetTy = RetE->getType();
-
- // FIXME: We aren't handling id<...>.
- const PointerType* PT = RetTy->getAsPointerType();
- if (!PT)
- return RetTy;
-
- // If RetEx is not a message expression just return its type.
- // If RetEx is a message expression, return its types if it is something
+ // If RetE is not a message expression, just return its type.
+ // If RetE is a message expression, return its type if it is something
/// more specific than id.
-
- ObjCMessageExpr* ME = dyn_cast<ObjCMessageExpr>(RetE);
-
- if (!ME || !Ctx.isObjCIdStructType(PT->getPointeeType()))
- return RetTy;
-
- ObjCInterfaceDecl* D = ME->getClassInfo().first;
-
- // At this point we know the return type of the message expression is id.
- // If we have an ObjCInterceDecl, we know this is a call to a class method
- // whose type we can resolve. In such cases, promote the return type to
- // Class*.
- return !D ? RetTy : Ctx.getPointerType(Ctx.getObjCInterfaceType(D));
-}
+ if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(RetE))
+ if (const ObjCObjectPointerType *PT = RetTy->getAs<ObjCObjectPointerType>())
+ if (PT->isObjCQualifiedIdType() || PT->isObjCIdType() ||
+ PT->isObjCClassType()) {
+ // At this point we know the return type of the message expression is
+ // id, id<...>, or Class. If we have an ObjCInterfaceDecl, we know this
+ // is a call to a class method whose type we can resolve. In such
+ // cases, promote the return type to XXX* (where XXX is the class).
+ const ObjCInterfaceDecl *D = ME->getClassInfo().first;
+ return !D ? RetTy : Ctx.getPointerType(Ctx.getObjCInterfaceType(D));
+ }
+ return RetTy;
+}
-void CFRefCount::EvalSummary(ExplodedNodeSet<GRState>& Dst,
+void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
GRExprEngine& Eng,
- GRStmtNodeBuilder<GRState>& Builder,
+ GRStmtNodeBuilder& Builder,
Expr* Ex,
Expr* Receiver,
const RetainSummary& Summ,
ExprIterator arg_beg, ExprIterator arg_end,
- ExplodedNode<GRState>* Pred) {
-
+ ExplodedNode* Pred) {
+
// Get the state.
- GRStateManager& StateMgr = Eng.getStateManager();
const GRState *state = Builder.GetState(Pred);
- ASTContext& Ctx = StateMgr.getContext();
- ValueManager &ValMgr = Eng.getValueManager();
// Evaluate the effect of the arguments.
RefVal::Kind hasErr = (RefVal::Kind) 0;
unsigned idx = 0;
Expr* ErrorExpr = NULL;
- SymbolRef ErrorSym = 0;
-
- for (ExprIterator I = arg_beg; I != arg_end; ++I, ++idx) {
- SVal V = state->getSValAsScalarOrLoc(*I);
+ SymbolRef ErrorSym = 0;
+
+ for (ExprIterator I = arg_beg; I != arg_end; ++I, ++idx) {
+ SVal V = state->getSValAsScalarOrLoc(*I);
SymbolRef Sym = V.getAsLocSymbol();
if (Sym)
@@ -2779,143 +2811,76 @@ void CFRefCount::EvalSummary(ExplodedNodeSet<GRState>& Dst,
ErrorExpr = *I;
ErrorSym = Sym;
break;
- }
+ }
continue;
}
+ tryAgain:
if (isa<Loc>(V)) {
if (loc::MemRegionVal* MR = dyn_cast<loc::MemRegionVal>(&V)) {
if (Summ.getArg(idx) == DoNothingByRef)
continue;
-
- // Invalidate the value of the variable passed by reference.
-
+
+ // Invalidate the value of the variable passed by reference.
+
// FIXME: We can have collisions on the conjured symbol if the
// expression *I also creates conjured symbols. We probably want
// to identify conjured symbols by an expression pair: the enclosing
// expression (the context) and the expression itself. This should
- // disambiguate conjured symbols.
+ // disambiguate conjured symbols.
unsigned Count = Builder.getCurrentBlockCount();
- const TypedRegion* R = dyn_cast<TypedRegion>(MR->getRegion());
-
- if (R) {
- // Are we dealing with an ElementRegion? If the element type is
- // a basic integer type (e.g., char, int) and the underying region
- // is a variable region then strip off the ElementRegion.
- // FIXME: We really need to think about this for the general case
- // as sometimes we are reasoning about arrays and other times
- // about (char*), etc., is just a form of passing raw bytes.
- // e.g., void *p = alloca(); foo((char*)p);
- if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
- // Checking for 'integral type' is probably too promiscuous, but
- // we'll leave it in for now until we have a systematic way of
- // handling all of these cases. Eventually we need to come up
- // with an interface to StoreManager so that this logic can be
- // approriately delegated to the respective StoreManagers while
- // still allowing us to do checker-specific logic (e.g.,
- // invalidating reference counts), probably via callbacks.
- if (ER->getElementType()->isIntegralType()) {
- const MemRegion *superReg = ER->getSuperRegion();
- if (isa<VarRegion>(superReg) || isa<FieldRegion>(superReg) ||
- isa<ObjCIvarRegion>(superReg))
- R = cast<TypedRegion>(superReg);
- }
-
- // FIXME: What about layers of ElementRegions?
+ StoreManager& StoreMgr = Eng.getStateManager().getStoreManager();
+
+ const MemRegion *R = MR->getRegion();
+ // Are we dealing with an ElementRegion? If the element type is
+ // a basic integer type (e.g., char, int) and the underying region
+ // is a variable region then strip off the ElementRegion.
+ // FIXME: We really need to think about this for the general case
+ // as sometimes we are reasoning about arrays and other times
+ // about (char*), etc., which is just a form of passing raw bytes.
+ // e.g., void *p = alloca(); foo((char*)p);
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ // Checking for 'integral type' is probably too promiscuous, but
+ // we'll leave it in for now until we have a systematic way of
+ // handling all of these cases. Eventually we need to come up
+ // with an interface to StoreManager so that this logic can be
+ // appropriately delegated to the respective StoreManagers while
+ // still allowing us to do checker-specific logic (e.g.,
+ // invalidating reference counts), probably via callbacks.
+ if (ER->getElementType()->isIntegralType()) {
+ const MemRegion *superReg = ER->getSuperRegion();
+ if (isa<VarRegion>(superReg) || isa<FieldRegion>(superReg) ||
+ isa<ObjCIvarRegion>(superReg))
+ R = cast<TypedRegion>(superReg);
}
-
- // Is the invalidated variable something that we were tracking?
- SymbolRef Sym = state->getSValAsScalarOrLoc(R).getAsLocSymbol();
-
- // Remove any existing reference-count binding.
- if (Sym) state = state->remove<RefBindings>(Sym);
-
- if (R->isBoundable()) {
- // Set the value of the variable to be a conjured symbol.
-
- QualType T = R->getValueType(Ctx);
-
- if (Loc::IsLocType(T) || (T->isIntegerType() && T->isScalarType())){
- ValueManager &ValMgr = Eng.getValueManager();
- SVal V = ValMgr.getConjuredSymbolVal(*I, T, Count);
- state = state->bindLoc(ValMgr.makeLoc(R), V);
- }
- else if (const RecordType *RT = T->getAsStructureType()) {
- // Handle structs in a not so awesome way. Here we just
- // eagerly bind new symbols to the fields. In reality we
- // should have the store manager handle this. The idea is just
- // to prototype some basic functionality here. All of this logic
- // should one day soon just go away.
- const RecordDecl *RD = RT->getDecl()->getDefinition(Ctx);
-
- // No record definition. There is nothing we can do.
- if (!RD)
- continue;
-
- MemRegionManager &MRMgr =
- state->getStateManager().getRegionManager();
-
- // Iterate through the fields and construct new symbols.
- for (RecordDecl::field_iterator FI=RD->field_begin(),
- FE=RD->field_end(); FI!=FE; ++FI) {
-
- // For now just handle scalar fields.
- FieldDecl *FD = *FI;
- QualType FT = FD->getType();
- const FieldRegion* FR = MRMgr.getFieldRegion(FD, R);
-
- if (Loc::IsLocType(FT) ||
- (FT->isIntegerType() && FT->isScalarType())) {
- SVal V = ValMgr.getConjuredSymbolVal(*I, FT, Count);
- state = state->bindLoc(ValMgr.makeLoc(FR), V);
- }
- else if (FT->isStructureType()) {
- // set the default value of the struct field to conjured
- // symbol. Note that the type of the symbol is irrelavant.
- // We cannot use the type of the struct otherwise ValMgr won't
- // give us the conjured symbol.
- StoreManager& StoreMgr =
- Eng.getStateManager().getStoreManager();
- SVal V = ValMgr.getConjuredSymbolVal(*I,
- Eng.getContext().IntTy,
- Count);
- state = StoreMgr.setDefaultValue(state, FR, V);
- }
- }
- } else if (const ArrayType *AT = Ctx.getAsArrayType(T)) {
- // Set the default value of the array to conjured symbol.
- StoreManager& StoreMgr = Eng.getStateManager().getStoreManager();
- SVal V = ValMgr.getConjuredSymbolVal(*I, AT->getElementType(),
- Count);
- state = StoreMgr.setDefaultValue(state, R, V);
- } else {
- // Just blast away other values.
- state = state->bindLoc(*MR, UnknownVal());
- }
- }
- }
- else if (isa<AllocaRegion>(MR->getRegion())) {
- // Invalidate the alloca region by setting its default value to
- // conjured symbol. The type of the symbol is irrelavant.
- SVal V = ValMgr.getConjuredSymbolVal(*I, Eng.getContext().IntTy,
- Count);
- StoreManager& StoreMgr =
- Eng.getStateManager().getStoreManager();
- state = StoreMgr.setDefaultValue(state, MR->getRegion(), V);
+ // FIXME: What about layers of ElementRegions?
}
- else
- state = state->bindLoc(*MR, UnknownVal());
+
+ // Is the invalidated variable something that we were tracking?
+ SymbolRef Sym = state->getSValAsScalarOrLoc(R).getAsLocSymbol();
+
+ // Remove any existing reference-count binding.
+ if (Sym)
+ state = state->remove<RefBindings>(Sym);
+
+ state = StoreMgr.InvalidateRegion(state, R, *I, Count);
}
else {
// Nuke all other arguments passed by reference.
+ // FIXME: is this necessary or correct? This handles the non-Region
+ // cases. Is it ever valid to store to these?
state = state->unbindLoc(cast<Loc>(V));
}
}
- else if (isa<nonloc::LocAsInteger>(V))
- state = state->unbindLoc(cast<nonloc::LocAsInteger>(V).getLoc());
- }
-
- // Evaluate the effect on the message receiver.
+ else if (isa<nonloc::LocAsInteger>(V)) {
+ // If we are passing a location wrapped as an integer, unwrap it and
+ // invalidate the values referred by the location.
+ V = cast<nonloc::LocAsInteger>(V).getLoc();
+ goto tryAgain;
+ }
+ }
+
+ // Evaluate the effect on the message receiver.
if (!ErrorExpr && Receiver) {
SymbolRef Sym = state->getSValAsScalarOrLoc(Receiver).getAsLocSymbol();
if (Sym) {
@@ -2928,17 +2893,17 @@ void CFRefCount::EvalSummary(ExplodedNodeSet<GRState>& Dst,
}
}
}
-
- // Process any errors.
+
+ // Process any errors.
if (hasErr) {
ProcessNonLeakError(Dst, Builder, Ex, ErrorExpr, Pred, state,
hasErr, ErrorSym);
return;
}
-
- // Consult the summary for the return value.
+
+ // Consult the summary for the return value.
RetEffect RE = Summ.getRetEffect();
-
+
if (RE.getKind() == RetEffect::OwnedWhenTrackedReceiver) {
assert(Receiver);
SVal V = state->getSValAsScalarOrLoc(Receiver);
@@ -2951,57 +2916,57 @@ void CFRefCount::EvalSummary(ExplodedNodeSet<GRState>& Dst,
if (!found)
RE = RetEffect::MakeNoRet();
- }
-
+ }
+
switch (RE.getKind()) {
default:
assert (false && "Unhandled RetEffect."); break;
-
- case RetEffect::NoRet: {
+
+ case RetEffect::NoRet: {
// Make up a symbol for the return value (not reference counted).
// FIXME: Most of this logic is not specific to the retain/release
// checker.
-
+
// FIXME: We eventually should handle structs and other compound types
// that are returned by value.
-
+
QualType T = Ex->getType();
-
+
if (Loc::IsLocType(T) || (T->isIntegerType() && T->isScalarType())) {
unsigned Count = Builder.getCurrentBlockCount();
ValueManager &ValMgr = Eng.getValueManager();
- SVal X = ValMgr.getConjuredSymbolVal(Ex, T, Count);
- state = state->bindExpr(Ex, X, false);
- }
-
+ SVal X = ValMgr.getConjuredSymbolVal(NULL, Ex, T, Count);
+ state = state->BindExpr(Ex, X, false);
+ }
+
break;
}
-
+
case RetEffect::Alias: {
unsigned idx = RE.getIndex();
assert (arg_end >= arg_beg);
assert (idx < (unsigned) (arg_end - arg_beg));
SVal V = state->getSValAsScalarOrLoc(*(arg_beg+idx));
- state = state->bindExpr(Ex, V, false);
+ state = state->BindExpr(Ex, V, false);
break;
}
-
+
case RetEffect::ReceiverAlias: {
assert (Receiver);
SVal V = state->getSValAsScalarOrLoc(Receiver);
- state = state->bindExpr(Ex, V, false);
+ state = state->BindExpr(Ex, V, false);
break;
}
-
+
case RetEffect::OwnedAllocatedSymbol:
case RetEffect::OwnedSymbol: {
unsigned Count = Builder.getCurrentBlockCount();
- ValueManager &ValMgr = Eng.getValueManager();
+ ValueManager &ValMgr = Eng.getValueManager();
SymbolRef Sym = ValMgr.getConjuredSymbol(Ex, Count);
- QualType RetT = GetReturnType(Ex, ValMgr.getContext());
+ QualType RetT = GetReturnType(Ex, ValMgr.getContext());
state = state->set<RefBindings>(Sym, RefVal::makeOwned(RE.getObjKind(),
RetT));
- state = state->bindExpr(Ex, ValMgr.makeLoc(Sym), false);
+ state = state->BindExpr(Ex, ValMgr.makeLoc(Sym), false);
// FIXME: Add a flag to the checker where allocations are assumed to
    // *not* fail.
@@ -3009,57 +2974,57 @@ void CFRefCount::EvalSummary(ExplodedNodeSet<GRState>& Dst,
if (RE.getKind() == RetEffect::OwnedAllocatedSymbol) {
bool isFeasible;
state = state.Assume(loc::SymbolVal(Sym), true, isFeasible);
- assert(isFeasible && "Cannot assume fresh symbol is non-null.");
+ assert(isFeasible && "Cannot assume fresh symbol is non-null.");
}
#endif
-
+
break;
}
-
+
case RetEffect::GCNotOwnedSymbol:
case RetEffect::NotOwnedSymbol: {
unsigned Count = Builder.getCurrentBlockCount();
ValueManager &ValMgr = Eng.getValueManager();
SymbolRef Sym = ValMgr.getConjuredSymbol(Ex, Count);
- QualType RetT = GetReturnType(Ex, ValMgr.getContext());
+ QualType RetT = GetReturnType(Ex, ValMgr.getContext());
state = state->set<RefBindings>(Sym, RefVal::makeNotOwned(RE.getObjKind(),
RetT));
- state = state->bindExpr(Ex, ValMgr.makeLoc(Sym), false);
+ state = state->BindExpr(Ex, ValMgr.makeLoc(Sym), false);
break;
}
}
-
+
// Generate a sink node if we are at the end of a path.
- GRExprEngine::NodeTy *NewNode =
+ ExplodedNode *NewNode =
Summ.isEndPath() ? Builder.MakeSinkNode(Dst, Ex, Pred, state)
: Builder.MakeNode(Dst, Ex, Pred, state);
-
+
// Annotate the edge with summary we used.
if (NewNode) SummaryLog[NewNode] = &Summ;
}
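// After the argument and receiver effects are applied, EvalSummary binds the
// call expression's result according to the summary's return effect: alias an
// argument or the receiver, conjure a fresh owned (+1) or not-owned (+0)
// symbol, or leave an untracked conjured value. A stripped-down sketch of
// that dispatch; SketchRetKind and BindResult are hypothetical
// simplifications of RetEffect and the state-binding calls above.
enum SketchRetKind { SketchNoRet, SketchAliasArg, SketchAliasReceiver,
                     SketchOwned, SketchNotOwned };

struct BindResult {
  bool tracked;   // should a reference-count binding be created for the result?
  bool owned;     // if tracked, does the caller start at +1?
};

static BindResult bindReturnValue(SketchRetKind K) {
  BindResult R = { false, false };
  switch (K) {
  case SketchOwned:    R.tracked = true; R.owned = true;  break; // e.g. alloc/copy
  case SketchNotOwned: R.tracked = true; R.owned = false; break; // e.g. accessors
  case SketchNoRet:
  case SketchAliasArg:
  case SketchAliasReceiver:
    break;        // result reuses an existing value or stays untracked
  }
  return R;
}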
-void CFRefCount::EvalCall(ExplodedNodeSet<GRState>& Dst,
+void CFRefCount::EvalCall(ExplodedNodeSet& Dst,
GRExprEngine& Eng,
- GRStmtNodeBuilder<GRState>& Builder,
+ GRStmtNodeBuilder& Builder,
CallExpr* CE, SVal L,
- ExplodedNode<GRState>* Pred) {
+ ExplodedNode* Pred) {
const FunctionDecl* FD = L.getAsFunctionDecl();
- RetainSummary* Summ = !FD ? Summaries.getDefaultSummary()
+ RetainSummary* Summ = !FD ? Summaries.getDefaultSummary()
: Summaries.getSummary(const_cast<FunctionDecl*>(FD));
-
+
assert(Summ);
EvalSummary(Dst, Eng, Builder, CE, 0, *Summ,
CE->arg_begin(), CE->arg_end(), Pred);
}
-void CFRefCount::EvalObjCMessageExpr(ExplodedNodeSet<GRState>& Dst,
+void CFRefCount::EvalObjCMessageExpr(ExplodedNodeSet& Dst,
GRExprEngine& Eng,
- GRStmtNodeBuilder<GRState>& Builder,
+ GRStmtNodeBuilder& Builder,
ObjCMessageExpr* ME,
- ExplodedNode<GRState>* Pred) {
+ ExplodedNode* Pred) {
RetainSummary* Summ = 0;
-
+
if (Expr* Receiver = ME->getReceiver()) {
// We need the type-information of the tracked receiver object
// Retrieve it from the state.
@@ -3073,26 +3038,21 @@ void CFRefCount::EvalObjCMessageExpr(ExplodedNodeSet<GRState>& Dst,
SVal V = St->getSValAsScalarOrLoc(Receiver);
SymbolRef Sym = V.getAsLocSymbol();
+
if (Sym) {
if (const RefVal* T = St->get<RefBindings>(Sym)) {
- QualType Ty = T->getType();
-
- if (const PointerType* PT = Ty->getAsPointerType()) {
- QualType PointeeTy = PT->getPointeeType();
-
- if (ObjCInterfaceType* IT = dyn_cast<ObjCInterfaceType>(PointeeTy))
- ID = IT->getDecl();
- }
+ if (const ObjCObjectPointerType* PT =
+ T->getType()->getAs<ObjCObjectPointerType>())
+ ID = PT->getInterfaceDecl();
}
}
// FIXME: this is a hack. This may or may not be the actual method
// that is called.
if (!ID) {
- if (const PointerType *PT = Receiver->getType()->getAsPointerType())
- if (const ObjCInterfaceType *p =
- PT->getPointeeType()->getAsObjCInterfaceType())
- ID = p->getDecl();
+ if (const ObjCObjectPointerType *PT =
+ Receiver->getType()->getAs<ObjCObjectPointerType>())
+ ID = PT->getInterfaceDecl();
}
// FIXME: The receiver could be a reference to a class, meaning that
@@ -3101,16 +3061,22 @@ void CFRefCount::EvalObjCMessageExpr(ExplodedNodeSet<GRState>& Dst,
// Special-case: are we sending a mesage to "self"?
// This is a hack. When we have full-IP this should be removed.
- if (isa<ObjCMethodDecl>(&Eng.getGraph().getCodeDecl())) {
+ if (isa<ObjCMethodDecl>(Pred->getLocationContext()->getDecl())) {
if (Expr* Receiver = ME->getReceiver()) {
SVal X = St->getSValAsScalarOrLoc(Receiver);
- if (loc::MemRegionVal* L = dyn_cast<loc::MemRegionVal>(&X))
- if (L->getRegion() == St->getSelfRegion()) {
- // Update the summary to make the default argument effect
- // 'StopTracking'.
- Summ = Summaries.copySummary(Summ);
- Summ->setDefaultArgEffect(StopTracking);
+ if (loc::MemRegionVal* L = dyn_cast<loc::MemRegionVal>(&X)) {
+ // Get the region associated with 'self'.
+ const LocationContext *LC = Pred->getLocationContext();
+ if (const ImplicitParamDecl *SelfDecl = LC->getSelfDecl()) {
+ SVal SelfVal = St->getSVal(St->getRegion(SelfDecl, LC));
+ if (L->getBaseRegion() == SelfVal.getAsRegion()) {
+ // Update the summary to make the default argument effect
+ // 'StopTracking'.
+ Summ = Summaries.copySummary(Summ);
+ Summ->setDefaultArgEffect(StopTracking);
+ }
}
+ }
}
}
}
@@ -3137,18 +3103,18 @@ public:
}
};
} // end anonymous namespace
-
-void CFRefCount::EvalBind(GRStmtNodeBuilderRef& B, SVal location, SVal val) {
- // Are we storing to something that causes the value to "escape"?
+
+void CFRefCount::EvalBind(GRStmtNodeBuilderRef& B, SVal location, SVal val) {
+ // Are we storing to something that causes the value to "escape"?
bool escapes = false;
-
+
// A value escapes in three possible cases (this may change):
//
// (1) we are binding to something that is not a memory region.
// (2) we are binding to a memregion that does not have stack storage
// (3) we are binding to a memregion with stack storage that the store
- // does not understand.
+ // does not understand.
const GRState *state = B.getState();
if (!isa<loc::MemRegionVal>(location))
@@ -3156,7 +3122,7 @@ void CFRefCount::EvalBind(GRStmtNodeBuilderRef& B, SVal location, SVal val) {
else {
const MemRegion* R = cast<loc::MemRegionVal>(location).getRegion();
escapes = !R->hasStackStorage();
-
+
if (!escapes) {
// To test (3), generate a new state with the binding removed. If it is
// the same state, then it escapes (since the store cannot represent
@@ -3178,40 +3144,40 @@ void CFRefCount::EvalBind(GRStmtNodeBuilderRef& B, SVal location, SVal val) {
// Return statements.
-void CFRefCount::EvalReturn(ExplodedNodeSet<GRState>& Dst,
+void CFRefCount::EvalReturn(ExplodedNodeSet& Dst,
GRExprEngine& Eng,
- GRStmtNodeBuilder<GRState>& Builder,
+ GRStmtNodeBuilder& Builder,
ReturnStmt* S,
- ExplodedNode<GRState>* Pred) {
-
+ ExplodedNode* Pred) {
+
Expr* RetE = S->getRetValue();
if (!RetE)
return;
-
+
const GRState *state = Builder.GetState(Pred);
SymbolRef Sym = state->getSValAsScalarOrLoc(RetE).getAsLocSymbol();
-
+
if (!Sym)
return;
-
+
// Get the reference count binding (if any).
const RefVal* T = state->get<RefBindings>(Sym);
-
+
if (!T)
return;
-
- // Change the reference count.
- RefVal X = *T;
-
- switch (X.getKind()) {
- case RefVal::Owned: {
+
+ // Change the reference count.
+ RefVal X = *T;
+
+ switch (X.getKind()) {
+ case RefVal::Owned: {
unsigned cnt = X.getCount();
assert (cnt > 0);
X.setCount(cnt - 1);
X = X ^ RefVal::ReturnedOwned;
break;
}
-
+
case RefVal::NotOwned: {
unsigned cnt = X.getCount();
if (cnt) {
@@ -3223,39 +3189,39 @@ void CFRefCount::EvalReturn(ExplodedNodeSet<GRState>& Dst,
}
break;
}
-
- default:
+
+ default:
return;
}
-
+
// Update the binding.
state = state->set<RefBindings>(Sym, X);
Pred = Builder.MakeNode(Dst, S, Pred, state);
-
+
// Did we cache out?
if (!Pred)
return;
-
+
// Update the autorelease counts.
static unsigned autoreleasetag = 0;
GenericNodeBuilder Bd(Builder, S, &autoreleasetag);
bool stop = false;
llvm::tie(Pred, state) = HandleAutoreleaseCounts(state , Bd, Pred, Eng, Sym,
X, stop);
-
+
// Did we cache out?
if (!Pred || stop)
return;
-
+
// Get the updated binding.
T = state->get<RefBindings>(Sym);
assert(T);
X = *T;
-
+
// Any leaks or other errors?
if (X.isReturnedOwned() && X.getCount() == 0) {
- const Decl *CD = &Eng.getStateManager().getCodeDecl();
- if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(CD)) {
+ Decl const *CD = &Pred->getCodeDecl();
+ if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(CD)) {
const RetainSummary &Summ = *Summaries.getMethodSummary(MD);
RetEffect RE = Summ.getRetEffect();
bool hasError = false;
@@ -3267,25 +3233,26 @@ void CFRefCount::EvalReturn(ExplodedNodeSet<GRState>& Dst,
// a leak (as the caller expects a GC'ed object) because no
// method should return ownership unless it returns a CF object.
X = X ^ RefVal::ErrorGCLeakReturned;
-
+
// Keep this false until this is properly tested.
hasError = true;
}
else if (!RE.isOwned()) {
// Either we are using GC and the returned object is a CF type
// or we aren't using GC. In either case, we expect that the
- // enclosing method is expected to return ownership.
+ // enclosing method is expected to return ownership.
hasError = true;
X = X ^ RefVal::ErrorLeakReturned;
}
}
-
- if (hasError) {
+
+ if (hasError) {
// Generate an error node.
static int ReturnOwnLeakTag = 0;
state = state->set<RefBindings>(Sym, X);
- ExplodedNode<GRState> *N =
- Builder.generateNode(PostStmt(S, &ReturnOwnLeakTag), state, Pred);
+ ExplodedNode *N =
+ Builder.generateNode(PostStmt(S, Pred->getLocationContext(),
+ &ReturnOwnLeakTag), state, Pred);
if (N) {
CFRefReport *report =
new CFRefLeakReport(*static_cast<CFRefBug*>(leakAtReturn), *this,
@@ -3293,21 +3260,22 @@ void CFRefCount::EvalReturn(ExplodedNodeSet<GRState>& Dst,
BR->EmitReport(report);
}
}
- }
+ }
}
else if (X.isReturnedNotOwned()) {
- const Decl *CD = &Eng.getStateManager().getCodeDecl();
+ Decl const *CD = &Pred->getCodeDecl();
if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(CD)) {
const RetainSummary &Summ = *Summaries.getMethodSummary(MD);
if (Summ.getRetEffect().isOwned()) {
// Trying to return a not owned object to a caller expecting an
// owned object.
-
+
static int ReturnNotOwnedForOwnedTag = 0;
state = state->set<RefBindings>(Sym, X ^ RefVal::ErrorReturnedNotOwned);
- if (ExplodedNode<GRState> *N =
- Builder.generateNode(PostStmt(S, &ReturnNotOwnedForOwnedTag),
- state, Pred)) {
+ if (ExplodedNode *N =
+ Builder.generateNode(PostStmt(S, Pred->getLocationContext(),
+ &ReturnNotOwnedForOwnedTag),
+ state, Pred)) {
CFRefReport *report =
new CFRefReport(*static_cast<CFRefBug*>(returnNotOwnedForOwned),
*this, N, Sym);
@@ -3326,18 +3294,18 @@ const GRState* CFRefCount::EvalAssume(const GRState *state,
// FIXME: We may add to the interface of EvalAssume the list of symbols
// whose assumptions have changed. For now we just iterate through the
// bindings and check if any of the tracked symbols are NULL. This isn't
- // too bad since the number of symbols we will track in practice are
+ // too bad since the number of symbols we will track in practice are
// probably small and EvalAssume is only called at branches and a few
// other places.
RefBindings B = state->get<RefBindings>();
-
+
if (B.isEmpty())
return state;
-
- bool changed = false;
+
+ bool changed = false;
RefBindings::Factory& RefBFactory = state->get_context<RefBindings>();
- for (RefBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
+ for (RefBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
// Check if the symbol is null (or equal to any constant).
// If this is the case, stop tracking the symbol.
if (state->getSymVal(I.getKey())) {
@@ -3345,10 +3313,10 @@ const GRState* CFRefCount::EvalAssume(const GRState *state,
B = RefBFactory.Remove(B, I.getKey());
}
}
-
+
if (changed)
state = state->set<RefBindings>(B);
-
+
return state;
}
@@ -3362,21 +3330,21 @@ const GRState * CFRefCount::Update(const GRState * state, SymbolRef sym,
case IncRefMsg: E = isGCEnabled() ? DoNothing : IncRef; break;
case DecRefMsg: E = isGCEnabled() ? DoNothing : DecRef; break;
case MakeCollectable: E = isGCEnabled() ? DecRef : DoNothing; break;
- case NewAutoreleasePool: E = isGCEnabled() ? DoNothing :
+ case NewAutoreleasePool: E = isGCEnabled() ? DoNothing :
NewAutoreleasePool; break;
}
-
+
// Handle all use-after-releases.
if (!isGCEnabled() && V.getKind() == RefVal::Released) {
V = V ^ RefVal::ErrorUseAfterRelease;
hasErr = V.getKind();
return state->set<RefBindings>(sym, V);
- }
-
+ }
+
switch (E) {
default:
assert (false && "Unhandled CFRef transition.");
-
+
case Dealloc:
// Any use of -dealloc in GC is *bad*.
if (isGCEnabled()) {
@@ -3384,7 +3352,7 @@ const GRState * CFRefCount::Update(const GRState * state, SymbolRef sym,
hasErr = V.getKind();
break;
}
-
+
switch (V.getKind()) {
default:
assert(false && "Invalid case.");
@@ -3397,13 +3365,13 @@ const GRState * CFRefCount::Update(const GRState * state, SymbolRef sym,
V = V ^ RefVal::ErrorDeallocNotOwned;
hasErr = V.getKind();
break;
- }
+ }
break;
case NewAutoreleasePool:
assert(!isGCEnabled());
return state->add<AutoreleaseStack>(sym);
-
+
case MayEscape:
if (V.getKind() == RefVal::Owned) {
V = V ^ RefVal::NotOwned;
@@ -3411,7 +3379,7 @@ const GRState * CFRefCount::Update(const GRState * state, SymbolRef sym,
}
// Fall-through.
-
+
case DoNothingByRef:
case DoNothing:
return state;
@@ -3419,7 +3387,7 @@ const GRState * CFRefCount::Update(const GRState * state, SymbolRef sym,
case Autorelease:
if (isGCEnabled())
return state;
-
+
// Update the autorelease counts.
state = SendAutorelease(state, ARCountFactory, sym);
V = V.autorelease();
@@ -3428,7 +3396,7 @@ const GRState * CFRefCount::Update(const GRState * state, SymbolRef sym,
case StopTracking:
return state->remove<RefBindings>(sym);
- case IncRef:
+ case IncRef:
switch (V.getKind()) {
default:
assert(false);
@@ -3436,15 +3404,15 @@ const GRState * CFRefCount::Update(const GRState * state, SymbolRef sym,
case RefVal::Owned:
case RefVal::NotOwned:
V = V + 1;
- break;
+ break;
case RefVal::Released:
// Non-GC cases are handled above.
assert(isGCEnabled());
V = (V ^ RefVal::Owned) + 1;
break;
- }
+ }
break;
-
+
case SelfOwn:
V = V ^ RefVal::NotOwned;
// Fall-through.
@@ -3459,23 +3427,23 @@ const GRState * CFRefCount::Update(const GRState * state, SymbolRef sym,
if (V.getCount() == 1) V = V ^ RefVal::Released;
V = V - 1;
break;
-
+
case RefVal::NotOwned:
if (V.getCount() > 0)
V = V - 1;
else {
V = V ^ RefVal::ErrorReleaseNotOwned;
hasErr = V.getKind();
- }
+ }
break;
-
+
case RefVal::Released:
// Non-GC cases are handled above.
assert(isGCEnabled());
V = V ^ RefVal::ErrorUseAfterRelease;
hasErr = V.getKind();
- break;
- }
+ break;
+ }
break;
}
return state->set<RefBindings>(sym, V);
@@ -3485,27 +3453,27 @@ const GRState * CFRefCount::Update(const GRState * state, SymbolRef sym,
// Handle dead symbols and end-of-path.
//===----------------------------------------------------------------------===//
-std::pair<ExplodedNode<GRState>*, const GRState *>
+std::pair<ExplodedNode*, const GRState *>
CFRefCount::HandleAutoreleaseCounts(const GRState * state, GenericNodeBuilder Bd,
- ExplodedNode<GRState>* Pred,
+ ExplodedNode* Pred,
GRExprEngine &Eng,
SymbolRef Sym, RefVal V, bool &stop) {
-
+
unsigned ACnt = V.getAutoreleaseCount();
stop = false;
// No autorelease counts? Nothing to be done.
if (!ACnt)
return std::make_pair(Pred, state);
-
- assert(!isGCEnabled() && "Autorelease counts in GC mode?");
+
+ assert(!isGCEnabled() && "Autorelease counts in GC mode?");
unsigned Cnt = V.getCount();
-
+
// FIXME: Handle sending 'autorelease' to already released object.
if (V.getKind() == RefVal::ReturnedOwned)
++Cnt;
-
+
if (ACnt <= Cnt) {
if (ACnt == Cnt) {
V.clearCounts();
@@ -3519,10 +3487,10 @@ CFRefCount::HandleAutoreleaseCounts(const GRState * state, GenericNodeBuilder Bd
V.setAutoreleaseCount(0);
}
state = state->set<RefBindings>(Sym, V);
- ExplodedNode<GRState> *N = Bd.MakeNode(state, Pred);
+ ExplodedNode *N = Bd.MakeNode(state, Pred);
stop = (N == 0);
return std::make_pair(N, state);
- }
+ }
  // Whoa! More autorelease counts than retain counts left.
// Emit hard error.
@@ -3530,9 +3498,9 @@ CFRefCount::HandleAutoreleaseCounts(const GRState * state, GenericNodeBuilder Bd
V = V ^ RefVal::ErrorOverAutorelease;
state = state->set<RefBindings>(Sym, V);
- if (ExplodedNode<GRState> *N = Bd.MakeNode(state, Pred)) {
+ if (ExplodedNode *N = Bd.MakeNode(state, Pred)) {
N->markAsSink();
-
+
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
os << "Object over-autoreleased: object was sent -autorelease";
@@ -3544,95 +3512,95 @@ CFRefCount::HandleAutoreleaseCounts(const GRState * state, GenericNodeBuilder Bd
else
os << "+" << V.getCount();
os << " retain counts";
-
+
CFRefReport *report =
new CFRefReport(*static_cast<CFRefBug*>(overAutorelease),
*this, N, Sym, os.str().c_str());
BR->EmitReport(report);
}
-
- return std::make_pair((ExplodedNode<GRState>*)0, state);
+
+ return std::make_pair((ExplodedNode*)0, state);
}
const GRState *
CFRefCount::HandleSymbolDeath(const GRState * state, SymbolRef sid, RefVal V,
llvm::SmallVectorImpl<SymbolRef> &Leaked) {
-
- bool hasLeak = V.isOwned() ||
+
+ bool hasLeak = V.isOwned() ||
((V.isNotOwned() || V.isReturnedOwned()) && V.getCount() > 0);
-
+
if (!hasLeak)
return state->remove<RefBindings>(sid);
-
+
Leaked.push_back(sid);
return state->set<RefBindings>(sid, V ^ RefVal::ErrorLeak);
}
-ExplodedNode<GRState>*
+ExplodedNode*
CFRefCount::ProcessLeaks(const GRState * state,
llvm::SmallVectorImpl<SymbolRef> &Leaked,
GenericNodeBuilder &Builder,
GRExprEngine& Eng,
- ExplodedNode<GRState> *Pred) {
-
+ ExplodedNode *Pred) {
+
if (Leaked.empty())
return Pred;
-
+
// Generate an intermediate node representing the leak point.
- ExplodedNode<GRState> *N = Builder.MakeNode(state, Pred);
-
+ ExplodedNode *N = Builder.MakeNode(state, Pred);
+
if (N) {
for (llvm::SmallVectorImpl<SymbolRef>::iterator
I = Leaked.begin(), E = Leaked.end(); I != E; ++I) {
-
- CFRefBug *BT = static_cast<CFRefBug*>(Pred ? leakWithinFunction
+
+ CFRefBug *BT = static_cast<CFRefBug*>(Pred ? leakWithinFunction
: leakAtReturn);
assert(BT && "BugType not initialized.");
CFRefLeakReport* report = new CFRefLeakReport(*BT, *this, N, *I, Eng);
BR->EmitReport(report);
}
}
-
+
return N;
}
void CFRefCount::EvalEndPath(GRExprEngine& Eng,
- GREndPathNodeBuilder<GRState>& Builder) {
-
+ GREndPathNodeBuilder& Builder) {
+
const GRState *state = Builder.getState();
GenericNodeBuilder Bd(Builder);
- RefBindings B = state->get<RefBindings>();
- ExplodedNode<GRState> *Pred = 0;
+ RefBindings B = state->get<RefBindings>();
+ ExplodedNode *Pred = 0;
for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
bool stop = false;
llvm::tie(Pred, state) = HandleAutoreleaseCounts(state, Bd, Pred, Eng,
(*I).first,
- (*I).second, stop);
+ (*I).second, stop);
if (stop)
return;
}
-
- B = state->get<RefBindings>();
- llvm::SmallVector<SymbolRef, 10> Leaked;
-
+
+ B = state->get<RefBindings>();
+ llvm::SmallVector<SymbolRef, 10> Leaked;
+
for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I)
state = HandleSymbolDeath(state, (*I).first, (*I).second, Leaked);
ProcessLeaks(state, Leaked, Bd, Eng, Pred);
}
-void CFRefCount::EvalDeadSymbols(ExplodedNodeSet<GRState>& Dst,
+void CFRefCount::EvalDeadSymbols(ExplodedNodeSet& Dst,
GRExprEngine& Eng,
- GRStmtNodeBuilder<GRState>& Builder,
- ExplodedNode<GRState>* Pred,
+ GRStmtNodeBuilder& Builder,
+ ExplodedNode* Pred,
Stmt* S,
const GRState* state,
SymbolReaper& SymReaper) {
RefBindings B = state->get<RefBindings>();
-
+
// Update counts from autorelease pools
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
E = SymReaper.dead_end(); I != E; ++I) {
@@ -3648,57 +3616,57 @@ void CFRefCount::EvalDeadSymbols(ExplodedNodeSet<GRState>& Dst,
return;
}
}
-
+
B = state->get<RefBindings>();
llvm::SmallVector<SymbolRef, 10> Leaked;
-
+
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
- E = SymReaper.dead_end(); I != E; ++I) {
+ E = SymReaper.dead_end(); I != E; ++I) {
if (const RefVal* T = B.lookup(*I))
state = HandleSymbolDeath(state, *I, *T, Leaked);
- }
-
+ }
+
static unsigned LeakPPTag = 0;
{
GenericNodeBuilder Bd(Builder, S, &LeakPPTag);
Pred = ProcessLeaks(state, Leaked, Bd, Eng, Pred);
}
-
+
// Did we cache out?
if (!Pred)
return;
-
+
// Now generate a new node that nukes the old bindings.
RefBindings::Factory& F = state->get_context<RefBindings>();
-
+
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
E = SymReaper.dead_end(); I!=E; ++I) B = F.Remove(B, *I);
-
+
state = state->set<RefBindings>(B);
Builder.MakeNode(Dst, S, Pred, state);
}
-void CFRefCount::ProcessNonLeakError(ExplodedNodeSet<GRState>& Dst,
- GRStmtNodeBuilder<GRState>& Builder,
- Expr* NodeExpr, Expr* ErrorExpr,
- ExplodedNode<GRState>* Pred,
+void CFRefCount::ProcessNonLeakError(ExplodedNodeSet& Dst,
+ GRStmtNodeBuilder& Builder,
+ Expr* NodeExpr, Expr* ErrorExpr,
+ ExplodedNode* Pred,
const GRState* St,
RefVal::Kind hasErr, SymbolRef Sym) {
Builder.BuildSinks = true;
- GRExprEngine::NodeTy* N = Builder.MakeNode(Dst, NodeExpr, Pred, St);
-
+ ExplodedNode *N = Builder.MakeNode(Dst, NodeExpr, Pred, St);
+
if (!N)
return;
-
+
CFRefBug *BT = 0;
-
+
switch (hasErr) {
default:
assert(false && "Unhandled error.");
return;
case RefVal::ErrorUseAfterRelease:
BT = static_cast<CFRefBug*>(useAfterRelease);
- break;
+ break;
case RefVal::ErrorReleaseNotOwned:
BT = static_cast<CFRefBug*>(releaseNotOwned);
break;
@@ -3709,7 +3677,7 @@ void CFRefCount::ProcessNonLeakError(ExplodedNodeSet<GRState>& Dst,
BT = static_cast<CFRefBug*>(deallocNotOwned);
break;
}
-
+
CFRefReport *report = new CFRefReport(*BT, *this, N, Sym);
report->addRange(ErrorExpr->getSourceRange());
BR->EmitReport(report);
@@ -3722,4 +3690,4 @@ void CFRefCount::ProcessNonLeakError(ExplodedNodeSet<GRState>& Dst,
GRTransferFuncs* clang::MakeCFRefCountTF(ASTContext& Ctx, bool GCEnabled,
const LangOptions& lopts) {
return new CFRefCount(Ctx, GCEnabled, lopts);
-}
+}
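
The retain-count transitions in CFRefCount::Update and the end-of-path reconciliation in HandleAutoreleaseCounts above form a small per-symbol state machine. The standalone sketch below is not the checker's code -- ToyRefVal, apply and reconcile are invented stand-ins for RefVal, GRState and the immutable RefBindings map -- but it follows the shape of the non-GC transition rules and the autorelease accounting shown in the hunks above, which may make the error kinds (use-after-release, release-not-owned, over-autorelease) easier to follow.

#include <cstdio>

// Toy model of one tracked reference: ownership kind, retain count, and the
// number of pending -autorelease sends.
enum Kind { Owned, NotOwned, Released, ErrorUseAfterRelease,
            ErrorReleaseNotOwned, ErrorOverAutorelease };

struct ToyRefVal {
  Kind kind;
  unsigned count;      // outstanding retains
  unsigned autoCount;  // pending autoreleases
};

enum Effect { IncRef, DecRef, Autorelease };

// Apply one effect, following the non-GC cases of CFRefCount::Update above.
static ToyRefVal apply(ToyRefVal V, Effect E) {
  // Any further effect on an already-released value is a use-after-release,
  // mirroring the early check at the top of Update.
  if (V.kind == Released) {
    V.kind = ErrorUseAfterRelease;
    return V;
  }
  switch (E) {
  case IncRef:
    ++V.count;
    break;
  case Autorelease:
    ++V.autoCount;
    break;
  case DecRef:
    if (V.kind == Owned) {
      if (V.count == 1)
        V.kind = Released;              // dropping the last retain
      --V.count;
    } else if (V.count > 0) {
      --V.count;                        // NotOwned with surplus retains
    } else {
      V.kind = ErrorReleaseNotOwned;    // released something never owned
    }
    break;
  }
  return V;
}

// End-of-path reconciliation, analogous to HandleAutoreleaseCounts: more
// pending autoreleases than retains is an over-autorelease error.
static ToyRefVal reconcile(ToyRefVal V) {
  if (V.autoCount > V.count) {
    V.kind = ErrorOverAutorelease;
    return V;
  }
  V.count -= V.autoCount;
  V.autoCount = 0;
  return V;
}

int main() {
  ToyRefVal V = {Owned, 1, 0};  // e.g. the result of a CFCreate-style call
  V = apply(V, Autorelease);    // sent -autorelease...
  V = apply(V, Autorelease);    // ...twice, with only one retain to cover it
  V = reconcile(V);
  std::printf("kind=%d (5 == ErrorOverAutorelease)\n", (int)V.kind);
  return 0;
}
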
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
index 7d6a619736e0..89c1783cc2f5 100644
--- a/lib/Analysis/CMakeLists.txt
+++ b/lib/Analysis/CMakeLists.txt
@@ -1,17 +1,24 @@
set(LLVM_NO_RTTI 1)
add_clang_library(clangAnalysis
+ AnalysisContext.cpp
+ AnalysisManager.cpp
BasicConstraintManager.cpp
BasicObjCFoundationChecks.cpp
BasicStore.cpp
BasicValueFactory.cpp
BugReporter.cpp
+ BugReporterVisitors.cpp
+ CFG.cpp
CFRefCount.cpp
+ CallGraph.cpp
+ CallInliner.cpp
CheckDeadStores.cpp
CheckNSError.cpp
CheckObjCDealloc.cpp
CheckObjCInstMethSignature.cpp
CheckObjCUnusedIVars.cpp
+ CheckSecuritySyntaxOnly.cpp
Environment.cpp
ExplodedGraph.cpp
GRBlockCounter.cpp
@@ -24,10 +31,11 @@ add_clang_library(clangAnalysis
PathDiagnostic.cpp
RangeConstraintManager.cpp
RegionStore.cpp
+ SVals.cpp
+ SValuator.cpp
SimpleConstraintManager.cpp
SimpleSValuator.cpp
Store.cpp
- SVals.cpp
SymbolManager.cpp
UninitializedValues.cpp
ValueManager.cpp
diff --git a/lib/Analysis/CallGraph.cpp b/lib/Analysis/CallGraph.cpp
new file mode 100644
index 000000000000..ae8845db63ae
--- /dev/null
+++ b/lib/Analysis/CallGraph.cpp
@@ -0,0 +1,150 @@
+//== CallGraph.cpp - Call graph building ------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CallGraph and CGBuilder classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/CallGraph.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtVisitor.h"
+
+#include "llvm/Support/GraphWriter.h"
+
+using namespace clang;
+using namespace idx;
+
+namespace {
+class CGBuilder : public StmtVisitor<CGBuilder> {
+
+ CallGraph &G;
+ FunctionDecl *FD;
+
+ Entity CallerEnt;
+
+ CallGraphNode *CallerNode;
+
+public:
+ CGBuilder(CallGraph &g, FunctionDecl *fd, Entity E, CallGraphNode *N)
+ : G(g), FD(fd), CallerEnt(E), CallerNode(N) {}
+
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+
+ void VisitCallExpr(CallExpr *CE);
+
+ void VisitChildren(Stmt *S) {
+ for (Stmt::child_iterator I=S->child_begin(), E=S->child_end(); I != E;++I)
+ if (*I)
+ static_cast<CGBuilder*>(this)->Visit(*I);
+ }
+};
+}
+
+void CGBuilder::VisitCallExpr(CallExpr *CE) {
+ if (FunctionDecl *CalleeDecl = CE->getDirectCallee()) {
+ Entity Ent = Entity::get(CalleeDecl, G.getProgram());
+ CallGraphNode *CalleeNode = G.getOrInsertFunction(Ent);
+ CallerNode->addCallee(ASTLocation(FD, CE), CalleeNode);
+ }
+}
+
+CallGraph::CallGraph() : Root(0) {
+ ExternalCallingNode = getOrInsertFunction(Entity());
+}
+
+CallGraph::~CallGraph() {
+ if (!FunctionMap.empty()) {
+ for (FunctionMapTy::iterator I = FunctionMap.begin(), E = FunctionMap.end();
+ I != E; ++I)
+ delete I->second;
+ FunctionMap.clear();
+ }
+}
+
+void CallGraph::addTU(ASTUnit &AST) {
+ ASTContext &Ctx = AST.getASTContext();
+ DeclContext *DC = Ctx.getTranslationUnitDecl();
+
+ for (DeclContext::decl_iterator I = DC->decls_begin(), E = DC->decls_end();
+ I != E; ++I) {
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
+ if (FD->isThisDeclarationADefinition()) {
+ // Set caller's ASTContext.
+ Entity Ent = Entity::get(FD, Prog);
+ CallGraphNode *Node = getOrInsertFunction(Ent);
+ CallerCtx[Node] = &Ctx;
+
+ // If this function has external linkage, anything could call it.
+ if (FD->isGlobal())
+ ExternalCallingNode->addCallee(idx::ASTLocation(), Node);
+
+ // Set root node to 'main' function.
+ if (FD->getNameAsString() == "main")
+ Root = Node;
+
+ CGBuilder builder(*this, FD, Ent, Node);
+ builder.Visit(FD->getBody());
+ }
+ }
+ }
+}
+
+CallGraphNode *CallGraph::getOrInsertFunction(Entity F) {
+ CallGraphNode *&Node = FunctionMap[F];
+ if (Node)
+ return Node;
+
+ return Node = new CallGraphNode(F);
+}
+
+Decl *CallGraph::getDecl(CallGraphNode *Node) {
+ // Get the function's context.
+ ASTContext *Ctx = CallerCtx[Node];
+
+ return Node->getDecl(*Ctx);
+}
+
+void CallGraph::print(llvm::raw_ostream &os) {
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ if (I->second->hasCallee()) {
+ os << "function: " << I->first.getPrintableName()
+ << " calls:\n";
+ for (CallGraphNode::iterator CI = I->second->begin(),
+ CE = I->second->end(); CI != CE; ++CI) {
+ os << " " << CI->second->getName().c_str();
+ }
+ os << '\n';
+ }
+ }
+}
+
+void CallGraph::dump() {
+ print(llvm::errs());
+}
+
+void CallGraph::ViewCallGraph() const {
+ llvm::ViewGraph(*this, "CallGraph");
+}
+
+namespace llvm {
+
+template <>
+struct DOTGraphTraits<CallGraph> : public DefaultDOTGraphTraits {
+
+ static std::string getNodeLabel(const CallGraphNode *Node,
+ const CallGraph &CG, bool ShortNames) {
+ return Node->getName();
+
+ }
+
+};
+
+}
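
CGBuilder above visits each function body and records one caller-to-callee edge per direct CallExpr, keyed by idx::Entity, and CallGraph::print then dumps that adjacency list. The sketch below reproduces just that structure with plain strings standing in for Entity and ASTLocation; ToyCallGraph and its methods are invented for the illustration and are not part of the clang API.

#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Minimal call graph: one node per function name, edges to direct callees.
class ToyCallGraph {
  std::map<std::string, std::vector<std::string> > Edges;
public:
  // What CGBuilder::VisitCallExpr records for each direct call it sees.
  void addCall(const std::string &Caller, const std::string &Callee) {
    Edges[Caller].push_back(Callee);
  }

  // Same output shape as CallGraph::print: list every node that has callees.
  void print() const {
    for (std::map<std::string, std::vector<std::string> >::const_iterator
           I = Edges.begin(), E = Edges.end(); I != E; ++I) {
      if (I->second.empty())
        continue;
      std::printf("function: %s calls:\n", I->first.c_str());
      for (unsigned i = 0, e = I->second.size(); i != e; ++i)
        std::printf("  %s\n", I->second[i].c_str());
    }
  }
};

int main() {
  // Edges that would be recorded for:
  //   void helper() { printf("hi\n"); }
  //   int main()    { helper(); helper(); }
  ToyCallGraph G;
  G.addCall("helper", "printf");
  G.addCall("main", "helper");
  G.addCall("main", "helper");
  G.print();
  return 0;
}
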
diff --git a/lib/Analysis/CallInliner.cpp b/lib/Analysis/CallInliner.cpp
new file mode 100644
index 000000000000..cca8584a61fa
--- /dev/null
+++ b/lib/Analysis/CallInliner.cpp
@@ -0,0 +1,75 @@
+//===--- CallInliner.cpp - Transfer function that inlines callee ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the callee inlining transfer function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/GRExprEngine.h"
+#include "clang/Analysis/PathSensitive/GRTransferFuncs.h"
+
+using namespace clang;
+
+namespace {
+
+class VISIBILITY_HIDDEN CallInliner : public GRTransferFuncs {
+ ASTContext &Ctx;
+public:
+ CallInliner(ASTContext &ctx) : Ctx(ctx) {}
+
+ void EvalCall(ExplodedNodeSet& Dst, GRExprEngine& Engine,
+ GRStmtNodeBuilder& Builder, CallExpr* CE, SVal L,
+ ExplodedNode* Pred);
+
+};
+
+}
+
+void CallInliner::EvalCall(ExplodedNodeSet& Dst, GRExprEngine& Engine,
+ GRStmtNodeBuilder& Builder, CallExpr* CE, SVal L,
+ ExplodedNode* Pred) {
+ FunctionDecl const *FD = L.getAsFunctionDecl();
+ if (!FD)
+ return; // GRExprEngine is responsible for the autotransition.
+
+ // Make a new LocationContext.
+ StackFrameContext const *LocCtx =
+ Engine.getAnalysisManager().getStackFrame(FD, Pred->getLocationContext(), CE);
+
+ CFGBlock const *Entry = &(LocCtx->getCFG()->getEntry());
+
+ assert (Entry->empty() && "Entry block must be empty.");
+
+ assert (Entry->succ_size() == 1 && "Entry block must have 1 successor.");
+
+ // Get the solitary successor.
+ CFGBlock const *SuccB = *(Entry->succ_begin());
+
+ // Construct an edge representing the starting location in the function.
+ BlockEdge Loc(Entry, SuccB, LocCtx);
+
+ GRState const *state = Builder.GetState(Pred);
+ state = Engine.getStoreManager().EnterStackFrame(state, LocCtx);
+
+ bool isNew;
+ ExplodedNode *SuccN = Engine.getGraph().getNode(Loc, state, &isNew);
+ SuccN->addPredecessor(Pred, Engine.getGraph());
+
+ Builder.Deferred.erase(Pred);
+
+ // This is a hack. We really should not use the GRStmtNodeBuilder.
+ if (isNew)
+ Builder.getWorkList()->Enqueue(SuccN);
+
+ Builder.HasGeneratedNode = true;
+}
+
+GRTransferFuncs *clang::CreateCallInliner(ASTContext &ctx) {
+ return new CallInliner(ctx);
+}
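
CallInliner::EvalCall above replaces the usual call summary with a hand-off: it builds a StackFrameContext for the callee, takes the sole successor of the callee CFG's entry block, and enqueues an exploded node at that BlockEdge so the engine keeps analyzing inside the callee. The sketch below only illustrates that worklist hand-off -- Frame, WorkItem and the string block names are invented here and stand in for StackFrameContext, ExplodedNode and CFG blocks.

#include <cstdio>
#include <deque>
#include <string>

// One analysis frame per inlined call, linked back to its caller, much like
// a chain of LocationContexts.
struct Frame {
  std::string Function;
  const Frame *Caller;
};

// A pending unit of work: which block to process, and in which frame.
struct WorkItem {
  std::string Block;
  const Frame *Ctx;
};

int main() {
  Frame Top = {"caller", 0};
  Frame Inlined = {"callee", &Top};

  std::deque<WorkItem> Worklist;
  WorkItem Start = {"caller.entry", &Top};
  Worklist.push_back(Start);

  while (!Worklist.empty()) {
    WorkItem W = Worklist.front();
    Worklist.pop_front();
    std::printf("analyzing block '%s' in frame '%s'\n",
                W.Block.c_str(), W.Ctx->Function.c_str());

    // Pretend "caller.entry" contains a call: instead of applying a summary,
    // enqueue the callee's entry block under the freshly built child frame,
    // which is the transition EvalCall expresses with a BlockEdge.
    if (W.Block == "caller.entry") {
      WorkItem Descend = {"callee.entry", &Inlined};
      Worklist.push_back(Descend);
    }
  }
  return 0;
}
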
diff --git a/lib/Analysis/CheckDeadStores.cpp b/lib/Analysis/CheckDeadStores.cpp
index 69433d6396a5..d5cb7ca7fdd3 100644
--- a/lib/Analysis/CheckDeadStores.cpp
+++ b/lib/Analysis/CheckDeadStores.cpp
@@ -33,14 +33,14 @@ class VISIBILITY_HIDDEN DeadStoreObs : public LiveVariables::ObserverTy {
BugReporter& BR;
ParentMap& Parents;
llvm::SmallPtrSet<VarDecl*, 20> Escaped;
-
+
enum DeadStoreKind { Standard, Enclosing, DeadIncrement, DeadInit };
-
+
public:
DeadStoreObs(ASTContext &ctx, BugReporter& br, ParentMap& parents,
llvm::SmallPtrSet<VarDecl*, 20> &escaped)
: Ctx(ctx), BR(br), Parents(parents), Escaped(escaped) {}
-
+
virtual ~DeadStoreObs() {}
void Report(VarDecl* V, DeadStoreKind dsk, SourceLocation L, SourceRange R) {
@@ -48,27 +48,27 @@ public:
return;
std::string name = V->getNameAsString();
-
+
const char* BugType = 0;
std::string msg;
-
+
switch (dsk) {
default:
assert(false && "Impossible dead store type.");
-
+
case DeadInit:
BugType = "Dead initialization";
msg = "Value stored to '" + name +
"' during its initialization is never read";
break;
-
+
case DeadIncrement:
BugType = "Dead increment";
case Standard:
if (!BugType) BugType = "Dead assignment";
msg = "Value stored to '" + name + "' is never read";
break;
-
+
case Enclosing:
BugType = "Dead nested assignment";
msg = "Although the value stored to '" + name +
@@ -76,10 +76,10 @@ public:
" read from '" + name + "'";
break;
}
-
- BR.EmitBasicReport(BugType, "Dead store", msg.c_str(), L, R);
+
+ BR.EmitBasicReport(BugType, "Dead store", msg.c_str(), L, R);
}
-
+
void CheckVarDecl(VarDecl* VD, Expr* Ex, Expr* Val,
DeadStoreKind dsk,
const LiveVariables::AnalysisDataTy& AD,
@@ -87,60 +87,60 @@ public:
if (VD->hasLocalStorage() && !Live(VD, AD) && !VD->getAttr<UnusedAttr>())
Report(VD, dsk, Ex->getSourceRange().getBegin(),
- Val->getSourceRange());
+ Val->getSourceRange());
}
-
+
void CheckDeclRef(DeclRefExpr* DR, Expr* Val, DeadStoreKind dsk,
const LiveVariables::AnalysisDataTy& AD,
const LiveVariables::ValTy& Live) {
-
+
if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl()))
CheckVarDecl(VD, DR, Val, dsk, AD, Live);
}
-
+
bool isIncrement(VarDecl* VD, BinaryOperator* B) {
if (B->isCompoundAssignmentOp())
return true;
-
+
Expr* RHS = B->getRHS()->IgnoreParenCasts();
BinaryOperator* BRHS = dyn_cast<BinaryOperator>(RHS);
-
+
if (!BRHS)
return false;
-
+
DeclRefExpr *DR;
-
+
if ((DR = dyn_cast<DeclRefExpr>(BRHS->getLHS()->IgnoreParenCasts())))
if (DR->getDecl() == VD)
return true;
-
+
if ((DR = dyn_cast<DeclRefExpr>(BRHS->getRHS()->IgnoreParenCasts())))
if (DR->getDecl() == VD)
return true;
-
+
return false;
}
-
+
virtual void ObserveStmt(Stmt* S,
const LiveVariables::AnalysisDataTy& AD,
const LiveVariables::ValTy& Live) {
-
+
// Skip statements in macros.
if (S->getLocStart().isMacroID())
return;
-
- if (BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
+
+ if (BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
if (!B->isAssignmentOp()) return; // Skip non-assignments.
-
+
if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(B->getLHS()))
if (VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
Expr* RHS = B->getRHS()->IgnoreParenCasts();
-
+
// Special case: check for assigning null to a pointer.
- // This is a common form of defensive programming.
+ // This is a common form of defensive programming.
if (VD->getType()->isPointerType()) {
if (IntegerLiteral* L = dyn_cast<IntegerLiteral>(RHS))
- // FIXME: Probably should have an Expr::isNullPointerConstant.
+ // FIXME: Probably should have an Expr::isNullPointerConstant.
if (L->getValue() == 0)
return;
}
@@ -149,19 +149,19 @@ public:
if (DeclRefExpr* RhsDR = dyn_cast<DeclRefExpr>(RHS))
if (VD == dyn_cast<VarDecl>(RhsDR->getDecl()))
return;
-
+
// Otherwise, issue a warning.
DeadStoreKind dsk = Parents.isConsumedExpr(B)
- ? Enclosing
+ ? Enclosing
: (isIncrement(VD,B) ? DeadIncrement : Standard);
-
+
CheckVarDecl(VD, DR, B->getRHS(), dsk, AD, Live);
- }
+ }
}
else if (UnaryOperator* U = dyn_cast<UnaryOperator>(S)) {
if (!U->isIncrementOp())
return;
-
+
    // Handle: ++x within a subexpression.  The solution is to not warn
// about preincrements to dead variables when the preincrement occurs
// as a subexpression. This can lead to false negatives, e.g. "(++x);"
@@ -170,21 +170,21 @@ public:
return;
Expr *Ex = U->getSubExpr()->IgnoreParenCasts();
-
+
if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(Ex))
CheckDeclRef(DR, U, DeadIncrement, AD, Live);
- }
+ }
else if (DeclStmt* DS = dyn_cast<DeclStmt>(S))
// Iterate through the decls. Warn if any initializers are complex
// expressions that are not live (never used).
for (DeclStmt::decl_iterator DI=DS->decl_begin(), DE=DS->decl_end();
DI != DE; ++DI) {
-
+
VarDecl* V = dyn_cast<VarDecl>(*DI);
if (!V)
continue;
-
+
if (V->hasLocalStorage())
if (Expr* E = V->getInit()) {
// A dead initialization is a variable that is dead after it
@@ -200,7 +200,7 @@ public:
// due to defensive programming.
if (E->isConstantInitializer(Ctx))
return;
-
+
// Special case: check for initializations from constant
// variables.
//
@@ -211,14 +211,14 @@ public:
if (VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
if (VD->hasGlobalStorage() &&
VD->getType().isConstQualified()) return;
-
+
Report(V, DeadInit, V->getLocation(), E->getSourceRange());
}
}
}
}
};
-
+
} // end anonymous namespace
//===----------------------------------------------------------------------===//
@@ -230,9 +230,9 @@ class VISIBILITY_HIDDEN FindEscaped : public CFGRecStmtDeclVisitor<FindEscaped>{
CFG *cfg;
public:
FindEscaped(CFG *c) : cfg(c) {}
-
+
CFG& getCFG() { return *cfg; }
-
+
llvm::SmallPtrSet<VarDecl*, 20> Escaped;
void VisitUnaryOperator(UnaryOperator* U) {
@@ -249,11 +249,12 @@ public:
}
};
} // end anonymous namespace
-
-void clang::CheckDeadStores(LiveVariables& L, BugReporter& BR) {
- FindEscaped FS(BR.getCFG());
- FS.getCFG().VisitBlockStmts(FS);
- DeadStoreObs A(BR.getContext(), BR, BR.getParentMap(), FS.Escaped);
- L.runOnAllBlocks(*BR.getCFG(), &A);
+
+void clang::CheckDeadStores(CFG &cfg, LiveVariables &L, ParentMap &pmap,
+ BugReporter& BR) {
+ FindEscaped FS(&cfg);
+ FS.getCFG().VisitBlockStmts(FS);
+ DeadStoreObs A(BR.getContext(), BR, pmap, FS.Escaped);
+ L.runOnAllBlocks(cfg, &A);
}
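
DeadStoreObs above classifies each dead store as DeadInit, DeadIncrement, Standard or Enclosing, and deliberately skips idioms such as nulling a pointer, self-assignment, constant initializers and address-taken ("escaped") variables. The snippet below is a hypothetical input, not a test case from the tree, annotated with what the checker would and would not report for it.

// Illustrative input for the dead-store checker; the annotations name the
// DeadStoreKind involved.
int dead_stores(int n) {
  int x = n + 1;    // DeadInit: non-constant initializer, and 'x' is
                    // overwritten below before it is ever read
  x = n;            // would be a Standard "dead assignment" if 'x' were
                    // never read again, but 'return x' keeps it live
  int y = 0;        // constant initializer: deliberately not reported
  y++;              // DeadIncrement: 'y' is never read afterwards
  int w = n;        // address-taken ("escaped") variables are never reported
  int *p = &w;
  n += *p;          // 'p' is read here, so its initialization stays live
  p = 0;            // not reported: nulling a pointer is treated as
                    // defensive programming
  int z;
  if ((z = n) > 0)  // Enclosing: the stored value feeds the comparison,
    return 0;       // but 'z' itself is never read again
  return x;
}

int main() { return dead_stores(3); }
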
diff --git a/lib/Analysis/CheckNSError.cpp b/lib/Analysis/CheckNSError.cpp
index c91442b5e829..8086da588264 100644
--- a/lib/Analysis/CheckNSError.cpp
+++ b/lib/Analysis/CheckNSError.cpp
@@ -28,91 +28,91 @@ using namespace clang;
namespace {
class VISIBILITY_HIDDEN NSErrorCheck : public BugType {
+ const Decl &CodeDecl;
const bool isNSErrorWarning;
IdentifierInfo * const II;
GRExprEngine &Eng;
-
- void CheckSignature(ObjCMethodDecl& MD, QualType& ResultTy,
+
+ void CheckSignature(const ObjCMethodDecl& MD, QualType& ResultTy,
llvm::SmallVectorImpl<VarDecl*>& ErrorParams);
-
- void CheckSignature(FunctionDecl& MD, QualType& ResultTy,
+
+ void CheckSignature(const FunctionDecl& MD, QualType& ResultTy,
llvm::SmallVectorImpl<VarDecl*>& ErrorParams);
bool CheckNSErrorArgument(QualType ArgTy);
bool CheckCFErrorArgument(QualType ArgTy);
-
- void CheckParamDeref(VarDecl* V, const GRState *state, BugReporter& BR);
-
- void EmitRetTyWarning(BugReporter& BR, Decl& CodeDecl);
-
+
+ void CheckParamDeref(const VarDecl *V, const LocationContext *LC,
+ const GRState *state, BugReporter& BR);
+
+ void EmitRetTyWarning(BugReporter& BR, const Decl& CodeDecl);
+
public:
- NSErrorCheck(bool isNSError, GRExprEngine& eng)
- : BugType(isNSError ? "NSError** null dereference"
- : "CFErrorRef* null dereference",
- "Coding Conventions (Apple)"),
- isNSErrorWarning(isNSError),
+ NSErrorCheck(const Decl &D, bool isNSError, GRExprEngine& eng)
+ : BugType(isNSError ? "NSError** null dereference"
+ : "CFErrorRef* null dereference",
+ "Coding conventions (Apple)"),
+ CodeDecl(D),
+ isNSErrorWarning(isNSError),
II(&eng.getContext().Idents.get(isNSErrorWarning ? "NSError":"CFErrorRef")),
Eng(eng) {}
-
+
void FlushReports(BugReporter& BR);
-};
-
+};
+
} // end anonymous namespace
-void clang::RegisterNSErrorChecks(BugReporter& BR, GRExprEngine &Eng) {
- BR.Register(new NSErrorCheck(true, Eng));
- BR.Register(new NSErrorCheck(false, Eng));
+void clang::RegisterNSErrorChecks(BugReporter& BR, GRExprEngine &Eng,
+ const Decl &D) {
+ BR.Register(new NSErrorCheck(D, true, Eng));
+ BR.Register(new NSErrorCheck(D, false, Eng));
}
void NSErrorCheck::FlushReports(BugReporter& BR) {
// Get the analysis engine and the exploded analysis graph.
- GRExprEngine::GraphTy& G = Eng.getGraph();
-
- // Get the declaration of the method/function that was analyzed.
- Decl& CodeDecl = G.getCodeDecl();
-
+ ExplodedGraph& G = Eng.getGraph();
+
// Get the ASTContext, which is useful for querying type information.
ASTContext &Ctx = BR.getContext();
QualType ResultTy;
llvm::SmallVector<VarDecl*, 5> ErrorParams;
- if (ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(&CodeDecl))
+ if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(&CodeDecl))
CheckSignature(*MD, ResultTy, ErrorParams);
- else if (FunctionDecl* FD = dyn_cast<FunctionDecl>(&CodeDecl))
+ else if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(&CodeDecl))
CheckSignature(*FD, ResultTy, ErrorParams);
else
return;
-
+
if (ErrorParams.empty())
return;
-
+
if (ResultTy == Ctx.VoidTy) EmitRetTyWarning(BR, CodeDecl);
-
- for (GRExprEngine::GraphTy::roots_iterator RI=G.roots_begin(),
- RE=G.roots_end(); RI!=RE; ++RI) {
+
+ for (ExplodedGraph::roots_iterator RI=G.roots_begin(), RE=G.roots_end();
+ RI!=RE; ++RI) {
// Scan the parameters for an implicit null dereference.
for (llvm::SmallVectorImpl<VarDecl*>::iterator I=ErrorParams.begin(),
- E=ErrorParams.end(); I!=E; ++I)
- CheckParamDeref(*I, (*RI)->getState(), BR);
-
+ E=ErrorParams.end(); I!=E; ++I)
+ CheckParamDeref(*I, (*RI)->getLocationContext(), (*RI)->getState(), BR);
}
}
-void NSErrorCheck::EmitRetTyWarning(BugReporter& BR, Decl& CodeDecl) {
+void NSErrorCheck::EmitRetTyWarning(BugReporter& BR, const Decl& CodeDecl) {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
if (isa<ObjCMethodDecl>(CodeDecl))
os << "Method";
else
- os << "Function";
-
+ os << "Function";
+
os << " accepting ";
os << (isNSErrorWarning ? "NSError**" : "CFErrorRef*");
os << " should have a non-void return value to indicate whether or not an "
- "error occured.";
-
+ "error occurred";
+
BR.EmitBasicReport(isNSErrorWarning
? "Bad return type when passing NSError**"
: "Bad return type when passing CFError*",
@@ -121,15 +121,15 @@ void NSErrorCheck::EmitRetTyWarning(BugReporter& BR, Decl& CodeDecl) {
}
void
-NSErrorCheck::CheckSignature(ObjCMethodDecl& M, QualType& ResultTy,
+NSErrorCheck::CheckSignature(const ObjCMethodDecl& M, QualType& ResultTy,
llvm::SmallVectorImpl<VarDecl*>& ErrorParams) {
ResultTy = M.getResultType();
-
- for (ObjCMethodDecl::param_iterator I=M.param_begin(),
+
+ for (ObjCMethodDecl::param_iterator I=M.param_begin(),
E=M.param_end(); I!=E; ++I) {
- QualType T = (*I)->getType();
+ QualType T = (*I)->getType();
if (isNSErrorWarning) {
if (CheckNSErrorArgument(T)) ErrorParams.push_back(*I);
@@ -140,16 +140,16 @@ NSErrorCheck::CheckSignature(ObjCMethodDecl& M, QualType& ResultTy,
}
void
-NSErrorCheck::CheckSignature(FunctionDecl& F, QualType& ResultTy,
+NSErrorCheck::CheckSignature(const FunctionDecl& F, QualType& ResultTy,
llvm::SmallVectorImpl<VarDecl*>& ErrorParams) {
-
+
ResultTy = F.getResultType();
-
- for (FunctionDecl::param_iterator I=F.param_begin(),
- E=F.param_end(); I!=E; ++I) {
-
- QualType T = (*I)->getType();
-
+
+ for (FunctionDecl::param_const_iterator I = F.param_begin(),
+ E = F.param_end(); I != E; ++I) {
+
+ QualType T = (*I)->getType();
+
if (isNSErrorWarning) {
if (CheckNSErrorArgument(T)) ErrorParams.push_back(*I);
}
@@ -160,51 +160,59 @@ NSErrorCheck::CheckSignature(FunctionDecl& F, QualType& ResultTy,
bool NSErrorCheck::CheckNSErrorArgument(QualType ArgTy) {
-
- const PointerType* PPT = ArgTy->getAsPointerType();
- if (!PPT) return false;
-
- const PointerType* PT = PPT->getPointeeType()->getAsPointerType();
- if (!PT) return false;
-
- const ObjCInterfaceType *IT =
- PT->getPointeeType()->getAsObjCInterfaceType();
-
- if (!IT) return false;
- return IT->getDecl()->getIdentifier() == II;
+
+ const PointerType* PPT = ArgTy->getAs<PointerType>();
+ if (!PPT)
+ return false;
+
+ const ObjCObjectPointerType* PT =
+ PPT->getPointeeType()->getAs<ObjCObjectPointerType>();
+
+ if (!PT)
+ return false;
+
+ const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
+
+ // FIXME: Can ID ever be NULL?
+ if (ID)
+ return II == ID->getIdentifier();
+
+ return false;
}
bool NSErrorCheck::CheckCFErrorArgument(QualType ArgTy) {
-
- const PointerType* PPT = ArgTy->getAsPointerType();
+
+ const PointerType* PPT = ArgTy->getAs<PointerType>();
if (!PPT) return false;
-
- const TypedefType* TT = PPT->getPointeeType()->getAsTypedefType();
+
+ const TypedefType* TT = PPT->getPointeeType()->getAs<TypedefType>();
if (!TT) return false;
return TT->getDecl()->getIdentifier() == II;
}
-void NSErrorCheck::CheckParamDeref(VarDecl* Param, const GRState *rootState,
+void NSErrorCheck::CheckParamDeref(const VarDecl *Param,
+ const LocationContext *LC,
+ const GRState *rootState,
BugReporter& BR) {
-
- SVal ParamL = rootState->getLValue(Param);
+
+ SVal ParamL = rootState->getLValue(Param, LC);
const MemRegion* ParamR = cast<loc::MemRegionVal>(ParamL).getRegionAs<VarRegion>();
assert (ParamR && "Parameters always have VarRegions.");
SVal ParamSVal = rootState->getSVal(ParamR);
-
+
// FIXME: For now assume that ParamSVal is symbolic. We need to generalize
// this later.
SymbolRef ParamSym = ParamSVal.getAsLocSymbol();
if (!ParamSym)
return;
-
+
// Iterate over the implicit-null dereferences.
for (GRExprEngine::null_deref_iterator I=Eng.implicit_null_derefs_begin(),
E=Eng.implicit_null_derefs_end(); I!=E; ++I) {
-
+
const GRState *state = (*I)->getState();
- const SVal* X = state->get<GRState::NullDerefTag>();
+ const SVal* X = state->get<GRState::NullDerefTag>();
if (!X || X->getAsSymbol() != ParamSym)
continue;
@@ -213,14 +221,14 @@ void NSErrorCheck::CheckParamDeref(VarDecl* Param, const GRState *rootState,
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
os << "Potential null dereference. According to coding standards ";
-
+
if (isNSErrorWarning)
os << "in 'Creating and Returning NSError Objects' the parameter '";
else
os << "documented in CoreFoundation/CFError.h the parameter '";
-
+
os << Param->getNameAsString() << "' may be null.";
-
+
BugReport *report = new BugReport(*this, os.str().c_str(), *I);
// FIXME: Notable symbols are now part of the report. We should
// add support for notable symbols in BugReport.
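
The NSError check above fires on a function or Objective-C method that takes an NSError** or CFErrorRef* out-parameter: a void result type draws the coding-convention warning, and a path that writes through the parameter without checking it for null draws the potential-null-dereference warning. The sketch below only shows that shape; ToyCFErrorRef and makeError are invented stand-ins (the real check keys on the CFErrorRef typedef and the NSError interface name), so this exact snippet would not itself trigger the checker.

// Hypothetical out-parameter convention, with self-contained stand-ins only.
typedef struct ToyCFError *ToyCFErrorRef;
static ToyCFErrorRef makeError() { return 0; }   // stand-in for CFErrorCreate

// Would draw both warnings if the parameter were a real CFErrorRef*: the void
// result cannot tell the caller whether an error occurred, and 'error' is
// dereferenced even though callers may legitimately pass null.
void badWork(ToyCFErrorRef *error) {
  *error = makeError();
}

// The accepted shape: a meaningful result plus a guard on the out-parameter.
bool goodWork(ToyCFErrorRef *error) {
  if (error)
    *error = makeError();
  return false;
}

int main() {
  ToyCFErrorRef err = 0;
  return goodWork(&err) ? 1 : 0;   // badWork(0) would crash, which is the point
}
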
diff --git a/lib/Analysis/CheckObjCDealloc.cpp b/lib/Analysis/CheckObjCDealloc.cpp
index a14ae265128b..92e3e112d9f1 100644
--- a/lib/Analysis/CheckObjCDealloc.cpp
+++ b/lib/Analysis/CheckObjCDealloc.cpp
@@ -24,11 +24,11 @@
using namespace clang;
-static bool scan_dealloc(Stmt* S, Selector Dealloc) {
-
+static bool scan_dealloc(Stmt* S, Selector Dealloc) {
+
if (ObjCMessageExpr* ME = dyn_cast<ObjCMessageExpr>(S))
if (ME->getSelector() == Dealloc)
- if(ME->getReceiver())
+ if (ME->getReceiver())
if (Expr* Receiver = ME->getReceiver()->IgnoreParenCasts())
return isa<ObjCSuperExpr>(Receiver);
@@ -37,20 +37,20 @@ static bool scan_dealloc(Stmt* S, Selector Dealloc) {
for (Stmt::child_iterator I = S->child_begin(), E= S->child_end(); I!=E; ++I)
if (*I && scan_dealloc(*I, Dealloc))
return true;
-
+
return false;
}
-static bool scan_ivar_release(Stmt* S, ObjCIvarDecl* ID,
- const ObjCPropertyDecl* PD,
- Selector Release,
+static bool scan_ivar_release(Stmt* S, ObjCIvarDecl* ID,
+ const ObjCPropertyDecl* PD,
+ Selector Release,
IdentifierInfo* SelfII,
- ASTContext& Ctx) {
-
+ ASTContext& Ctx) {
+
// [mMyIvar release]
if (ObjCMessageExpr* ME = dyn_cast<ObjCMessageExpr>(S))
if (ME->getSelector() == Release)
- if(ME->getReceiver())
+ if (ME->getReceiver())
if (Expr* Receiver = ME->getReceiver()->IgnoreParenCasts())
if (ObjCIvarRefExpr* E = dyn_cast<ObjCIvarRefExpr>(Receiver))
if (E->getDecl() == ID)
@@ -58,27 +58,29 @@ static bool scan_ivar_release(Stmt* S, ObjCIvarDecl* ID,
// [self setMyIvar:nil];
if (ObjCMessageExpr* ME = dyn_cast<ObjCMessageExpr>(S))
- if(ME->getReceiver())
+ if (ME->getReceiver())
if (Expr* Receiver = ME->getReceiver()->IgnoreParenCasts())
if (DeclRefExpr* E = dyn_cast<DeclRefExpr>(Receiver))
if (E->getDecl()->getIdentifier() == SelfII)
if (ME->getMethodDecl() == PD->getSetterMethodDecl() &&
ME->getNumArgs() == 1 &&
- ME->getArg(0)->isNullPointerConstant(Ctx))
+ ME->getArg(0)->isNullPointerConstant(Ctx,
+ Expr::NPC_ValueDependentIsNull))
return true;
-
+
// self.myIvar = nil;
if (BinaryOperator* BO = dyn_cast<BinaryOperator>(S))
if (BO->isAssignmentOp())
- if(ObjCPropertyRefExpr* PRE =
+ if (ObjCPropertyRefExpr* PRE =
dyn_cast<ObjCPropertyRefExpr>(BO->getLHS()->IgnoreParenCasts()))
- if(PRE->getProperty() == PD)
- if(BO->getRHS()->isNullPointerConstant(Ctx)) {
+ if (PRE->getProperty() == PD)
+ if (BO->getRHS()->isNullPointerConstant(Ctx,
+ Expr::NPC_ValueDependentIsNull)) {
// This is only a 'release' if the property kind is not
// 'assign'.
return PD->getSetterKind() != ObjCPropertyDecl::Assign;;
}
-
+
// Recurse to children.
for (Stmt::child_iterator I = S->child_begin(), E= S->child_end(); I!=E; ++I)
if (*I && scan_ivar_release(*I, ID, PD, Release, SelfII, Ctx))
@@ -87,43 +89,43 @@ static bool scan_ivar_release(Stmt* S, ObjCIvarDecl* ID,
return false;
}
-void clang::CheckObjCDealloc(ObjCImplementationDecl* D,
+void clang::CheckObjCDealloc(const ObjCImplementationDecl* D,
const LangOptions& LOpts, BugReporter& BR) {
assert (LOpts.getGCMode() != LangOptions::GCOnly);
-
+
ASTContext& Ctx = BR.getContext();
- ObjCInterfaceDecl* ID = D->getClassInterface();
-
+ const ObjCInterfaceDecl* ID = D->getClassInterface();
+
// Does the class contain any ivars that are pointers (or id<...>)?
// If not, skip the check entirely.
// NOTE: This is motivated by PR 2517:
// http://llvm.org/bugs/show_bug.cgi?id=2517
-
+
bool containsPointerIvar = false;
-
+
for (ObjCInterfaceDecl::ivar_iterator I=ID->ivar_begin(), E=ID->ivar_end();
I!=E; ++I) {
-
+
ObjCIvarDecl* ID = *I;
QualType T = ID->getType();
-
- if (!Ctx.isObjCObjectPointerType(T) ||
+
+ if (!T->isObjCObjectPointerType() ||
ID->getAttr<IBOutletAttr>()) // Skip IBOutlets.
continue;
-
+
containsPointerIvar = true;
break;
}
-
+
if (!containsPointerIvar)
return;
-
+
// Determine if the class subclasses NSObject.
IdentifierInfo* NSObjectII = &Ctx.Idents.get("NSObject");
IdentifierInfo* SenTestCaseII = &Ctx.Idents.get("SenTestCase");
-
+
for ( ; ID ; ID = ID->getSuperClass()) {
IdentifierInfo *II = ID->getIdentifier();
@@ -137,118 +139,118 @@ void clang::CheckObjCDealloc(ObjCImplementationDecl* D,
if (II == SenTestCaseII)
return;
}
-
+
if (!ID)
return;
-
+
// Get the "dealloc" selector.
IdentifierInfo* II = &Ctx.Idents.get("dealloc");
- Selector S = Ctx.Selectors.getSelector(0, &II);
+ Selector S = Ctx.Selectors.getSelector(0, &II);
ObjCMethodDecl* MD = 0;
-
+
// Scan the instance methods for "dealloc".
for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(),
E = D->instmeth_end(); I!=E; ++I) {
-
+
if ((*I)->getSelector() == S) {
MD = *I;
break;
- }
+ }
}
-
+
if (!MD) { // No dealloc found.
-
- const char* name = LOpts.getGCMode() == LangOptions::NonGC
- ? "missing -dealloc"
+
+ const char* name = LOpts.getGCMode() == LangOptions::NonGC
+ ? "missing -dealloc"
: "missing -dealloc (Hybrid MM, non-GC)";
-
+
std::string buf;
llvm::raw_string_ostream os(buf);
os << "Objective-C class '" << D->getNameAsString()
<< "' lacks a 'dealloc' instance method";
-
+
BR.EmitBasicReport(name, os.str().c_str(), D->getLocStart());
return;
}
-
+
// dealloc found. Scan for missing [super dealloc].
if (MD->getBody() && !scan_dealloc(MD->getBody(), S)) {
-
+
const char* name = LOpts.getGCMode() == LangOptions::NonGC
? "missing [super dealloc]"
: "missing [super dealloc] (Hybrid MM, non-GC)";
-
+
std::string buf;
llvm::raw_string_ostream os(buf);
os << "The 'dealloc' instance method in Objective-C class '"
<< D->getNameAsString()
<< "' does not send a 'dealloc' message to its super class"
" (missing [super dealloc])";
-
+
BR.EmitBasicReport(name, os.str().c_str(), D->getLocStart());
return;
- }
-
+ }
+
// Get the "release" selector.
IdentifierInfo* RII = &Ctx.Idents.get("release");
- Selector RS = Ctx.Selectors.getSelector(0, &RII);
-
+ Selector RS = Ctx.Selectors.getSelector(0, &RII);
+
// Get the "self" identifier
IdentifierInfo* SelfII = &Ctx.Idents.get("self");
-
+
// Scan for missing and extra releases of ivars used by implementations
// of synthesized properties
for (ObjCImplementationDecl::propimpl_iterator I = D->propimpl_begin(),
E = D->propimpl_end(); I!=E; ++I) {
// We can only check the synthesized properties
- if((*I)->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
+ if ((*I)->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
continue;
-
+
ObjCIvarDecl* ID = (*I)->getPropertyIvarDecl();
if (!ID)
continue;
-
+
QualType T = ID->getType();
- if (!Ctx.isObjCObjectPointerType(T)) // Skip non-pointer ivars
+ if (!T->isObjCObjectPointerType()) // Skip non-pointer ivars
continue;
const ObjCPropertyDecl* PD = (*I)->getPropertyDecl();
- if(!PD)
+ if (!PD)
continue;
-
+
// ivars cannot be set via read-only properties, so we'll skip them
- if(PD->isReadOnly())
+ if (PD->isReadOnly())
continue;
-
+
// ivar must be released if and only if the kind of setter was not 'assign'
bool requiresRelease = PD->getSetterKind() != ObjCPropertyDecl::Assign;
- if(scan_ivar_release(MD->getBody(), ID, PD, RS, SelfII, Ctx)
+ if (scan_ivar_release(MD->getBody(), ID, PD, RS, SelfII, Ctx)
!= requiresRelease) {
const char *name;
const char* category = "Memory (Core Foundation/Objective-C)";
-
+
std::string buf;
llvm::raw_string_ostream os(buf);
- if(requiresRelease) {
+ if (requiresRelease) {
name = LOpts.getGCMode() == LangOptions::NonGC
? "missing ivar release (leak)"
: "missing ivar release (Hybrid MM, non-GC)";
-
+
os << "The '" << ID->getNameAsString()
<< "' instance variable was retained by a synthesized property but "
- "wasn't released in 'dealloc'";
+ "wasn't released in 'dealloc'";
} else {
name = LOpts.getGCMode() == LangOptions::NonGC
? "extra ivar release (use-after-release)"
: "extra ivar release (Hybrid MM, non-GC)";
-
+
os << "The '" << ID->getNameAsString()
<< "' instance variable was not retained by a synthesized property "
"but was released in 'dealloc'";
}
-
+
BR.EmitBasicReport(name, category,
os.str().c_str(), (*I)->getLocation());
}
diff --git a/lib/Analysis/CheckObjCInstMethSignature.cpp b/lib/Analysis/CheckObjCInstMethSignature.cpp
index 28814867bd58..8c0d39629d50 100644
--- a/lib/Analysis/CheckObjCInstMethSignature.cpp
+++ b/lib/Analysis/CheckObjCInstMethSignature.cpp
@@ -30,25 +30,24 @@ static bool AreTypesCompatible(QualType Derived, QualType Ancestor,
// Right now don't compare the compatibility of pointers. That involves
// looking at subtyping relationships. FIXME: Future patch.
- if ((Derived->isPointerType() || Derived->isObjCQualifiedIdType()) &&
- (Ancestor->isPointerType() || Ancestor->isObjCQualifiedIdType()))
+ if (Derived->isAnyPointerType() && Ancestor->isAnyPointerType())
return true;
return C.typesAreCompatible(Derived, Ancestor);
}
-static void CompareReturnTypes(ObjCMethodDecl* MethDerived,
- ObjCMethodDecl* MethAncestor,
- BugReporter& BR, ASTContext& Ctx,
- ObjCImplementationDecl* ID) {
-
+static void CompareReturnTypes(const ObjCMethodDecl *MethDerived,
+ const ObjCMethodDecl *MethAncestor,
+ BugReporter &BR, ASTContext &Ctx,
+ const ObjCImplementationDecl *ID) {
+
QualType ResDerived = MethDerived->getResultType();
- QualType ResAncestor = MethAncestor->getResultType();
-
+ QualType ResAncestor = MethAncestor->getResultType();
+
if (!AreTypesCompatible(ResDerived, ResAncestor, Ctx)) {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
-
+
os << "The Objective-C class '"
<< MethDerived->getClassInterface()->getNameAsString()
<< "', which is derived from class '"
@@ -64,31 +63,31 @@ static void CompareReturnTypes(ObjCMethodDecl* MethDerived,
<< ResAncestor.getAsString()
<< "'. These two types are incompatible, and may result in undefined "
"behavior for clients of these classes.";
-
+
BR.EmitBasicReport("Incompatible instance method return type",
os.str().c_str(), MethDerived->getLocStart());
}
}
-void clang::CheckObjCInstMethSignature(ObjCImplementationDecl* ID,
+void clang::CheckObjCInstMethSignature(const ObjCImplementationDecl* ID,
BugReporter& BR) {
-
- ObjCInterfaceDecl* D = ID->getClassInterface();
- ObjCInterfaceDecl* C = D->getSuperClass();
+
+ const ObjCInterfaceDecl* D = ID->getClassInterface();
+ const ObjCInterfaceDecl* C = D->getSuperClass();
if (!C)
return;
-
+
ASTContext& Ctx = BR.getContext();
-
+
// Build a DenseMap of the methods for quick querying.
typedef llvm::DenseMap<Selector,ObjCMethodDecl*> MapTy;
MapTy IMeths;
unsigned NumMethods = 0;
-
+
for (ObjCImplementationDecl::instmeth_iterator I=ID->instmeth_begin(),
- E=ID->instmeth_end(); I!=E; ++I) {
-
+ E=ID->instmeth_end(); I!=E; ++I) {
+
ObjCMethodDecl* M = *I;
IMeths[M->getSelector()] = M;
++NumMethods;
@@ -102,19 +101,19 @@ void clang::CheckObjCInstMethSignature(ObjCImplementationDecl* ID,
ObjCMethodDecl* M = *I;
Selector S = M->getSelector();
-
+
MapTy::iterator MI = IMeths.find(S);
if (MI == IMeths.end() || MI->second == 0)
continue;
-
+
--NumMethods;
ObjCMethodDecl* MethDerived = MI->second;
MI->second = 0;
-
+
CompareReturnTypes(MethDerived, M, BR, Ctx, ID);
}
-
+
C = C->getSuperClass();
}
}
diff --git a/lib/Analysis/CheckObjCUnusedIVars.cpp b/lib/Analysis/CheckObjCUnusedIVars.cpp
index 0063c40482a0..1a900f897678 100644
--- a/lib/Analysis/CheckObjCUnusedIVars.cpp
+++ b/lib/Analysis/CheckObjCUnusedIVars.cpp
@@ -24,47 +24,56 @@
using namespace clang;
enum IVarState { Unused, Used };
-typedef llvm::DenseMap<ObjCIvarDecl*,IVarState> IvarUsageMap;
+typedef llvm::DenseMap<const ObjCIvarDecl*,IVarState> IvarUsageMap;
-static void Scan(IvarUsageMap& M, Stmt* S) {
+static void Scan(IvarUsageMap& M, const Stmt* S) {
if (!S)
return;
-
- if (ObjCIvarRefExpr* Ex = dyn_cast<ObjCIvarRefExpr>(S)) {
- ObjCIvarDecl* D = Ex->getDecl();
+
+ if (const ObjCIvarRefExpr *Ex = dyn_cast<ObjCIvarRefExpr>(S)) {
+ const ObjCIvarDecl *D = Ex->getDecl();
IvarUsageMap::iterator I = M.find(D);
- if (I != M.end()) I->second = Used;
+ if (I != M.end())
+ I->second = Used;
+ return;
+ }
+
+ // Blocks can reference an instance variable of a class.
+ if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
+ Scan(M, BE->getBody());
return;
}
-
- for (Stmt::child_iterator I=S->child_begin(), E=S->child_end(); I!=E;++I)
+
+ for (Stmt::const_child_iterator I=S->child_begin(),E=S->child_end(); I!=E;++I)
Scan(M, *I);
}
-static void Scan(IvarUsageMap& M, ObjCPropertyImplDecl* D) {
+static void Scan(IvarUsageMap& M, const ObjCPropertyImplDecl* D) {
if (!D)
return;
-
- ObjCIvarDecl* ID = D->getPropertyIvarDecl();
+
+ const ObjCIvarDecl* ID = D->getPropertyIvarDecl();
if (!ID)
return;
-
+
IvarUsageMap::iterator I = M.find(ID);
- if (I != M.end()) I->second = Used;
+ if (I != M.end())
+ I->second = Used;
}
-void clang::CheckObjCUnusedIvar(ObjCImplementationDecl* D, BugReporter& BR) {
+void clang::CheckObjCUnusedIvar(const ObjCImplementationDecl *D,
+ BugReporter &BR) {
- ObjCInterfaceDecl* ID = D->getClassInterface();
+ const ObjCInterfaceDecl* ID = D->getClassInterface();
IvarUsageMap M;
// Iterate over the ivars.
- for (ObjCInterfaceDecl::ivar_iterator I=ID->ivar_begin(), E=ID->ivar_end();
- I!=E; ++I) {
-
- ObjCIvarDecl* ID = *I;
-
+ for (ObjCInterfaceDecl::ivar_iterator I=ID->ivar_begin(),
+ E=ID->ivar_end(); I!=E; ++I) {
+
+ const ObjCIvarDecl* ID = *I;
+
// Ignore ivars that aren't private.
if (ID->getAccessControl() != ObjCIvarDecl::Private)
continue;
@@ -72,31 +81,31 @@ void clang::CheckObjCUnusedIvar(ObjCImplementationDecl* D, BugReporter& BR) {
// Skip IB Outlets.
if (ID->getAttr<IBOutletAttr>())
continue;
-
+
M[ID] = Unused;
}
if (M.empty())
return;
-
+
// Now scan the methods for accesses.
for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(),
- E = D->instmeth_end(); I!=E; ++I)
+ E = D->instmeth_end(); I!=E; ++I)
Scan(M, (*I)->getBody());
-
+
// Scan for @synthesized property methods that act as setters/getters
// to an ivar.
for (ObjCImplementationDecl::propimpl_iterator I = D->propimpl_begin(),
E = D->propimpl_end(); I!=E; ++I)
- Scan(M, *I);
-
+ Scan(M, *I);
+
// Find ivars that are unused.
for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
if (I->second == Unused) {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
os << "Instance variable '" << I->first->getNameAsString()
- << "' in class '" << ID->getNameAsString()
+ << "' in class '" << ID->getNameAsString()
<< "' is never used by the methods in its @implementation "
"(although it may be used by category methods).";
@@ -104,4 +113,3 @@ void clang::CheckObjCUnusedIvar(ObjCImplementationDecl* D, BugReporter& BR) {
os.str().c_str(), I->first->getLocation());
}
}
-
diff --git a/lib/Analysis/CheckSecuritySyntaxOnly.cpp b/lib/Analysis/CheckSecuritySyntaxOnly.cpp
new file mode 100644
index 000000000000..9f0d059cb66e
--- /dev/null
+++ b/lib/Analysis/CheckSecuritySyntaxOnly.cpp
@@ -0,0 +1,409 @@
+//==- CheckSecuritySyntaxOnly.cpp - Basic security checks --------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of flow-insensitive security checks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/BugReporter.h"
+#include "clang/Analysis/LocalCheckers.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+namespace {
+class VISIBILITY_HIDDEN WalkAST : public StmtVisitor<WalkAST> {
+ BugReporter &BR;
+ IdentifierInfo *II_gets;
+ enum { num_rands = 9 };
+ IdentifierInfo *II_rand[num_rands];
+ IdentifierInfo *II_random;
+ enum { num_setids = 6 };
+ IdentifierInfo *II_setid[num_setids];
+
+public:
+ WalkAST(BugReporter &br) : BR(br),
+ II_gets(0), II_rand(), II_random(0), II_setid() {}
+
+ // Statement visitor methods.
+ void VisitCallExpr(CallExpr *CE);
+ void VisitForStmt(ForStmt *S);
+ void VisitCompoundStmt (CompoundStmt *S);
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+
+ void VisitChildren(Stmt *S);
+
+ // Helpers.
+ IdentifierInfo *GetIdentifier(IdentifierInfo *& II, const char *str);
+
+ // Checker-specific methods.
+ void CheckLoopConditionForFloat(const ForStmt *FS);
+ void CheckCall_gets(const CallExpr *CE, const FunctionDecl *FD);
+ void CheckCall_rand(const CallExpr *CE, const FunctionDecl *FD);
+ void CheckCall_random(const CallExpr *CE, const FunctionDecl *FD);
+ void CheckUncheckedReturnValue(CallExpr *CE);
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Helper methods.
+//===----------------------------------------------------------------------===//
+
+IdentifierInfo *WalkAST::GetIdentifier(IdentifierInfo *& II, const char *str) {
+ if (!II)
+ II = &BR.getContext().Idents.get(str);
+
+ return II;
+}
+
+//===----------------------------------------------------------------------===//
+// AST walking.
+//===----------------------------------------------------------------------===//
+
+void WalkAST::VisitChildren(Stmt *S) {
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+ if (Stmt *child = *I)
+ Visit(child);
+}
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+ if (const FunctionDecl *FD = CE->getDirectCallee()) {
+ CheckCall_gets(CE, FD);
+ CheckCall_rand(CE, FD);
+ CheckCall_random(CE, FD);
+ }
+
+ // Recurse and check children.
+ VisitChildren(CE);
+}
+
+void WalkAST::VisitCompoundStmt(CompoundStmt *S) {
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+ if (Stmt *child = *I) {
+ if (CallExpr *CE = dyn_cast<CallExpr>(child))
+ CheckUncheckedReturnValue(CE);
+ Visit(child);
+ }
+}
+
+void WalkAST::VisitForStmt(ForStmt *FS) {
+ CheckLoopConditionForFloat(FS);
+
+ // Recurse and check children.
+ VisitChildren(FS);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: floating point variable used as loop counter.
+// Originally: <rdar://problem/6336718>
+// Implements: CERT security coding advisory FLP-30.
+//===----------------------------------------------------------------------===//
+
+static const DeclRefExpr*
+GetIncrementedVar(const Expr *expr, const VarDecl *x, const VarDecl *y) {
+ expr = expr->IgnoreParenCasts();
+
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(expr)) {
+ if (!(B->isAssignmentOp() || B->isCompoundAssignmentOp() ||
+ B->getOpcode() == BinaryOperator::Comma))
+ return NULL;
+
+ if (const DeclRefExpr *lhs = GetIncrementedVar(B->getLHS(), x, y))
+ return lhs;
+
+ if (const DeclRefExpr *rhs = GetIncrementedVar(B->getRHS(), x, y))
+ return rhs;
+
+ return NULL;
+ }
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(expr)) {
+ const NamedDecl *ND = DR->getDecl();
+ return ND == x || ND == y ? DR : NULL;
+ }
+
+ if (const UnaryOperator *U = dyn_cast<UnaryOperator>(expr))
+ return U->isIncrementDecrementOp()
+ ? GetIncrementedVar(U->getSubExpr(), x, y) : NULL;
+
+ return NULL;
+}
+
+/// CheckLoopConditionForFloat - This check looks for 'for' statements that
+/// use a floating point variable as a loop counter.
+/// CERT: FLP30-C, FLP30-CPP.
+///
+void WalkAST::CheckLoopConditionForFloat(const ForStmt *FS) {
+ // Does the loop have a condition?
+ const Expr *condition = FS->getCond();
+
+ if (!condition)
+ return;
+
+ // Does the loop have an increment?
+ const Expr *increment = FS->getInc();
+
+ if (!increment)
+ return;
+
+ // Strip away '()' and casts.
+ condition = condition->IgnoreParenCasts();
+ increment = increment->IgnoreParenCasts();
+
+ // Is the loop condition a comparison?
+ const BinaryOperator *B = dyn_cast<BinaryOperator>(condition);
+
+ if (!B)
+ return;
+
+ // Is this a comparison?
+ if (!(B->isRelationalOp() || B->isEqualityOp()))
+ return;
+
+ // Are we comparing variables?
+ const DeclRefExpr *drLHS = dyn_cast<DeclRefExpr>(B->getLHS()->IgnoreParens());
+ const DeclRefExpr *drRHS = dyn_cast<DeclRefExpr>(B->getRHS()->IgnoreParens());
+
+ // Does at least one of the variables have a floating point type?
+ drLHS = drLHS && drLHS->getType()->isFloatingType() ? drLHS : NULL;
+ drRHS = drRHS && drRHS->getType()->isFloatingType() ? drRHS : NULL;
+
+ if (!drLHS && !drRHS)
+ return;
+
+ const VarDecl *vdLHS = drLHS ? dyn_cast<VarDecl>(drLHS->getDecl()) : NULL;
+ const VarDecl *vdRHS = drRHS ? dyn_cast<VarDecl>(drRHS->getDecl()) : NULL;
+
+ if (!vdLHS && !vdRHS)
+ return;
+
+ // Does either variable appear in increment?
+ const DeclRefExpr *drInc = GetIncrementedVar(increment, vdLHS, vdRHS);
+
+ if (!drInc)
+ return;
+
+ // Emit the error. First figure out which DeclRefExpr in the condition
+ // referenced the compared variable.
+ const DeclRefExpr *drCond = vdLHS == drInc->getDecl() ? drLHS : drRHS;
+
+ llvm::SmallVector<SourceRange, 2> ranges;
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "Variable '" << drCond->getDecl()->getNameAsCString()
+ << "' with floating point type '" << drCond->getType().getAsString()
+ << "' should not be used as a loop counter";
+
+ ranges.push_back(drCond->getSourceRange());
+ ranges.push_back(drInc->getSourceRange());
+
+ const char *bugType = "Floating point variable used as loop counter";
+ BR.EmitBasicReport(bugType, "Security", os.str().c_str(),
+ FS->getLocStart(), ranges.data(), ranges.size());
+}
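
As an illustration of the pattern this new check flags (the snippet and its function name are ours, not part of the patch): a 'for' loop whose condition compares a floating point variable that is also updated by the loop increment.

    // Hypothetical code under analysis.
    void count_tenths(void) {
      // Flagged: 'x' has floating point type and is used as the loop counter.
      for (float x = 0.0f; x < 1.0f; x += 0.1f) {
        // 0.1 has no exact binary representation, so the trip count depends
        // on accumulated rounding error. An integer counter avoids this.
      }
    }
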
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'gets' is insecure.
+// Originally: <rdar://problem/6335715>
+// Implements (part of): 300-BSI (buildsecurityin.us-cert.gov)
+//===----------------------------------------------------------------------===//
+
+void WalkAST::CheckCall_gets(const CallExpr *CE, const FunctionDecl *FD) {
+ if (FD->getIdentifier() != GetIdentifier(II_gets, "gets"))
+ return;
+
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FD->getType());
+ if (!FTP)
+ return;
+
+ // Verify that the function takes a single argument.
+ if (FTP->getNumArgs() != 1)
+ return;
+
+ // Is the argument a 'char*'?
+ const PointerType *PT = dyn_cast<PointerType>(FTP->getArgType(0));
+ if (!PT)
+ return;
+
+ if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
+ return;
+
+ // Issue a warning.
+ SourceRange R = CE->getCallee()->getSourceRange();
+ BR.EmitBasicReport("Potential buffer overflow in call to 'gets'",
+ "Security",
+ "Call to function 'gets' is extremely insecure as it can "
+ "always result in a buffer overflow",
+ CE->getLocStart(), &R, 1);
+}
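
A minimal sketch of a call this check reports, assuming a platform where <stdio.h> still declares gets (illustrative only; gets was later removed from C11 and C++14):

    #include <stdio.h>

    void read_line(void) {
      char buf[32];
      gets(buf);   // Flagged: gets cannot bound how many bytes it writes into 'buf'.
    }
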
+
+//===----------------------------------------------------------------------===//
+// Check: Linear congruent random number generators should not be used
+// Originally: <rdar://problem/63371000>
+// CWE-338: Use of cryptographically weak PRNG
+//===----------------------------------------------------------------------===//
+
+void WalkAST::CheckCall_rand(const CallExpr *CE, const FunctionDecl *FD) {
+ if (II_rand[0] == NULL) {
+ // This check applies to these functions
+ static const char * const identifiers[num_rands] = {
+ "drand48", "erand48", "jrand48", "lrand48", "mrand48", "nrand48",
+ "lcong48",
+ "rand", "rand_r"
+ };
+
+ for (size_t i = 0; i < num_rands; i++)
+ II_rand[i] = &BR.getContext().Idents.get(identifiers[i]);
+ }
+
+ const IdentifierInfo *id = FD->getIdentifier();
+ size_t identifierid;
+
+ for (identifierid = 0; identifierid < num_rands; identifierid++)
+ if (id == II_rand[identifierid])
+ break;
+
+ if (identifierid >= num_rands)
+ return;
+
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FD->getType());
+ if (!FTP)
+ return;
+
+ if (FTP->getNumArgs() == 1) {
+ // Is the argument an 'unsigned short *'?
+ // (Actually any integer type is allowed.)
+ const PointerType *PT = dyn_cast<PointerType>(FTP->getArgType(0));
+ if (!PT)
+ return;
+
+ if (! PT->getPointeeType()->isIntegerType())
+ return;
+ }
+ else if (FTP->getNumArgs() != 0)
+ return;
+
+ // Issue a warning.
+ std::string buf1;
+ llvm::raw_string_ostream os1(buf1);
+ os1 << "'" << FD->getNameAsString() << "' is a poor random number generator";
+
+ std::string buf2;
+ llvm::raw_string_ostream os2(buf2);
+ os2 << "Function '" << FD->getNameAsString()
+ << "' is obsolete because it implements a poor random number generator."
+ << " Use 'arc4random' instead";
+
+ SourceRange R = CE->getCallee()->getSourceRange();
+
+ BR.EmitBasicReport(os1.str().c_str(), "Security", os2.str().c_str(),
+ CE->getLocStart(), &R, 1);
+}
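
For reference, a hedged sketch of a call this check reports; the function name is invented, and 'arc4random' (the replacement the diagnostic suggests) is only available on some platforms:

    #include <stdlib.h>

    unsigned pick_index(unsigned n) {
      // Flagged: 'rand' is a weak linear congruential generator.
      // Where available, 'arc4random() % n' is the suggested replacement.
      return rand() % n;
    }
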
+
+//===----------------------------------------------------------------------===//
+// Check: 'random' should not be used
+// Originally: <rdar://problem/63371000>
+//===----------------------------------------------------------------------===//
+
+void WalkAST::CheckCall_random(const CallExpr *CE, const FunctionDecl *FD) {
+ if (FD->getIdentifier() != GetIdentifier(II_random, "random"))
+ return;
+
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FD->getType());
+ if (!FTP)
+ return;
+
+ // Verify that the function takes no argument.
+ if (FTP->getNumArgs() != 0)
+ return;
+
+ // Issue a warning.
+ SourceRange R = CE->getCallee()->getSourceRange();
+ BR.EmitBasicReport("'random' is not a secure random number generator",
+ "Security",
+ "The 'random' function produces a sequence of values that "
+ "an adversary may be able to predict. Use 'arc4random' "
+ "instead",
+ CE->getLocStart(), &R, 1);
+}
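
The companion check for 'random' fires on the same shape of call, e.g. (illustrative only):

    #include <stdlib.h>

    long session_cookie(void) {
      return random();   // Flagged: an adversary may be able to predict the output of 'random'.
    }
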
+
+//===----------------------------------------------------------------------===//
+// Check: Should check whether privileges are dropped successfully.
+// Originally: <rdar://problem/6337132>
+//===----------------------------------------------------------------------===//
+
+void WalkAST::CheckUncheckedReturnValue(CallExpr *CE) {
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD)
+ return;
+
+ if (II_setid[0] == NULL) {
+ static const char * const identifiers[num_setids] = {
+ "setuid", "setgid", "seteuid", "setegid",
+ "setreuid", "setregid"
+ };
+
+ for (size_t i = 0; i < num_setids; i++)
+ II_setid[i] = &BR.getContext().Idents.get(identifiers[i]);
+ }
+
+ const IdentifierInfo *id = FD->getIdentifier();
+ size_t identifierid;
+
+ for (identifierid = 0; identifierid < num_setids; identifierid++)
+ if (id == II_setid[identifierid])
+ break;
+
+ if (identifierid >= num_setids)
+ return;
+
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FD->getType());
+ if (!FTP)
+ return;
+
+ // Verify that the function takes one or two arguments (depending on
+ // the function).
+ if (FTP->getNumArgs() != (identifierid < 4 ? 1 : 2))
+ return;
+
+ // The arguments must be integers.
+ for (unsigned i = 0; i < FTP->getNumArgs(); i++)
+ if (! FTP->getArgType(i)->isIntegerType())
+ return;
+
+ // Issue a warning.
+ std::string buf1;
+ llvm::raw_string_ostream os1(buf1);
+ os1 << "Return value is not checked in call to '" << FD->getNameAsString()
+ << "'";
+
+ std::string buf2;
+ llvm::raw_string_ostream os2(buf2);
+ os2 << "The return value from the call to '" << FD->getNameAsString()
+ << "' is not checked. If an error occurs in '"
+ << FD->getNameAsString()
+ << "', the following code may execute with unexpected privileges";
+
+ SourceRange R = CE->getCallee()->getSourceRange();
+
+ BR.EmitBasicReport(os1.str().c_str(), "Security", os2.str().c_str(),
+ CE->getLocStart(), &R, 1);
+}
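
A minimal sketch of what this check reports: a privilege-dropping call written as a bare statement, so its return value is silently discarded (the snippet and its function names are ours, not part of the patch):

    #include <sys/types.h>
    #include <unistd.h>

    void drop_privileges(uid_t target_uid) {
      setuid(target_uid);   // Flagged: if setuid fails, the process keeps its old privileges.
    }

    void drop_privileges_checked(uid_t target_uid) {
      if (setuid(target_uid) != 0) {
        // Handle the failure; do not continue with privileged work.
      }
    }
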
+
+//===----------------------------------------------------------------------===//
+// Entry point for check.
+//===----------------------------------------------------------------------===//
+
+void clang::CheckSecuritySyntaxOnly(const Decl *D, BugReporter &BR) {
+ WalkAST walker(BR);
+ walker.Visit(D->getBody());
+}
diff --git a/lib/Analysis/Environment.cpp b/lib/Analysis/Environment.cpp
index 3f8f14dcb0b4..1610ad4d271d 100644
--- a/lib/Analysis/Environment.cpp
+++ b/lib/Analysis/Environment.cpp
@@ -12,106 +12,81 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/PathSensitive/GRState.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
-#include "llvm/ADT/ImmutableMap.h"
-#include "llvm/Support/Streams.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/ADT/ImmutableMap.h"
using namespace clang;
SVal Environment::GetSVal(const Stmt *E, ValueManager& ValMgr) const {
-
+
for (;;) {
-
+
switch (E->getStmtClass()) {
-
- case Stmt::AddrLabelExprClass:
+
+ case Stmt::AddrLabelExprClass:
return ValMgr.makeLoc(cast<AddrLabelExpr>(E));
-
+
// ParenExprs are no-ops.
-
- case Stmt::ParenExprClass:
+
+ case Stmt::ParenExprClass:
E = cast<ParenExpr>(E)->getSubExpr();
continue;
-
+
case Stmt::CharacterLiteralClass: {
const CharacterLiteral* C = cast<CharacterLiteral>(E);
return ValMgr.makeIntVal(C->getValue(), C->getType());
}
-
+
case Stmt::IntegerLiteralClass: {
return ValMgr.makeIntVal(cast<IntegerLiteral>(E));
}
-
+
// Casts where the source and target type are the same
// are no-ops. We blast through these to get the descendant
// subexpression that has a value.
-
+
case Stmt::ImplicitCastExprClass:
case Stmt::CStyleCastExprClass: {
const CastExpr* C = cast<CastExpr>(E);
QualType CT = C->getType();
-
+
if (CT->isVoidType())
return UnknownVal();
-
+
break;
}
-
+
// Handle all other Stmt* using a lookup.
-
+
default:
break;
};
-
+
break;
}
-
+
return LookupExpr(E);
}
-SVal Environment::GetBlkExprSVal(const Stmt *E, ValueManager& ValMgr) const {
-
- while (1) {
- switch (E->getStmtClass()) {
- case Stmt::ParenExprClass:
- E = cast<ParenExpr>(E)->getSubExpr();
- continue;
-
- case Stmt::CharacterLiteralClass: {
- const CharacterLiteral* C = cast<CharacterLiteral>(E);
- return ValMgr.makeIntVal(C->getValue(), C->getType());
- }
-
- case Stmt::IntegerLiteralClass: {
- return ValMgr.makeIntVal(cast<IntegerLiteral>(E));
- }
-
- default:
- return LookupBlkExpr(E);
- }
- }
-}
+Environment EnvironmentManager::BindExpr(Environment Env, const Stmt *S,
+ SVal V, bool Invalidate) {
+ assert(S);
-Environment EnvironmentManager::BindExpr(const Environment& Env, const Stmt* E,
- SVal V, bool isBlkExpr,
- bool Invalidate) {
- assert (E);
-
- if (V.isUnknown()) {
+ if (V.isUnknown()) {
if (Invalidate)
- return isBlkExpr ? RemoveBlkExpr(Env, E) : RemoveSubExpr(Env, E);
+ return Environment(F.Remove(Env.ExprBindings, S), Env.ACtx);
else
return Env;
}
- return isBlkExpr ? AddBlkExpr(Env, E, V) : AddSubExpr(Env, E, V);
+ return Environment(F.Add(Env.ExprBindings, S, V), Env.ACtx);
}
namespace {
class VISIBILITY_HIDDEN MarkLiveCallback : public SymbolVisitor {
SymbolReaper &SymReaper;
public:
- MarkLiveCallback(SymbolReaper &symreaper) : SymReaper(symreaper) {}
+ MarkLiveCallback(SymbolReaper &symreaper) : SymReaper(symreaper) {}
bool VisitSymbol(SymbolRef sym) { SymReaper.markLive(sym); return true; }
};
} // end anonymous namespace
@@ -120,60 +95,66 @@ public:
// - Remove subexpression bindings.
// - Remove dead block expression bindings.
// - Keep live block expression bindings:
-// - Mark their reachable symbols live in SymbolReaper,
+// - Mark their reachable symbols live in SymbolReaper,
// see ScanReachableSymbols.
// - Mark the region in DRoots if the binding is a loc::MemRegionVal.
-Environment
-EnvironmentManager::RemoveDeadBindings(Environment Env, Stmt* Loc,
- SymbolReaper& SymReaper,
- GRStateManager& StateMgr,
- const GRState *state,
- llvm::SmallVectorImpl<const MemRegion*>& DRoots) {
-
- // Drop bindings for subexpressions.
- Env = RemoveSubExprBindings(Env);
+Environment
+EnvironmentManager::RemoveDeadBindings(Environment Env, const Stmt *S,
+ SymbolReaper &SymReaper,
+ const GRState *ST,
+ llvm::SmallVectorImpl<const MemRegion*> &DRoots) {
+
+ CFG &C = *Env.getAnalysisContext().getCFG();
+
+ // We construct a new Environment object entirely, as this is cheaper than
+ // individually removing all the subexpression bindings (which will greatly
+ // outnumber block-level expression bindings).
+ Environment NewEnv = getInitialEnvironment(&Env.getAnalysisContext());
// Iterate over the block-expr bindings.
- for (Environment::beb_iterator I = Env.beb_begin(), E = Env.beb_end();
+ for (Environment::iterator I = Env.begin(), E = Env.end();
I != E; ++I) {
+
const Stmt *BlkExpr = I.getKey();
- if (SymReaper.isLive(Loc, BlkExpr)) {
- SVal X = I.getData();
+ // Not a block-level expression?
+ if (!C.isBlkExpr(BlkExpr))
+ continue;
+
+ const SVal &X = I.getData();
+
+ if (SymReaper.isLive(S, BlkExpr)) {
+ // Copy the binding to the new map.
+ NewEnv.ExprBindings = F.Add(NewEnv.ExprBindings, BlkExpr, X);
// If the block expr's value is a memory region, then mark that region.
if (isa<loc::MemRegionVal>(X)) {
const MemRegion* R = cast<loc::MemRegionVal>(X).getRegion();
DRoots.push_back(R);
// Mark the super region of the RX as live.
- // e.g.: int x; char *y = (char*) &x; if (*y) ...
+ // e.g.: int x; char *y = (char*) &x; if (*y) ...
// 'y' => element region. 'x' is its super region.
// We only add one level super region for now.
// FIXME: maybe multiple level of super regions should be added.
- if (const SubRegion *SR = dyn_cast<SubRegion>(R)) {
+ if (const SubRegion *SR = dyn_cast<SubRegion>(R))
DRoots.push_back(SR->getSuperRegion());
- }
}
// Mark all symbols in the block expr's value live.
MarkLiveCallback cb(SymReaper);
- state->scanReachableSymbols(X, cb);
- } else {
- // The block expr is dead.
- SVal X = I.getData();
-
- // Do not misclean LogicalExpr or ConditionalOperator. It is dead at the
- // beginning of itself, but we need its UndefinedVal to determine its
- // SVal.
-
- if (X.isUndef() && cast<UndefinedVal>(X).getData())
- continue;
-
- Env = RemoveBlkExpr(Env, BlkExpr);
+ ST->scanReachableSymbols(X, cb);
+ continue;
}
+
+  // Otherwise the expression is dead, with a couple of exceptions.
+  // Do not mistakenly remove a LogicalExpr or ConditionalOperator binding:
+  // such an expression is dead at the beginning of its own evaluation, but
+  // we still need its UndefinedVal to determine its SVal.
+ if (X.isUndef() && cast<UndefinedVal>(X).getData())
+ NewEnv.ExprBindings = F.Add(NewEnv.ExprBindings, BlkExpr, X);
}
- return Env;
+ return NewEnv;
}
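
A reader's note on the block-level expression bindings that the rewritten RemoveDeadBindings keeps (the snippet is hypothetical code under analysis, not part of the patch): the value of a branch condition is produced in one CFG block and consumed by the branch in the next, so its Environment binding must survive the cleanup at the block boundary; when that value is a loc::MemRegionVal, its region is also recorded in DRoots as described above.

    void touch(int *p) {
      if (p)       // 'p' is a block-level expression here: the branch in the next
        *p = 1;    // CFG block reads its value, so its binding stays live.
    }
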
diff --git a/lib/Analysis/ExplodedGraph.cpp b/lib/Analysis/ExplodedGraph.cpp
index 20de6c48c387..0dc81a4225a8 100644
--- a/lib/Analysis/ExplodedGraph.cpp
+++ b/lib/Analysis/ExplodedGraph.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/PathSensitive/ExplodedGraph.h"
+#include "clang/Analysis/PathSensitive/GRState.h"
#include "clang/AST/Stmt.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DenseMap.h"
@@ -26,193 +27,234 @@ using namespace clang;
//===----------------------------------------------------------------------===//
// An out of line virtual method to provide a home for the class vtable.
-ExplodedNodeImpl::Auditor::~Auditor() {}
+ExplodedNode::Auditor::~Auditor() {}
#ifndef NDEBUG
-static ExplodedNodeImpl::Auditor* NodeAuditor = 0;
+static ExplodedNode::Auditor* NodeAuditor = 0;
#endif
-void ExplodedNodeImpl::SetAuditor(ExplodedNodeImpl::Auditor* A) {
+void ExplodedNode::SetAuditor(ExplodedNode::Auditor* A) {
#ifndef NDEBUG
NodeAuditor = A;
#endif
}
//===----------------------------------------------------------------------===//
-// ExplodedNodeImpl.
+// ExplodedNode.
//===----------------------------------------------------------------------===//
-static inline std::vector<ExplodedNodeImpl*>& getVector(void* P) {
- return *reinterpret_cast<std::vector<ExplodedNodeImpl*>*>(P);
+static inline BumpVector<ExplodedNode*>& getVector(void* P) {
+ return *reinterpret_cast<BumpVector<ExplodedNode*>*>(P);
}
-void ExplodedNodeImpl::addPredecessor(ExplodedNodeImpl* V) {
+void ExplodedNode::addPredecessor(ExplodedNode* V, ExplodedGraph &G) {
assert (!V->isSink());
- Preds.addNode(V);
- V->Succs.addNode(this);
+ Preds.addNode(V, G);
+ V->Succs.addNode(this, G);
#ifndef NDEBUG
if (NodeAuditor) NodeAuditor->AddEdge(V, this);
#endif
}
-void ExplodedNodeImpl::NodeGroup::addNode(ExplodedNodeImpl* N) {
-
- assert ((reinterpret_cast<uintptr_t>(N) & Mask) == 0x0);
- assert (!getFlag());
-
+void ExplodedNode::NodeGroup::addNode(ExplodedNode* N, ExplodedGraph &G) {
+ assert((reinterpret_cast<uintptr_t>(N) & Mask) == 0x0);
+ assert(!getFlag());
+
if (getKind() == Size1) {
- if (ExplodedNodeImpl* NOld = getNode()) {
- std::vector<ExplodedNodeImpl*>* V = new std::vector<ExplodedNodeImpl*>();
- assert ((reinterpret_cast<uintptr_t>(V) & Mask) == 0x0);
- V->push_back(NOld);
- V->push_back(N);
+ if (ExplodedNode* NOld = getNode()) {
+ BumpVectorContext &Ctx = G.getNodeAllocator();
+ BumpVector<ExplodedNode*> *V =
+ G.getAllocator().Allocate<BumpVector<ExplodedNode*> >();
+ new (V) BumpVector<ExplodedNode*>(Ctx, 4);
+
+ assert((reinterpret_cast<uintptr_t>(V) & Mask) == 0x0);
+ V->push_back(NOld, Ctx);
+ V->push_back(N, Ctx);
P = reinterpret_cast<uintptr_t>(V) | SizeOther;
- assert (getPtr() == (void*) V);
- assert (getKind() == SizeOther);
+ assert(getPtr() == (void*) V);
+ assert(getKind() == SizeOther);
}
else {
P = reinterpret_cast<uintptr_t>(N);
- assert (getKind() == Size1);
+ assert(getKind() == Size1);
}
}
else {
- assert (getKind() == SizeOther);
- getVector(getPtr()).push_back(N);
+ assert(getKind() == SizeOther);
+ getVector(getPtr()).push_back(N, G.getNodeAllocator());
}
}
-
-unsigned ExplodedNodeImpl::NodeGroup::size() const {
+unsigned ExplodedNode::NodeGroup::size() const {
if (getFlag())
return 0;
-
+
if (getKind() == Size1)
return getNode() ? 1 : 0;
else
return getVector(getPtr()).size();
}
-ExplodedNodeImpl** ExplodedNodeImpl::NodeGroup::begin() const {
+ExplodedNode **ExplodedNode::NodeGroup::begin() const {
if (getFlag())
return NULL;
-
+
if (getKind() == Size1)
- return (ExplodedNodeImpl**) (getPtr() ? &P : NULL);
+ return (ExplodedNode**) (getPtr() ? &P : NULL);
else
- return const_cast<ExplodedNodeImpl**>(&*(getVector(getPtr()).begin()));
+ return const_cast<ExplodedNode**>(&*(getVector(getPtr()).begin()));
}
-ExplodedNodeImpl** ExplodedNodeImpl::NodeGroup::end() const {
+ExplodedNode** ExplodedNode::NodeGroup::end() const {
if (getFlag())
return NULL;
-
+
if (getKind() == Size1)
- return (ExplodedNodeImpl**) (getPtr() ? &P+1 : NULL);
+ return (ExplodedNode**) (getPtr() ? &P+1 : NULL);
else {
// Dereferencing end() is undefined behaviour. The vector is not empty, so
// we can dereference the last elem and then add 1 to the result.
- return const_cast<ExplodedNodeImpl**>(&getVector(getPtr()).back()) + 1;
+ return const_cast<ExplodedNode**>(getVector(getPtr()).end());
}
}
-ExplodedNodeImpl::NodeGroup::~NodeGroup() {
- if (getKind() == SizeOther) delete &getVector(getPtr());
+ExplodedNode *ExplodedGraph::getNode(const ProgramPoint& L,
+ const GRState* State, bool* IsNew) {
+ // Profile 'State' to determine if we already have an existing node.
+ llvm::FoldingSetNodeID profile;
+ void* InsertPos = 0;
+
+ NodeTy::Profile(profile, L, State);
+ NodeTy* V = Nodes.FindNodeOrInsertPos(profile, InsertPos);
+
+ if (!V) {
+ // Allocate a new node.
+ V = (NodeTy*) getAllocator().Allocate<NodeTy>();
+ new (V) NodeTy(L, State);
+
+ // Insert the node into the node set and return it.
+ Nodes.InsertNode(V, InsertPos);
+
+ ++NumNodes;
+
+ if (IsNew) *IsNew = true;
+ }
+ else
+ if (IsNew) *IsNew = false;
+
+ return V;
+}
+
+std::pair<ExplodedGraph*, InterExplodedGraphMap*>
+ExplodedGraph::Trim(const NodeTy* const* NBeg, const NodeTy* const* NEnd,
+ llvm::DenseMap<const void*, const void*> *InverseMap) const {
+
+ if (NBeg == NEnd)
+ return std::make_pair((ExplodedGraph*) 0,
+ (InterExplodedGraphMap*) 0);
+
+ assert (NBeg < NEnd);
+
+ llvm::OwningPtr<InterExplodedGraphMap> M(new InterExplodedGraphMap());
+
+ ExplodedGraph* G = TrimInternal(NBeg, NEnd, M.get(), InverseMap);
+
+ return std::make_pair(static_cast<ExplodedGraph*>(G), M.take());
}
-ExplodedGraphImpl*
-ExplodedGraphImpl::Trim(const ExplodedNodeImpl* const* BeginSources,
- const ExplodedNodeImpl* const* EndSources,
- InterExplodedGraphMapImpl* M,
- llvm::DenseMap<const void*, const void*> *InverseMap)
-const {
-
- typedef llvm::DenseSet<const ExplodedNodeImpl*> Pass1Ty;
+ExplodedGraph*
+ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources,
+ const ExplodedNode* const* EndSources,
+ InterExplodedGraphMap* M,
+ llvm::DenseMap<const void*, const void*> *InverseMap) const {
+
+ typedef llvm::DenseSet<const ExplodedNode*> Pass1Ty;
Pass1Ty Pass1;
-
- typedef llvm::DenseMap<const ExplodedNodeImpl*, ExplodedNodeImpl*> Pass2Ty;
+
+ typedef llvm::DenseMap<const ExplodedNode*, ExplodedNode*> Pass2Ty;
Pass2Ty& Pass2 = M->M;
-
- llvm::SmallVector<const ExplodedNodeImpl*, 10> WL1, WL2;
+
+ llvm::SmallVector<const ExplodedNode*, 10> WL1, WL2;
// ===- Pass 1 (reverse DFS) -===
- for (const ExplodedNodeImpl* const* I = BeginSources; I != EndSources; ++I) {
+ for (const ExplodedNode* const* I = BeginSources; I != EndSources; ++I) {
assert(*I);
WL1.push_back(*I);
}
-
+
  // Process the first worklist until it is empty. WL1 is a SmallVector used
  // as a stack (LIFO), so this pass visits predecessors depth-first.
while (!WL1.empty()) {
- const ExplodedNodeImpl *N = WL1.back();
+ const ExplodedNode *N = WL1.back();
WL1.pop_back();
-
+
// Have we already visited this node? If so, continue to the next one.
if (Pass1.count(N))
continue;
// Otherwise, mark this node as visited.
Pass1.insert(N);
-
+
// If this is a root enqueue it to the second worklist.
if (N->Preds.empty()) {
WL2.push_back(N);
continue;
}
-
+
// Visit our predecessors and enqueue them.
- for (ExplodedNodeImpl** I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I)
+ for (ExplodedNode** I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I)
WL1.push_back(*I);
}
-
+
// We didn't hit a root? Return with a null pointer for the new graph.
if (WL2.empty())
return 0;
// Create an empty graph.
- ExplodedGraphImpl* G = MakeEmptyGraph();
-
- // ===- Pass 2 (forward DFS to construct the new graph) -===
+ ExplodedGraph* G = MakeEmptyGraph();
+
+ // ===- Pass 2 (forward DFS to construct the new graph) -===
while (!WL2.empty()) {
- const ExplodedNodeImpl* N = WL2.back();
+ const ExplodedNode* N = WL2.back();
WL2.pop_back();
-
+
// Skip this node if we have already processed it.
if (Pass2.find(N) != Pass2.end())
continue;
-
+
// Create the corresponding node in the new graph and record the mapping
// from the old node to the new node.
- ExplodedNodeImpl* NewN = G->getNodeImpl(N->getLocation(), N->State, NULL);
+ ExplodedNode* NewN = G->getNode(N->getLocation(), N->State, NULL);
Pass2[N] = NewN;
-
+
// Also record the reverse mapping from the new node to the old node.
if (InverseMap) (*InverseMap)[NewN] = N;
-
+
// If this node is a root, designate it as such in the graph.
if (N->Preds.empty())
G->addRoot(NewN);
-
+
// In the case that some of the intended predecessors of NewN have already
// been created, we should hook them up as predecessors.
// Walk through the predecessors of 'N' and hook up their corresponding
// nodes in the new graph (if any) to the freshly created node.
- for (ExplodedNodeImpl **I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I) {
+ for (ExplodedNode **I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I) {
Pass2Ty::iterator PI = Pass2.find(*I);
if (PI == Pass2.end())
continue;
-
- NewN->addPredecessor(PI->second);
+
+ NewN->addPredecessor(PI->second, *G);
}
// In the case that some of the intended successors of NewN have already
// been created, we should hook them up as successors. Otherwise, enqueue
// the new nodes from the original graph that should have nodes created
// in the new graph.
- for (ExplodedNodeImpl **I=N->Succs.begin(), **E=N->Succs.end(); I!=E; ++I) {
- Pass2Ty::iterator PI = Pass2.find(*I);
+ for (ExplodedNode **I=N->Succs.begin(), **E=N->Succs.end(); I!=E; ++I) {
+ Pass2Ty::iterator PI = Pass2.find(*I);
if (PI != Pass2.end()) {
- PI->second->addPredecessor(NewN);
+ PI->second->addPredecessor(NewN, *G);
continue;
}
@@ -220,22 +262,20 @@ const {
if (Pass1.count(*I))
WL2.push_back(*I);
}
-
+
    // Finally, explicitly mark all nodes without any successors as sinks.
if (N->isSink())
NewN->markAsSink();
}
-
+
return G;
}
-ExplodedNodeImpl*
-InterExplodedGraphMapImpl::getMappedImplNode(const ExplodedNodeImpl* N) const {
- llvm::DenseMap<const ExplodedNodeImpl*, ExplodedNodeImpl*>::iterator I =
+ExplodedNode*
+InterExplodedGraphMap::getMappedNode(const ExplodedNode* N) const {
+ llvm::DenseMap<const ExplodedNode*, ExplodedNode*>::iterator I =
M.find(N);
return I == M.end() ? 0 : I->second;
}
-InterExplodedGraphMapImpl::InterExplodedGraphMapImpl() {}
-
diff --git a/lib/Analysis/GRBlockCounter.cpp b/lib/Analysis/GRBlockCounter.cpp
index f69a16da401c..4f4103ac45b4 100644
--- a/lib/Analysis/GRBlockCounter.cpp
+++ b/lib/Analysis/GRBlockCounter.cpp
@@ -1,5 +1,5 @@
//==- GRBlockCounter.h - ADT for counting block visits -------------*- C++ -*-//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
diff --git a/lib/Analysis/GRCoreEngine.cpp b/lib/Analysis/GRCoreEngine.cpp
index ff7b548bc054..87472472fdee 100644
--- a/lib/Analysis/GRCoreEngine.cpp
+++ b/lib/Analysis/GRCoreEngine.cpp
@@ -1,5 +1,5 @@
//==- GRCoreEngine.cpp - Path-Sensitive Dataflow Engine ------------*- C++ -*-//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/PathSensitive/GRCoreEngine.h"
+#include "clang/Analysis/PathSensitive/GRExprEngine.h"
#include "clang/AST/Expr.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Casting.h"
@@ -29,7 +30,7 @@ using namespace clang;
//===----------------------------------------------------------------------===//
namespace {
- class VISIBILITY_HIDDEN DFS : public GRWorkList {
+class VISIBILITY_HIDDEN DFS : public GRWorkList {
llvm::SmallVector<GRWorkListUnit,20> Stack;
public:
virtual bool hasWork() const {
@@ -47,27 +48,27 @@ public:
return U;
}
};
-
+
class VISIBILITY_HIDDEN BFS : public GRWorkList {
std::queue<GRWorkListUnit> Queue;
public:
virtual bool hasWork() const {
return !Queue.empty();
}
-
+
virtual void Enqueue(const GRWorkListUnit& U) {
Queue.push(U);
}
-
+
virtual GRWorkListUnit Dequeue() {
    // Don't use a const reference. The subsequent pop() might make it
    // unsafe.
- GRWorkListUnit U = Queue.front();
+ GRWorkListUnit U = Queue.front();
Queue.pop();
return U;
}
};
-
+
} // end anonymous namespace
// Place the dstor for GRWorkList here because it contains virtual member
@@ -85,14 +86,14 @@ namespace {
virtual bool hasWork() const {
return !Queue.empty() || !Stack.empty();
}
-
+
virtual void Enqueue(const GRWorkListUnit& U) {
if (isa<BlockEntrance>(U.getNode()->getLocation()))
Queue.push(U);
else
Stack.push_back(U);
}
-
+
virtual GRWorkListUnit Dequeue() {
// Process all basic blocks to completion.
if (!Stack.empty()) {
@@ -100,13 +101,13 @@ namespace {
Stack.pop_back(); // This technically "invalidates" U, but we are fine.
return U;
}
-
+
assert(!Queue.empty());
    // Don't use a const reference. The subsequent pop() might make it
    // unsafe.
- GRWorkListUnit U = Queue.front();
+ GRWorkListUnit U = Queue.front();
Queue.pop();
- return U;
+ return U;
}
};
} // end anonymous namespace
@@ -118,55 +119,80 @@ GRWorkList* GRWorkList::MakeBFSBlockDFSContents() {
//===----------------------------------------------------------------------===//
// Core analysis engine.
//===----------------------------------------------------------------------===//
+void GRCoreEngine::ProcessEndPath(GREndPathNodeBuilder& Builder) {
+ SubEngine.ProcessEndPath(Builder);
+}
+
+void GRCoreEngine::ProcessStmt(Stmt* S, GRStmtNodeBuilder& Builder) {
+ SubEngine.ProcessStmt(S, Builder);
+}
+
+bool GRCoreEngine::ProcessBlockEntrance(CFGBlock* Blk, const GRState* State,
+ GRBlockCounter BC) {
+ return SubEngine.ProcessBlockEntrance(Blk, State, BC);
+}
+
+void GRCoreEngine::ProcessBranch(Stmt* Condition, Stmt* Terminator,
+ GRBranchNodeBuilder& Builder) {
+ SubEngine.ProcessBranch(Condition, Terminator, Builder);
+}
+
+void GRCoreEngine::ProcessIndirectGoto(GRIndirectGotoNodeBuilder& Builder) {
+ SubEngine.ProcessIndirectGoto(Builder);
+}
+
+void GRCoreEngine::ProcessSwitch(GRSwitchNodeBuilder& Builder) {
+ SubEngine.ProcessSwitch(Builder);
+}
/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
-bool GRCoreEngineImpl::ExecuteWorkList(unsigned Steps) {
-
+bool GRCoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps) {
+
if (G->num_roots() == 0) { // Initialize the analysis by constructing
// the root if none exists.
-
- CFGBlock* Entry = &getCFG().getEntry();
-
- assert (Entry->empty() &&
+
+ CFGBlock* Entry = &(L->getCFG()->getEntry());
+
+ assert (Entry->empty() &&
"Entry block must be empty.");
-
+
assert (Entry->succ_size() == 1 &&
"Entry block must have 1 successor.");
-
+
// Get the solitary successor.
- CFGBlock* Succ = *(Entry->succ_begin());
-
+ CFGBlock* Succ = *(Entry->succ_begin());
+
// Construct an edge representing the
// starting location in the function.
- BlockEdge StartLoc(Entry, Succ);
-
+ BlockEdge StartLoc(Entry, Succ, L);
+
// Set the current block counter to being empty.
WList->setBlockCounter(BCounterFactory.GetEmptyCounter());
-
+
// Generate the root.
- GenerateNode(StartLoc, getInitialState(), 0);
+ GenerateNode(StartLoc, getInitialState(L), 0);
}
-
+
while (Steps && WList->hasWork()) {
--Steps;
const GRWorkListUnit& WU = WList->Dequeue();
-
+
// Set the current block counter.
WList->setBlockCounter(WU.getBlockCounter());
// Retrieve the node.
- ExplodedNodeImpl* Node = WU.getNode();
-
+ ExplodedNode* Node = WU.getNode();
+
// Dispatch on the location type.
switch (Node->getLocation().getKind()) {
case ProgramPoint::BlockEdgeKind:
HandleBlockEdge(cast<BlockEdge>(Node->getLocation()), Node);
break;
-
+
case ProgramPoint::BlockEntranceKind:
HandleBlockEntrance(cast<BlockEntrance>(Node->getLocation()), Node);
break;
-
+
case ProgramPoint::BlockExitKind:
assert (false && "BlockExit location never occur in forward analysis.");
break;
@@ -175,26 +201,26 @@ bool GRCoreEngineImpl::ExecuteWorkList(unsigned Steps) {
assert(isa<PostStmt>(Node->getLocation()));
HandlePostStmt(cast<PostStmt>(Node->getLocation()), WU.getBlock(),
WU.getIndex(), Node);
- break;
+ break;
}
}
-
+
return WList->hasWork();
}
-void GRCoreEngineImpl::HandleBlockEdge(const BlockEdge& L,
- ExplodedNodeImpl* Pred) {
-
+
+void GRCoreEngine::HandleBlockEdge(const BlockEdge& L, ExplodedNode* Pred) {
+
CFGBlock* Blk = L.getDst();
-
- // Check if we are entering the EXIT block.
- if (Blk == &getCFG().getExit()) {
-
- assert (getCFG().getExit().size() == 0
+
+ // Check if we are entering the EXIT block.
+ if (Blk == &(Pred->getLocationContext()->getCFG()->getExit())) {
+
+ assert (Pred->getLocationContext()->getCFG()->getExit().size() == 0
&& "EXIT block cannot contain Stmts.");
// Process the final state transition.
- GREndPathNodeBuilderImpl Builder(Blk, Pred, this);
+ GREndPathNodeBuilder Builder(Blk, Pred, this);
ProcessEndPath(Builder);
// This path is done. Don't enqueue any more nodes.
@@ -202,84 +228,81 @@ void GRCoreEngineImpl::HandleBlockEdge(const BlockEdge& L,
}
// FIXME: Should we allow ProcessBlockEntrance to also manipulate state?
-
+
if (ProcessBlockEntrance(Blk, Pred->State, WList->getBlockCounter()))
- GenerateNode(BlockEntrance(Blk), Pred->State, Pred);
+ GenerateNode(BlockEntrance(Blk, Pred->getLocationContext()), Pred->State, Pred);
}
-void GRCoreEngineImpl::HandleBlockEntrance(const BlockEntrance& L,
- ExplodedNodeImpl* Pred) {
-
+void GRCoreEngine::HandleBlockEntrance(const BlockEntrance& L,
+ ExplodedNode* Pred) {
+
// Increment the block counter.
GRBlockCounter Counter = WList->getBlockCounter();
Counter = BCounterFactory.IncrementCount(Counter, L.getBlock()->getBlockID());
WList->setBlockCounter(Counter);
-
- // Process the entrance of the block.
+
+ // Process the entrance of the block.
if (Stmt* S = L.getFirstStmt()) {
- GRStmtNodeBuilderImpl Builder(L.getBlock(), 0, Pred, this);
+ GRStmtNodeBuilder Builder(L.getBlock(), 0, Pred, this,
+ SubEngine.getStateManager());
ProcessStmt(S, Builder);
}
- else
+ else
HandleBlockExit(L.getBlock(), Pred);
}
-GRCoreEngineImpl::~GRCoreEngineImpl() {
- delete WList;
-}
+void GRCoreEngine::HandleBlockExit(CFGBlock * B, ExplodedNode* Pred) {
-void GRCoreEngineImpl::HandleBlockExit(CFGBlock * B, ExplodedNodeImpl* Pred) {
-
if (Stmt* Term = B->getTerminator()) {
switch (Term->getStmtClass()) {
default:
assert(false && "Analysis for this terminator not implemented.");
break;
-
+
case Stmt::BinaryOperatorClass: // '&&' and '||'
HandleBranch(cast<BinaryOperator>(Term)->getLHS(), Term, B, Pred);
return;
-
+
case Stmt::ConditionalOperatorClass:
HandleBranch(cast<ConditionalOperator>(Term)->getCond(), Term, B, Pred);
return;
-
+
// FIXME: Use constant-folding in CFG construction to simplify this
// case.
-
+
case Stmt::ChooseExprClass:
HandleBranch(cast<ChooseExpr>(Term)->getCond(), Term, B, Pred);
return;
-
+
case Stmt::DoStmtClass:
HandleBranch(cast<DoStmt>(Term)->getCond(), Term, B, Pred);
return;
-
+
case Stmt::ForStmtClass:
HandleBranch(cast<ForStmt>(Term)->getCond(), Term, B, Pred);
return;
-
+
case Stmt::ContinueStmtClass:
case Stmt::BreakStmtClass:
- case Stmt::GotoStmtClass:
+ case Stmt::GotoStmtClass:
break;
-
+
case Stmt::IfStmtClass:
HandleBranch(cast<IfStmt>(Term)->getCond(), Term, B, Pred);
return;
-
+
case Stmt::IndirectGotoStmtClass: {
// Only 1 successor: the indirect goto dispatch block.
assert (B->succ_size() == 1);
-
- GRIndirectGotoNodeBuilderImpl
+
+ GRIndirectGotoNodeBuilder
builder(Pred, B, cast<IndirectGotoStmt>(Term)->getTarget(),
*(B->succ_begin()), this);
-
+
ProcessIndirectGoto(builder);
return;
}
-
+
case Stmt::ObjCForCollectionStmtClass: {
// In the case of ObjCForCollectionStmt, it appears twice in a CFG:
//
@@ -294,16 +317,15 @@ void GRCoreEngineImpl::HandleBlockExit(CFGBlock * B, ExplodedNodeImpl* Pred) {
HandleBranch(Term, Term, B, Pred);
return;
}
-
+
case Stmt::SwitchStmtClass: {
- GRSwitchNodeBuilderImpl builder(Pred, B,
- cast<SwitchStmt>(Term)->getCond(),
- this);
-
+ GRSwitchNodeBuilder builder(Pred, B, cast<SwitchStmt>(Term)->getCond(),
+ this);
+
ProcessSwitch(builder);
return;
}
-
+
case Stmt::WhileStmtClass:
HandleBranch(cast<WhileStmt>(Term)->getCond(), Term, B, Pred);
return;
@@ -312,265 +334,280 @@ void GRCoreEngineImpl::HandleBlockExit(CFGBlock * B, ExplodedNodeImpl* Pred) {
assert (B->succ_size() == 1 &&
"Blocks with no terminator should have at most 1 successor.");
-
- GenerateNode(BlockEdge(B, *(B->succ_begin())), Pred->State, Pred);
+
+ GenerateNode(BlockEdge(B, *(B->succ_begin()), Pred->getLocationContext()),
+ Pred->State, Pred);
}
-void GRCoreEngineImpl::HandleBranch(Stmt* Cond, Stmt* Term, CFGBlock * B,
- ExplodedNodeImpl* Pred) {
+void GRCoreEngine::HandleBranch(Stmt* Cond, Stmt* Term, CFGBlock * B,
+ ExplodedNode* Pred) {
assert (B->succ_size() == 2);
- GRBranchNodeBuilderImpl Builder(B, *(B->succ_begin()), *(B->succ_begin()+1),
- Pred, this);
-
+ GRBranchNodeBuilder Builder(B, *(B->succ_begin()), *(B->succ_begin()+1),
+ Pred, this);
+
ProcessBranch(Cond, Term, Builder);
}
-void GRCoreEngineImpl::HandlePostStmt(const PostStmt& L, CFGBlock* B,
- unsigned StmtIdx, ExplodedNodeImpl* Pred) {
-
+void GRCoreEngine::HandlePostStmt(const PostStmt& L, CFGBlock* B,
+ unsigned StmtIdx, ExplodedNode* Pred) {
+
assert (!B->empty());
if (StmtIdx == B->size())
HandleBlockExit(B, Pred);
else {
- GRStmtNodeBuilderImpl Builder(B, StmtIdx, Pred, this);
+ GRStmtNodeBuilder Builder(B, StmtIdx, Pred, this,
+ SubEngine.getStateManager());
ProcessStmt((*B)[StmtIdx], Builder);
}
}
/// GenerateNode - Utility method to generate nodes, hook up successors,
/// and add nodes to the worklist.
-void GRCoreEngineImpl::GenerateNode(const ProgramPoint& Loc, const void* State,
- ExplodedNodeImpl* Pred) {
-
+void GRCoreEngine::GenerateNode(const ProgramPoint& Loc,
+ const GRState* State, ExplodedNode* Pred) {
+
bool IsNew;
- ExplodedNodeImpl* Node = G->getNodeImpl(Loc, State, &IsNew);
-
- if (Pred)
- Node->addPredecessor(Pred); // Link 'Node' with its predecessor.
+ ExplodedNode* Node = G->getNode(Loc, State, &IsNew);
+
+ if (Pred)
+ Node->addPredecessor(Pred, *G); // Link 'Node' with its predecessor.
else {
assert (IsNew);
G->addRoot(Node); // 'Node' has no predecessor. Make it a root.
}
-
+
// Only add 'Node' to the worklist if it was freshly generated.
if (IsNew) WList->Enqueue(Node);
}
-GRStmtNodeBuilderImpl::GRStmtNodeBuilderImpl(CFGBlock* b, unsigned idx,
- ExplodedNodeImpl* N, GRCoreEngineImpl* e)
- : Eng(*e), B(*b), Idx(idx), Pred(N), LastNode(N) {
+GRStmtNodeBuilder::GRStmtNodeBuilder(CFGBlock* b, unsigned idx,
+ ExplodedNode* N, GRCoreEngine* e,
+ GRStateManager &mgr)
+ : Eng(*e), B(*b), Idx(idx), Pred(N), LastNode(N), Mgr(mgr), Auditor(0),
+ PurgingDeadSymbols(false), BuildSinks(false), HasGeneratedNode(false),
+ PointKind(ProgramPoint::PostStmtKind), Tag(0) {
Deferred.insert(N);
+ CleanedState = getLastNode()->getState();
}
-GRStmtNodeBuilderImpl::~GRStmtNodeBuilderImpl() {
+GRStmtNodeBuilder::~GRStmtNodeBuilder() {
for (DeferredTy::iterator I=Deferred.begin(), E=Deferred.end(); I!=E; ++I)
if (!(*I)->isSink())
GenerateAutoTransition(*I);
}
-void GRStmtNodeBuilderImpl::GenerateAutoTransition(ExplodedNodeImpl* N) {
+void GRStmtNodeBuilder::GenerateAutoTransition(ExplodedNode* N) {
assert (!N->isSink());
-
- PostStmt Loc(getStmt());
-
+
+ PostStmt Loc(getStmt(), N->getLocationContext());
+
if (Loc == N->getLocation()) {
// Note: 'N' should be a fresh node because otherwise it shouldn't be
// a member of Deferred.
Eng.WList->Enqueue(N, B, Idx+1);
return;
}
-
+
bool IsNew;
- ExplodedNodeImpl* Succ = Eng.G->getNodeImpl(Loc, N->State, &IsNew);
- Succ->addPredecessor(N);
+ ExplodedNode* Succ = Eng.G->getNode(Loc, N->State, &IsNew);
+ Succ->addPredecessor(N, *Eng.G);
if (IsNew)
Eng.WList->Enqueue(Succ, B, Idx+1);
}
-static inline PostStmt GetPostLoc(Stmt* S, ProgramPoint::Kind K,
- const void *tag) {
+static inline PostStmt GetPostLoc(const Stmt* S, ProgramPoint::Kind K,
+ const LocationContext *L, const void *tag) {
switch (K) {
default:
assert(false && "Invalid PostXXXKind.");
-
+
case ProgramPoint::PostStmtKind:
- return PostStmt(S, tag);
-
+ return PostStmt(S, L, tag);
+
case ProgramPoint::PostLoadKind:
- return PostLoad(S, tag);
+ return PostLoad(S, L, tag);
case ProgramPoint::PostUndefLocationCheckFailedKind:
- return PostUndefLocationCheckFailed(S, tag);
+ return PostUndefLocationCheckFailed(S, L, tag);
case ProgramPoint::PostLocationChecksSucceedKind:
- return PostLocationChecksSucceed(S, tag);
-
+ return PostLocationChecksSucceed(S, L, tag);
+
case ProgramPoint::PostOutOfBoundsCheckFailedKind:
- return PostOutOfBoundsCheckFailed(S, tag);
-
+ return PostOutOfBoundsCheckFailed(S, L, tag);
+
case ProgramPoint::PostNullCheckFailedKind:
- return PostNullCheckFailed(S, tag);
-
+ return PostNullCheckFailed(S, L, tag);
+
case ProgramPoint::PostStoreKind:
- return PostStore(S, tag);
-
+ return PostStore(S, L, tag);
+
case ProgramPoint::PostLValueKind:
- return PostLValue(S, tag);
-
+ return PostLValue(S, L, tag);
+
case ProgramPoint::PostPurgeDeadSymbolsKind:
- return PostPurgeDeadSymbols(S, tag);
+ return PostPurgeDeadSymbols(S, L, tag);
}
}
-ExplodedNodeImpl*
-GRStmtNodeBuilderImpl::generateNodeImpl(Stmt* S, const void* State,
- ExplodedNodeImpl* Pred,
+ExplodedNode*
+GRStmtNodeBuilder::generateNodeInternal(const Stmt* S, const GRState* State,
+ ExplodedNode* Pred,
ProgramPoint::Kind K,
const void *tag) {
- return generateNodeImpl(GetPostLoc(S, K, tag), State, Pred);
+ return K == ProgramPoint::PreStmtKind
+ ? generateNodeInternal(PreStmt(S, Pred->getLocationContext(),tag),
+ State, Pred)
+ : generateNodeInternal(GetPostLoc(S, K, Pred->getLocationContext(), tag),
+ State, Pred);
}
-ExplodedNodeImpl*
-GRStmtNodeBuilderImpl::generateNodeImpl(PostStmt Loc, const void* State,
- ExplodedNodeImpl* Pred) {
+ExplodedNode*
+GRStmtNodeBuilder::generateNodeInternal(const ProgramPoint &Loc,
+ const GRState* State,
+ ExplodedNode* Pred) {
bool IsNew;
- ExplodedNodeImpl* N = Eng.G->getNodeImpl(Loc, State, &IsNew);
- N->addPredecessor(Pred);
+ ExplodedNode* N = Eng.G->getNode(Loc, State, &IsNew);
+ N->addPredecessor(Pred, *Eng.G);
Deferred.erase(Pred);
-
+
if (IsNew) {
Deferred.insert(N);
LastNode = N;
return N;
}
-
+
LastNode = NULL;
- return NULL;
+ return NULL;
}
-ExplodedNodeImpl* GRBranchNodeBuilderImpl::generateNodeImpl(const void* State,
- bool branch) {
+ExplodedNode* GRBranchNodeBuilder::generateNode(const GRState* State,
+ bool branch) {
+
+ // If the branch has been marked infeasible we should not generate a node.
+ if (!isFeasible(branch))
+ return NULL;
+
bool IsNew;
-
- ExplodedNodeImpl* Succ =
- Eng.G->getNodeImpl(BlockEdge(Src, branch ? DstT : DstF), State, &IsNew);
-
- Succ->addPredecessor(Pred);
-
- if (branch) GeneratedTrue = true;
- else GeneratedFalse = true;
-
+
+ ExplodedNode* Succ =
+ Eng.G->getNode(BlockEdge(Src,branch ? DstT:DstF,Pred->getLocationContext()),
+ State, &IsNew);
+
+ Succ->addPredecessor(Pred, *Eng.G);
+
+ if (branch)
+ GeneratedTrue = true;
+ else
+ GeneratedFalse = true;
+
if (IsNew) {
Deferred.push_back(Succ);
return Succ;
}
-
+
return NULL;
}
-GRBranchNodeBuilderImpl::~GRBranchNodeBuilderImpl() {
- if (!GeneratedTrue) generateNodeImpl(Pred->State, true);
- if (!GeneratedFalse) generateNodeImpl(Pred->State, false);
-
+GRBranchNodeBuilder::~GRBranchNodeBuilder() {
+ if (!GeneratedTrue) generateNode(Pred->State, true);
+ if (!GeneratedFalse) generateNode(Pred->State, false);
+
for (DeferredTy::iterator I=Deferred.begin(), E=Deferred.end(); I!=E; ++I)
if (!(*I)->isSink()) Eng.WList->Enqueue(*I);
}
-ExplodedNodeImpl*
-GRIndirectGotoNodeBuilderImpl::generateNodeImpl(const Iterator& I,
- const void* St,
- bool isSink) {
+ExplodedNode*
+GRIndirectGotoNodeBuilder::generateNode(const iterator& I, const GRState* St,
+ bool isSink) {
bool IsNew;
-
- ExplodedNodeImpl* Succ =
- Eng.G->getNodeImpl(BlockEdge(Src, I.getBlock()), St, &IsNew);
-
- Succ->addPredecessor(Pred);
-
+
+ ExplodedNode* Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
+ Pred->getLocationContext()), St, &IsNew);
+
+ Succ->addPredecessor(Pred, *Eng.G);
+
if (IsNew) {
-
+
if (isSink)
Succ->markAsSink();
else
Eng.WList->Enqueue(Succ);
-
+
return Succ;
}
-
+
return NULL;
}
-ExplodedNodeImpl*
-GRSwitchNodeBuilderImpl::generateCaseStmtNodeImpl(const Iterator& I,
- const void* St) {
+ExplodedNode*
+GRSwitchNodeBuilder::generateCaseStmtNode(const iterator& I, const GRState* St){
bool IsNew;
-
- ExplodedNodeImpl* Succ = Eng.G->getNodeImpl(BlockEdge(Src, I.getBlock()),
- St, &IsNew);
- Succ->addPredecessor(Pred);
-
+
+ ExplodedNode* Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
+ Pred->getLocationContext()), St, &IsNew);
+ Succ->addPredecessor(Pred, *Eng.G);
+
if (IsNew) {
Eng.WList->Enqueue(Succ);
return Succ;
}
-
+
return NULL;
}
-ExplodedNodeImpl*
-GRSwitchNodeBuilderImpl::generateDefaultCaseNodeImpl(const void* St,
- bool isSink) {
-
+ExplodedNode*
+GRSwitchNodeBuilder::generateDefaultCaseNode(const GRState* St, bool isSink) {
+
// Get the block for the default case.
assert (Src->succ_rbegin() != Src->succ_rend());
CFGBlock* DefaultBlock = *Src->succ_rbegin();
-
+
bool IsNew;
-
- ExplodedNodeImpl* Succ = Eng.G->getNodeImpl(BlockEdge(Src, DefaultBlock),
- St, &IsNew);
- Succ->addPredecessor(Pred);
-
+
+ ExplodedNode* Succ = Eng.G->getNode(BlockEdge(Src, DefaultBlock,
+ Pred->getLocationContext()), St, &IsNew);
+ Succ->addPredecessor(Pred, *Eng.G);
+
if (IsNew) {
if (isSink)
Succ->markAsSink();
else
Eng.WList->Enqueue(Succ);
-
+
return Succ;
}
-
+
return NULL;
}
-GREndPathNodeBuilderImpl::~GREndPathNodeBuilderImpl() {
+GREndPathNodeBuilder::~GREndPathNodeBuilder() {
// Auto-generate an EOP node if one has not been generated.
- if (!HasGeneratedNode) generateNodeImpl(Pred->State);
+ if (!HasGeneratedNode) generateNode(Pred->State);
}
-ExplodedNodeImpl*
-GREndPathNodeBuilderImpl::generateNodeImpl(const void* State,
- const void *tag,
- ExplodedNodeImpl* P) {
- HasGeneratedNode = true;
+ExplodedNode*
+GREndPathNodeBuilder::generateNode(const GRState* State, const void *tag,
+ ExplodedNode* P) {
+ HasGeneratedNode = true;
bool IsNew;
-
- ExplodedNodeImpl* Node =
- Eng.G->getNodeImpl(BlockEntrance(&B, tag), State, &IsNew);
-
- Node->addPredecessor(P ? P : Pred);
-
+
+ ExplodedNode* Node = Eng.G->getNode(BlockEntrance(&B,
+ Pred->getLocationContext(), tag), State, &IsNew);
+
+ Node->addPredecessor(P ? P : Pred, *Eng.G);
+
if (IsNew) {
Eng.G->addEndOfPath(Node);
return Node;
}
-
+
return NULL;
}
diff --git a/lib/Analysis/GRExprEngine.cpp b/lib/Analysis/GRExprEngine.cpp
index d9117f5930e6..5079acef54b4 100644
--- a/lib/Analysis/GRExprEngine.cpp
+++ b/lib/Analysis/GRExprEngine.cpp
@@ -15,16 +15,16 @@
#include "clang/Analysis/PathSensitive/GRExprEngine.h"
#include "clang/Analysis/PathSensitive/GRExprEngineBuilders.h"
+#include "clang/Analysis/PathSensitive/Checker.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/PrettyStackTrace.h"
-#include "llvm/Support/Streams.h"
-#include "llvm/ADT/ImmutableList.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/ImmutableList.h"
#ifndef NDEBUG
#include "llvm/Support/GraphWriter.h"
@@ -36,7 +36,7 @@ using llvm::cast;
using llvm::APSInt;
//===----------------------------------------------------------------------===//
-// Engine construction and deletion.
+// Batch auditor. DEPRECATED.
//===----------------------------------------------------------------------===//
namespace {
@@ -44,7 +44,7 @@ namespace {
class VISIBILITY_HIDDEN MappedBatchAuditor : public GRSimpleAPICheck {
typedef llvm::ImmutableList<GRSimpleAPICheck*> Checks;
typedef llvm::DenseMap<void*,Checks> MapTy;
-
+
MapTy M;
Checks::Factory F;
Checks AllStmts;
@@ -52,18 +52,18 @@ class VISIBILITY_HIDDEN MappedBatchAuditor : public GRSimpleAPICheck {
public:
MappedBatchAuditor(llvm::BumpPtrAllocator& Alloc) :
F(Alloc), AllStmts(F.GetEmptyList()) {}
-
+
virtual ~MappedBatchAuditor() {
llvm::DenseSet<GRSimpleAPICheck*> AlreadyVisited;
-
+
for (MapTy::iterator MI = M.begin(), ME = M.end(); MI != ME; ++MI)
for (Checks::iterator I=MI->second.begin(), E=MI->second.end(); I!=E;++I){
GRSimpleAPICheck* check = *I;
-
+
if (AlreadyVisited.count(check))
continue;
-
+
AlreadyVisited.insert(check);
delete check;
}
@@ -75,34 +75,69 @@ public:
MapTy::iterator I = M.find(key);
M[key] = F.Concat(A, I == M.end() ? F.GetEmptyList() : I->second);
}
-
+
void AddCheck(GRSimpleAPICheck *A) {
assert (A && "Check cannot be null.");
- AllStmts = F.Concat(A, AllStmts);
+ AllStmts = F.Concat(A, AllStmts);
}
- virtual bool Audit(NodeTy* N, GRStateManager& VMgr) {
+ virtual bool Audit(ExplodedNode* N, GRStateManager& VMgr) {
// First handle the auditors that accept all statements.
bool isSink = false;
for (Checks::iterator I = AllStmts.begin(), E = AllStmts.end(); I!=E; ++I)
isSink |= (*I)->Audit(N, VMgr);
-
+
// Next handle the auditors that accept only specific statements.
- Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
+ const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
void* key = reinterpret_cast<void*>((uintptr_t) S->getStmtClass());
MapTy::iterator MI = M.find(key);
- if (MI != M.end()) {
+ if (MI != M.end()) {
for (Checks::iterator I=MI->second.begin(), E=MI->second.end(); I!=E; ++I)
isSink |= (*I)->Audit(N, VMgr);
}
-
- return isSink;
+
+ return isSink;
}
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
+// Checker worklist routines.
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::CheckerVisit(Stmt *S, ExplodedNodeSet &Dst,
+ ExplodedNodeSet &Src, bool isPrevisit) {
+
+ if (Checkers.empty()) {
+ Dst = Src;
+ return;
+ }
+
+ ExplodedNodeSet Tmp;
+ ExplodedNodeSet *PrevSet = &Src;
+
+ for (std::vector<Checker*>::iterator I = Checkers.begin(), E = Checkers.end();
+ I != E; ++I) {
+
+ ExplodedNodeSet *CurrSet = (I+1 == E) ? &Dst
+ : (PrevSet == &Tmp) ? &Src : &Tmp;
+ CurrSet->clear();
+ Checker *checker = *I;
+
+ for (ExplodedNodeSet::iterator NI = PrevSet->begin(), NE = PrevSet->end();
+ NI != NE; ++NI)
+ checker->GR_Visit(*CurrSet, *Builder, *this, S, *NI, isPrevisit);
+
+ // Update which NodeSet is the current one.
+ PrevSet = CurrSet;
+ }
+
+ // Don't autotransition. The CheckerContext objects should do this
+ // automatically.
+}
+
+//===----------------------------------------------------------------------===//
// Engine construction and deletion.
//===----------------------------------------------------------------------===//
@@ -112,29 +147,27 @@ static inline Selector GetNullarySelector(const char* name, ASTContext& Ctx) {
}
-GRExprEngine::GRExprEngine(CFG& cfg, Decl& CD, ASTContext& Ctx,
- LiveVariables& L, BugReporterData& BRD,
- bool purgeDead, bool eagerlyAssume,
- StoreManagerCreator SMC,
- ConstraintManagerCreator CMC)
- : CoreEngine(cfg, CD, Ctx, *this),
+GRExprEngine::GRExprEngine(AnalysisManager &mgr)
+ : AMgr(mgr),
+ CoreEngine(mgr.getASTContext(), *this),
G(CoreEngine.getGraph()),
- Liveness(L),
Builder(NULL),
- StateMgr(G.getContext(), SMC, CMC, G.getAllocator(), cfg, CD, L),
+ StateMgr(G.getContext(), mgr.getStoreManagerCreator(),
+ mgr.getConstraintManagerCreator(), G.getAllocator()),
SymMgr(StateMgr.getSymbolManager()),
ValMgr(StateMgr.getValueManager()),
- SVator(clang::CreateSimpleSValuator(ValMgr)), // FIXME: Generalize later.
+ SVator(ValMgr.getSValuator()),
CurrentStmt(NULL),
NSExceptionII(NULL), NSExceptionInstanceRaiseSelectors(NULL),
- RaiseSel(GetNullarySelector("raise", G.getContext())),
- PurgeDead(purgeDead),
- BR(BRD, *this),
- EagerlyAssume(eagerlyAssume) {}
+ RaiseSel(GetNullarySelector("raise", G.getContext())),
+ BR(mgr, *this) {}
-GRExprEngine::~GRExprEngine() {
+GRExprEngine::~GRExprEngine() {
BR.FlushReports();
delete [] NSExceptionInstanceRaiseSelectors;
+ for (std::vector<Checker*>::iterator I=Checkers.begin(), E=Checkers.end();
+ I!=E; ++I)
+ delete *I;
}
//===----------------------------------------------------------------------===//
@@ -151,7 +184,7 @@ void GRExprEngine::setTransferFunctions(GRTransferFuncs* tf) {
void GRExprEngine::AddCheck(GRSimpleAPICheck* A, Stmt::StmtClass C) {
if (!BatchAuditor)
BatchAuditor.reset(new MappedBatchAuditor(getGraph().getAllocator()));
-
+
((MappedBatchAuditor*) BatchAuditor.get())->AddCheck(A, C);
}
@@ -162,29 +195,50 @@ void GRExprEngine::AddCheck(GRSimpleAPICheck *A) {
((MappedBatchAuditor*) BatchAuditor.get())->AddCheck(A);
}
-const GRState* GRExprEngine::getInitialState() {
- const GRState *state = StateMgr.getInitialState();
-
- // Precondition: the first argument of 'main' is an integer guaranteed
- // to be > 0.
+const GRState* GRExprEngine::getInitialState(const LocationContext *InitLoc) {
+ const GRState *state = StateMgr.getInitialState(InitLoc);
+
+ // Preconditions.
+
// FIXME: It would be nice if we had a more general mechanism to add
// such preconditions. Some day.
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(&StateMgr.getCodeDecl()))
+ const Decl *D = InitLoc->getDecl();
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // Precondition: the first argument of 'main' is an integer guaranteed
+ // to be > 0.
if (strcmp(FD->getIdentifier()->getName(), "main") == 0 &&
FD->getNumParams() > 0) {
const ParmVarDecl *PD = FD->getParamDecl(0);
QualType T = PD->getType();
if (T->isIntegerType())
- if (const MemRegion *R = state->getRegion(PD)) {
+ if (const MemRegion *R = state->getRegion(PD, InitLoc)) {
SVal V = state->getSVal(loc::MemRegionVal(R));
- SVal Constraint = EvalBinOp(state, BinaryOperator::GT, V,
- ValMgr.makeZeroVal(T),
- getContext().IntTy);
-
- if (const GRState *newState = state->assume(Constraint, true))
- state = newState;
+ SVal Constraint_untested = EvalBinOp(state, BinaryOperator::GT, V,
+ ValMgr.makeZeroVal(T),
+ getContext().IntTy);
+
+ if (DefinedOrUnknownSVal *Constraint =
+ dyn_cast<DefinedOrUnknownSVal>(&Constraint_untested)) {
+ if (const GRState *newState = state->Assume(*Constraint, true))
+ state = newState;
+ }
}
}
+ }
+ else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ // Precondition: 'self' is always non-null upon entry to an Objective-C
+ // method.
+ const ImplicitParamDecl *SelfD = MD->getSelfDecl();
+ const MemRegion *R = state->getRegion(SelfD, InitLoc);
+ SVal V = state->getSVal(loc::MemRegionVal(R));
+
+ if (const Loc *LV = dyn_cast<Loc>(&V)) {
+ // Assume that the pointer value in 'self' is non-null.
+ state = state->Assume(*LV, true);
+ assert(state && "'self' cannot be null");
+ }
+ }
return state;
}
@@ -193,32 +247,33 @@ const GRState* GRExprEngine::getInitialState() {
// Top-level transfer function logic (Dispatcher).
//===----------------------------------------------------------------------===//
-void GRExprEngine::ProcessStmt(Stmt* S, StmtNodeBuilder& builder) {
-
+void GRExprEngine::ProcessStmt(Stmt* S, GRStmtNodeBuilder& builder) {
+
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
S->getLocStart(),
"Error evaluating statement");
-
+
Builder = &builder;
EntryNode = builder.getLastNode();
-
+
// FIXME: Consolidate.
CurrentStmt = S;
StateMgr.CurrentStmt = S;
-
+
// Set up our simple checks.
if (BatchAuditor)
Builder->setAuditor(BatchAuditor.get());
-
- // Create the cleaned state.
- SymbolReaper SymReaper(Liveness, SymMgr);
- CleanedState = PurgeDead ? StateMgr.RemoveDeadBindings(EntryNode->getState(),
- CurrentStmt, SymReaper)
- : EntryNode->getState();
+
+ // Create the cleaned state.
+ SymbolReaper SymReaper(Builder->getBasePredecessor()->getLiveVariables(),
+ SymMgr);
+ CleanedState = AMgr.shouldPurgeDead()
+ ? StateMgr.RemoveDeadBindings(EntryNode->getState(), CurrentStmt, SymReaper)
+ : EntryNode->getState();
// Process any special transfer function for dead symbols.
- NodeSet Tmp;
-
+ ExplodedNodeSet Tmp;
+
if (!SymReaper.hasDeadSymbols())
Tmp.Add(EntryNode);
else {
@@ -227,36 +282,36 @@ void GRExprEngine::ProcessStmt(Stmt* S, StmtNodeBuilder& builder) {
SaveAndRestore<bool> OldPurgeDeadSymbols(Builder->PurgingDeadSymbols);
Builder->PurgingDeadSymbols = true;
-
- getTF().EvalDeadSymbols(Tmp, *this, *Builder, EntryNode, S,
+
+ getTF().EvalDeadSymbols(Tmp, *this, *Builder, EntryNode, S,
CleanedState, SymReaper);
if (!Builder->BuildSinks && !Builder->HasGeneratedNode)
Tmp.Add(EntryNode);
}
-
+
bool HasAutoGenerated = false;
- for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
- NodeSet Dst;
-
- // Set the cleaned state.
+ ExplodedNodeSet Dst;
+
+ // Set the cleaned state.
Builder->SetCleanedState(*I == EntryNode ? CleanedState : GetState(*I));
-
- // Visit the statement.
+
+ // Visit the statement.
Visit(S, *I, Dst);
// Do we need to auto-generate a node? We only need to do this to generate
// a node with a "cleaned" state; GRCoreEngine will actually handle
- // auto-transitions for other cases.
+ // auto-transitions for other cases.
if (Dst.size() == 1 && *Dst.begin() == EntryNode
&& !Builder->HasGeneratedNode && !HasAutoGenerated) {
HasAutoGenerated = true;
builder.generateNode(S, GetState(EntryNode), *I);
}
}
-
+
// NULL out these variables to cleanup.
CleanedState = NULL;
EntryNode = NULL;
@@ -264,11 +319,11 @@ void GRExprEngine::ProcessStmt(Stmt* S, StmtNodeBuilder& builder) {
// FIXME: Consolidate.
StateMgr.CurrentStmt = 0;
CurrentStmt = 0;
-
+
Builder = NULL;
}
-void GRExprEngine::Visit(Stmt* S, NodeTy* Pred, NodeSet& Dst) {
+void GRExprEngine::Visit(Stmt* S, ExplodedNode* Pred, ExplodedNodeSet& Dst) {
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
S->getLocStart(),
"Error evaluating statement");
@@ -276,46 +331,46 @@ void GRExprEngine::Visit(Stmt* S, NodeTy* Pred, NodeSet& Dst) {
// FIXME: add metadata to the CFG so that we can disable
// this check when we KNOW that there is no block-level subexpression.
// The motivation is that this check requires a hashtable lookup.
-
- if (S != CurrentStmt && getCFG().isBlkExpr(S)) {
+
+ if (S != CurrentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(S)) {
Dst.Add(Pred);
return;
}
-
+
switch (S->getStmtClass()) {
-
+
default:
// Cases we intentionally have "default" handle:
// AddrLabelExpr, IntegerLiteral, CharacterLiteral
-
+
Dst.Add(Pred); // No-op. Simply propagate the current state unchanged.
break;
-
+
case Stmt::ArraySubscriptExprClass:
VisitArraySubscriptExpr(cast<ArraySubscriptExpr>(S), Pred, Dst, false);
break;
-
+
case Stmt::AsmStmtClass:
VisitAsmStmt(cast<AsmStmt>(S), Pred, Dst);
break;
-
+
case Stmt::BinaryOperatorClass: {
BinaryOperator* B = cast<BinaryOperator>(S);
-
+
if (B->isLogicalOp()) {
VisitLogicalExpr(B, Pred, Dst);
break;
}
else if (B->getOpcode() == BinaryOperator::Comma) {
const GRState* state = GetState(Pred);
- MakeNode(Dst, B, Pred, state->bindExpr(B, state->getSVal(B->getRHS())));
+ MakeNode(Dst, B, Pred, state->BindExpr(B, state->getSVal(B->getRHS())));
break;
}
- if (EagerlyAssume && (B->isRelationalOp() || B->isEqualityOp())) {
- NodeSet Tmp;
+ if (AMgr.shouldEagerlyAssume() && (B->isRelationalOp() || B->isEqualityOp())) {
+ ExplodedNodeSet Tmp;
VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Tmp);
- EvalEagerlyAssume(Dst, Tmp, cast<Expr>(S));
+ EvalEagerlyAssume(Dst, Tmp, cast<Expr>(S));
}
else
VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
@@ -332,13 +387,13 @@ void GRExprEngine::Visit(Stmt* S, NodeTy* Pred, NodeSet& Dst) {
// FIXME: ChooseExpr is really a constant. We need to fix
    // the CFG so that it does not model them as explicit control-flow.
-
+
case Stmt::ChooseExprClass: { // __builtin_choose_expr
ChooseExpr* C = cast<ChooseExpr>(S);
VisitGuardedExpr(C, C->getLHS(), C->getRHS(), Pred, Dst);
break;
}
-
+
case Stmt::CompoundAssignOperatorClass:
VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
break;
@@ -346,22 +401,22 @@ void GRExprEngine::Visit(Stmt* S, NodeTy* Pred, NodeSet& Dst) {
case Stmt::CompoundLiteralExprClass:
VisitCompoundLiteralExpr(cast<CompoundLiteralExpr>(S), Pred, Dst, false);
break;
-
+
case Stmt::ConditionalOperatorClass: { // '?' operator
ConditionalOperator* C = cast<ConditionalOperator>(S);
VisitGuardedExpr(C, C->getLHS(), C->getRHS(), Pred, Dst);
break;
}
-
+
case Stmt::DeclRefExprClass:
case Stmt::QualifiedDeclRefExprClass:
VisitDeclRefExpr(cast<DeclRefExpr>(S), Pred, Dst, false);
break;
-
+
case Stmt::DeclStmtClass:
VisitDeclStmt(cast<DeclStmt>(S), Pred, Dst);
break;
-
+
case Stmt::ImplicitCastExprClass:
case Stmt::CStyleCastExprClass: {
CastExpr* C = cast<CastExpr>(S);
@@ -372,11 +427,11 @@ void GRExprEngine::Visit(Stmt* S, NodeTy* Pred, NodeSet& Dst) {
case Stmt::InitListExprClass:
VisitInitListExpr(cast<InitListExpr>(S), Pred, Dst);
break;
-
+
case Stmt::MemberExprClass:
VisitMemberExpr(cast<MemberExpr>(S), Pred, Dst, false);
break;
-
+
case Stmt::ObjCIvarRefExprClass:
VisitObjCIvarRefExpr(cast<ObjCIvarRefExpr>(S), Pred, Dst, false);
break;
@@ -384,12 +439,12 @@ void GRExprEngine::Visit(Stmt* S, NodeTy* Pred, NodeSet& Dst) {
case Stmt::ObjCForCollectionStmtClass:
VisitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(S), Pred, Dst);
break;
-
+
case Stmt::ObjCMessageExprClass: {
VisitObjCMessageExpr(cast<ObjCMessageExpr>(S), Pred, Dst);
break;
}
-
+
case Stmt::ObjCAtThrowStmtClass: {
// FIXME: This is not complete. We basically treat @throw as
// an abort.
@@ -398,19 +453,19 @@ void GRExprEngine::Visit(Stmt* S, NodeTy* Pred, NodeSet& Dst) {
MakeNode(Dst, S, Pred, GetState(Pred));
break;
}
-
+
case Stmt::ParenExprClass:
Visit(cast<ParenExpr>(S)->getSubExpr()->IgnoreParens(), Pred, Dst);
break;
-
+
case Stmt::ReturnStmtClass:
VisitReturnStmt(cast<ReturnStmt>(S), Pred, Dst);
break;
-
+
case Stmt::SizeOfAlignOfExprClass:
VisitSizeOfAlignOfExpr(cast<SizeOfAlignOfExpr>(S), Pred, Dst);
break;
-
+
case Stmt::StmtExprClass: {
StmtExpr* SE = cast<StmtExpr>(S);
@@ -421,25 +476,25 @@ void GRExprEngine::Visit(Stmt* S, NodeTy* Pred, NodeSet& Dst) {
Dst.Add(Pred);
break;
}
-
+
if (Expr* LastExpr = dyn_cast<Expr>(*SE->getSubStmt()->body_rbegin())) {
const GRState* state = GetState(Pred);
- MakeNode(Dst, SE, Pred, state->bindExpr(SE, state->getSVal(LastExpr)));
+ MakeNode(Dst, SE, Pred, state->BindExpr(SE, state->getSVal(LastExpr)));
}
else
Dst.Add(Pred);
-
+
break;
}
case Stmt::StringLiteralClass:
VisitLValue(cast<StringLiteral>(S), Pred, Dst);
break;
-
+
case Stmt::UnaryOperatorClass: {
UnaryOperator *U = cast<UnaryOperator>(S);
- if (EagerlyAssume && (U->getOpcode() == UnaryOperator::LNot)) {
- NodeSet Tmp;
+ if (AMgr.shouldEagerlyAssume() && (U->getOpcode() == UnaryOperator::LNot)) {
+ ExplodedNodeSet Tmp;
VisitUnaryOperator(U, Pred, Tmp, false);
EvalEagerlyAssume(Dst, Tmp, U);
}
@@ -450,44 +505,45 @@ void GRExprEngine::Visit(Stmt* S, NodeTy* Pred, NodeSet& Dst) {
}
}
-void GRExprEngine::VisitLValue(Expr* Ex, NodeTy* Pred, NodeSet& Dst) {
-
+void GRExprEngine::VisitLValue(Expr* Ex, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst) {
+
Ex = Ex->IgnoreParens();
-
- if (Ex != CurrentStmt && getCFG().isBlkExpr(Ex)) {
+
+ if (Ex != CurrentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(Ex)) {
Dst.Add(Pred);
return;
}
-
+
switch (Ex->getStmtClass()) {
-
+
case Stmt::ArraySubscriptExprClass:
VisitArraySubscriptExpr(cast<ArraySubscriptExpr>(Ex), Pred, Dst, true);
return;
-
+
case Stmt::DeclRefExprClass:
case Stmt::QualifiedDeclRefExprClass:
VisitDeclRefExpr(cast<DeclRefExpr>(Ex), Pred, Dst, true);
return;
-
+
case Stmt::ObjCIvarRefExprClass:
VisitObjCIvarRefExpr(cast<ObjCIvarRefExpr>(Ex), Pred, Dst, true);
return;
-
+
case Stmt::UnaryOperatorClass:
VisitUnaryOperator(cast<UnaryOperator>(Ex), Pred, Dst, true);
return;
-
+
case Stmt::MemberExprClass:
VisitMemberExpr(cast<MemberExpr>(Ex), Pred, Dst, true);
return;
-
+
case Stmt::CompoundLiteralExprClass:
VisitCompoundLiteralExpr(cast<CompoundLiteralExpr>(Ex), Pred, Dst, true);
return;
-
+
case Stmt::ObjCPropertyRefExprClass:
- case Stmt::ObjCKVCRefExprClass:
+ case Stmt::ObjCImplicitSetterGetterRefExprClass:
// FIXME: Property assignments are lvalues, but not really "locations".
// e.g.: self.x = something;
// Here the "self.x" really can translate to a method call (setter) when
@@ -505,10 +561,10 @@ void GRExprEngine::VisitLValue(Expr* Ex, NodeTy* Pred, NodeSet& Dst) {
case Stmt::StringLiteralClass: {
const GRState* state = GetState(Pred);
SVal V = state->getLValue(cast<StringLiteral>(Ex));
- MakeNode(Dst, Ex, Pred, state->bindExpr(Ex, V));
+ MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V));
return;
}
-
+
default:
// Arbitrary subexpressions can return aggregate temporaries that
      // can be used in an lvalue context. We need to enhance our support
@@ -517,7 +573,7 @@ void GRExprEngine::VisitLValue(Expr* Ex, NodeTy* Pred, NodeSet& Dst) {
assert ((Ex->getType()->isAggregateType()) &&
"Other kinds of expressions with non-aggregate/union types do"
" not have lvalues.");
-
+
Visit(Ex, Pred, Dst);
}
}
@@ -528,7 +584,7 @@ void GRExprEngine::VisitLValue(Expr* Ex, NodeTy* Pred, NodeSet& Dst) {
bool GRExprEngine::ProcessBlockEntrance(CFGBlock* B, const GRState*,
GRBlockCounter BC) {
-
+
return BC.getNumVisited(B->getBlockID()) < 3;
}
@@ -536,12 +592,9 @@ bool GRExprEngine::ProcessBlockEntrance(CFGBlock* B, const GRState*,
// Generic node creation.
//===----------------------------------------------------------------------===//
-GRExprEngine::NodeTy* GRExprEngine::MakeNode(NodeSet& Dst, Stmt* S,
- NodeTy* Pred,
- const GRState* St,
- ProgramPoint::Kind K,
- const void *tag) {
-
+ExplodedNode* GRExprEngine::MakeNode(ExplodedNodeSet& Dst, Stmt* S,
+ ExplodedNode* Pred, const GRState* St,
+ ProgramPoint::Kind K, const void *tag) {
assert (Builder && "GRStmtNodeBuilder not present.");
SaveAndRestore<const void*> OldTag(Builder->Tag);
Builder->Tag = tag;
@@ -555,54 +608,54 @@ GRExprEngine::NodeTy* GRExprEngine::MakeNode(NodeSet& Dst, Stmt* S,
const GRState* GRExprEngine::MarkBranch(const GRState* state,
Stmt* Terminator,
bool branchTaken) {
-
+
switch (Terminator->getStmtClass()) {
default:
return state;
-
+
case Stmt::BinaryOperatorClass: { // '&&' and '||'
-
+
BinaryOperator* B = cast<BinaryOperator>(Terminator);
BinaryOperator::Opcode Op = B->getOpcode();
-
+
assert (Op == BinaryOperator::LAnd || Op == BinaryOperator::LOr);
-
+
// For &&, if we take the true branch, then the value of the whole
// expression is that of the RHS expression.
//
// For ||, if we take the false branch, then the value of the whole
// expression is that of the RHS expression.
-
+
Expr* Ex = (Op == BinaryOperator::LAnd && branchTaken) ||
- (Op == BinaryOperator::LOr && !branchTaken)
+ (Op == BinaryOperator::LOr && !branchTaken)
? B->getRHS() : B->getLHS();
-
- return state->bindBlkExpr(B, UndefinedVal(Ex));
+
+ return state->BindExpr(B, UndefinedVal(Ex));
}
-
+
case Stmt::ConditionalOperatorClass: { // ?:
-
+
ConditionalOperator* C = cast<ConditionalOperator>(Terminator);
-
+
// For ?, if branchTaken == true then the value is either the LHS or
// the condition itself. (GNU extension).
-
- Expr* Ex;
-
+
+ Expr* Ex;
+
if (branchTaken)
- Ex = C->getLHS() ? C->getLHS() : C->getCond();
+ Ex = C->getLHS() ? C->getLHS() : C->getCond();
else
Ex = C->getRHS();
-
- return state->bindBlkExpr(C, UndefinedVal(Ex));
+
+ return state->BindExpr(C, UndefinedVal(Ex));
}
-
+
case Stmt::ChooseExprClass: { // ?:
-
+
ChooseExpr* C = cast<ChooseExpr>(Terminator);
-
- Expr* Ex = branchTaken ? C->getLHS() : C->getRHS();
- return state->bindBlkExpr(C, UndefinedVal(Ex));
+
+ Expr* Ex = branchTaken ? C->getLHS() : C->getRHS();
+ return state->BindExpr(C, UndefinedVal(Ex));
}
}
}
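
As a reading aid, the short-circuit property that MarkBranch relies on can be seen in a
minimal, plain C++ sketch (illustration only, not analyzer code): once the branch taken on
the LHS of '&&'/'||' is known, the whole expression either has a fixed value or takes the
value of the RHS, which is why the RHS expression is stashed in the UndefinedVal above.

  #include <cassert>

  static int rhsCalls = 0;
  static bool rhs() { ++rhsCalls; return true; }

  int main() {
    bool a = false;
    bool r1 = (a && rhs());        // false branch of 'a': RHS never evaluated
    assert(!r1 && rhsCalls == 0);  // value of the whole expression is fixed (false)

    a = true;
    bool r2 = (a && rhs());        // true branch of 'a': RHS is evaluated
    assert(r2 && rhsCalls == 1);   // value of the whole expression is the RHS value
    return 0;
  }
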
@@ -621,19 +674,19 @@ static SVal RecoverCastedSymbol(GRStateManager& StateMgr, const GRState* state,
uint64_t bits = 0;
bool bitsInit = false;
-
+
while (CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
QualType T = CE->getType();
if (!T->isIntegerType())
return UnknownVal();
-
+
uint64_t newBits = Ctx.getTypeSize(T);
if (!bitsInit || newBits < bits) {
bitsInit = true;
bits = newBits;
}
-
+
Ex = CE->getSubExpr();
}
@@ -642,211 +695,215 @@ static SVal RecoverCastedSymbol(GRStateManager& StateMgr, const GRState* state,
if (!bitsInit || !T->isIntegerType() || Ctx.getTypeSize(T) > bits)
return UnknownVal();
-
+
return state->getSVal(Ex);
}
void GRExprEngine::ProcessBranch(Stmt* Condition, Stmt* Term,
- BranchNodeBuilder& builder) {
-
- // Remove old bindings for subexpressions.
- const GRState* PrevState =
- StateMgr.RemoveSubExprBindings(builder.getState());
-
+ GRBranchNodeBuilder& builder) {
+
// Check for NULL conditions; e.g. "for(;;)"
- if (!Condition) {
+ if (!Condition) {
builder.markInfeasible(false);
return;
}
-
+
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
Condition->getLocStart(),
"Error evaluating branch");
+
+ const GRState* PrevState = builder.getState();
+ SVal X = PrevState->getSVal(Condition);
+ DefinedSVal *V = NULL;
- SVal V = PrevState->getSVal(Condition);
-
- switch (V.getBaseKind()) {
- default:
- break;
+ while (true) {
+ V = dyn_cast<DefinedSVal>(&X);
- case SVal::UnknownKind: {
- if (Expr *Ex = dyn_cast<Expr>(Condition)) {
+ if (!V) {
+ if (X.isUnknown()) {
+ if (const Expr *Ex = dyn_cast<Expr>(Condition)) {
if (Ex->getType()->isIntegerType()) {
- // Try to recover some path-sensitivity. Right now casts of symbolic
- // integers that promote their values are currently not tracked well.
- // If 'Condition' is such an expression, try and recover the
- // underlying value and use that instead.
- SVal recovered = RecoverCastedSymbol(getStateManager(),
- builder.getState(), Condition,
- getContext());
-
- if (!recovered.isUnknown()) {
- V = recovered;
- break;
+ // Try to recover some path-sensitivity. Right now casts of symbolic
+ // integers that promote their values are currently not tracked well.
+ // If 'Condition' is such an expression, try and recover the
+ // underlying value and use that instead.
+ SVal recovered = RecoverCastedSymbol(getStateManager(),
+ builder.getState(), Condition,
+ getContext());
+
+ if (!recovered.isUnknown()) {
+ X = recovered;
+ continue;
+ }
}
- }
+ }
+
+ builder.generateNode(MarkBranch(PrevState, Term, true), true);
+ builder.generateNode(MarkBranch(PrevState, Term, false), false);
+ return;
}
-
- builder.generateNode(MarkBranch(PrevState, Term, true), true);
- builder.generateNode(MarkBranch(PrevState, Term, false), false);
- return;
- }
-
- case SVal::UndefinedKind: {
- NodeTy* N = builder.generateNode(PrevState, true);
+
+ assert(X.isUndef());
+ ExplodedNode *N = builder.generateNode(PrevState, true);
if (N) {
N->markAsSink();
UndefBranches.insert(N);
}
-
+
builder.markInfeasible(false);
return;
- }
- }
+ }
+ break;
+ }
+
// Process the true branch.
- if (const GRState *state = PrevState->assume(V, true))
- builder.generateNode(MarkBranch(state, Term, true), true);
- else
- builder.markInfeasible(true);
-
- // Process the false branch.
- if (const GRState *state = PrevState->assume(V, false))
- builder.generateNode(MarkBranch(state, Term, false), false);
- else
- builder.markInfeasible(false);
+ if (builder.isFeasible(true)) {
+ if (const GRState *state = PrevState->Assume(*V, true))
+ builder.generateNode(MarkBranch(state, Term, true), true);
+ else
+ builder.markInfeasible(true);
+ }
+
+ // Process the false branch.
+ if (builder.isFeasible(false)) {
+ if (const GRState *state = PrevState->Assume(*V, false))
+ builder.generateNode(MarkBranch(state, Term, false), false);
+ else
+ builder.markInfeasible(false);
+ }
}
/// ProcessIndirectGoto - Called by GRCoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a computed goto jump.
-void GRExprEngine::ProcessIndirectGoto(IndirectGotoNodeBuilder& builder) {
+void GRExprEngine::ProcessIndirectGoto(GRIndirectGotoNodeBuilder& builder) {
- const GRState *state = builder.getState();
+ const GRState *state = builder.getState();
SVal V = state->getSVal(builder.getTarget());
-
+
// Three possibilities:
//
// (1) We know the computed label.
// (2) The label is NULL (or some other constant), or Undefined.
// (3) We have no clue about the label. Dispatch to all targets.
//
-
- typedef IndirectGotoNodeBuilder::iterator iterator;
+
+ typedef GRIndirectGotoNodeBuilder::iterator iterator;
if (isa<loc::GotoLabel>(V)) {
LabelStmt* L = cast<loc::GotoLabel>(V).getLabel();
-
+
for (iterator I=builder.begin(), E=builder.end(); I != E; ++I) {
if (I.getLabel() == L) {
builder.generateNode(I, state);
return;
}
}
-
+
assert (false && "No block with label.");
return;
}
if (isa<loc::ConcreteInt>(V) || isa<UndefinedVal>(V)) {
// Dispatch to the first target and mark it as a sink.
- NodeTy* N = builder.generateNode(builder.begin(), state, true);
+ ExplodedNode* N = builder.generateNode(builder.begin(), state, true);
UndefBranches.insert(N);
return;
}
-
+
// This is really a catch-all. We don't support symbolics yet.
// FIXME: Implement dispatch for symbolic pointers.
-
+
for (iterator I=builder.begin(), E=builder.end(); I != E; ++I)
builder.generateNode(I, state);
}
void GRExprEngine::VisitGuardedExpr(Expr* Ex, Expr* L, Expr* R,
- NodeTy* Pred, NodeSet& Dst) {
-
- assert (Ex == CurrentStmt && getCFG().isBlkExpr(Ex));
-
+ ExplodedNode* Pred, ExplodedNodeSet& Dst) {
+
+ assert (Ex == CurrentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(Ex));
+
const GRState* state = GetState(Pred);
- SVal X = state->getBlkExprSVal(Ex);
-
+ SVal X = state->getSVal(Ex);
+
assert (X.isUndef());
-
+
Expr *SE = (Expr*) cast<UndefinedVal>(X).getData();
- assert(SE);
- X = state->getBlkExprSVal(SE);
-
+ assert(SE);
+ X = state->getSVal(SE);
+
// Make sure that we invalidate the previous binding.
- MakeNode(Dst, Ex, Pred, state->bindExpr(Ex, X, true, true));
+ MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, X, true));
}
/// ProcessSwitch - Called by GRCoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a switch statement.
-void GRExprEngine::ProcessSwitch(SwitchNodeBuilder& builder) {
- typedef SwitchNodeBuilder::iterator iterator;
- const GRState* state = builder.getState();
+void GRExprEngine::ProcessSwitch(GRSwitchNodeBuilder& builder) {
+ typedef GRSwitchNodeBuilder::iterator iterator;
+ const GRState* state = builder.getState();
Expr* CondE = builder.getCondition();
- SVal CondV = state->getSVal(CondE);
+ SVal CondV_untested = state->getSVal(CondE);
- if (CondV.isUndef()) {
- NodeTy* N = builder.generateDefaultCaseNode(state, true);
+ if (CondV_untested.isUndef()) {
+ ExplodedNode* N = builder.generateDefaultCaseNode(state, true);
UndefBranches.insert(N);
return;
}
+ DefinedOrUnknownSVal CondV = cast<DefinedOrUnknownSVal>(CondV_untested);
- const GRState* DefaultSt = state;
+ const GRState *DefaultSt = state;
bool defaultIsFeasible = false;
-
+
for (iterator I = builder.begin(), EI = builder.end(); I != EI; ++I) {
CaseStmt* Case = cast<CaseStmt>(I.getCase());
// Evaluate the LHS of the case value.
Expr::EvalResult V1;
- bool b = Case->getLHS()->Evaluate(V1, getContext());
-
+ bool b = Case->getLHS()->Evaluate(V1, getContext());
+
// Sanity checks. These go away in Release builds.
- assert(b && V1.Val.isInt() && !V1.HasSideEffects
+ assert(b && V1.Val.isInt() && !V1.HasSideEffects
&& "Case condition must evaluate to an integer constant.");
- b = b; // silence unused variable warning
- assert(V1.Val.getInt().getBitWidth() ==
+ b = b; // silence unused variable warning
+ assert(V1.Val.getInt().getBitWidth() ==
getContext().getTypeSize(CondE->getType()));
-
+
// Get the RHS of the case, if it exists.
Expr::EvalResult V2;
-
+
if (Expr* E = Case->getRHS()) {
b = E->Evaluate(V2, getContext());
- assert(b && V2.Val.isInt() && !V2.HasSideEffects
+ assert(b && V2.Val.isInt() && !V2.HasSideEffects
&& "Case condition must evaluate to an integer constant.");
b = b; // silence unused variable warning
}
else
V2 = V1;
-
+
// FIXME: Eventually we should replace the logic below with a range
    // comparison, rather than concretizing the values within the range.
// This should be easy once we have "ranges" for NonLVals.
-
+
do {
- nonloc::ConcreteInt CaseVal(getBasicVals().getValue(V1.Val.getInt()));
- SVal Res = EvalBinOp(DefaultSt, BinaryOperator::EQ, CondV, CaseVal,
- getContext().IntTy);
-
- // Now "assume" that the case matches.
- if (const GRState* stateNew = state->assume(Res, true)) {
+ nonloc::ConcreteInt CaseVal(getBasicVals().getValue(V1.Val.getInt()));
+ DefinedOrUnknownSVal Res = SVator.EvalEQ(DefaultSt, CondV, CaseVal);
+
+ // Now "assume" that the case matches.
+ if (const GRState* stateNew = state->Assume(Res, true)) {
builder.generateCaseStmtNode(I, stateNew);
-
+
// If CondV evaluates to a constant, then we know that this
// is the *only* case that we can take, so stop evaluating the
// others.
if (isa<nonloc::ConcreteInt>(CondV))
return;
}
-
+
// Now "assume" that the case doesn't match. Add this state
// to the default state (if it is feasible).
- if (const GRState *stateNew = DefaultSt->assume(Res, false)) {
+ if (const GRState *stateNew = DefaultSt->Assume(Res, false)) {
defaultIsFeasible = true;
DefaultSt = stateNew;
}
@@ -854,15 +911,15 @@ void GRExprEngine::ProcessSwitch(SwitchNodeBuilder& builder) {
// Concretize the next value in the range.
if (V1.Val.getInt() == V2.Val.getInt())
break;
-
+
++V1.Val.getInt();
assert (V1.Val.getInt() <= V2.Val.getInt());
-
+
} while (true);
}
-
+
  // If we reach here, then we know that the default branch is
- // possible.
+ // possible.
if (defaultIsFeasible) builder.generateDefaultCaseNode(DefaultSt);
}
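
The concretization loop above is what supports GNU case ranges, where a single CaseStmt
carries both an LHS and an RHS constant. A hedged illustration of the analyzed-source
construct (a GCC/Clang extension, shown only as a sketch):

  // The 'case 1 ... 3' range is what makes V2 differ from V1; ProcessSwitch then
  // tests each concrete value in turn, as if it were case 1: case 2: case 3:.
  static bool inRange(int n) {
    switch (n) {
    case 1 ... 3:   // GNU case-range extension
      return true;
    default:
      return false;
    }
  }
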
@@ -870,74 +927,72 @@ void GRExprEngine::ProcessSwitch(SwitchNodeBuilder& builder) {
// Transfer functions: logical operations ('&&', '||').
//===----------------------------------------------------------------------===//
-void GRExprEngine::VisitLogicalExpr(BinaryOperator* B, NodeTy* Pred,
- NodeSet& Dst) {
-
+void GRExprEngine::VisitLogicalExpr(BinaryOperator* B, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst) {
+
assert(B->getOpcode() == BinaryOperator::LAnd ||
B->getOpcode() == BinaryOperator::LOr);
-
- assert(B == CurrentStmt && getCFG().isBlkExpr(B));
-
+
+ assert(B == CurrentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(B));
+
const GRState* state = GetState(Pred);
- SVal X = state->getBlkExprSVal(B);
+ SVal X = state->getSVal(B);
assert(X.isUndef());
-
- Expr* Ex = (Expr*) cast<UndefinedVal>(X).getData();
-
+
+ const Expr *Ex = (const Expr*) cast<UndefinedVal>(X).getData();
assert(Ex);
-
+
if (Ex == B->getRHS()) {
-
- X = state->getBlkExprSVal(Ex);
-
+ X = state->getSVal(Ex);
+
// Handle undefined values.
-
if (X.isUndef()) {
- MakeNode(Dst, B, Pred, state->bindBlkExpr(B, X));
+ MakeNode(Dst, B, Pred, state->BindExpr(B, X));
return;
}
+ DefinedOrUnknownSVal XD = cast<DefinedOrUnknownSVal>(X);
+
// We took the RHS. Because the value of the '&&' or '||' expression must
// evaluate to 0 or 1, we must assume the value of the RHS evaluates to 0
// or 1. Alternatively, we could take a lazy approach, and calculate this
// value later when necessary. We don't have the machinery in place for
// this right now, and since most logical expressions are used for branches,
- // the payoff is not likely to be large. Instead, we do eager evaluation.
- if (const GRState *newState = state->assume(X, true))
- MakeNode(Dst, B, Pred,
- newState->bindBlkExpr(B, ValMgr.makeIntVal(1U, B->getType())));
-
- if (const GRState *newState = state->assume(X, false))
- MakeNode(Dst, B, Pred,
- newState->bindBlkExpr(B, ValMgr.makeIntVal(0U, B->getType())));
+ // the payoff is not likely to be large. Instead, we do eager evaluation.
+ if (const GRState *newState = state->Assume(XD, true))
+ MakeNode(Dst, B, Pred,
+ newState->BindExpr(B, ValMgr.makeIntVal(1U, B->getType())));
+
+ if (const GRState *newState = state->Assume(XD, false))
+ MakeNode(Dst, B, Pred,
+ newState->BindExpr(B, ValMgr.makeIntVal(0U, B->getType())));
}
else {
// We took the LHS expression. Depending on whether we are '&&' or
// '||' we know what the value of the expression is via properties of
// the short-circuiting.
- X = ValMgr.makeIntVal(B->getOpcode() == BinaryOperator::LAnd ? 0U : 1U,
+ X = ValMgr.makeIntVal(B->getOpcode() == BinaryOperator::LAnd ? 0U : 1U,
B->getType());
- MakeNode(Dst, B, Pred, state->bindBlkExpr(B, X));
+ MakeNode(Dst, B, Pred, state->BindExpr(B, X));
}
}
-
+
//===----------------------------------------------------------------------===//
// Transfer functions: Loads and stores.
//===----------------------------------------------------------------------===//
-void GRExprEngine::VisitDeclRefExpr(DeclRefExpr* Ex, NodeTy* Pred, NodeSet& Dst,
- bool asLValue) {
-
- const GRState* state = GetState(Pred);
+void GRExprEngine::VisitDeclRefExpr(DeclRefExpr *Ex, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst, bool asLValue) {
- const NamedDecl* D = Ex->getDecl();
+ const GRState *state = GetState(Pred);
+ const NamedDecl *D = Ex->getDecl();
if (const VarDecl* VD = dyn_cast<VarDecl>(D)) {
- SVal V = state->getLValue(VD);
+ SVal V = state->getLValue(VD, Pred->getLocationContext());
if (asLValue)
- MakeNode(Dst, Ex, Pred, state->bindExpr(Ex, V),
+ MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V),
ProgramPoint::PostLValueKind);
else
EvalLoad(Dst, Ex, Pred, state, V);
@@ -947,29 +1002,30 @@ void GRExprEngine::VisitDeclRefExpr(DeclRefExpr* Ex, NodeTy* Pred, NodeSet& Dst,
assert(!asLValue && "EnumConstantDecl does not have lvalue.");
SVal V = ValMgr.makeIntVal(ED->getInitVal());
- MakeNode(Dst, Ex, Pred, state->bindExpr(Ex, V));
+ MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V));
return;
} else if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(D)) {
- assert(asLValue);
+    // This code is valid regardless of the value of 'asLValue'.
SVal V = ValMgr.getFunctionPointer(FD);
- MakeNode(Dst, Ex, Pred, state->bindExpr(Ex, V),
+ MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V),
ProgramPoint::PostLValueKind);
return;
}
-
+
assert (false &&
"ValueDecl support for this ValueDecl not implemented.");
}
/// VisitArraySubscriptExpr - Transfer function for array accesses
-void GRExprEngine::VisitArraySubscriptExpr(ArraySubscriptExpr* A, NodeTy* Pred,
- NodeSet& Dst, bool asLValue) {
-
+void GRExprEngine::VisitArraySubscriptExpr(ArraySubscriptExpr* A,
+ ExplodedNode* Pred,
+ ExplodedNodeSet& Dst, bool asLValue){
+
Expr* Base = A->getBase()->IgnoreParens();
Expr* Idx = A->getIdx()->IgnoreParens();
- NodeSet Tmp;
-
+ ExplodedNodeSet Tmp;
+
if (Base->getType()->isVectorType()) {
    // For a vector type, get its lvalue.
// FIXME: This may not be correct. Is the rvalue of a vector its location?
@@ -977,20 +1033,20 @@ void GRExprEngine::VisitArraySubscriptExpr(ArraySubscriptExpr* A, NodeTy* Pred,
// semantics.
VisitLValue(Base, Pred, Tmp);
}
- else
+ else
Visit(Base, Pred, Tmp); // Get Base's rvalue, which should be an LocVal.
-
- for (NodeSet::iterator I1=Tmp.begin(), E1=Tmp.end(); I1!=E1; ++I1) {
- NodeSet Tmp2;
+
+ for (ExplodedNodeSet::iterator I1=Tmp.begin(), E1=Tmp.end(); I1!=E1; ++I1) {
+ ExplodedNodeSet Tmp2;
Visit(Idx, *I1, Tmp2); // Evaluate the index.
-
- for (NodeSet::iterator I2=Tmp2.begin(), E2=Tmp2.end(); I2!=E2; ++I2) {
+
+ for (ExplodedNodeSet::iterator I2=Tmp2.begin(),E2=Tmp2.end();I2!=E2; ++I2) {
const GRState* state = GetState(*I2);
- SVal V = state->getLValue(A->getType(), state->getSVal(Base),
- state->getSVal(Idx));
+ SVal V = state->getLValue(A->getType(), state->getSVal(Idx),
+ state->getSVal(Base));
if (asLValue)
- MakeNode(Dst, A, *I2, state->bindExpr(A, V),
+ MakeNode(Dst, A, *I2, state->BindExpr(A, V),
ProgramPoint::PostLValueKind);
else
EvalLoad(Dst, A, *I2, state, V);
@@ -999,30 +1055,30 @@ void GRExprEngine::VisitArraySubscriptExpr(ArraySubscriptExpr* A, NodeTy* Pred,
}
/// VisitMemberExpr - Transfer function for member expressions.
-void GRExprEngine::VisitMemberExpr(MemberExpr* M, NodeTy* Pred,
- NodeSet& Dst, bool asLValue) {
-
+void GRExprEngine::VisitMemberExpr(MemberExpr* M, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst, bool asLValue) {
+
Expr* Base = M->getBase()->IgnoreParens();
- NodeSet Tmp;
-
- if (M->isArrow())
+ ExplodedNodeSet Tmp;
+
+ if (M->isArrow())
Visit(Base, Pred, Tmp); // p->f = ... or ... = p->f
else
VisitLValue(Base, Pred, Tmp); // x.f = ... or ... = x.f
-
+
FieldDecl *Field = dyn_cast<FieldDecl>(M->getMemberDecl());
if (!Field) // FIXME: skipping member expressions for non-fields
return;
- for (NodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
const GRState* state = GetState(*I);
// FIXME: Should we insert some assumption logic in here to determine
// if "Base" is a valid piece of memory? Before we put this assumption
// later when using FieldOffset lvals (which we no longer have).
- SVal L = state->getLValue(state->getSVal(Base), Field);
+ SVal L = state->getLValue(Field, state->getSVal(Base));
if (asLValue)
- MakeNode(Dst, M, *I, state->bindExpr(M, L),
+ MakeNode(Dst, M, *I, state->BindExpr(M, L),
ProgramPoint::PostLValueKind);
else
EvalLoad(Dst, M, *I, state, L);
@@ -1031,11 +1087,11 @@ void GRExprEngine::VisitMemberExpr(MemberExpr* M, NodeTy* Pred,
/// EvalBind - Handle the semantics of binding a value to a specific location.
/// This method is used by EvalStore and (soon) VisitDeclStmt, and others.
-void GRExprEngine::EvalBind(NodeSet& Dst, Expr* Ex, NodeTy* Pred,
- const GRState* state, SVal location, SVal Val) {
+void GRExprEngine::EvalBind(ExplodedNodeSet& Dst, Expr* Ex, ExplodedNode* Pred,
+ const GRState* state, SVal location, SVal Val) {
const GRState* newState = 0;
-
+
if (location.isUnknown()) {
// We know that the new state will be the same as the old state since
// the location of the binding is "unknown". Consequently, there
@@ -1053,7 +1109,7 @@ void GRExprEngine::EvalBind(NodeSet& Dst, Expr* Ex, NodeTy* Pred,
// doesn't do anything, just auto-propagate the current state.
GRStmtNodeBuilderRef BuilderRef(Dst, *Builder, *this, Pred, newState, Ex,
newState != state);
-
+
getTF().EvalBind(BuilderRef, location, Val);
}
@@ -1063,22 +1119,22 @@ void GRExprEngine::EvalBind(NodeSet& Dst, Expr* Ex, NodeTy* Pred,
/// @param state The current simulation state
/// @param location The location to store the value
/// @param Val The value to be stored
-void GRExprEngine::EvalStore(NodeSet& Dst, Expr* Ex, NodeTy* Pred,
+void GRExprEngine::EvalStore(ExplodedNodeSet& Dst, Expr* Ex, ExplodedNode* Pred,
const GRState* state, SVal location, SVal Val,
const void *tag) {
-
+
assert (Builder && "GRStmtNodeBuilder must be defined.");
-
+
// Evaluate the location (checks for bad dereferences).
Pred = EvalLocation(Ex, Pred, state, location, tag);
-
+
if (!Pred)
return;
assert (!location.isUndef());
state = GetState(Pred);
- // Proceed with the store.
+ // Proceed with the store.
SaveAndRestore<ProgramPoint::Kind> OldSPointKind(Builder->PointKind);
SaveAndRestore<const void*> OldTag(Builder->Tag);
Builder->PointKind = ProgramPoint::PostStoreKind;
@@ -1086,18 +1142,18 @@ void GRExprEngine::EvalStore(NodeSet& Dst, Expr* Ex, NodeTy* Pred,
EvalBind(Dst, Ex, Pred, state, location, Val);
}
-void GRExprEngine::EvalLoad(NodeSet& Dst, Expr* Ex, NodeTy* Pred,
+void GRExprEngine::EvalLoad(ExplodedNodeSet& Dst, Expr* Ex, ExplodedNode* Pred,
const GRState* state, SVal location,
const void *tag) {
- // Evaluate the location (checks for bad dereferences).
+ // Evaluate the location (checks for bad dereferences).
Pred = EvalLocation(Ex, Pred, state, location, tag);
-
+
if (!Pred)
return;
-
+
state = GetState(Pred);
-
+
// Proceed with the load.
ProgramPoint::Kind K = ProgramPoint::PostLoadKind;
@@ -1106,86 +1162,89 @@ void GRExprEngine::EvalLoad(NodeSet& Dst, Expr* Ex, NodeTy* Pred,
if (location.isUnknown()) {
// This is important. We must nuke the old binding.
- MakeNode(Dst, Ex, Pred, state->bindExpr(Ex, UnknownVal()), K, tag);
+ MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, UnknownVal()),
+ K, tag);
}
else {
SVal V = state->getSVal(cast<Loc>(location), Ex->getType());
- MakeNode(Dst, Ex, Pred, state->bindExpr(Ex, V), K, tag);
+ MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V), K, tag);
}
}
-void GRExprEngine::EvalStore(NodeSet& Dst, Expr* Ex, Expr* StoreE, NodeTy* Pred,
- const GRState* state, SVal location, SVal Val,
- const void *tag) {
-
- NodeSet TmpDst;
+void GRExprEngine::EvalStore(ExplodedNodeSet& Dst, Expr* Ex, Expr* StoreE,
+ ExplodedNode* Pred, const GRState* state,
+ SVal location, SVal Val, const void *tag) {
+
+ ExplodedNodeSet TmpDst;
EvalStore(TmpDst, StoreE, Pred, state, location, Val, tag);
- for (NodeSet::iterator I=TmpDst.begin(), E=TmpDst.end(); I!=E; ++I)
+ for (ExplodedNodeSet::iterator I=TmpDst.begin(), E=TmpDst.end(); I!=E; ++I)
MakeNode(Dst, Ex, *I, (*I)->getState(), ProgramPoint::PostStmtKind, tag);
}
-GRExprEngine::NodeTy* GRExprEngine::EvalLocation(Stmt* Ex, NodeTy* Pred,
- const GRState* state,
- SVal location,
- const void *tag) {
-
+ExplodedNode* GRExprEngine::EvalLocation(Stmt* Ex, ExplodedNode* Pred,
+ const GRState* state, SVal location,
+ const void *tag) {
+
SaveAndRestore<const void*> OldTag(Builder->Tag);
Builder->Tag = tag;
-
- // Check for loads/stores from/to undefined values.
+
+ // Check for loads/stores from/to undefined values.
if (location.isUndef()) {
- NodeTy* N =
+ ExplodedNode* N =
Builder->generateNode(Ex, state, Pred,
ProgramPoint::PostUndefLocationCheckFailedKind);
-
+
if (N) {
N->markAsSink();
UndefDeref.insert(N);
}
-
+
return 0;
}
-
+
// Check for loads/stores from/to unknown locations. Treat as No-Ops.
if (location.isUnknown())
return Pred;
-
+
  // During a load, one of two possible situations arises:
// (1) A crash, because the location (pointer) was NULL.
// (2) The location (pointer) is not NULL, and the dereference works.
- //
+ //
// We add these assumptions.
-
- Loc LV = cast<Loc>(location);
-
+
+ Loc LV = cast<Loc>(location);
+
// "Assume" that the pointer is not NULL.
- const GRState *StNotNull = state->assume(LV, true);
-
+ const GRState *StNotNull = state->Assume(LV, true);
+
// "Assume" that the pointer is NULL.
- const GRState *StNull = state->assume(LV, false);
+ const GRState *StNull = state->Assume(LV, false);
- if (StNull) {
+ if (StNull) {
// Use the Generic Data Map to mark in the state what lval was null.
const SVal* PersistentLV = getBasicVals().getPersistentSVal(LV);
StNull = StNull->set<GRState::NullDerefTag>(PersistentLV);
-
+
// We don't use "MakeNode" here because the node will be a sink
// and we have no intention of processing it later.
- NodeTy* NullNode =
- Builder->generateNode(Ex, StNull, Pred,
+ ExplodedNode* NullNode =
+ Builder->generateNode(Ex, StNull, Pred,
ProgramPoint::PostNullCheckFailedKind);
- if (NullNode) {
- NullNode->markAsSink();
+ if (NullNode) {
+ NullNode->markAsSink();
if (StNotNull) ImplicitNullDeref.insert(NullNode);
else ExplicitNullDeref.insert(NullNode);
}
}
-
+
if (!StNotNull)
return NULL;
+ // FIXME: Temporarily disable out-of-bounds checking until we make
+ // the logic reflect recent changes to CastRegion and friends.
+#if 0
// Check for out-of-bound array access.
if (isa<loc::MemRegionVal>(LV)) {
const MemRegion* R = cast<loc::MemRegionVal>(LV).getRegion();
@@ -1196,14 +1255,14 @@ GRExprEngine::NodeTy* GRExprEngine::EvalLocation(Stmt* Ex, NodeTy* Pred,
SVal NumElements = getStoreManager().getSizeInElements(StNotNull,
ER->getSuperRegion());
- const GRState * StInBound = StNotNull->assumeInBound(Idx, NumElements,
+ const GRState * StInBound = StNotNull->AssumeInBound(Idx, NumElements,
true);
- const GRState* StOutBound = StNotNull->assumeInBound(Idx, NumElements,
+ const GRState* StOutBound = StNotNull->AssumeInBound(Idx, NumElements,
false);
if (StOutBound) {
// Report warning. Make sink node manually.
- NodeTy* OOBNode =
+ ExplodedNode* OOBNode =
Builder->generateNode(Ex, StOutBound, Pred,
ProgramPoint::PostOutOfBoundsCheckFailedKind);
@@ -1223,7 +1282,8 @@ GRExprEngine::NodeTy* GRExprEngine::EvalLocation(Stmt* Ex, NodeTy* Pred,
StNotNull = StInBound;
}
}
-
+#endif
+
// Generate a new node indicating the checks succeed.
return Builder->generateNode(Ex, StNotNull, Pred,
ProgramPoint::PostLocationChecksSucceedKind);
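
In analyzed-source terms, the split performed here looks roughly like the following plain
C++ fragment (illustrative sketch, not part of the patch): the null state becomes a sink
that is reported, and only the non-null state survives the load.

  static int readThrough(int *p) {
    int x = *p;      // state splits here: the p == 0 state is a sink (null
                     // dereference); execution continues only where p != 0...
    if (!p)
      return -1;     // ...so on the surviving path this branch is infeasible.
    return x;
  }
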
@@ -1239,105 +1299,127 @@ GRExprEngine::NodeTy* GRExprEngine::EvalLocation(Stmt* Ex, NodeTy* Pred,
// http://developer.apple.com/documentation/Darwin/Reference/Manpages/man3
// atomic.3.html
//
-static bool EvalOSAtomicCompareAndSwap(ExplodedNodeSet<GRState>& Dst,
+static bool EvalOSAtomicCompareAndSwap(ExplodedNodeSet& Dst,
GRExprEngine& Engine,
- GRStmtNodeBuilder<GRState>& Builder,
- CallExpr* CE, SVal L,
- ExplodedNode<GRState>* Pred) {
+ GRStmtNodeBuilder& Builder,
+ CallExpr* CE, SVal L,
+ ExplodedNode* Pred) {
// Not enough arguments to match OSAtomicCompareAndSwap?
if (CE->getNumArgs() != 3)
return false;
-
+
ASTContext &C = Engine.getContext();
Expr *oldValueExpr = CE->getArg(0);
QualType oldValueType = C.getCanonicalType(oldValueExpr->getType());
Expr *newValueExpr = CE->getArg(1);
QualType newValueType = C.getCanonicalType(newValueExpr->getType());
-
+
// Do the types of 'oldValue' and 'newValue' match?
if (oldValueType != newValueType)
return false;
-
+
Expr *theValueExpr = CE->getArg(2);
- const PointerType *theValueType = theValueExpr->getType()->getAsPointerType();
-
+ const PointerType *theValueType =
+ theValueExpr->getType()->getAs<PointerType>();
+
// theValueType not a pointer?
if (!theValueType)
return false;
-
+
QualType theValueTypePointee =
C.getCanonicalType(theValueType->getPointeeType()).getUnqualifiedType();
-
+
// The pointee must match newValueType and oldValueType.
if (theValueTypePointee != newValueType)
return false;
-
+
static unsigned magic_load = 0;
static unsigned magic_store = 0;
const void *OSAtomicLoadTag = &magic_load;
const void *OSAtomicStoreTag = &magic_store;
-
+
// Load 'theValue'.
const GRState *state = Pred->getState();
- ExplodedNodeSet<GRState> Tmp;
+ ExplodedNodeSet Tmp;
SVal location = state->getSVal(theValueExpr);
Engine.EvalLoad(Tmp, theValueExpr, Pred, state, location, OSAtomicLoadTag);
- for (ExplodedNodeSet<GRState>::iterator I = Tmp.begin(), E = Tmp.end();
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end();
I != E; ++I) {
-
- ExplodedNode<GRState> *N = *I;
+
+ ExplodedNode *N = *I;
const GRState *stateLoad = N->getState();
- SVal theValueVal = stateLoad->getSVal(theValueExpr);
- SVal oldValueVal = stateLoad->getSVal(oldValueExpr);
-
- // Perform the comparison.
- SVal Cmp = Engine.EvalBinOp(stateLoad, BinaryOperator::EQ, theValueVal,
- oldValueVal, Engine.getContext().IntTy);
+ SVal theValueVal_untested = stateLoad->getSVal(theValueExpr);
+ SVal oldValueVal_untested = stateLoad->getSVal(oldValueExpr);
- const GRState *stateEqual = stateLoad->assume(Cmp, true);
+ // FIXME: Issue an error.
+ if (theValueVal_untested.isUndef() || oldValueVal_untested.isUndef()) {
+ return false;
+ }
+ DefinedOrUnknownSVal theValueVal =
+ cast<DefinedOrUnknownSVal>(theValueVal_untested);
+ DefinedOrUnknownSVal oldValueVal =
+ cast<DefinedOrUnknownSVal>(oldValueVal_untested);
+
+ SValuator &SVator = Engine.getSValuator();
+
+ // Perform the comparison.
+ DefinedOrUnknownSVal Cmp = SVator.EvalEQ(stateLoad, theValueVal,
+ oldValueVal);
+
+ const GRState *stateEqual = stateLoad->Assume(Cmp, true);
+
// Were they equal?
if (stateEqual) {
// Perform the store.
- ExplodedNodeSet<GRState> TmpStore;
- Engine.EvalStore(TmpStore, theValueExpr, N, stateEqual, location,
- stateEqual->getSVal(newValueExpr), OSAtomicStoreTag);
-
+ ExplodedNodeSet TmpStore;
+ SVal val = stateEqual->getSVal(newValueExpr);
+
+ // Handle implicit value casts.
+ if (const TypedRegion *R =
+ dyn_cast_or_null<TypedRegion>(location.getAsRegion())) {
+ llvm::tie(state, val) = SVator.EvalCast(val, state, R->getValueType(C),
+ newValueExpr->getType());
+ }
+
+ Engine.EvalStore(TmpStore, theValueExpr, N, stateEqual, location,
+ val, OSAtomicStoreTag);
+
// Now bind the result of the comparison.
- for (ExplodedNodeSet<GRState>::iterator I2 = TmpStore.begin(),
+ for (ExplodedNodeSet::iterator I2 = TmpStore.begin(),
E2 = TmpStore.end(); I2 != E2; ++I2) {
- ExplodedNode<GRState> *predNew = *I2;
+ ExplodedNode *predNew = *I2;
const GRState *stateNew = predNew->getState();
SVal Res = Engine.getValueManager().makeTruthVal(true, CE->getType());
- Engine.MakeNode(Dst, CE, predNew, stateNew->bindExpr(CE, Res));
+ Engine.MakeNode(Dst, CE, predNew, stateNew->BindExpr(CE, Res));
}
}
-
+
// Were they not equal?
- if (const GRState *stateNotEqual = stateLoad->assume(Cmp, false)) {
+ if (const GRState *stateNotEqual = stateLoad->Assume(Cmp, false)) {
SVal Res = Engine.getValueManager().makeTruthVal(false, CE->getType());
- Engine.MakeNode(Dst, CE, N, stateNotEqual->bindExpr(CE, Res));
+ Engine.MakeNode(Dst, CE, N, stateNotEqual->BindExpr(CE, Res));
}
}
-
+
return true;
}
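
The value-level behaviour simulated above can be summarised by a single-threaded plain C++
model (an assumption-level sketch of the compare-and-swap semantics, not the real atomic
API and not analyzer code):

  // Compare *theValue to oldValue; on a match store newValue and report success,
  // otherwise leave *theValue alone and report failure. This mirrors the
  // load / EvalEQ / Assume(true|false) / store sequence above.
  static bool modelCompareAndSwap(int oldValue, int newValue, int *theValue) {
    if (*theValue != oldValue)
      return false;          // "not equal" path: the call is bound to false
    *theValue = newValue;    // "equal" path: perform the store...
    return true;             // ...and bind true to the call
  }
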
-static bool EvalOSAtomic(ExplodedNodeSet<GRState>& Dst,
+static bool EvalOSAtomic(ExplodedNodeSet& Dst,
GRExprEngine& Engine,
- GRStmtNodeBuilder<GRState>& Builder,
+ GRStmtNodeBuilder& Builder,
CallExpr* CE, SVal L,
- ExplodedNode<GRState>* Pred) {
+ ExplodedNode* Pred) {
const FunctionDecl* FD = L.getAsFunctionDecl();
if (!FD)
return false;
const char *FName = FD->getNameAsCString();
-
+
// Check for compare and swap.
if (strncmp(FName, "OSAtomicCompareAndSwap", 22) == 0 ||
strncmp(FName, "objc_atomicCompareAndSwap", 25) == 0)
@@ -1350,37 +1432,163 @@ static bool EvalOSAtomic(ExplodedNodeSet<GRState>& Dst,
//===----------------------------------------------------------------------===//
// Transfer function: Function calls.
//===----------------------------------------------------------------------===//
+static void MarkNoReturnFunction(const FunctionDecl *FD, CallExpr *CE,
+ const GRState *state,
+ GRStmtNodeBuilder *Builder) {
+ if (!FD)
+ return;
+
+ if (FD->getAttr<NoReturnAttr>() ||
+ FD->getAttr<AnalyzerNoReturnAttr>())
+ Builder->BuildSinks = true;
+ else {
+ // HACK: Some functions are not marked noreturn, and don't return.
+ // Here are a few hardwired ones. If this takes too long, we can
+ // potentially cache these results.
+ const char* s = FD->getIdentifier()->getName();
+ unsigned n = strlen(s);
+
+ switch (n) {
+ default:
+ break;
-void GRExprEngine::EvalCall(NodeSet& Dst, CallExpr* CE, SVal L, NodeTy* Pred) {
+ case 4:
+ if (!memcmp(s, "exit", 4)) Builder->BuildSinks = true;
+ break;
+
+ case 5:
+ if (!memcmp(s, "panic", 5)) Builder->BuildSinks = true;
+ else if (!memcmp(s, "error", 5)) {
+ if (CE->getNumArgs() > 0) {
+ SVal X = state->getSVal(*CE->arg_begin());
+ // FIXME: use Assume to inspect the possible symbolic value of
+ // X. Also check the specific signature of error().
+ nonloc::ConcreteInt* CI = dyn_cast<nonloc::ConcreteInt>(&X);
+ if (CI && CI->getValue() != 0)
+ Builder->BuildSinks = true;
+ }
+ }
+ break;
+
+ case 6:
+ if (!memcmp(s, "Assert", 6)) {
+ Builder->BuildSinks = true;
+ break;
+ }
+
+ // FIXME: This is just a wrapper around throwing an exception.
+ // Eventually inter-procedural analysis should handle this easily.
+ if (!memcmp(s, "ziperr", 6)) Builder->BuildSinks = true;
+
+ break;
+
+ case 7:
+ if (!memcmp(s, "assfail", 7)) Builder->BuildSinks = true;
+ break;
+
+ case 8:
+ if (!memcmp(s ,"db_error", 8) ||
+ !memcmp(s, "__assert", 8))
+ Builder->BuildSinks = true;
+ break;
+
+ case 12:
+ if (!memcmp(s, "__assert_rtn", 12)) Builder->BuildSinks = true;
+ break;
+
+ case 13:
+ if (!memcmp(s, "__assert_fail", 13)) Builder->BuildSinks = true;
+ break;
+
+ case 14:
+ if (!memcmp(s, "dtrace_assfail", 14) ||
+ !memcmp(s, "yy_fatal_error", 14))
+ Builder->BuildSinks = true;
+ break;
+
+ case 26:
+ if (!memcmp(s, "_XCAssertionFailureHandler", 26) ||
+ !memcmp(s, "_DTAssertionFailureHandler", 26) ||
+ !memcmp(s, "_TSAssertionFailureHandler", 26))
+ Builder->BuildSinks = true;
+
+ break;
+ }
+
+ }
+}
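
The hardwired name list above is only a fallback; a declaration can opt in explicitly
through the attributes checked at the top of this function. A hedged example of the
spellings assumed to correspond to NoReturnAttr and AnalyzerNoReturnAttr:

  // Either attribute causes Builder->BuildSinks to be set for calls to the function.
  void fatal_error(const char *msg) __attribute__((noreturn));
  void report_and_stop(const char *msg) __attribute__((analyzer_noreturn));
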
+
+bool GRExprEngine::EvalBuiltinFunction(const FunctionDecl *FD, CallExpr *CE,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ if (!FD)
+ return false;
+
+ unsigned id = FD->getBuiltinID();
+ if (!id)
+ return false;
+
+ const GRState *state = Pred->getState();
+
+ switch (id) {
+ case Builtin::BI__builtin_expect: {
+ // For __builtin_expect, just return the value of the subexpression.
+ assert (CE->arg_begin() != CE->arg_end());
+ SVal X = state->getSVal(*(CE->arg_begin()));
+ MakeNode(Dst, CE, Pred, state->BindExpr(CE, X));
+ return true;
+ }
+
+ case Builtin::BI__builtin_alloca: {
+ // FIXME: Refactor into StoreManager itself?
+ MemRegionManager& RM = getStateManager().getRegionManager();
+ const MemRegion* R =
+ RM.getAllocaRegion(CE, Builder->getCurrentBlockCount());
+
+ // Set the extent of the region in bytes. This enables us to use the
+ // SVal of the argument directly. If we save the extent in bits, we
+ // cannot represent values like symbol*8.
+ SVal Extent = state->getSVal(*(CE->arg_begin()));
+ state = getStoreManager().setExtent(state, R, Extent);
+ MakeNode(Dst, CE, Pred, state->BindExpr(CE, loc::MemRegionVal(R)));
+ return true;
+ }
+ }
+
+ return false;
+}
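
To make the first case concrete: __builtin_expect is the GCC/Clang branch-prediction hint,
and semantically the call just yields its first argument, which is why the code above binds
the CallExpr to that SVal and drops the hint. A small illustrative use (compiler extension,
shown only as a sketch):

  // __builtin_expect(E, C) has the value of E; C is only a prediction hint.
  static bool isUnexpectedlyNull(void *p) {
    if (__builtin_expect(p == 0, 0))  // the analyzer just sees 'p == 0'
      return true;
    return false;
  }
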
+
+void GRExprEngine::EvalCall(ExplodedNodeSet& Dst, CallExpr* CE, SVal L,
+ ExplodedNode* Pred) {
assert (Builder && "GRStmtNodeBuilder must be defined.");
-
+
// FIXME: Allow us to chain together transfer functions.
if (EvalOSAtomic(Dst, *this, *Builder, CE, L, Pred))
return;
-
+
getTF().EvalCall(Dst, *this, *Builder, CE, L, Pred);
}
-void GRExprEngine::VisitCall(CallExpr* CE, NodeTy* Pred,
+void GRExprEngine::VisitCall(CallExpr* CE, ExplodedNode* Pred,
CallExpr::arg_iterator AI,
CallExpr::arg_iterator AE,
- NodeSet& Dst)
-{
+ ExplodedNodeSet& Dst) {
// Determine the type of function we're calling (if available).
const FunctionProtoType *Proto = NULL;
QualType FnType = CE->getCallee()->IgnoreParens()->getType();
- if (const PointerType *FnTypePtr = FnType->getAsPointerType())
- Proto = FnTypePtr->getPointeeType()->getAsFunctionProtoType();
+ if (const PointerType *FnTypePtr = FnType->getAs<PointerType>())
+ Proto = FnTypePtr->getPointeeType()->getAs<FunctionProtoType>();
VisitCallRec(CE, Pred, AI, AE, Dst, Proto, /*ParamIdx=*/0);
}
-void GRExprEngine::VisitCallRec(CallExpr* CE, NodeTy* Pred,
+void GRExprEngine::VisitCallRec(CallExpr* CE, ExplodedNode* Pred,
CallExpr::arg_iterator AI,
CallExpr::arg_iterator AE,
- NodeSet& Dst, const FunctionProtoType *Proto,
+ ExplodedNodeSet& Dst,
+ const FunctionProtoType *Proto,
unsigned ParamIdx) {
-
+
// Process the arguments.
if (AI != AE) {
// If the call argument is being bound to a reference parameter,
@@ -1389,201 +1597,63 @@ void GRExprEngine::VisitCallRec(CallExpr* CE, NodeTy* Pred,
if (Proto && ParamIdx < Proto->getNumArgs())
VisitAsLvalue = Proto->getArgType(ParamIdx)->isReferenceType();
- NodeSet DstTmp;
+ ExplodedNodeSet DstTmp;
if (VisitAsLvalue)
- VisitLValue(*AI, Pred, DstTmp);
+ VisitLValue(*AI, Pred, DstTmp);
else
- Visit(*AI, Pred, DstTmp);
+ Visit(*AI, Pred, DstTmp);
++AI;
-
- for (NodeSet::iterator DI=DstTmp.begin(), DE=DstTmp.end(); DI != DE; ++DI)
+
+ for (ExplodedNodeSet::iterator DI=DstTmp.begin(), DE=DstTmp.end(); DI != DE;
+ ++DI)
VisitCallRec(CE, *DI, AI, AE, Dst, Proto, ParamIdx + 1);
-
+
return;
}
// If we reach here we have processed all of the arguments. Evaluate
// the callee expression.
-
- NodeSet DstTmp;
+ ExplodedNodeSet DstTmp;
Expr* Callee = CE->getCallee()->IgnoreParens();
- Visit(Callee, Pred, DstTmp);
-
+ { // Enter new scope to make the lifetime of 'DstTmp2' bounded.
+ ExplodedNodeSet DstTmp2;
+ Visit(Callee, Pred, DstTmp2);
+
+ // Perform the previsit of the CallExpr, storing the results in DstTmp.
+ CheckerVisit(CE, DstTmp, DstTmp2, true);
+ }
+
// Finally, evaluate the function call.
- for (NodeSet::iterator DI = DstTmp.begin(), DE = DstTmp.end(); DI!=DE; ++DI) {
+ for (ExplodedNodeSet::iterator DI = DstTmp.begin(), DE = DstTmp.end();
+ DI != DE; ++DI) {
const GRState* state = GetState(*DI);
SVal L = state->getSVal(Callee);
// FIXME: Add support for symbolic function calls (calls involving
// function pointer values that are symbolic).
-
- // Check for undefined control-flow or calls to NULL.
-
- if (L.isUndef() || isa<loc::ConcreteInt>(L)) {
- NodeTy* N = Builder->generateNode(CE, state, *DI);
-
- if (N) {
- N->markAsSink();
- BadCalls.insert(N);
- }
-
- continue;
- }
-
+
// Check for the "noreturn" attribute.
-
+
SaveAndRestore<bool> OldSink(Builder->BuildSinks);
const FunctionDecl* FD = L.getAsFunctionDecl();
- if (FD) {
- if (FD->getAttr<NoReturnAttr>() ||
- FD->getAttr<AnalyzerNoReturnAttr>())
- Builder->BuildSinks = true;
- else {
- // HACK: Some functions are not marked noreturn, and don't return.
- // Here are a few hardwired ones. If this takes too long, we can
- // potentially cache these results.
- const char* s = FD->getIdentifier()->getName();
- unsigned n = strlen(s);
-
- switch (n) {
- default:
- break;
-
- case 4:
- if (!memcmp(s, "exit", 4)) Builder->BuildSinks = true;
- break;
-
- case 5:
- if (!memcmp(s, "panic", 5)) Builder->BuildSinks = true;
- else if (!memcmp(s, "error", 5)) {
- if (CE->getNumArgs() > 0) {
- SVal X = state->getSVal(*CE->arg_begin());
- // FIXME: use Assume to inspect the possible symbolic value of
- // X. Also check the specific signature of error().
- nonloc::ConcreteInt* CI = dyn_cast<nonloc::ConcreteInt>(&X);
- if (CI && CI->getValue() != 0)
- Builder->BuildSinks = true;
- }
- }
- break;
-
- case 6:
- if (!memcmp(s, "Assert", 6)) {
- Builder->BuildSinks = true;
- break;
- }
-
- // FIXME: This is just a wrapper around throwing an exception.
- // Eventually inter-procedural analysis should handle this easily.
- if (!memcmp(s, "ziperr", 6)) Builder->BuildSinks = true;
- break;
-
- case 7:
- if (!memcmp(s, "assfail", 7)) Builder->BuildSinks = true;
- break;
-
- case 8:
- if (!memcmp(s ,"db_error", 8) ||
- !memcmp(s, "__assert", 8))
- Builder->BuildSinks = true;
- break;
-
- case 12:
- if (!memcmp(s, "__assert_rtn", 12)) Builder->BuildSinks = true;
- break;
-
- case 13:
- if (!memcmp(s, "__assert_fail", 13)) Builder->BuildSinks = true;
- break;
-
- case 14:
- if (!memcmp(s, "dtrace_assfail", 14) ||
- !memcmp(s, "yy_fatal_error", 14))
- Builder->BuildSinks = true;
- break;
-
- case 26:
- if (!memcmp(s, "_XCAssertionFailureHandler", 26) ||
- !memcmp(s, "_DTAssertionFailureHandler", 26) ||
- !memcmp(s, "_TSAssertionFailureHandler", 26))
- Builder->BuildSinks = true;
+ MarkNoReturnFunction(FD, CE, state, Builder);
- break;
- }
-
- }
- }
-
// Evaluate the call.
+ if (EvalBuiltinFunction(FD, CE, *DI, Dst))
+ continue;
- if (FD) {
-
- if (unsigned id = FD->getBuiltinID(getContext()))
- switch (id) {
- case Builtin::BI__builtin_expect: {
- // For __builtin_expect, just return the value of the subexpression.
- assert (CE->arg_begin() != CE->arg_end());
- SVal X = state->getSVal(*(CE->arg_begin()));
- MakeNode(Dst, CE, *DI, state->bindExpr(CE, X));
- continue;
- }
-
- case Builtin::BI__builtin_alloca: {
- // FIXME: Refactor into StoreManager itself?
- MemRegionManager& RM = getStateManager().getRegionManager();
- const MemRegion* R =
- RM.getAllocaRegion(CE, Builder->getCurrentBlockCount());
-
- // Set the extent of the region in bytes. This enables us to use the
- // SVal of the argument directly. If we save the extent in bits, we
- // cannot represent values like symbol*8.
- SVal Extent = state->getSVal(*(CE->arg_begin()));
- state = getStoreManager().setExtent(state, R, Extent);
-
- MakeNode(Dst, CE, *DI, state->bindExpr(CE, loc::MemRegionVal(R)));
- continue;
- }
-
- default:
- break;
- }
- }
-
- // Check any arguments passed-by-value against being undefined.
-
- bool badArg = false;
-
- for (CallExpr::arg_iterator I = CE->arg_begin(), E = CE->arg_end();
- I != E; ++I) {
+ // Dispatch to the plug-in transfer function.
- if (GetState(*DI)->getSVal(*I).isUndef()) {
- NodeTy* N = Builder->generateNode(CE, GetState(*DI), *DI);
-
- if (N) {
- N->markAsSink();
- UndefArgs[N] = *I;
- }
-
- badArg = true;
- break;
- }
- }
-
- if (badArg)
- continue;
-
- // Dispatch to the plug-in transfer function.
-
unsigned size = Dst.size();
SaveOr OldHasGen(Builder->HasGeneratedNode);
EvalCall(Dst, CE, L, *DI);
-
+
    // Handle the case where no nodes were generated. Auto-generate a node that
// contains the updated state if we aren't generating sinks.
-
+
if (!Builder->BuildSinks && Dst.size() == size &&
!Builder->HasGeneratedNode)
MakeNode(Dst, CE, *DI, state);
@@ -1597,35 +1667,38 @@ void GRExprEngine::VisitCallRec(CallExpr* CE, NodeTy* Pred,
static std::pair<const void*,const void*> EagerlyAssumeTag
= std::pair<const void*,const void*>(&EagerlyAssumeTag,0);
-void GRExprEngine::EvalEagerlyAssume(NodeSet &Dst, NodeSet &Src, Expr *Ex) {
- for (NodeSet::iterator I=Src.begin(), E=Src.end(); I!=E; ++I) {
- NodeTy *Pred = *I;
-
+void GRExprEngine::EvalEagerlyAssume(ExplodedNodeSet &Dst, ExplodedNodeSet &Src,
+ Expr *Ex) {
+ for (ExplodedNodeSet::iterator I=Src.begin(), E=Src.end(); I!=E; ++I) {
+ ExplodedNode *Pred = *I;
+
    // Test if the previous node was at the same expression. This can happen
// when the expression fails to evaluate to anything meaningful and
// (as an optimization) we don't generate a node.
- ProgramPoint P = Pred->getLocation();
+ ProgramPoint P = Pred->getLocation();
if (!isa<PostStmt>(P) || cast<PostStmt>(P).getStmt() != Ex) {
- Dst.Add(Pred);
+ Dst.Add(Pred);
continue;
- }
+ }
- const GRState* state = Pred->getState();
- SVal V = state->getSVal(Ex);
- if (isa<nonloc::SymExprVal>(V)) {
+ const GRState* state = Pred->getState();
+ SVal V = state->getSVal(Ex);
+ if (nonloc::SymExprVal *SEV = dyn_cast<nonloc::SymExprVal>(&V)) {
// First assume that the condition is true.
- if (const GRState *stateTrue = state->assume(V, true)) {
- stateTrue = stateTrue->bindExpr(Ex,
+ if (const GRState *stateTrue = state->Assume(*SEV, true)) {
+ stateTrue = stateTrue->BindExpr(Ex,
ValMgr.makeIntVal(1U, Ex->getType()));
- Dst.Add(Builder->generateNode(PostStmtCustom(Ex, &EagerlyAssumeTag),
+ Dst.Add(Builder->generateNode(PostStmtCustom(Ex,
+ &EagerlyAssumeTag, Pred->getLocationContext()),
stateTrue, Pred));
}
-
+
// Next, assume that the condition is false.
- if (const GRState *stateFalse = state->assume(V, false)) {
- stateFalse = stateFalse->bindExpr(Ex,
+ if (const GRState *stateFalse = state->Assume(*SEV, false)) {
+ stateFalse = stateFalse->BindExpr(Ex,
ValMgr.makeIntVal(0U, Ex->getType()));
- Dst.Add(Builder->generateNode(PostStmtCustom(Ex, &EagerlyAssumeTag),
+ Dst.Add(Builder->generateNode(PostStmtCustom(Ex, &EagerlyAssumeTag,
+ Pred->getLocationContext()),
stateFalse, Pred));
}
}
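
In analyzed-source terms, the eager assumption above turns a symbolic comparison value into
an immediate state split (plain C++ sketch, not analyzer code):

  static int classify(int x) {
    int isZero = (x == 0);  // with eager assumptions the state forks here:
                            //   path 1: isZero == 1 and the constraint x == 0 holds
                            //   path 2: isZero == 0 and the constraint x != 0 holds
    return isZero;          // without them, isZero would stay symbolic until a
                            // branch actually tested it
  }
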
@@ -1638,21 +1711,20 @@ void GRExprEngine::EvalEagerlyAssume(NodeSet &Dst, NodeSet &Src, Expr *Ex) {
// Transfer function: Objective-C ivar references.
//===----------------------------------------------------------------------===//
-void GRExprEngine::VisitObjCIvarRefExpr(ObjCIvarRefExpr* Ex,
- NodeTy* Pred, NodeSet& Dst,
- bool asLValue) {
-
+void GRExprEngine::VisitObjCIvarRefExpr(ObjCIvarRefExpr* Ex, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst, bool asLValue) {
+
Expr* Base = cast<Expr>(Ex->getBase());
- NodeSet Tmp;
+ ExplodedNodeSet Tmp;
Visit(Base, Pred, Tmp);
-
- for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
SVal BaseVal = state->getSVal(Base);
SVal location = state->getLValue(Ex->getDecl(), BaseVal);
-
+
if (asLValue)
- MakeNode(Dst, Ex, *I, state->bindExpr(Ex, location));
+ MakeNode(Dst, Ex, *I, state->BindExpr(Ex, location));
else
EvalLoad(Dst, Ex, *I, state, location);
}
@@ -1663,8 +1735,8 @@ void GRExprEngine::VisitObjCIvarRefExpr(ObjCIvarRefExpr* Ex,
//===----------------------------------------------------------------------===//
void GRExprEngine::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S,
- NodeTy* Pred, NodeSet& Dst) {
-
+ ExplodedNode* Pred, ExplodedNodeSet& Dst) {
+
// ObjCForCollectionStmts are processed in two places. This method
// handles the case where an ObjCForCollectionStmt* occurs as one of the
// statements within a basic block. This transfer function does two things:
@@ -1676,7 +1748,7 @@ void GRExprEngine::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S,
// whether or not the container has any more elements. This value
// will be tested in ProcessBranch. We need to explicitly bind
// this value because a container can contain nil elements.
- //
+ //
// FIXME: Eventually this logic should actually do dispatches to
// 'countByEnumeratingWithState:objects:count:' (NSFastEnumeration).
// This will require simulating a temporary NSFastEnumerationState, either
@@ -1689,51 +1761,51 @@ void GRExprEngine::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S,
// For now: simulate (1) by assigning either a symbol or nil if the
// container is empty. Thus this transfer function will by default
// result in state splitting.
-
+
Stmt* elem = S->getElement();
SVal ElementV;
-
+
if (DeclStmt* DS = dyn_cast<DeclStmt>(elem)) {
VarDecl* ElemD = cast<VarDecl>(DS->getSingleDecl());
assert (ElemD->getInit() == 0);
- ElementV = GetState(Pred)->getLValue(ElemD);
+ ElementV = GetState(Pred)->getLValue(ElemD, Pred->getLocationContext());
VisitObjCForCollectionStmtAux(S, Pred, Dst, ElementV);
return;
}
- NodeSet Tmp;
+ ExplodedNodeSet Tmp;
VisitLValue(cast<Expr>(elem), Pred, Tmp);
-
- for (NodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I!=E; ++I) {
+
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
VisitObjCForCollectionStmtAux(S, *I, Dst, state->getSVal(elem));
}
}
void GRExprEngine::VisitObjCForCollectionStmtAux(ObjCForCollectionStmt* S,
- NodeTy* Pred, NodeSet& Dst,
+ ExplodedNode* Pred, ExplodedNodeSet& Dst,
SVal ElementV) {
-
-
+
+
// Get the current state. Use 'EvalLocation' to determine if it is a null
// pointer, etc.
Stmt* elem = S->getElement();
-
+
Pred = EvalLocation(elem, Pred, GetState(Pred), ElementV);
if (!Pred)
return;
-
+
const GRState *state = GetState(Pred);
// Handle the case where the container still has elements.
SVal TrueV = ValMgr.makeTruthVal(1);
- const GRState *hasElems = state->bindExpr(S, TrueV);
-
+ const GRState *hasElems = state->BindExpr(S, TrueV);
+
// Handle the case where the container has no elements.
SVal FalseV = ValMgr.makeTruthVal(0);
- const GRState *noElems = state->bindExpr(S, FalseV);
-
+ const GRState *noElems = state->BindExpr(S, FalseV);
+
if (loc::MemRegionVal* MV = dyn_cast<loc::MemRegionVal>(&ElementV))
if (const TypedRegion* R = dyn_cast<TypedRegion>(MV->getRegion())) {
// FIXME: The proper thing to do is to really iterate over the
@@ -1747,10 +1819,10 @@ void GRExprEngine::VisitObjCForCollectionStmtAux(ObjCForCollectionStmt* S,
hasElems = hasElems->bindLoc(ElementV, V);
// Bind the location to 'nil' on the false branch.
- SVal nilV = ValMgr.makeIntVal(0, T);
- noElems = noElems->bindLoc(ElementV, nilV);
+ SVal nilV = ValMgr.makeIntVal(0, T);
+ noElems = noElems->bindLoc(ElementV, nilV);
}
-
+
// Create the new nodes.
MakeNode(Dst, S, Pred, hasElems);
MakeNode(Dst, S, Pred, noElems);
@@ -1760,113 +1832,115 @@ void GRExprEngine::VisitObjCForCollectionStmtAux(ObjCForCollectionStmt* S,
// Transfer function: Objective-C message expressions.
//===----------------------------------------------------------------------===//
-void GRExprEngine::VisitObjCMessageExpr(ObjCMessageExpr* ME, NodeTy* Pred,
- NodeSet& Dst){
-
+void GRExprEngine::VisitObjCMessageExpr(ObjCMessageExpr* ME, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst){
+
VisitObjCMessageExprArgHelper(ME, ME->arg_begin(), ME->arg_end(),
Pred, Dst);
-}
+}
void GRExprEngine::VisitObjCMessageExprArgHelper(ObjCMessageExpr* ME,
ObjCMessageExpr::arg_iterator AI,
ObjCMessageExpr::arg_iterator AE,
- NodeTy* Pred, NodeSet& Dst) {
+ ExplodedNode* Pred, ExplodedNodeSet& Dst) {
if (AI == AE) {
-
+
// Process the receiver.
-
+
if (Expr* Receiver = ME->getReceiver()) {
- NodeSet Tmp;
+ ExplodedNodeSet Tmp;
Visit(Receiver, Pred, Tmp);
-
- for (NodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI != NE; ++NI)
+
+ for (ExplodedNodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI != NE;
+ ++NI)
VisitObjCMessageExprDispatchHelper(ME, *NI, Dst);
-
+
return;
}
-
+
VisitObjCMessageExprDispatchHelper(ME, Pred, Dst);
return;
}
-
- NodeSet Tmp;
+
+ ExplodedNodeSet Tmp;
Visit(*AI, Pred, Tmp);
-
+
++AI;
-
- for (NodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI != NE; ++NI)
+
+ for (ExplodedNodeSet::iterator NI = Tmp.begin(), NE = Tmp.end();NI != NE;++NI)
VisitObjCMessageExprArgHelper(ME, AI, AE, *NI, Dst);
}
void GRExprEngine::VisitObjCMessageExprDispatchHelper(ObjCMessageExpr* ME,
- NodeTy* Pred,
- NodeSet& Dst) {
-
- // FIXME: More logic for the processing the method call.
-
+ ExplodedNode* Pred,
+ ExplodedNodeSet& Dst) {
+
+  // FIXME: More logic for processing the method call.
+
const GRState* state = GetState(Pred);
bool RaisesException = false;
-
-
+
+
if (Expr* Receiver = ME->getReceiver()) {
-
- SVal L = state->getSVal(Receiver);
-
- // Check for undefined control-flow.
- if (L.isUndef()) {
- NodeTy* N = Builder->generateNode(ME, state, Pred);
-
+
+ SVal L_untested = state->getSVal(Receiver);
+
+ // Check for undefined control-flow.
+ if (L_untested.isUndef()) {
+ ExplodedNode* N = Builder->generateNode(ME, state, Pred);
+
if (N) {
N->markAsSink();
UndefReceivers.insert(N);
}
-
+
return;
}
-
- // "Assume" that the receiver is not NULL.
- const GRState *StNotNull = state->assume(L, true);
-
- // "Assume" that the receiver is NULL.
- const GRState *StNull = state->assume(L, false);
-
+
+ // "Assume" that the receiver is not NULL.
+ DefinedOrUnknownSVal L = cast<DefinedOrUnknownSVal>(L_untested);
+ const GRState *StNotNull = state->Assume(L, true);
+
+ // "Assume" that the receiver is NULL.
+ const GRState *StNull = state->Assume(L, false);
+
if (StNull) {
QualType RetTy = ME->getType();
-
+
// Check if the receiver was nil and the return value a struct.
- if(RetTy->isRecordType()) {
- if (BR.getParentMap().isConsumedExpr(ME)) {
+ if (RetTy->isRecordType()) {
+ if (Pred->getParentMap().isConsumedExpr(ME)) {
// The [0 ...] expressions will return garbage. Flag either an
// explicit or implicit error. Because of the structure of this
            // function we currently do not bifurcate the state graph at
// this point.
// FIXME: We should bifurcate and fill the returned struct with
- // garbage.
- if (NodeTy* N = Builder->generateNode(ME, StNull, Pred)) {
+ // garbage.
+ if (ExplodedNode* N = Builder->generateNode(ME, StNull, Pred)) {
N->markAsSink();
if (StNotNull)
NilReceiverStructRetImplicit.insert(N);
else
- NilReceiverStructRetExplicit.insert(N);
+ NilReceiverStructRetExplicit.insert(N);
}
}
}
else {
ASTContext& Ctx = getContext();
if (RetTy != Ctx.VoidTy) {
- if (BR.getParentMap().isConsumedExpr(ME)) {
+ if (Pred->getParentMap().isConsumedExpr(ME)) {
// sizeof(void *)
const uint64_t voidPtrSize = Ctx.getTypeSize(Ctx.VoidPtrTy);
// sizeof(return type)
const uint64_t returnTypeSize = Ctx.getTypeSize(ME->getType());
- if(voidPtrSize < returnTypeSize) {
- if (NodeTy* N = Builder->generateNode(ME, StNull, Pred)) {
+ if (voidPtrSize < returnTypeSize) {
+ if (ExplodedNode* N = Builder->generateNode(ME, StNull, Pred)) {
N->markAsSink();
- if(StNotNull)
+ if (StNotNull)
NilReceiverLargerThanVoidPtrRetImplicit.insert(N);
else
- NilReceiverLargerThanVoidPtrRetExplicit.insert(N);
+ NilReceiverLargerThanVoidPtrRetExplicit.insert(N);
}
}
else if (!StNotNull) {
@@ -1884,7 +1958,7 @@ void GRExprEngine::VisitObjCMessageExprDispatchHelper(ObjCMessageExpr* ME,
// of this case unless we have *a lot* more knowledge.
//
SVal V = ValMgr.makeZeroVal(ME->getType());
- MakeNode(Dst, ME, Pred, StNull->bindExpr(ME, V));
+ MakeNode(Dst, ME, Pred, StNull->BindExpr(ME, V));
return;
}
}
@@ -1894,99 +1968,99 @@ void GRExprEngine::VisitObjCMessageExprDispatchHelper(ObjCMessageExpr* ME,
// of this method should assume that the receiver is not nil.
if (!StNotNull)
return;
-
+
state = StNotNull;
}
-
+
// Check if the "raise" message was sent.
if (ME->getSelector() == RaiseSel)
RaisesException = true;
}
else {
-
+
IdentifierInfo* ClsName = ME->getClassName();
Selector S = ME->getSelector();
-
+
// Check for special instance methods.
-
- if (!NSExceptionII) {
+
+ if (!NSExceptionII) {
ASTContext& Ctx = getContext();
-
+
NSExceptionII = &Ctx.Idents.get("NSException");
}
-
+
if (ClsName == NSExceptionII) {
-
+
enum { NUM_RAISE_SELECTORS = 2 };
-
+
// Lazily create a cache of the selectors.
if (!NSExceptionInstanceRaiseSelectors) {
-
+
ASTContext& Ctx = getContext();
-
+
NSExceptionInstanceRaiseSelectors = new Selector[NUM_RAISE_SELECTORS];
-
+
llvm::SmallVector<IdentifierInfo*, NUM_RAISE_SELECTORS> II;
unsigned idx = 0;
-
- // raise:format:
+
+ // raise:format:
II.push_back(&Ctx.Idents.get("raise"));
- II.push_back(&Ctx.Idents.get("format"));
+ II.push_back(&Ctx.Idents.get("format"));
NSExceptionInstanceRaiseSelectors[idx++] =
- Ctx.Selectors.getSelector(II.size(), &II[0]);
-
- // raise:format::arguments:
+ Ctx.Selectors.getSelector(II.size(), &II[0]);
+
+ // raise:format::arguments:
II.push_back(&Ctx.Idents.get("arguments"));
NSExceptionInstanceRaiseSelectors[idx++] =
Ctx.Selectors.getSelector(II.size(), &II[0]);
}
-
+
for (unsigned i = 0; i < NUM_RAISE_SELECTORS; ++i)
if (S == NSExceptionInstanceRaiseSelectors[i]) {
RaisesException = true; break;
}
}
}
-
+
// Check for any arguments that are uninitialized/undefined.
-
+
for (ObjCMessageExpr::arg_iterator I = ME->arg_begin(), E = ME->arg_end();
I != E; ++I) {
-
+
if (state->getSVal(*I).isUndef()) {
-
+
// Generate an error node for passing an uninitialized/undefined value
// as an argument to a message expression. This node is a sink.
- NodeTy* N = Builder->generateNode(ME, state, Pred);
-
+ ExplodedNode* N = Builder->generateNode(ME, state, Pred);
+
if (N) {
N->markAsSink();
MsgExprUndefArgs[N] = *I;
}
-
+
return;
- }
+ }
}
-
+
// Check if we raise an exception. For now treat these as sinks. Eventually
// we will want to handle exceptions properly.
-
+
SaveAndRestore<bool> OldSink(Builder->BuildSinks);
if (RaisesException)
Builder->BuildSinks = true;
-
+
// Dispatch to plug-in transfer function.
-
+
unsigned size = Dst.size();
SaveOr OldHasGen(Builder->HasGeneratedNode);
-
+
EvalObjCMessageExpr(Dst, ME, Pred);
-
+
  // Handle the case where no nodes were generated. Auto-generate a node that
// contains the updated state if we aren't generating sinks.
-
+
if (!Builder->BuildSinks && Dst.size() == size && !Builder->HasGeneratedNode)
MakeNode(Dst, ME, Pred, state);
}
@@ -1995,24 +2069,8 @@ void GRExprEngine::VisitObjCMessageExprDispatchHelper(ObjCMessageExpr* ME,
// Transfer functions: Miscellaneous statements.
//===----------------------------------------------------------------------===//
-void GRExprEngine::VisitCastPointerToInteger(SVal V, const GRState* state,
- QualType PtrTy,
- Expr* CastE, NodeTy* Pred,
- NodeSet& Dst) {
- if (!V.isUnknownOrUndef()) {
- // FIXME: Determine if the number of bits of the target type is
- // equal or exceeds the number of bits to store the pointer value.
- // If not, flag an error.
- MakeNode(Dst, CastE, Pred, state->bindExpr(CastE, EvalCast(cast<Loc>(V),
- CastE->getType())));
- }
- else
- MakeNode(Dst, CastE, Pred, state->bindExpr(CastE, V));
-}
-
-
-void GRExprEngine::VisitCast(Expr* CastE, Expr* Ex, NodeTy* Pred, NodeSet& Dst){
- NodeSet S1;
+void GRExprEngine::VisitCast(Expr* CastE, Expr* Ex, ExplodedNode* Pred, ExplodedNodeSet& Dst){
+ ExplodedNodeSet S1;
QualType T = CastE->getType();
QualType ExTy = Ex->getType();
@@ -2023,180 +2081,67 @@ void GRExprEngine::VisitCast(Expr* CastE, Expr* Ex, NodeTy* Pred, NodeSet& Dst){
VisitLValue(Ex, Pred, S1);
else
Visit(Ex, Pred, S1);
-
+
// Check for casting to "void".
- if (T->isVoidType()) {
- for (NodeSet::iterator I1 = S1.begin(), E1 = S1.end(); I1 != E1; ++I1)
+ if (T->isVoidType()) {
+ for (ExplodedNodeSet::iterator I1 = S1.begin(), E1 = S1.end(); I1 != E1; ++I1)
Dst.Add(*I1);
return;
}
-
- // FIXME: The rest of this should probably just go into EvalCall, and
- // let the transfer function object be responsible for constructing
- // nodes.
-
- for (NodeSet::iterator I1 = S1.begin(), E1 = S1.end(); I1 != E1; ++I1) {
- NodeTy* N = *I1;
+
+ for (ExplodedNodeSet::iterator I1 = S1.begin(), E1 = S1.end(); I1 != E1; ++I1) {
+ ExplodedNode* N = *I1;
const GRState* state = GetState(N);
SVal V = state->getSVal(Ex);
- ASTContext& C = getContext();
-
- // Unknown?
- if (V.isUnknown()) {
- Dst.Add(N);
- continue;
- }
-
- // Undefined?
- if (V.isUndef())
- goto PassThrough;
-
- // For const casts, just propagate the value.
- if (C.getCanonicalType(T).getUnqualifiedType() ==
- C.getCanonicalType(ExTy).getUnqualifiedType())
- goto PassThrough;
-
- // Check for casts from pointers to integers.
- if (T->isIntegerType() && Loc::IsLocType(ExTy)) {
- VisitCastPointerToInteger(V, state, ExTy, CastE, N, Dst);
- continue;
- }
-
- // Check for casts from integers to pointers.
- if (Loc::IsLocType(T) && ExTy->isIntegerType()) {
- if (nonloc::LocAsInteger *LV = dyn_cast<nonloc::LocAsInteger>(&V)) {
- // Just unpackage the lval and return it.
- V = LV->getLoc();
- MakeNode(Dst, CastE, N, state->bindExpr(CastE, V));
- continue;
- }
-
- goto DispatchCast;
- }
-
- // Just pass through function and block pointers.
- if (ExTy->isBlockPointerType() || ExTy->isFunctionPointerType()) {
- assert(Loc::IsLocType(T));
- goto PassThrough;
- }
-
- // Check for casts from array type to another type.
- if (ExTy->isArrayType()) {
- // We will always decay to a pointer.
- V = StateMgr.ArrayToPointer(cast<Loc>(V));
-
- // Are we casting from an array to a pointer? If so just pass on
- // the decayed value.
- if (T->isPointerType())
- goto PassThrough;
-
- // Are we casting from an array to an integer? If so, cast the decayed
- // pointer value to an integer.
- assert(T->isIntegerType());
- QualType ElemTy = cast<ArrayType>(ExTy)->getElementType();
- QualType PointerTy = getContext().getPointerType(ElemTy);
- VisitCastPointerToInteger(V, state, PointerTy, CastE, N, Dst);
- continue;
- }
-
- // Check for casts from a region to a specific type.
- if (loc::MemRegionVal *RV = dyn_cast<loc::MemRegionVal>(&V)) {
- // FIXME: For TypedViewRegions, we should handle the case where the
- // underlying symbolic pointer is a function pointer or
- // block pointer.
-
- // FIXME: We should handle the case where we strip off view layers to get
- // to a desugared type.
-
- assert(Loc::IsLocType(T));
- // We get a symbolic function pointer for a dereference of a function
- // pointer, but it is of function type. Example:
-
- // struct FPRec {
- // void (*my_func)(int * x);
- // };
- //
- // int bar(int x);
- //
- // int f1_a(struct FPRec* foo) {
- // int x;
- // (*foo->my_func)(&x);
- // return bar(x)+1; // no-warning
- // }
-
- assert(Loc::IsLocType(ExTy) || ExTy->isFunctionType());
-
- const MemRegion* R = RV->getRegion();
- StoreManager& StoreMgr = getStoreManager();
-
- // Delegate to store manager to get the result of casting a region
- // to a different type.
- const StoreManager::CastResult& Res = StoreMgr.CastRegion(state, R, T);
-
- // Inspect the result. If the MemRegion* returned is NULL, this
- // expression evaluates to UnknownVal.
- R = Res.getRegion();
- if (R) { V = loc::MemRegionVal(R); } else { V = UnknownVal(); }
-
- // Generate the new node in the ExplodedGraph.
- MakeNode(Dst, CastE, N, Res.getState()->bindExpr(CastE, V));
- continue;
- }
- // All other cases.
- DispatchCast: {
- MakeNode(Dst, CastE, N, state->bindExpr(CastE,
- EvalCast(V, CastE->getType())));
- continue;
- }
-
- PassThrough: {
- MakeNode(Dst, CastE, N, state->bindExpr(CastE, V));
- }
+ const SValuator::CastResult &Res = SVator.EvalCast(V, state, T, ExTy);
+ state = Res.getState()->BindExpr(CastE, Res.getSVal());
+ MakeNode(Dst, CastE, N, state);
}
}
void GRExprEngine::VisitCompoundLiteralExpr(CompoundLiteralExpr* CL,
- NodeTy* Pred, NodeSet& Dst,
+ ExplodedNode* Pred,
+ ExplodedNodeSet& Dst,
bool asLValue) {
InitListExpr* ILE = cast<InitListExpr>(CL->getInitializer()->IgnoreParens());
- NodeSet Tmp;
+ ExplodedNodeSet Tmp;
Visit(ILE, Pred, Tmp);
-
- for (NodeSet::iterator I = Tmp.begin(), EI = Tmp.end(); I!=EI; ++I) {
+
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), EI = Tmp.end(); I!=EI; ++I) {
const GRState* state = GetState(*I);
SVal ILV = state->getSVal(ILE);
state = state->bindCompoundLiteral(CL, ILV);
if (asLValue)
- MakeNode(Dst, CL, *I, state->bindExpr(CL, state->getLValue(CL)));
+ MakeNode(Dst, CL, *I, state->BindExpr(CL, state->getLValue(CL)));
else
- MakeNode(Dst, CL, *I, state->bindExpr(CL, ILV));
+ MakeNode(Dst, CL, *I, state->BindExpr(CL, ILV));
}
}
-void GRExprEngine::VisitDeclStmt(DeclStmt* DS, NodeTy* Pred, NodeSet& Dst) {
+void GRExprEngine::VisitDeclStmt(DeclStmt *DS, ExplodedNode *Pred,
+ ExplodedNodeSet& Dst) {
- // The CFG has one DeclStmt per Decl.
+ // The CFG has one DeclStmt per Decl.
Decl* D = *DS->decl_begin();
-
+
if (!D || !isa<VarDecl>(D))
return;
-
- const VarDecl* VD = dyn_cast<VarDecl>(D);
+
+ const VarDecl* VD = dyn_cast<VarDecl>(D);
Expr* InitEx = const_cast<Expr*>(VD->getInit());
// FIXME: static variables may have an initializer, but the second
// time a function is called those values may not be current.
- NodeSet Tmp;
+ ExplodedNodeSet Tmp;
if (InitEx)
Visit(InitEx, Pred, Tmp);
-
- if (Tmp.empty())
+ else
Tmp.Add(Pred);
-
- for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
unsigned Count = Builder->getCurrentBlockCount();
@@ -2204,58 +2149,61 @@ void GRExprEngine::VisitDeclStmt(DeclStmt* DS, NodeTy* Pred, NodeSet& Dst) {
QualType T = getContext().getCanonicalType(VD->getType());
if (VariableArrayType* VLA = dyn_cast<VariableArrayType>(T)) {
// FIXME: Handle multi-dimensional VLAs.
-
+
Expr* SE = VLA->getSizeExpr();
- SVal Size = state->getSVal(SE);
-
- if (Size.isUndef()) {
- if (NodeTy* N = Builder->generateNode(DS, state, Pred)) {
- N->markAsSink();
+ SVal Size_untested = state->getSVal(SE);
+
+ if (Size_untested.isUndef()) {
+ if (ExplodedNode* N = Builder->generateNode(DS, state, Pred)) {
+ N->markAsSink();
ExplicitBadSizedVLA.insert(N);
}
continue;
}
-
- const GRState* zeroState = state->assume(Size, false);
- state = state->assume(Size, true);
-
+
+ DefinedOrUnknownSVal Size = cast<DefinedOrUnknownSVal>(Size_untested);
+ const GRState *zeroState = state->Assume(Size, false);
+ state = state->Assume(Size, true);
+
if (zeroState) {
- if (NodeTy* N = Builder->generateNode(DS, zeroState, Pred)) {
- N->markAsSink();
+ if (ExplodedNode* N = Builder->generateNode(DS, zeroState, Pred)) {
+ N->markAsSink();
if (state)
ImplicitBadSizedVLA.insert(N);
else
ExplicitBadSizedVLA.insert(N);
}
}
-
+
if (!state)
- continue;
+ continue;
}
-
+
// Decls without InitExpr are not initialized explicitly.
+ const LocationContext *LC = (*I)->getLocationContext();
+
if (InitEx) {
SVal InitVal = state->getSVal(InitEx);
QualType T = VD->getType();
-
+
// Recover some path-sensitivity if a scalar value evaluated to
// UnknownVal.
- if (InitVal.isUnknown() ||
+ if (InitVal.isUnknown() ||
!getConstraintManager().canReasonAbout(InitVal)) {
- InitVal = ValMgr.getConjuredSymbolVal(InitEx, Count);
- }
-
- state = state->bindDecl(VD, InitVal);
-
+ InitVal = ValMgr.getConjuredSymbolVal(NULL, InitEx, Count);
+ }
+
+ state = state->bindDecl(VD, LC, InitVal);
+
// The next thing to do is check if the GRTransferFuncs object wants to
// update the state based on the new binding. If the GRTransferFunc
// object doesn't do anything, just auto-propagate the current state.
GRStmtNodeBuilderRef BuilderRef(Dst, *Builder, *this, *I, state, DS,true);
- getTF().EvalBind(BuilderRef, loc::MemRegionVal(state->getRegion(VD)),
- InitVal);
- }
+ getTF().EvalBind(BuilderRef, loc::MemRegionVal(state->getRegion(VD, LC)),
+ InitVal);
+ }
else {
- state = state->bindDeclWithNoInit(VD);
+ state = state->bindDeclWithNoInit(VD, LC);
MakeNode(Dst, DS, *I, state);
}
}
@@ -2267,67 +2215,69 @@ namespace {
class VISIBILITY_HIDDEN InitListWLItem {
public:
llvm::ImmutableList<SVal> Vals;
- GRExprEngine::NodeTy* N;
+ ExplodedNode* N;
InitListExpr::reverse_iterator Itr;
-
- InitListWLItem(GRExprEngine::NodeTy* n, llvm::ImmutableList<SVal> vals,
- InitListExpr::reverse_iterator itr)
+
+ InitListWLItem(ExplodedNode* n, llvm::ImmutableList<SVal> vals,
+ InitListExpr::reverse_iterator itr)
: Vals(vals), N(n), Itr(itr) {}
};
}
-void GRExprEngine::VisitInitListExpr(InitListExpr* E, NodeTy* Pred,
- NodeSet& Dst) {
+void GRExprEngine::VisitInitListExpr(InitListExpr* E, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst) {
const GRState* state = GetState(Pred);
QualType T = getContext().getCanonicalType(E->getType());
- unsigned NumInitElements = E->getNumInits();
+ unsigned NumInitElements = E->getNumInits();
- if (T->isArrayType() || T->isStructureType()) {
+ if (T->isArrayType() || T->isStructureType() ||
+ T->isUnionType() || T->isVectorType()) {
llvm::ImmutableList<SVal> StartVals = getBasicVals().getEmptySValList();
-
+
// Handle base case where the initializer has no elements.
// e.g: static int* myArray[] = {};
if (NumInitElements == 0) {
SVal V = ValMgr.makeCompoundVal(T, StartVals);
- MakeNode(Dst, E, Pred, state->bindExpr(E, V));
+ MakeNode(Dst, E, Pred, state->BindExpr(E, V));
return;
- }
-
+ }
+
// Create a worklist to process the initializers.
llvm::SmallVector<InitListWLItem, 10> WorkList;
- WorkList.reserve(NumInitElements);
- WorkList.push_back(InitListWLItem(Pred, StartVals, E->rbegin()));
+ WorkList.reserve(NumInitElements);
+ WorkList.push_back(InitListWLItem(Pred, StartVals, E->rbegin()));
InitListExpr::reverse_iterator ItrEnd = E->rend();
-
+ assert(!(E->rbegin() == E->rend()));
+
// Process the worklist until it is empty.
while (!WorkList.empty()) {
InitListWLItem X = WorkList.back();
WorkList.pop_back();
-
- NodeSet Tmp;
+
+ ExplodedNodeSet Tmp;
Visit(*X.Itr, X.N, Tmp);
-
+
InitListExpr::reverse_iterator NewItr = X.Itr + 1;
- for (NodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI) {
+ for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI) {
// Get the last initializer value.
state = GetState(*NI);
SVal InitV = state->getSVal(cast<Expr>(*X.Itr));
-
+
// Construct the new list of values by prepending the new value to
// the already constructed list.
llvm::ImmutableList<SVal> NewVals =
getBasicVals().consVals(InitV, X.Vals);
-
+
if (NewItr == ItrEnd) {
// Now we have a list holding all init values. Make CompoundValData.
SVal V = ValMgr.makeCompoundVal(T, NewVals);
// Make final state and node.
- MakeNode(Dst, E, *NI, state->bindExpr(E, V));
+ MakeNode(Dst, E, *NI, state->BindExpr(E, V));
}
else {
// Still some initializer values to go. Push them onto the worklist.
@@ -2335,25 +2285,18 @@ void GRExprEngine::VisitInitListExpr(InitListExpr* E, NodeTy* Pred,
}
}
}
-
- return;
- }
- if (T->isUnionType() || T->isVectorType()) {
- // FIXME: to be implemented.
- // Note: That vectors can return true for T->isIntegerType()
- MakeNode(Dst, E, Pred, state);
return;
}
-
+
if (Loc::IsLocType(T) || T->isIntegerType()) {
assert (E->getNumInits() == 1);
- NodeSet Tmp;
+ ExplodedNodeSet Tmp;
Expr* Init = E->getInit(0);
Visit(Init, Pred, Tmp);
- for (NodeSet::iterator I = Tmp.begin(), EI = Tmp.end(); I != EI; ++I) {
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), EI = Tmp.end(); I != EI; ++I) {
state = GetState(*I);
- MakeNode(Dst, E, *I, state->bindExpr(E, state->getSVal(Init)));
+ MakeNode(Dst, E, *I, state->BindExpr(E, state->getSVal(Init)));
}
return;
}
@@ -2365,13 +2308,13 @@ void GRExprEngine::VisitInitListExpr(InitListExpr* E, NodeTy* Pred,
/// VisitSizeOfAlignOfExpr - Transfer function for sizeof(type).
void GRExprEngine::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr* Ex,
- NodeTy* Pred,
- NodeSet& Dst) {
+ ExplodedNode* Pred,
+ ExplodedNodeSet& Dst) {
QualType T = Ex->getTypeOfArgument();
- uint64_t amt;
-
+ uint64_t amt;
+
if (Ex->isSizeOf()) {
- if (T == getContext().VoidTy) {
+ if (T == getContext().VoidTy) {
// sizeof(void) == 1 byte.
amt = 1;
}
@@ -2382,195 +2325,206 @@ void GRExprEngine::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr* Ex,
else if (T->isObjCInterfaceType()) {
    // Some code tries to take the sizeof an ObjCInterfaceType, assuming that
// the compiler has laid out its representation. Just report Unknown
- // for these.
+ // for these.
return;
}
else {
// All other cases.
amt = getContext().getTypeSize(T) / 8;
- }
+ }
}
else // Get alignment of the type.
amt = getContext().getTypeAlign(T) / 8;
-
+
MakeNode(Dst, Ex, Pred,
- GetState(Pred)->bindExpr(Ex, ValMgr.makeIntVal(amt, Ex->getType())));
+ GetState(Pred)->BindExpr(Ex, ValMgr.makeIntVal(amt, Ex->getType())));
}
-void GRExprEngine::VisitUnaryOperator(UnaryOperator* U, NodeTy* Pred,
- NodeSet& Dst, bool asLValue) {
+void GRExprEngine::VisitUnaryOperator(UnaryOperator* U, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst, bool asLValue) {
switch (U->getOpcode()) {
-
+
default:
break;
-
+
case UnaryOperator::Deref: {
-
+
Expr* Ex = U->getSubExpr()->IgnoreParens();
- NodeSet Tmp;
+ ExplodedNodeSet Tmp;
Visit(Ex, Pred, Tmp);
-
- for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
-
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
const GRState* state = GetState(*I);
SVal location = state->getSVal(Ex);
-
+
if (asLValue)
- MakeNode(Dst, U, *I, state->bindExpr(U, location),
+ MakeNode(Dst, U, *I, state->BindExpr(U, location),
ProgramPoint::PostLValueKind);
else
EvalLoad(Dst, U, *I, state, location);
- }
+ }
return;
}
-
+
case UnaryOperator::Real: {
-
+
Expr* Ex = U->getSubExpr()->IgnoreParens();
- NodeSet Tmp;
+ ExplodedNodeSet Tmp;
Visit(Ex, Pred, Tmp);
-
- for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
-
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
// FIXME: We don't have complex SValues yet.
if (Ex->getType()->isAnyComplexType()) {
// Just report "Unknown."
Dst.Add(*I);
continue;
}
-
+
// For all other types, UnaryOperator::Real is an identity operation.
assert (U->getType() == Ex->getType());
const GRState* state = GetState(*I);
- MakeNode(Dst, U, *I, state->bindExpr(U, state->getSVal(Ex)));
- }
-
+ MakeNode(Dst, U, *I, state->BindExpr(U, state->getSVal(Ex)));
+ }
+
return;
}
-
+
case UnaryOperator::Imag: {
-
+
Expr* Ex = U->getSubExpr()->IgnoreParens();
- NodeSet Tmp;
+ ExplodedNodeSet Tmp;
Visit(Ex, Pred, Tmp);
-
- for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
// FIXME: We don't have complex SValues yet.
if (Ex->getType()->isAnyComplexType()) {
// Just report "Unknown."
Dst.Add(*I);
continue;
}
-
+
      // For all other types, UnaryOperator::Imag returns 0.
assert (Ex->getType()->isIntegerType());
const GRState* state = GetState(*I);
SVal X = ValMgr.makeZeroVal(Ex->getType());
- MakeNode(Dst, U, *I, state->bindExpr(U, X));
+ MakeNode(Dst, U, *I, state->BindExpr(U, X));
}
-
+
return;
}
-
- // FIXME: Just report "Unknown" for OffsetOf.
- case UnaryOperator::OffsetOf:
+
+ case UnaryOperator::OffsetOf: {
+ Expr::EvalResult Res;
+ if (U->Evaluate(Res, getContext()) && Res.Val.isInt()) {
+ const APSInt &IV = Res.Val.getInt();
+ assert(IV.getBitWidth() == getContext().getTypeSize(U->getType()));
+ assert(U->getType()->isIntegerType());
+ assert(IV.isSigned() == U->getType()->isSignedIntegerType());
+ SVal X = ValMgr.makeIntVal(IV);
+ MakeNode(Dst, U, Pred, GetState(Pred)->BindExpr(U, X));
+ return;
+ }
+ // FIXME: Handle the case where __builtin_offsetof is not a constant.
Dst.Add(Pred);
return;
-
+ }
+
case UnaryOperator::Plus: assert (!asLValue); // FALL-THROUGH.
case UnaryOperator::Extension: {
-
+
// Unary "+" is a no-op, similar to a parentheses. We still have places
// where it may be a block-level expression, so we need to
// generate an extra node that just propagates the value of the
// subexpression.
Expr* Ex = U->getSubExpr()->IgnoreParens();
- NodeSet Tmp;
+ ExplodedNodeSet Tmp;
Visit(Ex, Pred, Tmp);
-
- for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
- MakeNode(Dst, U, *I, state->bindExpr(U, state->getSVal(Ex)));
+ MakeNode(Dst, U, *I, state->BindExpr(U, state->getSVal(Ex)));
}
-
+
return;
}
-
+
case UnaryOperator::AddrOf: {
-
+
assert(!asLValue);
Expr* Ex = U->getSubExpr()->IgnoreParens();
- NodeSet Tmp;
+ ExplodedNodeSet Tmp;
VisitLValue(Ex, Pred, Tmp);
-
- for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
SVal V = state->getSVal(Ex);
- state = state->bindExpr(U, V);
+ state = state->BindExpr(U, V);
MakeNode(Dst, U, *I, state);
}
- return;
+ return;
}
-
+
case UnaryOperator::LNot:
case UnaryOperator::Minus:
case UnaryOperator::Not: {
-
+
assert (!asLValue);
Expr* Ex = U->getSubExpr()->IgnoreParens();
- NodeSet Tmp;
+ ExplodedNodeSet Tmp;
Visit(Ex, Pred, Tmp);
-
- for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
-
+
// Get the value of the subexpression.
SVal V = state->getSVal(Ex);
if (V.isUnknownOrUndef()) {
- MakeNode(Dst, U, *I, state->bindExpr(U, V));
+ MakeNode(Dst, U, *I, state->BindExpr(U, V));
continue;
}
-
+
// QualType DstT = getContext().getCanonicalType(U->getType());
// QualType SrcT = getContext().getCanonicalType(Ex->getType());
-//
+//
// if (DstT != SrcT) // Perform promotions.
-// V = EvalCast(V, DstT);
-//
+// V = EvalCast(V, DstT);
+//
// if (V.isUnknownOrUndef()) {
// MakeNode(Dst, U, *I, BindExpr(St, U, V));
// continue;
// }
-
+
switch (U->getOpcode()) {
default:
assert(false && "Invalid Opcode.");
break;
-
+
case UnaryOperator::Not:
// FIXME: Do we need to handle promotions?
- state = state->bindExpr(U, EvalComplement(cast<NonLoc>(V)));
- break;
-
+ state = state->BindExpr(U, EvalComplement(cast<NonLoc>(V)));
+ break;
+
case UnaryOperator::Minus:
// FIXME: Do we need to handle promotions?
- state = state->bindExpr(U, EvalMinus(cast<NonLoc>(V)));
- break;
-
- case UnaryOperator::LNot:
-
+ state = state->BindExpr(U, EvalMinus(cast<NonLoc>(V)));
+ break;
+
+ case UnaryOperator::LNot:
+
// C99 6.5.3.3: "The expression !E is equivalent to (0==E)."
//
// Note: technically we do "E == 0", but this is the same in the
// transfer functions as "0 == E".
SVal Result;
-
+
if (isa<Loc>(V)) {
Loc X = ValMgr.makeNull();
Result = EvalBinOp(state, BinaryOperator::EQ, cast<Loc>(V), X,
@@ -2578,18 +2532,18 @@ void GRExprEngine::VisitUnaryOperator(UnaryOperator* U, NodeTy* Pred,
}
else {
nonloc::ConcreteInt X(getBasicVals().getValue(0, Ex->getType()));
- Result = EvalBinOp(BinaryOperator::EQ, cast<NonLoc>(V), X,
+ Result = EvalBinOp(state, BinaryOperator::EQ, cast<NonLoc>(V), X,
U->getType());
}
-
- state = state->bindExpr(U, Result);
-
+
+ state = state->BindExpr(U, Result);
+
break;
}
-
+
MakeNode(Dst, U, *I, state);
}
-
+
return;
}
}
@@ -2597,170 +2551,183 @@ void GRExprEngine::VisitUnaryOperator(UnaryOperator* U, NodeTy* Pred,
// Handle ++ and -- (both pre- and post-increment).
assert (U->isIncrementDecrementOp());
- NodeSet Tmp;
+ ExplodedNodeSet Tmp;
Expr* Ex = U->getSubExpr()->IgnoreParens();
VisitLValue(Ex, Pred, Tmp);
-
- for (NodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I!=E; ++I) {
-
+
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I!=E; ++I) {
+
const GRState* state = GetState(*I);
SVal V1 = state->getSVal(Ex);
-
- // Perform a load.
- NodeSet Tmp2;
+
+ // Perform a load.
+ ExplodedNodeSet Tmp2;
EvalLoad(Tmp2, Ex, *I, state, V1);
- for (NodeSet::iterator I2 = Tmp2.begin(), E2 = Tmp2.end(); I2!=E2; ++I2) {
-
+ for (ExplodedNodeSet::iterator I2 = Tmp2.begin(), E2 = Tmp2.end(); I2!=E2; ++I2) {
+
state = GetState(*I2);
- SVal V2 = state->getSVal(Ex);
-
- // Propagate unknown and undefined values.
- if (V2.isUnknownOrUndef()) {
- MakeNode(Dst, U, *I2, state->bindExpr(U, V2));
+ SVal V2_untested = state->getSVal(Ex);
+
+ // Propagate unknown and undefined values.
+ if (V2_untested.isUnknownOrUndef()) {
+ MakeNode(Dst, U, *I2, state->BindExpr(U, V2_untested));
continue;
- }
-
- // Handle all other values.
+ }
+ DefinedSVal V2 = cast<DefinedSVal>(V2_untested);
+
+ // Handle all other values.
BinaryOperator::Opcode Op = U->isIncrementOp() ? BinaryOperator::Add
: BinaryOperator::Sub;
- SVal Result = EvalBinOp(state, Op, V2, ValMgr.makeIntVal(1U,U->getType()),
- U->getType());
-
+ // If the UnaryOperator has non-location type, use its type to create the
+ // constant value. If the UnaryOperator has location type, create the
+ // constant with int type and pointer width.
+ SVal RHS;
+
+ if (U->getType()->isAnyPointerType())
+ RHS = ValMgr.makeIntValWithPtrWidth(1, false);
+ else
+ RHS = ValMgr.makeIntVal(1, U->getType());
+
+ SVal Result = EvalBinOp(state, Op, V2, RHS, U->getType());
+
// Conjure a new symbol if necessary to recover precision.
if (Result.isUnknown() || !getConstraintManager().canReasonAbout(Result)){
- Result = ValMgr.getConjuredSymbolVal(Ex,
- Builder->getCurrentBlockCount());
-
+ DefinedOrUnknownSVal SymVal =
+ ValMgr.getConjuredSymbolVal(NULL, Ex,
+ Builder->getCurrentBlockCount());
+ Result = SymVal;
+
// If the value is a location, ++/-- should always preserve
// non-nullness. Check if the original value was non-null, and if so
- // propagate that constraint.
+ // propagate that constraint.
if (Loc::IsLocType(U->getType())) {
- SVal Constraint = EvalBinOp(state, BinaryOperator::EQ, V2,
- ValMgr.makeZeroVal(U->getType()),
- getContext().IntTy);
-
- if (!state->assume(Constraint, true)) {
+ DefinedOrUnknownSVal Constraint =
+ SVator.EvalEQ(state, V2, ValMgr.makeZeroVal(U->getType()));
+
+ if (!state->Assume(Constraint, true)) {
// It isn't feasible for the original value to be null.
// Propagate this constraint.
- Constraint = EvalBinOp(state, BinaryOperator::EQ, Result,
- ValMgr.makeZeroVal(U->getType()),
- getContext().IntTy);
-
- state = state->assume(Constraint, false);
+ Constraint = SVator.EvalEQ(state, SymVal,
+ ValMgr.makeZeroVal(U->getType()));
+
+
+ state = state->Assume(Constraint, false);
assert(state);
- }
- }
+ }
+ }
}
-
- state = state->bindExpr(U, U->isPostfix() ? V2 : Result);
- // Perform the store.
+ state = state->BindExpr(U, U->isPostfix() ? V2 : Result);
+
+ // Perform the store.
EvalStore(Dst, U, *I2, state, V1, Result);
}
}
}
-void GRExprEngine::VisitAsmStmt(AsmStmt* A, NodeTy* Pred, NodeSet& Dst) {
+void GRExprEngine::VisitAsmStmt(AsmStmt* A, ExplodedNode* Pred, ExplodedNodeSet& Dst) {
VisitAsmStmtHelperOutputs(A, A->begin_outputs(), A->end_outputs(), Pred, Dst);
-}
+}
void GRExprEngine::VisitAsmStmtHelperOutputs(AsmStmt* A,
AsmStmt::outputs_iterator I,
AsmStmt::outputs_iterator E,
- NodeTy* Pred, NodeSet& Dst) {
+ ExplodedNode* Pred, ExplodedNodeSet& Dst) {
if (I == E) {
VisitAsmStmtHelperInputs(A, A->begin_inputs(), A->end_inputs(), Pred, Dst);
return;
}
-
- NodeSet Tmp;
+
+ ExplodedNodeSet Tmp;
VisitLValue(*I, Pred, Tmp);
-
+
++I;
-
- for (NodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI != NE; ++NI)
+
+ for (ExplodedNodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI != NE; ++NI)
VisitAsmStmtHelperOutputs(A, I, E, *NI, Dst);
}
void GRExprEngine::VisitAsmStmtHelperInputs(AsmStmt* A,
AsmStmt::inputs_iterator I,
AsmStmt::inputs_iterator E,
- NodeTy* Pred, NodeSet& Dst) {
+ ExplodedNode* Pred, ExplodedNodeSet& Dst) {
if (I == E) {
-
+
// We have processed both the inputs and the outputs. All of the outputs
// should evaluate to Locs. Nuke all of their values.
-
+
// FIXME: Some day in the future it would be nice to allow a "plug-in"
// which interprets the inline asm and stores proper results in the
// outputs.
-
+
const GRState* state = GetState(Pred);
-
+
for (AsmStmt::outputs_iterator OI = A->begin_outputs(),
OE = A->end_outputs(); OI != OE; ++OI) {
-
- SVal X = state->getSVal(*OI);
+
+ SVal X = state->getSVal(*OI);
assert (!isa<NonLoc>(X)); // Should be an Lval, or unknown, undef.
-
+
if (isa<Loc>(X))
state = state->bindLoc(cast<Loc>(X), UnknownVal());
}
-
+
MakeNode(Dst, A, Pred, state);
return;
}
-
- NodeSet Tmp;
+
+ ExplodedNodeSet Tmp;
Visit(*I, Pred, Tmp);
-
+
++I;
-
- for (NodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI != NE; ++NI)
+
+ for (ExplodedNodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI!=NE; ++NI)
VisitAsmStmtHelperInputs(A, I, E, *NI, Dst);
}
-void GRExprEngine::EvalReturn(NodeSet& Dst, ReturnStmt* S, NodeTy* Pred) {
+void GRExprEngine::EvalReturn(ExplodedNodeSet& Dst, ReturnStmt* S,
+ ExplodedNode* Pred) {
assert (Builder && "GRStmtNodeBuilder must be defined.");
-
- unsigned size = Dst.size();
+
+ unsigned size = Dst.size();
SaveAndRestore<bool> OldSink(Builder->BuildSinks);
SaveOr OldHasGen(Builder->HasGeneratedNode);
getTF().EvalReturn(Dst, *this, *Builder, S, Pred);
-
+
  // Handle the case where no nodes were generated.
-
+
if (!Builder->BuildSinks && Dst.size() == size && !Builder->HasGeneratedNode)
MakeNode(Dst, S, Pred, GetState(Pred));
}
-void GRExprEngine::VisitReturnStmt(ReturnStmt* S, NodeTy* Pred, NodeSet& Dst) {
+void GRExprEngine::VisitReturnStmt(ReturnStmt* S, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst) {
Expr* R = S->getRetValue();
-
+
if (!R) {
EvalReturn(Dst, S, Pred);
return;
}
- NodeSet Tmp;
+ ExplodedNodeSet Tmp;
Visit(R, Pred, Tmp);
- for (NodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
SVal X = (*I)->getState()->getSVal(R);
-
+
// Check if we return the address of a stack variable.
if (isa<loc::MemRegionVal>(X)) {
// Determine if the value is on the stack.
const MemRegion* R = cast<loc::MemRegionVal>(&X)->getRegion();
-
+
if (R && R->hasStackStorage()) {
// Create a special node representing the error.
- if (NodeTy* N = Builder->generateNode(S, GetState(*I), *I)) {
+ if (ExplodedNode* N = Builder->generateNode(S, GetState(*I), *I)) {
N->markAsSink();
RetsStackAddr.insert(N);
}
@@ -2769,13 +2736,13 @@ void GRExprEngine::VisitReturnStmt(ReturnStmt* S, NodeTy* Pred, NodeSet& Dst) {
}
// Check if we return an undefined value.
else if (X.isUndef()) {
- if (NodeTy* N = Builder->generateNode(S, GetState(*I), *I)) {
+ if (ExplodedNode* N = Builder->generateNode(S, GetState(*I), *I)) {
N->markAsSink();
RetsUndef.insert(N);
}
continue;
}
-
+
EvalReturn(Dst, S, *I);
}
}
@@ -2784,127 +2751,76 @@ void GRExprEngine::VisitReturnStmt(ReturnStmt* S, NodeTy* Pred, NodeSet& Dst) {
// Transfer functions: Binary operators.
//===----------------------------------------------------------------------===//
-const GRState* GRExprEngine::CheckDivideZero(Expr* Ex, const GRState* state,
- NodeTy* Pred, SVal Denom) {
-
- // Divide by undefined? (potentially zero)
-
- if (Denom.isUndef()) {
- NodeTy* DivUndef = Builder->generateNode(Ex, state, Pred);
-
- if (DivUndef) {
- DivUndef->markAsSink();
- ExplicitBadDivides.insert(DivUndef);
- }
-
- return 0;
- }
-
- // Check for divide/remainder-by-zero.
- // First, "assume" that the denominator is 0 or undefined.
- const GRState* zeroState = state->assume(Denom, false);
-
- // Second, "assume" that the denominator cannot be 0.
- state = state->assume(Denom, true);
-
- // Create the node for the divide-by-zero (if it occurred).
- if (zeroState)
- if (NodeTy* DivZeroNode = Builder->generateNode(Ex, zeroState, Pred)) {
- DivZeroNode->markAsSink();
-
- if (state)
- ImplicitBadDivides.insert(DivZeroNode);
- else
- ExplicitBadDivides.insert(DivZeroNode);
-
- }
-
- return state;
-}
-
void GRExprEngine::VisitBinaryOperator(BinaryOperator* B,
- GRExprEngine::NodeTy* Pred,
- GRExprEngine::NodeSet& Dst) {
+ ExplodedNode* Pred,
+ ExplodedNodeSet& Dst) {
- NodeSet Tmp1;
+ ExplodedNodeSet Tmp1;
Expr* LHS = B->getLHS()->IgnoreParens();
Expr* RHS = B->getRHS()->IgnoreParens();
-
- // FIXME: Add proper support for ObjCKVCRefExpr.
- if (isa<ObjCKVCRefExpr>(LHS)) {
- Visit(RHS, Pred, Dst);
+
+ // FIXME: Add proper support for ObjCImplicitSetterGetterRefExpr.
+ if (isa<ObjCImplicitSetterGetterRefExpr>(LHS)) {
+ Visit(RHS, Pred, Dst);
return;
}
-
+
if (B->isAssignmentOp())
VisitLValue(LHS, Pred, Tmp1);
else
Visit(LHS, Pred, Tmp1);
- for (NodeSet::iterator I1=Tmp1.begin(), E1=Tmp1.end(); I1 != E1; ++I1) {
-
+ for (ExplodedNodeSet::iterator I1=Tmp1.begin(), E1=Tmp1.end(); I1!=E1; ++I1) {
SVal LeftV = (*I1)->getState()->getSVal(LHS);
-
- // Process the RHS.
-
- NodeSet Tmp2;
+ ExplodedNodeSet Tmp2;
Visit(RHS, *I1, Tmp2);
-
+
+ ExplodedNodeSet CheckedSet;
+ CheckerVisit(B, CheckedSet, Tmp2, true);
+
// With both the LHS and RHS evaluated, process the operation itself.
-
- for (NodeSet::iterator I2=Tmp2.begin(), E2=Tmp2.end(); I2 != E2; ++I2) {
- const GRState* state = GetState(*I2);
- const GRState* OldSt = state;
+ for (ExplodedNodeSet::iterator I2=CheckedSet.begin(), E2=CheckedSet.end();
+ I2 != E2; ++I2) {
+ const GRState *state = GetState(*I2);
+ const GRState *OldSt = state;
SVal RightV = state->getSVal(RHS);
+
BinaryOperator::Opcode Op = B->getOpcode();
-
switch (Op) {
-
case BinaryOperator::Assign: {
-
+
// EXPERIMENTAL: "Conjured" symbols.
// FIXME: Handle structs.
QualType T = RHS->getType();
-
- if ((RightV.isUnknown() ||
- !getConstraintManager().canReasonAbout(RightV))
- && (Loc::IsLocType(T) ||
+
+ if ((RightV.isUnknown() ||
+ !getConstraintManager().canReasonAbout(RightV))
+ && (Loc::IsLocType(T) ||
(T->isScalarType() && T->isIntegerType()))) {
- unsigned Count = Builder->getCurrentBlockCount();
- RightV = ValMgr.getConjuredSymbolVal(B->getRHS(), Count);
+ unsigned Count = Builder->getCurrentBlockCount();
+ RightV = ValMgr.getConjuredSymbolVal(NULL, B->getRHS(), Count);
}
-
+
// Simulate the effects of a "store": bind the value of the RHS
- // to the L-Value represented by the LHS.
- EvalStore(Dst, B, LHS, *I2, state->bindExpr(B, RightV), LeftV,
- RightV);
+ // to the L-Value represented by the LHS.
+ EvalStore(Dst, B, LHS, *I2, state->BindExpr(B, RightV),
+ LeftV, RightV);
continue;
}
-
- case BinaryOperator::Div:
- case BinaryOperator::Rem:
-
- // Special checking for integer denominators.
- if (RHS->getType()->isIntegerType() &&
- RHS->getType()->isScalarType()) {
-
- state = CheckDivideZero(B, state, *I2, RightV);
- if (!state) continue;
- }
-
+
// FALL-THROUGH.
default: {
-
+
if (B->isAssignmentOp())
break;
-
+
// Process non-assignments except commas or short-circuited
- // logical expressions (LAnd and LOr).
+ // logical expressions (LAnd and LOr).
SVal Result = EvalBinOp(state, Op, LeftV, RightV, B->getType());
-
+
if (Result.isUnknown()) {
if (OldSt != state) {
// Generate a new node if we have already created a new state.
@@ -2912,30 +2828,28 @@ void GRExprEngine::VisitBinaryOperator(BinaryOperator* B,
}
else
Dst.Add(*I2);
-
+
continue;
}
-
- if (Result.isUndef() && !LeftV.isUndef() && !RightV.isUndef()) {
-
+
+ state = state->BindExpr(B, Result);
+
+ if (Result.isUndef()) {
// The operands were *not* undefined, but the result is undefined.
// This is a special node that should be flagged as an error.
-
- if (NodeTy* UndefNode = Builder->generateNode(B, state, *I2)) {
- UndefNode->markAsSink();
+ if (ExplodedNode *UndefNode = Builder->generateNode(B, state, *I2)){
+ UndefNode->markAsSink();
UndefResults.insert(UndefNode);
}
-
continue;
}
-
+
// Otherwise, create a new node.
-
- MakeNode(Dst, B, *I2, state->bindExpr(B, Result));
+ MakeNode(Dst, B, *I2, state);
continue;
}
}
-
+
assert (B->isCompoundAssignmentOp());
switch (Op) {
@@ -2952,78 +2866,44 @@ void GRExprEngine::VisitBinaryOperator(BinaryOperator* B,
case BinaryOperator::XorAssign: Op = BinaryOperator::Xor; break;
case BinaryOperator::OrAssign: Op = BinaryOperator::Or; break;
}
-
+
// Perform a load (the LHS). This performs the checks for
// null dereferences, and so on.
- NodeSet Tmp3;
+ ExplodedNodeSet Tmp3;
SVal location = state->getSVal(LHS);
EvalLoad(Tmp3, LHS, *I2, state, location);
-
- for (NodeSet::iterator I3=Tmp3.begin(), E3=Tmp3.end(); I3!=E3; ++I3) {
-
+
+ for (ExplodedNodeSet::iterator I3=Tmp3.begin(), E3=Tmp3.end(); I3!=E3;
+ ++I3) {
+
state = GetState(*I3);
SVal V = state->getSVal(LHS);
- // Check for divide-by-zero.
- if ((Op == BinaryOperator::Div || Op == BinaryOperator::Rem)
- && RHS->getType()->isIntegerType()
- && RHS->getType()->isScalarType()) {
-
- // CheckDivideZero returns a new state where the denominator
- // is assumed to be non-zero.
- state = CheckDivideZero(B, state, *I3, RightV);
-
- if (!state)
- continue;
- }
-
- // Propagate undefined values (left-side).
- if (V.isUndef()) {
- EvalStore(Dst, B, LHS, *I3, state->bindExpr(B, V), location, V);
- continue;
- }
-
- // Propagate unknown values (left and right-side).
- if (RightV.isUnknown() || V.isUnknown()) {
- EvalStore(Dst, B, LHS, *I3, state->bindExpr(B, UnknownVal()),
- location, UnknownVal());
- continue;
- }
-
- // At this point:
- //
- // The LHS is not Undef/Unknown.
- // The RHS is not Unknown.
-
// Get the computation type.
- QualType CTy = cast<CompoundAssignOperator>(B)->getComputationResultType();
+ QualType CTy =
+ cast<CompoundAssignOperator>(B)->getComputationResultType();
CTy = getContext().getCanonicalType(CTy);
- QualType CLHSTy = cast<CompoundAssignOperator>(B)->getComputationLHSType();
- CLHSTy = getContext().getCanonicalType(CTy);
+ QualType CLHSTy =
+ cast<CompoundAssignOperator>(B)->getComputationLHSType();
+ CLHSTy = getContext().getCanonicalType(CLHSTy);
QualType LTy = getContext().getCanonicalType(LHS->getType());
QualType RTy = getContext().getCanonicalType(RHS->getType());
// Promote LHS.
- V = EvalCast(V, CLHSTy);
+ llvm::tie(state, V) = SVator.EvalCast(V, state, CLHSTy, LTy);
+
+ // Compute the result of the operation.
+ SVal Result;
+ llvm::tie(state, Result) = SVator.EvalCast(EvalBinOp(state, Op, V,
+ RightV, CTy),
+ state, B->getType(), CTy);
- // Evaluate operands and promote to result type.
- if (RightV.isUndef()) {
- // Propagate undefined values (right-side).
- EvalStore(Dst, B, LHS, *I3, state->bindExpr(B, RightV), location,
- RightV);
- continue;
- }
-
- // Compute the result of the operation.
- SVal Result = EvalCast(EvalBinOp(state, Op, V, RightV, CTy),
- B->getType());
-
if (Result.isUndef()) {
// The operands were not undefined, but the result is undefined.
- if (NodeTy* UndefNode = Builder->generateNode(B, state, *I3)) {
- UndefNode->markAsSink();
+ if (ExplodedNode* UndefNode = Builder->generateNode(B, state, *I3)) {
+ UndefNode->markAsSink();
UndefResults.insert(UndefNode);
}
continue;
@@ -3031,71 +2911,38 @@ void GRExprEngine::VisitBinaryOperator(BinaryOperator* B,
// EXPERIMENTAL: "Conjured" symbols.
// FIXME: Handle structs.
-
+
SVal LHSVal;
-
- if ((Result.isUnknown() ||
+
+ if ((Result.isUnknown() ||
!getConstraintManager().canReasonAbout(Result))
- && (Loc::IsLocType(CTy)
+ && (Loc::IsLocType(CTy)
|| (CTy->isScalarType() && CTy->isIntegerType()))) {
-
+
unsigned Count = Builder->getCurrentBlockCount();
-
+
// The symbolic value is actually for the type of the left-hand side
// expression, not the computation type, as this is the value the
// LValue on the LHS will bind to.
- LHSVal = ValMgr.getConjuredSymbolVal(B->getRHS(), LTy, Count);
-
+ LHSVal = ValMgr.getConjuredSymbolVal(NULL, B->getRHS(), LTy, Count);
+
// However, we need to convert the symbol to the computation type.
- Result = (LTy == CTy) ? LHSVal : EvalCast(LHSVal,CTy);
+ llvm::tie(state, Result) = SVator.EvalCast(LHSVal, state, CTy, LTy);
}
else {
        // The left-hand side may bind to a different value than the
// computation type.
- LHSVal = (LTy == CTy) ? Result : EvalCast(Result,LTy);
+ llvm::tie(state, LHSVal) = SVator.EvalCast(Result, state, LTy, CTy);
}
-
- EvalStore(Dst, B, LHS, *I3, state->bindExpr(B, Result), location,
- LHSVal);
+
+ EvalStore(Dst, B, LHS, *I3, state->BindExpr(B, Result),
+ location, LHSVal);
}
}
}
}
//===----------------------------------------------------------------------===//
-// Transfer-function Helpers.
-//===----------------------------------------------------------------------===//
-
-SVal GRExprEngine::EvalBinOp(const GRState* state, BinaryOperator::Opcode Op,
- SVal L, SVal R, QualType T) {
-
- if (L.isUndef() || R.isUndef())
- return UndefinedVal();
-
- if (L.isUnknown() || R.isUnknown())
- return UnknownVal();
-
- if (isa<Loc>(L)) {
- if (isa<Loc>(R))
- return SVator->EvalBinOpLL(Op, cast<Loc>(L), cast<Loc>(R), T);
- else
- return SVator->EvalBinOpLN(state, Op, cast<Loc>(L), cast<NonLoc>(R), T);
- }
-
- if (isa<Loc>(R)) {
- // Support pointer arithmetic where the increment/decrement operand
- // is on the left and the pointer on the right.
-
- assert (Op == BinaryOperator::Add || Op == BinaryOperator::Sub);
-
- // Commute the operands.
- return SVator->EvalBinOpLN(state, Op, cast<Loc>(R), cast<NonLoc>(L), T);
- }
- else
- return SVator->EvalBinOpNN(Op, cast<NonLoc>(L), cast<NonLoc>(R), T);
-}
-
-//===----------------------------------------------------------------------===//
// Visualization.
//===----------------------------------------------------------------------===//
@@ -3105,67 +2952,65 @@ static SourceManager* GraphPrintSourceManager;
namespace llvm {
template<>
-struct VISIBILITY_HIDDEN DOTGraphTraits<GRExprEngine::NodeTy*> :
+struct VISIBILITY_HIDDEN DOTGraphTraits<ExplodedNode*> :
public DefaultDOTGraphTraits {
-
- static std::string getNodeAttributes(const GRExprEngine::NodeTy* N, void*) {
-
+
+ static std::string getNodeAttributes(const ExplodedNode* N, void*) {
+
if (GraphPrintCheckerState->isImplicitNullDeref(N) ||
GraphPrintCheckerState->isExplicitNullDeref(N) ||
GraphPrintCheckerState->isUndefDeref(N) ||
GraphPrintCheckerState->isUndefStore(N) ||
GraphPrintCheckerState->isUndefControlFlow(N) ||
- GraphPrintCheckerState->isExplicitBadDivide(N) ||
- GraphPrintCheckerState->isImplicitBadDivide(N) ||
GraphPrintCheckerState->isUndefResult(N) ||
GraphPrintCheckerState->isBadCall(N) ||
GraphPrintCheckerState->isUndefArg(N))
return "color=\"red\",style=\"filled\"";
-
+
if (GraphPrintCheckerState->isNoReturnCall(N))
return "color=\"blue\",style=\"filled\"";
-
+
return "";
}
-
- static std::string getNodeLabel(const GRExprEngine::NodeTy* N, void*,
- bool ShortNames) {
-
+
+ static std::string getNodeLabel(const ExplodedNode* N, void*,bool ShortNames){
+
std::string sbuf;
llvm::raw_string_ostream Out(sbuf);
// Program Location.
ProgramPoint Loc = N->getLocation();
-
+
switch (Loc.getKind()) {
case ProgramPoint::BlockEntranceKind:
- Out << "Block Entrance: B"
+ Out << "Block Entrance: B"
<< cast<BlockEntrance>(Loc).getBlock()->getBlockID();
break;
-
+
case ProgramPoint::BlockExitKind:
assert (false);
break;
-
+
default: {
- if (isa<PostStmt>(Loc)) {
- const PostStmt& L = cast<PostStmt>(Loc);
- Stmt* S = L.getStmt();
+ if (StmtPoint *L = dyn_cast<StmtPoint>(&Loc)) {
+ const Stmt* S = L->getStmt();
SourceLocation SLoc = S->getLocStart();
- Out << S->getStmtClassName() << ' ' << (void*) S << ' ';
+ Out << S->getStmtClassName() << ' ' << (void*) S << ' ';
LangOptions LO; // FIXME.
S->printPretty(Out, 0, PrintingPolicy(LO));
-
- if (SLoc.isFileID()) {
+
+ if (SLoc.isFileID()) {
Out << "\\lline="
<< GraphPrintSourceManager->getInstantiationLineNumber(SLoc)
<< " col="
<< GraphPrintSourceManager->getInstantiationColumnNumber(SLoc)
<< "\\l";
}
-
- if (isa<PostLoad>(Loc))
+
+ if (isa<PreStmt>(Loc))
+ Out << "\\lPreStmt\\l;";
+ else if (isa<PostLoad>(Loc))
Out << "\\lPostLoad\\l;";
else if (isa<PostStore>(Loc))
Out << "\\lPostStore\\l";
@@ -3175,7 +3020,7 @@ struct VISIBILITY_HIDDEN DOTGraphTraits<GRExprEngine::NodeTy*> :
Out << "\\lPostLocationChecksSucceed\\l";
else if (isa<PostNullCheckFailed>(Loc))
Out << "\\lPostNullCheckFailed\\l";
-
+
if (GraphPrintCheckerState->isImplicitNullDeref(N))
Out << "\\|Implicit-Null Dereference.\\l";
else if (GraphPrintCheckerState->isExplicitNullDeref(N))
@@ -3184,10 +3029,6 @@ struct VISIBILITY_HIDDEN DOTGraphTraits<GRExprEngine::NodeTy*> :
Out << "\\|Dereference of undefialied value.\\l";
else if (GraphPrintCheckerState->isUndefStore(N))
Out << "\\|Store to Undefined Loc.";
- else if (GraphPrintCheckerState->isExplicitBadDivide(N))
- Out << "\\|Explicit divide-by zero or undefined value.";
- else if (GraphPrintCheckerState->isImplicitBadDivide(N))
- Out << "\\|Implicit divide-by zero or undefined value.";
else if (GraphPrintCheckerState->isUndefResult(N))
Out << "\\|Result of operation is undefined.";
else if (GraphPrintCheckerState->isNoReturnCall(N))
@@ -3196,43 +3037,43 @@ struct VISIBILITY_HIDDEN DOTGraphTraits<GRExprEngine::NodeTy*> :
Out << "\\|Call to NULL/Undefined.";
else if (GraphPrintCheckerState->isUndefArg(N))
Out << "\\|Argument in call is undefined";
-
+
break;
}
const BlockEdge& E = cast<BlockEdge>(Loc);
Out << "Edge: (B" << E.getSrc()->getBlockID() << ", B"
<< E.getDst()->getBlockID() << ')';
-
+
if (Stmt* T = E.getSrc()->getTerminator()) {
-
+
SourceLocation SLoc = T->getLocStart();
-
+
Out << "\\|Terminator: ";
LangOptions LO; // FIXME.
E.getSrc()->printTerminator(Out, LO);
-
+
if (SLoc.isFileID()) {
Out << "\\lline="
<< GraphPrintSourceManager->getInstantiationLineNumber(SLoc)
<< " col="
<< GraphPrintSourceManager->getInstantiationColumnNumber(SLoc);
}
-
+
if (isa<SwitchStmt>(T)) {
Stmt* Label = E.getDst()->getLabel();
-
- if (Label) {
+
+ if (Label) {
if (CaseStmt* C = dyn_cast<CaseStmt>(Label)) {
Out << "\\lcase ";
LangOptions LO; // FIXME.
C->getLHS()->printPretty(Out, 0, PrintingPolicy(LO));
-
+
if (Stmt* RHS = C->getRHS()) {
Out << " .. ";
RHS->printPretty(Out, 0, PrintingPolicy(LO));
}
-
+
Out << ":";
}
else {
@@ -3240,7 +3081,7 @@ struct VISIBILITY_HIDDEN DOTGraphTraits<GRExprEngine::NodeTy*> :
Out << "\\ldefault:";
}
}
- else
+ else
Out << "\\l(implicit) default:";
}
else if (isa<IndirectGotoStmt>(T)) {
@@ -3251,46 +3092,45 @@ struct VISIBILITY_HIDDEN DOTGraphTraits<GRExprEngine::NodeTy*> :
if (*E.getSrc()->succ_begin() == E.getDst())
Out << "true";
else
- Out << "false";
+ Out << "false";
}
-
+
Out << "\\l";
}
-
+
if (GraphPrintCheckerState->isUndefControlFlow(N)) {
Out << "\\|Control-flow based on\\lUndefined value.\\l";
}
}
}
-
+
Out << "\\|StateID: " << (void*) N->getState() << "\\|";
const GRState *state = N->getState();
state->printDOT(Out);
-
+
Out << "\\l";
return Out.str();
}
};
-} // end llvm namespace
+} // end llvm namespace
#endif
#ifndef NDEBUG
template <typename ITERATOR>
-GRExprEngine::NodeTy* GetGraphNode(ITERATOR I) { return *I; }
+ExplodedNode* GetGraphNode(ITERATOR I) { return *I; }
-template <>
-GRExprEngine::NodeTy*
-GetGraphNode<llvm::DenseMap<GRExprEngine::NodeTy*, Expr*>::iterator>
- (llvm::DenseMap<GRExprEngine::NodeTy*, Expr*>::iterator I) {
+template <> ExplodedNode*
+GetGraphNode<llvm::DenseMap<ExplodedNode*, Expr*>::iterator>
+ (llvm::DenseMap<ExplodedNode*, Expr*>::iterator I) {
return I->first;
}
#endif
void GRExprEngine::ViewGraph(bool trim) {
-#ifndef NDEBUG
+#ifndef NDEBUG
if (trim) {
- std::vector<NodeTy*> Src;
+ std::vector<ExplodedNode*> Src;
// Flush any outstanding reports to make sure we cover all the nodes.
// This does not cause them to get displayed.
@@ -3299,14 +3139,15 @@ void GRExprEngine::ViewGraph(bool trim) {
// Iterate through the reports and get their nodes.
for (BugReporter::iterator I=BR.begin(), E=BR.end(); I!=E; ++I) {
- for (BugType::const_iterator I2=(*I)->begin(), E2=(*I)->end(); I2!=E2; ++I2) {
+ for (BugType::const_iterator I2=(*I)->begin(), E2=(*I)->end();
+ I2!=E2; ++I2) {
const BugReportEquivClass& EQ = *I2;
const BugReport &R = **EQ.begin();
- NodeTy *N = const_cast<NodeTy*>(R.getEndNode());
+ ExplodedNode *N = const_cast<ExplodedNode*>(R.getEndNode());
if (N) Src.push_back(N);
}
}
-
+
ViewGraph(&Src[0], &Src[0]+Src.size());
}
else {
@@ -3314,25 +3155,25 @@ void GRExprEngine::ViewGraph(bool trim) {
GraphPrintSourceManager = &getContext().getSourceManager();
llvm::ViewGraph(*G.roots_begin(), "GRExprEngine");
-
+
GraphPrintCheckerState = NULL;
GraphPrintSourceManager = NULL;
}
#endif
}
-void GRExprEngine::ViewGraph(NodeTy** Beg, NodeTy** End) {
+void GRExprEngine::ViewGraph(ExplodedNode** Beg, ExplodedNode** End) {
#ifndef NDEBUG
GraphPrintCheckerState = this;
GraphPrintSourceManager = &getContext().getSourceManager();
-
- std::auto_ptr<GRExprEngine::GraphTy> TrimmedG(G.Trim(Beg, End).first);
+
+ std::auto_ptr<ExplodedGraph> TrimmedG(G.Trim(Beg, End).first);
if (!TrimmedG.get())
- llvm::cerr << "warning: Trimmed ExplodedGraph is empty.\n";
+ llvm::errs() << "warning: Trimmed ExplodedGraph is empty.\n";
else
- llvm::ViewGraph(*TrimmedG->roots_begin(), "TrimmedGRExprEngine");
-
+ llvm::ViewGraph(*TrimmedG->roots_begin(), "TrimmedGRExprEngine");
+
GraphPrintCheckerState = NULL;
GraphPrintSourceManager = NULL;
#endif
diff --git a/lib/Analysis/GRExprEngineInternalChecks.cpp b/lib/Analysis/GRExprEngineInternalChecks.cpp
index a2ce79a2f390..cc1ec4b77e48 100644
--- a/lib/Analysis/GRExprEngineInternalChecks.cpp
+++ b/lib/Analysis/GRExprEngineInternalChecks.cpp
@@ -14,42 +14,30 @@
#include "clang/Analysis/PathSensitive/BugReporter.h"
#include "clang/Analysis/PathSensitive/GRExprEngine.h"
+#include "clang/Analysis/PathSensitive/CheckerVisitor.h"
#include "clang/Analysis/PathDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
+using namespace clang::bugreporter;
//===----------------------------------------------------------------------===//
// Utility functions.
//===----------------------------------------------------------------------===//
template <typename ITERATOR> inline
-ExplodedNode<GRState>* GetNode(ITERATOR I) {
+ExplodedNode* GetNode(ITERATOR I) {
return *I;
}
template <> inline
-ExplodedNode<GRState>* GetNode(GRExprEngine::undef_arg_iterator I) {
+ExplodedNode* GetNode(GRExprEngine::undef_arg_iterator I) {
return I->first;
}
//===----------------------------------------------------------------------===//
-// Forward declarations for bug reporter visitors.
-//===----------------------------------------------------------------------===//
-
-static const Stmt *GetDerefExpr(const ExplodedNode<GRState> *N);
-static const Stmt *GetReceiverExpr(const ExplodedNode<GRState> *N);
-static const Stmt *GetDenomExpr(const ExplodedNode<GRState> *N);
-static const Stmt *GetCalleeExpr(const ExplodedNode<GRState> *N);
-static const Stmt *GetRetValExpr(const ExplodedNode<GRState> *N);
-
-static void registerTrackNullOrUndefValue(BugReporterContext& BRC,
- const Stmt *ValExpr,
- const ExplodedNode<GRState>* N);
-
-//===----------------------------------------------------------------------===//
// Bug Descriptions.
//===----------------------------------------------------------------------===//
@@ -58,17 +46,17 @@ namespace {
class VISIBILITY_HIDDEN BuiltinBugReport : public RangedBugReport {
public:
BuiltinBugReport(BugType& bt, const char* desc,
- ExplodedNode<GRState> *n)
+ ExplodedNode *n)
: RangedBugReport(bt, desc, n) {}
-
+
BuiltinBugReport(BugType& bt, const char *shortDesc, const char *desc,
- ExplodedNode<GRState> *n)
- : RangedBugReport(bt, shortDesc, desc, n) {}
-
+ ExplodedNode *n)
+ : RangedBugReport(bt, shortDesc, desc, n) {}
+
void registerInitialVisitors(BugReporterContext& BRC,
- const ExplodedNode<GRState>* N);
-};
-
+ const ExplodedNode* N);
+};
+
class VISIBILITY_HIDDEN BuiltinBug : public BugType {
GRExprEngine &Eng;
protected:
@@ -79,30 +67,32 @@ public:
BuiltinBug(GRExprEngine *eng, const char* n)
: BugType(n, "Logic errors"), Eng(*eng), desc(n) {}
-
- virtual void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) = 0;
+
+ const std::string &getDescription() const { return desc; }
+
+ virtual void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {}
void FlushReports(BugReporter& BR) { FlushReportsImpl(BR, Eng); }
-
+
virtual void registerInitialVisitors(BugReporterContext& BRC,
- const ExplodedNode<GRState>* N,
+ const ExplodedNode* N,
BuiltinBugReport *R) {}
-
+
template <typename ITER> void Emit(BugReporter& BR, ITER I, ITER E);
};
-
-
+
+
template <typename ITER>
void BuiltinBug::Emit(BugReporter& BR, ITER I, ITER E) {
for (; I != E; ++I) BR.EmitReport(new BuiltinBugReport(*this, desc.c_str(),
GetNode(I)));
-}
+}
void BuiltinBugReport::registerInitialVisitors(BugReporterContext& BRC,
- const ExplodedNode<GRState>* N) {
+ const ExplodedNode* N) {
static_cast<BuiltinBug&>(getBugType()).registerInitialVisitors(BRC, N, this);
-}
-
+}
+
class VISIBILITY_HIDDEN NullDeref : public BuiltinBug {
public:
NullDeref(GRExprEngine* eng)
@@ -111,14 +101,14 @@ public:
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
Emit(BR, Eng.null_derefs_begin(), Eng.null_derefs_end());
}
-
+
void registerInitialVisitors(BugReporterContext& BRC,
- const ExplodedNode<GRState>* N,
+ const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, GetDerefExpr(N), N);
}
};
-
+
class VISIBILITY_HIDDEN NilReceiverStructRet : public BuiltinBug {
public:
NilReceiverStructRet(GRExprEngine* eng) :
@@ -132,20 +122,20 @@ public:
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
PostStmt P = cast<PostStmt>((*I)->getLocation());
- ObjCMessageExpr *ME = cast<ObjCMessageExpr>(P.getStmt());
+ const ObjCMessageExpr *ME = cast<ObjCMessageExpr>(P.getStmt());
os << "The receiver in the message expression is 'nil' and results in the"
" returned value (of type '"
<< ME->getType().getAsString()
- << "') to be garbage or otherwise undefined.";
+ << "') to be garbage or otherwise undefined";
BuiltinBugReport *R = new BuiltinBugReport(*this, os.str().c_str(), *I);
R->addRange(ME->getReceiver()->getSourceRange());
BR.EmitReport(R);
}
}
-
+
void registerInitialVisitors(BugReporterContext& BRC,
- const ExplodedNode<GRState>* N,
+ const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, GetReceiverExpr(N), N);
}
@@ -156,46 +146,46 @@ public:
NilReceiverLargerThanVoidPtrRet(GRExprEngine* eng) :
BuiltinBug(eng,
"'nil' receiver with return type larger than sizeof(void *)") {}
-
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
for (GRExprEngine::nil_receiver_larger_than_voidptr_ret_iterator
I=Eng.nil_receiver_larger_than_voidptr_ret_begin(),
E=Eng.nil_receiver_larger_than_voidptr_ret_end(); I!=E; ++I) {
-
+
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
PostStmt P = cast<PostStmt>((*I)->getLocation());
- ObjCMessageExpr *ME = cast<ObjCMessageExpr>(P.getStmt());
+ const ObjCMessageExpr *ME = cast<ObjCMessageExpr>(P.getStmt());
os << "The receiver in the message expression is 'nil' and results in the"
" returned value (of type '"
<< ME->getType().getAsString()
<< "' and of size "
<< Eng.getContext().getTypeSize(ME->getType()) / 8
- << " bytes) to be garbage or otherwise undefined.";
-
+ << " bytes) to be garbage or otherwise undefined";
+
BuiltinBugReport *R = new BuiltinBugReport(*this, os.str().c_str(), *I);
R->addRange(ME->getReceiver()->getSourceRange());
BR.EmitReport(R);
}
- }
+ }
void registerInitialVisitors(BugReporterContext& BRC,
- const ExplodedNode<GRState>* N,
+ const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, GetReceiverExpr(N), N);
}
};
-
+
class VISIBILITY_HIDDEN UndefinedDeref : public BuiltinBug {
public:
UndefinedDeref(GRExprEngine* eng)
: BuiltinBug(eng,"Dereference of undefined pointer value") {}
-
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
Emit(BR, Eng.undef_derefs_begin(), Eng.undef_derefs_end());
}
-
+
void registerInitialVisitors(BugReporterContext& BRC,
- const ExplodedNode<GRState>* N,
+ const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, GetDerefExpr(N), N);
}
@@ -203,43 +193,100 @@ public:
class VISIBILITY_HIDDEN DivZero : public BuiltinBug {
public:
- DivZero(GRExprEngine* eng)
- : BuiltinBug(eng,"Division-by-zero",
- "Division by zero or undefined value.") {}
-
- void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
- Emit(BR, Eng.explicit_bad_divides_begin(), Eng.explicit_bad_divides_end());
- }
-
+ DivZero(GRExprEngine* eng = 0)
+ : BuiltinBug(eng,"Division by zero") {}
+
void registerInitialVisitors(BugReporterContext& BRC,
- const ExplodedNode<GRState>* N,
+ const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, GetDenomExpr(N), N);
}
};
-
+
class VISIBILITY_HIDDEN UndefResult : public BuiltinBug {
public:
- UndefResult(GRExprEngine* eng) : BuiltinBug(eng,"Undefined result",
- "Result of operation is undefined.") {}
-
+ UndefResult(GRExprEngine* eng)
+ : BuiltinBug(eng,"Undefined or garbage result",
+ "Result of operation is garbage or undefined") {}
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
- Emit(BR, Eng.undef_results_begin(), Eng.undef_results_end());
+ for (GRExprEngine::undef_result_iterator I=Eng.undef_results_begin(),
+ E = Eng.undef_results_end(); I!=E; ++I) {
+
+ ExplodedNode *N = *I;
+ const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+ BuiltinBugReport *report = NULL;
+
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(S)) {
+ llvm::SmallString<256> sbuf;
+ llvm::raw_svector_ostream OS(sbuf);
+ const GRState *ST = N->getState();
+ const Expr *Ex = NULL;
+ bool isLeft = true;
+
+ if (ST->getSVal(B->getLHS()).isUndef()) {
+ Ex = B->getLHS()->IgnoreParenCasts();
+ isLeft = true;
+ }
+ else if (ST->getSVal(B->getRHS()).isUndef()) {
+ Ex = B->getRHS()->IgnoreParenCasts();
+ isLeft = false;
+ }
+
+ if (Ex) {
+ OS << "The " << (isLeft ? "left" : "right")
+ << " operand of '"
+ << BinaryOperator::getOpcodeStr(B->getOpcode())
+ << "' is a garbage value";
+ }
+ else {
+ // Neither operand was undefined, but the result is undefined.
+ OS << "The result of the '"
+ << BinaryOperator::getOpcodeStr(B->getOpcode())
+ << "' expression is undefined";
+ }
+
+ // FIXME: Use StringRefs to pass string information.
+ report = new BuiltinBugReport(*this, OS.str().str().c_str(), N);
+ if (Ex) report->addRange(Ex->getSourceRange());
+ }
+ else {
+ report = new BuiltinBugReport(*this,
+ "Expression evaluates to an uninitialized"
+ " or undefined value", N);
+ }
+
+ BR.EmitReport(report);
+ }
}
-};
+ void registerInitialVisitors(BugReporterContext& BRC,
+ const ExplodedNode* N,
+ BuiltinBugReport *R) {
+
+ const Stmt *S = N->getLocationAs<StmtPoint>()->getStmt();
+ const Stmt *X = S;
+
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(S)) {
+ const GRState *ST = N->getState();
+ if (ST->getSVal(B->getLHS()).isUndef())
+ X = B->getLHS();
+ else if (ST->getSVal(B->getRHS()).isUndef())
+ X = B->getRHS();
+ }
+
+ registerTrackNullOrUndefValue(BRC, X, N);
+ }
+};
+
class VISIBILITY_HIDDEN BadCall : public BuiltinBug {
public:
- BadCall(GRExprEngine *eng)
+ BadCall(GRExprEngine *eng = 0)
: BuiltinBug(eng, "Invalid function call",
"Called function pointer is a null or undefined pointer value") {}
-
- void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
- Emit(BR, Eng.bad_calls_begin(), Eng.bad_calls_end());
- }
-
+
void registerInitialVisitors(BugReporterContext& BRC,
- const ExplodedNode<GRState>* N,
+ const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, GetCalleeExpr(N), N);
}
@@ -249,76 +296,65 @@ public:
class VISIBILITY_HIDDEN ArgReport : public BuiltinBugReport {
const Stmt *Arg;
public:
- ArgReport(BugType& bt, const char* desc, ExplodedNode<GRState> *n,
+ ArgReport(BugType& bt, const char* desc, ExplodedNode *n,
const Stmt *arg)
: BuiltinBugReport(bt, desc, n), Arg(arg) {}
-
+
ArgReport(BugType& bt, const char *shortDesc, const char *desc,
- ExplodedNode<GRState> *n, const Stmt *arg)
- : BuiltinBugReport(bt, shortDesc, desc, n), Arg(arg) {}
-
- const Stmt *getArg() const { return Arg; }
+ ExplodedNode *n, const Stmt *arg)
+ : BuiltinBugReport(bt, shortDesc, desc, n), Arg(arg) {}
+
+ const Stmt *getArg() const { return Arg; }
};
class VISIBILITY_HIDDEN BadArg : public BuiltinBug {
-public:
- BadArg(GRExprEngine* eng) : BuiltinBug(eng,"Uninitialized argument",
- "Pass-by-value argument in function call is undefined.") {}
+public:
+ BadArg(GRExprEngine* eng=0) : BuiltinBug(eng,"Uninitialized argument",
+ "Pass-by-value argument in function call is undefined") {}
BadArg(GRExprEngine* eng, const char* d)
: BuiltinBug(eng,"Uninitialized argument", d) {}
-
- void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
- for (GRExprEngine::UndefArgsTy::iterator I = Eng.undef_arg_begin(),
- E = Eng.undef_arg_end(); I!=E; ++I) {
- // Generate a report for this bug.
- ArgReport *report = new ArgReport(*this, desc.c_str(), I->first,
- I->second);
- report->addRange(I->second->getSourceRange());
- BR.EmitReport(report);
- }
- }
void registerInitialVisitors(BugReporterContext& BRC,
- const ExplodedNode<GRState>* N,
+ const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, static_cast<ArgReport*>(R)->getArg(),
N);
- }
+ }
};
-
+
class VISIBILITY_HIDDEN BadMsgExprArg : public BadArg {
public:
- BadMsgExprArg(GRExprEngine* eng)
+ BadMsgExprArg(GRExprEngine* eng)
: BadArg(eng,"Pass-by-value argument in message expression is undefined"){}
-
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
for (GRExprEngine::UndefArgsTy::iterator I=Eng.msg_expr_undef_arg_begin(),
- E = Eng.msg_expr_undef_arg_end(); I!=E; ++I) {
+ E = Eng.msg_expr_undef_arg_end(); I!=E; ++I) {
// Generate a report for this bug.
ArgReport *report = new ArgReport(*this, desc.c_str(), I->first,
I->second);
report->addRange(I->second->getSourceRange());
BR.EmitReport(report);
- }
- }
+ }
+ }
};
-
+
class VISIBILITY_HIDDEN BadReceiver : public BuiltinBug {
-public:
+public:
BadReceiver(GRExprEngine* eng)
: BuiltinBug(eng,"Uninitialized receiver",
"Receiver in message expression is an uninitialized value") {}
-
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
for (GRExprEngine::ErrorNodes::iterator I=Eng.undef_receivers_begin(),
End = Eng.undef_receivers_end(); I!=End; ++I) {
-
+
// Generate a report for this bug.
BuiltinBugReport *report = new BuiltinBugReport(*this, desc.c_str(), *I);
- ExplodedNode<GRState>* N = *I;
- Stmt *S = cast<PostStmt>(N->getLocation()).getStmt();
- Expr* E = cast<ObjCMessageExpr>(S)->getReceiver();
+ ExplodedNode* N = *I;
+ const Stmt *S = cast<PostStmt>(N->getLocation()).getStmt();
+ const Expr* E = cast<ObjCMessageExpr>(S)->getReceiver();
assert (E && "Receiver cannot be NULL");
report->addRange(E->getSourceRange());
BR.EmitReport(report);
@@ -326,61 +362,61 @@ public:
}
void registerInitialVisitors(BugReporterContext& BRC,
- const ExplodedNode<GRState>* N,
+ const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, GetReceiverExpr(N), N);
- }
+ }
};
class VISIBILITY_HIDDEN RetStack : public BuiltinBug {
public:
RetStack(GRExprEngine* eng)
: BuiltinBug(eng, "Return of address to stack-allocated memory") {}
-
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
for (GRExprEngine::ret_stackaddr_iterator I=Eng.ret_stackaddr_begin(),
End = Eng.ret_stackaddr_end(); I!=End; ++I) {
- ExplodedNode<GRState>* N = *I;
- Stmt *S = cast<PostStmt>(N->getLocation()).getStmt();
- Expr* E = cast<ReturnStmt>(S)->getRetValue();
- assert (E && "Return expression cannot be NULL");
-
+ ExplodedNode* N = *I;
+ const Stmt *S = cast<PostStmt>(N->getLocation()).getStmt();
+ const Expr* E = cast<ReturnStmt>(S)->getRetValue();
+ assert(E && "Return expression cannot be NULL");
+
// Get the value associated with E.
loc::MemRegionVal V = cast<loc::MemRegionVal>(N->getState()->getSVal(E));
-
+
// Generate a report for this bug.
std::string buf;
llvm::raw_string_ostream os(buf);
SourceRange R;
-
+
// Check if the region is a compound literal.
- if (const CompoundLiteralRegion* CR =
+ if (const CompoundLiteralRegion* CR =
dyn_cast<CompoundLiteralRegion>(V.getRegion())) {
-
+
const CompoundLiteralExpr* CL = CR->getLiteralExpr();
os << "Address of stack memory associated with a compound literal "
"declared on line "
<< BR.getSourceManager()
.getInstantiationLineNumber(CL->getLocStart())
<< " returned.";
-
+
R = CL->getSourceRange();
}
else if (const AllocaRegion* AR = dyn_cast<AllocaRegion>(V.getRegion())) {
const Expr* ARE = AR->getExpr();
SourceLocation L = ARE->getLocStart();
R = ARE->getSourceRange();
-
+
os << "Address of stack memory allocated by call to alloca() on line "
<< BR.getSourceManager().getInstantiationLineNumber(L)
<< " returned.";
- }
- else {
+ }
+ else {
os << "Address of stack memory associated with local variable '"
<< V.getRegion()->getString() << "' returned.";
}
-
+
RangedBugReport *report = new RangedBugReport(*this, os.str().c_str(), N);
report->addRange(E->getSourceRange());
if (R.isValid()) report->addRange(R);
@@ -388,51 +424,52 @@ public:
}
}
};
-
+
class VISIBILITY_HIDDEN RetUndef : public BuiltinBug {
public:
- RetUndef(GRExprEngine* eng) : BuiltinBug(eng, "Uninitialized return value",
- "Uninitialized or undefined value returned to caller.") {}
-
+ RetUndef(GRExprEngine* eng) : BuiltinBug(eng, "Garbage return value",
+ "Undefined or garbage value returned to caller") {}
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
Emit(BR, Eng.ret_undef_begin(), Eng.ret_undef_end());
}
-
+
void registerInitialVisitors(BugReporterContext& BRC,
- const ExplodedNode<GRState>* N,
+ const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, GetRetValExpr(N), N);
- }
+ }
};
class VISIBILITY_HIDDEN UndefBranch : public BuiltinBug {
struct VISIBILITY_HIDDEN FindUndefExpr {
GRStateManager& VM;
const GRState* St;
-
+
FindUndefExpr(GRStateManager& V, const GRState* S) : VM(V), St(S) {}
-
- Expr* FindExpr(Expr* Ex) {
+
+ Expr* FindExpr(Expr* Ex) {
if (!MatchesCriteria(Ex))
return 0;
-
+
for (Stmt::child_iterator I=Ex->child_begin(), E=Ex->child_end();I!=E;++I)
if (Expr* ExI = dyn_cast_or_null<Expr>(*I)) {
Expr* E2 = FindExpr(ExI);
if (E2) return E2;
}
-
+
return Ex;
}
-
+
bool MatchesCriteria(Expr* Ex) { return St->getSVal(Ex).isUndef(); }
};
-
+
public:
UndefBranch(GRExprEngine *eng)
- : BuiltinBug(eng,"Use of uninitialized value",
- "Branch condition evaluates to an uninitialized value.") {}
-
+ : BuiltinBug(eng,"Use of garbage value",
+ "Branch condition evaluates to an undefined or garbage value")
+ {}
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
for (GRExprEngine::undef_branch_iterator I=Eng.undef_branches_begin(),
E=Eng.undef_branches_end(); I!=E; ++I) {
@@ -455,7 +492,7 @@ public:
// Note: any predecessor will do. They should have identical state,
// since all the BlockEdge did was act as an error sink since the value
// had to already be undefined.
- ExplodedNode<GRState> *N = *(*I)->pred_begin();
+ ExplodedNode *N = *(*I)->pred_begin();
ProgramPoint P = N->getLocation();
const GRState* St = (*I)->getState();
@@ -471,9 +508,9 @@ public:
BR.EmitReport(R);
}
}
-
+
void registerInitialVisitors(BugReporterContext& BRC,
- const ExplodedNode<GRState>* N,
+ const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, static_cast<ArgReport*>(R)->getArg(),
N);
@@ -490,12 +527,12 @@ public:
Emit(BR, Eng.explicit_oob_memacc_begin(), Eng.explicit_oob_memacc_end());
}
};
-
+
class VISIBILITY_HIDDEN BadSizeVLA : public BuiltinBug {
public:
BadSizeVLA(GRExprEngine* eng) :
BuiltinBug(eng, "Bad variable-length array (VLA) size") {}
-
+
void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
for (GRExprEngine::ErrorNodes::iterator
I = Eng.ExplicitBadSizedVLA.begin(),
@@ -503,27 +540,27 @@ public:
// Determine whether this was a 'zero-sized' VLA or a VLA with an
// undefined size.
- GRExprEngine::NodeTy* N = *I;
- PostStmt PS = cast<PostStmt>(N->getLocation());
- DeclStmt *DS = cast<DeclStmt>(PS.getStmt());
+ ExplodedNode* N = *I;
+ PostStmt PS = cast<PostStmt>(N->getLocation());
+ const DeclStmt *DS = cast<DeclStmt>(PS.getStmt());
VarDecl* VD = cast<VarDecl>(*DS->decl_begin());
QualType T = Eng.getContext().getCanonicalType(VD->getType());
VariableArrayType* VT = cast<VariableArrayType>(T);
Expr* SizeExpr = VT->getSizeExpr();
-
+
std::string buf;
llvm::raw_string_ostream os(buf);
os << "The expression used to specify the number of elements in the "
"variable-length array (VLA) '"
<< VD->getNameAsString() << "' evaluates to ";
-
+
bool isUndefined = N->getState()->getSVal(SizeExpr).isUndef();
-
+
if (isUndefined)
os << "an undefined or garbage value.";
else
os << "0. VLAs with no elements have undefined behavior.";
-
+
std::string shortBuf;
llvm::raw_string_ostream os_short(shortBuf);
os_short << "Variable-length array '" << VD->getNameAsString() << "' "
@@ -537,9 +574,9 @@ public:
BR.EmitReport(report);
}
}
-
+
void registerInitialVisitors(BugReporterContext& BRC,
- const ExplodedNode<GRState>* N,
+ const ExplodedNode* N,
BuiltinBugReport *R) {
registerTrackNullOrUndefValue(BRC, static_cast<ArgReport*>(R)->getArg(),
N);
@@ -549,370 +586,217 @@ public:
//===----------------------------------------------------------------------===//
// __attribute__(nonnull) checking
-class VISIBILITY_HIDDEN CheckAttrNonNull : public GRSimpleAPICheck {
+class VISIBILITY_HIDDEN CheckAttrNonNull :
+ public CheckerVisitor<CheckAttrNonNull> {
+
BugType *BT;
- BugReporter &BR;
-
+
public:
- CheckAttrNonNull(BugReporter &br) : BT(0), BR(br) {}
+ CheckAttrNonNull() : BT(0) {}
+ ~CheckAttrNonNull() {}
- virtual bool Audit(ExplodedNode<GRState>* N, GRStateManager& VMgr) {
- CallExpr* CE = cast<CallExpr>(cast<PostStmt>(N->getLocation()).getStmt());
- const GRState* state = N->getState();
-
+ const void *getTag() {
+ static int x = 0;
+ return &x;
+ }
+
+ void PreVisitCallExpr(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ const GRState *originalState = state;
+
+ // Check if the callee has a 'nonnull' attribute.
SVal X = state->getSVal(CE->getCallee());
const FunctionDecl* FD = X.getAsFunctionDecl();
if (!FD)
- return false;
+ return;
const NonNullAttr* Att = FD->getAttr<NonNullAttr>();
-
if (!Att)
- return false;
-
+ return;
+
// Iterate through the arguments of CE and check them for null.
unsigned idx = 0;
- bool hasError = false;
-
- for (CallExpr::arg_iterator I=CE->arg_begin(), E=CE->arg_end(); I!=E;
+
+ for (CallExpr::const_arg_iterator I=CE->arg_begin(), E=CE->arg_end(); I!=E;
++I, ++idx) {
-
- if (!VMgr.isEqual(state, *I, 0) || !Att->isNonNull(idx))
+
+ if (!Att->isNonNull(idx))
continue;
- // Lazily allocate the BugType object if it hasn't already been created.
- // Ownership is transferred to the BugReporter object once the BugReport
- // is passed to 'EmitWarning'.
- if (!BT) BT =
- new BugType("Argument with 'nonnull' attribute passed null", "API");
-
- RangedBugReport *R = new RangedBugReport(*BT,
- "Null pointer passed as an argument to a "
- "'nonnull' parameter", N);
+ const SVal &V = state->getSVal(*I);
+ const DefinedSVal *DV = dyn_cast<DefinedSVal>(&V);
- R->addRange((*I)->getSourceRange());
- BR.EmitReport(R);
- hasError = true;
+ if (!DV)
+ continue;
+
+ ConstraintManager &CM = C.getConstraintManager();
+ const GRState *stateNotNull, *stateNull;
+ llvm::tie(stateNotNull, stateNull) = CM.AssumeDual(state, *DV);
+
+ if (stateNull && !stateNotNull) {
+ // Generate an error node. Check for a null node in case
+ // we cache out.
+ if (ExplodedNode *errorNode = C.GenerateNode(CE, stateNull, true)) {
+
+ // Lazily allocate the BugType object if it hasn't already been
+ // created. Ownership is transferred to the BugReporter object once
+ // the BugReport is passed to 'EmitWarning'.
+ if (!BT)
+ BT = new BugType("Argument with 'nonnull' attribute passed null",
+ "API");
+
+ EnhancedBugReport *R =
+ new EnhancedBugReport(*BT,
+ "Null pointer passed as an argument to a "
+ "'nonnull' parameter", errorNode);
+
+ // Highlight the range of the argument that was null.
+ const Expr *arg = *I;
+ R->addRange(arg->getSourceRange());
+ R->addVisitorCreator(registerTrackNullOrUndefValue, arg);
+
+ // Emit the bug report.
+ C.EmitReport(R);
+ }
+
+ // Always return. Either we cached out or we just emitted an error.
+ return;
+ }
+
+ // If a pointer value passed the check we should assume that it is
+ // indeed not null from this point forward.
+ assert(stateNotNull);
+ state = stateNotNull;
}
-
- return hasError;
+
+ // If we reach here all of the arguments passed the nonnull check.
+ // If 'state' has been updated generated a new node.
+ if (state != originalState)
+ C.addTransition(C.GenerateNode(CE, state));
}
};
} // end anonymous namespace
-//===----------------------------------------------------------------------===//
-// Definitions for bug reporter visitors.
-//===----------------------------------------------------------------------===//
+// Undefined arguments checking.
+namespace {
+class VISIBILITY_HIDDEN CheckUndefinedArg
+ : public CheckerVisitor<CheckUndefinedArg> {
-static const Stmt *GetDerefExpr(const ExplodedNode<GRState> *N) {
- // Pattern match for a few useful cases (do something smarter later):
- // a[0], p->f, *p
- const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+ BadArg *BT;
- if (const UnaryOperator *U = dyn_cast<UnaryOperator>(S)) {
- if (U->getOpcode() == UnaryOperator::Deref)
- return U->getSubExpr()->IgnoreParenCasts();
- }
- else if (const MemberExpr *ME = dyn_cast<MemberExpr>(S)) {
- return ME->getBase()->IgnoreParenCasts();
+public:
+ CheckUndefinedArg() : BT(0) {}
+ ~CheckUndefinedArg() {}
+
+ const void *getTag() {
+ static int x = 0;
+ return &x;
}
- else if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(S)) {
- // Retrieve the base for arrays since BasicStoreManager doesn't know how
- // to reason about them.
- return AE->getBase();
+
+ void PreVisitCallExpr(CheckerContext &C, const CallExpr *CE);
+};
+
+void CheckUndefinedArg::PreVisitCallExpr(CheckerContext &C, const CallExpr *CE){
+ for (CallExpr::const_arg_iterator I = CE->arg_begin(), E = CE->arg_end();
+ I != E; ++I) {
+ if (C.getState()->getSVal(*I).isUndef()) {
+ if (ExplodedNode *ErrorNode = C.GenerateNode(CE, true)) {
+ if (!BT)
+ BT = new BadArg();
+ // Generate a report for this bug.
+ ArgReport *Report = new ArgReport(*BT, BT->getDescription().c_str(),
+ ErrorNode, *I);
+ Report->addRange((*I)->getSourceRange());
+ C.EmitReport(Report);
+ }
+ }
}
-
- return NULL;
}
-static const Stmt *GetReceiverExpr(const ExplodedNode<GRState> *N) {
- const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
- if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S))
- return ME->getReceiver();
- return NULL;
-}
-
-static const Stmt *GetDenomExpr(const ExplodedNode<GRState> *N) {
- const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
- if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(S))
- return BE->getRHS();
- return NULL;
-}
-
-static const Stmt *GetCalleeExpr(const ExplodedNode<GRState> *N) {
- const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
- if (const CallExpr *CE = dyn_cast<CallExpr>(S))
- return CE->getCallee();
- return NULL;
-}
-
-static const Stmt *GetRetValExpr(const ExplodedNode<GRState> *N) {
- const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
- if (const ReturnStmt *RS = dyn_cast<ReturnStmt>(S))
- return RS->getRetValue();
- return NULL;
-}
+class VISIBILITY_HIDDEN CheckBadCall : public CheckerVisitor<CheckBadCall> {
+ BadCall *BT;
-namespace {
-class VISIBILITY_HIDDEN FindLastStoreBRVisitor : public BugReporterVisitor {
- const MemRegion *R;
- SVal V;
- bool satisfied;
- const ExplodedNode<GRState> *StoreSite;
public:
- FindLastStoreBRVisitor(SVal v, const MemRegion *r)
- : R(r), V(v), satisfied(false), StoreSite(0) {}
-
- PathDiagnosticPiece* VisitNode(const ExplodedNode<GRState> *N,
- const ExplodedNode<GRState> *PrevN,
- BugReporterContext& BRC) {
-
- if (satisfied)
- return NULL;
-
- if (!StoreSite) {
- const ExplodedNode<GRState> *Node = N, *Last = NULL;
-
- for ( ; Node ; Last = Node, Node = Node->getFirstPred()) {
-
- if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
- if (const PostStmt *P = Node->getLocationAs<PostStmt>())
- if (const DeclStmt *DS = P->getStmtAs<DeclStmt>())
- if (DS->getSingleDecl() == VR->getDecl()) {
- Last = Node;
- break;
- }
- }
-
- if (Node->getState()->getSVal(R) != V)
- break;
- }
+ CheckBadCall() : BT(0) {}
+ ~CheckBadCall() {}
- if (!Node || !Last) {
- satisfied = true;
- return NULL;
- }
-
- StoreSite = Last;
- }
-
- if (StoreSite != N)
- return NULL;
+ const void *getTag() {
+ static int x = 0;
+ return &x;
+ }
- satisfied = true;
- std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
-
- if (const PostStmt *PS = N->getLocationAs<PostStmt>()) {
- if (const DeclStmt *DS = PS->getStmtAs<DeclStmt>()) {
-
- if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
- os << "Variable '" << VR->getDecl()->getNameAsString() << "' ";
- }
- else
- return NULL;
-
- if (isa<loc::ConcreteInt>(V)) {
- bool b = false;
- ASTContext &C = BRC.getASTContext();
- if (R->isBoundable()) {
- if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
- if (C.isObjCObjectPointerType(TR->getValueType(C))) {
- os << "initialized to nil";
- b = true;
- }
- }
- }
-
- if (!b)
- os << "initialized to a null pointer value";
- }
- else if (isa<nonloc::ConcreteInt>(V)) {
- os << "initialized to " << cast<nonloc::ConcreteInt>(V).getValue();
- }
- else if (V.isUndef()) {
- if (isa<VarRegion>(R)) {
- const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
- if (VD->getInit())
- os << "initialized to a garbage value";
- else
- os << "declared without an initial value";
- }
- }
- }
- }
+ void PreVisitCallExpr(CheckerContext &C, const CallExpr *CE);
+};
- if (os.str().empty()) {
- if (isa<loc::ConcreteInt>(V)) {
- bool b = false;
- ASTContext &C = BRC.getASTContext();
- if (R->isBoundable()) {
- if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
- if (C.isObjCObjectPointerType(TR->getValueType(C))) {
- os << "nil object reference stored to ";
- b = true;
- }
- }
- }
+void CheckBadCall::PreVisitCallExpr(CheckerContext &C, const CallExpr *CE) {
+ const Expr *Callee = CE->getCallee()->IgnoreParens();
+ SVal L = C.getState()->getSVal(Callee);
- if (!b)
- os << "Null pointer value stored to ";
- }
- else if (V.isUndef()) {
- os << "Uninitialized value stored to ";
- }
- else
- return NULL;
-
- if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
- os << '\'' << VR->getDecl()->getNameAsString() << '\'';
- }
- else
- return NULL;
+ if (L.isUndef() || isa<loc::ConcreteInt>(L)) {
+ if (ExplodedNode *N = C.GenerateNode(CE, true)) {
+ if (!BT)
+ BT = new BadCall();
+ C.EmitReport(new BuiltinBugReport(*BT, BT->getDescription().c_str(), N));
}
-
- // FIXME: Refactor this into BugReporterContext.
- Stmt *S = 0;
- ProgramPoint P = N->getLocation();
-
- if (BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
- CFGBlock *BSrc = BE->getSrc();
- S = BSrc->getTerminatorCondition();
- }
- else if (PostStmt *PS = dyn_cast<PostStmt>(&P)) {
- S = PS->getStmt();
- }
-
- if (!S)
- return NULL;
-
- // Construct a new PathDiagnosticPiece.
- PathDiagnosticLocation L(S, BRC.getSourceManager());
- return new PathDiagnosticEventPiece(L, os.str());
}
-};
-
-
-static void registerFindLastStore(BugReporterContext& BRC, const MemRegion *R,
- SVal V) {
- BRC.addVisitor(new FindLastStoreBRVisitor(V, R));
}
-class VISIBILITY_HIDDEN TrackConstraintBRVisitor : public BugReporterVisitor {
- SVal Constraint;
- const bool Assumption;
- bool isSatisfied;
+class VISIBILITY_HIDDEN CheckBadDiv : public CheckerVisitor<CheckBadDiv> {
+ DivZero *BT;
public:
- TrackConstraintBRVisitor(SVal constraint, bool assumption)
- : Constraint(constraint), Assumption(assumption), isSatisfied(false) {}
-
- PathDiagnosticPiece* VisitNode(const ExplodedNode<GRState> *N,
- const ExplodedNode<GRState> *PrevN,
- BugReporterContext& BRC) {
- if (isSatisfied)
- return NULL;
-
- // Check if in the previous state it was feasible for this constraint
- // to *not* be true.
- if (PrevN->getState()->assume(Constraint, !Assumption)) {
+ CheckBadDiv() : BT(0) {}
+ ~CheckBadDiv() {}
- isSatisfied = true;
-
- // As a sanity check, make sure that the negation of the constraint
- // was infeasible in the current state. If it is feasible, we somehow
- // missed the transition point.
- if (N->getState()->assume(Constraint, !Assumption))
- return NULL;
-
- // We found the transition point for the constraint. We now need to
- // pretty-print the constraint. (work-in-progress)
- std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
-
- if (isa<Loc>(Constraint)) {
- os << "Assuming pointer value is ";
- os << (Assumption ? "non-null" : "null");
- }
-
- if (os.str().empty())
- return NULL;
-
- // FIXME: Refactor this into BugReporterContext.
- Stmt *S = 0;
- ProgramPoint P = N->getLocation();
-
- if (BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
- CFGBlock *BSrc = BE->getSrc();
- S = BSrc->getTerminatorCondition();
- }
- else if (PostStmt *PS = dyn_cast<PostStmt>(&P)) {
- S = PS->getStmt();
- }
-
- if (!S)
- return NULL;
-
- // Construct a new PathDiagnosticPiece.
- PathDiagnosticLocation L(S, BRC.getSourceManager());
- return new PathDiagnosticEventPiece(L, os.str());
- }
-
- return NULL;
- }
+ const void *getTag() {
+ static int x;
+ return &x;
+ }
+
+ void PreVisitBinaryOperator(CheckerContext &C, const BinaryOperator *B);
};
-} // end anonymous namespace
-static void registerTrackConstraint(BugReporterContext& BRC, SVal Constraint,
- bool Assumption) {
- BRC.addVisitor(new TrackConstraintBRVisitor(Constraint, Assumption));
-}
+void CheckBadDiv::PreVisitBinaryOperator(CheckerContext &C,
+ const BinaryOperator *B) {
+ BinaryOperator::Opcode Op = B->getOpcode();
+ if (Op != BinaryOperator::Div &&
+ Op != BinaryOperator::Rem &&
+ Op != BinaryOperator::DivAssign &&
+ Op != BinaryOperator::RemAssign)
+ return;
-static void registerTrackNullOrUndefValue(BugReporterContext& BRC,
- const Stmt *S,
- const ExplodedNode<GRState>* N) {
-
- if (!S)
+ if (!B->getRHS()->getType()->isIntegerType() ||
+ !B->getRHS()->getType()->isScalarType())
return;
- GRStateManager &StateMgr = BRC.getStateManager();
- const GRState *state = N->getState();
-
- if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S)) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
- const VarRegion *R =
- StateMgr.getRegionManager().getVarRegion(VD);
-
- // What did we load?
- SVal V = state->getSVal(S);
-
- if (isa<loc::ConcreteInt>(V) || isa<nonloc::ConcreteInt>(V)
- || V.isUndef()) {
- registerFindLastStore(BRC, R, V);
- }
- }
- }
-
- SVal V = state->getSValAsScalarOrLoc(S);
-
- // Uncomment this to find cases where we aren't properly getting the
- // base value that was dereferenced.
- // assert(!V.isUnknownOrUndef());
-
- // Is it a symbolic value?
- if (loc::MemRegionVal *L = dyn_cast<loc::MemRegionVal>(&V)) {
- const SubRegion *R = cast<SubRegion>(L->getRegion());
- while (R && !isa<SymbolicRegion>(R)) {
- R = dyn_cast<SubRegion>(R->getSuperRegion());
- }
-
- if (R) {
- assert(isa<SymbolicRegion>(R));
- registerTrackConstraint(BRC, loc::MemRegionVal(R), false);
+ SVal Denom = C.getState()->getSVal(B->getRHS());
+ const DefinedSVal *DV = dyn_cast<DefinedSVal>(&Denom);
+
+ // Divide-by-undefined handled in the generic checking for uses of
+ // undefined values.
+ if (!DV)
+ return;
+
+ // Check for divide by zero.
+ ConstraintManager &CM = C.getConstraintManager();
+ const GRState *stateNotZero, *stateZero;
+ llvm::tie(stateNotZero, stateZero) = CM.AssumeDual(C.getState(), *DV);
+
+ if (stateZero && !stateNotZero) {
+ if (ExplodedNode *N = C.GenerateNode(B, stateZero, true)) {
+ if (!BT)
+ BT = new DivZero();
+
+ C.EmitReport(new BuiltinBugReport(*BT, BT->getDescription().c_str(), N));
}
+ return;
}
-}
+ // If we get here, then the denom should not be zero.
+ if (stateNotZero != C.getState())
+ C.addTransition(C.GenerateNode(B, stateNotZero));
+}
+}
//===----------------------------------------------------------------------===//
// Check registration.
//===----------------------------------------------------------------------===//
@@ -926,23 +810,23 @@ void GRExprEngine::RegisterInternalChecks() {
BR.Register(new NullDeref(this));
BR.Register(new UndefinedDeref(this));
BR.Register(new UndefBranch(this));
- BR.Register(new DivZero(this));
BR.Register(new UndefResult(this));
- BR.Register(new BadCall(this));
BR.Register(new RetStack(this));
BR.Register(new RetUndef(this));
- BR.Register(new BadArg(this));
BR.Register(new BadMsgExprArg(this));
BR.Register(new BadReceiver(this));
BR.Register(new OutOfBoundMemoryAccess(this));
BR.Register(new BadSizeVLA(this));
BR.Register(new NilReceiverStructRet(this));
BR.Register(new NilReceiverLargerThanVoidPtrRet(this));
-
+
// The following checks do not need to have their associated BugTypes
// explicitly registered with the BugReporter. If they issue any BugReports,
// their associated BugType will get registered with the BugReporter
// automatically. Note that the check itself is owned by the GRExprEngine
// object.
- AddCheck(new CheckAttrNonNull(BR), Stmt::CallExprClass);
+ registerCheck(new CheckAttrNonNull());
+ registerCheck(new CheckUndefinedArg());
+ registerCheck(new CheckBadCall());
+ registerCheck(new CheckBadDiv());
}
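
The registration hunk above reflects the move from the GRSimpleAPICheck/Audit() interface to CheckerVisitor subclasses that are handed to registerCheck() and report through CheckerContext. As a rough sketch of the pattern these new checkers share — not part of the commit; the class name and bug strings are invented, while getTag(), PreVisitCallExpr(), GenerateNode(), and EmitReport() are the calls used by CheckAttrNonNull, CheckUndefinedArg, and CheckBadCall above:

  namespace {
  class VISIBILITY_HIDDEN CheckExample : public CheckerVisitor<CheckExample> {
    BugType *BT;  // lazily allocated; ownership passes to the BugReporter
  public:
    CheckExample() : BT(0) {}

    const void *getTag() {
      static int x = 0;  // the address serves as a unique checker tag
      return &x;
    }

    void PreVisitCallExpr(CheckerContext &C, const CallExpr *CE) {
      // Inspect the callee value in the current state.
      SVal L = C.getState()->getSVal(CE->getCallee()->IgnoreParens());
      if (!L.isUndef())
        return;

      // Generate an error node; this can return null if we cache out.
      if (ExplodedNode *N = C.GenerateNode(CE, true)) {
        if (!BT)
          BT = new BugType("Hypothetical bug name", "API");
        RangedBugReport *R = new RangedBugReport(*BT, "Hypothetical report", N);
        R->addRange(CE->getCallee()->getSourceRange());
        C.EmitReport(R);
      }
    }
  };
  } // end anonymous namespace

  // Registered next to the others in GRExprEngine::RegisterInternalChecks():
  //   registerCheck(new CheckExample());
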
diff --git a/lib/Analysis/GRState.cpp b/lib/Analysis/GRState.cpp
index 54c0afbff33e..f269824d5477 100644
--- a/lib/Analysis/GRState.cpp
+++ b/lib/Analysis/GRState.cpp
@@ -27,7 +27,7 @@ GRStateManager::~GRStateManager() {
for (std::vector<GRState::Printer*>::iterator I=Printers.begin(),
E=Printers.end(); I!=E; ++I)
delete *I;
-
+
for (GDMContextsTy::iterator I=GDMContexts.begin(), E=GDMContexts.end();
I!=E; ++I)
I->second.second(I->second.first);
@@ -46,12 +46,11 @@ GRStateManager::RemoveDeadBindings(const GRState* state, Stmt* Loc,
llvm::SmallVector<const MemRegion*, 10> RegionRoots;
GRState NewState = *state;
- NewState.Env = EnvMgr.RemoveDeadBindings(NewState.Env, Loc, SymReaper, *this,
+ NewState.Env = EnvMgr.RemoveDeadBindings(NewState.Env, Loc, SymReaper,
state, RegionRoots);
// Clean up the store.
- NewState.St = StoreMgr->RemoveDeadBindings(&NewState, Loc, SymReaper,
- RegionRoots);
+ StoreMgr->RemoveDeadBindings(NewState, Loc, SymReaper, RegionRoots);
return ConstraintMgr->RemoveDeadBindings(getPersistentState(NewState),
SymReaper);
@@ -59,14 +58,14 @@ GRStateManager::RemoveDeadBindings(const GRState* state, Stmt* Loc,
const GRState *GRState::unbindLoc(Loc LV) const {
Store OldStore = getStore();
- Store NewStore = Mgr->StoreMgr->Remove(OldStore, LV);
-
+ Store NewStore = getStateManager().StoreMgr->Remove(OldStore, LV);
+
if (NewStore == OldStore)
return this;
-
+
GRState NewSt = *this;
NewSt.St = NewStore;
- return Mgr->getPersistentState(NewSt);
+ return getStateManager().getPersistentState(NewSt);
}
SVal GRState::getSValAsScalarOrLoc(const MemRegion *R) const {
@@ -77,7 +76,7 @@ SVal GRState::getSValAsScalarOrLoc(const MemRegion *R) const {
return UnknownVal();
if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
- QualType T = TR->getValueType(Mgr->getContext());
+ QualType T = TR->getValueType(getStateManager().getContext());
if (Loc::IsLocType(T) || T->isIntegerType())
return getSVal(R);
}
@@ -86,55 +85,37 @@ SVal GRState::getSValAsScalarOrLoc(const MemRegion *R) const {
}
-const GRState *GRState::bindExpr(const Stmt* Ex, SVal V, bool isBlkExpr,
- bool Invalidate) const {
-
- Environment NewEnv = Mgr->EnvMgr.BindExpr(Env, Ex, V, isBlkExpr, Invalidate);
-
+const GRState *GRState::BindExpr(const Stmt* Ex, SVal V, bool Invalidate) const{
+ Environment NewEnv = getStateManager().EnvMgr.BindExpr(Env, Ex, V,
+ Invalidate);
if (NewEnv == Env)
return this;
-
+
GRState NewSt = *this;
NewSt.Env = NewEnv;
- return Mgr->getPersistentState(NewSt);
-}
-
-const GRState *GRState::bindExpr(const Stmt* Ex, SVal V,
- bool Invalidate) const {
-
- bool isBlkExpr = false;
-
- if (Ex == Mgr->CurrentStmt) {
- // FIXME: Should this just be an assertion? When would we want to set
- // the value of a block-level expression if it wasn't CurrentStmt?
- isBlkExpr = Mgr->cfg.isBlkExpr(Ex);
-
- if (!isBlkExpr)
- return this;
- }
-
- return bindExpr(Ex, V, isBlkExpr, Invalidate);
+ return getStateManager().getPersistentState(NewSt);
}
-const GRState* GRStateManager::getInitialState() {
- GRState StateImpl(this, EnvMgr.getInitialEnvironment(),
- StoreMgr->getInitialStore(),
- GDMFactory.GetEmptyMap());
+const GRState* GRStateManager::getInitialState(const LocationContext *InitLoc) {
+ GRState State(this,
+ EnvMgr.getInitialEnvironment(InitLoc->getAnalysisContext()),
+ StoreMgr->getInitialStore(InitLoc),
+ GDMFactory.GetEmptyMap());
- return getPersistentState(StateImpl);
+ return getPersistentState(State);
}
const GRState* GRStateManager::getPersistentState(GRState& State) {
-
+
llvm::FoldingSetNodeID ID;
- State.Profile(ID);
+ State.Profile(ID);
void* InsertPos;
-
+
if (GRState* I = StateSet.FindNodeOrInsertPos(ID, InsertPos))
return I;
-
+
GRState* I = (GRState*) Alloc.Allocate<GRState>();
- new (I) GRState(State);
+ new (I) GRState(State);
StateSet.InsertNode(I, InsertPos);
return I;
}
@@ -142,7 +123,7 @@ const GRState* GRStateManager::getPersistentState(GRState& State) {
const GRState* GRState::makeWithStore(Store store) const {
GRState NewSt = *this;
NewSt.St = store;
- return Mgr->getPersistentState(NewSt);
+ return getStateManager().getPersistentState(NewSt);
}
//===----------------------------------------------------------------------===//
@@ -150,51 +131,56 @@ const GRState* GRState::makeWithStore(Store store) const {
//===----------------------------------------------------------------------===//
void GRState::print(llvm::raw_ostream& Out, const char* nl,
- const char* sep) const {
+ const char* sep) const {
// Print the store.
- Mgr->getStoreManager().print(getStore(), Out, nl, sep);
-
+ GRStateManager &Mgr = getStateManager();
+ Mgr.getStoreManager().print(getStore(), Out, nl, sep);
+
+ CFG &C = *getAnalysisContext().getCFG();
+
// Print Subexpression bindings.
bool isFirst = true;
-
- for (seb_iterator I = seb_begin(), E = seb_end(); I != E; ++I) {
-
+
+ for (Environment::iterator I = Env.begin(), E = Env.end(); I != E; ++I) {
+ if (C.isBlkExpr(I.getKey()))
+ continue;
+
if (isFirst) {
Out << nl << nl << "Sub-Expressions:" << nl;
isFirst = false;
}
else { Out << nl; }
-
+
Out << " (" << (void*) I.getKey() << ") ";
LangOptions LO; // FIXME.
I.getKey()->printPretty(Out, 0, PrintingPolicy(LO));
- Out << " : ";
- I.getData().print(Out);
+ Out << " : " << I.getData();
}
-
+
// Print block-expression bindings.
isFirst = true;
-
- for (beb_iterator I = beb_begin(), E = beb_end(); I != E; ++I) {
+
+ for (Environment::iterator I = Env.begin(), E = Env.end(); I != E; ++I) {
+ if (!C.isBlkExpr(I.getKey()))
+ continue;
if (isFirst) {
Out << nl << nl << "Block-level Expressions:" << nl;
isFirst = false;
}
else { Out << nl; }
-
+
Out << " (" << (void*) I.getKey() << ") ";
LangOptions LO; // FIXME.
I.getKey()->printPretty(Out, 0, PrintingPolicy(LO));
- Out << " : ";
- I.getData().print(Out);
+ Out << " : " << I.getData();
}
-
- Mgr->getConstraintManager().print(this, Out, nl, sep);
-
+
+ Mgr.getConstraintManager().print(this, Out, nl, sep);
+
// Print checker-specific data.
- for (std::vector<Printer*>::iterator I = Mgr->Printers.begin(),
- E = Mgr->Printers.end(); I != E; ++I) {
+ for (std::vector<Printer*>::iterator I = Mgr.Printers.begin(),
+ E = Mgr.Printers.end(); I != E; ++I) {
(*I)->Print(Out, this, nl, sep);
}
}
@@ -219,23 +205,23 @@ void*
GRStateManager::FindGDMContext(void* K,
void* (*CreateContext)(llvm::BumpPtrAllocator&),
void (*DeleteContext)(void*)) {
-
+
std::pair<void*, void (*)(void*)>& p = GDMContexts[K];
if (!p.first) {
p.first = CreateContext(Alloc);
p.second = DeleteContext;
}
-
+
return p.first;
}
const GRState* GRStateManager::addGDM(const GRState* St, void* Key, void* Data){
GRState::GenericDataMap M1 = St->getGDM();
GRState::GenericDataMap M2 = GDMFactory.Add(M1, Key, Data);
-
+
if (M1 == M2)
return St;
-
+
GRState NewSt = *St;
NewSt.GDM = M2;
return getPersistentState(NewSt);
@@ -254,14 +240,14 @@ class VISIBILITY_HIDDEN ScanReachableSymbols : public SubRegionMap::Visitor {
SymbolVisitor &visitor;
llvm::OwningPtr<SubRegionMap> SRM;
public:
-
+
ScanReachableSymbols(const GRState *st, SymbolVisitor& v)
: state(st), visitor(v) {}
-
+
bool scan(nonloc::CompoundVal val);
bool scan(SVal val);
bool scan(const MemRegion *R);
-
+
// From SubRegionMap::Visitor.
bool Visit(const MemRegion* Parent, const MemRegion* SubRegion) {
return scan(SubRegion);
@@ -276,44 +262,44 @@ bool ScanReachableSymbols::scan(nonloc::CompoundVal val) {
return true;
}
-
+
bool ScanReachableSymbols::scan(SVal val) {
if (loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(&val))
return scan(X->getRegion());
if (SymbolRef Sym = val.getAsSymbol())
return visitor.VisitSymbol(Sym);
-
+
if (nonloc::CompoundVal *X = dyn_cast<nonloc::CompoundVal>(&val))
return scan(*X);
-
+
return true;
}
-
+
bool ScanReachableSymbols::scan(const MemRegion *R) {
if (isa<MemSpaceRegion>(R) || visited.count(R))
return true;
-
+
visited.insert(R);
// If this is a symbolic region, visit the symbol for the region.
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
if (!visitor.VisitSymbol(SR->getSymbol()))
return false;
-
+
// If this is a subregion, also visit the parent regions.
if (const SubRegion *SR = dyn_cast<SubRegion>(R))
if (!scan(SR->getSuperRegion()))
return false;
-
+
// Now look at the binding to this region (if any).
if (!scan(state->getSValAsScalarOrLoc(R)))
return false;
-
+
// Now look at the subregions.
if (!SRM.get())
SRM.reset(state->getStateManager().getStoreManager().getSubRegionMap(state));
-
+
return SRM->iterSubRegions(R, *this);
}
@@ -326,24 +312,24 @@ bool GRState::scanReachableSymbols(SVal val, SymbolVisitor& visitor) const {
// Queries.
//===----------------------------------------------------------------------===//
-bool GRStateManager::isEqual(const GRState* state, Expr* Ex,
+bool GRStateManager::isEqual(const GRState* state, const Expr* Ex,
const llvm::APSInt& Y) {
-
+
SVal V = state->getSVal(Ex);
-
+
if (loc::ConcreteInt* X = dyn_cast<loc::ConcreteInt>(&V))
return X->getValue() == Y;
if (nonloc::ConcreteInt* X = dyn_cast<nonloc::ConcreteInt>(&V))
return X->getValue() == Y;
-
+
if (SymbolRef Sym = V.getAsSymbol())
return ConstraintMgr->isEqual(state, Sym, Y);
return false;
}
-
-bool GRStateManager::isEqual(const GRState* state, Expr* Ex, uint64_t x) {
+
+bool GRStateManager::isEqual(const GRState* state, const Expr* Ex, uint64_t x) {
return isEqual(state, Ex, getBasicVals().getValue(x, Ex->getType()));
}
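
The GRState changes above route everything through getStateManager() and expose symbol reachability via scanReachableSymbols() and the ScanReachableSymbols helper. A hedged client sketch (FindSymbol and wasFound are invented, and the exact SymbolVisitor interface is assumed; only VisitSymbol() and GRState::scanReachableSymbols() come from the code above):

  // Sketch: test whether a particular symbol is reachable from a value.
  class FindSymbol : public SymbolVisitor {
    SymbolRef Target;
    bool Found;
  public:
    FindSymbol(SymbolRef T) : Target(T), Found(false) {}
    bool VisitSymbol(SymbolRef Sym) {
      if (Sym == Target) { Found = true; return false; }  // stop the scan
      return true;                                        // keep scanning
    }
    bool wasFound() const { return Found; }
  };

  // Assumed usage: FindSymbol V(Sym);
  //                state->scanReachableSymbols(state->getSVal(Ex), V);
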
diff --git a/lib/Analysis/LiveVariables.cpp b/lib/Analysis/LiveVariables.cpp
index aead7f43ad8f..4d96c8f8f401 100644
--- a/lib/Analysis/LiveVariables.cpp
+++ b/lib/Analysis/LiveVariables.cpp
@@ -15,7 +15,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
-#include "clang/AST/CFG.h"
+#include "clang/Analysis/CFG.h"
#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h"
#include "clang/Analysis/FlowSensitive/DataflowSolver.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -29,35 +29,35 @@ using namespace clang;
//===----------------------------------------------------------------------===//
// Useful constants.
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
static const bool Alive = true;
-static const bool Dead = false;
+static const bool Dead = false;
//===----------------------------------------------------------------------===//
// Dataflow initialization logic.
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
namespace {
-class VISIBILITY_HIDDEN RegisterDecls
+class VISIBILITY_HIDDEN RegisterDecls
: public CFGRecStmtDeclVisitor<RegisterDecls> {
-
+
LiveVariables::AnalysisDataTy& AD;
-
+
typedef llvm::SmallVector<VarDecl*, 20> AlwaysLiveTy;
AlwaysLiveTy AlwaysLive;
-
+
public:
RegisterDecls(LiveVariables::AnalysisDataTy& ad) : AD(ad) {}
~RegisterDecls() {
AD.AlwaysLive.resetValues(AD);
-
+
for (AlwaysLiveTy::iterator I = AlwaysLive.begin(), E = AlwaysLive.end();
- I != E; ++ I)
- AD.AlwaysLive(*I, AD) = Alive;
+ I != E; ++ I)
+ AD.AlwaysLive(*I, AD) = Alive;
}
void VisitImplicitParamDecl(ImplicitParamDecl* IPD) {
@@ -68,12 +68,12 @@ public:
void VisitVarDecl(VarDecl* VD) {
// Register the VarDecl for tracking.
AD.Register(VD);
-
+
// Does the variable have global storage? If so, it is always live.
if (VD->hasGlobalStorage())
- AlwaysLive.push_back(VD);
+ AlwaysLive.push_back(VD);
}
-
+
CFG& getCFG() { return AD.getCFG(); }
};
} // end anonymous namespace
@@ -82,14 +82,14 @@ LiveVariables::LiveVariables(ASTContext& Ctx, CFG& cfg) {
// Register all referenced VarDecls.
getAnalysisData().setCFG(cfg);
getAnalysisData().setContext(Ctx);
-
+
RegisterDecls R(getAnalysisData());
cfg.VisitBlockStmts(R);
}
//===----------------------------------------------------------------------===//
// Transfer functions.
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
namespace {
@@ -101,85 +101,85 @@ public:
LiveVariables::ValTy& getVal() { return LiveState; }
CFG& getCFG() { return AD.getCFG(); }
-
+
void VisitDeclRefExpr(DeclRefExpr* DR);
void VisitBinaryOperator(BinaryOperator* B);
void VisitAssign(BinaryOperator* B);
void VisitDeclStmt(DeclStmt* DS);
void BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S);
void VisitUnaryOperator(UnaryOperator* U);
- void Visit(Stmt *S);
- void VisitTerminator(CFGBlock* B);
-
+ void Visit(Stmt *S);
+ void VisitTerminator(CFGBlock* B);
+
void SetTopValue(LiveVariables::ValTy& V) {
V = AD.AlwaysLive;
}
-
+
};
-
+
void TransferFuncs::Visit(Stmt *S) {
-
+
if (S == getCurrentBlkStmt()) {
-
+
if (AD.Observer)
AD.Observer->ObserveStmt(S,AD,LiveState);
-
+
if (getCFG().isBlkExpr(S)) LiveState(S,AD) = Dead;
StmtVisitor<TransferFuncs,void>::Visit(S);
}
else if (!getCFG().isBlkExpr(S)) {
-
+
if (AD.Observer)
AD.Observer->ObserveStmt(S,AD,LiveState);
-
+
StmtVisitor<TransferFuncs,void>::Visit(S);
-
+
}
else {
// For block-level expressions, mark that they are live.
LiveState(S,AD) = Alive;
}
}
-
+
void TransferFuncs::VisitTerminator(CFGBlock* B) {
-
+
const Stmt* E = B->getTerminatorCondition();
if (!E)
return;
-
+
assert (getCFG().isBlkExpr(E));
LiveState(E, AD) = Alive;
}
void TransferFuncs::VisitDeclRefExpr(DeclRefExpr* DR) {
- if (VarDecl* V = dyn_cast<VarDecl>(DR->getDecl()))
+ if (VarDecl* V = dyn_cast<VarDecl>(DR->getDecl()))
LiveState(V,AD) = Alive;
}
-
-void TransferFuncs::VisitBinaryOperator(BinaryOperator* B) {
+
+void TransferFuncs::VisitBinaryOperator(BinaryOperator* B) {
if (B->isAssignmentOp()) VisitAssign(B);
else VisitStmt(B);
}
void
TransferFuncs::BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
-
+
// This is a block-level expression. Its value is 'dead' before this point.
LiveState(S, AD) = Dead;
// This represents a 'use' of the collection.
Visit(S->getCollection());
-
+
// This represents a 'kill' for the variable.
Stmt* Element = S->getElement();
DeclRefExpr* DR = 0;
VarDecl* VD = 0;
-
+
if (DeclStmt* DS = dyn_cast<DeclStmt>(Element))
VD = cast<VarDecl>(DS->getSingleDecl());
else {
- Expr* ElemExpr = cast<Expr>(Element)->IgnoreParens();
+ Expr* ElemExpr = cast<Expr>(Element)->IgnoreParens();
if ((DR = dyn_cast<DeclRefExpr>(ElemExpr)))
VD = cast<VarDecl>(DR->getDecl());
else {
@@ -194,10 +194,10 @@ TransferFuncs::BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
}
}
-
+
void TransferFuncs::VisitUnaryOperator(UnaryOperator* U) {
Expr *E = U->getSubExpr();
-
+
switch (U->getOpcode()) {
case UnaryOperator::PostInc:
case UnaryOperator::PostDec:
@@ -206,7 +206,7 @@ void TransferFuncs::VisitUnaryOperator(UnaryOperator* U) {
// Walk through the subexpressions, blasting through ParenExprs
// until we either find a DeclRefExpr or some non-DeclRefExpr
// expression.
- if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(E->IgnoreParens()))
+ if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(E->IgnoreParens()))
if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl())) {
// Treat the --/++ operator as a kill.
if (AD.Observer) { AD.Observer->ObserverKill(DR); }
@@ -215,24 +215,24 @@ void TransferFuncs::VisitUnaryOperator(UnaryOperator* U) {
}
// Fall-through.
-
+
default:
return Visit(E);
}
}
-
-void TransferFuncs::VisitAssign(BinaryOperator* B) {
+
+void TransferFuncs::VisitAssign(BinaryOperator* B) {
Expr* LHS = B->getLHS();
// Assigning to a variable?
if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(LHS->IgnoreParens())) {
-
+
      // Update liveness information.
unsigned bit = AD.getIdx(DR->getDecl());
LiveState.getDeclBit(bit) = Dead | AD.AlwaysLive.getDeclBit(bit);
-
+
if (AD.Observer) { AD.Observer->ObserverKill(DR); }
-
+
// Handle things like +=, etc., which also generate "uses"
// of a variable. Do this just by visiting the subexpression.
if (B->getOpcode() != BinaryOperator::Assign)
@@ -240,7 +240,7 @@ void TransferFuncs::VisitAssign(BinaryOperator* B) {
}
else // Not assigning to a variable. Process LHS as usual.
Visit(LHS);
-
+
Visit(B->getRHS());
}
@@ -255,44 +255,44 @@ void TransferFuncs::VisitDeclStmt(DeclStmt* DS) {
// transfer function for this expression first.
if (Expr* Init = VD->getInit())
Visit(Init);
-
+
if (const VariableArrayType* VT =
AD.getContext().getAsVariableArrayType(VD->getType())) {
StmtIterator I(const_cast<VariableArrayType*>(VT));
- StmtIterator E;
+ StmtIterator E;
for (; I != E; ++I) Visit(*I);
}
-
+
// Update liveness information by killing the VarDecl.
unsigned bit = AD.getIdx(VD);
LiveState.getDeclBit(bit) = Dead | AD.AlwaysLive.getDeclBit(bit);
}
}
-
+
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Merge operator: if something is live on any successor block, it is live
// in the current block (a set union).
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
namespace {
struct Merge {
- typedef StmtDeclBitVector_Types::ValTy ValTy;
-
+ typedef StmtDeclBitVector_Types::ValTy ValTy;
+
void operator()(ValTy& Dst, const ValTy& Src) {
Dst.OrDeclBits(Src);
Dst.OrBlkExprBits(Src);
}
};
-
+
typedef DataflowSolver<LiveVariables, TransferFuncs, Merge> Solver;
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// External interface to run Liveness analysis.
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
void LiveVariables::runOnCFG(CFG& cfg) {
Solver S(*this);
@@ -337,22 +337,22 @@ bool LiveVariables::isLive(const Stmt* Loc, const VarDecl* D) const {
void LiveVariables::dumpLiveness(const ValTy& V, SourceManager& SM) const {
const AnalysisDataTy& AD = getAnalysisData();
-
+
for (AnalysisDataTy::decl_iterator I = AD.begin_decl(),
E = AD.end_decl(); I!=E; ++I)
- if (V.getDeclBit(I->second)) {
+ if (V.getDeclBit(I->second)) {
fprintf(stderr, " %s <", I->first->getIdentifier()->getName());
I->first->getLocation().dump(SM);
fprintf(stderr, ">\n");
}
-}
+}
void LiveVariables::dumpBlockLiveness(SourceManager& M) const {
for (BlockDataMapTy::iterator I = getBlockDataMap().begin(),
E = getBlockDataMap().end(); I!=E; ++I) {
fprintf(stderr, "\n[ B%d (live variables at block exit) ]\n",
I->first->getBlockID());
-
+
dumpLiveness(I->second,M);
}
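
The LiveVariables hunks are mostly whitespace cleanup plus the clang/Analysis/CFG.h include move; the analysis entry points themselves are untouched. For orientation, a hedged sketch of a client (the variable names are invented; the constructor, runOnCFG(), and isLive() signatures appear in the hunk headers above):

  // Sketch: run liveness over a function's CFG and query one variable.
  LiveVariables L(Ctx, cfg);     // Ctx: ASTContext&, cfg: CFG&
  L.runOnCFG(cfg);
  if (!L.isLive(S, VD)) {        // S: const Stmt*, VD: const VarDecl*
    // the value of VD is dead at S (e.g., a candidate dead store)
  }
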
diff --git a/lib/Analysis/MemRegion.cpp b/lib/Analysis/MemRegion.cpp
index 45305403585a..353e63240294 100644
--- a/lib/Analysis/MemRegion.cpp
+++ b/lib/Analysis/MemRegion.cpp
@@ -15,6 +15,8 @@
#include "llvm/Support/raw_ostream.h"
#include "clang/Analysis/PathSensitive/MemRegion.h"
+#include "clang/Analysis/PathSensitive/ValueManager.h"
+#include "clang/Analysis/PathSensitive/AnalysisContext.h"
using namespace clang;
@@ -37,7 +39,6 @@ bool SubRegion::isSubRegionOf(const MemRegion* R) const {
return false;
}
-
MemRegionManager* SubRegion::getMemRegionManager() const {
const SubRegion* r = this;
do {
@@ -54,8 +55,8 @@ void MemSpaceRegion::Profile(llvm::FoldingSetNodeID& ID) const {
ID.AddInteger((unsigned)getKind());
}
-void StringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
- const StringLiteral* Str,
+void StringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const StringLiteral* Str,
const MemRegion* superRegion) {
ID.AddInteger((unsigned) StringRegionKind);
ID.AddPointer(Str);
@@ -74,13 +75,6 @@ void AllocaRegion::Profile(llvm::FoldingSetNodeID& ID) const {
ProfileRegion(ID, Ex, Cnt, superRegion);
}
-void TypedViewRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, QualType T,
- const MemRegion* superRegion) {
- ID.AddInteger((unsigned) TypedViewRegionKind);
- ID.Add(T);
- ID.AddPointer(superRegion);
-}
-
void CompoundLiteralRegion::Profile(llvm::FoldingSetNodeID& ID) const {
CompoundLiteralRegion::ProfileRegion(ID, CL, superRegion);
}
@@ -104,6 +98,10 @@ void DeclRegion::Profile(llvm::FoldingSetNodeID& ID) const {
DeclRegion::ProfileRegion(ID, D, superRegion, getKind());
}
+void VarRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ VarRegion::ProfileRegion(ID, getDecl(), LC, superRegion);
+}
+
void SymbolicRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, SymbolRef sym,
const MemRegion *sreg) {
ID.AddInteger((unsigned) MemRegion::SymbolicRegionKind);
@@ -116,7 +114,7 @@ void SymbolicRegion::Profile(llvm::FoldingSetNodeID& ID) const {
}
void ElementRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
- QualType ElementType, SVal Idx,
+ QualType ElementType, SVal Idx,
const MemRegion* superRegion) {
ID.AddInteger(MemRegion::ElementRegionKind);
ID.Add(ElementType);
@@ -128,90 +126,88 @@ void ElementRegion::Profile(llvm::FoldingSetNodeID& ID) const {
ElementRegion::ProfileRegion(ID, ElementType, Index, superRegion);
}
-void CodeTextRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, const void* data,
- QualType t, const MemRegion*) {
+void CodeTextRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const FunctionDecl *FD,
+ const MemRegion*) {
ID.AddInteger(MemRegion::CodeTextRegionKind);
- ID.AddPointer(data);
- ID.Add(t);
+ ID.AddPointer(FD);
}
void CodeTextRegion::Profile(llvm::FoldingSetNodeID& ID) const {
- CodeTextRegion::ProfileRegion(ID, Data, LocationType, superRegion);
+ CodeTextRegion::ProfileRegion(ID, FD, superRegion);
}
//===----------------------------------------------------------------------===//
// Region pretty-printing.
//===----------------------------------------------------------------------===//
-void MemRegion::printStdErr() const {
- print(llvm::errs());
+void MemRegion::dump() const {
+ dumpToStream(llvm::errs());
}
std::string MemRegion::getString() const {
std::string s;
llvm::raw_string_ostream os(s);
- print(os);
+ dumpToStream(os);
return os.str();
}
-void MemRegion::print(llvm::raw_ostream& os) const {
+void MemRegion::dumpToStream(llvm::raw_ostream& os) const {
os << "<Unknown Region>";
}
-void AllocaRegion::print(llvm::raw_ostream& os) const {
+void AllocaRegion::dumpToStream(llvm::raw_ostream& os) const {
os << "alloca{" << (void*) Ex << ',' << Cnt << '}';
}
-void CodeTextRegion::print(llvm::raw_ostream& os) const {
- os << "code{";
- if (isDeclared())
- os << getDecl()->getDeclName().getAsString();
- else
- os << '$' << getSymbol();
-
- os << '}';
+void CodeTextRegion::dumpToStream(llvm::raw_ostream& os) const {
+ os << "code{" << getDecl()->getDeclName().getAsString() << '}';
}
-void CompoundLiteralRegion::print(llvm::raw_ostream& os) const {
+void CompoundLiteralRegion::dumpToStream(llvm::raw_ostream& os) const {
// FIXME: More elaborate pretty-printing.
os << "{ " << (void*) CL << " }";
}
-void ElementRegion::print(llvm::raw_ostream& os) const {
- superRegion->print(os);
- os << '['; Index.print(os); os << ']';
+void ElementRegion::dumpToStream(llvm::raw_ostream& os) const {
+ os << "element{" << superRegion << ','
+ << Index << ',' << getElementType().getAsString() << '}';
}
-void FieldRegion::print(llvm::raw_ostream& os) const {
- superRegion->print(os);
- os << "->" << getDecl()->getNameAsString();
+void FieldRegion::dumpToStream(llvm::raw_ostream& os) const {
+ os << superRegion << "->" << getDecl()->getNameAsString();
}
-void StringRegion::print(llvm::raw_ostream& os) const {
- LangOptions LO; // FIXME.
- Str->printPretty(os, 0, PrintingPolicy(LO));
+void ObjCIvarRegion::dumpToStream(llvm::raw_ostream& os) const {
+ os << "ivar{" << superRegion << ',' << getDecl()->getNameAsString() << '}';
}
-void SymbolicRegion::print(llvm::raw_ostream& os) const {
- os << "SymRegion-" << sym;
+void StringRegion::dumpToStream(llvm::raw_ostream& os) const {
+ Str->printPretty(os, 0, PrintingPolicy(getContext().getLangOptions()));
}
-void TypedViewRegion::print(llvm::raw_ostream& os) const {
- os << "typed_view{" << LValueType.getAsString() << ',';
- getSuperRegion()->print(os);
- os << '}';
+void SymbolicRegion::dumpToStream(llvm::raw_ostream& os) const {
+ os << "SymRegion{" << sym << '}';
}
-void VarRegion::print(llvm::raw_ostream& os) const {
+void VarRegion::dumpToStream(llvm::raw_ostream& os) const {
os << cast<VarDecl>(D)->getNameAsString();
}
+void RegionRawOffset::dump() const {
+ dumpToStream(llvm::errs());
+}
+
+void RegionRawOffset::dumpToStream(llvm::raw_ostream& os) const {
+ os << "raw_offset{" << getRegion() << ',' << getByteOffset() << '}';
+}
+
//===----------------------------------------------------------------------===//
// MemRegionManager methods.
//===----------------------------------------------------------------------===//
-
-MemSpaceRegion* MemRegionManager::LazyAllocate(MemSpaceRegion*& region) {
- if (!region) {
+
+MemSpaceRegion* MemRegionManager::LazyAllocate(MemSpaceRegion*& region) {
+ if (!region) {
region = (MemSpaceRegion*) A.Allocate<MemSpaceRegion>();
new (region) MemSpaceRegion(this);
}
@@ -251,8 +247,16 @@ StringRegion* MemRegionManager::getStringRegion(const StringLiteral* Str) {
return getRegion<StringRegion>(Str);
}
-VarRegion* MemRegionManager::getVarRegion(const VarDecl* d) {
- return getRegion<VarRegion>(d);
+VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
+ const LocationContext *LC) {
+
+  // FIXME: Once we implement scope handling, we will need to properly look up
+  // 'D' in the proper LocationContext. For now, just strip down to the
+ // StackFrame.
+ while (!isa<StackFrameContext>(LC))
+ LC = LC->getParent();
+
+ return getRegion<VarRegion>(D, LC);
}
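
A minimal standalone sketch of the strip-down loop in getVarRegion above, assuming a toy
LocationContext chain; ContextKind and stripToStackFrame are illustrative names, not clang's.

#include <cassert>

enum class ContextKind { StackFrame, Scope, Block };

struct LocationContext {
  ContextKind kind;
  const LocationContext *parent;
};

// Walk parent links until the enclosing stack frame is reached.
const LocationContext *stripToStackFrame(const LocationContext *LC) {
  while (LC && LC->kind != ContextKind::StackFrame)
    LC = LC->parent;
  assert(LC && "every context chain should end at a stack frame");
  return LC;
}
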
CompoundLiteralRegion*
@@ -262,7 +266,8 @@ MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr* CL) {
ElementRegion*
MemRegionManager::getElementRegion(QualType elementType, SVal Idx,
- const MemRegion* superRegion, ASTContext& Ctx){
+ const MemRegion* superRegion,
+ ASTContext& Ctx){
QualType T = Ctx.getCanonicalType(elementType);
@@ -282,13 +287,8 @@ MemRegionManager::getElementRegion(QualType elementType, SVal Idx,
return R;
}
-CodeTextRegion* MemRegionManager::getCodeTextRegion(const FunctionDecl* fd,
- QualType t) {
- return getRegion<CodeTextRegion>(fd, t);
-}
-
-CodeTextRegion* MemRegionManager::getCodeTextRegion(SymbolRef sym, QualType t) {
- return getRegion<CodeTextRegion>(sym, t);
+CodeTextRegion *MemRegionManager::getCodeTextRegion(const FunctionDecl *FD) {
+ return getRegion<CodeTextRegion>(FD);
}
/// getSymbolicRegion - Retrieve or create a "symbolic" memory region.
@@ -298,40 +298,34 @@ SymbolicRegion* MemRegionManager::getSymbolicRegion(SymbolRef sym) {
FieldRegion* MemRegionManager::getFieldRegion(const FieldDecl* d,
const MemRegion* superRegion) {
- return getRegion<FieldRegion>(d, superRegion);
+ return getSubRegion<FieldRegion>(d, superRegion);
}
ObjCIvarRegion*
MemRegionManager::getObjCIvarRegion(const ObjCIvarDecl* d,
const MemRegion* superRegion) {
- return getRegion<ObjCIvarRegion>(d, superRegion);
+ return getSubRegion<ObjCIvarRegion>(d, superRegion);
}
ObjCObjectRegion*
MemRegionManager::getObjCObjectRegion(const ObjCInterfaceDecl* d,
const MemRegion* superRegion) {
- return getRegion<ObjCObjectRegion>(d, superRegion);
-}
-
-TypedViewRegion*
-MemRegionManager::getTypedViewRegion(QualType t, const MemRegion* superRegion) {
- return getRegion<TypedViewRegion>(t, superRegion);
+ return getSubRegion<ObjCObjectRegion>(d, superRegion);
}
AllocaRegion* MemRegionManager::getAllocaRegion(const Expr* E, unsigned cnt) {
return getRegion<AllocaRegion>(E, cnt);
}
-
const MemSpaceRegion *MemRegion::getMemorySpace() const {
const MemRegion *R = this;
const SubRegion* SR = dyn_cast<SubRegion>(this);
-
+
while (SR) {
R = SR->getSuperRegion();
SR = dyn_cast<SubRegion>(R);
}
-
+
return dyn_cast<MemSpaceRegion>(R);
}
@@ -371,7 +365,7 @@ bool MemRegion::hasGlobalsStorage() const {
bool MemRegion::hasParametersStorage() const {
if (const MemSpaceRegion *MS = getMemorySpace())
return MS == getMemRegionManager()->getStackArgumentsRegion();
-
+
return false;
}
@@ -388,12 +382,76 @@ bool MemRegion::hasGlobalsOrParametersStorage() const {
// View handling.
//===----------------------------------------------------------------------===//
-const MemRegion *TypedViewRegion::removeViews() const {
- const SubRegion *SR = this;
- const MemRegion *R = SR;
- while (SR && isa<TypedViewRegion>(SR)) {
- R = SR->getSuperRegion();
- SR = dyn_cast<SubRegion>(R);
+const MemRegion *MemRegion::getBaseRegion() const {
+ const MemRegion *R = this;
+ while (true) {
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+      // FIXME: generalize. Essentially we want to strip away ElementRegions
+      // that were layered on a symbolic region because of casts. However, we
+      // only strip away ElementRegions whose index is 0.
+ SVal index = ER->getIndex();
+ if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&index)) {
+ if (CI->getValue().getSExtValue() == 0) {
+ R = ER->getSuperRegion();
+ continue;
+ }
+ }
+ }
+ break;
}
return R;
}
+
+// FIXME: Merge with the implementation of the same method in Store.cpp
+static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *D = RT->getDecl();
+ if (!D->getDefinition(Ctx))
+ return false;
+ }
+
+ return true;
+}
+
+RegionRawOffset ElementRegion::getAsRawOffset() const {
+ int64_t offset = 0;
+ const ElementRegion *ER = this;
+ const MemRegion *superR = NULL;
+ ASTContext &C = getContext();
+
+ // FIXME: Handle multi-dimensional arrays.
+
+ while (ER) {
+ superR = ER->getSuperRegion();
+
+ // FIXME: generalize to symbolic offsets.
+ SVal index = ER->getIndex();
+ if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&index)) {
+ // Update the offset.
+ int64_t i = CI->getValue().getSExtValue();
+
+ if (i != 0) {
+ QualType elemType = ER->getElementType();
+
+ // If we are pointing to an incomplete type, go no further.
+ if (!IsCompleteType(C, elemType)) {
+ superR = ER;
+ break;
+ }
+
+ int64_t size = (int64_t) (C.getTypeSize(elemType) / 8);
+ offset += (i * size);
+ }
+
+ // Go to the next ElementRegion (if any).
+ ER = dyn_cast<ElementRegion>(superR);
+ continue;
+ }
+
+ return NULL;
+ }
+
+ assert(superR && "super region cannot be NULL");
+ return RegionRawOffset(superR, offset);
+}
+
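
A minimal standalone sketch of the offset folding performed by getAsRawOffset above; the
Elem struct and foldRawOffset are illustrative stand-ins, not clang types, and constant
integer indices are assumed throughout.

// Sketch: fold a chain of constant element accesses into one byte offset.
#include <cstdint>
#include <iostream>
#include <vector>

struct Elem {
  int64_t index;     // constant array index at this level
  int64_t elemSize;  // size of the element type in bytes
};

// Walk from the innermost ElementRegion outward, accumulating index * size,
// much as the loop above does for concrete integer indices.
static int64_t foldRawOffset(const std::vector<Elem> &chain) {
  int64_t offset = 0;
  for (const Elem &e : chain)
    offset += e.index * e.elemSize;
  return offset;
}

int main() {
  // Roughly models buf[2][3] where an element is 4 bytes and a row is 32:
  // 3 * 4 + 2 * 32 == 76.
  std::vector<Elem> chain = {{3, 4}, {2, 32}};
  std::cout << foldRawOffset(chain) << '\n';  // prints 76
}
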
diff --git a/lib/Analysis/PathDiagnostic.cpp b/lib/Analysis/PathDiagnostic.cpp
index a608ce0d5884..800496a16142 100644
--- a/lib/Analysis/PathDiagnostic.cpp
+++ b/lib/Analysis/PathDiagnostic.cpp
@@ -27,7 +27,7 @@ bool PathDiagnosticMacroPiece::containsEvent() const {
for (const_iterator I = begin(), E = end(); I!=E; ++I) {
if (isa<PathDiagnosticEventPiece>(*I))
return true;
-
+
if (PathDiagnosticMacroPiece *MP = dyn_cast<PathDiagnosticMacroPiece>(*I))
if (MP->containsEvent())
return true;
@@ -38,14 +38,14 @@ bool PathDiagnosticMacroPiece::containsEvent() const {
static size_t GetNumCharsToLastNonPeriod(const char *s) {
const char *start = s;
- const char *lastNonPeriod = 0;
+ const char *lastNonPeriod = 0;
for ( ; *s != '\0' ; ++s)
if (*s != '.') lastNonPeriod = s;
-
+
if (!lastNonPeriod)
return 0;
-
+
return (lastNonPeriod - start) + 1;
}
@@ -84,7 +84,7 @@ void PathDiagnostic::resetPath(bool deletePieces) {
if (deletePieces)
for (iterator I=begin(), E=end(); I!=E; ++I)
delete &*I;
-
+
path.clear();
}
@@ -97,7 +97,7 @@ PathDiagnostic::PathDiagnostic(const char* bugtype, const char* desc,
Category(category, GetNumCharsToLastNonPeriod(category)) {}
PathDiagnostic::PathDiagnostic(const std::string& bugtype,
- const std::string& desc,
+ const std::string& desc,
const std::string& category)
: Size(0),
BugType(bugtype, 0, GetNumCharsToLastNonPeriod(bugtype)),
@@ -106,11 +106,11 @@ PathDiagnostic::PathDiagnostic(const std::string& bugtype,
void PathDiagnosticClient::HandleDiagnostic(Diagnostic::Level DiagLevel,
const DiagnosticInfo &Info) {
-
+
// Create a PathDiagnostic with a single piece.
-
+
PathDiagnostic* D = new PathDiagnostic();
-
+
const char *LevelStr;
switch (DiagLevel) {
default:
@@ -124,18 +124,18 @@ void PathDiagnosticClient::HandleDiagnostic(Diagnostic::Level DiagLevel,
llvm::SmallString<100> StrC;
StrC += LevelStr;
Info.FormatDiagnostic(StrC);
-
+
PathDiagnosticPiece *P =
new PathDiagnosticEventPiece(Info.getLocation(),
std::string(StrC.begin(), StrC.end()));
-
+
for (unsigned i = 0, e = Info.getNumRanges(); i != e; ++i)
P->addRange(Info.getRange(i));
for (unsigned i = 0, e = Info.getNumCodeModificationHints(); i != e; ++i)
P->addCodeModificationHint(Info.getCodeModificationHint(i));
D->push_front(P);
- HandlePathDiagnostic(D);
+ HandlePathDiagnostic(D);
}
//===----------------------------------------------------------------------===//
@@ -155,7 +155,7 @@ FullSourceLoc PathDiagnosticLocation::asLocation() const {
case DeclK:
return FullSourceLoc(D->getLocation(), const_cast<SourceManager&>(*SM));
}
-
+
return FullSourceLoc(R.getBegin(), const_cast<SourceManager&>(*SM));
}
@@ -178,7 +178,7 @@ PathDiagnosticRange PathDiagnosticLocation::asRange() const {
if (DS->isSingleDecl()) {
// Should always be the case, but we'll be defensive.
return SourceRange(DS->getLocStart(),
- DS->getSingleDecl()->getLocation());
+ DS->getSingleDecl()->getLocation());
}
break;
}
@@ -197,7 +197,7 @@ PathDiagnosticRange PathDiagnosticLocation::asRange() const {
return SourceRange(L, L);
}
}
-
+
return S->getSourceRange();
}
case DeclK:
@@ -207,7 +207,7 @@ PathDiagnosticRange PathDiagnosticLocation::asRange() const {
// FIXME: We would like to always get the function body, even
// when it needs to be de-serialized, but getting the
// ASTContext here requires significant changes.
- if (Stmt *Body = FD->getBodyIfAvailable()) {
+ if (Stmt *Body = FD->getBody()) {
if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Body))
return CS->getSourceRange();
else
@@ -219,7 +219,7 @@ PathDiagnosticRange PathDiagnosticLocation::asRange() const {
return PathDiagnosticRange(SourceRange(L, L), true);
}
}
-
+
return R;
}
@@ -239,4 +239,66 @@ void PathDiagnosticLocation::flatten() {
}
}
+//===----------------------------------------------------------------------===//
+// FoldingSet profiling methods.
+//===----------------------------------------------------------------------===//
+
+void PathDiagnosticLocation::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger((unsigned) K);
+ switch (K) {
+ case RangeK:
+ ID.AddInteger(R.getBegin().getRawEncoding());
+ ID.AddInteger(R.getEnd().getRawEncoding());
+ break;
+ case SingleLocK:
+ ID.AddInteger(R.getBegin().getRawEncoding());
+ break;
+ case StmtK:
+ ID.Add(S);
+ break;
+ case DeclK:
+ ID.Add(D);
+ break;
+ }
+ return;
+}
+
+void PathDiagnosticPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger((unsigned) getKind());
+ ID.AddString(str);
+ // FIXME: Add profiling support for code hints.
+ ID.AddInteger((unsigned) getDisplayHint());
+ for (range_iterator I = ranges_begin(), E = ranges_end(); I != E; ++I) {
+ ID.AddInteger(I->getBegin().getRawEncoding());
+ ID.AddInteger(I->getEnd().getRawEncoding());
+ }
+}
+void PathDiagnosticSpotPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticPiece::Profile(ID);
+ ID.Add(Pos);
+}
+
+void PathDiagnosticControlFlowPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticPiece::Profile(ID);
+ for (const_iterator I = begin(), E = end(); I != E; ++I)
+ ID.Add(*I);
+}
+
+void PathDiagnosticMacroPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticSpotPiece::Profile(ID);
+ for (const_iterator I = begin(), E = end(); I != E; ++I)
+ ID.Add(**I);
+}
+
+void PathDiagnostic::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(Size);
+ ID.AddString(BugType);
+ ID.AddString(Desc);
+ ID.AddString(Category);
+ for (const_iterator I = begin(), E = end(); I != E; ++I)
+ ID.Add(*I);
+
+ for (meta_iterator I = meta_begin(), E = meta_end(); I != E; ++I)
+ ID.AddString(*I);
+}
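
A minimal sketch of the Profile() pattern added above, with a toy NodeID in place of
llvm::FoldingSetNodeID; the hashing scheme and the insertUnique helper are assumptions
for illustration only.

#include <cstddef>
#include <functional>
#include <set>
#include <string>

// Toy stand-in for llvm::FoldingSetNodeID: accumulate identity-defining
// fields into a single hash value.
class NodeID {
  std::size_t h = 0;
  void mix(std::size_t v) { h ^= v + 0x9e3779b9 + (h << 6) + (h >> 2); }
public:
  void AddInteger(unsigned v) { mix(std::hash<unsigned>{}(v)); }
  void AddString(const std::string &s) { mix(std::hash<std::string>{}(s)); }
  std::size_t value() const { return h; }
};

struct Diag {
  std::string bugType, desc, category;
  // Same shape as PathDiagnostic::Profile above: hash the fields that
  // define the diagnostic's identity.
  void Profile(NodeID &ID) const {
    ID.AddString(bugType);
    ID.AddString(desc);
    ID.AddString(category);
  }
};

// Two diagnostics with identical profiles fold into a single entry.
bool insertUnique(std::set<std::size_t> &seen, const Diag &D) {
  NodeID ID;
  D.Profile(ID);
  return seen.insert(ID.value()).second;
}
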
diff --git a/lib/Analysis/RangeConstraintManager.cpp b/lib/Analysis/RangeConstraintManager.cpp
index 079462e8d19f..73b445e6ab36 100644
--- a/lib/Analysis/RangeConstraintManager.cpp
+++ b/lib/Analysis/RangeConstraintManager.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines RangeConstraintManager, a class that tracks simple
+// This file defines RangeConstraintManager, a class that tracks simple
// equality and inequality constraints on symbolic values of GRState.
//
//===----------------------------------------------------------------------===//
@@ -66,7 +66,7 @@ public:
// consistent (instead of comparing by pointer values) and can potentially
// be used to speed up some of the operations in RangeSet.
static inline bool isLess(key_type_ref lhs, key_type_ref rhs) {
- return *lhs.first < *rhs.first || (!(*rhs.first < *lhs.first) &&
+ return *lhs.first < *rhs.first || (!(*rhs.first < *lhs.first) &&
*lhs.second < *rhs.second);
}
};
@@ -78,7 +78,7 @@ class VISIBILITY_HIDDEN RangeSet {
typedef llvm::ImmutableSet<Range, RangeTrait> PrimRangeSet;
PrimRangeSet ranges; // no need to make const, since it is an
// ImmutableSet - this allows default operator=
- // to work.
+ // to work.
public:
typedef PrimRangeSet::Factory Factory;
typedef PrimRangeSet::iterator iterator;
@@ -88,13 +88,13 @@ public:
iterator begin() const { return ranges.begin(); }
iterator end() const { return ranges.end(); }
-
+
bool isEmpty() const { return ranges.isEmpty(); }
-
+
/// Construct a new RangeSet representing '{ [from, to] }'.
RangeSet(Factory &F, const llvm::APSInt &from, const llvm::APSInt &to)
: ranges(F.Add(F.GetEmptySet(), Range(from, to))) {}
-
+
/// Profile - Generates a hash profile of this RangeSet for use
/// by FoldingSet.
void Profile(llvm::FoldingSetNodeID &ID) const { ranges.Profile(ID); }
@@ -122,7 +122,7 @@ public:
/// value be not be equal to V.
RangeSet AddNE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
PrimRangeSet newRanges = ranges;
-
+
// FIXME: We can perhaps enhance ImmutableSet to do this search for us
// in log(N) time using the sorted property of the internal AVL tree.
for (iterator i = begin(), e = end(); i != e; ++i) {
@@ -134,11 +134,11 @@ public:
newRanges = F.Add(newRanges, Range(i->From(), BV.Sub1(V)));
if (V != i->To())
newRanges = F.Add(newRanges, Range(BV.Add1(V), i->To()));
- // All of the ranges are non-overlapping, so we can stop.
+ // All of the ranges are non-overlapping, so we can stop.
break;
}
}
-
+
return newRanges;
}
@@ -153,7 +153,7 @@ public:
else if (i->To() < V)
newRanges = F.Add(newRanges, *i);
}
-
+
return newRanges;
}
@@ -168,7 +168,7 @@ public:
else if (i->To() <= V)
newRanges = F.Add(newRanges, *i);
}
-
+
return newRanges;
}
@@ -181,7 +181,7 @@ public:
else if (i->From() > V)
newRanges = F.Add(newRanges, *i);
}
-
+
return newRanges;
}
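
A minimal sketch of the range-trimming logic in the AddLT method above, over plain
vectors instead of ImmutableSet; Range and addLT here are illustrative stand-ins.

#include <cstdint>
#include <iostream>
#include <vector>

struct Range { int64_t from, to; };   // inclusive [from, to]

// Apply the constraint "sym < V": trim any range straddling V down to V-1,
// keep ranges entirely below V, and drop ranges entirely at or above V.
std::vector<Range> addLT(const std::vector<Range> &in, int64_t V) {
  std::vector<Range> out;
  for (const Range &r : in) {
    if (r.from < V && r.to >= V)
      out.push_back({r.from, V - 1});  // straddles V: keep the lower part
    else if (r.to < V)
      out.push_back(r);                // entirely below V: keep as-is
    // else: entirely >= V, drop it
  }
  return out;
}

int main() {
  std::vector<Range> rs = {{0, 10}, {20, 30}};
  for (const Range &r : addLT(rs, 25))
    std::cout << '[' << r.from << ',' << r.to << "] ";
  std::cout << '\n';   // prints [0,10] [20,24]
}
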
@@ -208,13 +208,13 @@ public:
isFirst = false;
else
os << ", ";
-
+
os << '[' << i->From().toString(10) << ", " << i->To().toString(10)
<< ']';
}
- os << " }";
+ os << " }";
}
-
+
bool operator==(const RangeSet &other) const {
return ranges == other.ranges;
}
@@ -227,13 +227,13 @@ namespace clang {
template<>
struct GRStateTrait<ConstraintRange>
: public GRStatePartialTrait<ConstraintRangeTy> {
- static inline void* GDMIndex() { return &ConstraintRangeIndex; }
+ static inline void* GDMIndex() { return &ConstraintRangeIndex; }
};
-}
-
+}
+
namespace {
class VISIBILITY_HIDDEN RangeConstraintManager : public SimpleConstraintManager{
- RangeSet GetRange(const GRState *state, SymbolRef sym);
+ RangeSet GetRange(const GRState *state, SymbolRef sym);
public:
RangeConstraintManager() {}
@@ -256,7 +256,7 @@ public:
const llvm::APSInt& V);
const llvm::APSInt* getSymVal(const GRState* St, SymbolRef sym) const;
-
+
// FIXME: Refactor into SimpleConstraintManager?
bool isEqual(const GRState* St, SymbolRef sym, const llvm::APSInt& V) const {
const llvm::APSInt *i = getSymVal(St, sym);
@@ -265,7 +265,7 @@ public:
const GRState* RemoveDeadBindings(const GRState* St, SymbolReaper& SymReaper);
- void print(const GRState* St, llvm::raw_ostream& Out,
+ void print(const GRState* St, llvm::raw_ostream& Out,
const char* nl, const char *sep);
private:
@@ -294,11 +294,11 @@ RangeConstraintManager::RemoveDeadBindings(const GRState* state,
ConstraintRangeTy::Factory& CRFactory = state->get_context<ConstraintRange>();
for (ConstraintRangeTy::iterator I = CR.begin(), E = CR.end(); I != E; ++I) {
- SymbolRef sym = I.getKey();
+ SymbolRef sym = I.getKey();
if (SymReaper.maybeDead(sym))
CR = CRFactory.Remove(CR, sym);
}
-
+
return state->set<ConstraintRange>(CR);
}
@@ -310,11 +310,11 @@ RangeSet
RangeConstraintManager::GetRange(const GRState *state, SymbolRef sym) {
if (ConstraintRangeTy::data_type* V = state->get<ConstraintRange>(sym))
return *V;
-
+
// Lazily generate a new RangeSet representing all possible values for the
// given symbol type.
QualType T = state->getSymbolManager().getType(sym);
- BasicValueFactory& BV = state->getBasicVals();
+ BasicValueFactory& BV = state->getBasicVals();
return RangeSet(F, BV.getMinValue(T), BV.getMaxValue(T));
}
@@ -341,16 +341,16 @@ AssumeX(GE)
// Pretty-printing.
//===------------------------------------------------------------------------===/
-void RangeConstraintManager::print(const GRState* St, llvm::raw_ostream& Out,
+void RangeConstraintManager::print(const GRState* St, llvm::raw_ostream& Out,
const char* nl, const char *sep) {
-
+
ConstraintRangeTy Ranges = St->get<ConstraintRange>();
-
+
if (Ranges.isEmpty())
return;
-
+
Out << nl << sep << "ranges of symbol values:";
-
+
for (ConstraintRangeTy::iterator I=Ranges.begin(), E=Ranges.end(); I!=E; ++I){
Out << nl << ' ' << I.getKey() << " : ";
I.getData().print(Out);
diff --git a/lib/Analysis/RegionStore.cpp b/lib/Analysis/RegionStore.cpp
index 23e8b738b601..3844d6a6149c 100644
--- a/lib/Analysis/RegionStore.cpp
+++ b/lib/Analysis/RegionStore.cpp
@@ -15,9 +15,11 @@
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/PathSensitive/MemRegion.h"
+#include "clang/Analysis/PathSensitive/AnalysisContext.h"
#include "clang/Analysis/PathSensitive/GRState.h"
#include "clang/Analysis/PathSensitive/GRStateTrait.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/Support/Optional.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ImmutableMap.h"
@@ -27,8 +29,57 @@
using namespace clang;
+#define HEAP_UNDEFINED 0
+#define USE_EXPLICIT_COMPOUND 0
+
+namespace {
+class BindingVal {
+public:
+ enum BindingKind { Direct, Default };
+private:
+ SVal Value;
+ BindingKind Kind;
+
+public:
+ BindingVal(SVal V, BindingKind K) : Value(V), Kind(K) {}
+
+ bool isDefault() const { return Kind == Default; }
+
+ const SVal *getValue() const { return &Value; }
+
+ const SVal *getDirectValue() const { return isDefault() ? 0 : &Value; }
+
+ const SVal *getDefaultValue() const { return isDefault() ? &Value : 0; }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ Value.Profile(ID);
+ ID.AddInteger(Kind);
+ }
+
+ inline bool operator==(const BindingVal& R) const {
+ return Value == R.Value && Kind == R.Kind;
+ }
+
+ inline bool operator!=(const BindingVal& R) const {
+ return !(*this == R);
+ }
+};
+}
+
+namespace llvm {
+static inline
+llvm::raw_ostream& operator<<(llvm::raw_ostream& os, BindingVal V) {
+ if (V.isDefault())
+ os << "(default) ";
+ else
+ os << "(direct) ";
+ os << *V.getValue();
+ return os;
+}
+} // end llvm namespace
+
// Actual Store type.
-typedef llvm::ImmutableMap<const MemRegion*, SVal> RegionBindingsTy;
+typedef llvm::ImmutableMap<const MemRegion*, BindingVal> RegionBindings;
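
A minimal sketch of direct vs. default bindings, with std::map standing in for
llvm::ImmutableMap and int standing in for SVal; these helpers mirror the
getDirectBinding/getDefaultBinding queries used by this store but are illustrative only.

#include <map>
#include <optional>
#include <string>

enum class Kind { Direct, Default };

struct BindingVal {
  int value;   // stand-in for SVal
  Kind kind;
};

using Bindings = std::map<std::string, BindingVal>;  // region name -> binding

// A direct binding is a value written to the region itself.
std::optional<int> getDirectBinding(const Bindings &B, const std::string &R) {
  auto it = B.find(R);
  if (it != B.end() && it->second.kind == Kind::Direct)
    return it->second.value;
  return std::nullopt;
}

// A default binding covers the region and its subregions until overridden.
std::optional<int> getDefaultBinding(const Bindings &B, const std::string &R) {
  auto it = B.find(R);
  if (it != B.end() && it->second.kind == Kind::Default)
    return it->second.value;
  return std::nullopt;
}
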
//===----------------------------------------------------------------------===//
// Fine-grained control of RegionStoreManager.
@@ -36,57 +87,27 @@ typedef llvm::ImmutableMap<const MemRegion*, SVal> RegionBindingsTy;
namespace {
struct VISIBILITY_HIDDEN minimal_features_tag {};
-struct VISIBILITY_HIDDEN maximal_features_tag {};
-
+struct VISIBILITY_HIDDEN maximal_features_tag {};
+
class VISIBILITY_HIDDEN RegionStoreFeatures {
bool SupportsFields;
bool SupportsRemaining;
-
+
public:
RegionStoreFeatures(minimal_features_tag) :
SupportsFields(false), SupportsRemaining(false) {}
-
+
RegionStoreFeatures(maximal_features_tag) :
SupportsFields(true), SupportsRemaining(false) {}
-
+
void enableFields(bool t) { SupportsFields = t; }
-
+
bool supportsFields() const { return SupportsFields; }
bool supportsRemaining() const { return SupportsRemaining; }
};
}
//===----------------------------------------------------------------------===//
-// Region "Views"
-//===----------------------------------------------------------------------===//
-//
-// MemRegions can be layered on top of each other. This GDM entry tracks
-// what are the MemRegions that layer a given MemRegion.
-//
-typedef llvm::ImmutableSet<const MemRegion*> RegionViews;
-namespace { class VISIBILITY_HIDDEN RegionViewMap {}; }
-static int RegionViewMapIndex = 0;
-namespace clang {
- template<> struct GRStateTrait<RegionViewMap>
- : public GRStatePartialTrait<llvm::ImmutableMap<const MemRegion*,
- RegionViews> > {
-
- static void* GDMIndex() { return &RegionViewMapIndex; }
- };
-}
-
-// RegionCasts records the current cast type of a region.
-namespace { class VISIBILITY_HIDDEN RegionCasts {}; }
-static int RegionCastsIndex = 0;
-namespace clang {
- template<> struct GRStateTrait<RegionCasts>
- : public GRStatePartialTrait<llvm::ImmutableMap<const MemRegion*,
- QualType> > {
- static void* GDMIndex() { return &RegionCastsIndex; }
- };
-}
-
-//===----------------------------------------------------------------------===//
// Region "Extents"
//===----------------------------------------------------------------------===//
//
@@ -103,19 +124,15 @@ namespace clang {
}
//===----------------------------------------------------------------------===//
-// Regions with default values.
+// Utility functions.
//===----------------------------------------------------------------------===//
-//
-// This GDM entry tracks what regions have a default value if they have no bound
-// value and have not been killed.
-//
-namespace { class VISIBILITY_HIDDEN RegionDefaultValue {}; }
-static int RegionDefaultValueIndex = 0;
-namespace clang {
- template<> struct GRStateTrait<RegionDefaultValue>
- : public GRStatePartialTrait<llvm::ImmutableMap<const MemRegion*, SVal> > {
- static void* GDMIndex() { return &RegionDefaultValueIndex; }
- };
+
+static bool IsAnyPointerOrIntptr(QualType ty, ASTContext &Ctx) {
+ if (ty->isAnyPointerType())
+ return true;
+
+ return ty->isIntegerType() && ty->isScalarType() &&
+ Ctx.getTypeSize(ty) == Ctx.getTypeSize(Ctx.VoidPtrTy);
}
//===----------------------------------------------------------------------===//
@@ -125,87 +142,104 @@ namespace clang {
namespace {
class VISIBILITY_HIDDEN RegionStoreSubRegionMap : public SubRegionMap {
- typedef llvm::DenseMap<const MemRegion*,
- llvm::ImmutableSet<const MemRegion*> > Map;
-
- llvm::ImmutableSet<const MemRegion*>::Factory F;
+ typedef llvm::ImmutableSet<const MemRegion*> SetTy;
+ typedef llvm::DenseMap<const MemRegion*, SetTy> Map;
+ SetTy::Factory F;
Map M;
-
public:
- void add(const MemRegion* Parent, const MemRegion* SubRegion) {
+ bool add(const MemRegion* Parent, const MemRegion* SubRegion) {
Map::iterator I = M.find(Parent);
- M.insert(std::make_pair(Parent,
- F.Add(I == M.end() ? F.GetEmptySet() : I->second, SubRegion)));
+
+ if (I == M.end()) {
+ M.insert(std::make_pair(Parent, F.Add(F.GetEmptySet(), SubRegion)));
+ return true;
+ }
+
+ I->second = F.Add(I->second, SubRegion);
+ return false;
}
-
+
+ void process(llvm::SmallVectorImpl<const SubRegion*> &WL, const SubRegion *R);
+
~RegionStoreSubRegionMap() {}
-
+
bool iterSubRegions(const MemRegion* Parent, Visitor& V) const {
Map::iterator I = M.find(Parent);
if (I == M.end())
return true;
-
+
llvm::ImmutableSet<const MemRegion*> S = I->second;
for (llvm::ImmutableSet<const MemRegion*>::iterator SI=S.begin(),SE=S.end();
SI != SE; ++SI) {
if (!V.Visit(Parent, *SI))
return false;
}
-
+
return true;
}
-};
+
+ typedef SetTy::iterator iterator;
+
+ std::pair<iterator, iterator> begin_end(const MemRegion *R) {
+ Map::iterator I = M.find(R);
+ SetTy S = I == M.end() ? F.GetEmptySet() : I->second;
+ return std::make_pair(S.begin(), S.end());
+ }
+};
class VISIBILITY_HIDDEN RegionStoreManager : public StoreManager {
const RegionStoreFeatures Features;
- RegionBindingsTy::Factory RBFactory;
- RegionViews::Factory RVFactory;
-
- const MemRegion* SelfRegion;
- const ImplicitParamDecl *SelfDecl;
+ RegionBindings::Factory RBFactory;
+
+ typedef llvm::DenseMap<const GRState *, RegionStoreSubRegionMap*> SMCache;
+ SMCache SC;
public:
- RegionStoreManager(GRStateManager& mgr, const RegionStoreFeatures &f)
+ RegionStoreManager(GRStateManager& mgr, const RegionStoreFeatures &f)
: StoreManager(mgr),
Features(f),
- RBFactory(mgr.getAllocator()),
- RVFactory(mgr.getAllocator()),
- SelfRegion(0), SelfDecl(0) {
- if (const ObjCMethodDecl* MD =
- dyn_cast<ObjCMethodDecl>(&StateMgr.getCodeDecl()))
- SelfDecl = MD->getSelfDecl();
+ RBFactory(mgr.getAllocator()) {}
+
+ virtual ~RegionStoreManager() {
+ for (SMCache::iterator I = SC.begin(), E = SC.end(); I != E; ++I)
+ delete (*I).second;
}
- virtual ~RegionStoreManager() {}
+ SubRegionMap *getSubRegionMap(const GRState *state);
+
+ RegionStoreSubRegionMap *getRegionStoreSubRegionMap(Store store);
+
+ Optional<SVal> getBinding(RegionBindings B, const MemRegion *R);
+ Optional<SVal> getDirectBinding(RegionBindings B, const MemRegion *R);
+  /// getDefaultBinding - Returns an optional SVal representing the default
+  ///  binding associated with a region and its subregions.
+ Optional<SVal> getDefaultBinding(RegionBindings B, const MemRegion *R);
- SubRegionMap* getSubRegionMap(const GRState *state);
-
/// getLValueString - Returns an SVal representing the lvalue of a
/// StringLiteral. Within RegionStore a StringLiteral has an
/// associated StringRegion, and the lvalue of a StringLiteral is
/// the lvalue of that region.
- SVal getLValueString(const GRState *state, const StringLiteral* S);
+ SVal getLValueString(const StringLiteral* S);
/// getLValueCompoundLiteral - Returns an SVal representing the
/// lvalue of a compound literal. Within RegionStore a compound
/// literal has an associated region, and the lvalue of the
/// compound literal is the lvalue of that region.
- SVal getLValueCompoundLiteral(const GRState *state, const CompoundLiteralExpr*);
+ SVal getLValueCompoundLiteral(const CompoundLiteralExpr*);
/// getLValueVar - Returns an SVal that represents the lvalue of a
/// variable. Within RegionStore a variable has an associated
/// VarRegion, and the lvalue of the variable is the lvalue of that region.
- SVal getLValueVar(const GRState *state, const VarDecl* VD);
-
- SVal getLValueIvar(const GRState *state, const ObjCIvarDecl* D, SVal Base);
+ SVal getLValueVar(const VarDecl *VD, const LocationContext *LC);
- SVal getLValueField(const GRState *state, SVal Base, const FieldDecl* D);
-
- SVal getLValueFieldOrIvar(const GRState *state, SVal Base, const Decl* D);
+ SVal getLValueIvar(const ObjCIvarDecl* D, SVal Base);
+
+ SVal getLValueField(const FieldDecl* D, SVal Base);
- SVal getLValueElement(const GRState *state, QualType elementType,
- SVal Base, SVal Offset);
+ SVal getLValueFieldOrIvar(const Decl* D, SVal Base);
+
+ SVal getLValueElement(QualType elementType, SVal Offset, SVal Base);
/// ArrayToPointer - Emulates the "decay" of an array to a pointer
@@ -216,61 +250,52 @@ public:
/// casts from arrays to pointers.
SVal ArrayToPointer(Loc Array);
- CastResult CastRegion(const GRState *state, const MemRegion* R,
- QualType CastToTy);
-
SVal EvalBinOp(const GRState *state, BinaryOperator::Opcode Op,Loc L,
NonLoc R, QualType resultTy);
- Store getInitialStore() { return RBFactory.GetEmptyMap().getRoot(); }
-
- /// getSelfRegion - Returns the region for the 'self' (Objective-C) or
- /// 'this' object (C++). When used when analyzing a normal function this
- /// method returns NULL.
- const MemRegion* getSelfRegion(Store) {
- if (!SelfDecl)
- return 0;
-
- if (!SelfRegion) {
- const ObjCMethodDecl *MD = cast<ObjCMethodDecl>(&StateMgr.getCodeDecl());
- SelfRegion = MRMgr.getObjCObjectRegion(MD->getClassInterface(),
- MRMgr.getHeapRegion());
- }
-
- return SelfRegion;
+ Store getInitialStore(const LocationContext *InitLoc) {
+ return RBFactory.GetEmptyMap().getRoot();
}
-
+
//===-------------------------------------------------------------------===//
// Binding values to regions.
//===-------------------------------------------------------------------===//
+ const GRState *InvalidateRegion(const GRState *state, const MemRegion *R,
+ const Expr *E, unsigned Count);
+
+private:
+ void RemoveSubRegionBindings(RegionBindings &B, const MemRegion *R,
+ RegionStoreSubRegionMap &M);
+
+public:
const GRState *Bind(const GRState *state, Loc LV, SVal V);
const GRState *BindCompoundLiteral(const GRState *state,
- const CompoundLiteralExpr* CL, SVal V);
-
- const GRState *BindDecl(const GRState *state, const VarDecl* VD, SVal InitVal);
+ const CompoundLiteralExpr* CL, SVal V);
+
+ const GRState *BindDecl(const GRState *ST, const VarDecl *VD,
+ const LocationContext *LC, SVal InitVal);
- const GRState *BindDeclWithNoInit(const GRState *state, const VarDecl* VD) {
+ const GRState *BindDeclWithNoInit(const GRState *state, const VarDecl*,
+ const LocationContext *) {
return state;
}
/// BindStruct - Bind a compound value to a structure.
const GRState *BindStruct(const GRState *, const TypedRegion* R, SVal V);
-
+
const GRState *BindArray(const GRState *state, const TypedRegion* R, SVal V);
-
- /// KillStruct - Set the entire struct to unknown.
- const GRState *KillStruct(const GRState *state, const TypedRegion* R);
- const GRState *setDefaultValue(const GRState *state, const MemRegion* R, SVal V);
+ /// KillStruct - Set the entire struct to unknown.
+ Store KillStruct(Store store, const TypedRegion* R);
Store Remove(Store store, Loc LV);
//===------------------------------------------------------------------===//
// Loading values from regions.
//===------------------------------------------------------------------===//
-
+
/// The high level logic for this method is this:
/// Retrieve (L)
/// if L has binding
@@ -282,55 +307,66 @@ public:
/// return undefined
/// else
/// return symbolic
- SVal Retrieve(const GRState *state, Loc L, QualType T = QualType());
+ SValuator::CastResult Retrieve(const GRState *state, Loc L,
+ QualType T = QualType());
+
+ SVal RetrieveElement(const GRState *state, const ElementRegion *R);
+
+ SVal RetrieveField(const GRState *state, const FieldRegion *R);
+
+ SVal RetrieveObjCIvar(const GRState *state, const ObjCIvarRegion *R);
- SVal RetrieveElement(const GRState* state, const ElementRegion* R);
+ SVal RetrieveVar(const GRState *state, const VarRegion *R);
- SVal RetrieveField(const GRState* state, const FieldRegion* R);
+ SVal RetrieveLazySymbol(const GRState *state, const TypedRegion *R);
+
+ SVal RetrieveFieldOrElementCommon(const GRState *state, const TypedRegion *R,
+ QualType Ty, const MemRegion *superR);
/// Retrieve the values in a struct and return a CompoundVal, used when doing
- /// struct copy:
- /// struct s x, y;
+ /// struct copy:
+ /// struct s x, y;
/// x = y;
/// y's value is retrieved by this method.
SVal RetrieveStruct(const GRState *St, const TypedRegion* R);
-
+
SVal RetrieveArray(const GRState *St, const TypedRegion* R);
+ std::pair<const GRState*, const MemRegion*>
+ GetLazyBinding(RegionBindings B, const MemRegion *R);
+
+ const GRState* CopyLazyBindings(nonloc::LazyCompoundVal V,
+ const GRState *state,
+ const TypedRegion *R);
+
+ const ElementRegion *GetElementZeroRegion(const SymbolicRegion *SR,
+ QualType T);
+
//===------------------------------------------------------------------===//
// State pruning.
//===------------------------------------------------------------------===//
-
+
/// RemoveDeadBindings - Scans the RegionStore of 'state' for dead values.
/// It returns a new Store with these values removed.
- Store RemoveDeadBindings(const GRState *state, Stmt* Loc, SymbolReaper& SymReaper,
+ void RemoveDeadBindings(GRState &state, Stmt* Loc, SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
+ const GRState *EnterStackFrame(const GRState *state,
+ const StackFrameContext *frame);
+
//===------------------------------------------------------------------===//
// Region "extents".
//===------------------------------------------------------------------===//
-
+
const GRState *setExtent(const GRState *state, const MemRegion* R, SVal Extent);
SVal getSizeInElements(const GRState *state, const MemRegion* R);
//===------------------------------------------------------------------===//
- // Region "views".
- //===------------------------------------------------------------------===//
-
- const GRState *AddRegionView(const GRState *state, const MemRegion* View,
- const MemRegion* Base);
-
- const GRState *RemoveRegionView(const GRState *state, const MemRegion* View,
- const MemRegion* Base);
-
- //===------------------------------------------------------------------===//
// Utility methods.
//===------------------------------------------------------------------===//
-
- const GRState *setCastType(const GRState *state, const MemRegion* R, QualType T);
- static inline RegionBindingsTy GetRegionBindings(Store store) {
- return RegionBindingsTy(static_cast<const RegionBindingsTy::TreeTy*>(store));
+ static inline RegionBindings GetRegionBindings(Store store) {
+ return RegionBindings(static_cast<const RegionBindings::TreeTy*>(store));
}
void print(Store store, llvm::raw_ostream& Out, const char* nl,
@@ -344,7 +380,7 @@ public:
BasicValueFactory& getBasicVals() {
return StateMgr.getBasicVals();
}
-
+
// FIXME: Remove.
ASTContext& getContext() { return StateMgr.getContext(); }
};
@@ -366,18 +402,155 @@ StoreManager *clang::CreateFieldsOnlyRegionStoreManager(GRStateManager &StMgr) {
return new RegionStoreManager(StMgr, F);
}
-SubRegionMap* RegionStoreManager::getSubRegionMap(const GRState *state) {
- RegionBindingsTy B = GetRegionBindings(state->getStore());
+void
+RegionStoreSubRegionMap::process(llvm::SmallVectorImpl<const SubRegion*> &WL,
+ const SubRegion *R) {
+ const MemRegion *superR = R->getSuperRegion();
+ if (add(superR, R))
+ if (const SubRegion *sr = dyn_cast<SubRegion>(superR))
+ WL.push_back(sr);
+}
+
+RegionStoreSubRegionMap*
+RegionStoreManager::getRegionStoreSubRegionMap(Store store) {
+ RegionBindings B = GetRegionBindings(store);
RegionStoreSubRegionMap *M = new RegionStoreSubRegionMap();
-
- for (RegionBindingsTy::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
- if (const SubRegion* R = dyn_cast<SubRegion>(I.getKey()))
- M->add(R->getSuperRegion(), R);
+
+ llvm::SmallVector<const SubRegion*, 10> WL;
+
+ for (RegionBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I)
+ if (const SubRegion *R = dyn_cast<SubRegion>(I.getKey()))
+ M->process(WL, R);
+
+ // We also need to record in the subregion map "intermediate" regions that
+ // don't have direct bindings but are super regions of those that do.
+ while (!WL.empty()) {
+ const SubRegion *R = WL.back();
+ WL.pop_back();
+ M->process(WL, R);
}
-
+
return M;
}
+SubRegionMap *RegionStoreManager::getSubRegionMap(const GRState *state) {
+ return getRegionStoreSubRegionMap(state->getStore());
+}
+
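
A minimal sketch of the sub-region map construction above, using plain STL containers;
Region, add, and process are simplified stand-ins rather than the clang classes.

#include <map>
#include <set>
#include <string>
#include <vector>

struct Region {
  std::string name;
  const Region *super = nullptr;   // null for top-level memory spaces
};

using SubMap = std::map<const Region *, std::set<const Region *>>;

// Returns true the first time a parent is seen, mirroring add() above.
bool add(SubMap &M, const Region *parent, const Region *sub) {
  auto &children = M[parent];
  bool first = children.empty();
  children.insert(sub);
  return first;
}

// Record R under its super region; if the parent is new, revisit it later so
// that "intermediate" regions with no direct binding still become keys.
void process(SubMap &M, std::vector<const Region *> &WL, const Region *R) {
  if (R->super && add(M, R->super, R))
    WL.push_back(R->super);
}

SubMap buildSubRegionMap(const std::vector<const Region *> &boundRegions) {
  SubMap M;
  std::vector<const Region *> WL;
  for (const Region *R : boundRegions)
    process(M, WL, R);
  while (!WL.empty()) {
    const Region *R = WL.back();
    WL.pop_back();
    process(M, WL, R);
  }
  return M;
}
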
+//===----------------------------------------------------------------------===//
+// Binding invalidation.
+//===----------------------------------------------------------------------===//
+
+void RegionStoreManager::RemoveSubRegionBindings(RegionBindings &B,
+ const MemRegion *R,
+ RegionStoreSubRegionMap &M) {
+ RegionStoreSubRegionMap::iterator I, E;
+
+ for (llvm::tie(I, E) = M.begin_end(R); I != E; ++I)
+ RemoveSubRegionBindings(B, *I, M);
+
+ B = RBFactory.Remove(B, R);
+}
+
+const GRState *RegionStoreManager::InvalidateRegion(const GRState *state,
+ const MemRegion *R,
+ const Expr *Ex,
+ unsigned Count) {
+ ASTContext& Ctx = StateMgr.getContext();
+
+ // Strip away casts.
+ R = R->getBaseRegion();
+
+ // Get the mapping of regions -> subregions.
+ llvm::OwningPtr<RegionStoreSubRegionMap>
+ SubRegions(getRegionStoreSubRegionMap(state->getStore()));
+
+ RegionBindings B = GetRegionBindings(state->getStore());
+
+ llvm::DenseMap<const MemRegion *, unsigned> Visited;
+ llvm::SmallVector<const MemRegion *, 10> WorkList;
+ WorkList.push_back(R);
+
+ while (!WorkList.empty()) {
+ R = WorkList.back();
+ WorkList.pop_back();
+
+ // Have we visited this region before?
+ unsigned &visited = Visited[R];
+ if (visited)
+ continue;
+ visited = 1;
+
+ // Add subregions to work list.
+ RegionStoreSubRegionMap::iterator I, E;
+ for (llvm::tie(I, E) = SubRegions->begin_end(R); I!=E; ++I)
+ WorkList.push_back(*I);
+
+ // Get the old binding. Is it a region? If so, add it to the worklist.
+ if (Optional<SVal> V = getDirectBinding(B, R)) {
+ if (const MemRegion *RV = V->getAsRegion())
+ WorkList.push_back(RV);
+ }
+
+ // Handle region.
+ if (isa<AllocaRegion>(R) || isa<SymbolicRegion>(R) ||
+ isa<ObjCObjectRegion>(R)) {
+      // Invalidate the region by setting its default value to a
+      // conjured symbol. The type of the symbol is irrelevant.
+ DefinedOrUnknownSVal V = ValMgr.getConjuredSymbolVal(R, Ex, Ctx.IntTy,
+ Count);
+ B = RBFactory.Add(B, R, BindingVal(V, BindingVal::Default));
+ continue;
+ }
+
+ if (!R->isBoundable())
+ continue;
+
+ const TypedRegion *TR = cast<TypedRegion>(R);
+ QualType T = TR->getValueType(Ctx);
+
+ if (const RecordType *RT = T->getAsStructureType()) {
+ const RecordDecl *RD = RT->getDecl()->getDefinition(Ctx);
+
+ // No record definition. There is nothing we can do.
+ if (!RD)
+ continue;
+
+      // Invalidate the region by setting its default value to a
+      // conjured symbol. The type of the symbol is irrelevant.
+ DefinedOrUnknownSVal V = ValMgr.getConjuredSymbolVal(R, Ex, Ctx.IntTy,
+ Count);
+ B = RBFactory.Add(B, R, BindingVal(V, BindingVal::Default));
+ continue;
+ }
+
+ if (const ArrayType *AT = Ctx.getAsArrayType(T)) {
+      // Set the default value of the array to a conjured symbol.
+ DefinedOrUnknownSVal V =
+ ValMgr.getConjuredSymbolVal(R, Ex, AT->getElementType(), Count);
+ B = RBFactory.Add(B, R, BindingVal(V, BindingVal::Default));
+ continue;
+ }
+
+ if ((isa<FieldRegion>(R)||isa<ElementRegion>(R)||isa<ObjCIvarRegion>(R))
+ && Visited[cast<SubRegion>(R)->getSuperRegion()]) {
+ // For fields and elements whose super region has also been invalidated,
+ // only remove the old binding. The super region will get set with a
+ // default value from which we can lazily derive a new symbolic value.
+ B = RBFactory.Remove(B, R);
+ continue;
+ }
+
+ // Invalidate the binding.
+ DefinedOrUnknownSVal V = ValMgr.getConjuredSymbolVal(R, Ex, T, Count);
+ assert(SymbolManager::canSymbolicate(T) || V.isUnknown());
+ B = RBFactory.Add(B, R, BindingVal(V, BindingVal::Direct));
+ }
+
+ // Create a new state with the updated bindings.
+ return state->makeWithStore(B.getRoot());
+}
+
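
A minimal sketch of the invalidation walk in InvalidateRegion above, with toy types
(string regions, int values) in place of MemRegion and SVal; conjureSymbol is an
illustrative stand-in for getConjuredSymbolVal.

#include <map>
#include <set>
#include <string>
#include <vector>

using Region = std::string;
using SubRegionMap = std::map<Region, std::vector<Region>>;
using Bindings = std::map<Region, int>;

int conjureSymbol() { static int next = 0; return ++next; }

// Visit the root region and every sub-region exactly once, replacing each
// binding with a fresh "conjured" placeholder value.
// (The real walk also enqueues regions referenced by the old binding.)
void invalidate(Bindings &B, const SubRegionMap &subs, const Region &root) {
  std::set<Region> visited;
  std::vector<Region> worklist{root};
  while (!worklist.empty()) {
    Region R = worklist.back();
    worklist.pop_back();
    if (!visited.insert(R).second)
      continue;                       // already processed
    auto it = subs.find(R);
    if (it != subs.end())             // enqueue sub-regions
      worklist.insert(worklist.end(), it->second.begin(), it->second.end());
    B[R] = conjureSymbol();           // bind a fresh unknown value
  }
}
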
//===----------------------------------------------------------------------===//
// getLValueXXX methods.
//===----------------------------------------------------------------------===//
@@ -386,40 +559,36 @@ SubRegionMap* RegionStoreManager::getSubRegionMap(const GRState *state) {
/// StringLiteral. Within RegionStore a StringLiteral has an
/// associated StringRegion, and the lvalue of a StringLiteral is the
/// lvalue of that region.
-SVal RegionStoreManager::getLValueString(const GRState *St,
- const StringLiteral* S) {
+SVal RegionStoreManager::getLValueString(const StringLiteral* S) {
return loc::MemRegionVal(MRMgr.getStringRegion(S));
}
/// getLValueVar - Returns an SVal that represents the lvalue of a
/// variable. Within RegionStore a variable has an associated
/// VarRegion, and the lvalue of the variable is the lvalue of that region.
-SVal RegionStoreManager::getLValueVar(const GRState *St, const VarDecl* VD) {
- return loc::MemRegionVal(MRMgr.getVarRegion(VD));
+SVal RegionStoreManager::getLValueVar(const VarDecl *VD,
+ const LocationContext *LC) {
+ return loc::MemRegionVal(MRMgr.getVarRegion(VD, LC));
}
/// getLValueCompoundLiteral - Returns an SVal representing the lvalue
/// of a compound literal. Within RegionStore a compound literal
/// has an associated region, and the lvalue of the compound literal
/// is the lvalue of that region.
-SVal
-RegionStoreManager::getLValueCompoundLiteral(const GRState *St,
- const CompoundLiteralExpr* CL) {
+SVal
+RegionStoreManager::getLValueCompoundLiteral(const CompoundLiteralExpr* CL) {
return loc::MemRegionVal(MRMgr.getCompoundLiteralRegion(CL));
}
-SVal RegionStoreManager::getLValueIvar(const GRState *St, const ObjCIvarDecl* D,
- SVal Base) {
- return getLValueFieldOrIvar(St, Base, D);
+SVal RegionStoreManager::getLValueIvar(const ObjCIvarDecl* D, SVal Base) {
+ return getLValueFieldOrIvar(D, Base);
}
-SVal RegionStoreManager::getLValueField(const GRState *St, SVal Base,
- const FieldDecl* D) {
- return getLValueFieldOrIvar(St, Base, D);
+SVal RegionStoreManager::getLValueField(const FieldDecl* D, SVal Base) {
+ return getLValueFieldOrIvar(D, Base);
}
-SVal RegionStoreManager::getLValueFieldOrIvar(const GRState *St, SVal Base,
- const Decl* D) {
+SVal RegionStoreManager::getLValueFieldOrIvar(const Decl* D, SVal Base) {
if (Base.isUnknownOrUndef())
return Base;
@@ -446,7 +615,7 @@ SVal RegionStoreManager::getLValueFieldOrIvar(const GRState *St, SVal Base,
assert(0 && "Unhandled Base.");
return Base;
}
-
+
// NOTE: We must have this check first because ObjCIvarDecl is a subclass
// of FieldDecl.
if (const ObjCIvarDecl *ID = dyn_cast<ObjCIvarDecl>(D))
@@ -455,9 +624,8 @@ SVal RegionStoreManager::getLValueFieldOrIvar(const GRState *St, SVal Base,
return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
}
-SVal RegionStoreManager::getLValueElement(const GRState *St,
- QualType elementType,
- SVal Base, SVal Offset) {
+SVal RegionStoreManager::getLValueElement(QualType elementType, SVal Offset,
+ SVal Base) {
// If the base is an unknown or undefined value, just return it back.
// FIXME: For absolute pointer addresses, we just return that value back as
@@ -474,7 +642,10 @@ SVal RegionStoreManager::getLValueElement(const GRState *St,
// Pointer of any type can be cast and used as array base.
const ElementRegion *ElemR = dyn_cast<ElementRegion>(BaseRegion);
-
+
+ // Convert the offset to the appropriate size and signedness.
+ Offset = ValMgr.convertToArrayIndex(Offset);
+
if (!ElemR) {
//
// If the base region is not an ElementRegion, create one.
@@ -485,54 +656,26 @@ SVal RegionStoreManager::getLValueElement(const GRState *St,
//
// Observe that 'p' binds to an AllocaRegion.
//
-
- // Offset might be unsigned. We have to convert it to signed ConcreteInt.
- if (nonloc::ConcreteInt* CI = dyn_cast<nonloc::ConcreteInt>(&Offset)) {
- const llvm::APSInt& OffI = CI->getValue();
- if (OffI.isUnsigned()) {
- llvm::APSInt Tmp = OffI;
- Tmp.setIsSigned(true);
- Offset = ValMgr.makeIntVal(Tmp);
- }
- }
return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
BaseRegion, getContext()));
}
-
+
SVal BaseIdx = ElemR->getIndex();
-
+
if (!isa<nonloc::ConcreteInt>(BaseIdx))
return UnknownVal();
-
+
const llvm::APSInt& BaseIdxI = cast<nonloc::ConcreteInt>(BaseIdx).getValue();
const llvm::APSInt& OffI = cast<nonloc::ConcreteInt>(Offset).getValue();
assert(BaseIdxI.isSigned());
-
- // FIXME: This appears to be the assumption of this code. We should review
- // whether or not BaseIdxI.getBitWidth() < OffI.getBitWidth(). If it
- // can't we need to put a comment here. If it can, we should handle it.
- assert(BaseIdxI.getBitWidth() >= OffI.getBitWidth());
- const MemRegion *ArrayR = ElemR->getSuperRegion();
- SVal NewIdx;
-
- if (OffI.isUnsigned() || OffI.getBitWidth() < BaseIdxI.getBitWidth()) {
- // 'Offset' might be unsigned. We have to convert it to signed and
- // possibly extend it.
- llvm::APSInt Tmp = OffI;
-
- if (OffI.getBitWidth() < BaseIdxI.getBitWidth())
- Tmp.extend(BaseIdxI.getBitWidth());
-
- Tmp.setIsSigned(true);
- Tmp += BaseIdxI; // Compute the new offset.
- NewIdx = ValMgr.makeIntVal(Tmp);
- }
- else
- NewIdx = nonloc::ConcreteInt(getBasicVals().getValue(BaseIdxI + OffI));
+ // Compute the new index.
+ SVal NewIdx = nonloc::ConcreteInt(getBasicVals().getValue(BaseIdxI + OffI));
+ // Construct the new ElementRegion.
+ const MemRegion *ArrayR = ElemR->getSuperRegion();
return loc::MemRegionVal(MRMgr.getElementRegion(elementType, NewIdx, ArrayR,
- getContext()));
+ getContext()));
}
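
A minimal sketch of the index folding above: indexing an existing element region adds
the new offset to its base index instead of nesting a second element region. ElementRef
and indexElement are illustrative stand-ins.

#include <cstdint>
#include <iostream>
#include <string>

struct ElementRef {
  std::string base;   // super region
  int64_t index;      // constant index
};

// element{base, i} indexed by a further offset j becomes element{base, i + j}.
ElementRef indexElement(const ElementRef &ER, int64_t offset) {
  return {ER.base, ER.index + offset};
}

int main() {
  ElementRef p{"buf", 2};
  ElementRef q = indexElement(p, 3);
  std::cout << q.base << '[' << q.index << "]\n";  // prints buf[5]
}
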
//===----------------------------------------------------------------------===//
@@ -540,64 +683,62 @@ SVal RegionStoreManager::getLValueElement(const GRState *St,
//===----------------------------------------------------------------------===//
SVal RegionStoreManager::getSizeInElements(const GRState *state,
- const MemRegion* R) {
- if (const VarRegion* VR = dyn_cast<VarRegion>(R)) {
- // Get the type of the variable.
- QualType T = VR->getDesugaredValueType(getContext());
+ const MemRegion *R) {
- // FIXME: Handle variable-length arrays.
- if (isa<VariableArrayType>(T))
+ switch (R->getKind()) {
+ case MemRegion::MemSpaceRegionKind:
+ assert(0 && "Cannot index into a MemSpace");
return UnknownVal();
-
- if (const ConstantArrayType* CAT = dyn_cast<ConstantArrayType>(T)) {
- // return the size as signed integer.
- return ValMgr.makeIntVal(CAT->getSize(), false);
- }
- const QualType* CastTy = state->get<RegionCasts>(VR);
-
- // If the VarRegion is cast to other type, compute the size with respect to
- // that type.
- if (CastTy) {
- QualType EleTy =cast<PointerType>(CastTy->getTypePtr())->getPointeeType();
- QualType VarTy = VR->getValueType(getContext());
- uint64_t EleSize = getContext().getTypeSize(EleTy);
- uint64_t VarSize = getContext().getTypeSize(VarTy);
- assert(VarSize != 0);
- return ValMgr.makeIntVal(VarSize/EleSize, false);
- }
+ case MemRegion::CodeTextRegionKind:
+ // Technically this can happen if people do funny things with casts.
+ return UnknownVal();
- // Clients can use ordinary variables as if they were arrays. These
- // essentially are arrays of size 1.
- return ValMgr.makeIntVal(1, false);
- }
+ // Not yet handled.
+ case MemRegion::AllocaRegionKind:
+ case MemRegion::CompoundLiteralRegionKind:
+ case MemRegion::ElementRegionKind:
+ case MemRegion::FieldRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ case MemRegion::ObjCObjectRegionKind:
+ case MemRegion::SymbolicRegionKind:
+ return UnknownVal();
- if (const StringRegion* SR = dyn_cast<StringRegion>(R)) {
- const StringLiteral* Str = SR->getStringLiteral();
- // We intentionally made the size value signed because it participates in
- // operations with signed indices.
- return ValMgr.makeIntVal(Str->getByteLength()+1, false);
- }
+ case MemRegion::StringRegionKind: {
+ const StringLiteral* Str = cast<StringRegion>(R)->getStringLiteral();
+ // We intentionally made the size value signed because it participates in
+ // operations with signed indices.
+ return ValMgr.makeIntVal(Str->getByteLength()+1, false);
+ }
- if (const FieldRegion* FR = dyn_cast<FieldRegion>(R)) {
- // FIXME: Unsupported yet.
- FR = 0;
- return UnknownVal();
- }
+ case MemRegion::VarRegionKind: {
+ const VarRegion* VR = cast<VarRegion>(R);
+ // Get the type of the variable.
+ QualType T = VR->getDesugaredValueType(getContext());
- if (isa<SymbolicRegion>(R)) {
- return UnknownVal();
- }
+ // FIXME: Handle variable-length arrays.
+ if (isa<VariableArrayType>(T))
+ return UnknownVal();
- if (isa<AllocaRegion>(R)) {
- return UnknownVal();
- }
+ if (const ConstantArrayType* CAT = dyn_cast<ConstantArrayType>(T)) {
+ // return the size as signed integer.
+ return ValMgr.makeIntVal(CAT->getSize(), false);
+ }
- if (isa<ElementRegion>(R)) {
- return UnknownVal();
+ // Clients can use ordinary variables as if they were arrays. These
+ // essentially are arrays of size 1.
+ return ValMgr.makeIntVal(1, false);
+ }
+
+ case MemRegion::BEG_DECL_REGIONS:
+ case MemRegion::END_DECL_REGIONS:
+ case MemRegion::BEG_TYPED_REGIONS:
+ case MemRegion::END_TYPED_REGIONS:
+ assert(0 && "Infeasible region");
+ return UnknownVal();
}
- assert(0 && "Other regions are not supported yet.");
+ assert(0 && "Unreachable");
return UnknownVal();
}
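
A minimal sketch of the sizing rules above, with VarInfo as a toy stand-in for the clang
type queries: constant arrays report their element count, a string literal reports its
byte length plus the terminating NUL, and an ordinary scalar variable acts like an
array of size 1.

#include <cstdint>
#include <optional>
#include <string>

struct VarInfo {
  bool isConstantArray = false;
  int64_t arraySize = 0;
  bool isStringLiteral = false;
  std::string bytes;
};

std::optional<int64_t> sizeInElements(const VarInfo &V) {
  if (V.isStringLiteral)
    return static_cast<int64_t>(V.bytes.size()) + 1;  // include the NUL
  if (V.isConstantArray)
    return V.arraySize;
  return 1;  // plain variables act as one-element arrays
}
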
@@ -620,105 +761,29 @@ const GRState *RegionStoreManager::setExtent(const GRState *state,
SVal RegionStoreManager::ArrayToPointer(Loc Array) {
if (!isa<loc::MemRegionVal>(Array))
return UnknownVal();
-
+
const MemRegion* R = cast<loc::MemRegionVal>(&Array)->getRegion();
const TypedRegion* ArrayR = dyn_cast<TypedRegion>(R);
-
+
if (!ArrayR)
return UnknownVal();
-
+
// Strip off typedefs from the ArrayRegion's ValueType.
- QualType T = ArrayR->getValueType(getContext())->getDesugaredType();
+ QualType T = ArrayR->getValueType(getContext()).getDesugaredType();
ArrayType *AT = cast<ArrayType>(T);
T = AT->getElementType();
-
- nonloc::ConcreteInt Idx(getBasicVals().getZeroWithPtrWidth(false));
- ElementRegion* ER = MRMgr.getElementRegion(T, Idx, ArrayR, getContext());
-
- return loc::MemRegionVal(ER);
-}
-
-RegionStoreManager::CastResult
-RegionStoreManager::CastRegion(const GRState *state, const MemRegion* R,
- QualType CastToTy) {
-
- ASTContext& Ctx = StateMgr.getContext();
-
- // We need to know the real type of CastToTy.
- QualType ToTy = Ctx.getCanonicalType(CastToTy);
-
- // Check cast to ObjCQualifiedID type.
- if (ToTy->isObjCQualifiedIdType()) {
- // FIXME: Record the type information aside.
- return CastResult(state, R);
- }
-
- // CodeTextRegion should be cast to only function pointer type.
- if (isa<CodeTextRegion>(R)) {
- assert(CastToTy->isFunctionPointerType() || CastToTy->isBlockPointerType()
- || (CastToTy->isPointerType()
- && CastToTy->getAsPointerType()->getPointeeType()->isVoidType()));
- return CastResult(state, R);
- }
-
- // Now assume we are casting from pointer to pointer. Other cases should
- // already be handled.
- QualType PointeeTy = cast<PointerType>(ToTy.getTypePtr())->getPointeeType();
-
- // Process region cast according to the kind of the region being cast.
-
- // FIXME: Need to handle arbitrary downcasts.
- if (isa<SymbolicRegion>(R) || isa<AllocaRegion>(R)) {
- state = setCastType(state, R, ToTy);
- return CastResult(state, R);
- }
-
- // VarRegion, ElementRegion, and FieldRegion has an inherent type. Normally
- // they should not be cast. We only layer an ElementRegion when the cast-to
- // pointee type is of smaller size. In other cases, we return the original
- // VarRegion.
- if (isa<VarRegion>(R) || isa<ElementRegion>(R) || isa<FieldRegion>(R)
- || isa<ObjCIvarRegion>(R) || isa<CompoundLiteralRegion>(R)) {
- // If the pointee type is incomplete, do not compute its size, and return
- // the original region.
- if (const RecordType *RT = dyn_cast<RecordType>(PointeeTy.getTypePtr())) {
- const RecordDecl *D = RT->getDecl();
- if (!D->getDefinition(getContext()))
- return CastResult(state, R);
- }
-
- QualType ObjTy = cast<TypedRegion>(R)->getValueType(getContext());
- uint64_t PointeeTySize = getContext().getTypeSize(PointeeTy);
- uint64_t ObjTySize = getContext().getTypeSize(ObjTy);
-
- if ((PointeeTySize > 0 && PointeeTySize < ObjTySize) ||
- (ObjTy->isAggregateType() && PointeeTy->isScalarType()) ||
- ObjTySize == 0 /* R has 'void*' type. */) {
- // Record the cast type of the region.
- state = setCastType(state, R, ToTy);
-
- SVal Idx = ValMgr.makeZeroArrayIndex();
- ElementRegion* ER = MRMgr.getElementRegion(PointeeTy, Idx,R,getContext());
- return CastResult(state, ER);
- } else {
- state = setCastType(state, R, ToTy);
- return CastResult(state, R);
- }
- }
- if (isa<ObjCObjectRegion>(R)) {
- return CastResult(state, R);
- }
+ SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
+ ElementRegion* ER = MRMgr.getElementRegion(T, ZeroIdx, ArrayR, getContext());
- assert(0 && "Unprocessed region.");
- return 0;
+ return loc::MemRegionVal(ER);
}
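
A minimal sketch of the decay modeling above (and of the GetElementZeroRegion helper
further down): a decayed array, like a symbolic pointer, is represented as element 0 of
the underlying region. ElementRef and arrayToPointer are illustrative stand-ins.

#include <iostream>
#include <string>

struct ElementRef {
  std::string base;
  long index;
};

// Array-to-pointer decay yields the address of the first element.
ElementRef arrayToPointer(const std::string &arrayRegion) {
  return {arrayRegion, 0};  // &arr[0]
}

int main() {
  ElementRef p = arrayToPointer("arr");
  std::cout << p.base << '[' << p.index << "]\n";  // prints arr[0]
}
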
//===----------------------------------------------------------------------===//
// Pointer arithmetic.
//===----------------------------------------------------------------------===//
-SVal RegionStoreManager::EvalBinOp(const GRState *state,
+SVal RegionStoreManager::EvalBinOp(const GRState *state,
BinaryOperator::Opcode Op, Loc L, NonLoc R,
QualType resultTy) {
// Assume the base location is MemRegionVal.
@@ -728,64 +793,89 @@ SVal RegionStoreManager::EvalBinOp(const GRState *state,
const MemRegion* MR = cast<loc::MemRegionVal>(L).getRegion();
const ElementRegion *ER = 0;
- // If the operand is a symbolic or alloca region, create the first element
- // region on it.
- if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR)) {
- QualType T;
- // If the SymbolicRegion was cast to another type, use that type.
- if (const QualType *t = state->get<RegionCasts>(SR)) {
- T = *t;
- } else {
- // Otherwise use the symbol's type.
+ switch (MR->getKind()) {
+ case MemRegion::SymbolicRegionKind: {
+ const SymbolicRegion *SR = cast<SymbolicRegion>(MR);
SymbolRef Sym = SR->getSymbol();
- T = Sym->getType(getContext());
+ QualType T = Sym->getType(getContext());
+ QualType EleTy;
+
+ if (const PointerType *PT = T->getAs<PointerType>())
+ EleTy = PT->getPointeeType();
+ else
+ EleTy = T->getAs<ObjCObjectPointerType>()->getPointeeType();
+
+ SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
+ ER = MRMgr.getElementRegion(EleTy, ZeroIdx, SR, getContext());
+ break;
+ }
+ case MemRegion::AllocaRegionKind: {
+ const AllocaRegion *AR = cast<AllocaRegion>(MR);
+ QualType T = getContext().CharTy; // Create an ElementRegion of bytes.
+ QualType EleTy = T->getAs<PointerType>()->getPointeeType();
+ SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
+ ER = MRMgr.getElementRegion(EleTy, ZeroIdx, AR, getContext());
+ break;
}
- QualType EleTy = T->getAsPointerType()->getPointeeType();
- SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
- ER = MRMgr.getElementRegion(EleTy, ZeroIdx, SR, getContext());
- }
- else if (const AllocaRegion *AR = dyn_cast<AllocaRegion>(MR)) {
- // Get the alloca region's current cast type.
+ case MemRegion::ElementRegionKind: {
+ ER = cast<ElementRegion>(MR);
+ break;
+ }
+ // Not yet handled.
+ case MemRegion::VarRegionKind:
+ case MemRegion::StringRegionKind: {
+
+ }
+ // Fall-through.
+ case MemRegion::CompoundLiteralRegionKind:
+ case MemRegion::FieldRegionKind:
+ case MemRegion::ObjCObjectRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ return UnknownVal();
- GRStateTrait<RegionCasts>::lookup_type T = state->get<RegionCasts>(AR);
- assert(T && "alloca region has no type.");
- QualType EleTy = cast<PointerType>(T->getTypePtr())->getPointeeType();
- SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
- ER = MRMgr.getElementRegion(EleTy, ZeroIdx, AR, getContext());
- }
- else if (isa<FieldRegion>(MR)) {
- // Not track pointer arithmetic on struct fields.
- return UnknownVal();
- }
- else {
- ER = cast<ElementRegion>(MR);
+ case MemRegion::CodeTextRegionKind:
+ // Technically this can happen if people do funny things with casts.
+ return UnknownVal();
+
+ case MemRegion::MemSpaceRegionKind:
+ assert(0 && "Cannot perform pointer arithmetic on a MemSpace");
+ return UnknownVal();
+
+ case MemRegion::BEG_DECL_REGIONS:
+ case MemRegion::END_DECL_REGIONS:
+ case MemRegion::BEG_TYPED_REGIONS:
+ case MemRegion::END_TYPED_REGIONS:
+ assert(0 && "Infeasible region");
+ return UnknownVal();
}
SVal Idx = ER->getIndex();
-
nonloc::ConcreteInt* Base = dyn_cast<nonloc::ConcreteInt>(&Idx);
- nonloc::ConcreteInt* Offset = dyn_cast<nonloc::ConcreteInt>(&R);
-
- // Only support concrete integer indexes for now.
- if (Base && Offset) {
- // FIXME: For now, convert the signedness and bitwidth of offset in case
- // they don't match. This can result from pointer arithmetic. In reality,
- // we should figure out what are the proper semantics and implement them.
- //
- // This addresses the test case test/Analysis/ptr-arith.c
- //
- nonloc::ConcreteInt OffConverted(getBasicVals().Convert(Base->getValue(),
- Offset->getValue()));
- SVal NewIdx = Base->evalBinOp(ValMgr, Op, OffConverted);
- const MemRegion* NewER =
- MRMgr.getElementRegion(ER->getElementType(), NewIdx,ER->getSuperRegion(),
- getContext());
- return ValMgr.makeLoc(NewER);
+ // For now, only support:
+ // (a) concrete integer indices that can easily be resolved
+ // (b) 0 + symbolic index
+ if (Base) {
+ if (nonloc::ConcreteInt *Offset = dyn_cast<nonloc::ConcreteInt>(&R)) {
+ // FIXME: Should use SValuator here.
+ SVal NewIdx =
+ Base->evalBinOp(ValMgr, Op,
+ cast<nonloc::ConcreteInt>(ValMgr.convertToArrayIndex(*Offset)));
+ const MemRegion* NewER =
+ MRMgr.getElementRegion(ER->getElementType(), NewIdx,
+ ER->getSuperRegion(), getContext());
+ return ValMgr.makeLoc(NewER);
+ }
+ if (0 == Base->getValue()) {
+ const MemRegion* NewER =
+ MRMgr.getElementRegion(ER->getElementType(), R,
+ ER->getSuperRegion(), getContext());
+ return ValMgr.makeLoc(NewER);
+ }
}
-
+
return UnknownVal();
}
@@ -793,7 +883,71 @@ SVal RegionStoreManager::EvalBinOp(const GRState *state,
// Loading values from regions.
//===----------------------------------------------------------------------===//
-SVal RegionStoreManager::Retrieve(const GRState *state, Loc L, QualType T) {
+Optional<SVal> RegionStoreManager::getDirectBinding(RegionBindings B,
+ const MemRegion *R) {
+ if (const BindingVal *BV = B.lookup(R))
+ return Optional<SVal>::create(BV->getDirectValue());
+
+ return Optional<SVal>();
+}
+
+Optional<SVal> RegionStoreManager::getDefaultBinding(RegionBindings B,
+ const MemRegion *R) {
+
+ if (R->isBoundable())
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(R))
+ if (TR->getValueType(getContext())->isUnionType())
+ return UnknownVal();
+
+ if (BindingVal const *V = B.lookup(R))
+ return Optional<SVal>::create(V->getDefaultValue());
+
+ return Optional<SVal>();
+}
+
+Optional<SVal> RegionStoreManager::getBinding(RegionBindings B,
+ const MemRegion *R) {
+ if (const BindingVal *BV = B.lookup(R))
+ return Optional<SVal>::create(BV->getValue());
+
+ return Optional<SVal>();
+}
+
+static bool IsReinterpreted(QualType RTy, QualType UsedTy, ASTContext &Ctx) {
+ RTy = Ctx.getCanonicalType(RTy);
+ UsedTy = Ctx.getCanonicalType(UsedTy);
+
+ if (RTy == UsedTy)
+ return false;
+
+
+ // Recursively check the types. We basically want to see if a pointer value
+ // is ever reinterpreted as a non-pointer, e.g. void** and intptr_t*
+  // represent a reinterpretation.
+ if (Loc::IsLocType(RTy) && Loc::IsLocType(UsedTy)) {
+ const PointerType *PRTy = RTy->getAs<PointerType>();
+ const PointerType *PUsedTy = UsedTy->getAs<PointerType>();
+
+ return PUsedTy && PRTy &&
+ IsReinterpreted(PRTy->getPointeeType(),
+ PUsedTy->getPointeeType(), Ctx);
+ }
+
+ return true;
+}
+
+const ElementRegion *
+RegionStoreManager::GetElementZeroRegion(const SymbolicRegion *SR, QualType T) {
+ ASTContext &Ctx = getContext();
+ SVal idx = ValMgr.makeZeroArrayIndex();
+ assert(!T.isNull());
+ return MRMgr.getElementRegion(T, idx, SR, Ctx);
+}
+
+
+
+SValuator::CastResult
+RegionStoreManager::Retrieve(const GRState *state, Loc L, QualType T) {
assert(!isa<UnknownVal>(L) && "location unknown");
assert(!isa<UndefinedVal>(L) && "location undefined");
@@ -801,7 +955,7 @@ SVal RegionStoreManager::Retrieve(const GRState *state, Loc L, QualType T) {
// FIXME: Is this even possible? Shouldn't this be treated as a null
// dereference at a higher level?
if (isa<loc::ConcreteInt>(L))
- return UndefinedVal();
+ return SValuator::CastResult(state, UndefinedVal());
const MemRegion *MR = cast<loc::MemRegionVal>(L).getRegion();
@@ -811,13 +965,19 @@ SVal RegionStoreManager::Retrieve(const GRState *state, Loc L, QualType T) {
// char* p = alloca();
// read(p);
// c = *p;
- if (isa<SymbolicRegion>(MR) || isa<AllocaRegion>(MR))
- return UnknownVal();
+ if (isa<AllocaRegion>(MR))
+ return SValuator::CastResult(state, UnknownVal());
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
+ MR = GetElementZeroRegion(SR, T);
+
+ if (isa<CodeTextRegion>(MR))
+ return SValuator::CastResult(state, UnknownVal());
// FIXME: Perhaps this method should just take a 'const MemRegion*' argument
// instead of 'Loc', and have the other Loc cases handled at a higher level.
const TypedRegion *R = cast<TypedRegion>(MR);
- assert(R && "bad region");
+ QualType RTy = R->getValueType(getContext());
// FIXME: We should eventually handle funny addressing. e.g.:
//
@@ -828,197 +988,319 @@ SVal RegionStoreManager::Retrieve(const GRState *state, Loc L, QualType T) {
//
// Such funny addressing will occur due to layering of regions.
- QualType RTy = R->getValueType(getContext());
+#if 0
+ ASTContext &Ctx = getContext();
+ if (!T.isNull() && IsReinterpreted(RTy, T, Ctx)) {
+ SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
+ R = MRMgr.getElementRegion(T, ZeroIdx, R, Ctx);
+ RTy = T;
+ assert(Ctx.getCanonicalType(RTy) ==
+ Ctx.getCanonicalType(R->getValueType(Ctx)));
+ }
+#endif
if (RTy->isStructureType())
- return RetrieveStruct(state, R);
+ return SValuator::CastResult(state, RetrieveStruct(state, R));
+
+ // FIXME: Handle unions.
+ if (RTy->isUnionType())
+ return SValuator::CastResult(state, UnknownVal());
if (RTy->isArrayType())
- return RetrieveArray(state, R);
+ return SValuator::CastResult(state, RetrieveArray(state, R));
// FIXME: handle Vector types.
if (RTy->isVectorType())
- return UnknownVal();
+ return SValuator::CastResult(state, UnknownVal());
if (const FieldRegion* FR = dyn_cast<FieldRegion>(R))
- return RetrieveField(state, FR);
+ return CastRetrievedVal(RetrieveField(state, FR), state, FR, T);
if (const ElementRegion* ER = dyn_cast<ElementRegion>(R))
- return RetrieveElement(state, ER);
-
- RegionBindingsTy B = GetRegionBindings(state->getStore());
- RegionBindingsTy::data_type* V = B.lookup(R);
+ return CastRetrievedVal(RetrieveElement(state, ER), state, ER, T);
- // Check if the region has a binding.
- if (V)
- return *V;
+ if (const ObjCIvarRegion *IVR = dyn_cast<ObjCIvarRegion>(R))
+ return CastRetrievedVal(RetrieveObjCIvar(state, IVR), state, IVR, T);
- if (const ObjCIvarRegion *IVR = dyn_cast<ObjCIvarRegion>(R)) {
- const MemRegion *SR = IVR->getSuperRegion();
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R))
+ return CastRetrievedVal(RetrieveVar(state, VR), state, VR, T);
- // If the super region is 'self' then return the symbol representing
- // the value of the ivar upon entry to the method.
- if (SR == SelfRegion) {
- // FIXME: Do we need to handle the case where the super region
- // has a view? We want to canonicalize the bindings.
- return ValMgr.getRegionValueSymbolVal(R);
- }
-
- // Otherwise, we need a new symbol. For now return Unknown.
- return UnknownVal();
- }
+ RegionBindings B = GetRegionBindings(state->getStore());
+ RegionBindings::data_type* V = B.lookup(R);
+
+ // Check if the region has a binding.
+ if (V)
+ if (SVal const *SV = V->getValue())
+ return SValuator::CastResult(state, *SV);
// The location does not have a bound value. This means that it has
// the value it had upon its creation and/or entry to the analyzed
// function/method. These are either symbolic values or 'undefined'.
- // We treat function parameters as symbolic values.
- if (const VarRegion* VR = dyn_cast<VarRegion>(R)) {
- const VarDecl *VD = VR->getDecl();
-
- if (VD == SelfDecl)
- return loc::MemRegionVal(getSelfRegion(0));
-
- if (VR->hasGlobalsOrParametersStorage())
- return ValMgr.getRegionValueSymbolValOrUnknown(VR, VD->getType());
- }
-
+#if HEAP_UNDEFINED
if (R->hasHeapOrStackStorage()) {
+#else
+ if (R->hasStackStorage()) {
+#endif
// All stack variables are considered to have undefined values
// upon creation. All heap allocated blocks are considered to
// have undefined values as well unless they are explicitly bound
// to specific values.
- return UndefinedVal();
+ return SValuator::CastResult(state, UndefinedVal());
+ }
+
+ // All other values are symbolic.
+ return SValuator::CastResult(state,
+ ValMgr.getRegionValueSymbolValOrUnknown(R, RTy));
+}
+
+std::pair<const GRState*, const MemRegion*>
+RegionStoreManager::GetLazyBinding(RegionBindings B, const MemRegion *R) {
+ if (Optional<SVal> OV = getDirectBinding(B, R))
+ if (const nonloc::LazyCompoundVal *V =
+ dyn_cast<nonloc::LazyCompoundVal>(OV.getPointer()))
+ return std::make_pair(V->getState(), V->getRegion());
+
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ const std::pair<const GRState *, const MemRegion *> &X =
+ GetLazyBinding(B, ER->getSuperRegion());
+
+ if (X.first)
+ return std::make_pair(X.first,
+ MRMgr.getElementRegionWithSuper(ER, X.second));
}
+ else if (const FieldRegion *FR = dyn_cast<FieldRegion>(R)) {
+ const std::pair<const GRState *, const MemRegion *> &X =
+ GetLazyBinding(B, FR->getSuperRegion());
- // If the region is already cast to another type, use that type to create the
- // symbol value.
- if (const QualType *p = state->get<RegionCasts>(R)) {
- QualType T = *p;
- RTy = T->getAsPointerType()->getPointeeType();
+ if (X.first)
+ return std::make_pair(X.first,
+ MRMgr.getFieldRegionWithSuper(FR, X.second));
}
- // All other values are symbolic.
- return ValMgr.getRegionValueSymbolValOrUnknown(R, RTy);
+ return std::make_pair((const GRState*) 0, (const MemRegion *) 0);
}
SVal RegionStoreManager::RetrieveElement(const GRState* state,
const ElementRegion* R) {
// Check if the region has a binding.
- RegionBindingsTy B = GetRegionBindings(state->getStore());
- if (const SVal* V = B.lookup(R))
+ RegionBindings B = GetRegionBindings(state->getStore());
+ if (Optional<SVal> V = getDirectBinding(B, R))
return *V;
const MemRegion* superR = R->getSuperRegion();
// Check if the region is an element region of a string literal.
if (const StringRegion *StrR=dyn_cast<StringRegion>(superR)) {
+ // FIXME: Handle loads from strings where the literal is treated as
+ // an integer, e.g., *((unsigned int*)"hello")
+ ASTContext &Ctx = getContext();
+ QualType T = StrR->getValueType(Ctx)->getAs<ArrayType>()->getElementType();
+ if (T != Ctx.getCanonicalType(R->getElementType()))
+ return UnknownVal();
+
const StringLiteral *Str = StrR->getStringLiteral();
SVal Idx = R->getIndex();
if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&Idx)) {
int64_t i = CI->getValue().getSExtValue();
- char c;
- if (i == Str->getByteLength())
- c = '\0';
- else
- c = Str->getStrData()[i];
- return ValMgr.makeIntVal(c, getContext().CharTy);
+ int64_t byteLength = Str->getByteLength();
+ if (i > byteLength) {
+ // Buffer overflow checking in GRExprEngine should handle this case,
+ // but we shouldn't rely on it to not overflow here if that checking
+ // is disabled.
+ return UnknownVal();
+ }
+ char c = (i == byteLength) ? '\0' : Str->getStrData()[i];
+ return ValMgr.makeIntVal(c, T);
}
}
- // Check if the super region has a default value.
- if (const SVal *D = state->get<RegionDefaultValue>(superR)) {
- if (D->hasConjuredSymbol())
- return ValMgr.getRegionValueSymbolVal(R);
- else
- return *D;
+ // Special case: the current region represents a cast and it and the super
+ // region both have pointer types or intptr_t types. If so, perform the
+ // retrieve from the super region and appropriately "cast" the value.
+ // This is needed to support OSAtomicCompareAndSwap and friends or other
+  // loads that treat integers as pointers and vice versa.
+ if (R->getIndex().isZeroConstant()) {
+ if (const TypedRegion *superTR = dyn_cast<TypedRegion>(superR)) {
+ ASTContext &Ctx = getContext();
+ if (IsAnyPointerOrIntptr(superTR->getValueType(Ctx), Ctx)) {
+ QualType valTy = R->getValueType(Ctx);
+ if (IsAnyPointerOrIntptr(valTy, Ctx)) {
+ // Retrieve the value from the super region. This will be casted to
+ // valTy when we return to 'Retrieve'.
+ const SValuator::CastResult &cr = Retrieve(state,
+ loc::MemRegionVal(superR),
+ valTy);
+ return cr.getSVal();
+ }
+ }
+ }
}
- // Check if the super region has a binding.
- if (B.lookup(superR)) {
- // We do not extract the bit value from super region for now.
+ // Check if the immediate super region has a direct binding.
+ if (Optional<SVal> V = getDirectBinding(B, superR)) {
+ if (SymbolRef parentSym = V->getAsSymbol())
+ return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ if (V->isUnknownOrUndef())
+ return *V;
+
+ // Handle LazyCompoundVals for the immediate super region. Other cases
+ // are handled in 'RetrieveFieldOrElementCommon'.
+ if (const nonloc::LazyCompoundVal *LCV =
+ dyn_cast<nonloc::LazyCompoundVal>(V)) {
+
+ R = MRMgr.getElementRegionWithSuper(R, LCV->getRegion());
+ return RetrieveElement(LCV->getState(), R);
+ }
+
+ // Other cases: give up.
return UnknownVal();
}
- if (R->hasHeapStorage()) {
- // FIXME: If the region has heap storage and we know nothing special
- // about its bindings, should we instead return UnknownVal? Seems like
- // we should only return UndefinedVal in the cases where we know the value
- // will be undefined.
- return UndefinedVal();
+ return RetrieveFieldOrElementCommon(state, R, R->getElementType(), superR);
+}
+
+SVal RegionStoreManager::RetrieveField(const GRState* state,
+ const FieldRegion* R) {
+
+ // Check if the region has a binding.
+ RegionBindings B = GetRegionBindings(state->getStore());
+ if (Optional<SVal> V = getDirectBinding(B, R))
+ return *V;
+
+ QualType Ty = R->getValueType(getContext());
+ return RetrieveFieldOrElementCommon(state, R, Ty, R->getSuperRegion());
+}
+
+SVal RegionStoreManager::RetrieveFieldOrElementCommon(const GRState *state,
+ const TypedRegion *R,
+ QualType Ty,
+ const MemRegion *superR) {
+
+ // At this point we have already checked in either RetrieveElement or
+ // RetrieveField if 'R' has a direct binding.
+
+ RegionBindings B = GetRegionBindings(state->getStore());
+
+ while (superR) {
+ if (const Optional<SVal> &D = getDefaultBinding(B, superR)) {
+ if (SymbolRef parentSym = D->getAsSymbol())
+ return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ if (D->isZeroConstant())
+ return ValMgr.makeZeroVal(Ty);
+
+ if (D->isUnknown())
+ return *D;
+
+ assert(0 && "Unknown default value");
+ }
+
+ // If our super region is a field or element itself, walk up the region
+ // hierarchy to see if there is a default value installed in an ancestor.
+ if (isa<FieldRegion>(superR) || isa<ElementRegion>(superR)) {
+ superR = cast<SubRegion>(superR)->getSuperRegion();
+ continue;
+ }
+
+ break;
+ }
+
+ // Lazy binding?
+ const GRState *lazyBindingState = NULL;
+ const MemRegion *lazyBindingRegion = NULL;
+ llvm::tie(lazyBindingState, lazyBindingRegion) = GetLazyBinding(B, R);
+
+ if (lazyBindingState) {
+ assert(lazyBindingRegion && "Lazy-binding region not set");
+
+ if (isa<ElementRegion>(R))
+ return RetrieveElement(lazyBindingState,
+ cast<ElementRegion>(lazyBindingRegion));
+
+ return RetrieveField(lazyBindingState,
+ cast<FieldRegion>(lazyBindingRegion));
}
if (R->hasStackStorage() && !R->hasParametersStorage()) {
- // Currently we don't reason specially about Clang-style vectors. Check
- // if superR is a vector and if so return Unknown.
- if (const TypedRegion *typedSuperR = dyn_cast<TypedRegion>(superR)) {
- if (typedSuperR->getValueType(getContext())->isVectorType())
- return UnknownVal();
+
+ if (isa<ElementRegion>(R)) {
+ // Currently we don't reason specially about Clang-style vectors. Check
+ // if superR is a vector and if so return Unknown.
+ if (const TypedRegion *typedSuperR = dyn_cast<TypedRegion>(superR)) {
+ if (typedSuperR->getValueType(getContext())->isVectorType())
+ return UnknownVal();
+ }
}
return UndefinedVal();
}
- QualType Ty = R->getValueType(getContext());
+ // All other values are symbolic.
+ return ValMgr.getRegionValueSymbolValOrUnknown(R, Ty);
+}
- // If the region is already cast to another type, use that type to create the
- // symbol value.
- if (const QualType *p = state->get<RegionCasts>(R))
- Ty = (*p)->getAsPointerType()->getPointeeType();
+SVal RegionStoreManager::RetrieveObjCIvar(const GRState* state,
+ const ObjCIvarRegion* R) {
- return ValMgr.getRegionValueSymbolValOrUnknown(R, Ty);
+ // Check if the region has a binding.
+ RegionBindings B = GetRegionBindings(state->getStore());
+
+ if (Optional<SVal> V = getDirectBinding(B, R))
+ return *V;
+
+ const MemRegion *superR = R->getSuperRegion();
+
+ // Check if the super region has a binding.
+ if (Optional<SVal> V = getDirectBinding(B, superR)) {
+ if (SymbolRef parentSym = V->getAsSymbol())
+ return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ // Other cases: give up.
+ return UnknownVal();
+ }
+
+ return RetrieveLazySymbol(state, R);
}
-SVal RegionStoreManager::RetrieveField(const GRState* state,
- const FieldRegion* R) {
- QualType Ty = R->getValueType(getContext());
+SVal RegionStoreManager::RetrieveVar(const GRState *state,
+ const VarRegion *R) {
// Check if the region has a binding.
- RegionBindingsTy B = GetRegionBindings(state->getStore());
- if (const SVal* V = B.lookup(R))
+ RegionBindings B = GetRegionBindings(state->getStore());
+
+ if (Optional<SVal> V = getDirectBinding(B, R))
return *V;
- const MemRegion* superR = R->getSuperRegion();
- if (const SVal* D = state->get<RegionDefaultValue>(superR)) {
- if (D->hasConjuredSymbol())
- return ValMgr.getRegionValueSymbolVal(R);
+ // Lazily derive a value for the VarRegion.
+ const VarDecl *VD = R->getDecl();
- if (D->isZeroConstant())
- return ValMgr.makeZeroVal(Ty);
+ if (R->hasGlobalsOrParametersStorage())
+ return ValMgr.getRegionValueSymbolValOrUnknown(R, VD->getType());
- if (D->isUnknown())
- return *D;
+ return UndefinedVal();
+}
- assert(0 && "Unknown default value");
- }
+SVal RegionStoreManager::RetrieveLazySymbol(const GRState *state,
+ const TypedRegion *R) {
- // FIXME: Is this correct? Should it be UnknownVal?
- if (R->hasHeapStorage())
- return UndefinedVal();
-
- if (R->hasStackStorage() && !R->hasParametersStorage())
- return UndefinedVal();
-
- // If the region is already cast to another type, use that type to create the
- // symbol value.
- if (const QualType *p = state->get<RegionCasts>(R)) {
- QualType tmp = *p;
- Ty = tmp->getAsPointerType()->getPointeeType();
- }
+ QualType valTy = R->getValueType(getContext());
// All other values are symbolic.
- return ValMgr.getRegionValueSymbolValOrUnknown(R, Ty);
+ return ValMgr.getRegionValueSymbolValOrUnknown(R, valTy);
}
-SVal RegionStoreManager::RetrieveStruct(const GRState *state,
- const TypedRegion* R){
+SVal RegionStoreManager::RetrieveStruct(const GRState *state,
+ const TypedRegion* R) {
QualType T = R->getValueType(getContext());
assert(T->isStructureType());
const RecordType* RT = T->getAsStructureType();
RecordDecl* RD = RT->getDecl();
assert(RD->isDefinition());
-
+ (void)RD;
+#if USE_EXPLICIT_COMPOUND
llvm::ImmutableList<SVal> StructVal = getBasicVals().getEmptySValList();
// FIXME: We shouldn't use a std::vector. If RecordDecl doesn't have a
@@ -1030,33 +1312,38 @@ SVal RegionStoreManager::RetrieveStruct(const GRState *state,
Field != FieldEnd; ++Field) {
FieldRegion* FR = MRMgr.getFieldRegion(*Field, R);
QualType FTy = (*Field)->getType();
- SVal FieldValue = Retrieve(state, loc::MemRegionVal(FR), FTy);
+ SVal FieldValue = Retrieve(state, loc::MemRegionVal(FR), FTy).getSVal();
StructVal = getBasicVals().consVals(FieldValue, StructVal);
}
return ValMgr.makeCompoundVal(T, StructVal);
+#else
+ return ValMgr.makeLazyCompoundVal(state, R);
+#endif
}
SVal RegionStoreManager::RetrieveArray(const GRState *state,
const TypedRegion * R) {
-
+#if USE_EXPLICIT_COMPOUND
QualType T = R->getValueType(getContext());
ConstantArrayType* CAT = cast<ConstantArrayType>(T.getTypePtr());
llvm::ImmutableList<SVal> ArrayVal = getBasicVals().getEmptySValList();
- llvm::APSInt Size(CAT->getSize(), false);
- llvm::APSInt i = getBasicVals().getZeroWithPtrWidth(false);
-
- for (; i < Size; ++i) {
- SVal Idx = ValMgr.makeIntVal(i);
+ uint64_t size = CAT->getSize().getZExtValue();
+ for (uint64_t i = 0; i < size; ++i) {
+ SVal Idx = ValMgr.makeArrayIndex(i);
ElementRegion* ER = MRMgr.getElementRegion(CAT->getElementType(), Idx, R,
- getContext());
+ getContext());
QualType ETy = ER->getElementType();
- SVal ElementVal = Retrieve(state, loc::MemRegionVal(ER), ETy);
+ SVal ElementVal = Retrieve(state, loc::MemRegionVal(ER), ETy).getSVal();
ArrayVal = getBasicVals().consVals(ElementVal, ArrayVal);
}
return ValMgr.makeCompoundVal(T, ArrayVal);
+#else
+ assert(isa<ConstantArrayType>(R->getValueType(getContext())));
+ return ValMgr.makeLazyCompoundVal(state, R);
+#endif
}
//===----------------------------------------------------------------------===//
@@ -1065,15 +1352,15 @@ SVal RegionStoreManager::RetrieveArray(const GRState *state,
Store RegionStoreManager::Remove(Store store, Loc L) {
const MemRegion* R = 0;
-
+
if (isa<loc::MemRegionVal>(L))
R = cast<loc::MemRegionVal>(L).getRegion();
-
+
if (R) {
- RegionBindingsTy B = GetRegionBindings(store);
+ RegionBindings B = GetRegionBindings(store);
return RBFactory.Remove(B, R).getRoot();
}
-
+
return store;
}
@@ -1082,32 +1369,67 @@ const GRState *RegionStoreManager::Bind(const GRState *state, Loc L, SVal V) {
return state;
// If we get here, the location should be a region.
- const MemRegion* R = cast<loc::MemRegionVal>(L).getRegion();
-
+ const MemRegion *R = cast<loc::MemRegionVal>(L).getRegion();
+
// Check if the region is a struct region.
if (const TypedRegion* TR = dyn_cast<TypedRegion>(R))
if (TR->getValueType(getContext())->isStructureType())
return BindStruct(state, TR, V);
-
- RegionBindingsTy B = GetRegionBindings(state->getStore());
-
- B = RBFactory.Add(B, R, V);
-
- return state->makeWithStore(B.getRoot());
+
+ // Special case: the current region represents a cast and it and the super
+ // region both have pointer types or intptr_t types. If so, perform the
+ // bind to the super region.
+ // This is needed to support OSAtomicCompareAndSwap and friends or other
+  // loads that treat integers as pointers and vice versa.
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ if (ER->getIndex().isZeroConstant()) {
+ if (const TypedRegion *superR =
+ dyn_cast<TypedRegion>(ER->getSuperRegion())) {
+ ASTContext &Ctx = getContext();
+ QualType superTy = superR->getValueType(Ctx);
+ QualType erTy = ER->getValueType(Ctx);
+
+ if (IsAnyPointerOrIntptr(superTy, Ctx) &&
+ IsAnyPointerOrIntptr(erTy, Ctx)) {
+ SValuator::CastResult cr =
+ ValMgr.getSValuator().EvalCast(V, state, superTy, erTy);
+ return Bind(cr.getState(), loc::MemRegionVal(superR), cr.getSVal());
+ }
+ // For now, just invalidate the fields of the struct/union/class.
+ // FIXME: Precisely handle the fields of the record.
+ if (superTy->isRecordType())
+ return InvalidateRegion(state, superR, NULL, 0);
+ }
+ }
+ }
+ else if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+ // Binding directly to a symbolic region should be treated as binding
+ // to element 0.
+ QualType T = SR->getSymbol()->getType(getContext());
+ T = T->getAs<PointerType>()->getPointeeType();
+ R = GetElementZeroRegion(SR, T);
+ }
+
+ // Perform the binding.
+ RegionBindings B = GetRegionBindings(state->getStore());
+ return state->makeWithStore(
+ RBFactory.Add(B, R, BindingVal(V, BindingVal::Direct)).getRoot());
}
-const GRState *RegionStoreManager::BindDecl(const GRState *state,
- const VarDecl* VD, SVal InitVal) {
+const GRState *RegionStoreManager::BindDecl(const GRState *ST,
+ const VarDecl *VD,
+ const LocationContext *LC,
+ SVal InitVal) {
QualType T = VD->getType();
- VarRegion* VR = MRMgr.getVarRegion(VD);
+ VarRegion* VR = MRMgr.getVarRegion(VD, LC);
if (T->isArrayType())
- return BindArray(state, VR, InitVal);
+ return BindArray(ST, VR, InitVal);
if (T->isStructureType())
- return BindStruct(state, VR, InitVal);
+ return BindStruct(ST, VR, InitVal);
- return Bind(state, ValMgr.makeLoc(VR), InitVal);
+ return Bind(ST, ValMgr.makeLoc(VR), InitVal);
}
// FIXME: this method should be merged into Bind().
@@ -1115,21 +1437,20 @@ const GRState *
RegionStoreManager::BindCompoundLiteral(const GRState *state,
const CompoundLiteralExpr* CL,
SVal V) {
-
+
CompoundLiteralRegion* R = MRMgr.getCompoundLiteralRegion(CL);
return Bind(state, loc::MemRegionVal(R), V);
}
const GRState *RegionStoreManager::BindArray(const GRState *state,
- const TypedRegion* R,
+ const TypedRegion* R,
SVal Init) {
QualType T = R->getValueType(getContext());
ConstantArrayType* CAT = cast<ConstantArrayType>(T.getTypePtr());
QualType ElementTy = CAT->getElementType();
- llvm::APSInt Size(CAT->getSize(), false);
- llvm::APSInt i(llvm::APInt::getNullValue(Size.getBitWidth()), false);
+ uint64_t size = CAT->getSize().getZExtValue();
// Check if the init expr is a StringLiteral.
if (isa<loc::MemRegionVal>(Init)) {
@@ -1142,12 +1463,13 @@ const GRState *RegionStoreManager::BindArray(const GRState *state,
// Copy bytes from the string literal into the target array. Trailing bytes
// in the array that are not covered by the string literal are initialized
// to zero.
- for (; i < Size; ++i, ++j) {
+ for (uint64_t i = 0; i < size; ++i, ++j) {
if (j >= len)
break;
- SVal Idx = ValMgr.makeIntVal(i);
- ElementRegion* ER = MRMgr.getElementRegion(ElementTy, Idx,R,getContext());
+ SVal Idx = ValMgr.makeArrayIndex(i);
+ ElementRegion* ER = MRMgr.getElementRegion(ElementTy, Idx, R,
+ getContext());
SVal V = ValMgr.makeIntVal(str[j], sizeof(char)*8, true);
state = Bind(state, loc::MemRegionVal(ER), V);
@@ -1156,29 +1478,39 @@ const GRState *RegionStoreManager::BindArray(const GRState *state,
return state;
}
+ // Handle lazy compound values.
+ if (nonloc::LazyCompoundVal *LCV = dyn_cast<nonloc::LazyCompoundVal>(&Init))
+ return CopyLazyBindings(*LCV, state, R);
+
+ // Remaining case: explicit compound values.
nonloc::CompoundVal& CV = cast<nonloc::CompoundVal>(Init);
nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
+ uint64_t i = 0;
- for (; i < Size; ++i, ++VI) {
+ for (; i < size; ++i, ++VI) {
// The init list might be shorter than the array length.
if (VI == VE)
break;
- SVal Idx = ValMgr.makeIntVal(i);
+ SVal Idx = ValMgr.makeArrayIndex(i);
ElementRegion* ER = MRMgr.getElementRegion(ElementTy, Idx, R, getContext());
if (CAT->getElementType()->isStructureType())
state = BindStruct(state, ER, *VI);
else
+ // FIXME: Do we need special handling of nested arrays?
state = Bind(state, ValMgr.makeLoc(ER), *VI);
}
// If the init list is shorter than the array length, set the array default
// value.
- if (i < Size) {
+ if (i < size) {
if (ElementTy->isIntegerType()) {
SVal V = ValMgr.makeZeroVal(ElementTy);
- state = setDefaultValue(state, R, V);
+ Store store = state->getStore();
+ RegionBindings B = GetRegionBindings(store);
+ B = RBFactory.Add(B, R, BindingVal(V, BindingVal::Default));
+ state = state->makeWithStore(B.getRoot());
}
}
@@ -1188,23 +1520,27 @@ const GRState *RegionStoreManager::BindArray(const GRState *state,
const GRState *
RegionStoreManager::BindStruct(const GRState *state, const TypedRegion* R,
SVal V) {
-
+
if (!Features.supportsFields())
return state;
-
+
QualType T = R->getValueType(getContext());
assert(T->isStructureType());
- const RecordType* RT = T->getAsRecordType();
+ const RecordType* RT = T->getAs<RecordType>();
RecordDecl* RD = RT->getDecl();
if (!RD->isDefinition())
return state;
+ // Handle lazy compound values.
+ if (const nonloc::LazyCompoundVal *LCV=dyn_cast<nonloc::LazyCompoundVal>(&V))
+ return CopyLazyBindings(*LCV, state, R);
+
// We may get non-CompoundVal accidentally due to imprecise cast logic.
// Ignore them and kill the field values.
if (V.isUnknown() || !isa<nonloc::CompoundVal>(V))
- return KillStruct(state, R);
+ return state->makeWithStore(KillStruct(state->getStore(), R));
nonloc::CompoundVal& CV = cast<nonloc::CompoundVal>(V);
nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
@@ -1217,253 +1553,286 @@ RegionStoreManager::BindStruct(const GRState *state, const TypedRegion* R,
break;
QualType FTy = (*FI)->getType();
- FieldRegion* FR = MRMgr.getFieldRegion(*FI, R);
+ const FieldRegion* FR = MRMgr.getFieldRegion(*FI, R);
- if (Loc::IsLocType(FTy) || FTy->isIntegerType())
- state = Bind(state, ValMgr.makeLoc(FR), *VI);
- else if (FTy->isArrayType())
+ if (FTy->isArrayType())
state = BindArray(state, FR, *VI);
else if (FTy->isStructureType())
state = BindStruct(state, FR, *VI);
+ else
+ state = Bind(state, ValMgr.makeLoc(FR), *VI);
}
// There may be fewer values in the initialize list than the fields of struct.
- if (FI != FE)
- state = setDefaultValue(state, R, ValMgr.makeIntVal(0, false));
+ if (FI != FE) {
+ Store store = state->getStore();
+ RegionBindings B = GetRegionBindings(store);
+ B = RBFactory.Add(B, R,
+ BindingVal(ValMgr.makeIntVal(0, false), BindingVal::Default));
+ state = state->makeWithStore(B.getRoot());
+ }
return state;
}
-const GRState *RegionStoreManager::KillStruct(const GRState *state,
- const TypedRegion* R){
+Store RegionStoreManager::KillStruct(Store store, const TypedRegion* R) {
+ RegionBindings B = GetRegionBindings(store);
+ llvm::OwningPtr<RegionStoreSubRegionMap>
+ SubRegions(getRegionStoreSubRegionMap(store));
+ RemoveSubRegionBindings(B, R, *SubRegions);
// Set the default value of the struct region to "unknown".
- state = state->set<RegionDefaultValue>(R, UnknownVal());
-
- // Remove all bindings for the subregions of the struct.
- Store store = state->getStore();
- RegionBindingsTy B = GetRegionBindings(store);
- for (RegionBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- const MemRegion* R = I.getKey();
- if (const SubRegion* subRegion = dyn_cast<SubRegion>(R))
- if (subRegion->isSubRegionOf(R))
- store = Remove(store, ValMgr.makeLoc(subRegion));
- }
-
- return state->makeWithStore(store);
-}
+ B = RBFactory.Add(B, R, BindingVal(UnknownVal(), BindingVal::Default));
-//===----------------------------------------------------------------------===//
-// Region views.
-//===----------------------------------------------------------------------===//
-
-const GRState *RegionStoreManager::AddRegionView(const GRState *state,
- const MemRegion* View,
- const MemRegion* Base) {
-
- // First, retrieve the region view of the base region.
- const RegionViews* d = state->get<RegionViewMap>(Base);
- RegionViews L = d ? *d : RVFactory.GetEmptySet();
-
- // Now add View to the region view.
- L = RVFactory.Add(L, View);
-
- // Create a new state with the new region view.
- return state->set<RegionViewMap>(Base, L);
+ return B.getRoot();
}
-const GRState *RegionStoreManager::RemoveRegionView(const GRState *state,
- const MemRegion* View,
- const MemRegion* Base) {
- // Retrieve the region view of the base region.
- const RegionViews* d = state->get<RegionViewMap>(Base);
+const GRState*
+RegionStoreManager::CopyLazyBindings(nonloc::LazyCompoundVal V,
+ const GRState *state,
+ const TypedRegion *R) {
- // If the base region has no view, return.
- if (!d)
- return state;
+ // Nuke the old bindings stemming from R.
+ RegionBindings B = GetRegionBindings(state->getStore());
- // Remove the view.
- return state->set<RegionViewMap>(Base, RVFactory.Remove(*d, View));
-}
+ llvm::OwningPtr<RegionStoreSubRegionMap>
+ SubRegions(getRegionStoreSubRegionMap(state->getStore()));
-const GRState *RegionStoreManager::setCastType(const GRState *state,
- const MemRegion* R, QualType T) {
- return state->set<RegionCasts>(R, T);
-}
+ // B and DVM are updated after the call to RemoveSubRegionBindings.
+ RemoveSubRegionBindings(B, R, *SubRegions.get());
-const GRState *RegionStoreManager::setDefaultValue(const GRState *state,
- const MemRegion* R, SVal V) {
- return state->set<RegionDefaultValue>(R, V);
+ // Now copy the bindings. This amounts to just binding 'V' to 'R'. This
+ // results in a zero-copy algorithm.
+ return state->makeWithStore(
+ RBFactory.Add(B, R, BindingVal(V, BindingVal::Direct)).getRoot());
}
//===----------------------------------------------------------------------===//
// State pruning.
//===----------------------------------------------------------------------===//
-static void UpdateLiveSymbols(SVal X, SymbolReaper& SymReaper) {
- if (loc::MemRegionVal *XR = dyn_cast<loc::MemRegionVal>(&X)) {
- const MemRegion *R = XR->getRegion();
-
- while (R) {
- if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
- SymReaper.markLive(SR->getSymbol());
- return;
- }
-
- if (const SubRegion *SR = dyn_cast<SubRegion>(R)) {
- R = SR->getSuperRegion();
- continue;
- }
-
- break;
- }
-
- return;
- }
+namespace {
+class VISIBILITY_HIDDEN RBDNode
+ : public std::pair<const GRState*, const MemRegion *> {
+public:
+ RBDNode(const GRState *st, const MemRegion *r)
+ : std::pair<const GRState*, const MemRegion*>(st, r) {}
- for (SVal::symbol_iterator SI=X.symbol_begin(), SE=X.symbol_end();SI!=SE;++SI)
- SymReaper.markLive(*SI);
-}
+ const GRState *getState() const { return first; }
+ const MemRegion *getRegion() const { return second; }
+};
-Store RegionStoreManager::RemoveDeadBindings(const GRState *state, Stmt* Loc,
- SymbolReaper& SymReaper,
- llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
-{
- Store store = state->getStore();
- RegionBindingsTy B = GetRegionBindings(store);
-
- // Lazily constructed backmap from MemRegions to SubRegions.
- typedef llvm::ImmutableSet<const MemRegion*> SubRegionsTy;
- typedef llvm::ImmutableMap<const MemRegion*, SubRegionsTy> SubRegionsMapTy;
-
- // FIXME: As a future optimization we can modifiy BumpPtrAllocator to have
- // the ability to reuse memory. This way we can keep TmpAlloc around as
- // an instance variable of RegionStoreManager (avoiding repeated malloc
- // overhead).
- llvm::BumpPtrAllocator TmpAlloc;
+enum VisitFlag { NotVisited = 0, VisitedFromSubRegion, VisitedFromSuperRegion };
+
+class RBDItem : public RBDNode {
+private:
+ const VisitFlag VF;
- // Factory objects.
- SubRegionsMapTy::Factory SubRegMapF(TmpAlloc);
- SubRegionsTy::Factory SubRegF(TmpAlloc);
+public:
+ RBDItem(const GRState *st, const MemRegion *r, VisitFlag vf)
+ : RBDNode(st, r), VF(vf) {}
+
+ VisitFlag getVisitFlag() const { return VF; }
+};
+} // end anonymous namespace
+void RegionStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
+ SymbolReaper& SymReaper,
+ llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
+{
+ Store store = state.getStore();
+ RegionBindings B = GetRegionBindings(store);
+
// The backmap from regions to subregions.
- SubRegionsMapTy SubRegMap = SubRegMapF.GetEmptyMap();
+ llvm::OwningPtr<RegionStoreSubRegionMap>
+ SubRegions(getRegionStoreSubRegionMap(store));
- // Do a pass over the regions in the store. For VarRegions we check if
- // the variable is still live and if so add it to the list of live roots.
- // For other regions we populate our region backmap.
+ // Do a pass over the regions in the store. For VarRegions we check if
+ // the variable is still live and if so add it to the list of live roots.
+ // For other regions we populate our region backmap.
llvm::SmallVector<const MemRegion*, 10> IntermediateRoots;
- for (RegionBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- IntermediateRoots.push_back(I.getKey());
+ // Scan the direct bindings for "intermediate" roots.
+ for (RegionBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ const MemRegion *R = I.getKey();
+ IntermediateRoots.push_back(R);
}
+ // Process the "intermediate" roots to find if they are referenced by
+ // real roots.
+ llvm::SmallVector<RBDItem, 10> WorkList;
+ llvm::DenseMap<const MemRegion*,unsigned> IntermediateVisited;
+
while (!IntermediateRoots.empty()) {
const MemRegion* R = IntermediateRoots.back();
IntermediateRoots.pop_back();
+ unsigned &visited = IntermediateVisited[R];
+ if (visited)
+ continue;
+ visited = 1;
+
if (const VarRegion* VR = dyn_cast<VarRegion>(R)) {
- if (SymReaper.isLive(Loc, VR->getDecl())) {
- RegionRoots.push_back(VR); // This is a live "root".
- }
- }
- else if (const SymbolicRegion* SR = dyn_cast<SymbolicRegion>(R)) {
- if (SymReaper.isLive(SR->getSymbol()))
- RegionRoots.push_back(SR);
+ if (SymReaper.isLive(Loc, VR->getDecl()))
+ WorkList.push_back(RBDItem(&state, VR, VisitedFromSuperRegion));
+ continue;
}
- else {
- // Get the super region for R.
- const MemRegion* superR = cast<SubRegion>(R)->getSuperRegion();
-
- // Get the current set of subregions for SuperR.
- const SubRegionsTy* SRptr = SubRegMap.lookup(superR);
- SubRegionsTy SRs = SRptr ? *SRptr : SubRegF.GetEmptySet();
-
- // Add R to the subregions of SuperR.
- SubRegMap = SubRegMapF.Add(SubRegMap, superR, SubRegF.Add(SRs, R));
-
- // Super region may be VarRegion or subregion of another VarRegion. Add it
- // to the work list.
- if (isa<SubRegion>(superR))
- IntermediateRoots.push_back(superR);
+
+ if (const SymbolicRegion* SR = dyn_cast<SymbolicRegion>(R)) {
+ if (SymReaper.isLive(SR->getSymbol()))
+ WorkList.push_back(RBDItem(&state, SR, VisitedFromSuperRegion));
+ continue;
}
+
+ // Add the super region for R to the worklist if it is a subregion.
+ if (const SubRegion* superR =
+ dyn_cast<SubRegion>(cast<SubRegion>(R)->getSuperRegion()))
+ IntermediateRoots.push_back(superR);
+ }
+
+ // Enqueue the RegionRoots onto WorkList.
+ for (llvm::SmallVectorImpl<const MemRegion*>::iterator I=RegionRoots.begin(),
+ E=RegionRoots.end(); I!=E; ++I) {
+ WorkList.push_back(RBDItem(&state, *I, VisitedFromSuperRegion));
}
+ RegionRoots.clear();
- // Process the worklist of RegionRoots. This performs a "mark-and-sweep"
- // of the store. We want to find all live symbols and dead regions.
- llvm::SmallPtrSet<const MemRegion*, 10> Marked;
+ // Process the worklist.
+ typedef llvm::DenseMap<std::pair<const GRState*, const MemRegion*>, VisitFlag>
+ VisitMap;
+
+ VisitMap Visited;
- while (!RegionRoots.empty()) {
- // Dequeue the next region on the worklist.
- const MemRegion* R = RegionRoots.back();
- RegionRoots.pop_back();
+ while (!WorkList.empty()) {
+ RBDItem N = WorkList.back();
+ WorkList.pop_back();
+
+ // Have we visited this node before?
+ VisitFlag &VF = Visited[N];
+ if (VF >= N.getVisitFlag())
+ continue;
- // Check if we have already processed this region.
- if (Marked.count(R)) continue;
+ const MemRegion *R = N.getRegion();
+ const GRState *state_N = N.getState();
- // Mark this region as processed. This is needed for termination in case
- // a region is referenced more than once.
- Marked.insert(R);
+ // Enqueue subregions?
+ if (N.getVisitFlag() == VisitedFromSuperRegion) {
+ RegionStoreSubRegionMap *M;
+
+ if (&state == state_N)
+ M = SubRegions.get();
+ else {
+ RegionStoreSubRegionMap *& SM = SC[state_N];
+ if (!SM)
+ SM = getRegionStoreSubRegionMap(state_N->getStore());
+ M = SM;
+ }
+
+ RegionStoreSubRegionMap::iterator I, E;
+ for (llvm::tie(I, E) = M->begin_end(R); I != E; ++I)
+ WorkList.push_back(RBDItem(state_N, *I, VisitedFromSuperRegion));
+ }
+
+ // At this point, if we have already visited this region before, we are
+ // done.
+ if (VF != NotVisited) {
+ VF = N.getVisitFlag();
+ continue;
+ }
+ VF = N.getVisitFlag();
+ // Enqueue the super region.
+ if (const SubRegion *SR = dyn_cast<SubRegion>(R)) {
+ const MemRegion *superR = SR->getSuperRegion();
+ if (!isa<MemSpaceRegion>(superR)) {
+ // If 'R' is a field or an element, we want to keep the bindings
+ // for the other fields and elements around. The reason is that
+ // pointer arithmetic can get us to the other fields or elements.
+ // FIXME: add an assertion that this is always true.
+ VisitFlag NewVisit =
+ isa<FieldRegion>(R) || isa<ElementRegion>(R) || isa<ObjCIvarRegion>(R)
+ ? VisitedFromSuperRegion : VisitedFromSubRegion;
+
+ WorkList.push_back(RBDItem(state_N, superR, NewVisit));
+ }
+ }
+
// Mark the symbol for any live SymbolicRegion as "live". This means we
// should continue to track that symbol.
if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(R))
SymReaper.markLive(SymR->getSymbol());
+
+ Store store_N = state_N->getStore();
+ RegionBindings B_N = GetRegionBindings(store_N);
// Get the data binding for R (if any).
- RegionBindingsTy::data_type* Xptr = B.lookup(R);
- if (Xptr) {
- SVal X = *Xptr;
- UpdateLiveSymbols(X, SymReaper); // Update the set of live symbols.
+ Optional<SVal> V = getBinding(B_N, R);
+
+ if (V) {
+ // Check for lazy bindings.
+ if (const nonloc::LazyCompoundVal *LCV =
+ dyn_cast<nonloc::LazyCompoundVal>(V.getPointer())) {
- // If X is a region, then add it to the RegionRoots.
- if (const MemRegion *RX = X.getAsRegion()) {
- RegionRoots.push_back(RX);
-
- // Mark the super region of the RX as live.
- // e.g.: int x; char *y = (char*) &x; if (*y) ...
- // 'y' => element region. 'x' is its super region.
- // We only add one level super region for now.
- // FIXME: maybe multiple level of super regions should be added.
- if (const SubRegion *SR = dyn_cast<SubRegion>(RX)) {
- RegionRoots.push_back(SR->getSuperRegion());
- }
+ const LazyCompoundValData *D = LCV->getCVData();
+ WorkList.push_back(RBDItem(D->getState(), D->getRegion(),
+ VisitedFromSuperRegion));
+ }
+ else {
+ // Update the set of live symbols.
+ for (SVal::symbol_iterator SI=V->symbol_begin(), SE=V->symbol_end();
+ SI!=SE;++SI)
+ SymReaper.markLive(*SI);
+
+ // If V is a region, then add it to the worklist.
+ if (const MemRegion *RX = V->getAsRegion())
+ WorkList.push_back(RBDItem(state_N, RX, VisitedFromSuperRegion));
}
}
-
- // Get the subregions of R. These are RegionRoots as well since they
- // represent values that are also bound to R.
- const SubRegionsTy* SRptr = SubRegMap.lookup(R);
- if (!SRptr) continue;
- SubRegionsTy SR = *SRptr;
-
- for (SubRegionsTy::iterator I=SR.begin(), E=SR.end(); I!=E; ++I)
- RegionRoots.push_back(*I);
-
}
// We have now scanned the store, marking reachable regions and symbols
// as live. We now remove all the regions that are dead from the store
- // as well as update DSymbols with the set symbols that are now dead.
- for (RegionBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ // as well as update DSymbols with the set symbols that are now dead.
+ for (RegionBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
const MemRegion* R = I.getKey();
    // Is this region live? If so, none of its symbols are dead.
- if (Marked.count(R))
+ if (Visited.find(std::make_pair(&state, R)) != Visited.end())
continue;
-
+
// Remove this dead region from the store.
store = Remove(store, ValMgr.makeLoc(R));
-
+
// Mark all non-live symbols that this region references as dead.
if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(R))
SymReaper.maybeDead(SymR->getSymbol());
-
- SVal X = I.getData();
+
+ SVal X = *I.getData().getValue();
SVal::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
- for (; SI != SE; ++SI) SymReaper.maybeDead(*SI);
+ for (; SI != SE; ++SI)
+ SymReaper.maybeDead(*SI);
}
-
- return store;
+
+ // Write the store back.
+ state.setStore(store);
+}
+
+GRState const *RegionStoreManager::EnterStackFrame(GRState const *state,
+ StackFrameContext const *frame) {
+ FunctionDecl const *FD = cast<FunctionDecl>(frame->getDecl());
+ CallExpr const *CE = cast<CallExpr>(frame->getCallSite());
+
+ FunctionDecl::param_const_iterator PI = FD->param_begin();
+
+ CallExpr::const_arg_iterator AI = CE->arg_begin(), AE = CE->arg_end();
+
+ // Copy the arg expression value to the arg variables.
+ for (; AI != AE; ++AI, ++PI) {
+ SVal ArgVal = state->getSVal(*AI);
+ MemRegion *R = MRMgr.getVarRegion(*PI, frame);
+ state = Bind(state, ValMgr.makeLoc(R), ArgVal);
+ }
+
+ return state;
}
//===----------------------------------------------------------------------===//
@@ -1472,11 +1841,9 @@ Store RegionStoreManager::RemoveDeadBindings(const GRState *state, Stmt* Loc,
void RegionStoreManager::print(Store store, llvm::raw_ostream& OS,
const char* nl, const char *sep) {
- RegionBindingsTy B = GetRegionBindings(store);
- OS << "Store:" << nl;
-
- for (RegionBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- OS << ' '; I.getKey()->print(OS); OS << " : ";
- I.getData().print(OS); OS << nl;
- }
+ RegionBindings B = GetRegionBindings(store);
+ OS << "Store (direct bindings):" << nl;
+
+ for (RegionBindings::iterator I = B.begin(), E = B.end(); I != E; ++I)
+ OS << ' ' << I.getKey() << " : " << I.getData() << nl;
}
diff --git a/lib/Analysis/SVals.cpp b/lib/Analysis/SVals.cpp
index d711ce0a225e..688b7ff6e1e3 100644
--- a/lib/Analysis/SVals.cpp
+++ b/lib/Analysis/SVals.cpp
@@ -14,7 +14,6 @@
#include "clang/Analysis/PathSensitive/GRState.h"
#include "clang/Basic/IdentifierTable.h"
-#include "llvm/Support/Streams.h"
using namespace clang;
using llvm::dyn_cast;
@@ -43,52 +42,32 @@ bool SVal::hasConjuredSymbol() const {
SymbolRef sym = SR->getSymbol();
if (isa<SymbolConjured>(sym))
return true;
- } else if (const CodeTextRegion *CTR = dyn_cast<CodeTextRegion>(R)) {
- if (CTR->isSymbolic()) {
- SymbolRef sym = CTR->getSymbol();
- if (isa<SymbolConjured>(sym))
- return true;
- }
}
}
return false;
}
-const FunctionDecl* SVal::getAsFunctionDecl() const {
+const FunctionDecl *SVal::getAsFunctionDecl() const {
if (const loc::MemRegionVal* X = dyn_cast<loc::MemRegionVal>(this)) {
const MemRegion* R = X->getRegion();
- if (const CodeTextRegion* CTR = R->getAs<CodeTextRegion>()) {
- if (CTR->isDeclared())
- return CTR->getDecl();
- }
+ if (const CodeTextRegion *CTR = R->getAs<CodeTextRegion>())
+ return CTR->getDecl();
}
- return 0;
+ return NULL;
}
-/// getAsLocSymbol - If this SVal is a location (subclasses Loc) and
+/// getAsLocSymbol - If this SVal is a location (subclasses Loc) and
/// wraps a symbol, return that SymbolRef. Otherwise return 0.
// FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
SymbolRef SVal::getAsLocSymbol() const {
if (const loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(this)) {
- const MemRegion *R = X->getRegion();
-
- while (R) {
- // Blast through region views.
- if (const TypedViewRegion *View = dyn_cast<TypedViewRegion>(R)) {
- R = View->getSuperRegion();
- continue;
- }
-
- if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(R))
- return SymR->getSymbol();
-
- break;
- }
+ const MemRegion *R = X->getBaseRegion();
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(R))
+ return SymR->getSymbol();
}
-
- return 0;
+ return NULL;
}
/// getAsSymbol - If this Sval wraps a symbol return that SymbolRef.
@@ -97,11 +76,11 @@ SymbolRef SVal::getAsLocSymbol() const {
SymbolRef SVal::getAsSymbol() const {
if (const nonloc::SymbolVal *X = dyn_cast<nonloc::SymbolVal>(this))
return X->getSymbol();
-
+
if (const nonloc::SymExprVal *X = dyn_cast<nonloc::SymExprVal>(this))
if (SymbolRef Y = dyn_cast<SymbolData>(X->getSymbolicExpression()))
return Y;
-
+
return getAsLocSymbol();
}
@@ -110,7 +89,7 @@ SymbolRef SVal::getAsSymbol() const {
const SymExpr *SVal::getAsSymbolicExpression() const {
if (const nonloc::SymExprVal *X = dyn_cast<nonloc::SymExprVal>(this))
return X->getSymbolicExpression();
-
+
return getAsSymbol();
}
@@ -121,6 +100,11 @@ const MemRegion *SVal::getAsRegion() const {
return 0;
}
+const MemRegion *loc::MemRegionVal::getBaseRegion() const {
+ const MemRegion *R = getRegion();
+ return R ? R->getBaseRegion() : NULL;
+}
+
bool SVal::symbol_iterator::operator==(const symbol_iterator &X) const {
return itr == X.itr;
}
@@ -131,13 +115,13 @@ bool SVal::symbol_iterator::operator!=(const symbol_iterator &X) const {
SVal::symbol_iterator::symbol_iterator(const SymExpr *SE) {
itr.push_back(SE);
- while (!isa<SymbolData>(itr.back())) expand();
+ while (!isa<SymbolData>(itr.back())) expand();
}
SVal::symbol_iterator& SVal::symbol_iterator::operator++() {
assert(!itr.empty() && "attempting to iterate on an 'end' iterator");
assert(isa<SymbolData>(itr.back()));
- itr.pop_back();
+ itr.pop_back();
if (!itr.empty())
while (!isa<SymbolData>(itr.back())) expand();
return *this;
@@ -151,20 +135,28 @@ SymbolRef SVal::symbol_iterator::operator*() {
void SVal::symbol_iterator::expand() {
const SymExpr *SE = itr.back();
itr.pop_back();
-
+
if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SE)) {
itr.push_back(SIE->getLHS());
return;
- }
+ }
else if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(SE)) {
itr.push_back(SSE->getLHS());
itr.push_back(SSE->getRHS());
return;
}
-
+
assert(false && "unhandled expansion case");
}
+const GRState *nonloc::LazyCompoundVal::getState() const {
+ return static_cast<const LazyCompoundValData*>(Data)->getState();
+}
+
+const TypedRegion *nonloc::LazyCompoundVal::getRegion() const {
+ return static_cast<const LazyCompoundValData*>(Data)->getRegion();
+}
+
//===----------------------------------------------------------------------===//
// Other Iterators.
//===----------------------------------------------------------------------===//
@@ -197,10 +189,10 @@ bool SVal::isZeroConstant() const {
SVal nonloc::ConcreteInt::evalBinOp(ValueManager &ValMgr,
BinaryOperator::Opcode Op,
- const nonloc::ConcreteInt& R) const {
+ const nonloc::ConcreteInt& R) const {
const llvm::APSInt* X =
ValMgr.getBasicValueFactory().EvaluateAPSInt(Op, getValue(), R.getValue());
-
+
if (X)
return nonloc::ConcreteInt(*X);
else
@@ -223,12 +215,12 @@ nonloc::ConcreteInt nonloc::ConcreteInt::evalMinus(ValueManager &ValMgr) const {
SVal loc::ConcreteInt::EvalBinOp(BasicValueFactory& BasicVals,
BinaryOperator::Opcode Op,
const loc::ConcreteInt& R) const {
-
+
assert (Op == BinaryOperator::Add || Op == BinaryOperator::Sub ||
(Op >= BinaryOperator::LT && Op <= BinaryOperator::NE));
-
+
const llvm::APSInt* X = BasicVals.EvaluateAPSInt(Op, getValue(), R.getValue());
-
+
if (X)
return loc::ConcreteInt(*X);
else
@@ -239,98 +231,89 @@ SVal loc::ConcreteInt::EvalBinOp(BasicValueFactory& BasicVals,
// Pretty-Printing.
//===----------------------------------------------------------------------===//
-void SVal::printStdErr() const { print(llvm::errs()); }
-
-void SVal::print(llvm::raw_ostream& Out) const {
+void SVal::dump() const { dumpToStream(llvm::errs()); }
+void SVal::dumpToStream(llvm::raw_ostream& os) const {
switch (getBaseKind()) {
-
case UnknownKind:
- Out << "Invalid"; break;
-
+ os << "Invalid";
+ break;
case NonLocKind:
- cast<NonLoc>(this)->print(Out); break;
-
+ cast<NonLoc>(this)->dumpToStream(os);
+ break;
case LocKind:
- cast<Loc>(this)->print(Out); break;
-
+ cast<Loc>(this)->dumpToStream(os);
+ break;
case UndefinedKind:
- Out << "Undefined"; break;
-
+ os << "Undefined";
+ break;
default:
assert (false && "Invalid SVal.");
}
}
-void NonLoc::print(llvm::raw_ostream& Out) const {
-
- switch (getSubKind()) {
-
+void NonLoc::dumpToStream(llvm::raw_ostream& os) const {
+ switch (getSubKind()) {
case nonloc::ConcreteIntKind:
- Out << cast<nonloc::ConcreteInt>(this)->getValue().getZExtValue();
-
+ os << cast<nonloc::ConcreteInt>(this)->getValue().getZExtValue();
if (cast<nonloc::ConcreteInt>(this)->getValue().isUnsigned())
- Out << 'U';
-
+ os << 'U';
break;
-
case nonloc::SymbolValKind:
- Out << '$' << cast<nonloc::SymbolVal>(this)->getSymbol();
+ os << '$' << cast<nonloc::SymbolVal>(this)->getSymbol();
break;
-
case nonloc::SymExprValKind: {
const nonloc::SymExprVal& C = *cast<nonloc::SymExprVal>(this);
const SymExpr *SE = C.getSymbolicExpression();
- Out << SE;
+ os << SE;
break;
}
-
case nonloc::LocAsIntegerKind: {
const nonloc::LocAsInteger& C = *cast<nonloc::LocAsInteger>(this);
- C.getLoc().print(Out);
- Out << " [as " << C.getNumBits() << " bit integer]";
+ os << C.getLoc() << " [as " << C.getNumBits() << " bit integer]";
break;
}
-
case nonloc::CompoundValKind: {
const nonloc::CompoundVal& C = *cast<nonloc::CompoundVal>(this);
- Out << " {";
+ os << "compoundVal{";
bool first = true;
for (nonloc::CompoundVal::iterator I=C.begin(), E=C.end(); I!=E; ++I) {
- if (first) { Out << ' '; first = false; }
- else Out << ", ";
- (*I).print(Out);
+ if (first) {
+ os << ' '; first = false;
+ }
+ else
+ os << ", ";
+
+ (*I).dumpToStream(os);
}
- Out << " }";
+ os << "}";
+ break;
+ }
+ case nonloc::LazyCompoundValKind: {
+ const nonloc::LazyCompoundVal &C = *cast<nonloc::LazyCompoundVal>(this);
+ os << "lazyCompoundVal{" << (void*) C.getState() << ',' << C.getRegion()
+ << '}';
break;
}
-
default:
assert (false && "Pretty-printed not implemented for this NonLoc.");
break;
}
}
-void Loc::print(llvm::raw_ostream& Out) const {
-
- switch (getSubKind()) {
-
+void Loc::dumpToStream(llvm::raw_ostream& os) const {
+ switch (getSubKind()) {
case loc::ConcreteIntKind:
- Out << cast<loc::ConcreteInt>(this)->getValue().getZExtValue()
- << " (Loc)";
+ os << cast<loc::ConcreteInt>(this)->getValue().getZExtValue() << " (Loc)";
break;
-
case loc::GotoLabelKind:
- Out << "&&"
- << cast<loc::GotoLabel>(this)->getLabel()->getID()->getName();
+ os << "&&" << cast<loc::GotoLabel>(this)->getLabel()->getID()->getName();
break;
-
case loc::MemRegionKind:
- Out << '&' << cast<loc::MemRegionVal>(this)->getRegion()->getString();
+ os << '&' << cast<loc::MemRegionVal>(this)->getRegion()->getString();
break;
-
default:
- assert (false && "Pretty-printing not implemented for this Loc.");
+ assert(false && "Pretty-printing not implemented for this Loc.");
break;
}
}
diff --git a/lib/Analysis/SValuator.cpp b/lib/Analysis/SValuator.cpp
new file mode 100644
index 000000000000..573cac315b3a
--- /dev/null
+++ b/lib/Analysis/SValuator.cpp
@@ -0,0 +1,160 @@
+// SValuator.cpp - Basic class for all SValuator implementations --*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SValuator, the base class for all (complete) SValuator
+// implementations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/SValuator.h"
+#include "clang/Analysis/PathSensitive/GRState.h"
+
+using namespace clang;
+
+
+SVal SValuator::EvalBinOp(const GRState *ST, BinaryOperator::Opcode Op,
+ SVal L, SVal R, QualType T) {
+
+ if (L.isUndef() || R.isUndef())
+ return UndefinedVal();
+
+ if (L.isUnknown() || R.isUnknown())
+ return UnknownVal();
+
+ if (isa<Loc>(L)) {
+ if (isa<Loc>(R))
+ return EvalBinOpLL(Op, cast<Loc>(L), cast<Loc>(R), T);
+
+ return EvalBinOpLN(ST, Op, cast<Loc>(L), cast<NonLoc>(R), T);
+ }
+
+ if (isa<Loc>(R)) {
+ // Support pointer arithmetic where the increment/decrement operand
+ // is on the left and the pointer on the right.
+ assert(Op == BinaryOperator::Add || Op == BinaryOperator::Sub);
+
+ // Commute the operands.
+ return EvalBinOpLN(ST, Op, cast<Loc>(R), cast<NonLoc>(L), T);
+ }
+
+ return EvalBinOpNN(ST, Op, cast<NonLoc>(L), cast<NonLoc>(R), T);
+}
+
+DefinedOrUnknownSVal SValuator::EvalEQ(const GRState *ST,
+ DefinedOrUnknownSVal L,
+ DefinedOrUnknownSVal R) {
+ return cast<DefinedOrUnknownSVal>(EvalBinOp(ST, BinaryOperator::EQ, L, R,
+ ValMgr.getContext().IntTy));
+}
+
+SValuator::CastResult SValuator::EvalCast(SVal val, const GRState *state,
+ QualType castTy, QualType originalTy){
+
+ if (val.isUnknownOrUndef() || castTy == originalTy)
+ return CastResult(state, val);
+
+ ASTContext &C = ValMgr.getContext();
+
+ // For const casts, just propagate the value.
+ if (C.getCanonicalType(castTy).getUnqualifiedType() ==
+ C.getCanonicalType(originalTy).getUnqualifiedType())
+ return CastResult(state, val);
+
+ // Check for casts from pointers to integers.
+ if (castTy->isIntegerType() && Loc::IsLocType(originalTy))
+ return CastResult(state, EvalCastL(cast<Loc>(val), castTy));
+
+ // Check for casts from integers to pointers.
+ if (Loc::IsLocType(castTy) && originalTy->isIntegerType()) {
+ if (nonloc::LocAsInteger *LV = dyn_cast<nonloc::LocAsInteger>(&val)) {
+ // Just unpackage the lval and return it.
+ return CastResult(state, LV->getLoc());
+ }
+
+ goto DispatchCast;
+ }
+
+ // Just pass through function and block pointers.
+ if (originalTy->isBlockPointerType() || originalTy->isFunctionPointerType()) {
+ assert(Loc::IsLocType(castTy));
+ return CastResult(state, val);
+ }
+
+ // Check for casts from array type to another type.
+ if (originalTy->isArrayType()) {
+ // We will always decay to a pointer.
+ val = ValMgr.getStateManager().ArrayToPointer(cast<Loc>(val));
+
+ // Are we casting from an array to a pointer? If so just pass on
+ // the decayed value.
+ if (castTy->isPointerType())
+ return CastResult(state, val);
+
+ // Are we casting from an array to an integer? If so, cast the decayed
+ // pointer value to an integer.
+ assert(castTy->isIntegerType());
+
+ // FIXME: Keep these here for now in case we decide soon that we
+ // need the original decayed type.
+ // QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
+ // QualType pointerTy = C.getPointerType(elemTy);
+ return CastResult(state, EvalCastL(cast<Loc>(val), castTy));
+ }
+
+ // Check for casts from a region to a specific type.
+ if (const MemRegion *R = val.getAsRegion()) {
+ // FIXME: We should handle the case where we strip off view layers to get
+ // to a desugared type.
+
+ assert(Loc::IsLocType(castTy));
+ // We get a symbolic function pointer for a dereference of a function
+ // pointer, but it is of function type. Example:
+
+ // struct FPRec {
+ // void (*my_func)(int * x);
+ // };
+ //
+ // int bar(int x);
+ //
+ // int f1_a(struct FPRec* foo) {
+ // int x;
+ // (*foo->my_func)(&x);
+ // return bar(x)+1; // no-warning
+ // }
+
+ assert(Loc::IsLocType(originalTy) || originalTy->isFunctionType() ||
+ originalTy->isBlockPointerType());
+
+ StoreManager &storeMgr = ValMgr.getStateManager().getStoreManager();
+
+ // Delegate to store manager to get the result of casting a region to a
+ // different type. If the MemRegion* returned is NULL, this expression
+ // evaluates to UnknownVal.
+ R = storeMgr.CastRegion(R, castTy);
+
+ if (R)
+ return CastResult(state, loc::MemRegionVal(R));
+
+ return CastResult(state, UnknownVal());
+ }
+
+ // All other cases.
+DispatchCast:
+ return CastResult(state,
+ isa<Loc>(val) ? EvalCastL(cast<Loc>(val), castTy)
+ : EvalCastNL(cast<NonLoc>(val), castTy));
+}
+
+SValuator::DefinedOrUnknownCastResult
+SValuator::EvalCast(DefinedOrUnknownSVal V, const GRState *ST,
+ QualType castTy, QualType originalType) {
+ SValuator::CastResult X = EvalCast((SVal) V, ST, castTy, originalType);
+ return DefinedOrUnknownCastResult(X.getState(),
+ cast<DefinedOrUnknownSVal>(X.getSVal()));
+}
diff --git a/lib/Analysis/SimpleConstraintManager.cpp b/lib/Analysis/SimpleConstraintManager.cpp
index 82801eb05d38..015db76080df 100644
--- a/lib/Analysis/SimpleConstraintManager.cpp
+++ b/lib/Analysis/SimpleConstraintManager.cpp
@@ -23,10 +23,10 @@ SimpleConstraintManager::~SimpleConstraintManager() {}
bool SimpleConstraintManager::canReasonAbout(SVal X) const {
if (nonloc::SymExprVal *SymVal = dyn_cast<nonloc::SymExprVal>(&X)) {
const SymExpr *SE = SymVal->getSymbolicExpression();
-
+
if (isa<SymbolData>(SE))
return true;
-
+
if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SE)) {
switch (SIE->getOpcode()) {
// We don't reason yet about bitwise-constraints on symbolic values.
@@ -46,7 +46,7 @@ bool SimpleConstraintManager::canReasonAbout(SVal X) const {
// All other cases.
default:
return true;
- }
+ }
}
return false;
@@ -54,13 +54,10 @@ bool SimpleConstraintManager::canReasonAbout(SVal X) const {
return true;
}
-
-const GRState *SimpleConstraintManager::Assume(const GRState *state,
- SVal Cond, bool Assumption) {
- if (Cond.isUnknown()) {
- return state;
- }
+const GRState *SimpleConstraintManager::Assume(const GRState *state,
+ DefinedSVal Cond,
+ bool Assumption) {
if (isa<NonLoc>(Cond))
return Assume(state, cast<NonLoc>(Cond), Assumption);
else
@@ -74,14 +71,14 @@ const GRState *SimpleConstraintManager::Assume(const GRState *state, Loc Cond,
// EvalAssume is used to call into the GRTransferFunction object to perform
// any checker-specific update of the state based on this assumption being
- // true or false.
+ // true or false.
return state ? state->getTransferFuncs().EvalAssume(state, Cond, Assumption)
: NULL;
}
const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
Loc Cond, bool Assumption) {
-
+
BasicValueFactory &BasicVals = state->getBasicVals();
switch (Cond.getSubKind()) {
@@ -91,7 +88,7 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
case loc::MemRegionKind: {
// FIXME: Should this go into the storemanager?
-
+
const MemRegion *R = cast<loc::MemRegionVal>(Cond).getRegion();
const SubRegion *SubR = dyn_cast<SubRegion>(R);
@@ -99,7 +96,7 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
// FIXME: now we only find the first symbolic region.
if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SubR)) {
if (Assumption)
- return AssumeSymNE(state, SymR->getSymbol(),
+ return AssumeSymNE(state, SymR->getSymbol(),
BasicVals.getZeroWithPtrWidth());
else
return AssumeSymEQ(state, SymR->getSymbol(),
@@ -107,15 +104,15 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
}
SubR = dyn_cast<SubRegion>(SubR->getSuperRegion());
}
-
+
// FALL-THROUGH.
}
-
+
case loc::GotoLabelKind:
return Assumption ? state : NULL;
case loc::ConcreteIntKind: {
- bool b = cast<loc::ConcreteInt>(Cond).getValue() != 0;
+ bool b = cast<loc::ConcreteInt>(Cond).getValue() != 0;
bool isFeasible = b ? Assumption : !Assumption;
return isFeasible ? state : NULL;
}
@@ -130,7 +127,7 @@ const GRState *SimpleConstraintManager::Assume(const GRState *state,
// EvalAssume is used to call into the GRTransferFunction object to perform
// any checker-specific update of the state based on this assumption being
- // true or false.
+ // true or false.
return state ? state->getTransferFuncs().EvalAssume(state, Cond, Assumption)
: NULL;
}
@@ -138,13 +135,13 @@ const GRState *SimpleConstraintManager::Assume(const GRState *state,
const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
NonLoc Cond,
bool Assumption) {
-
+
// We cannot reason about SymIntExpr and SymSymExpr.
if (!canReasonAbout(Cond)) {
// Just return the current state indicating that the path is feasible.
// This may be an over-approximation of what is possible.
return state;
- }
+ }
BasicValueFactory &BasicVals = state->getBasicVals();
SymbolManager &SymMgr = state->getSymbolManager();
@@ -156,7 +153,7 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
case nonloc::SymbolValKind: {
nonloc::SymbolVal& SV = cast<nonloc::SymbolVal>(Cond);
SymbolRef sym = SV.getSymbol();
- QualType T = SymMgr.getType(sym);
+ QualType T = SymMgr.getType(sym);
const llvm::APSInt &zero = BasicVals.getValue(0, T);
return Assumption ? AssumeSymNE(state, sym, zero)
@@ -165,9 +162,20 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
case nonloc::SymExprValKind: {
nonloc::SymExprVal V = cast<nonloc::SymExprVal>(Cond);
- if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(V.getSymbolicExpression()))
- return AssumeSymInt(state, Assumption, SE);
-
+ if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(V.getSymbolicExpression())){
+ // FIXME: This is a hack. It silently converts the RHS integer to be
+ // of the same type as on the left side. This should be removed once
+ // we support truncation/extension of symbolic values.
+ GRStateManager &StateMgr = state->getStateManager();
+ ASTContext &Ctx = StateMgr.getContext();
+ QualType LHSType = SE->getLHS()->getType(Ctx);
+ BasicValueFactory &BasicVals = StateMgr.getBasicVals();
+ const llvm::APSInt &RHS = BasicVals.Convert(LHSType, SE->getRHS());
+ SymIntExpr SENew(SE->getLHS(), SE->getOpcode(), RHS, SE->getType(Ctx));
+
+ return AssumeSymInt(state, Assumption, &SENew);
+ }
+
// For all other symbolic expressions, over-approximate and consider
// the constraint feasible.
return state;
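A hypothetical example of the width mismatch the conversion above papers over: the symbol for 'n' stays typed 'unsigned short' (integer casts of symbols are currently passed through unchanged), while the literal it is compared against is an 'int'.

    void check(unsigned short n) {
      if (n == 42) {   // RHS 42 is re-typed to match the symbol before AssumeSymInt runs
        /* ... */
      }
    }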
@@ -194,7 +202,7 @@ const GRState *SimpleConstraintManager::AssumeSymInt(const GRState *state,
// rest of the constraint manager logic.
SymbolRef Sym = cast<SymbolData>(SE->getLHS());
const llvm::APSInt &Int = SE->getRHS();
-
+
switch (SE->getOpcode()) {
default:
// No logic yet for other operators. Assume the constraint is feasible.
@@ -218,7 +226,7 @@ const GRState *SimpleConstraintManager::AssumeSymInt(const GRState *state,
case BinaryOperator::LT:
return Assumption ? AssumeSymLT(state, Sym, Int)
: AssumeSymGE(state, Sym, Int);
-
+
case BinaryOperator::LE:
return Assumption ? AssumeSymLE(state, Sym, Int)
: AssumeSymGT(state, Sym, Int);
@@ -226,9 +234,9 @@ const GRState *SimpleConstraintManager::AssumeSymInt(const GRState *state,
}
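For reference, a small sketch of how the opcode table above splits a branch; assuming 'x' is tracked by a symbol, the two successors carry complementary constraints.

    void branch(int x) {
      if (x < 10) {
        // feasible iff AssumeSymLT($x, 10) succeeds
      } else {
        // feasible iff AssumeSymGE($x, 10) succeeds
      }
    }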
const GRState *SimpleConstraintManager::AssumeInBound(const GRState *state,
- SVal Idx,
- SVal UpperBound,
- bool Assumption) {
+ DefinedSVal Idx,
+ DefinedSVal UpperBound,
+ bool Assumption) {
// Only support ConcreteInt for now.
if (!(isa<nonloc::ConcreteInt>(Idx) && isa<nonloc::ConcreteInt>(UpperBound)))
diff --git a/lib/Analysis/SimpleConstraintManager.h b/lib/Analysis/SimpleConstraintManager.h
index 1e1a10da030f..0c58440ac0b6 100644
--- a/lib/Analysis/SimpleConstraintManager.h
+++ b/lib/Analysis/SimpleConstraintManager.h
@@ -22,15 +22,16 @@ namespace clang {
class SimpleConstraintManager : public ConstraintManager {
public:
SimpleConstraintManager() {}
- virtual ~SimpleConstraintManager();
-
+ virtual ~SimpleConstraintManager();
+
//===------------------------------------------------------------------===//
// Common implementation for the interface provided by ConstraintManager.
//===------------------------------------------------------------------===//
bool canReasonAbout(SVal X) const;
- const GRState *Assume(const GRState *state, SVal Cond, bool Assumption);
+ const GRState *Assume(const GRState *state, DefinedSVal Cond,
+ bool Assumption);
const GRState *Assume(const GRState *state, Loc Cond, bool Assumption);
@@ -38,16 +39,17 @@ public:
const GRState *AssumeSymInt(const GRState *state, bool Assumption,
const SymIntExpr *SE);
-
- const GRState *AssumeInBound(const GRState *state, SVal Idx, SVal UpperBound,
+
+ const GRState *AssumeInBound(const GRState *state, DefinedSVal Idx,
+ DefinedSVal UpperBound,
bool Assumption);
-
+
protected:
-
+
//===------------------------------------------------------------------===//
// Interface that subclasses must implement.
//===------------------------------------------------------------------===//
-
+
virtual const GRState *AssumeSymNE(const GRState *state, SymbolRef sym,
const llvm::APSInt& V) = 0;
@@ -65,13 +67,13 @@ protected:
virtual const GRState *AssumeSymGE(const GRState *state, SymbolRef sym,
const llvm::APSInt& V) = 0;
-
+
//===------------------------------------------------------------------===//
// Internal implementation.
//===------------------------------------------------------------------===//
-
+
const GRState *AssumeAux(const GRState *state, Loc Cond,bool Assumption);
-
+
const GRState *AssumeAux(const GRState *state, NonLoc Cond, bool Assumption);
};
diff --git a/lib/Analysis/SimpleSValuator.cpp b/lib/Analysis/SimpleSValuator.cpp
index 76a8bc782eb5..636ce15c3326 100644
--- a/lib/Analysis/SimpleSValuator.cpp
+++ b/lib/Analysis/SimpleSValuator.cpp
@@ -19,21 +19,23 @@ using namespace clang;
namespace {
class VISIBILITY_HIDDEN SimpleSValuator : public SValuator {
+protected:
+ virtual SVal EvalCastNL(NonLoc val, QualType castTy);
+ virtual SVal EvalCastL(Loc val, QualType castTy);
+
public:
SimpleSValuator(ValueManager &valMgr) : SValuator(valMgr) {}
virtual ~SimpleSValuator() {}
-
- virtual SVal EvalCast(NonLoc val, QualType castTy);
- virtual SVal EvalCast(Loc val, QualType castTy);
- virtual SVal EvalMinus(NonLoc val);
- virtual SVal EvalComplement(NonLoc val);
- virtual SVal EvalBinOpNN(BinaryOperator::Opcode op, NonLoc lhs, NonLoc rhs,
- QualType resultTy);
+
+ virtual SVal EvalMinus(NonLoc val);
+ virtual SVal EvalComplement(NonLoc val);
+ virtual SVal EvalBinOpNN(const GRState *state, BinaryOperator::Opcode op,
+ NonLoc lhs, NonLoc rhs, QualType resultTy);
virtual SVal EvalBinOpLL(BinaryOperator::Opcode op, Loc lhs, Loc rhs,
QualType resultTy);
virtual SVal EvalBinOpLN(const GRState *state, BinaryOperator::Opcode op,
Loc lhs, NonLoc rhs, QualType resultTy);
-};
+};
} // end anonymous namespace
SValuator *clang::CreateSimpleSValuator(ValueManager &valMgr) {
@@ -44,16 +46,48 @@ SValuator *clang::CreateSimpleSValuator(ValueManager &valMgr) {
// Transfer function for Casts.
//===----------------------------------------------------------------------===//
-SVal SimpleSValuator::EvalCast(NonLoc val, QualType castTy) {
+SVal SimpleSValuator::EvalCastNL(NonLoc val, QualType castTy) {
+
+ bool isLocType = Loc::IsLocType(castTy);
+
+ if (nonloc::LocAsInteger *LI = dyn_cast<nonloc::LocAsInteger>(&val)) {
+ if (isLocType)
+ return LI->getLoc();
+
+ ASTContext &Ctx = ValMgr.getContext();
+
+ // FIXME: Support promotions/truncations.
+ if (Ctx.getTypeSize(castTy) == Ctx.getTypeSize(Ctx.VoidPtrTy))
+ return val;
+
+ return UnknownVal();
+ }
+
+ if (const SymExpr *se = val.getAsSymbolicExpression()) {
+ ASTContext &Ctx = ValMgr.getContext();
+ QualType T = Ctx.getCanonicalType(se->getType(Ctx));
+ if (T == Ctx.getCanonicalType(castTy))
+ return val;
+
+ // FIXME: Remove this hack when we support symbolic truncation/extension.
+ // HACK: If both castTy and T are integers, ignore the cast. This is
+ // not a permanent solution. Eventually we want to precisely handle
+ // extension/truncation of symbolic integers. This prevents us from losing
+ // precision when we assign 'x = y' and 'y' is symbolic and x and y are
+ // different integer types.
+ if (T->isIntegerType() && castTy->isIntegerType())
+ return val;
+
+ return UnknownVal();
+ }
+
if (!isa<nonloc::ConcreteInt>(val))
return UnknownVal();
- bool isLocType = Loc::IsLocType(castTy);
-
// Only handle casts from integers to integers.
if (!isLocType && !castTy->isIntegerType())
return UnknownVal();
-
+
llvm::APSInt i = cast<nonloc::ConcreteInt>(val).getValue();
i.setIsUnsigned(castTy->isUnsignedIntegerType() || Loc::IsLocType(castTy));
i.extOrTrunc(ValMgr.getContext().getTypeSize(castTy));
@@ -64,30 +98,28 @@ SVal SimpleSValuator::EvalCast(NonLoc val, QualType castTy) {
return ValMgr.makeIntVal(i);
}
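A minimal sketch of the integer-to-integer pass-through described in the HACK comment in EvalCastNL above: the symbolic value survives the cast unchanged, at the cost of not yet modeling truncation or extension.

    void widen(int y) {
      long big   = y;         // the symbol for 'y' is kept as-is across int -> long
      char small = (char)y;   // also kept as-is; the truncation is not modeled yet
      (void)big; (void)small;
    }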
-SVal SimpleSValuator::EvalCast(Loc val, QualType castTy) {
-
+SVal SimpleSValuator::EvalCastL(Loc val, QualType castTy) {
+
// Casts from pointers -> pointers, just return the lval.
//
// Casts from pointers -> references, just return the lval. These
// can be introduced by the frontend for corner cases, e.g
// casting from va_list* to __builtin_va_list&.
//
- assert(!val.isUnknownOrUndef());
-
if (Loc::IsLocType(castTy) || castTy->isReferenceType())
return val;
-
+
// FIXME: Handle transparent unions where a value can be "transparently"
// lifted into a union type.
if (castTy->isUnionType())
return UnknownVal();
-
+
assert(castTy->isIntegerType());
unsigned BitWidth = ValMgr.getContext().getTypeSize(castTy);
if (!isa<loc::ConcreteInt>(val))
return ValMgr.makeLocAsInteger(val, BitWidth);
-
+
llvm::APSInt i = cast<loc::ConcreteInt>(val).getValue();
i.setIsUnsigned(castTy->isUnsignedIntegerType() || Loc::IsLocType(castTy));
i.extOrTrunc(BitWidth);
@@ -99,7 +131,7 @@ SVal SimpleSValuator::EvalCast(Loc val, QualType castTy) {
//===----------------------------------------------------------------------===//
SVal SimpleSValuator::EvalMinus(NonLoc val) {
- switch (val.getSubKind()) {
+ switch (val.getSubKind()) {
case nonloc::ConcreteIntKind:
return cast<nonloc::ConcreteInt>(val).evalMinus(ValMgr);
default:
@@ -133,18 +165,18 @@ static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) {
}
}
-// Equality operators for Locs.
+// Equality operators for Locs.
// FIXME: All this logic will be revamped when we have MemRegion::getLocation()
// implemented.
static SVal EvalEquality(ValueManager &ValMgr, Loc lhs, Loc rhs, bool isEqual,
QualType resultTy) {
-
+
switch (lhs.getSubKind()) {
default:
assert(false && "EQ/NE not implemented for this Loc.");
return UnknownVal();
-
+
case loc::ConcreteIntKind: {
if (SymbolRef rSym = rhs.getAsSymbol())
return ValMgr.makeNonLoc(rSym,
@@ -153,7 +185,7 @@ static SVal EvalEquality(ValueManager &ValMgr, Loc lhs, Loc rhs, bool isEqual,
cast<loc::ConcreteInt>(lhs).getValue(),
resultTy);
break;
- }
+ }
case loc::MemRegionKind: {
if (SymbolRef lSym = lhs.getAsLocSymbol()) {
if (isa<loc::ConcreteInt>(rhs)) {
@@ -166,27 +198,43 @@ static SVal EvalEquality(ValueManager &ValMgr, Loc lhs, Loc rhs, bool isEqual,
}
break;
}
-
+
case loc::GotoLabelKind:
break;
}
-
+
return ValMgr.makeTruthVal(isEqual ? lhs == rhs : lhs != rhs, resultTy);
}
-SVal SimpleSValuator::EvalBinOpNN(BinaryOperator::Opcode op,
+SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
+ BinaryOperator::Opcode op,
NonLoc lhs, NonLoc rhs,
- QualType resultTy) {
+ QualType resultTy) {
+ // Handle trivial case where left-side and right-side are the same.
+ if (lhs == rhs)
+ switch (op) {
+ default:
+ break;
+ case BinaryOperator::EQ:
+ case BinaryOperator::LE:
+ case BinaryOperator::GE:
+ return ValMgr.makeTruthVal(true, resultTy);
+ case BinaryOperator::LT:
+ case BinaryOperator::GT:
+ case BinaryOperator::NE:
+ return ValMgr.makeTruthVal(false, resultTy);
+ }
+
while (1) {
switch (lhs.getSubKind()) {
default:
- return UnknownVal();
+ return UnknownVal();
case nonloc::LocAsIntegerKind: {
- Loc lhsL = cast<nonloc::LocAsInteger>(lhs).getLoc();
+ Loc lhsL = cast<nonloc::LocAsInteger>(lhs).getLoc();
switch (rhs.getSubKind()) {
case nonloc::LocAsIntegerKind:
return EvalBinOpLL(op, lhsL, cast<nonloc::LocAsInteger>(rhs).getLoc(),
- resultTy);
+ resultTy);
case nonloc::ConcreteIntKind: {
// Transform the integer into a location and compare.
ASTContext& Ctx = ValMgr.getContext();
@@ -195,7 +243,7 @@ SVal SimpleSValuator::EvalBinOpNN(BinaryOperator::Opcode op,
i.extOrTrunc(Ctx.getTypeSize(Ctx.VoidPtrTy));
return EvalBinOpLL(op, lhsL, ValMgr.makeLoc(i), resultTy);
}
- default:
+ default:
switch (op) {
case BinaryOperator::EQ:
return ValMgr.makeTruthVal(false, resultTy);
@@ -206,15 +254,15 @@ SVal SimpleSValuator::EvalBinOpNN(BinaryOperator::Opcode op,
return UnknownVal();
}
}
- }
+ }
case nonloc::SymExprValKind: {
- // Logical not?
+ // Logical not?
if (!(op == BinaryOperator::EQ && rhs.isZeroConstant()))
return UnknownVal();
const SymExpr *symExpr =
cast<nonloc::SymExprVal>(lhs).getSymbolicExpression();
-
+
// Only handle ($sym op constant) for now.
if (const SymIntExpr *symIntExpr = dyn_cast<SymIntExpr>(symExpr)) {
BinaryOperator::Opcode opc = symIntExpr->getOpcode();
@@ -257,7 +305,7 @@ SVal SimpleSValuator::EvalBinOpNN(BinaryOperator::Opcode op,
case BinaryOperator::GT:
case BinaryOperator::LE:
case BinaryOperator::GE:
- case BinaryOperator::EQ:
+ case BinaryOperator::EQ:
case BinaryOperator::NE:
opc = NegateComparison(opc);
assert(symIntExpr->getType(ValMgr.getContext()) == resultTy);
@@ -266,7 +314,7 @@ SVal SimpleSValuator::EvalBinOpNN(BinaryOperator::Opcode op,
}
}
}
- case nonloc::ConcreteIntKind: {
+ case nonloc::ConcreteIntKind: {
if (isa<nonloc::ConcreteInt>(rhs)) {
const nonloc::ConcreteInt& lhsInt = cast<nonloc::ConcreteInt>(lhs);
return lhsInt.evalBinOp(ValMgr, op, cast<nonloc::ConcreteInt>(rhs));
@@ -278,7 +326,7 @@ SVal SimpleSValuator::EvalBinOpNN(BinaryOperator::Opcode op,
NonLoc tmp = rhs;
rhs = lhs;
lhs = tmp;
-
+
switch (op) {
case BinaryOperator::LT: op = BinaryOperator::GT; continue;
case BinaryOperator::GT: op = BinaryOperator::LT; continue;
@@ -291,12 +339,27 @@ SVal SimpleSValuator::EvalBinOpNN(BinaryOperator::Opcode op,
continue;
default:
return UnknownVal();
- }
+ }
}
}
case nonloc::SymbolValKind: {
+ nonloc::SymbolVal *slhs = cast<nonloc::SymbolVal>(&lhs);
+ SymbolRef Sym = slhs->getSymbol();
+
+ // Does the symbol simplify to a constant?
+ if (Sym->getType(ValMgr.getContext())->isIntegerType())
+ if (const llvm::APSInt *Constant = state->getSymVal(Sym)) {
+ // What should we convert it to?
+ if (nonloc::ConcreteInt *rhs_I = dyn_cast<nonloc::ConcreteInt>(&rhs)){
+ BasicValueFactory &BVF = ValMgr.getBasicValueFactory();
+ lhs = nonloc::ConcreteInt(BVF.Convert(rhs_I->getValue(),
+ *Constant));
+ continue;
+ }
+ }
+
if (isa<nonloc::ConcreteInt>(rhs)) {
- return ValMgr.makeNonLoc(cast<nonloc::SymbolVal>(lhs).getSymbol(), op,
+ return ValMgr.makeNonLoc(slhs->getSymbol(), op,
cast<nonloc::ConcreteInt>(rhs).getValue(),
resultTy);
}
@@ -308,19 +371,26 @@ SVal SimpleSValuator::EvalBinOpNN(BinaryOperator::Opcode op,
}
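An illustrative sketch of the two additions to EvalBinOpNN above: the identical-operand fast path and the folding of a symbol the state already pins to a constant.

    void binops(int a) {
      if (a <= a) { /* folded to true without consulting the constraint manager */ }
      if (a != a) { /* folded to false */ }

      if (a == 3) {
        // Assuming the constraint manager has pinned $a to 3 on this path,
        // the addition below folds to the concrete value 4.
        int b = a + 1;
        (void)b;
      }
    }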
SVal SimpleSValuator::EvalBinOpLL(BinaryOperator::Opcode op, Loc lhs, Loc rhs,
- QualType resultTy) {
+ QualType resultTy) {
switch (op) {
default:
return UnknownVal();
case BinaryOperator::EQ:
case BinaryOperator::NE:
return EvalEquality(ValMgr, lhs, rhs, op == BinaryOperator::EQ, resultTy);
+ case BinaryOperator::LT:
+ case BinaryOperator::GT:
+ // FIXME: Generalize. For now, just handle the trivial case where
+ // the two locations are identical.
+ if (lhs == rhs)
+ return ValMgr.makeTruthVal(false, resultTy);
+ return UnknownVal();
}
}
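And a one-liner for the new LT/GT case in EvalBinOpLL, which only fires when the two locations are identical.

    void self_compare(int *p) {
      if (p < p) { /* identical Locs: now folds to false instead of Unknown */ }
    }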
SVal SimpleSValuator::EvalBinOpLN(const GRState *state,
BinaryOperator::Opcode op,
- Loc lhs, NonLoc rhs, QualType resultTy) {
+ Loc lhs, NonLoc rhs, QualType resultTy) {
// Special case: 'rhs' is an integer that has the same width as a pointer and
// we are using the integer location in a comparison. Normally this cannot be
// triggered, but transfer functions like those for OSCompareAndSwapBarrier32
@@ -333,13 +403,13 @@ SVal SimpleSValuator::EvalBinOpLN(const GRState *state,
if (ctx.getTypeSize(ctx.VoidPtrTy) == x->getBitWidth()) {
// Convert the signedness of the integer (if necessary).
if (x->isSigned())
- x = &ValMgr.getBasicValueFactory().getValue(*x, true);
+ x = &ValMgr.getBasicValueFactory().getValue(*x, true);
return EvalBinOpLL(op, lhs, loc::ConcreteInt(*x), resultTy);
}
}
}
-
+
// Delegate pointer arithmetic to the StoreManager.
return state->getStateManager().getStoreManager().EvalBinOp(state, op, lhs,
rhs, resultTy);
diff --git a/lib/Analysis/Store.cpp b/lib/Analysis/Store.cpp
index cb099862f055..4b4ae6580820 100644
--- a/lib/Analysis/Store.cpp
+++ b/lib/Analysis/Store.cpp
@@ -17,95 +17,189 @@
using namespace clang;
StoreManager::StoreManager(GRStateManager &stateMgr)
- : ValMgr(stateMgr.getValueManager()),
- StateMgr(stateMgr),
+ : ValMgr(stateMgr.getValueManager()), StateMgr(stateMgr),
MRMgr(ValMgr.getRegionManager()) {}
-StoreManager::CastResult
-StoreManager::CastRegion(const GRState* state, const MemRegion* R,
- QualType CastToTy) {
-
+const MemRegion *StoreManager::MakeElementRegion(const MemRegion *Base,
+ QualType EleTy, uint64_t index) {
+ SVal idx = ValMgr.makeArrayIndex(index);
+ return MRMgr.getElementRegion(EleTy, idx, Base, ValMgr.getContext());
+}
+
+// FIXME: Merge with the implementation of the same method in MemRegion.cpp
+static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *D = RT->getDecl();
+ if (!D->getDefinition(Ctx))
+ return false;
+ }
+
+ return true;
+}
+
+const MemRegion *StoreManager::CastRegion(const MemRegion *R, QualType CastToTy) {
+
ASTContext& Ctx = StateMgr.getContext();
- // We need to know the real type of CastToTy.
- QualType ToTy = Ctx.getCanonicalType(CastToTy);
+ // Handle casts to Objective-C objects.
+ if (CastToTy->isObjCObjectPointerType())
+ return R->getBaseRegion();
- // Return the same region if the region types are compatible.
- if (const TypedRegion* TR = dyn_cast<TypedRegion>(R)) {
- QualType Ta = Ctx.getCanonicalType(TR->getLocationType(Ctx));
+ if (CastToTy->isBlockPointerType()) {
+ // FIXME: We may need different solutions, depending on the symbol
+ // involved. Blocks can be casted to/from 'id', as they can be treated
+ // as Objective-C objects. This could possibly be handled by enhancing
+ // our reasoning of downcasts of symbolic objects.
+ if (isa<CodeTextRegion>(R) || isa<SymbolicRegion>(R))
+ return R;
- if (Ta == ToTy)
- return CastResult(state, R);
+ // We don't know what to make of it. Return a NULL region, which
+ // will be interpretted as UnknownVal.
+ return NULL;
}
-
- if (const PointerType* PTy = dyn_cast<PointerType>(ToTy.getTypePtr())) {
- // Check if we are casting to 'void*'.
- // FIXME: Handle arbitrary upcasts.
- QualType Pointee = PTy->getPointeeType();
- if (Pointee->isVoidType()) {
-
- do {
- if (const TypedViewRegion *TR = dyn_cast<TypedViewRegion>(R)) {
- // Casts to void* removes TypedViewRegion. This happens when:
- //
- // void foo(void*);
- // ...
- // void bar() {
- // int x;
- // foo(&x);
- // }
- //
- R = TR->removeViews();
- continue;
+
+ // Now assume we are casting from pointer to pointer. Other cases should
+ // already be handled.
+ QualType PointeeTy = CastToTy->getAs<PointerType>()->getPointeeType();
+ QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
+
+ // Handle casts to void*. We just pass the region through.
+ if (CanonPointeeTy.getUnqualifiedType() == Ctx.VoidTy)
+ return R;
+
+ // Handle casts from compatible types.
+ if (R->isBoundable())
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
+ QualType ObjTy = Ctx.getCanonicalType(TR->getValueType(Ctx));
+ if (CanonPointeeTy == ObjTy)
+ return R;
+ }
+
+ // Process region cast according to the kind of the region being cast.
+ switch (R->getKind()) {
+ case MemRegion::BEG_TYPED_REGIONS:
+ case MemRegion::MemSpaceRegionKind:
+ case MemRegion::BEG_DECL_REGIONS:
+ case MemRegion::END_DECL_REGIONS:
+ case MemRegion::END_TYPED_REGIONS: {
+ assert(0 && "Invalid region cast");
+ break;
+ }
+ case MemRegion::CodeTextRegionKind: {
+ // A CodeTextRegion should only be cast to a function or block pointer type,
+ // although in practice it can be cast to anything, e.g., void*, char*,
+ // etc.
+ // Just return the region.
+ return R;
+ }
+
+ case MemRegion::StringRegionKind:
+ case MemRegion::ObjCObjectRegionKind:
+ // FIXME: Need to handle arbitrary downcasts.
+ case MemRegion::SymbolicRegionKind:
+ case MemRegion::AllocaRegionKind:
+ case MemRegion::CompoundLiteralRegionKind:
+ case MemRegion::FieldRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ case MemRegion::VarRegionKind:
+ return MakeElementRegion(R, PointeeTy);
+
+ case MemRegion::ElementRegionKind: {
+ // If we are casting from an ElementRegion to another type, the
+ // algorithm is as follows:
+ //
+ // (1) Compute the "raw offset" of the ElementRegion from the
+ // base region. This is done by calling 'getAsRawOffset()'.
+ //
+ // (2a) If we get a 'RegionRawOffset' after calling
+ // 'getAsRawOffset()', determine if the absolute offset
+ // can be exactly divided into chunks of the size of the
+ // casted-pointee type. If so, create a new ElementRegion with
+ // the pointee-cast type as the new ElementType and the index
+ // being the offset divided by the chunk size. If not, create
+ // a new ElementRegion at offset 0 off the raw offset region.
+ //
+ // (2b) If we don't get a 'RegionRawOffset' after calling
+ // 'getAsRawOffset()', it means that we are at offset 0.
+ //
+ // FIXME: Handle symbolic raw offsets.
+
+ const ElementRegion *elementR = cast<ElementRegion>(R);
+ const RegionRawOffset &rawOff = elementR->getAsRawOffset();
+ const MemRegion *baseR = rawOff.getRegion();
+
+ // If we cannot compute a raw offset, throw up our hands and return
+ // a NULL MemRegion*.
+ if (!baseR)
+ return NULL;
+
+ int64_t off = rawOff.getByteOffset();
+
+ if (off == 0) {
+ // Edge case: we are at 0 bytes off the beginning of baseR. We
+ // check to see if the type we are casting to is the same as the base
+ // region. If so, just return the base region.
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(baseR)) {
+ QualType ObjTy = Ctx.getCanonicalType(TR->getValueType(Ctx));
+ QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
+ if (CanonPointeeTy == ObjTy)
+ return baseR;
}
- else if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
- // Casts to void* also removes ElementRegions. This happens when:
- //
- // void foo(void*);
- // ...
- // void bar() {
- // int x;
- // foo((char*)&x);
- // }
- //
- R = ER->getSuperRegion();
- continue;
+
+ // Otherwise, create a new ElementRegion at offset 0.
+ return MakeElementRegion(baseR, PointeeTy);
+ }
+
+ // We have a non-zero offset from the base region. We want to determine
+ // if the offset can be evenly divided by sizeof(PointeeTy). If so,
+ // we create an ElementRegion whose index is that value. Otherwise, we
+ // create two ElementRegions, one that reflects a raw offset and the other
+ // that reflects the cast.
+
+ // Compute the index for the new ElementRegion.
+ int64_t newIndex = 0;
+ const MemRegion *newSuperR = 0;
+
+ // We can only compute sizeof(PointeeTy) if it is a complete type.
+ if (IsCompleteType(Ctx, PointeeTy)) {
+ // Compute the size in **bytes**.
+ int64_t pointeeTySize = (int64_t) (Ctx.getTypeSize(PointeeTy) / 8);
+
+ // Is the offset a multiple of the size? If so, we can layer the
+ // ElementRegion (with elementType == PointeeTy) directly on top of
+ // the base region.
+ if (off % pointeeTySize == 0) {
+ newIndex = off / pointeeTySize;
+ newSuperR = baseR;
}
- else
- break;
}
- while (0);
-
- return CastResult(state, R);
- }
- else if (Pointee->isIntegerType()) {
- // FIXME: At some point, it stands to reason that this 'dyn_cast' should
- // become a 'cast' and that 'R' will always be a TypedRegion.
- if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
- // Check if we are casting to a region with an integer type. We now
- // the types aren't the same, so we construct an ElementRegion.
- SVal Idx = ValMgr.makeZeroArrayIndex();
-
- // If the super region is an element region, strip it away.
- // FIXME: Is this the right thing to do in all cases?
- const MemRegion *Base = isa<ElementRegion>(TR) ? TR->getSuperRegion()
- : TR;
- ElementRegion* ER = MRMgr.getElementRegion(Pointee, Idx, Base,
- StateMgr.getContext());
- return CastResult(state, ER);
+
+ if (!newSuperR) {
+ // Create an intermediate ElementRegion to represent the raw byte.
+ // This will be the super region of the final ElementRegion.
+ newSuperR = MakeElementRegion(baseR, Ctx.CharTy, off);
}
+
+ return MakeElementRegion(newSuperR, PointeeTy, newIndex);
}
}
- // FIXME: Need to handle arbitrary downcasts.
- // FIXME: Handle the case where a TypedViewRegion (layering a SymbolicRegion
- // or an AllocaRegion is cast to another view, thus causing the memory
- // to be re-used for a different purpose.
+ assert(0 && "unreachable");
+ return 0;
+}
+
- if (isa<SymbolicRegion>(R) || isa<AllocaRegion>(R)) {
- const MemRegion* ViewR = MRMgr.getTypedViewRegion(CastToTy, R);
- return CastResult(AddRegionView(state, ViewR, R), ViewR);
- }
-
- return CastResult(state, R);
+/// CastRetrievedVal - Used by subclasses of StoreManager to implement
+/// implicit casts that arise from loads from regions that are reinterpreted
+/// as another region.
+SValuator::CastResult StoreManager::CastRetrievedVal(SVal V,
+ const GRState *state,
+ const TypedRegion *R,
+ QualType castTy) {
+ if (castTy.isNull())
+ return SValuator::CastResult(state, V);
+
+ ASTContext &Ctx = ValMgr.getContext();
+ return ValMgr.getSValuator().EvalCast(V, state, castTy, R->getValueType(Ctx));
}
+
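Two hypothetical source-level sketches for the Store.cpp changes above, assuming a 4-byte 'int'. The first walks the ElementRegion raw-offset rules from CastRegion; the second shows the kind of re-typed load that CastRetrievedVal feeds through EvalCast.

    void reinterpret_buffer(void) {
      char buf[16];
      int *a = (int *)buf;         // offset 0: an int-typed ElementRegion at index 0 on 'buf'
      int *b = (int *)(buf + 8);   // offset 8 divides by 4: index 2 layered directly on 'buf'
      int *c = (int *)(buf + 3);   // offset 3 does not: a char-typed ElementRegion at index 3
                                   // is created first, with an int-typed one at index 0 on top
      (void)a; (void)b; (void)c;
    }

    void reread(void) {
      unsigned u = 0x80000000u;
      int i = *(int *)&u;          // the stored 'unsigned' binding is cast to 'int' on retrieval
      (void)i;
    }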
diff --git a/lib/Analysis/SymbolManager.cpp b/lib/Analysis/SymbolManager.cpp
index 275f30a2963e..22e110192956 100644
--- a/lib/Analysis/SymbolManager.cpp
+++ b/lib/Analysis/SymbolManager.cpp
@@ -18,9 +18,11 @@
using namespace clang;
-static void print(llvm::raw_ostream& os, const SymExpr *SE);
+void SymExpr::dump() const {
+ dumpToStream(llvm::errs());
+}
-static void print(llvm::raw_ostream& os, BinaryOperator::Opcode Op) {
+static void print(llvm::raw_ostream& os, BinaryOperator::Opcode Op) {
switch (Op) {
default:
assert(false && "operator printing not implemented");
@@ -35,92 +37,100 @@ static void print(llvm::raw_ostream& os, BinaryOperator::Opcode Op) {
case BinaryOperator::LT: os << "<" ; break;
case BinaryOperator::GT: os << '>' ; break;
case BinaryOperator::LE: os << "<=" ; break;
- case BinaryOperator::GE: os << ">=" ; break;
+ case BinaryOperator::GE: os << ">=" ; break;
case BinaryOperator::EQ: os << "==" ; break;
case BinaryOperator::NE: os << "!=" ; break;
case BinaryOperator::And: os << '&' ; break;
case BinaryOperator::Xor: os << '^' ; break;
case BinaryOperator::Or: os << '|' ; break;
- }
+ }
}
-static void print(llvm::raw_ostream& os, const SymIntExpr *SE) {
+void SymIntExpr::dumpToStream(llvm::raw_ostream& os) const {
os << '(';
- print(os, SE->getLHS());
+ getLHS()->dumpToStream(os);
os << ") ";
- print(os, SE->getOpcode());
- os << ' ' << SE->getRHS().getZExtValue();
- if (SE->getRHS().isUnsigned()) os << 'U';
+ print(os, getOpcode());
+ os << ' ' << getRHS().getZExtValue();
+ if (getRHS().isUnsigned()) os << 'U';
}
-
-static void print(llvm::raw_ostream& os, const SymSymExpr *SE) {
+
+void SymSymExpr::dumpToStream(llvm::raw_ostream& os) const {
os << '(';
- print(os, SE->getLHS());
+ getLHS()->dumpToStream(os);
os << ") ";
os << '(';
- print(os, SE->getRHS());
- os << ')';
-}
-
-static void print(llvm::raw_ostream& os, const SymExpr *SE) {
- switch (SE->getKind()) {
- case SymExpr::BEGIN_SYMBOLS:
- case SymExpr::RegionValueKind:
- case SymExpr::ConjuredKind:
- case SymExpr::END_SYMBOLS:
- os << '$' << cast<SymbolData>(SE)->getSymbolID();
- return;
- case SymExpr::SymIntKind:
- print(os, cast<SymIntExpr>(SE));
- return;
- case SymExpr::SymSymKind:
- print(os, cast<SymSymExpr>(SE));
- return;
- }
+ getRHS()->dumpToStream(os);
+ os << ')';
}
+void SymbolConjured::dumpToStream(llvm::raw_ostream& os) const {
+ os << "conj_$" << getSymbolID() << '{' << T.getAsString() << '}';
+}
+
+void SymbolDerived::dumpToStream(llvm::raw_ostream& os) const {
+ os << "derived_$" << getSymbolID() << '{'
+ << getParentSymbol() << ',' << getRegion() << '}';
+}
-llvm::raw_ostream& llvm::operator<<(llvm::raw_ostream& os, const SymExpr *SE) {
- print(os, SE);
- return os;
+void SymbolRegionValue::dumpToStream(llvm::raw_ostream& os) const {
+ os << "reg_$" << getSymbolID() << "<" << R << ">";
}
-const SymbolRegionValue*
+const SymbolRegionValue*
SymbolManager::getRegionValueSymbol(const MemRegion* R, QualType T) {
llvm::FoldingSetNodeID profile;
SymbolRegionValue::Profile(profile, R, T);
- void* InsertPos;
- SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
- if (!SD) {
+ void* InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
SD = (SymExpr*) BPAlloc.Allocate<SymbolRegionValue>();
- new (SD) SymbolRegionValue(SymbolCounter, R, T);
+ new (SD) SymbolRegionValue(SymbolCounter, R, T);
DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
-
+
return cast<SymbolRegionValue>(SD);
}
const SymbolConjured*
SymbolManager::getConjuredSymbol(const Stmt* E, QualType T, unsigned Count,
const void* SymbolTag) {
-
+
llvm::FoldingSetNodeID profile;
SymbolConjured::Profile(profile, E, T, Count, SymbolTag);
- void* InsertPos;
- SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
- if (!SD) {
+ void* InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
SD = (SymExpr*) BPAlloc.Allocate<SymbolConjured>();
- new (SD) SymbolConjured(SymbolCounter, E, T, Count, SymbolTag);
- DataSet.InsertNode(SD, InsertPos);
+ new (SD) SymbolConjured(SymbolCounter, E, T, Count, SymbolTag);
+ DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
-
+
return cast<SymbolConjured>(SD);
}
+const SymbolDerived*
+SymbolManager::getDerivedSymbol(SymbolRef parentSymbol,
+ const TypedRegion *R) {
+
+ llvm::FoldingSetNodeID profile;
+ SymbolDerived::Profile(profile, parentSymbol, R);
+ void* InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolDerived>();
+ new (SD) SymbolDerived(SymbolCounter, parentSymbol, R);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolDerived>(SD);
+}
+
const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs,
- BinaryOperator::Opcode op,
+ BinaryOperator::Opcode op,
const llvm::APSInt& v,
QualType t) {
llvm::FoldingSetNodeID ID;
@@ -133,7 +143,7 @@ const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs,
new (data) SymIntExpr(lhs, op, v, t);
DataSet.InsertNode(data, InsertPos);
}
-
+
return cast<SymIntExpr>(data);
}
@@ -151,7 +161,7 @@ const SymSymExpr *SymbolManager::getSymSymExpr(const SymExpr *lhs,
new (data) SymSymExpr(lhs, op, rhs, t);
DataSet.InsertNode(data, InsertPos);
}
-
+
return cast<SymSymExpr>(data);
}
@@ -159,39 +169,52 @@ QualType SymbolConjured::getType(ASTContext&) const {
return T;
}
+
+QualType SymbolDerived::getType(ASTContext& Ctx) const {
+ return R->getValueType(Ctx);
+}
+
QualType SymbolRegionValue::getType(ASTContext& C) const {
if (!T.isNull())
return T;
if (const TypedRegion* TR = dyn_cast<TypedRegion>(R))
return TR->getValueType(C);
-
+
return QualType();
}
SymbolManager::~SymbolManager() {}
bool SymbolManager::canSymbolicate(QualType T) {
- return Loc::IsLocType(T) || T->isIntegerType();
+ return Loc::IsLocType(T) || (T->isIntegerType() && T->isScalarType());
}
void SymbolReaper::markLive(SymbolRef sym) {
- TheLiving = F.Add(TheLiving, sym);
- TheDead = F.Remove(TheDead, sym);
+ TheLiving.insert(sym);
+ TheDead.erase(sym);
}
bool SymbolReaper::maybeDead(SymbolRef sym) {
if (isLive(sym))
return false;
-
- TheDead = F.Add(TheDead, sym);
+
+ TheDead.insert(sym);
return true;
}
bool SymbolReaper::isLive(SymbolRef sym) {
- if (TheLiving.contains(sym))
+ if (TheLiving.count(sym))
return true;
-
+
+ if (const SymbolDerived *derived = dyn_cast<SymbolDerived>(sym)) {
+ if (isLive(derived->getParentSymbol())) {
+ markLive(sym);
+ return true;
+ }
+ return false;
+ }
+
// Interrogate the symbol. It may derive from an input value to
// the analyzed function/method.
return isa<SymbolRegionValue>(sym);
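A rough sketch of where derived symbols come from and why their liveness follows the parent, as implemented in isLive() above; the exact symbol spelling is illustrative.

    struct Point { int x, y; };

    int read_field(struct Point p) {
      // Reading p.x yields a derived symbol (roughly derived_$N{$p, p.x});
      // it is considered live exactly as long as the symbol for 'p' is.
      return p.x;
    }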
diff --git a/lib/Analysis/UninitializedValues.cpp b/lib/Analysis/UninitializedValues.cpp
index 014ea8255e68..8e7b15862d66 100644
--- a/lib/Analysis/UninitializedValues.cpp
+++ b/lib/Analysis/UninitializedValues.cpp
@@ -25,21 +25,21 @@ using namespace clang;
//===----------------------------------------------------------------------===//
// Dataflow initialization logic.
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
namespace {
class VISIBILITY_HIDDEN RegisterDecls
- : public CFGRecStmtDeclVisitor<RegisterDecls> {
+ : public CFGRecStmtDeclVisitor<RegisterDecls> {
UninitializedValues::AnalysisDataTy& AD;
public:
RegisterDecls(UninitializedValues::AnalysisDataTy& ad) : AD(ad) {}
-
+
void VisitVarDecl(VarDecl* VD) { AD.Register(VD); }
CFG& getCFG() { return AD.getCFG(); }
};
-
+
} // end anonymous namespace
void UninitializedValues::InitializeValues(const CFG& cfg) {
@@ -49,25 +49,25 @@ void UninitializedValues::InitializeValues(const CFG& cfg) {
//===----------------------------------------------------------------------===//
// Transfer functions.
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
namespace {
class VISIBILITY_HIDDEN TransferFuncs
: public CFGStmtVisitor<TransferFuncs,bool> {
-
+
UninitializedValues::ValTy V;
UninitializedValues::AnalysisDataTy& AD;
public:
TransferFuncs(UninitializedValues::AnalysisDataTy& ad) : AD(ad) {}
-
+
UninitializedValues::ValTy& getVal() { return V; }
CFG& getCFG() { return AD.getCFG(); }
-
+
void SetTopValue(UninitializedValues::ValTy& X) {
X.setDeclValues(AD);
X.resetBlkExprValues(AD);
}
-
+
bool VisitDeclRefExpr(DeclRefExpr* DR);
bool VisitBinaryOperator(BinaryOperator* B);
bool VisitUnaryOperator(UnaryOperator* U);
@@ -76,24 +76,24 @@ public:
bool VisitDeclStmt(DeclStmt* D);
bool VisitConditionalOperator(ConditionalOperator* C);
bool BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S);
-
+
bool Visit(Stmt *S);
bool BlockStmt_VisitExpr(Expr* E);
-
+
void VisitTerminator(CFGBlock* B) { }
};
-
+
static const bool Initialized = false;
-static const bool Uninitialized = true;
+static const bool Uninitialized = true;
bool TransferFuncs::VisitDeclRefExpr(DeclRefExpr* DR) {
-
+
if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl()))
if (VD->isBlockVarDecl()) {
-
+
if (AD.Observer)
AD.Observer->ObserveDeclRefExpr(V, AD, DR, VD);
-
+
// Pseudo-hack to prevent a cascade of warnings. If an accessed variable
// is uninitialized, then we are already going to flag a warning for
// this variable, which is a "source" of uninitialized values.
@@ -103,17 +103,17 @@ bool TransferFuncs::VisitDeclRefExpr(DeclRefExpr* DR) {
if (AD.FullUninitTaint)
return V(VD,AD);
}
-
+
return Initialized;
}
static VarDecl* FindBlockVarDecl(Expr* E) {
-
+
// Blast through casts and parentheses to find any DeclRefExprs that
// refer to a block VarDecl.
-
+
if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
- if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl()))
+ if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl()))
if (VD->isBlockVarDecl()) return VD;
return NULL;
@@ -136,7 +136,7 @@ bool TransferFuncs::VisitDeclStmt(DeclStmt* S) {
for (DeclStmt::decl_iterator I=S->decl_begin(), E=S->decl_end(); I!=E; ++I) {
VarDecl *VD = dyn_cast<VarDecl>(*I);
if (VD && VD->isBlockVarDecl()) {
- if (Stmt* I = VD->getInit())
+ if (Stmt* I = VD->getInit())
V(VD,AD) = AD.FullUninitTaint ? V(cast<Expr>(I),AD) : Initialized;
else {
// Special case for declarations of array types. For things like:
@@ -145,20 +145,20 @@ bool TransferFuncs::VisitDeclStmt(DeclStmt* S) {
//
// we should treat "x" as being initialized, because the variable
// "x" really refers to the memory block. Clearly x[1] is
- // uninitialized, but expressions like "(char *) x" really do refer to
- // an initialized value. This simple dataflow analysis does not reason
+ // uninitialized, but expressions like "(char *) x" really do refer to
+ // an initialized value. This simple dataflow analysis does not reason
// about the contents of arrays, although it could be potentially
// extended to do so if the array were of constant size.
if (VD->getType()->isArrayType())
V(VD,AD) = Initialized;
- else
+ else
V(VD,AD) = Uninitialized;
}
}
}
return Uninitialized; // Value is never consumed.
}
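A short example of the array special case described in the comment above, next to an ordinary uninitialized use.

    void uninit_demo(void) {
      char x[100];
      char *p = (char *)x;   // treated as initialized: 'x' names the array's storage
      int  y;
      int  z = y;            // flagged: 'y' is used uninitialized
      (void)p; (void)z;
    }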
-
+
bool TransferFuncs::VisitCallExpr(CallExpr* C) {
VisitChildren(C);
return Initialized;
@@ -172,14 +172,14 @@ bool TransferFuncs::VisitUnaryOperator(UnaryOperator* U) {
return V(VD,AD) = Initialized;
break;
}
-
+
default:
break;
}
return Visit(U->getSubExpr());
}
-
+
bool
TransferFuncs::BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
// This represents a use of the 'collection'
@@ -203,12 +203,12 @@ TransferFuncs::BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
else
return Visit(ElemExpr);
}
-
+
V(VD,AD) = Initialized;
return Initialized;
}
-
-
+
+
bool TransferFuncs::VisitConditionalOperator(ConditionalOperator* C) {
Visit(C->getCond());
@@ -228,21 +228,21 @@ bool TransferFuncs::VisitStmt(Stmt* S) {
// or "Initialized" to variables referenced in the other subexpressions.
for (Stmt::child_iterator I=S->child_begin(), E=S->child_end(); I!=E; ++I)
if (*I && Visit(*I) == Uninitialized) x = Uninitialized;
-
+
return x;
}
-
+
bool TransferFuncs::Visit(Stmt *S) {
if (AD.isTracked(static_cast<Expr*>(S))) return V(static_cast<Expr*>(S),AD);
else return static_cast<CFGStmtVisitor<TransferFuncs,bool>*>(this)->Visit(S);
}
bool TransferFuncs::BlockStmt_VisitExpr(Expr* E) {
- bool x = static_cast<CFGStmtVisitor<TransferFuncs,bool>*>(this)->Visit(E);
+ bool x = static_cast<CFGStmtVisitor<TransferFuncs,bool>*>(this)->Visit(E);
if (AD.isTracked(E)) V(E,AD) = x;
return x;
}
-
+
} // end anonymous namespace
//===----------------------------------------------------------------------===//
@@ -255,7 +255,7 @@ bool TransferFuncs::BlockStmt_VisitExpr(Expr* E) {
// Merges take the same approach, preferring soundness. At a confluence point,
// if any predecessor has a variable marked uninitialized, the value is
// uninitialized at the confluence point.
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
namespace {
typedef StmtDeclBitVector_Types::Union Merge;
@@ -264,28 +264,28 @@ namespace {
//===----------------------------------------------------------------------===//
// Uninitialized values checker. Scan an AST and flag variable uses
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
UninitializedValues_ValueTypes::ObserverTy::~ObserverTy() {}
namespace {
class VISIBILITY_HIDDEN UninitializedValuesChecker
: public UninitializedValues::ObserverTy {
-
+
ASTContext &Ctx;
Diagnostic &Diags;
llvm::SmallPtrSet<VarDecl*,10> AlreadyWarned;
-
+
public:
UninitializedValuesChecker(ASTContext &ctx, Diagnostic &diags)
: Ctx(ctx), Diags(diags) {}
-
+
virtual void ObserveDeclRefExpr(UninitializedValues::ValTy& V,
UninitializedValues::AnalysisDataTy& AD,
DeclRefExpr* DR, VarDecl* VD) {
assert ( AD.isTracked(VD) && "Unknown VarDecl.");
-
+
if (V(VD,AD) == Uninitialized)
if (AlreadyWarned.insert(VD))
Diags.Report(Ctx.getFullLoc(DR->getSourceRange().getBegin()),
@@ -297,13 +297,13 @@ public:
namespace clang {
void CheckUninitializedValues(CFG& cfg, ASTContext &Ctx, Diagnostic &Diags,
bool FullUninitTaint) {
-
+
// Compute the uninitialized values information.
UninitializedValues U(cfg);
U.getAnalysisData().FullUninitTaint = FullUninitTaint;
Solver S(U);
S.runOnCFG(cfg);
-
+
// Scan for DeclRefExprs that use uninitialized values.
UninitializedValuesChecker Observer(Ctx,Diags);
U.getAnalysisData().Observer = &Observer;
diff --git a/lib/Analysis/ValueManager.cpp b/lib/Analysis/ValueManager.cpp
index 724a2e92d744..fe670e79b3b5 100644
--- a/lib/Analysis/ValueManager.cpp
+++ b/lib/Analysis/ValueManager.cpp
@@ -22,16 +22,16 @@ using namespace llvm;
// Utility methods for constructing SVals.
//===----------------------------------------------------------------------===//
-SVal ValueManager::makeZeroVal(QualType T) {
+DefinedOrUnknownSVal ValueManager::makeZeroVal(QualType T) {
if (Loc::IsLocType(T))
return makeNull();
if (T->isIntegerType())
return makeIntVal(0, T);
-
+
// FIXME: Handle floats.
// FIXME: Handle structs.
- return UnknownVal();
+ return UnknownVal();
}
//===----------------------------------------------------------------------===//
@@ -55,71 +55,89 @@ NonLoc ValueManager::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
}
-SVal ValueManager::getRegionValueSymbolVal(const MemRegion* R, QualType T) {
- SymbolRef sym = SymMgr.getRegionValueSymbol(R, T);
-
- if (const TypedRegion* TR = dyn_cast<TypedRegion>(R)) {
- if (T.isNull())
- T = TR->getValueType(SymMgr.getContext());
-
- // If T is of function pointer type, create a CodeTextRegion wrapping a
- // symbol.
- if (T->isFunctionPointerType()) {
- return loc::MemRegionVal(MemMgr.getCodeTextRegion(sym, T));
- }
-
- if (Loc::IsLocType(T))
- return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
-
- // Only handle integers for now.
- if (T->isIntegerType() && T->isScalarType())
- return nonloc::SymbolVal(sym);
+SVal ValueManager::convertToArrayIndex(SVal V) {
+ if (V.isUnknownOrUndef())
+ return V;
+
+ // Common case: we have an appropriately sized integer.
+ if (nonloc::ConcreteInt* CI = dyn_cast<nonloc::ConcreteInt>(&V)) {
+ const llvm::APSInt& I = CI->getValue();
+ if (I.getBitWidth() == ArrayIndexWidth && I.isSigned())
+ return V;
}
- return UnknownVal();
+ return SVator->EvalCastNL(cast<NonLoc>(V), ArrayIndexTy);
}
-SVal ValueManager::getConjuredSymbolVal(const Expr* E, unsigned Count) {
- QualType T = E->getType();
- SymbolRef sym = SymMgr.getConjuredSymbol(E, Count);
+DefinedOrUnknownSVal ValueManager::getRegionValueSymbolVal(const MemRegion* R,
+ QualType T) {
- // If T is of function pointer type, create a CodeTextRegion wrapping a
- // symbol.
- if (T->isFunctionPointerType()) {
- return loc::MemRegionVal(MemMgr.getCodeTextRegion(sym, T));
+ if (T.isNull()) {
+ const TypedRegion* TR = cast<TypedRegion>(R);
+ T = TR->getValueType(SymMgr.getContext());
}
+ if (!SymbolManager::canSymbolicate(T))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getRegionValueSymbol(R, T);
+
if (Loc::IsLocType(T))
return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
- if (T->isIntegerType() && T->isScalarType())
- return nonloc::SymbolVal(sym);
-
- return UnknownVal();
+ return nonloc::SymbolVal(sym);
}
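Illustrative only: what the canSymbolicate() gate above lets through. Roughly, integer- and pointer-typed inputs get fresh symbols, while types the engine cannot yet symbolicate (floats, aggregates) fall back to UnknownVal.

    struct Pair { int a, b; };

    void inputs(int i, int *p, float f, struct Pair s) {
      // 'i' -> nonloc::SymbolVal, 'p' -> loc::MemRegionVal over a SymbolicRegion,
      // 'f' and 's' -> UnknownVal (no symbol is created for them yet).
      (void)i; (void)p; (void)f; (void)s;
    }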
-SVal ValueManager::getConjuredSymbolVal(const Expr* E, QualType T,
- unsigned Count) {
+DefinedOrUnknownSVal ValueManager::getConjuredSymbolVal(const void *SymbolTag,
+ const Expr *E,
+ unsigned Count) {
+ QualType T = E->getType();
+
+ if (!SymbolManager::canSymbolicate(T))
+ return UnknownVal();
- SymbolRef sym = SymMgr.getConjuredSymbol(E, T, Count);
+ SymbolRef sym = SymMgr.getConjuredSymbol(E, Count, SymbolTag);
- // If T is of function pointer type, create a CodeTextRegion wrapping a
- // symbol.
- if (T->isFunctionPointerType()) {
- return loc::MemRegionVal(MemMgr.getCodeTextRegion(sym, T));
- }
+ if (Loc::IsLocType(T))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedOrUnknownSVal ValueManager::getConjuredSymbolVal(const void *SymbolTag,
+ const Expr *E,
+ QualType T,
+ unsigned Count) {
+
+ if (!SymbolManager::canSymbolicate(T))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getConjuredSymbol(E, T, Count, SymbolTag);
if (Loc::IsLocType(T))
return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
- if (T->isIntegerType() && T->isScalarType())
- return nonloc::SymbolVal(sym);
+ return nonloc::SymbolVal(sym);
+}
+
- return UnknownVal();
+DefinedOrUnknownSVal
+ValueManager::getDerivedRegionValueSymbolVal(SymbolRef parentSymbol,
+ const TypedRegion *R) {
+ QualType T = R->getValueType(R->getContext());
+
+ if (!SymbolManager::canSymbolicate(T))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getDerivedSymbol(parentSymbol, R);
+
+ if (Loc::IsLocType(T))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
}
-SVal ValueManager::getFunctionPointer(const FunctionDecl* FD) {
- CodeTextRegion* R
- = MemMgr.getCodeTextRegion(FD, Context.getPointerType(FD->getType()));
+DefinedSVal ValueManager::getFunctionPointer(const FunctionDecl* FD) {
+ CodeTextRegion *R = MemMgr.getCodeTextRegion(FD);
return loc::MemRegionVal(R);
}
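Finally, a small sketch of what getFunctionPointer models: taking a function's address yields a location value wrapping that function's CodeTextRegion.

    int inc(int x) { return x + 1; }

    void take_address(void) {
      int (*fp)(int) = inc;   // modeled as loc::MemRegionVal over the CodeTextRegion for 'inc'
      fp(1);
    }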